-/* ELF support for AArch64.
- Copyright 2009-2013 Free Software Foundation, Inc.
+/* AArch64-specific support for NN-bit ELF.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of BFD, the Binary File Descriptor library.
indicate that foo is thread local and should be accessed via the
traditional TLS mechanims.
- The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
+ The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
against foo indicate that 'foo' is thread local and should be accessed
via a TLS descriptor mechanism.
In the traditional TLS mechanism, the double GOT entry is used to
provide the tls_index structure, containing module and offset
- entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
+ entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
on the module entry. The loader will subsequently fixup this
relocation with the module identity.
For global traditional TLS symbols the static linker places an
- R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
+ R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
will subsequently fixup the offset. For local TLS symbols the static
linker fixes up offset.
The flow:
- aarch64_check_relocs()
+ elfNN_aarch64_check_relocs()
This function is invoked for each relocation.
The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
- R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
+ R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
spotted. One time creation of local symbol data structures are
created when the first local symbol is seen.
The reference count for a symbol is incremented. The GOT type for
each symbol is marked as general dynamic.
- elf64_aarch64_allocate_dynrelocs ()
+ elfNN_aarch64_allocate_dynrelocs ()
For each global with positive reference count we allocate a double
GOT slot. For a traditional TLS symbol we allocate space for two
allocate space for one relocation on the slot. Record the GOT offset
for this symbol.
- elf64_aarch64_size_dynamic_sections ()
+ elfNN_aarch64_size_dynamic_sections ()
Iterate all input BFDS, look for in the local symbol data structure
constructed earlier for local TLS symbols and allocate them double
GOT slots along with space for a single GOT relocation. Update the
local symbol structure to record the GOT offset allocated.
- elf64_aarch64_relocate_section ()
+ elfNN_aarch64_relocate_section ()
- Calls elf64_aarch64_final_link_relocate ()
+ Calls elfNN_aarch64_final_link_relocate ()
Emit the relevant TLS relocations against the GOT for each TLS
symbol. For local TLS symbols emit the GOT offset directly. The GOT
emitted. All of the TLS code that uses the GOT offset needs to take
care to mask out this flag bit before using the offset.
- elf64_aarch64_final_link_relocate ()
+ elfNN_aarch64_final_link_relocate ()
Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
#include "bfd_stdint.h"
#include "elf-bfd.h"
#include "bfdlink.h"
+#include "objalloc.h"
#include "elf/aarch64.h"
-
-static bfd_reloc_status_type
-bfd_elf_aarch64_put_addend (bfd *abfd,
- bfd_byte *address,
- reloc_howto_type *howto, bfd_signed_vma addend);
-
-#define IS_AARCH64_TLS_RELOC(R_TYPE) \
- ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
- || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
- || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
- || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
- || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
- || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
- || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
- || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
- || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
- || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
- || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
- || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
- || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
- || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
- || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
- || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
- || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
- || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
+#include "elfxx-aarch64.h"
+
+#define ARCH_SIZE NN
+
+#if ARCH_SIZE == 64
+#define AARCH64_R(NAME) R_AARCH64_ ## NAME
+#define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
+#define HOWTO64(...) HOWTO (__VA_ARGS__)
+#define HOWTO32(...) EMPTY_HOWTO (0)
+#define LOG_FILE_ALIGN 3
+#endif
+
+#if ARCH_SIZE == 32
+#define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
+#define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
+#define HOWTO64(...) EMPTY_HOWTO (0)
+#define HOWTO32(...) HOWTO (__VA_ARGS__)
+#define LOG_FILE_ALIGN 2
+#endif
+
+#define IS_AARCH64_TLS_RELOC(R_TYPE) \
+ ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
|| IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
-#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
- ((R_TYPE) == R_AARCH64_TLSDESC_LD_PREL19 \
- || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
- || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE21 \
- || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
- || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
- || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
- || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
- || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
- || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
- || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
- || (R_TYPE) == R_AARCH64_TLSDESC)
+#define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
+ ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
+ || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
#define ELIMINATE_COPY_RELOCS 0
/* Return size of a relocation entry. HTAB is the bfd's
- elf64_aarch64_link_hash_entry. */
-#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
+ elf_aarch64_link_hash_entry. */
+#define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
-/* GOT Entry size - 8 bytes. */
-#define GOT_ENTRY_SIZE (8)
+/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
+#define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
#define PLT_ENTRY_SIZE (32)
#define PLT_SMALL_ENTRY_SIZE (16)
#define PLT_TLSDESC_ENTRY_SIZE (32)
-/* Take the PAGE component of an address or offset. */
-#define PG(x) ((x) & ~ 0xfff)
-#define PG_OFFSET(x) ((x) & 0xfff)
-
/* Encoding of the nop instruction */
#define INSN_NOP 0xd503201f
if the distance between the PLTGOT and the PLT is < 4GB use
these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
in x16 and needs to work out PLTGOT[1] by using an address of
- [x16,#-8]. */
-static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
+ [x16,#-GOT_ENTRY_SIZE]. */
+static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
{
0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
+#if ARCH_SIZE == 64
0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
+#else
+ 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
+ 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
+#endif
0x20, 0x02, 0x1f, 0xd6, /* br x17 */
0x1f, 0x20, 0x03, 0xd5, /* nop */
0x1f, 0x20, 0x03, 0xd5, /* nop */
/* Per function entry in a procedure linkage table looks like this
if the distance between the PLTGOT and the PLT is < 4GB use
these PLT entries. */
-static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
+static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
{
0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
+#if ARCH_SIZE == 64
0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
+#else
+ 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
+ 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
+#endif
0x20, 0x02, 0x1f, 0xd6, /* br x17. */
};
static const bfd_byte
-elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
+elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
- 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
+#if ARCH_SIZE == 64
+ 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
- 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
+#else
+ 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
+ 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
+#endif
+ 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
0x1f, 0x20, 0x03, 0xd5, /* nop */
0x1f, 0x20, 0x03, 0xd5, /* nop */
};
-#define elf_info_to_howto elf64_aarch64_info_to_howto
-#define elf_info_to_howto_rel elf64_aarch64_info_to_howto
+#define elf_info_to_howto elfNN_aarch64_info_to_howto
+#define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
#define AARCH64_ELF_ABI_VERSION 0
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
#define ALL_ONES (~ (bfd_vma) 0)
-static reloc_howto_type elf64_aarch64_howto_none =
- HOWTO (R_AARCH64_NONE, /* type */
- 0, /* rightshift */
- 0, /* size (0 = byte, 1 = short, 2 = long) */
- 0, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_NONE", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- 0, /* dst_mask */
- FALSE); /* pcrel_offset */
+/* Indexed by the BFD internal reloc enumerators.
+ Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
+ in reloc.c. */
-static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
+static reloc_howto_type elfNN_aarch64_howto_table[] =
{
- HOWTO (R_AARCH64_COPY, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_COPY", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
-
- HOWTO (R_AARCH64_GLOB_DAT, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_GLOB_DAT", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
-
- HOWTO (R_AARCH64_JUMP_SLOT, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_JUMP_SLOT", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
-
- HOWTO (R_AARCH64_RELATIVE, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_RELATIVE", /* name */
- TRUE, /* partial_inplace */
- ALL_ONES, /* src_mask */
- ALL_ONES, /* dst_mask */
- FALSE), /* pcrel_offset */
-
- HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLS_DTPMOD64", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- ALL_ONES, /* dst_mask */
- FALSE), /* pc_reloffset */
-
- HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLS_DTPREL64", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- ALL_ONES, /* dst_mask */
- FALSE), /* pcrel_offset */
+ EMPTY_HOWTO (0),
- HOWTO (R_AARCH64_TLS_TPREL64, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont, /* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLS_TPREL64", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- ALL_ONES, /* dst_mask */
- FALSE), /* pcrel_offset */
+ /* Basic data relocations. */
- HOWTO (R_AARCH64_TLSDESC, /* type */
+#if ARCH_SIZE == 64
+ HOWTO (R_AARCH64_NULL, /* type */
0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 64, /* bitsize */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
FALSE, /* pc_relative */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC", /* name */
+ "R_AARCH64_NULL", /* name */
FALSE, /* partial_inplace */
0, /* src_mask */
- ALL_ONES, /* dst_mask */
+ 0, /* dst_mask */
FALSE), /* pcrel_offset */
-
-};
-
-/* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
- R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
- in that slot. */
-
-static reloc_howto_type elf64_aarch64_howto_table[] =
-{
- /* Basic data relocations. */
-
- HOWTO (R_AARCH64_NULL, /* type */
+#else
+ HOWTO (R_AARCH64_NONE, /* type */
0, /* rightshift */
0, /* size (0 = byte, 1 = short, 2 = long) */
0, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_NULL", /* name */
+ "R_AARCH64_NONE", /* name */
FALSE, /* partial_inplace */
0, /* src_mask */
0, /* dst_mask */
FALSE), /* pcrel_offset */
+#endif
/* .xword: (S+A) */
- HOWTO (R_AARCH64_ABS64, /* type */
+ HOWTO64 (AARCH64_R (ABS64), /* type */
0, /* rightshift */
4, /* size (4 = long long) */
64, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ABS64", /* name */
+ AARCH64_R_STR (ABS64), /* name */
FALSE, /* partial_inplace */
ALL_ONES, /* src_mask */
ALL_ONES, /* dst_mask */
FALSE), /* pcrel_offset */
/* .word: (S+A) */
- HOWTO (R_AARCH64_ABS32, /* type */
+ HOWTO (AARCH64_R (ABS32), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
32, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ABS32", /* name */
+ AARCH64_R_STR (ABS32), /* name */
FALSE, /* partial_inplace */
0xffffffff, /* src_mask */
0xffffffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* .half: (S+A) */
- HOWTO (R_AARCH64_ABS16, /* type */
+ HOWTO (AARCH64_R (ABS16), /* type */
0, /* rightshift */
1, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ABS16", /* name */
+ AARCH64_R_STR (ABS16), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* .xword: (S+A-P) */
- HOWTO (R_AARCH64_PREL64, /* type */
+ HOWTO64 (AARCH64_R (PREL64), /* type */
0, /* rightshift */
4, /* size (4 = long long) */
64, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_PREL64", /* name */
+ AARCH64_R_STR (PREL64), /* name */
FALSE, /* partial_inplace */
ALL_ONES, /* src_mask */
ALL_ONES, /* dst_mask */
TRUE), /* pcrel_offset */
/* .word: (S+A-P) */
- HOWTO (R_AARCH64_PREL32, /* type */
+ HOWTO (AARCH64_R (PREL32), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
32, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_PREL32", /* name */
+ AARCH64_R_STR (PREL32), /* name */
FALSE, /* partial_inplace */
0xffffffff, /* src_mask */
0xffffffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* .half: (S+A-P) */
- HOWTO (R_AARCH64_PREL16, /* type */
+ HOWTO (AARCH64_R (PREL16), /* type */
0, /* rightshift */
1, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_PREL16", /* name */
+ AARCH64_R_STR (PREL16), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
unsigned data or abs address inline. */
/* MOVZ: ((S+A) >> 0) & 0xffff */
- HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
+ HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G0", /* name */
+ AARCH64_R_STR (MOVW_UABS_G0), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
- HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
+ HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G0_NC", /* name */
+ AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVZ: ((S+A) >> 16) & 0xffff */
- HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
+ HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G1", /* name */
+ AARCH64_R_STR (MOVW_UABS_G1), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
- HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
+ HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G1_NC", /* name */
+ AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVZ: ((S+A) >> 32) & 0xffff */
- HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
+ HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
32, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G2", /* name */
+ AARCH64_R_STR (MOVW_UABS_G2), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
- HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
+ HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
32, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G2_NC", /* name */
+ AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOVZ: ((S+A) >> 48) & 0xffff */
- HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
+ HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
48, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_unsigned, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_UABS_G3", /* name */
+ AARCH64_R_STR (MOVW_UABS_G3), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
to MOVN or MOVZ depending on sign of calculated value. */
/* MOV[ZN]: ((S+A) >> 0) & 0xffff */
- HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
+ HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_SABS_G0", /* name */
+ AARCH64_R_STR (MOVW_SABS_G0), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOV[ZN]: ((S+A) >> 16) & 0xffff */
- HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
+ HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_SABS_G1", /* name */
+ AARCH64_R_STR (MOVW_SABS_G1), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* MOV[ZN]: ((S+A) >> 32) & 0xffff */
- HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
+ HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
32, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_MOVW_SABS_G2", /* name */
+ AARCH64_R_STR (MOVW_SABS_G2), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
addresses: PG(x) is (x & ~0xfff). */
/* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
- HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
+ HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
19, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LD_PREL_LO19", /* name */
+ AARCH64_R_STR (LD_PREL_LO19), /* name */
FALSE, /* partial_inplace */
0x7ffff, /* src_mask */
0x7ffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* ADR: (S+A-P) & 0x1fffff */
- HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
+ HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ADR_PREL_LO21", /* name */
+ AARCH64_R_STR (ADR_PREL_LO21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
- HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
+ HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ADR_PREL_PG_HI21", /* name */
+ AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
- HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
+ HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
+ AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* ADD: (S+A) & 0xfff [no overflow check] */
- HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
10, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ADD_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0x3ffc00, /* src_mask */
0x3ffc00, /* dst_mask */
FALSE), /* pcrel_offset */
/* LD/ST8: (S+A) & 0xfff */
- HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
/* Relocations for control-flow instructions. */
/* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
- HOWTO (R_AARCH64_TSTBR14, /* type */
+ HOWTO (AARCH64_R (TSTBR14), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
14, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TSTBR14", /* name */
+ AARCH64_R_STR (TSTBR14), /* name */
FALSE, /* partial_inplace */
0x3fff, /* src_mask */
0x3fff, /* dst_mask */
TRUE), /* pcrel_offset */
/* B.cond: ((S+A-P) >> 2) & 0x7ffff */
- HOWTO (R_AARCH64_CONDBR19, /* type */
+ HOWTO (AARCH64_R (CONDBR19), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
19, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_CONDBR19", /* name */
+ AARCH64_R_STR (CONDBR19), /* name */
FALSE, /* partial_inplace */
0x7ffff, /* src_mask */
0x7ffff, /* dst_mask */
TRUE), /* pcrel_offset */
- EMPTY_HOWTO (281),
-
/* B: ((S+A-P) >> 2) & 0x3ffffff */
- HOWTO (R_AARCH64_JUMP26, /* type */
+ HOWTO (AARCH64_R (JUMP26), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
26, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_JUMP26", /* name */
+ AARCH64_R_STR (JUMP26), /* name */
FALSE, /* partial_inplace */
0x3ffffff, /* src_mask */
0x3ffffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* BL: ((S+A-P) >> 2) & 0x3ffffff */
- HOWTO (R_AARCH64_CALL26, /* type */
+ HOWTO (AARCH64_R (CALL26), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
26, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_CALL26", /* name */
+ AARCH64_R_STR (CALL26), /* name */
FALSE, /* partial_inplace */
0x3ffffff, /* src_mask */
0x3ffffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* LD/ST16: (S+A) & 0xffe */
- HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
1, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xffe, /* src_mask */
0xffe, /* dst_mask */
FALSE), /* pcrel_offset */
/* LD/ST32: (S+A) & 0xffc */
- HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xffc, /* src_mask */
0xffc, /* dst_mask */
FALSE), /* pcrel_offset */
/* LD/ST64: (S+A) & 0xff8 */
- HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
3, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xff8, /* src_mask */
0xff8, /* dst_mask */
FALSE), /* pcrel_offset */
- EMPTY_HOWTO (287),
- EMPTY_HOWTO (288),
- EMPTY_HOWTO (289),
- EMPTY_HOWTO (290),
- EMPTY_HOWTO (291),
- EMPTY_HOWTO (292),
- EMPTY_HOWTO (293),
- EMPTY_HOWTO (294),
- EMPTY_HOWTO (295),
- EMPTY_HOWTO (296),
- EMPTY_HOWTO (297),
- EMPTY_HOWTO (298),
-
/* LD/ST128: (S+A) & 0xff0 */
- HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
+ HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
4, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
+ AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xff0, /* src_mask */
0xff0, /* dst_mask */
FALSE), /* pcrel_offset */
- EMPTY_HOWTO (300),
- EMPTY_HOWTO (301),
- EMPTY_HOWTO (302),
- EMPTY_HOWTO (303),
- EMPTY_HOWTO (304),
- EMPTY_HOWTO (305),
- EMPTY_HOWTO (306),
- EMPTY_HOWTO (307),
- EMPTY_HOWTO (308),
-
/* Set a load-literal immediate field to bits
0x1FFFFC of G(S)-P */
- HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
+ HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
2, /* rightshift */
2, /* size (0 = byte,1 = short,2 = long) */
19, /* bitsize */
0, /* bitpos */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_GOT_LD_PREL19", /* name */
+ AARCH64_R_STR (GOT_LD_PREL19), /* name */
FALSE, /* partial_inplace */
0xffffe0, /* src_mask */
0xffffe0, /* dst_mask */
TRUE), /* pcrel_offset */
- EMPTY_HOWTO (310),
-
/* Get to the page for the GOT entry for the symbol
(G(S) - P) using an ADRP instruction. */
- HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
+ HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_ADR_GOT_PAGE", /* name */
+ AARCH64_R_STR (ADR_GOT_PAGE), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
- /* LD64: GOT offset G(S) & 0xff8 */
- HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
+ /* LD64: GOT offset G(S) & 0xff8 */
+ HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
3, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_LD64_GOT_LO12_NC", /* name */
+ AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xff8, /* src_mask */
0xff8, /* dst_mask */
- FALSE) /* pcrel_offset */
-};
+ FALSE), /* pcrel_offset */
-static reloc_howto_type elf64_aarch64_tls_howto_table[] =
-{
- EMPTY_HOWTO (512),
+ /* LD32: GOT offset G(S) & 0xffc */
+ HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 12, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
+ FALSE, /* partial_inplace */
+ 0xffc, /* src_mask */
+ 0xffc, /* dst_mask */
+ FALSE), /* pcrel_offset */
/* Get to the page for the GOT entry for the symbol
(G(S) - P) using an ADRP instruction. */
- HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
+ HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
+ AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
/* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
- HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
+ HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
+ AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
FALSE), /* pcrel_offset */
- EMPTY_HOWTO (515),
- EMPTY_HOWTO (516),
- EMPTY_HOWTO (517),
- EMPTY_HOWTO (518),
- EMPTY_HOWTO (519),
- EMPTY_HOWTO (520),
- EMPTY_HOWTO (521),
- EMPTY_HOWTO (522),
- EMPTY_HOWTO (523),
- EMPTY_HOWTO (524),
- EMPTY_HOWTO (525),
- EMPTY_HOWTO (526),
- EMPTY_HOWTO (527),
- EMPTY_HOWTO (528),
- EMPTY_HOWTO (529),
- EMPTY_HOWTO (530),
- EMPTY_HOWTO (531),
- EMPTY_HOWTO (532),
- EMPTY_HOWTO (533),
- EMPTY_HOWTO (534),
- EMPTY_HOWTO (535),
- EMPTY_HOWTO (536),
- EMPTY_HOWTO (537),
- EMPTY_HOWTO (538),
-
- HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
+ HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
16, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
+ AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
+ HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
32, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
+ AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
+ HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
+ AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
+ HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
3, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
+ AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xff8, /* src_mask */
0xff8, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
+ HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 12, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
+ FALSE, /* partial_inplace */
+ 0xffc, /* src_mask */
+ 0xffc, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
+ AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
FALSE, /* partial_inplace */
0x1ffffc, /* src_mask */
0x1ffffc, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
+ HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
32, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
+ AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
+ HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
+ AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
+ HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
+ AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
+ HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
+ AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
+ HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
+ AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
+ HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
+ AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
+ HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
+ AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
+ HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
+ AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
FALSE), /* pcrel_offset */
-};
-static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
-{
- HOWTO (R_AARCH64_TLSDESC_LD_PREL19, /* type */
+ HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
2, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_LD_PREL19", /* name */
+ AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
FALSE, /* partial_inplace */
0x1ffffc, /* src_mask */
0x1ffffc, /* dst_mask */
TRUE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
+ HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
+ AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
/* Get to the page for the GOT entry for the symbol
(G(S) - P) using an ADRP instruction. */
- HOWTO (R_AARCH64_TLSDESC_ADR_PAGE21, /* type */
+ HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
12, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
21, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_ADR_PAGE21", /* name */
+ AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
FALSE, /* partial_inplace */
0x1fffff, /* src_mask */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
- /* LD64: GOT offset G(S) & 0xfff. */
- HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
+ /* LD64: GOT offset G(S) & 0xff8. */
+ HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
3, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
+ AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
FALSE, /* partial_inplace */
- 0xfff, /* src_mask */
- 0xfff, /* dst_mask */
+ 0xff8, /* src_mask */
+ 0xff8, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ /* LD32: GOT offset G(S) & 0xffc. */
+ HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 12, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
+ FALSE, /* partial_inplace */
+ 0xffc, /* src_mask */
+ 0xffc, /* dst_mask */
FALSE), /* pcrel_offset */
/* ADD: GOT offset G(S) & 0xfff. */
- HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
+ HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
+ AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
FALSE, /* partial_inplace */
0xfff, /* src_mask */
0xfff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
+ HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
16, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_OFF_G1", /* name */
+ AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
+ HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
+ AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
+ HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_LDR", /* name */
+ AARCH64_R_STR (TLSDESC_LDR), /* name */
FALSE, /* partial_inplace */
0x0, /* src_mask */
0x0, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
+ HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_ADD", /* name */
+ AARCH64_R_STR (TLSDESC_ADD), /* name */
FALSE, /* partial_inplace */
0x0, /* src_mask */
0x0, /* dst_mask */
FALSE), /* pcrel_offset */
- HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
+ HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
12, /* bitsize */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
- "R_AARCH64_TLSDESC_CALL", /* name */
+ AARCH64_R_STR (TLSDESC_CALL), /* name */
FALSE, /* partial_inplace */
0x0, /* src_mask */
0x0, /* dst_mask */
FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (COPY), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (COPY), /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (GLOB_DAT), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (GLOB_DAT), /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (JUMP_SLOT), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (JUMP_SLOT), /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (RELATIVE), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (RELATIVE), /* name */
+ TRUE, /* partial_inplace */
+ ALL_ONES, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+#if ARCH_SIZE == 64
+ AARCH64_R_STR (TLS_DTPMOD64), /* name */
+#else
+ AARCH64_R_STR (TLS_DTPMOD), /* name */
+#endif
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (TLS_DTPREL), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+#if ARCH_SIZE == 64
+ AARCH64_R_STR (TLS_DTPREL64), /* name */
+#else
+ AARCH64_R_STR (TLS_DTPREL), /* name */
+#endif
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (TLS_TPREL), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+#if ARCH_SIZE == 64
+ AARCH64_R_STR (TLS_TPREL64), /* name */
+#else
+ AARCH64_R_STR (TLS_TPREL), /* name */
+#endif
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (TLSDESC), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (TLSDESC), /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (AARCH64_R (IRELATIVE), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ AARCH64_R_STR (IRELATIVE), /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ EMPTY_HOWTO (0),
};
-static reloc_howto_type *
-elf64_aarch64_howto_from_type (unsigned int r_type)
+static reloc_howto_type elfNN_aarch64_howto_none =
+ HOWTO (R_AARCH64_NONE, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AARCH64_NONE", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ FALSE); /* pcrel_offset */
+
+/* Given HOWTO, return the bfd internal relocation enumerator. */
+
+static bfd_reloc_code_real_type
+elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
{
- if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
- return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
+ const int size
+ = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
+ const ptrdiff_t offset
+ = howto - elfNN_aarch64_howto_table;
- if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
- return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
+ if (offset > 0 && offset < size - 1)
+ return BFD_RELOC_AARCH64_RELOC_START + offset;
- if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
- return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
+ if (howto == &elfNN_aarch64_howto_none)
+ return BFD_RELOC_AARCH64_NONE;
- if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
- return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
+ return BFD_RELOC_AARCH64_RELOC_START;
+}
- switch (r_type)
+/* Given R_TYPE, return the bfd internal relocation enumerator. */
+
+static bfd_reloc_code_real_type
+elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
+{
+ static bfd_boolean initialized_p = FALSE;
+ /* Indexed by R_TYPE, values are offsets in the howto_table. */
+ static unsigned int offsets[R_AARCH64_end];
+
+ if (initialized_p == FALSE)
{
- case R_AARCH64_NONE:
- return &elf64_aarch64_howto_none;
+ unsigned int i;
+ for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
+ if (elfNN_aarch64_howto_table[i].type != 0)
+ offsets[elfNN_aarch64_howto_table[i].type] = i;
+
+ initialized_p = TRUE;
}
- bfd_set_error (bfd_error_bad_value);
- return NULL;
-}
-static void
-elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
- Elf_Internal_Rela *elf_reloc)
-{
- unsigned int r_type;
+ if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
+ return BFD_RELOC_AARCH64_NONE;
- r_type = ELF64_R_TYPE (elf_reloc->r_info);
- bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
+ return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
}
-struct elf64_aarch64_reloc_map
+struct elf_aarch64_reloc_map
{
- bfd_reloc_code_real_type bfd_reloc_val;
- unsigned int elf_reloc_val;
+ bfd_reloc_code_real_type from;
+ bfd_reloc_code_real_type to;
};
-/* All entries in this list must also be present in
- elf64_aarch64_howto_table. */
-static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
+/* Map bfd generic reloc to AArch64-specific reloc. */
+static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
{
- {BFD_RELOC_NONE, R_AARCH64_NONE},
+ {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
/* Basic data relocations. */
- {BFD_RELOC_CTOR, R_AARCH64_ABS64},
- {BFD_RELOC_64, R_AARCH64_ABS64},
- {BFD_RELOC_32, R_AARCH64_ABS32},
- {BFD_RELOC_16, R_AARCH64_ABS16},
- {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
- {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
- {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
-
- /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
- value inline. */
- {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
- {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
- {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
-
- /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
- signed value inline. */
- {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
- {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
- {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
-
- /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
- unsigned value inline. */
- {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
- {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
- {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
- {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
-
- /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
- {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
- {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
- {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
- {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
- {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
- {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
- {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
- {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
- {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
- {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
-
- /* Relocations for control-flow instructions. */
- {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
- {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
- {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
- {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
-
- /* Relocations for PIC. */
- {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
- {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
- {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
-
- /* Relocations for TLS. */
- {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
- {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
- {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
- R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
- {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
- R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
- {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
- R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
- {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
- R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
- {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
- R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
- {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
- {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
- {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
- R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
- {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
- {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
- R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
- {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
- {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
- {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
- R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
- {BFD_RELOC_AARCH64_TLSDESC_LD_PREL19, R_AARCH64_TLSDESC_LD_PREL19},
- {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
- {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21, R_AARCH64_TLSDESC_ADR_PAGE21},
- {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
- {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
- {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
- {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
- {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
- {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
- {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
- {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
- {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
- {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
- {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
+ {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
+ {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
+ {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
+ {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
+ {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
+ {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
+ {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
};
+/* Given the bfd internal relocation enumerator in CODE, return the
+ corresponding howto entry. */
+
static reloc_howto_type *
-elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
- bfd_reloc_code_real_type code)
+elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
- if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
- return elf64_aarch64_howto_from_type
- (elf64_aarch64_reloc_map[i].elf_reloc_val);
+ /* Convert bfd generic reloc to AArch64-specific reloc. */
+ if (code < BFD_RELOC_AARCH64_RELOC_START
+ || code > BFD_RELOC_AARCH64_RELOC_END)
+ for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
+ if (elf_aarch64_reloc_map[i].from == code)
+ {
+ code = elf_aarch64_reloc_map[i].to;
+ break;
+ }
+
+ if (code > BFD_RELOC_AARCH64_RELOC_START
+ && code < BFD_RELOC_AARCH64_RELOC_END)
+ if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
+ return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
+
+ if (code == BFD_RELOC_AARCH64_NONE)
+ return &elfNN_aarch64_howto_none;
- bfd_set_error (bfd_error_bad_value);
return NULL;
}
static reloc_howto_type *
-elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
- const char *r_name)
+elfNN_aarch64_howto_from_type (unsigned int r_type)
{
- unsigned int i;
+ bfd_reloc_code_real_type val;
+ reloc_howto_type *howto;
- for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
- if (elf64_aarch64_howto_table[i].name != NULL
- && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
- return &elf64_aarch64_howto_table[i];
+#if ARCH_SIZE == 32
+ if (r_type > 256)
+ {
+ bfd_set_error (bfd_error_bad_value);
+ return NULL;
+ }
+#endif
- return NULL;
-}
+ if (r_type == R_AARCH64_NONE)
+ return &elfNN_aarch64_howto_none;
-/* Support for core dump NOTE sections. */
+ val = elfNN_aarch64_bfd_reloc_from_type (r_type);
+ howto = elfNN_aarch64_howto_from_bfd_reloc (val);
-static bfd_boolean
-elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
-{
- int offset;
- size_t size;
+ if (howto != NULL)
+ return howto;
- switch (note->descsz)
- {
- default:
- return FALSE;
+ bfd_set_error (bfd_error_bad_value);
+ return NULL;
+}
- case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
- /* pr_cursig */
- elf_tdata (abfd)->core->signal
- = bfd_get_16 (abfd, note->descdata + 12);
+static void
+elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
+ Elf_Internal_Rela *elf_reloc)
+{
+ unsigned int r_type;
- /* pr_pid */
- elf_tdata (abfd)->core->lwpid
- = bfd_get_32 (abfd, note->descdata + 32);
+ r_type = ELFNN_R_TYPE (elf_reloc->r_info);
+ bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
+}
- /* pr_reg */
- offset = 112;
- size = 272;
+static reloc_howto_type *
+elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ bfd_reloc_code_real_type code)
+{
+ reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
- break;
- }
+ if (howto != NULL)
+ return howto;
- /* Make a ".reg/999" section. */
- return _bfd_elfcore_make_pseudosection (abfd, ".reg",
- size, note->descpos + offset);
+ bfd_set_error (bfd_error_bad_value);
+ return NULL;
}
-#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
-#define TARGET_LITTLE_NAME "elf64-littleaarch64"
-#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
-#define TARGET_BIG_NAME "elf64-bigaarch64"
+static reloc_howto_type *
+elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ const char *r_name)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
+ if (elfNN_aarch64_howto_table[i].name != NULL
+ && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
+ return &elfNN_aarch64_howto_table[i];
-#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
+ return NULL;
+}
-typedef unsigned long int insn32;
+#define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
+#define TARGET_LITTLE_NAME "elfNN-littleaarch64"
+#define TARGET_BIG_SYM aarch64_elfNN_be_vec
+#define TARGET_BIG_NAME "elfNN-bigaarch64"
/* The linker script knows the section names for placement.
The entry_names are used to do simple name mangling on the stubs.
static const uint32_t aarch64_long_branch_stub[] =
{
+#if ARCH_SIZE == 64
0x58000090, /* ldr ip0, 1f */
+#else
+ 0x18000090, /* ldr wip0, 1f */
+#endif
0x10000011, /* adr ip1, #0 */
0x8b110210, /* add ip0, ip0, ip1 */
0xd61f0200, /* br ip0 */
- 0x00000000, /* 1: .xword
- R_AARCH64_PREL64(X) + 12
+ 0x00000000, /* 1: .xword or .word
+ R_AARCH64_PRELNN(X) + 12
*/
0x00000000,
};
string. */
#define STUB_SUFFIX ".stub"
-enum elf64_aarch64_stub_type
+enum elf_aarch64_stub_type
{
aarch64_stub_none,
aarch64_stub_adrp_branch,
aarch64_stub_long_branch,
};
-struct elf64_aarch64_stub_hash_entry
+struct elf_aarch64_stub_hash_entry
{
/* Base hash table entry structure. */
struct bfd_hash_entry root;
bfd_vma target_value;
asection *target_section;
- enum elf64_aarch64_stub_type stub_type;
+ enum elf_aarch64_stub_type stub_type;
/* The symbol table entry, if any, that this was derived from. */
- struct elf64_aarch64_link_hash_entry *h;
+ struct elf_aarch64_link_hash_entry *h;
 /* Destination symbol type.  */
unsigned char st_type;
/* Used to build a map of a section. This is required for mixed-endian
code/data. */
-typedef struct elf64_elf_section_map
+typedef struct elf_elf_section_map
{
bfd_vma vma;
char type;
}
-elf64_aarch64_section_map;
+elf_aarch64_section_map;
typedef struct _aarch64_elf_section_data
struct bfd_elf_section_data elf;
unsigned int mapcount;
unsigned int mapsize;
- elf64_aarch64_section_map *map;
+ elf_aarch64_section_map *map;
}
_aarch64_elf_section_data;
-#define elf64_aarch64_section_data(sec) \
+#define elf_aarch64_section_data(sec) \
((_aarch64_elf_section_data *) elf_section_data (sec))
-/* The size of the thread control block. */
-#define TCB_SIZE 16
+/* The size of the thread control block which is defined to be two pointers. */
+#define TCB_SIZE (ARCH_SIZE/8)*2
struct elf_aarch64_local_symbol
{
#define elf_aarch64_tdata(bfd) \
((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
-#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
+#define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
#define is_aarch64_elf(bfd) \
(bfd_get_flavour (bfd) == bfd_target_elf_flavour \
&& elf_object_id (bfd) == AARCH64_ELF_DATA)
static bfd_boolean
-elf64_aarch64_mkobject (bfd *abfd)
+elfNN_aarch64_mkobject (bfd *abfd)
{
return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
AARCH64_ELF_DATA);
}
-#define elf64_aarch64_hash_entry(ent) \
- ((struct elf64_aarch64_link_hash_entry *)(ent))
+#define elf_aarch64_hash_entry(ent) \
+ ((struct elf_aarch64_link_hash_entry *)(ent))
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
/* AArch64 ELF linker hash entry. */
-struct elf64_aarch64_link_hash_entry
+struct elf_aarch64_link_hash_entry
{
struct elf_link_hash_entry root;
/* A pointer to the most recently used stub hash entry against this
symbol. */
- struct elf64_aarch64_stub_hash_entry *stub_cache;
+ struct elf_aarch64_stub_hash_entry *stub_cache;
/* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
is from the end of the jump table and reserved entries within the PLTGOT.
};
static unsigned int
-elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
+elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
bfd *abfd,
unsigned long r_symndx)
{
if (h)
- return elf64_aarch64_hash_entry (h)->got_type;
+ return elf_aarch64_hash_entry (h)->got_type;
- if (! elf64_aarch64_locals (abfd))
+ if (! elf_aarch64_locals (abfd))
return GOT_UNKNOWN;
- return elf64_aarch64_locals (abfd)[r_symndx].got_type;
+ return elf_aarch64_locals (abfd)[r_symndx].got_type;
}
/* Get the AArch64 elf linker hash table from a link_info structure. */
-#define elf64_aarch64_hash_table(info) \
- ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
+#define elf_aarch64_hash_table(info) \
+ ((struct elf_aarch64_link_hash_table *) ((info)->hash))
#define aarch64_stub_hash_lookup(table, string, create, copy) \
- ((struct elf64_aarch64_stub_hash_entry *) \
+ ((struct elf_aarch64_stub_hash_entry *) \
bfd_hash_lookup ((table), (string), (create), (copy)))
/* AArch64 ELF linker hash table. */
-struct elf64_aarch64_link_hash_table
+struct elf_aarch64_link_hash_table
{
/* The main hash table. */
struct elf_link_hash_table root;
asection *stub_sec;
} *stub_group;
- /* Assorted information used by elf64_aarch64_size_stubs. */
+ /* Assorted information used by elfNN_aarch64_size_stubs. */
unsigned int bfd_count;
int top_index;
asection **input_list;
loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
indicates an offset is not allocated. */
bfd_vma dt_tlsdesc_got;
-};
-
-
-/* Return non-zero if the indicated VALUE has overflowed the maximum
- range expressible by a unsigned number with the indicated number of
- BITS. */
-
-static bfd_reloc_status_type
-aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
-{
- bfd_vma lim;
- if (bits >= sizeof (bfd_vma) * 8)
- return bfd_reloc_ok;
- lim = (bfd_vma) 1 << bits;
- if (value >= lim)
- return bfd_reloc_overflow;
- return bfd_reloc_ok;
-}
-
-
-/* Return non-zero if the indicated VALUE has overflowed the maximum
- range expressible by an signed number with the indicated number of
- BITS. */
-static bfd_reloc_status_type
-aarch64_signed_overflow (bfd_vma value, unsigned int bits)
-{
- bfd_signed_vma svalue = (bfd_signed_vma) value;
- bfd_signed_vma lim;
-
- if (bits >= sizeof (bfd_vma) * 8)
- return bfd_reloc_ok;
- lim = (bfd_signed_vma) 1 << (bits - 1);
- if (svalue < -lim || svalue >= lim)
- return bfd_reloc_overflow;
- return bfd_reloc_ok;
-}
+ /* Used by local STT_GNU_IFUNC symbols. */
+ htab_t loc_hash_table;
+ void * loc_hash_memory;
+};
/* Create an entry in an AArch64 ELF linker hash table. */
static struct bfd_hash_entry *
-elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
+elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
struct bfd_hash_table *table,
const char *string)
{
- struct elf64_aarch64_link_hash_entry *ret =
- (struct elf64_aarch64_link_hash_entry *) entry;
+ struct elf_aarch64_link_hash_entry *ret =
+ (struct elf_aarch64_link_hash_entry *) entry;
/* Allocate the structure if it has not already been allocated by a
subclass. */
if (ret == NULL)
ret = bfd_hash_allocate (table,
- sizeof (struct elf64_aarch64_link_hash_entry));
+ sizeof (struct elf_aarch64_link_hash_entry));
if (ret == NULL)
return (struct bfd_hash_entry *) ret;
/* Call the allocation method of the superclass. */
- ret = ((struct elf64_aarch64_link_hash_entry *)
+ ret = ((struct elf_aarch64_link_hash_entry *)
_bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
table, string));
if (ret != NULL)
{
entry = bfd_hash_allocate (table,
sizeof (struct
- elf64_aarch64_stub_hash_entry));
+ elf_aarch64_stub_hash_entry));
if (entry == NULL)
return entry;
}
entry = bfd_hash_newfunc (entry, table, string);
if (entry != NULL)
{
- struct elf64_aarch64_stub_hash_entry *eh;
+ struct elf_aarch64_stub_hash_entry *eh;
/* Initialize the local fields. */
- eh = (struct elf64_aarch64_stub_hash_entry *) entry;
+ eh = (struct elf_aarch64_stub_hash_entry *) entry;
eh->stub_sec = NULL;
eh->stub_offset = 0;
eh->target_value = 0;
return entry;
}
+/* Compute a hash of a local hash entry. We use elf_link_hash_entry
+ for local symbol so that we can handle local STT_GNU_IFUNC symbols
+ as global symbol. We reuse indx and dynstr_index for local symbol
+ hash since they aren't used by global symbols in this backend. */
+
+static hashval_t
+elfNN_aarch64_local_htab_hash (const void *ptr)
+{
+ struct elf_link_hash_entry *h
+ = (struct elf_link_hash_entry *) ptr;
+ return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
+}
+
+/* Compare local hash entries. */
+
+static int
+elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
+{
+ struct elf_link_hash_entry *h1
+ = (struct elf_link_hash_entry *) ptr1;
+ struct elf_link_hash_entry *h2
+ = (struct elf_link_hash_entry *) ptr2;
+
+ return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
+}
+
+/* Find and/or create a hash entry for local symbol. */
+
+static struct elf_link_hash_entry *
+elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
+ bfd *abfd, const Elf_Internal_Rela *rel,
+ bfd_boolean create)
+{
+ struct elf_aarch64_link_hash_entry e, *ret;
+ asection *sec = abfd->sections;
+ hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
+ ELFNN_R_SYM (rel->r_info));
+ void **slot;
+
+ e.root.indx = sec->id;
+ e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
+ slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
+ create ? INSERT : NO_INSERT);
+
+ if (!slot)
+ return NULL;
+
+ if (*slot)
+ {
+ ret = (struct elf_aarch64_link_hash_entry *) *slot;
+ return &ret->root;
+ }
+
+ ret = (struct elf_aarch64_link_hash_entry *)
+ objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
+ sizeof (struct elf_aarch64_link_hash_entry));
+ if (ret)
+ {
+ memset (ret, 0, sizeof (*ret));
+ ret->root.indx = sec->id;
+ ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
+ ret->root.dynindx = -1;
+ *slot = ret;
+ }
+ return &ret->root;
+}
/* Copy the extra info we tack onto an elf_link_hash_entry. */
static void
-elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
+elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
struct elf_link_hash_entry *dir,
struct elf_link_hash_entry *ind)
{
- struct elf64_aarch64_link_hash_entry *edir, *eind;
+ struct elf_aarch64_link_hash_entry *edir, *eind;
- edir = (struct elf64_aarch64_link_hash_entry *) dir;
- eind = (struct elf64_aarch64_link_hash_entry *) ind;
+ edir = (struct elf_aarch64_link_hash_entry *) dir;
+ eind = (struct elf_aarch64_link_hash_entry *) ind;
if (eind->dyn_relocs != NULL)
{
/* Create an AArch64 elf linker hash table. */
static struct bfd_link_hash_table *
-elf64_aarch64_link_hash_table_create (bfd *abfd)
+elfNN_aarch64_link_hash_table_create (bfd *abfd)
{
- struct elf64_aarch64_link_hash_table *ret;
- bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);
+ struct elf_aarch64_link_hash_table *ret;
+ bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
ret = bfd_zmalloc (amt);
if (ret == NULL)
return NULL;
if (!_bfd_elf_link_hash_table_init
- (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
- sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
+ (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
+ sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
{
free (ret);
return NULL;
ret->dt_tlsdesc_got = (bfd_vma) - 1;
if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
- sizeof (struct elf64_aarch64_stub_hash_entry)))
+ sizeof (struct elf_aarch64_stub_hash_entry)))
+ {
+ free (ret);
+ return NULL;
+ }
+
+ ret->loc_hash_table = htab_try_create (1024,
+ elfNN_aarch64_local_htab_hash,
+ elfNN_aarch64_local_htab_eq,
+ NULL);
+ ret->loc_hash_memory = objalloc_create ();
+ if (!ret->loc_hash_table || !ret->loc_hash_memory)
{
free (ret);
return NULL;
/* Free the derived linker hash table. */
static void
-elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
+elfNN_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
{
- struct elf64_aarch64_link_hash_table *ret
- = (struct elf64_aarch64_link_hash_table *) hash;
+ struct elf_aarch64_link_hash_table *ret
+ = (struct elf_aarch64_link_hash_table *) hash;
+
+ if (ret->loc_hash_table)
+ htab_delete (ret->loc_hash_table);
+ if (ret->loc_hash_memory)
+ objalloc_free ((struct objalloc *) ret->loc_hash_memory);
bfd_hash_table_free (&ret->stub_hash_table);
_bfd_elf_link_hash_table_free (hash);
}
-static bfd_vma
-aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
- bfd_vma addend, bfd_boolean weak_undef_p)
-{
- switch (r_type)
- {
- case R_AARCH64_TLSDESC_CALL:
- case R_AARCH64_NONE:
- case R_AARCH64_NULL:
- break;
-
- case R_AARCH64_ADR_PREL_LO21:
- case R_AARCH64_CONDBR19:
- case R_AARCH64_LD_PREL_LO19:
- case R_AARCH64_PREL16:
- case R_AARCH64_PREL32:
- case R_AARCH64_PREL64:
- case R_AARCH64_TSTBR14:
- if (weak_undef_p)
- value = place;
- value = value + addend - place;
- break;
-
- case R_AARCH64_CALL26:
- case R_AARCH64_JUMP26:
- value = value + addend - place;
- break;
-
- case R_AARCH64_ABS16:
- case R_AARCH64_ABS32:
- case R_AARCH64_MOVW_SABS_G0:
- case R_AARCH64_MOVW_SABS_G1:
- case R_AARCH64_MOVW_SABS_G2:
- case R_AARCH64_MOVW_UABS_G0:
- case R_AARCH64_MOVW_UABS_G0_NC:
- case R_AARCH64_MOVW_UABS_G1:
- case R_AARCH64_MOVW_UABS_G1_NC:
- case R_AARCH64_MOVW_UABS_G2:
- case R_AARCH64_MOVW_UABS_G2_NC:
- case R_AARCH64_MOVW_UABS_G3:
- value = value + addend;
- break;
-
- case R_AARCH64_ADR_PREL_PG_HI21:
- case R_AARCH64_ADR_PREL_PG_HI21_NC:
- if (weak_undef_p)
- value = PG (place);
- value = PG (value + addend) - PG (place);
- break;
-
- case R_AARCH64_GOT_LD_PREL19:
- value = value + addend - place;
- break;
-
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- value = PG (value + addend) - PG (place);
- break;
-
- case R_AARCH64_ADD_ABS_LO12_NC:
- case R_AARCH64_LD64_GOT_LO12_NC:
- case R_AARCH64_LDST8_ABS_LO12_NC:
- case R_AARCH64_LDST16_ABS_LO12_NC:
- case R_AARCH64_LDST32_ABS_LO12_NC:
- case R_AARCH64_LDST64_ABS_LO12_NC:
- case R_AARCH64_LDST128_ABS_LO12_NC:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_ADD:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- case R_AARCH64_TLSDESC_LDR:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- value = PG_OFFSET (value + addend);
- break;
-
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- value = (value + addend) & (bfd_vma) 0xffff0000;
- break;
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- value = (value + addend) & (bfd_vma) 0xfff000;
- break;
-
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- value = (value + addend) & (bfd_vma) 0xffff;
- break;
-
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- value = (value + addend) & ~(bfd_vma) 0xffffffff;
- value -= place & ~(bfd_vma) 0xffffffff;
- break;
- }
- return value;
-}
-
static bfd_boolean
aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
bfd_vma offset, bfd_vma value)
reloc_howto_type *howto;
bfd_vma place;
- howto = elf64_aarch64_howto_from_type (r_type);
+ howto = elfNN_aarch64_howto_from_type (r_type);
place = (input_section->output_section->vma + input_section->output_offset
+ offset);
- value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
- return bfd_elf_aarch64_put_addend (input_bfd,
- input_section->contents + offset,
- howto, value);
+
+ r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
+ value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
+ return _bfd_aarch64_elf_put_addend (input_bfd,
+ input_section->contents + offset, r_type,
+ howto, value);
}
-static enum elf64_aarch64_stub_type
+static enum elf_aarch64_stub_type
aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
{
if (aarch64_valid_for_adrp_p (value, place))
/* Determine the type of stub needed, if any, for a call. */
-static enum elf64_aarch64_stub_type
+static enum elf_aarch64_stub_type
aarch64_type_of_stub (struct bfd_link_info *info,
asection *input_sec,
const Elf_Internal_Rela *rel,
unsigned char st_type,
- struct elf64_aarch64_link_hash_entry *hash,
+ struct elf_aarch64_link_hash_entry *hash,
bfd_vma destination)
{
bfd_vma location;
bfd_signed_vma branch_offset;
unsigned int r_type;
- struct elf64_aarch64_link_hash_table *globals;
- enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
+ struct elf_aarch64_link_hash_table *globals;
+ enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
bfd_boolean via_plt_p;
if (st_type != STT_FUNC)
return stub_type;
- globals = elf64_aarch64_hash_table (info);
+ globals = elf_aarch64_hash_table (info);
via_plt_p = (globals->root.splt != NULL && hash != NULL
&& hash->root.plt.offset != (bfd_vma) - 1);
branch_offset = (bfd_signed_vma) (destination - location);
- r_type = ELF64_R_TYPE (rel->r_info);
+ r_type = ELFNN_R_TYPE (rel->r_info);
/* We don't want to redirect any old unconditional jump in this way,
only one which is being used for a sibcall, where it is
acceptable for the IP0 and IP1 registers to be clobbered. */
- if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
+ if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
&& (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
|| branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
{
/* Build a name for an entry in the stub hash table. */
static char *
-elf64_aarch64_stub_name (const asection *input_section,
+elfNN_aarch64_stub_name (const asection *input_section,
const asection *sym_sec,
- const struct elf64_aarch64_link_hash_entry *hash,
+ const struct elf_aarch64_link_hash_entry *hash,
const Elf_Internal_Rela *rel)
{
char *stub_name;
snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
(unsigned int) input_section->id,
(unsigned int) sym_sec->id,
- (unsigned int) ELF64_R_SYM (rel->r_info),
+ (unsigned int) ELFNN_R_SYM (rel->r_info),
rel->r_addend);
}
/* Look up an entry in the stub hash. Stub entries are cached because
creating the stub name takes a bit of time. */
-static struct elf64_aarch64_stub_hash_entry *
-elf64_aarch64_get_stub_entry (const asection *input_section,
+static struct elf_aarch64_stub_hash_entry *
+elfNN_aarch64_get_stub_entry (const asection *input_section,
const asection *sym_sec,
struct elf_link_hash_entry *hash,
const Elf_Internal_Rela *rel,
- struct elf64_aarch64_link_hash_table *htab)
+ struct elf_aarch64_link_hash_table *htab)
{
- struct elf64_aarch64_stub_hash_entry *stub_entry;
- struct elf64_aarch64_link_hash_entry *h =
- (struct elf64_aarch64_link_hash_entry *) hash;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_link_hash_entry *h =
+ (struct elf_aarch64_link_hash_entry *) hash;
const asection *id_sec;
if ((input_section->flags & SEC_CODE) == 0)
{
char *stub_name;
- stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
+ stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
if (stub_name == NULL)
return NULL;
/* Add a new stub entry to the stub hash. Not all fields of the new
stub entry are initialised. */
-static struct elf64_aarch64_stub_hash_entry *
-elf64_aarch64_add_stub (const char *stub_name,
+static struct elf_aarch64_stub_hash_entry *
+elfNN_aarch64_add_stub (const char *stub_name,
asection *section,
- struct elf64_aarch64_link_hash_table *htab)
+ struct elf_aarch64_link_hash_table *htab)
{
asection *link_sec;
asection *stub_sec;
- struct elf64_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
link_sec = htab->stub_group[section->id].link_sec;
stub_sec = htab->stub_group[section->id].stub_sec;
aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
void *in_arg ATTRIBUTE_UNUSED)
{
- struct elf64_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
asection *stub_sec;
bfd *stub_bfd;
bfd_byte *loc;
unsigned int i;
/* Massage our args to the form they really have. */
- stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
+ stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
stub_sec = stub_entry->stub_sec;
switch (stub_entry->stub_type)
{
case aarch64_stub_adrp_branch:
- if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
+ if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
stub_entry->stub_offset, sym_value))
/* The stub would not have been relaxed if the offset was out
of range. */
BFD_FAIL ();
_bfd_final_link_relocate
- (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
+ (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
stub_bfd,
stub_sec,
stub_sec->contents,
case aarch64_stub_long_branch:
/* We want the value relative to the address 12 bytes back from the
value itself. */
- _bfd_final_link_relocate (elf64_aarch64_howto_from_type
- (R_AARCH64_PREL64), stub_bfd, stub_sec,
+ _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
+ (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
stub_sec->contents,
stub_entry->stub_offset + 16,
sym_value + 12, 0);
aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
void *in_arg ATTRIBUTE_UNUSED)
{
- struct elf64_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
int size;
/* Massage our args to the form they really have. */
- stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
+ stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
switch (stub_entry->stub_type)
{
0 when no stubs will be needed, and 1 on success. */
int
-elf64_aarch64_setup_section_lists (bfd *output_bfd,
+elfNN_aarch64_setup_section_lists (bfd *output_bfd,
struct bfd_link_info *info)
{
bfd *input_bfd;
asection *section;
asection **input_list, **list;
bfd_size_type amt;
- struct elf64_aarch64_link_hash_table *htab =
- elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *htab =
+ elf_aarch64_hash_table (info);
if (!is_elf_hash_table (htab))
return 0;
return 1;
}
-/* Used by elf64_aarch64_next_input_section and group_sections. */
+/* Used by elfNN_aarch64_next_input_section and group_sections. */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
/* The linker repeatedly calls this function for each input section,
we may insert linker stubs. */
void
-elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
+elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
{
- struct elf64_aarch64_link_hash_table *htab =
- elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *htab =
+ elf_aarch64_hash_table (info);
if (isec->output_section->index <= htab->top_index)
{
the middle of a function is not a good idea. */
static void
-group_sections (struct elf64_aarch64_link_hash_table *htab,
+group_sections (struct elf_aarch64_link_hash_table *htab,
bfd_size_type stub_group_size,
bfd_boolean stubs_always_before_branch)
{
instruction. */
bfd_boolean
-elf64_aarch64_size_stubs (bfd *output_bfd,
+elfNN_aarch64_size_stubs (bfd *output_bfd,
bfd *stub_bfd,
struct bfd_link_info *info,
bfd_signed_vma group_size,
bfd_size_type stub_group_size;
bfd_boolean stubs_always_before_branch;
bfd_boolean stub_changed = 0;
- struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
/* Propagate mach to stub bfd, because it may not have been
finalized when we created stub_bfd. */
for (; irela < irelaend; irela++)
{
unsigned int r_type, r_indx;
- enum elf64_aarch64_stub_type stub_type;
- struct elf64_aarch64_stub_hash_entry *stub_entry;
+ enum elf_aarch64_stub_type stub_type;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
asection *sym_sec;
bfd_vma sym_value;
bfd_vma destination;
- struct elf64_aarch64_link_hash_entry *hash;
+ struct elf_aarch64_link_hash_entry *hash;
const char *sym_name;
char *stub_name;
const asection *id_sec;
unsigned char st_type;
bfd_size_type len;
- r_type = ELF64_R_TYPE (irela->r_info);
- r_indx = ELF64_R_SYM (irela->r_info);
+ r_type = ELFNN_R_TYPE (irela->r_info);
+ r_indx = ELFNN_R_SYM (irela->r_info);
if (r_type >= (unsigned int) R_AARCH64_end)
{
/* Only look for stubs on unconditional branch and
branch and link instructions. */
- if (r_type != (unsigned int) R_AARCH64_CALL26
- && r_type != (unsigned int) R_AARCH64_JUMP26)
+ if (r_type != (unsigned int) AARCH64_R (CALL26)
+ && r_type != (unsigned int) AARCH64_R (JUMP26))
continue;
/* Now determine the call target, its name, value,
int e_indx;
e_indx = r_indx - symtab_hdr->sh_info;
- hash = ((struct elf64_aarch64_link_hash_entry *)
+ hash = ((struct elf_aarch64_link_hash_entry *)
elf_sym_hashes (input_bfd)[e_indx]);
while (hash->root.root.type == bfd_link_hash_indirect
|| hash->root.root.type == bfd_link_hash_warning)
- hash = ((struct elf64_aarch64_link_hash_entry *)
+ hash = ((struct elf_aarch64_link_hash_entry *)
hash->root.root.u.i.link);
if (hash->root.root.type == bfd_link_hash_defined
|| hash->root.root.type == bfd_link_hash_defweak)
{
- struct elf64_aarch64_link_hash_table *globals =
- elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *globals =
+ elf_aarch64_hash_table (info);
sym_sec = hash->root.root.u.def.section;
sym_value = hash->root.root.u.def.value;
/* For a destination in a shared library,
target address to decide whether a long
branch stub is needed.
For absolute code, they cannot be handled. */
- struct elf64_aarch64_link_hash_table *globals =
- elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *globals =
+ elf_aarch64_hash_table (info);
if (globals->root.splt != NULL && hash != NULL
&& hash->root.plt.offset != (bfd_vma) - 1)
id_sec = htab->stub_group[section->id].link_sec;
/* Get the name of this stub. */
- stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
+ stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
irela);
if (!stub_name)
goto error_ret_free_internal;
continue;
}
- stub_entry = elf64_aarch64_add_stub (stub_name, section,
+ stub_entry = elfNN_aarch64_add_stub (stub_name, section,
htab);
if (stub_entry == NULL)
{
linker. */
bfd_boolean
-elf64_aarch64_build_stubs (struct bfd_link_info *info)
+elfNN_aarch64_build_stubs (struct bfd_link_info *info)
{
asection *stub_sec;
struct bfd_hash_table *table;
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
for (stub_sec = htab->stub_bfd->sections;
stub_sec != NULL; stub_sec = stub_sec->next)
/* Add an entry to the code/data map for section SEC. */
static void
-elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
+elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
{
struct _aarch64_elf_section_data *sec_data =
- elf64_aarch64_section_data (sec);
+ elf_aarch64_section_data (sec);
unsigned int newidx;
if (sec_data->map == NULL)
{
- sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
+ sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
sec_data->mapcount = 0;
sec_data->mapsize = 1;
}
{
sec_data->mapsize *= 2;
sec_data->map = bfd_realloc_or_free
- (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
+ (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
}
if (sec_data->map)
/* Initialise maps of insn/data for input BFDs. */
void
-bfd_elf64_aarch64_init_maps (bfd *abfd)
+bfd_elfNN_aarch64_init_maps (bfd *abfd)
{
Elf_Internal_Sym *isymbuf;
Elf_Internal_Shdr *hdr;
if (bfd_is_aarch64_special_symbol_name
(name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
- elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
+ elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
}
}
}
/* Set option values needed during linking. */
void
-bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
+bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
struct bfd_link_info *link_info,
int no_enum_warn,
int no_wchar_warn, int pic_veneer)
{
- struct elf64_aarch64_link_hash_table *globals;
+ struct elf_aarch64_link_hash_table *globals;
- globals = elf64_aarch64_hash_table (link_info);
+ globals = elf_aarch64_hash_table (link_info);
globals->pic_veneer = pic_veneer;
BFD_ASSERT (is_aarch64_elf (output_bfd));
elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
}
-#define MASK(n) ((1u << (n)) - 1)
-
-/* Decode the 26-bit offset of unconditional branch. */
-static inline uint32_t
-decode_branch_ofs_26 (uint32_t insn)
-{
- return insn & MASK (26);
-}
-
-/* Decode the 19-bit offset of conditional branch and compare & branch. */
-static inline uint32_t
-decode_cond_branch_ofs_19 (uint32_t insn)
-{
- return (insn >> 5) & MASK (19);
-}
-
-/* Decode the 19-bit offset of load literal. */
-static inline uint32_t
-decode_ld_lit_ofs_19 (uint32_t insn)
-{
- return (insn >> 5) & MASK (19);
-}
-
-/* Decode the 14-bit offset of test & branch. */
-static inline uint32_t
-decode_tst_branch_ofs_14 (uint32_t insn)
-{
- return (insn >> 5) & MASK (14);
-}
-
-/* Decode the 16-bit imm of move wide. */
-static inline uint32_t
-decode_movw_imm (uint32_t insn)
-{
- return (insn >> 5) & MASK (16);
-}
-
-/* Decode the 21-bit imm of adr. */
-static inline uint32_t
-decode_adr_imm (uint32_t insn)
-{
- return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
-}
-
-/* Decode the 12-bit imm of add immediate. */
-static inline uint32_t
-decode_add_imm (uint32_t insn)
-{
- return (insn >> 10) & MASK (12);
-}
-
-
-/* Encode the 26-bit offset of unconditional branch. */
-static inline uint32_t
-reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
-{
- return (insn & ~MASK (26)) | (ofs & MASK (26));
-}
-
-/* Encode the 19-bit offset of conditional branch and compare & branch. */
-static inline uint32_t
-reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
-{
- return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
-}
-
-/* Decode the 19-bit offset of load literal. */
-static inline uint32_t
-reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
-{
- return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
-}
-
-/* Encode the 14-bit offset of test & branch. */
-static inline uint32_t
-reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
-{
- return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
-}
-
-/* Reencode the imm field of move wide. */
-static inline uint32_t
-reencode_movw_imm (uint32_t insn, uint32_t imm)
-{
- return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
-}
-
-/* Reencode the imm field of adr. */
-static inline uint32_t
-reencode_adr_imm (uint32_t insn, uint32_t imm)
-{
- return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
- | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
-}
-
-/* Reencode the imm field of ld/st pos immediate. */
-static inline uint32_t
-reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
-{
- return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
-}
-
-/* Reencode the imm field of add immediate. */
-static inline uint32_t
-reencode_add_imm (uint32_t insn, uint32_t imm)
-{
- return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
-}
-
-/* Reencode mov[zn] to movz. */
-static inline uint32_t
-reencode_movzn_to_movz (uint32_t opcode)
-{
- return opcode | (1 << 30);
-}
-
-/* Reencode mov[zn] to movn. */
-static inline uint32_t
-reencode_movzn_to_movn (uint32_t opcode)
-{
- return opcode & ~(1 << 30);
-}
-
-/* Insert the addend/value into the instruction or data object being
- relocated. */
-static bfd_reloc_status_type
-bfd_elf_aarch64_put_addend (bfd *abfd,
- bfd_byte *address,
- reloc_howto_type *howto, bfd_signed_vma addend)
-{
- bfd_reloc_status_type status = bfd_reloc_ok;
- bfd_signed_vma old_addend = addend;
- bfd_vma contents;
- int size;
-
- size = bfd_get_reloc_size (howto);
- switch (size)
- {
- case 2:
- contents = bfd_get_16 (abfd, address);
- break;
- case 4:
- if (howto->src_mask != 0xffffffff)
- /* Must be 32-bit instruction, always little-endian. */
- contents = bfd_getl32 (address);
- else
- /* Must be 32-bit data (endianness dependent). */
- contents = bfd_get_32 (abfd, address);
- break;
- case 8:
- contents = bfd_get_64 (abfd, address);
- break;
- default:
- abort ();
- }
-
- switch (howto->complain_on_overflow)
- {
- case complain_overflow_dont:
- break;
- case complain_overflow_signed:
- status = aarch64_signed_overflow (addend,
- howto->bitsize + howto->rightshift);
- break;
- case complain_overflow_unsigned:
- status = aarch64_unsigned_overflow (addend,
- howto->bitsize + howto->rightshift);
- break;
- case complain_overflow_bitfield:
- default:
- abort ();
- }
-
- addend >>= howto->rightshift;
-
- switch (howto->type)
- {
- case R_AARCH64_JUMP26:
- case R_AARCH64_CALL26:
- contents = reencode_branch_ofs_26 (contents, addend);
- break;
-
- case R_AARCH64_CONDBR19:
- contents = reencode_cond_branch_ofs_19 (contents, addend);
- break;
-
- case R_AARCH64_TSTBR14:
- contents = reencode_tst_branch_ofs_14 (contents, addend);
- break;
-
- case R_AARCH64_LD_PREL_LO19:
- case R_AARCH64_GOT_LD_PREL19:
- if (old_addend & ((1 << howto->rightshift) - 1))
- return bfd_reloc_overflow;
- contents = reencode_ld_lit_ofs_19 (contents, addend);
- break;
-
- case R_AARCH64_TLSDESC_CALL:
- break;
-
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_ADR_PREL_LO21:
- case R_AARCH64_ADR_PREL_PG_HI21:
- case R_AARCH64_ADR_PREL_PG_HI21_NC:
- contents = reencode_adr_imm (contents, addend);
- break;
-
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_ADD_ABS_LO12_NC:
- /* Corresponds to: add rd, rn, #uimm12 to provide the low order
- 12 bits of the page offset following
- R_AARCH64_ADR_PREL_PG_HI21 which computes the
- (pc-relative) page base. */
- contents = reencode_add_imm (contents, addend);
- break;
-
- case R_AARCH64_LDST8_ABS_LO12_NC:
- case R_AARCH64_LDST16_ABS_LO12_NC:
- case R_AARCH64_LDST32_ABS_LO12_NC:
- case R_AARCH64_LDST64_ABS_LO12_NC:
- case R_AARCH64_LDST128_ABS_LO12_NC:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case R_AARCH64_LD64_GOT_LO12_NC:
- if (old_addend & ((1 << howto->rightshift) - 1))
- return bfd_reloc_overflow;
- /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
- 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
- which computes the (pc-relative) page base. */
- contents = reencode_ldst_pos_imm (contents, addend);
- break;
-
- /* Group relocations to create high bits of a 16, 32, 48 or 64
- bit signed data or abs address inline. Will change
- instruction to MOVN or MOVZ depending on sign of calculated
- value. */
-
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case R_AARCH64_MOVW_SABS_G0:
- case R_AARCH64_MOVW_SABS_G1:
- case R_AARCH64_MOVW_SABS_G2:
- /* NOTE: We can only come here with movz or movn. */
- if (addend < 0)
- {
- /* Force use of MOVN. */
- addend = ~addend;
- contents = reencode_movzn_to_movn (contents);
- }
- else
- {
- /* Force use of MOVZ. */
- contents = reencode_movzn_to_movz (contents);
- }
- /* fall through */
-
- /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
- data or abs address inline. */
-
- case R_AARCH64_MOVW_UABS_G0:
- case R_AARCH64_MOVW_UABS_G0_NC:
- case R_AARCH64_MOVW_UABS_G1:
- case R_AARCH64_MOVW_UABS_G1_NC:
- case R_AARCH64_MOVW_UABS_G2:
- case R_AARCH64_MOVW_UABS_G2_NC:
- case R_AARCH64_MOVW_UABS_G3:
- contents = reencode_movw_imm (contents, addend);
- break;
-
- default:
- /* Repack simple data */
- if (howto->dst_mask & (howto->dst_mask + 1))
- return bfd_reloc_notsupported;
-
- contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
- break;
- }
-
- switch (size)
- {
- case 2:
- bfd_put_16 (abfd, contents, address);
- break;
- case 4:
- if (howto->dst_mask != 0xffffffff)
- /* must be 32-bit instruction, always little-endian */
- bfd_putl32 (contents, address);
- else
- /* must be 32-bit data (endianness dependent) */
- bfd_put_32 (abfd, contents, address);
- break;
- case 8:
- bfd_put_64 (abfd, contents, address);
- break;
- default:
- abort ();
- }
-
- return status;
-}
-
static bfd_vma
aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
- struct elf64_aarch64_link_hash_table
+ struct elf_aarch64_link_hash_table
*globals, struct bfd_link_info *info,
bfd_vma value, bfd *output_bfd,
bfd_boolean *unresolved_reloc_p)
if (h != NULL)
{
+ BFD_ASSERT (basegot != NULL);
off = h->got.offset;
BFD_ASSERT (off != (bfd_vma) - 1);
if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
/* This is actually a static link, or it is a -Bsymbolic link
and the symbol is defined locally. We must initialize this
entry in the global offset table. Since the offset must
- always be a multiple of 8, we use the least significant bit
- to record whether we have initialized it already.
+ always be a multiple of 8 (4 in the case of ILP32), we use
+ the least significant bit to record whether we have
+ initialized it already.
When doing a dynamic link, we create a .rel(a).got relocation
entry to initialize the value. This is done in the
finish_dynamic_symbol routine. */
off &= ~1;
else
{
- bfd_put_64 (output_bfd, value, basegot->contents + off);
+ bfd_put_NN (output_bfd, value, basegot->contents + off);
h->got.offset |= 1;
}
}
/* Change R_TYPE to a more efficient access model where possible,
return the new reloc type. */
-static unsigned int
-aarch64_tls_transition_without_check (unsigned int r_type,
+static bfd_reloc_code_real_type
+aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
struct elf_link_hash_entry *h)
{
bfd_boolean is_local = h == NULL;
+
switch (r_type)
{
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- return is_local
- ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
-
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- return is_local
- ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
- : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
-
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
-
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
-
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_CALL:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ return (is_local
+ ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
+ : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
+
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
+ return (is_local
+ ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+ : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
+
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
+
+ case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
+ return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
+
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
/* Instructions with these relocations will become NOPs. */
- return R_AARCH64_NONE;
+ return BFD_RELOC_AARCH64_NONE;
+
+ default:
+ break;
}
return r_type;
}
static unsigned int
-aarch64_reloc_got_type (unsigned int r_type)
+aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
{
switch (r_type)
{
- case R_AARCH64_LD64_GOT_LO12_NC:
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
return GOT_NORMAL;
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
return GOT_TLS_GD;
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSDESC_CALL:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
return GOT_TLSDESC_GD;
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
return GOT_TLS_IE;
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
return GOT_UNKNOWN;
+
+ default:
+ break;
}
return GOT_UNKNOWN;
}
static bfd_boolean
aarch64_can_relax_tls (bfd *input_bfd,
struct bfd_link_info *info,
- unsigned int r_type,
+ bfd_reloc_code_real_type r_type,
struct elf_link_hash_entry *h,
unsigned long r_symndx)
{
if (! IS_AARCH64_TLS_RELOC (r_type))
return FALSE;
- symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
+ symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
reloc_got_type = aarch64_reloc_got_type (r_type);
if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
return TRUE;
}
-static unsigned int
+/* Given the relocation code R_TYPE, return the relaxed bfd reloc
+ enumerator. */
+
+static bfd_reloc_code_real_type
aarch64_tls_transition (bfd *input_bfd,
struct bfd_link_info *info,
unsigned int r_type,
struct elf_link_hash_entry *h,
unsigned long r_symndx)
{
- if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
- return r_type;
+ bfd_reloc_code_real_type bfd_r_type
+ = elfNN_aarch64_bfd_reloc_from_type (r_type);
+
+ if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
+ return bfd_r_type;
- return aarch64_tls_transition_without_check (r_type, h);
+ return aarch64_tls_transition_without_check (bfd_r_type, h);
}
/* Return the base VMA address which should be subtracted from real addresses
- when resolving R_AARCH64_TLS_DTPREL64 relocation. */
+ when resolving R_AARCH64_TLS_DTPREL relocation. */
static bfd_vma
dtpoff_base (struct bfd_link_info *info)
return elf_hash_table (info)->tls_sec->vma;
}
-
/* Return the base VMA address which should be subtracted from real addresses
when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
/* local symbol */
struct elf_aarch64_local_symbol *l;
- l = elf64_aarch64_locals (input_bfd);
+ l = elf_aarch64_locals (input_bfd);
return &l[r_symndx].got_offset;
}
}
referred to in h. */
if (h != NULL)
{
- struct elf64_aarch64_link_hash_entry *eh;
- eh = (struct elf64_aarch64_link_hash_entry *) h;
+ struct elf_aarch64_link_hash_entry *eh;
+ eh = (struct elf_aarch64_link_hash_entry *) h;
return &eh->tlsdesc_got_jump_table_offset;
}
else
/* local symbol */
struct elf_aarch64_local_symbol *l;
- l = elf64_aarch64_locals (input_bfd);
+ l = elf_aarch64_locals (input_bfd);
return &l[r_symndx].tlsdesc_got_jump_table_offset;
}
}
/* Perform a relocation as part of a final link. */
static bfd_reloc_status_type
-elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
+elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
bfd *input_bfd,
bfd *output_bfd,
asection *input_section,
struct elf_link_hash_entry *h,
bfd_boolean *unresolved_reloc_p,
bfd_boolean save_addend,
- bfd_vma *saved_addend)
+ bfd_vma *saved_addend,
+ Elf_Internal_Sym *sym)
{
+ Elf_Internal_Shdr *symtab_hdr;
unsigned int r_type = howto->type;
+ bfd_reloc_code_real_type bfd_r_type
+ = elfNN_aarch64_bfd_reloc_from_howto (howto);
+ bfd_reloc_code_real_type new_bfd_r_type;
unsigned long r_symndx;
bfd_byte *hit_data = contents + rel->r_offset;
bfd_vma place;
bfd_signed_vma signed_addend;
- struct elf64_aarch64_link_hash_table *globals;
+ struct elf_aarch64_link_hash_table *globals;
bfd_boolean weak_undef_p;
- globals = elf64_aarch64_hash_table (info);
+ globals = elf_aarch64_hash_table (info);
+
+ symtab_hdr = &elf_symtab_hdr (input_bfd);
BFD_ASSERT (is_aarch64_elf (input_bfd));
- r_symndx = ELF64_R_SYM (rel->r_info);
+ r_symndx = ELFNN_R_SYM (rel->r_info);
/* It is possible to have linker relaxations on some TLS access
models. Update our information here. */
- r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
-
- if (r_type != howto->type)
- howto = elf64_aarch64_howto_from_type (r_type);
+ new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
+ if (new_bfd_r_type != bfd_r_type)
+ {
+ bfd_r_type = new_bfd_r_type;
+ howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
+ BFD_ASSERT (howto != NULL);
+ r_type = howto->type;
+ }
place = input_section->output_section->vma
+ input_section->output_offset + rel->r_offset;
signed_addend = saved_addend ? *saved_addend : 0;
signed_addend += rel->r_addend;
- weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
- : bfd_is_und_section (sym_sec));
- switch (r_type)
+ weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
+ : bfd_is_und_section (sym_sec));
+
+ /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
+ it here if it is defined in a non-shared object. */
+ if (h != NULL
+ && h->type == STT_GNU_IFUNC
+ && h->def_regular)
+ {
+ asection *plt;
+ const char *name;
+ asection *base_got;
+ bfd_vma off;
+
+ if ((input_section->flags & SEC_ALLOC) == 0
+ || h->plt.offset == (bfd_vma) -1)
+ abort ();
+
+ /* STT_GNU_IFUNC symbol must go through PLT. */
+ plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
+ value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
+
+ switch (bfd_r_type)
+ {
+ default:
+ if (h->root.root.string)
+ name = h->root.root.string;
+ else
+ name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
+ NULL);
+ (*_bfd_error_handler)
+ (_("%B: relocation %s against STT_GNU_IFUNC "
+ "symbol `%s' isn't handled by %s"), input_bfd,
+ howto->name, name, __FUNCTION__);
+ bfd_set_error (bfd_error_bad_value);
+ return FALSE;
+
+ case BFD_RELOC_AARCH64_NN:
+ if (rel->r_addend != 0)
+ {
+ if (h->root.root.string)
+ name = h->root.root.string;
+ else
+ name = bfd_elf_sym_name (input_bfd, symtab_hdr,
+ sym, NULL);
+ (*_bfd_error_handler)
+ (_("%B: relocation %s against STT_GNU_IFUNC "
+ "symbol `%s' has non-zero addend: %d"),
+ input_bfd, howto->name, name, rel->r_addend);
+ bfd_set_error (bfd_error_bad_value);
+ return FALSE;
+ }
+
+ /* Generate dynamic relocation only when there is a
+ non-GOT reference in a shared object. */
+ if (info->shared && h->non_got_ref)
+ {
+ Elf_Internal_Rela outrel;
+ asection *sreloc;
+
+ /* Need a dynamic relocation to get the real function
+ address. */
+ outrel.r_offset = _bfd_elf_section_offset (output_bfd,
+ info,
+ input_section,
+ rel->r_offset);
+ if (outrel.r_offset == (bfd_vma) -1
+ || outrel.r_offset == (bfd_vma) -2)
+ abort ();
+
+ outrel.r_offset += (input_section->output_section->vma
+ + input_section->output_offset);
+
+ if (h->dynindx == -1
+ || h->forced_local
+ || info->executable)
+ {
+ /* This symbol is resolved locally. */
+ outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
+ outrel.r_addend = (h->root.u.def.value
+ + h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset);
+ }
+ else
+ {
+ outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
+ outrel.r_addend = 0;
+ }
+
+ sreloc = globals->root.irelifunc;
+ elf_append_rela (output_bfd, sreloc, &outrel);
+
+ /* If this reloc is against an external symbol, we
+ do not want to fiddle with the addend. Otherwise,
+ we need to include the symbol value so that it
+ becomes an addend for the dynamic reloc. For an
+ internal symbol, we have updated addend. */
+ return bfd_reloc_ok;
+ }
+ /* FALLTHROUGH */
+ case BFD_RELOC_AARCH64_JUMP26:
+ case BFD_RELOC_AARCH64_CALL26:
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ signed_addend,
+ weak_undef_p);
+ return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
+ howto, value);
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ base_got = globals->root.sgot;
+ off = h->got.offset;
+
+ if (base_got == NULL)
+ abort ();
+
+ if (off == (bfd_vma) -1)
+ {
+ bfd_vma plt_index;
+
+ /* We can't use h->got.offset here to save state, or
+ even just remember the offset, as finish_dynamic_symbol
+ would use that as offset into .got. */
+
+ if (globals->root.splt != NULL)
+ {
+ plt_index = ((h->plt.offset - globals->plt_header_size) /
+ globals->plt_entry_size);
+ off = (plt_index + 3) * GOT_ENTRY_SIZE;
+ base_got = globals->root.sgotplt;
+ }
+ else
+ {
+ plt_index = h->plt.offset / globals->plt_entry_size;
+ off = plt_index * GOT_ENTRY_SIZE;
+ base_got = globals->root.igotplt;
+ }
+
+ if (h->dynindx == -1
+ || h->forced_local
+ || info->symbolic)
+ {
+ /* This references the local definition. We must
+ initialize this entry in the global offset table.
+ Since the offset must always be a multiple of 8,
+ we use the least significant bit to record
+ whether we have initialized it already.
+
+ When doing a dynamic link, we create a .rela.got
+ relocation entry to initialize the value. This
+ is done in the finish_dynamic_symbol routine. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ bfd_put_NN (output_bfd, value,
+ base_got->contents + off);
+ /* Note that this is harmless as -1 | 1 still is -1. */
+ h->got.offset |= 1;
+ }
+ }
+ value = (base_got->output_section->vma
+ + base_got->output_offset + off);
+ }
+ else
+ value = aarch64_calculate_got_entry_vma (h, globals, info,
+ value, output_bfd,
+ unresolved_reloc_p);
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ 0, weak_undef_p);
+ return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ break;
+ }
+ }
+
+ switch (bfd_r_type)
{
- case R_AARCH64_NONE:
- case R_AARCH64_NULL:
- case R_AARCH64_TLSDESC_CALL:
+ case BFD_RELOC_AARCH64_NONE:
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
*unresolved_reloc_p = FALSE;
return bfd_reloc_ok;
- case R_AARCH64_ABS64:
+ case BFD_RELOC_AARCH64_NN:
/* When generating a shared object or relocatable executable, these
relocations are copied into the output file to be resolved at
*unresolved_reloc_p = FALSE;
- sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
- input_section, 1);
- if (sreloc == NULL)
- return bfd_reloc_notsupported;
-
skip = FALSE;
relocate = FALSE;
else if (h != NULL
&& h->dynindx != -1
&& (!info->shared || !info->symbolic || !h->def_regular))
- outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
+ outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
else
{
int symbol;
relocate the text and data segments independently,
so the symbol does not matter. */
symbol = 0;
- outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
+ outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
outrel.r_addend += value;
}
- loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
+ sreloc = elf_section_data (input_section)->sreloc;
+ if (sreloc == NULL || sreloc->contents == NULL)
+ return bfd_reloc_notsupported;
+
+ loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
+ bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
- if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
+ if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
{
/* Sanity to check that we have previously allocated
sufficient space in the relocation section for the
value += signed_addend;
break;
- case R_AARCH64_JUMP26:
- case R_AARCH64_CALL26:
+ case BFD_RELOC_AARCH64_JUMP26:
+ case BFD_RELOC_AARCH64_CALL26:
{
asection *splt = globals->root.splt;
bfd_boolean via_plt_p =
{
/* The target is out of reach, so redirect the branch to
the local stub for this function. */
- struct elf64_aarch64_stub_hash_entry *stub_entry;
- stub_entry = elf64_aarch64_get_stub_entry (input_section,
+ struct elf_aarch64_stub_hash_entry *stub_entry;
+ stub_entry = elfNN_aarch64_get_stub_entry (input_section,
sym_sec, h,
rel, globals);
if (stub_entry != NULL)
}
}
}
- value = aarch64_resolve_relocation (r_type, place, value,
- signed_addend, weak_undef_p);
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ signed_addend, weak_undef_p);
break;
- case R_AARCH64_ABS16:
- case R_AARCH64_ABS32:
- case R_AARCH64_ADD_ABS_LO12_NC:
- case R_AARCH64_ADR_PREL_LO21:
- case R_AARCH64_ADR_PREL_PG_HI21:
- case R_AARCH64_ADR_PREL_PG_HI21_NC:
- case R_AARCH64_CONDBR19:
- case R_AARCH64_LD_PREL_LO19:
- case R_AARCH64_LDST8_ABS_LO12_NC:
- case R_AARCH64_LDST16_ABS_LO12_NC:
- case R_AARCH64_LDST32_ABS_LO12_NC:
- case R_AARCH64_LDST64_ABS_LO12_NC:
- case R_AARCH64_LDST128_ABS_LO12_NC:
- case R_AARCH64_MOVW_SABS_G0:
- case R_AARCH64_MOVW_SABS_G1:
- case R_AARCH64_MOVW_SABS_G2:
- case R_AARCH64_MOVW_UABS_G0:
- case R_AARCH64_MOVW_UABS_G0_NC:
- case R_AARCH64_MOVW_UABS_G1:
- case R_AARCH64_MOVW_UABS_G1_NC:
- case R_AARCH64_MOVW_UABS_G2:
- case R_AARCH64_MOVW_UABS_G2_NC:
- case R_AARCH64_MOVW_UABS_G3:
- case R_AARCH64_PREL16:
- case R_AARCH64_PREL32:
- case R_AARCH64_PREL64:
- case R_AARCH64_TSTBR14:
- value = aarch64_resolve_relocation (r_type, place, value,
- signed_addend, weak_undef_p);
+ case BFD_RELOC_AARCH64_16:
+#if ARCH_SIZE == 64
+ case BFD_RELOC_AARCH64_32:
+#endif
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ case BFD_RELOC_AARCH64_BRANCH19:
+ case BFD_RELOC_AARCH64_LD_LO19_PCREL:
+ case BFD_RELOC_AARCH64_LDST8_LO12:
+ case BFD_RELOC_AARCH64_LDST16_LO12:
+ case BFD_RELOC_AARCH64_LDST32_LO12:
+ case BFD_RELOC_AARCH64_LDST64_LO12:
+ case BFD_RELOC_AARCH64_LDST128_LO12:
+ case BFD_RELOC_AARCH64_MOVW_G0_S:
+ case BFD_RELOC_AARCH64_MOVW_G1_S:
+ case BFD_RELOC_AARCH64_MOVW_G2_S:
+ case BFD_RELOC_AARCH64_MOVW_G0:
+ case BFD_RELOC_AARCH64_MOVW_G0_NC:
+ case BFD_RELOC_AARCH64_MOVW_G1:
+ case BFD_RELOC_AARCH64_MOVW_G1_NC:
+ case BFD_RELOC_AARCH64_MOVW_G2:
+ case BFD_RELOC_AARCH64_MOVW_G2_NC:
+ case BFD_RELOC_AARCH64_MOVW_G3:
+ case BFD_RELOC_AARCH64_16_PCREL:
+ case BFD_RELOC_AARCH64_32_PCREL:
+ case BFD_RELOC_AARCH64_64_PCREL:
+ case BFD_RELOC_AARCH64_TSTBR14:
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ signed_addend, weak_undef_p);
break;
- case R_AARCH64_LD64_GOT_LO12_NC:
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
if (globals->root.sgot == NULL)
BFD_ASSERT (h != NULL);
value = aarch64_calculate_got_entry_vma (h, globals, info, value,
output_bfd,
unresolved_reloc_p);
- value = aarch64_resolve_relocation (r_type, place, value,
- 0, weak_undef_p);
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ 0, weak_undef_p);
}
break;
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
if (globals->root.sgot == NULL)
return bfd_reloc_notsupported;
value = (symbol_got_offset (input_bfd, h, r_symndx)
+ globals->root.sgot->output_section->vma
- + globals->root.sgot->output_section->output_offset);
+ + globals->root.sgot->output_offset);
- value = aarch64_resolve_relocation (r_type, place, value,
- 0, weak_undef_p);
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ 0, weak_undef_p);
*unresolved_reloc_p = FALSE;
break;
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- value = aarch64_resolve_relocation (r_type, place, value,
- signed_addend - tpoff_base (info), weak_undef_p);
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ signed_addend - tpoff_base (info),
+ weak_undef_p);
*unresolved_reloc_p = FALSE;
break;
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_ADD:
- case R_AARCH64_TLSDESC_LDR:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LDR:
if (globals->root.sgot == NULL)
return bfd_reloc_notsupported;
-
value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
+ globals->root.sgotplt->output_section->vma
- + globals->root.sgotplt->output_section->output_offset
+ + globals->root.sgotplt->output_offset
+ globals->sgotplt_jump_table_size);
- value = aarch64_resolve_relocation (r_type, place, value,
- 0, weak_undef_p);
+ value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
+ 0, weak_undef_p);
*unresolved_reloc_p = FALSE;
break;
if (save_addend)
return bfd_reloc_continue;
- return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
+ return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
+ howto, value);
}
/* Handle TLS relaxations. Relaxing is possible for symbols that use
case of error. */
static bfd_reloc_status_type
-elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
+elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
bfd *input_bfd, bfd_byte *contents,
Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
{
bfd_boolean is_local = h == NULL;
- unsigned int r_type = ELF64_R_TYPE (rel->r_info);
+ unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
unsigned long insn;
BFD_ASSERT (globals && input_bfd && contents && rel);
- switch (r_type)
+ switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
{
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
if (is_local)
{
/* GD->LE relaxation:
return bfd_reloc_continue;
}
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
if (is_local)
{
/* GD->LE relaxation:
ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
*/
insn = bfd_getl32 (contents + rel->r_offset);
- insn &= 0xfffffff0;
+ insn &= 0xffffffe0;
bfd_putl32 (insn, contents + rel->r_offset);
return bfd_reloc_continue;
}
- case R_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
if (is_local)
{
/* GD->LE relaxation
/* First kill the tls_get_addr reloc on the bl instruction. */
BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
- rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
+ rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
bfd_putl32 (0xf2800000, contents + rel->r_offset);
bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
NOP => add x0, x1, x0
*/
- BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
+ BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
/* Remove the relocation on the BL instruction. */
- rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
+ rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
bfd_putl32 (0xf9400000, contents + rel->r_offset);
return bfd_reloc_continue;
}
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_CALL:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
/* GD->IE/LE relaxation:
add x0, x0, #:tlsdesc_lo12:var => nop
blr xd => nop
bfd_putl32 (INSN_NOP, contents + rel->r_offset);
return bfd_reloc_ok;
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
/* IE->LE relaxation:
adrp xd, :gottprel:var => movz xd, :tprel_g1:var
*/
}
return bfd_reloc_continue;
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
/* IE->LE relaxation:
ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
*/
/* Relocate an AArch64 ELF section. */
static bfd_boolean
-elf64_aarch64_relocate_section (bfd *output_bfd,
+elfNN_aarch64_relocate_section (bfd *output_bfd,
struct bfd_link_info *info,
bfd *input_bfd,
asection *input_section,
Elf_Internal_Rela *rel;
Elf_Internal_Rela *relend;
const char *name;
- struct elf64_aarch64_link_hash_table *globals;
+ struct elf_aarch64_link_hash_table *globals;
bfd_boolean save_addend = FALSE;
bfd_vma addend = 0;
- globals = elf64_aarch64_hash_table (info);
+ globals = elf_aarch64_hash_table (info);
symtab_hdr = &elf_symtab_hdr (input_bfd);
sym_hashes = elf_sym_hashes (input_bfd);
for (; rel < relend; rel++)
{
unsigned int r_type;
- unsigned int relaxed_r_type;
+ bfd_reloc_code_real_type bfd_r_type;
+ bfd_reloc_code_real_type relaxed_bfd_r_type;
reloc_howto_type *howto;
unsigned long r_symndx;
Elf_Internal_Sym *sym;
bfd_boolean unresolved_reloc = FALSE;
char *error_message = NULL;
- r_symndx = ELF64_R_SYM (rel->r_info);
- r_type = ELF64_R_TYPE (rel->r_info);
+ r_symndx = ELFNN_R_SYM (rel->r_info);
+ r_type = ELFNN_R_TYPE (rel->r_info);
- bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
+ bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
howto = bfd_reloc.howto;
if (howto == NULL)
input_bfd, input_section, r_type);
return FALSE;
}
+ bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
h = NULL;
sym = NULL;
if (r_symndx < symtab_hdr->sh_info)
{
sym = local_syms + r_symndx;
- sym_type = ELF64_ST_TYPE (sym->st_info);
+ sym_type = ELFNN_ST_TYPE (sym->st_info);
sec = local_sections[r_symndx];
/* An object file might have a reference to a local
}
relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
+
+ /* Relocate against local STT_GNU_IFUNC symbol. */
+ if (!info->relocatable
+ && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
+ {
+ h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
+ rel, FALSE);
+ if (h == NULL)
+ abort ();
+
+ /* Set STT_GNU_IFUNC symbol value. */
+ h->root.u.def.value = sym->st_value;
+ h->root.u.def.section = sec;
+ }
}
else
{
- bfd_boolean warned;
+ bfd_boolean warned, ignored;
RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
r_symndx, symtab_hdr, sym_hashes,
h, sec, relocation,
- unresolved_reloc, warned);
+ unresolved_reloc, warned, ignored);
sym_type = h->type;
}
&& (h == NULL
|| h->root.type == bfd_link_hash_defined
|| h->root.type == bfd_link_hash_defweak)
- && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
+ && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
{
(*_bfd_error_handler)
((sym_type == STT_TLS
input_section, (long) rel->r_offset, howto->name, name);
}
-
/* We relax only if we can see that there can be a valid transition
from a reloc type to another.
- We call elf64_aarch64_final_link_relocate unless we're completely
+ We call elfNN_aarch64_final_link_relocate unless we're completely
done, i.e., the relaxation produced the final output we want. */
- relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
- h, r_symndx);
- if (relaxed_r_type != r_type)
+ relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
+ h, r_symndx);
+ if (relaxed_bfd_r_type != bfd_r_type)
{
- r_type = relaxed_r_type;
- howto = elf64_aarch64_howto_from_type (r_type);
-
- r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
+ bfd_r_type = relaxed_bfd_r_type;
+ howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
+ BFD_ASSERT (howto != NULL);
+ r_type = howto->type;
+ r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
unresolved_reloc = 0;
}
else
output of each relocation as the addend for the next. */
if (rel + 1 < relend
&& rel->r_offset == rel[1].r_offset
- && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
- && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
+ && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
+ && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
save_addend = TRUE;
else
save_addend = FALSE;
if (r == bfd_reloc_continue)
- r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
+ r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
input_section, contents, rel,
relocation, info, sec,
h, &unresolved_reloc,
- save_addend, &addend);
+ save_addend, &addend, sym);
- switch (r_type)
+ switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
{
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
{
bfd_boolean need_relocs = FALSE;
if (need_relocs)
{
Elf_Internal_Rela rela;
- rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
+ rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
rela.r_addend = 0;
rela.r_offset = globals->root.sgot->output_section->vma +
globals->root.sgot->output_offset + off;
loc = globals->root.srelgot->contents;
loc += globals->root.srelgot->reloc_count++
* RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
if (indx == 0)
{
- bfd_put_64 (output_bfd,
+ bfd_put_NN (output_bfd,
relocation - dtpoff_base (info),
globals->root.sgot->contents + off
+ GOT_ENTRY_SIZE);
relocation to fixup the tls offset at load
time. */
rela.r_info =
- ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
+ ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
rela.r_addend = 0;
rela.r_offset =
(globals->root.sgot->output_section->vma
loc = globals->root.srelgot->contents;
loc += globals->root.srelgot->reloc_count++
* RELOC_SIZE (globals);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
- bfd_put_64 (output_bfd, (bfd_vma) 0,
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_put_NN (output_bfd, (bfd_vma) 0,
globals->root.sgot->contents + off
+ GOT_ENTRY_SIZE);
}
}
else
{
- bfd_put_64 (output_bfd, (bfd_vma) 1,
+ bfd_put_NN (output_bfd, (bfd_vma) 1,
globals->root.sgot->contents + off);
- bfd_put_64 (output_bfd,
+ bfd_put_NN (output_bfd,
relocation - dtpoff_base (info),
globals->root.sgot->contents + off
+ GOT_ENTRY_SIZE);
}
break;
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
{
bfd_boolean need_relocs = FALSE;
else
rela.r_addend = 0;
- rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
+ rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
rela.r_offset = globals->root.sgot->output_section->vma +
globals->root.sgot->output_offset + off;
loc += globals->root.srelgot->reloc_count++
* RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
- bfd_put_64 (output_bfd, rela.r_addend,
+ bfd_put_NN (output_bfd, rela.r_addend,
globals->root.sgot->contents + off);
}
else
- bfd_put_64 (output_bfd, relocation - tpoff_base (info),
+ bfd_put_NN (output_bfd, relocation - tpoff_base (info),
globals->root.sgot->contents + off);
symbol_got_offset_mark (input_bfd, h, r_symndx);
}
break;
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
break;
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
{
bfd_boolean need_relocs = FALSE;
{
bfd_byte *loc;
Elf_Internal_Rela rela;
- rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
+ rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
+
rela.r_addend = 0;
rela.r_offset = (globals->root.sgotplt->output_section->vma
+ globals->root.sgotplt->output_offset
loc += globals->root.srelplt->reloc_count++
* RELOC_SIZE (globals);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
- bfd_put_64 (output_bfd, (bfd_vma) 0,
+ bfd_put_NN (output_bfd, (bfd_vma) 0,
globals->root.sgotplt->contents + off +
globals->sgotplt_jump_table_size);
- bfd_put_64 (output_bfd, (bfd_vma) 0,
+ bfd_put_NN (output_bfd, (bfd_vma) 0,
globals->root.sgotplt->contents + off +
globals->sgotplt_jump_table_size +
GOT_ENTRY_SIZE);
symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
}
break;
+ default:
+ break;
}
if (!save_addend)
/* Set the right machine number. */
static bfd_boolean
-elf64_aarch64_object_p (bfd *abfd)
+elfNN_aarch64_object_p (bfd *abfd)
{
+#if ARCH_SIZE == 32
+ bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
+#else
bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
+#endif
return TRUE;
}
/* Function to keep AArch64 specific flags in the ELF header. */
static bfd_boolean
-elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
+elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
{
if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
{
return TRUE;
}
-/* Copy backend specific data from one object module to another. */
-
-static bfd_boolean
-elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
-{
- flagword in_flags;
-
- if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
- return TRUE;
-
- in_flags = elf_elfheader (ibfd)->e_flags;
-
- elf_elfheader (obfd)->e_flags = in_flags;
- elf_flags_init (obfd) = TRUE;
-
- /* Also copy the EI_OSABI field. */
- elf_elfheader (obfd)->e_ident[EI_OSABI] =
- elf_elfheader (ibfd)->e_ident[EI_OSABI];
-
- /* Copy object attributes. */
- _bfd_elf_copy_obj_attributes (ibfd, obfd);
-
- return TRUE;
-}
-
/* Merge backend specific data from an object file to the output
object file when linking. */
static bfd_boolean
-elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
+elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
{
flagword out_flags;
flagword in_flags;
/* Display the flags field. */
static bfd_boolean
-elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
+elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
{
FILE *file = (FILE *) ptr;
unsigned long flags;
/* Update the got entry reference counts for the section being removed. */
static bfd_boolean
-elf64_aarch64_gc_sweep_hook (bfd *abfd,
+elfNN_aarch64_gc_sweep_hook (bfd *abfd,
struct bfd_link_info *info,
asection *sec,
const Elf_Internal_Rela * relocs)
{
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
struct elf_aarch64_local_symbol *locals;
if (info->relocatable)
return TRUE;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
if (htab == NULL)
return FALSE;
symtab_hdr = &elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
- locals = elf64_aarch64_locals (abfd);
+ locals = elf_aarch64_locals (abfd);
relend = relocs + sec->reloc_count;
for (rel = relocs; rel < relend; rel++)
unsigned int r_type;
struct elf_link_hash_entry *h = NULL;
- r_symndx = ELF64_R_SYM (rel->r_info);
+ r_symndx = ELFNN_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info)
{
- struct elf64_aarch64_link_hash_entry *eh;
- struct elf_dyn_relocs **pp;
- struct elf_dyn_relocs *p;
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
- eh = (struct elf64_aarch64_link_hash_entry *) h;
-
- for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
- {
- if (p->sec == sec)
- {
- /* Everything must go for SEC. */
- *pp = p->next;
- break;
- }
- }
}
else
{
/* A local symbol. */
isym = bfd_sym_from_r_symndx (&htab->sym_cache,
abfd, r_symndx);
- if (isym == NULL)
- return FALSE;
+
+ /* Check relocation against local STT_GNU_IFUNC symbol. */
+ if (isym != NULL
+ && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
+ {
+ h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
+ if (h == NULL)
+ abort ();
+ }
+ }
+
+ if (h)
+ {
+ struct elf_aarch64_link_hash_entry *eh;
+ struct elf_dyn_relocs **pp;
+ struct elf_dyn_relocs *p;
+
+ eh = (struct elf_aarch64_link_hash_entry *) h;
+
+ for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
+ if (p->sec == sec)
+ {
+ /* Everything must go for SEC. */
+ *pp = p->next;
+ break;
+ }
}
- r_type = ELF64_R_TYPE (rel->r_info);
- r_type = aarch64_tls_transition (abfd,info, r_type, h ,r_symndx);
- switch (r_type)
+ r_type = ELFNN_R_TYPE (rel->r_info);
+ switch (aarch64_tls_transition (abfd,info, r_type, h ,r_symndx))
{
- case R_AARCH64_LD64_GOT_LO12_NC:
- case R_AARCH64_GOT_LD_PREL19:
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
- if (h != NULL)
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
+ if (h != NULL)
{
if (h->got.refcount > 0)
h->got.refcount -= 1;
+
+ if (h->type == STT_GNU_IFUNC)
+ {
+ if (h->plt.refcount > 0)
+ h->plt.refcount -= 1;
+ }
}
else if (locals != NULL)
{
}
break;
- case R_AARCH64_ADR_PREL_PG_HI21_NC:
- case R_AARCH64_ADR_PREL_PG_HI21:
- case R_AARCH64_ADR_PREL_LO21:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
if (h != NULL && info->executable)
{
if (h->plt.refcount > 0)
}
break;
- case R_AARCH64_CALL26:
- case R_AARCH64_JUMP26:
- /* If this is a local symbol then we resolve it
- directly without creating a PLT entry. */
+ case BFD_RELOC_AARCH64_CALL26:
+ case BFD_RELOC_AARCH64_JUMP26:
+ /* If this is a local symbol then we resolve it
+ directly without creating a PLT entry. */
if (h == NULL)
continue;
h->plt.refcount -= 1;
break;
- case R_AARCH64_ABS64:
+ case BFD_RELOC_AARCH64_NN:
if (h != NULL && info->executable)
{
if (h->plt.refcount > 0)
h->plt.refcount -= 1;
}
break;
-
+
default:
break;
}
understand. */
static bfd_boolean
-elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
+elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
struct elf_link_hash_entry *h)
{
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
asection *s;
/* If this is a function, put it in the procedure linkage table. We
will fill in the contents of the procedure linkage table later,
when we know the address of the .got section. */
- if (h->type == STT_FUNC || h->needs_plt)
+ if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
{
if (h->plt.refcount <= 0
- || SYMBOL_CALLS_LOCAL (info, h)
- || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
- && h->root.type == bfd_link_hash_undefweak))
+ || (h->type != STT_GNU_IFUNC
+ && (SYMBOL_CALLS_LOCAL (info, h)
+ || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
+ && h->root.type == bfd_link_hash_undefweak))))
{
/* This case can occur if we saw a CALL26 reloc in
an input file, but the symbol wasn't referred to
both the dynamic object and the regular object will refer to the
same memory location for the variable. */
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
/* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
to copy the initial value out of the dynamic object and into the
}
static bfd_boolean
-elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
+elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
{
struct elf_aarch64_local_symbol *locals;
- locals = elf64_aarch64_locals (abfd);
+ locals = elf_aarch64_locals (abfd);
if (locals == NULL)
{
locals = (struct elf_aarch64_local_symbol *)
bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
if (locals == NULL)
return FALSE;
- elf64_aarch64_locals (abfd) = locals;
+ elf_aarch64_locals (abfd) = locals;
+ }
+ return TRUE;
+}
+
+/* Create the .got section to hold the global offset table. */
+
+static bfd_boolean
+aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
+{
+ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
+ flagword flags;
+ asection *s;
+ struct elf_link_hash_entry *h;
+ struct elf_link_hash_table *htab = elf_hash_table (info);
+
+ /* This function may be called more than once. */
+ s = bfd_get_linker_section (abfd, ".got");
+ if (s != NULL)
+ return TRUE;
+
+ flags = bed->dynamic_sec_flags;
+
+ s = bfd_make_section_anyway_with_flags (abfd,
+ (bed->rela_plts_and_copies_p
+ ? ".rela.got" : ".rel.got"),
+ (bed->dynamic_sec_flags
+ | SEC_READONLY));
+ if (s == NULL
+ || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
+ return FALSE;
+ htab->srelgot = s;
+
+ s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
+ if (s == NULL
+ || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
+ return FALSE;
+ htab->sgot = s;
+ htab->sgot->size += GOT_ENTRY_SIZE;
+
+ if (bed->want_got_sym)
+ {
+ /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
+ (or .got.plt) section. We don't do this in the linker script
+ because we don't want to define the symbol if we are not creating
+ a global offset table. */
+ h = _bfd_elf_define_linkage_sym (abfd, info, s,
+ "_GLOBAL_OFFSET_TABLE_");
+ elf_hash_table (info)->hgot = h;
+ if (h == NULL)
+ return FALSE;
+ }
+
+ if (bed->want_got_plt)
+ {
+ s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
+ if (s == NULL
+ || !bfd_set_section_alignment (abfd, s,
+ bed->s->log_file_align))
+ return FALSE;
+ htab->sgotplt = s;
}
+
+ /* The first bit of the global offset table is the header. */
+ s->size += bed->got_header_size;
+
return TRUE;
}
/* Look through the relocs for a section during the first phase. */
static bfd_boolean
-elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
+elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
asection *sec, const Elf_Internal_Rela *relocs)
{
Elf_Internal_Shdr *symtab_hdr;
const Elf_Internal_Rela *rel_end;
asection *sreloc;
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
if (info->relocatable)
return TRUE;
BFD_ASSERT (is_aarch64_elf (abfd));
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
sreloc = NULL;
symtab_hdr = &elf_symtab_hdr (abfd);
struct elf_link_hash_entry *h;
unsigned long r_symndx;
unsigned int r_type;
+ bfd_reloc_code_real_type bfd_r_type;
+ Elf_Internal_Sym *isym;
- r_symndx = ELF64_R_SYM (rel->r_info);
- r_type = ELF64_R_TYPE (rel->r_info);
+ r_symndx = ELFNN_R_SYM (rel->r_info);
+ r_type = ELFNN_R_TYPE (rel->r_info);
if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
{
}
if (r_symndx < symtab_hdr->sh_info)
- h = NULL;
+ {
+ /* A local symbol. */
+ isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+ abfd, r_symndx);
+ if (isym == NULL)
+ return FALSE;
+
+ /* Check relocation against local STT_GNU_IFUNC symbol. */
+ if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
+ {
+ h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
+ TRUE);
+ if (h == NULL)
+ return FALSE;
+
+ /* Fake a STT_GNU_IFUNC symbol. */
+ h->type = STT_GNU_IFUNC;
+ h->def_regular = 1;
+ h->ref_regular = 1;
+ h->forced_local = 1;
+ h->root.type = bfd_link_hash_defined;
+ }
+ else
+ h = NULL;
+ }
else
{
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
}
/* Could be done earlier, if h were already available. */
- r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
+ bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
+
+ if (h != NULL)
+ {
+ /* Create the ifunc sections for static executables. If we
+ never see an indirect function symbol nor we are building
+ a static executable, those sections will be empty and
+ won't appear in output. */
+ switch (bfd_r_type)
+ {
+ default:
+ break;
+
+ case BFD_RELOC_AARCH64_NN:
+ case BFD_RELOC_AARCH64_CALL26:
+ case BFD_RELOC_AARCH64_JUMP26:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ if (htab->root.dynobj == NULL)
+ htab->root.dynobj = abfd;
+ if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
+ return FALSE;
+ break;
+ }
+
+ /* It is referenced by a non-shared object. */
+ h->ref_regular = 1;
+ h->root.non_ir_ref = 1;
+ }
- switch (r_type)
+ switch (bfd_r_type)
{
- case R_AARCH64_ABS64:
+ case BFD_RELOC_AARCH64_NN:
/* We don't need to handle relocs into sections not going into
the "real" output. */
htab->root.dynobj = abfd;
sreloc = _bfd_elf_make_dynamic_reloc_section
- (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
+ (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
if (sreloc == NULL)
return FALSE;
relocations we need for this symbol. */
if (h != NULL)
{
- struct elf64_aarch64_link_hash_entry *eh;
- eh = (struct elf64_aarch64_link_hash_entry *) h;
+ struct elf_aarch64_link_hash_entry *eh;
+ eh = (struct elf_aarch64_link_hash_entry *) h;
head = &eh->dyn_relocs;
}
else
asection *s;
void **vpp;
- Elf_Internal_Sym *isym;
isym = bfd_sym_from_r_symndx (&htab->sym_cache,
abfd, r_symndx);
/* RR: We probably want to keep a consistency check that
there are no dangling GOT_PAGE relocs. */
- case R_AARCH64_LD64_GOT_LO12_NC:
- case R_AARCH64_GOT_LD_PREL19:
- case R_AARCH64_ADR_GOT_PAGE:
- case R_AARCH64_TLSGD_ADR_PAGE21:
- case R_AARCH64_TLSGD_ADD_LO12_NC:
- case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
- case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12:
- case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1:
- case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0:
- case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case R_AARCH64_TLSDESC_ADR_PAGE21:
- case R_AARCH64_TLSDESC_ADD_LO12_NC:
- case R_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
{
unsigned got_type;
unsigned old_got_type;
- got_type = aarch64_reloc_got_type (r_type);
+ got_type = aarch64_reloc_got_type (bfd_r_type);
if (h)
{
h->got.refcount += 1;
- old_got_type = elf64_aarch64_hash_entry (h)->got_type;
+ old_got_type = elf_aarch64_hash_entry (h)->got_type;
}
else
{
struct elf_aarch64_local_symbol *locals;
- if (!elf64_aarch64_allocate_local_symbols
+ if (!elfNN_aarch64_allocate_local_symbols
(abfd, symtab_hdr->sh_info))
return FALSE;
- locals = elf64_aarch64_locals (abfd);
+ locals = elf_aarch64_locals (abfd);
BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
locals[r_symndx].got_refcount += 1;
old_got_type = locals[r_symndx].got_type;
if (old_got_type != got_type)
{
if (h != NULL)
- elf64_aarch64_hash_entry (h)->got_type = got_type;
+ elf_aarch64_hash_entry (h)->got_type = got_type;
else
{
struct elf_aarch64_local_symbol *locals;
- locals = elf64_aarch64_locals (abfd);
+ locals = elf_aarch64_locals (abfd);
BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
locals[r_symndx].got_type = got_type;
}
}
- if (htab->root.sgot == NULL)
- {
- if (htab->root.dynobj == NULL)
- htab->root.dynobj = abfd;
- if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
- return FALSE;
- }
+ if (htab->root.dynobj == NULL)
+ htab->root.dynobj = abfd;
+ if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
+ return FALSE;
break;
}
- case R_AARCH64_ADR_PREL_PG_HI21_NC:
- case R_AARCH64_ADR_PREL_PG_HI21:
- case R_AARCH64_ADR_PREL_LO21:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
if (h != NULL && info->executable)
{
/* If this reloc is in a read-only section, we might
relocations in shared libraries. */
break;
- case R_AARCH64_CALL26:
- case R_AARCH64_JUMP26:
+ case BFD_RELOC_AARCH64_CALL26:
+ case BFD_RELOC_AARCH64_JUMP26:
/* If this is a local symbol then we resolve it
directly without creating a PLT entry. */
if (h == NULL)
continue;
h->needs_plt = 1;
- h->plt.refcount += 1;
+ if (h->plt.refcount <= 0)
+ h->plt.refcount = 1;
+ else
+ h->plt.refcount += 1;
+ break;
+
+ default:
break;
}
}
+
return TRUE;
}
/* Treat mapping symbols as special target symbols. */
static bfd_boolean
-elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
+elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
asymbol *sym)
{
return bfd_is_aarch64_special_symbol_name (sym->name,
that it uses aarch64_elf_find_function. */
static bfd_boolean
-elf64_aarch64_find_nearest_line (bfd *abfd,
+elfNN_aarch64_find_nearest_line (bfd *abfd,
asection *section,
asymbol **symbols,
bfd_vma offset,
}
static bfd_boolean
-elf64_aarch64_find_inliner_info (bfd *abfd,
+elfNN_aarch64_find_inliner_info (bfd *abfd,
const char **filename_ptr,
const char **functionname_ptr,
unsigned int *line_ptr)
static void
-elf64_aarch64_post_process_headers (bfd *abfd,
- struct bfd_link_info *link_info
- ATTRIBUTE_UNUSED)
+elfNN_aarch64_post_process_headers (bfd *abfd,
+ struct bfd_link_info *link_info)
{
Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
i_ehdrp = elf_elfheader (abfd);
- i_ehdrp->e_ident[EI_OSABI] = 0;
i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
+
+ _bfd_elf_post_process_headers (abfd, link_info);
}
static enum elf_reloc_type_class
-elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
+elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
const asection *rel_sec ATTRIBUTE_UNUSED,
const Elf_Internal_Rela *rela)
{
- switch ((int) ELF64_R_TYPE (rela->r_info))
+ switch ((int) ELFNN_R_TYPE (rela->r_info))
{
- case R_AARCH64_RELATIVE:
+ case AARCH64_R (RELATIVE):
return reloc_class_relative;
- case R_AARCH64_JUMP_SLOT:
+ case AARCH64_R (JUMP_SLOT):
return reloc_class_plt;
- case R_AARCH64_COPY:
+ case AARCH64_R (COPY):
return reloc_class_copy;
default:
return reloc_class_normal;
}
}
-/* Set the right machine number for an AArch64 ELF file. */
-
-static bfd_boolean
-elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
-{
- if (hdr->sh_type == SHT_NOTE)
- *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
-
- return TRUE;
-}
-
/* Handle an AArch64 specific section when reading an object file. This is
called when bfd_section_from_shdr finds a section with an unknown
type. */
static bfd_boolean
-elf64_aarch64_section_from_shdr (bfd *abfd,
+elfNN_aarch64_section_from_shdr (bfd *abfd,
Elf_Internal_Shdr *hdr,
const char *name, int shindex)
{
/* Unfortunately we need to keep a list of sections for which
an _aarch64_elf_section_data structure has been allocated. This
- is because it is possible for functions like elf64_aarch64_write_section
+ is because it is possible for functions like elfNN_aarch64_write_section
to be called on a section which has had an elf_data_structure
allocated for it (and so the used_by_bfd field is valid) but
for which the AArch64 extended version of this structure - the
/* Output a single mapping symbol. */
static bfd_boolean
-elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
+elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
enum map_symbol_type type, bfd_vma offset)
{
static const char *names[2] = { "$x", "$d" };
/* Output mapping symbols for PLT entries associated with H. */
static bfd_boolean
-elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
+elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
output_arch_syminfo *osi = (output_arch_syminfo *) inf;
bfd_vma addr;
addr = h->plt.offset;
if (addr == 32)
{
- if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
return FALSE;
}
return TRUE;
/* Output a single local symbol for a generated stub. */
static bfd_boolean
-elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
+elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
bfd_vma offset, bfd_vma size)
{
Elf_Internal_Sym sym;
static bfd_boolean
aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
{
- struct elf64_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry;
asection *stub_sec;
bfd_vma addr;
char *stub_name;
output_arch_syminfo *osi;
/* Massage our args to the form they really have. */
- stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
+ stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
osi = (output_arch_syminfo *) in_arg;
stub_sec = stub_entry->stub_sec;
switch (stub_entry->stub_type)
{
case aarch64_stub_adrp_branch:
- if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
sizeof (aarch64_adrp_branch_stub)))
return FALSE;
- if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
return FALSE;
break;
case aarch64_stub_long_branch:
- if (!elf64_aarch64_output_stub_sym
+ if (!elfNN_aarch64_output_stub_sym
(osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
return FALSE;
- if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
return FALSE;
- if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
return FALSE;
break;
default:
/* Output mapping symbols for linker generated sections. */
static bfd_boolean
-elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
+elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
struct bfd_link_info *info,
void *finfo,
int (*func) (void *, const char *,
*))
{
output_arch_syminfo osi;
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
osi.finfo = finfo;
osi.info = info;
(output_bfd, htab->root.splt->output_section);
osi.sec = htab->root.splt;
- elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
+ elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
(void *) &osi);
return TRUE;
/* Allocate target specific section data. */
static bfd_boolean
-elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
+elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
{
if (!sec->used_by_bfd)
{
}
static bfd_boolean
-elf64_aarch64_close_and_cleanup (bfd *abfd)
+elfNN_aarch64_close_and_cleanup (bfd *abfd)
{
if (abfd->sections)
bfd_map_over_sections (abfd,
}
static bfd_boolean
-elf64_aarch64_bfd_free_cached_info (bfd *abfd)
+elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
{
if (abfd->sections)
bfd_map_over_sections (abfd,
return _bfd_free_cached_info (abfd);
}
-static bfd_boolean
-elf64_aarch64_is_function_type (unsigned int type)
-{
- return type == STT_FUNC;
-}
-
/* Create dynamic sections. This is different from the ARM backend in that
the got, plt, gotplt and their relocation sections are all created in the
standard part of the bfd elf backend. */
static bfd_boolean
-elf64_aarch64_create_dynamic_sections (bfd *dynobj,
+elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
struct bfd_link_info *info)
{
- struct elf64_aarch64_link_hash_table *htab;
- struct elf_link_hash_entry *h;
+ struct elf_aarch64_link_hash_table *htab;
+
+ /* We need to create .got section. */
+ if (!aarch64_elf_create_got_section (dynobj, info))
+ return FALSE;
if (!_bfd_elf_create_dynamic_sections (dynobj, info))
return FALSE;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
if (!info->shared)
htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
if (!htab->sdynbss || (!info->shared && !htab->srelbss))
abort ();
- /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
- dynobj's .got section. We don't do this in the linker script
- because we don't want to define the symbol if we are not creating
- a global offset table. */
- h = _bfd_elf_define_linkage_sym (dynobj, info,
- htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
- elf_hash_table (info)->hgot = h;
- if (h == NULL)
- return FALSE;
-
return TRUE;
}
dynamic relocs. */
static bfd_boolean
-elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
+elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
struct bfd_link_info *info;
- struct elf64_aarch64_link_hash_table *htab;
- struct elf64_aarch64_link_hash_entry *eh;
+ struct elf_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_entry *eh;
struct elf_dyn_relocs *p;
/* An example of a bfd_link_hash_indirect symbol is versioned
There is no need to process bfd_link_hash_indirect symbols here
because we will also be presented with the concrete instance of
- the symbol and elf64_aarch64_copy_indirect_symbol () will have been
+ the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
called to copy all relevant data from the generic to the concrete
symbol instance.
*/
h = (struct elf_link_hash_entry *) h->root.u.i.link;
info = (struct bfd_link_info *) inf;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
- if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
+ /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
+ here if it is defined and referenced in a non-shared object. */
+ if (h->type == STT_GNU_IFUNC
+ && h->def_regular)
+ return TRUE;
+ else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
{
/* Make sure this symbol is output as a dynamic symbol.
Undefined weak syms won't yet be marked as dynamic. */
h->needs_plt = 0;
}
- eh = (struct elf64_aarch64_link_hash_entry *) h;
+ eh = (struct elf_aarch64_link_hash_entry *) h;
eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
if (h->got.refcount > 0)
{
bfd_boolean dyn;
- unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
+ unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
h->got.offset = (bfd_vma) - 1;
return TRUE;
}
+/* Allocate space in .plt, .got and associated reloc sections for
+ ifunc dynamic relocs. */
+
+static bfd_boolean
+elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
+ void *inf)
+{
+ struct bfd_link_info *info;
+ struct elf_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_entry *eh;
+
+ /* An example of a bfd_link_hash_indirect symbol is versioned
+ symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
+ -> __gxx_personality_v0(bfd_link_hash_defined)
+
+ There is no need to process bfd_link_hash_indirect symbols here
+ because we will also be presented with the concrete instance of
+ the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
+ called to copy all relevant data from the generic to the concrete
+ symbol instance.
+ */
+ if (h->root.type == bfd_link_hash_indirect)
+ return TRUE;
+
+ if (h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ info = (struct bfd_link_info *) inf;
+ htab = elf_aarch64_hash_table (info);
+
+ eh = (struct elf_aarch64_link_hash_entry *) h;
+
+ /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
+ here if it is defined and referenced in a non-shared object. */
+ if (h->type == STT_GNU_IFUNC
+ && h->def_regular)
+ return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
+ &eh->dyn_relocs,
+ htab->plt_entry_size,
+ htab->plt_header_size,
+ GOT_ENTRY_SIZE);
+ return TRUE;
+}
+
+/* Allocate space in .plt, .got and associated reloc sections for
+ local dynamic relocs. */
+
+static bfd_boolean
+elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
+{
+ struct elf_link_hash_entry *h
+ = (struct elf_link_hash_entry *) *slot;
+
+ if (h->type != STT_GNU_IFUNC
+ || !h->def_regular
+ || !h->ref_regular
+ || !h->forced_local
+ || h->root.type != bfd_link_hash_defined)
+ abort ();
+
+ return elfNN_aarch64_allocate_dynrelocs (h, inf);
+}
+
+/* Allocate space in .plt, .got and associated reloc sections for
+ local ifunc dynamic relocs. */
+
+static bfd_boolean
+elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
+{
+ struct elf_link_hash_entry *h
+ = (struct elf_link_hash_entry *) *slot;
+ if (h->type != STT_GNU_IFUNC
+ || !h->def_regular
+ || !h->ref_regular
+ || !h->forced_local
+ || h->root.type != bfd_link_hash_defined)
+ abort ();
+ return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
+}
/* This is the most important function of all . Innocuosly named
though ! */
static bfd_boolean
-elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
struct bfd_link_info *info)
{
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
bfd *dynobj;
asection *s;
bfd_boolean relocs;
bfd *ibfd;
- htab = elf64_aarch64_hash_table ((info));
+ htab = elf_aarch64_hash_table ((info));
dynobj = htab->root.dynobj;
BFD_ASSERT (dynobj != NULL);
}
}
- locals = elf64_aarch64_locals (ibfd);
+ locals = elf_aarch64_locals (ibfd);
if (!locals)
continue;
/* Allocate global sym .plt and .got entries, and space for global
sym dynamic relocs. */
- elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
+ elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
+ info);
+
+ /* Allocate global ifunc sym .plt and .got entries, and space for global
+ ifunc sym dynamic relocs. */
+ elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
info);
+ /* Allocate .plt and .got entries, and space for local symbols. */
+ htab_traverse (htab->loc_hash_table,
+ elfNN_aarch64_allocate_local_dynrelocs,
+ info);
+
+ /* Allocate .plt and .got entries, and space for local ifunc symbols. */
+ htab_traverse (htab->loc_hash_table,
+ elfNN_aarch64_allocate_local_ifunc_dynrelocs,
+ info);
/* For every jump slot reserved in the sgotplt, reloc_count is
incremented. However, when we reserve space for TLS descriptors,
if (htab->root.dynamic_sections_created)
{
/* Add some entries to the .dynamic section. We fill in the
- values later, in elf64_aarch64_finish_dynamic_sections, but we
+ values later, in elfNN_aarch64_finish_dynamic_sections, but we
must add the entries now so that we get the correct size for
the .dynamic section. The DT_DEBUG entry is filled in by the
dynamic linker and used by the debugger. */
#undef add_dynamic_entry
return TRUE;
-
-
}
static inline void
-elf64_aarch64_update_plt_entry (bfd *output_bfd,
- unsigned int r_type,
- bfd_byte *plt_entry, bfd_vma value)
+elf_aarch64_update_plt_entry (bfd *output_bfd,
+ bfd_reloc_code_real_type r_type,
+ bfd_byte *plt_entry, bfd_vma value)
{
- reloc_howto_type *howto;
- howto = elf64_aarch64_howto_from_type (r_type);
- bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
+ reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
+
+ _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
}
static void
-elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
- struct elf64_aarch64_link_hash_table
- *htab, bfd *output_bfd)
+elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
+ struct elf_aarch64_link_hash_table
+ *htab, bfd *output_bfd,
+ struct bfd_link_info *info)
{
bfd_byte *plt_entry;
bfd_vma plt_index;
bfd_vma plt_entry_address;
Elf_Internal_Rela rela;
bfd_byte *loc;
+ asection *plt, *gotplt, *relplt;
+
+ /* When building a static executable, use .iplt, .igot.plt and
+ .rela.iplt sections for STT_GNU_IFUNC symbols. */
+ if (htab->root.splt != NULL)
+ {
+ plt = htab->root.splt;
+ gotplt = htab->root.sgotplt;
+ relplt = htab->root.srelplt;
+ }
+ else
+ {
+ plt = htab->root.iplt;
+ gotplt = htab->root.igotplt;
+ relplt = htab->root.irelplt;
+ }
- plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
+ /* Get the index in the procedure linkage table which
+ corresponds to this symbol. This is the index of this symbol
+ in all the symbols for which we are making plt entries. The
+ first entry in the procedure linkage table is reserved.
+
+ Get the offset into the .got table of the entry that
+ corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
+ bytes. The first three are reserved for the dynamic linker.
+
+ For static executables, we don't reserve anything. */
+
+ if (plt == htab->root.splt)
+ {
+ plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
+ got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
+ }
+ else
+ {
+ plt_index = h->plt.offset / htab->plt_entry_size;
+ got_offset = plt_index * GOT_ENTRY_SIZE;
+ }
- /* Offset in the GOT is PLT index plus got GOT headers(3)
- times 8. */
- got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
- plt_entry = htab->root.splt->contents + h->plt.offset;
- plt_entry_address = htab->root.splt->output_section->vma
- + htab->root.splt->output_section->output_offset + h->plt.offset;
- gotplt_entry_address = htab->root.sgotplt->output_section->vma +
- htab->root.sgotplt->output_offset + got_offset;
+ plt_entry = plt->contents + h->plt.offset;
+ plt_entry_address = plt->output_section->vma
+ + plt->output_offset + h->plt.offset;
+ gotplt_entry_address = gotplt->output_section->vma +
+ gotplt->output_offset + got_offset;
/* Copy in the boiler-plate for the PLTn entry. */
- memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
+ memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
/* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
- plt_entry,
- PG (gotplt_entry_address) -
- PG (plt_entry_address));
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ plt_entry,
+ PG (gotplt_entry_address) -
+ PG (plt_entry_address));
/* Fill in the lo12 bits for the load from the pltgot. */
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
- plt_entry + 4,
- PG_OFFSET (gotplt_entry_address));
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
+ plt_entry + 4,
+ PG_OFFSET (gotplt_entry_address));
- /* Fill in the the lo12 bits for the add from the pltgot entry. */
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
- plt_entry + 8,
- PG_OFFSET (gotplt_entry_address));
+ /* Fill in the lo12 bits for the add from the pltgot entry. */
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
+ plt_entry + 8,
+ PG_OFFSET (gotplt_entry_address));
/* All the GOTPLT Entries are essentially initialized to PLT0. */
- bfd_put_64 (output_bfd,
- (htab->root.splt->output_section->vma
- + htab->root.splt->output_offset),
- htab->root.sgotplt->contents + got_offset);
+ bfd_put_NN (output_bfd,
+ plt->output_section->vma + plt->output_offset,
+ gotplt->contents + got_offset);
- /* Fill in the entry in the .rela.plt section. */
rela.r_offset = gotplt_entry_address;
- rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
- rela.r_addend = 0;
+
+ if (h->dynindx == -1
+ || ((info->executable
+ || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
+ && h->def_regular
+ && h->type == STT_GNU_IFUNC))
+ {
+ /* If an STT_GNU_IFUNC symbol is locally defined, generate
+ R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
+ rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
+ rela.r_addend = (h->root.u.def.value
+ + h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset);
+ }
+ else
+ {
+ /* Fill in the entry in the .rela.plt section. */
+ rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
+ rela.r_addend = 0;
+ }
/* Compute the relocation entry to use based on PLT index and do
not adjust reloc_count. The reloc_count has already been adjusted
to account for this entry. */
- loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ loc = relplt->contents + plt_index * RELOC_SIZE (htab);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
}
/* Size sections even though they're not dynamic. We use it to setup
_TLS_MODULE_BASE_, if needed. */
static bfd_boolean
-elf64_aarch64_always_size_sections (bfd *output_bfd,
+elfNN_aarch64_always_size_sections (bfd *output_bfd,
struct bfd_link_info *info)
{
asection *tls_sec;
/* Finish up dynamic symbol handling. We set the contents of various
dynamic sections here. */
static bfd_boolean
-elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
+elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
struct bfd_link_info *info,
struct elf_link_hash_entry *h,
Elf_Internal_Sym *sym)
{
- struct elf64_aarch64_link_hash_table *htab;
- htab = elf64_aarch64_hash_table (info);
+ struct elf_aarch64_link_hash_table *htab;
+ htab = elf_aarch64_hash_table (info);
if (h->plt.offset != (bfd_vma) - 1)
{
+ asection *plt, *gotplt, *relplt;
+
/* This symbol has an entry in the procedure linkage table. Set
it up. */
- if (h->dynindx == -1
- || htab->root.splt == NULL
- || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
+ /* When building a static executable, use .iplt, .igot.plt and
+ .rela.iplt sections for STT_GNU_IFUNC symbols. */
+ if (htab->root.splt != NULL)
+ {
+ plt = htab->root.splt;
+ gotplt = htab->root.sgotplt;
+ relplt = htab->root.srelplt;
+ }
+ else
+ {
+ plt = htab->root.iplt;
+ gotplt = htab->root.igotplt;
+ relplt = htab->root.irelplt;
+ }
+
+ /* This symbol has an entry in the procedure linkage table. Set
+ it up. */
+ if ((h->dynindx == -1
+ && !((h->forced_local || info->executable)
+ && h->def_regular
+ && h->type == STT_GNU_IFUNC))
+ || plt == NULL
+ || gotplt == NULL
+ || relplt == NULL)
abort ();
- elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
+ elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
if (!h->def_regular)
{
/* Mark the symbol as undefined, rather than as defined in
}
if (h->got.offset != (bfd_vma) - 1
- && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
+ && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
{
Elf_Internal_Rela rela;
bfd_byte *loc;
+ htab->root.sgot->output_offset
+ (h->got.offset & ~(bfd_vma) 1));
- if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
+ if (h->def_regular
+ && h->type == STT_GNU_IFUNC)
+ {
+ if (info->shared)
+ {
+ /* Generate R_AARCH64_GLOB_DAT. */
+ goto do_glob_dat;
+ }
+ else
+ {
+ asection *plt;
+
+ if (!h->pointer_equality_needed)
+ abort ();
+
+ /* For non-shared object, we can't use .got.plt, which
+ contains the real function address if we need pointer
+ equality. We load the GOT entry with the PLT entry. */
+ plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
+ bfd_put_NN (output_bfd, (plt->output_section->vma
+ + plt->output_offset
+ + h->plt.offset),
+ htab->root.sgot->contents
+ + (h->got.offset & ~(bfd_vma) 1));
+ return TRUE;
+ }
+ }
+ else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
{
if (!h->def_regular)
return FALSE;
BFD_ASSERT ((h->got.offset & 1) != 0);
- rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
+ rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
rela.r_addend = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
}
else
{
+do_glob_dat:
BFD_ASSERT ((h->got.offset & 1) == 0);
- bfd_put_64 (output_bfd, (bfd_vma) 0,
+ bfd_put_NN (output_bfd, (bfd_vma) 0,
htab->root.sgot->contents + h->got.offset);
- rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
+ rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
rela.r_addend = 0;
}
loc = htab->root.srelgot->contents;
loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
}
if (h->needs_copy)
rela.r_offset = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
- rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
+ rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
rela.r_addend = 0;
loc = htab->srelbss->contents;
loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
- bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
+ bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
}
/* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
return TRUE;
}
+/* Finish up local dynamic symbol handling. We set the contents of
+ various dynamic sections here. */
+
+static bfd_boolean
+elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
+{
+ struct elf_link_hash_entry *h
+ = (struct elf_link_hash_entry *) *slot;
+ struct bfd_link_info *info
+ = (struct bfd_link_info *) inf;
+
+ return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
+ info, h, NULL);
+}
+
static void
-elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
- struct elf64_aarch64_link_hash_table
+elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct elf_aarch64_link_hash_table
*htab)
{
/* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
small and large plts and at the minute just generates
the small PLT. */
- /* PLT0 of the small PLT looks like this -
+ /* PLT0 of the small PLT looks like this in ELF64 -
stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
// GOTPLT entry for this.
br x17
+ PLT0 will be slightly different in ELF32 due to different got entry
+ size.
*/
- bfd_vma plt_got_base;
+ bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
bfd_vma plt_base;
- memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
+ memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
PLT_ENTRY_SIZE);
elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
PLT_ENTRY_SIZE;
- plt_got_base = (htab->root.sgotplt->output_section->vma
- + htab->root.sgotplt->output_offset);
+ plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
+ + htab->root.sgotplt->output_offset
+ + GOT_ENTRY_SIZE * 2);
plt_base = htab->root.splt->output_section->vma +
- htab->root.splt->output_section->output_offset;
+ htab->root.splt->output_offset;
/* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
- htab->root.splt->contents + 4,
- PG (plt_got_base + 16) - PG (plt_base + 4));
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ htab->root.splt->contents + 4,
+ PG (plt_got_2nd_ent) - PG (plt_base + 4));
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
- htab->root.splt->contents + 8,
- PG_OFFSET (plt_got_base + 16));
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
+ htab->root.splt->contents + 8,
+ PG_OFFSET (plt_got_2nd_ent));
- elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
- htab->root.splt->contents + 12,
- PG_OFFSET (plt_got_base + 16));
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
+ htab->root.splt->contents + 12,
+ PG_OFFSET (plt_got_2nd_ent));
}
static bfd_boolean
-elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
+elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
struct bfd_link_info *info)
{
- struct elf64_aarch64_link_hash_table *htab;
+ struct elf_aarch64_link_hash_table *htab;
bfd *dynobj;
asection *sdyn;
- htab = elf64_aarch64_hash_table (info);
+ htab = elf_aarch64_hash_table (info);
dynobj = htab->root.dynobj;
sdyn = bfd_get_linker_section (dynobj, ".dynamic");
if (htab->root.dynamic_sections_created)
{
- Elf64_External_Dyn *dyncon, *dynconend;
+ ElfNN_External_Dyn *dyncon, *dynconend;
if (sdyn == NULL || htab->root.sgot == NULL)
abort ();
- dyncon = (Elf64_External_Dyn *) sdyn->contents;
- dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
+ dyncon = (ElfNN_External_Dyn *) sdyn->contents;
+ dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
for (; dyncon < dynconend; dyncon++)
{
Elf_Internal_Dyn dyn;
asection *s;
- bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
+ bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
switch (dyn.d_tag)
{
break;
case DT_PLTRELSZ:
- s = htab->root.srelplt->output_section;
+ s = htab->root.srelplt;
dyn.d_un.d_val = s->size;
break;
about changing the DT_RELA entry. */
if (htab->root.srelplt != NULL)
{
- s = htab->root.srelplt->output_section;
+ s = htab->root.srelplt;
dyn.d_un.d_val -= s->size;
}
break;
break;
}
- bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
+ bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
}
}
/* Fill in the special first entry in the procedure linkage table. */
if (htab->root.splt && htab->root.splt->size > 0)
{
- elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
+ elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
elf_section_data (htab->root.splt->output_section)->
this_hdr.sh_entsize = htab->plt_entry_size;
if (htab->tlsdesc_plt)
{
- bfd_put_64 (output_bfd, (bfd_vma) 0,
+ bfd_put_NN (output_bfd, (bfd_vma) 0,
htab->root.sgot->contents + htab->dt_tlsdesc_got);
memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
- elf64_aarch64_tlsdesc_small_plt_entry,
- sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
+ elfNN_aarch64_tlsdesc_small_plt_entry,
+ sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
{
bfd_vma adrp1_addr =
htab->root.splt->output_section->vma
+ htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
- bfd_vma adrp2_addr =
- htab->root.splt->output_section->vma
- + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
+ bfd_vma adrp2_addr = adrp1_addr + 4;
bfd_vma got_addr =
htab->root.sgot->output_section->vma
+ htab->root.sgotplt->output_offset;
bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
- bfd_vma opcode;
+
+ bfd_byte *plt_entry =
+ htab->root.splt->contents + htab->tlsdesc_plt;
/* adrp x2, DT_TLSDESC_GOT */
- opcode = bfd_get_32 (output_bfd,
- htab->root.splt->contents
- + htab->tlsdesc_plt + 4);
- opcode = reencode_adr_imm
- (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
- bfd_put_32 (output_bfd, opcode,
- htab->root.splt->contents + htab->tlsdesc_plt + 4);
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ plt_entry + 4,
+ (PG (dt_tlsdesc_got)
+ - PG (adrp1_addr)));
/* adrp x3, 0 */
- opcode = bfd_get_32 (output_bfd,
- htab->root.splt->contents
- + htab->tlsdesc_plt + 8);
- opcode = reencode_adr_imm
- (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
- bfd_put_32 (output_bfd, opcode,
- htab->root.splt->contents + htab->tlsdesc_plt + 8);
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ plt_entry + 8,
+ (PG (pltgot_addr)
+ - PG (adrp2_addr)));
/* ldr x2, [x2, #0] */
- opcode = bfd_get_32 (output_bfd,
- htab->root.splt->contents
- + htab->tlsdesc_plt + 12);
- opcode = reencode_ldst_pos_imm (opcode,
- PG_OFFSET (dt_tlsdesc_got) >> 3);
- bfd_put_32 (output_bfd, opcode,
- htab->root.splt->contents + htab->tlsdesc_plt + 12);
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_LDSTNN_LO12,
+ plt_entry + 12,
+ PG_OFFSET (dt_tlsdesc_got));
/* add x3, x3, 0 */
- opcode = bfd_get_32 (output_bfd,
- htab->root.splt->contents
- + htab->tlsdesc_plt + 16);
- opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
- bfd_put_32 (output_bfd, opcode,
- htab->root.splt->contents + htab->tlsdesc_plt + 16);
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_ADD_LO12,
+ plt_entry + 16,
+ PG_OFFSET (pltgot_addr));
}
}
}
/* Fill in the first three entries in the global offset table. */
if (htab->root.sgotplt->size > 0)
{
- /* Set the first entry in the global offset table to the address of
- the dynamic section. */
- if (sdyn == NULL)
- bfd_put_64 (output_bfd, (bfd_vma) 0,
- htab->root.sgotplt->contents);
- else
- bfd_put_64 (output_bfd,
- sdyn->output_section->vma + sdyn->output_offset,
- htab->root.sgotplt->contents);
+ bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
+
/* Write GOT[1] and GOT[2], needed for the dynamic linker. */
- bfd_put_64 (output_bfd,
+ bfd_put_NN (output_bfd,
(bfd_vma) 0,
htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
- bfd_put_64 (output_bfd,
+ bfd_put_NN (output_bfd,
(bfd_vma) 0,
htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
}
+ if (htab->root.sgot)
+ {
+ if (htab->root.sgot->size > 0)
+ {
+ bfd_vma addr =
+ sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
+ bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
+ }
+ }
+
elf_section_data (htab->root.sgotplt->output_section)->
this_hdr.sh_entsize = GOT_ENTRY_SIZE;
}
elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
= GOT_ENTRY_SIZE;
+ /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
+ htab_traverse (htab->loc_hash_table,
+ elfNN_aarch64_finish_local_dynamic_symbol,
+ info);
+
return TRUE;
}
or (bfd_vma) -1 if it should not be included. */
static bfd_vma
-elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
+elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
const arelent *rel ATTRIBUTE_UNUSED)
{
return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
/* We use this so we can override certain functions
(though currently we don't). */
-const struct elf_size_info elf64_aarch64_size_info =
+const struct elf_size_info elfNN_aarch64_size_info =
{
- sizeof (Elf64_External_Ehdr),
- sizeof (Elf64_External_Phdr),
- sizeof (Elf64_External_Shdr),
- sizeof (Elf64_External_Rel),
- sizeof (Elf64_External_Rela),
- sizeof (Elf64_External_Sym),
- sizeof (Elf64_External_Dyn),
+ sizeof (ElfNN_External_Ehdr),
+ sizeof (ElfNN_External_Phdr),
+ sizeof (ElfNN_External_Shdr),
+ sizeof (ElfNN_External_Rel),
+ sizeof (ElfNN_External_Rela),
+ sizeof (ElfNN_External_Sym),
+ sizeof (ElfNN_External_Dyn),
sizeof (Elf_External_Note),
4, /* Hash table entry size. */
1, /* Internal relocs per external relocs. */
- 64, /* Arch size. */
- 3, /* Log_file_align. */
- ELFCLASS64, EV_CURRENT,
- bfd_elf64_write_out_phdrs,
- bfd_elf64_write_shdrs_and_ehdr,
- bfd_elf64_checksum_contents,
- bfd_elf64_write_relocs,
- bfd_elf64_swap_symbol_in,
- bfd_elf64_swap_symbol_out,
- bfd_elf64_slurp_reloc_table,
- bfd_elf64_slurp_symbol_table,
- bfd_elf64_swap_dyn_in,
- bfd_elf64_swap_dyn_out,
- bfd_elf64_swap_reloc_in,
- bfd_elf64_swap_reloc_out,
- bfd_elf64_swap_reloca_in,
- bfd_elf64_swap_reloca_out
+ ARCH_SIZE, /* Arch size. */
+ LOG_FILE_ALIGN, /* Log_file_align. */
+ ELFCLASSNN, EV_CURRENT,
+ bfd_elfNN_write_out_phdrs,
+ bfd_elfNN_write_shdrs_and_ehdr,
+ bfd_elfNN_checksum_contents,
+ bfd_elfNN_write_relocs,
+ bfd_elfNN_swap_symbol_in,
+ bfd_elfNN_swap_symbol_out,
+ bfd_elfNN_slurp_reloc_table,
+ bfd_elfNN_slurp_symbol_table,
+ bfd_elfNN_swap_dyn_in,
+ bfd_elfNN_swap_dyn_out,
+ bfd_elfNN_swap_reloc_in,
+ bfd_elfNN_swap_reloc_out,
+ bfd_elfNN_swap_reloca_in,
+ bfd_elfNN_swap_reloca_out
};
#define ELF_ARCH bfd_arch_aarch64
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
-#define bfd_elf64_close_and_cleanup \
- elf64_aarch64_close_and_cleanup
-
-#define bfd_elf64_bfd_copy_private_bfd_data \
- elf64_aarch64_copy_private_bfd_data
+#define bfd_elfNN_close_and_cleanup \
+ elfNN_aarch64_close_and_cleanup
-#define bfd_elf64_bfd_free_cached_info \
- elf64_aarch64_bfd_free_cached_info
+#define bfd_elfNN_bfd_free_cached_info \
+ elfNN_aarch64_bfd_free_cached_info
-#define bfd_elf64_bfd_is_target_special_symbol \
- elf64_aarch64_is_target_special_symbol
+#define bfd_elfNN_bfd_is_target_special_symbol \
+ elfNN_aarch64_is_target_special_symbol
-#define bfd_elf64_bfd_link_hash_table_create \
- elf64_aarch64_link_hash_table_create
+#define bfd_elfNN_bfd_link_hash_table_create \
+ elfNN_aarch64_link_hash_table_create
-#define bfd_elf64_bfd_link_hash_table_free \
- elf64_aarch64_hash_table_free
+#define bfd_elfNN_bfd_link_hash_table_free \
+ elfNN_aarch64_hash_table_free
-#define bfd_elf64_bfd_merge_private_bfd_data \
- elf64_aarch64_merge_private_bfd_data
+#define bfd_elfNN_bfd_merge_private_bfd_data \
+ elfNN_aarch64_merge_private_bfd_data
-#define bfd_elf64_bfd_print_private_bfd_data \
- elf64_aarch64_print_private_bfd_data
+#define bfd_elfNN_bfd_print_private_bfd_data \
+ elfNN_aarch64_print_private_bfd_data
-#define bfd_elf64_bfd_reloc_type_lookup \
- elf64_aarch64_reloc_type_lookup
+#define bfd_elfNN_bfd_reloc_type_lookup \
+ elfNN_aarch64_reloc_type_lookup
-#define bfd_elf64_bfd_reloc_name_lookup \
- elf64_aarch64_reloc_name_lookup
+#define bfd_elfNN_bfd_reloc_name_lookup \
+ elfNN_aarch64_reloc_name_lookup
-#define bfd_elf64_bfd_set_private_flags \
- elf64_aarch64_set_private_flags
+#define bfd_elfNN_bfd_set_private_flags \
+ elfNN_aarch64_set_private_flags
-#define bfd_elf64_find_inliner_info \
- elf64_aarch64_find_inliner_info
+#define bfd_elfNN_find_inliner_info \
+ elfNN_aarch64_find_inliner_info
-#define bfd_elf64_find_nearest_line \
- elf64_aarch64_find_nearest_line
+#define bfd_elfNN_find_nearest_line \
+ elfNN_aarch64_find_nearest_line
-#define bfd_elf64_mkobject \
- elf64_aarch64_mkobject
+#define bfd_elfNN_mkobject \
+ elfNN_aarch64_mkobject
-#define bfd_elf64_new_section_hook \
- elf64_aarch64_new_section_hook
+#define bfd_elfNN_new_section_hook \
+ elfNN_aarch64_new_section_hook
#define elf_backend_adjust_dynamic_symbol \
- elf64_aarch64_adjust_dynamic_symbol
+ elfNN_aarch64_adjust_dynamic_symbol
#define elf_backend_always_size_sections \
- elf64_aarch64_always_size_sections
+ elfNN_aarch64_always_size_sections
#define elf_backend_check_relocs \
- elf64_aarch64_check_relocs
+ elfNN_aarch64_check_relocs
#define elf_backend_copy_indirect_symbol \
- elf64_aarch64_copy_indirect_symbol
+ elfNN_aarch64_copy_indirect_symbol
/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
to them in our hash. */
#define elf_backend_create_dynamic_sections \
- elf64_aarch64_create_dynamic_sections
+ elfNN_aarch64_create_dynamic_sections
#define elf_backend_init_index_section \
_bfd_elf_init_2_index_sections
-#define elf_backend_is_function_type \
- elf64_aarch64_is_function_type
-
#define elf_backend_finish_dynamic_sections \
- elf64_aarch64_finish_dynamic_sections
+ elfNN_aarch64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol \
- elf64_aarch64_finish_dynamic_symbol
+ elfNN_aarch64_finish_dynamic_symbol
#define elf_backend_gc_sweep_hook \
- elf64_aarch64_gc_sweep_hook
+ elfNN_aarch64_gc_sweep_hook
#define elf_backend_object_p \
- elf64_aarch64_object_p
+ elfNN_aarch64_object_p
#define elf_backend_output_arch_local_syms \
- elf64_aarch64_output_arch_local_syms
+ elfNN_aarch64_output_arch_local_syms
#define elf_backend_plt_sym_val \
- elf64_aarch64_plt_sym_val
+ elfNN_aarch64_plt_sym_val
#define elf_backend_post_process_headers \
- elf64_aarch64_post_process_headers
+ elfNN_aarch64_post_process_headers
#define elf_backend_relocate_section \
- elf64_aarch64_relocate_section
+ elfNN_aarch64_relocate_section
#define elf_backend_reloc_type_class \
- elf64_aarch64_reloc_type_class
-
-#define elf_backend_section_flags \
- elf64_aarch64_section_flags
+ elfNN_aarch64_reloc_type_class
#define elf_backend_section_from_shdr \
- elf64_aarch64_section_from_shdr
+ elfNN_aarch64_section_from_shdr
#define elf_backend_size_dynamic_sections \
- elf64_aarch64_size_dynamic_sections
+ elfNN_aarch64_size_dynamic_sections
#define elf_backend_size_info \
- elf64_aarch64_size_info
+ elfNN_aarch64_size_info
#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"
-#include "elf64-target.h"
+#include "elfNN-target.h"