/* X86-64 specific support for 64-bit ELF
- Copyright 2000, 2001, 2002, 2003, 2004, 2005
+ Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
Free Software Foundation, Inc.
Contributed by Jan Hubicka <jh@suse.cz>.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
+ the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
-#include "bfd.h"
#include "sysdep.h"
+#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
+#include "bfd_stdint.h"
#include "elf/x86-64.h"
#define MINUS_ONE (~ (bfd_vma) 0)
/* The relocation "howto" table. Order of fields:
- type, size, bitsize, pc_relative, complain_on_overflow,
- special_function, name, partial_inplace, src_mask, dst_pack, pcrel_offset. */
+ type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
+ special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
static reloc_howto_type x86_64_elf_howto_table[] =
{
HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
FALSE, 0xffffffff, 0xffffffff, TRUE),
+ HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
+ FALSE),
+ HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
+ MINUS_ONE, TRUE),
+ HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
+ FALSE, MINUS_ONE, MINUS_ONE, TRUE),
+ HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
+ MINUS_ONE, FALSE),
+ HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
+ MINUS_ONE, FALSE),
+ EMPTY_HOWTO (32),
+ EMPTY_HOWTO (33),
+ HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
+ complain_overflow_bitfield, bfd_elf_generic_reloc,
+ "R_X86_64_GOTPC32_TLSDESC",
+ FALSE, 0xffffffff, 0xffffffff, TRUE),
+ HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
+ complain_overflow_dont, bfd_elf_generic_reloc,
+ "R_X86_64_TLSDESC_CALL",
+ FALSE, 0, 0, FALSE),
+ HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
+ complain_overflow_bitfield, bfd_elf_generic_reloc,
+ "R_X86_64_TLSDESC",
+ FALSE, MINUS_ONE, MINUS_ONE, FALSE),
/* We have a gap in the reloc numbers here.
R_X86_64_standard counts the number up to this point, and
R_X86_64_vt_offset is the value to subtract from a reloc type of
R_X86_64_GNU_VT* to form an index into this table. */
-#define R_X86_64_standard (R_X86_64_GOTPC32 + 1)
+#define R_X86_64_standard (R_X86_64_TLSDESC + 1)
#define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
/* GNU extension to record C++ vtable hierarchy. */
{ BFD_RELOC_64_PCREL, R_X86_64_PC64, },
{ BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
{ BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
+ { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
+ { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
+ { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
+ { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
+ { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
+ { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
+ { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
+ { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
{ BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
{ BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
};
+static reloc_howto_type *
+elf64_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
+{
+ unsigned i;
+
+ if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
+ || r_type >= (unsigned int) R_X86_64_max)
+ {
+ if (r_type >= (unsigned int) R_X86_64_standard)
+ {
+ (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
+ abfd, (int) r_type);
+ r_type = R_X86_64_NONE;
+ }
+ i = r_type;
+ }
+ else
+ i = r_type - (unsigned int) R_X86_64_vt_offset;
+ BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
+ return &x86_64_elf_howto_table[i];
+}
/* Given a BFD reloc type, return a HOWTO structure. */
static reloc_howto_type *
-elf64_x86_64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+elf64_x86_64_reloc_type_lookup (bfd *abfd,
bfd_reloc_code_real_type code)
{
unsigned int i;
i++)
{
if (x86_64_reloc_map[i].bfd_reloc_val == code)
- return &x86_64_elf_howto_table[i];
+ return elf64_x86_64_rtype_to_howto (abfd,
+ x86_64_reloc_map[i].elf_reloc_val);
}
return 0;
}
+static reloc_howto_type *
+elf64_x86_64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ const char *r_name)
+{
+ unsigned int i;
+
+ for (i = 0;
+ i < (sizeof (x86_64_elf_howto_table)
+ / sizeof (x86_64_elf_howto_table[0]));
+ i++)
+ if (x86_64_elf_howto_table[i].name != NULL
+ && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
+ return &x86_64_elf_howto_table[i];
+
+ return NULL;
+}
+
/* Given an x86_64 ELF reloc type, fill in an arelent structure. */
static void
elf64_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
Elf_Internal_Rela *dst)
{
- unsigned r_type, i;
+ unsigned r_type;
r_type = ELF64_R_TYPE (dst->r_info);
- if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
- || r_type >= (unsigned int) R_X86_64_max)
- {
- if (r_type >= (unsigned int) R_X86_64_standard)
- {
- (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
- abfd, (int) r_type);
- r_type = R_X86_64_NONE;
- }
- i = r_type;
- }
- else
- i = r_type - (unsigned int) R_X86_64_vt_offset;
- cache_ptr->howto = &x86_64_elf_howto_table[i];
+ cache_ptr->howto = elf64_x86_64_rtype_to_howto (abfd, r_type);
BFD_ASSERT (r_type == cache_ptr->howto->type);
}
\f
{
0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
- 0x90, 0x90, 0x90, 0x90 /* pad out to 16 bytes with nops. */
+ 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
};
/* Subsequent entries in a procedure linkage table look like this. */
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 3
+#define GOT_TLS_GDESC 4
+#define GOT_TLS_GD_BOTH_P(type) \
+ ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
+#define GOT_TLS_GD_P(type) \
+ ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
+#define GOT_TLS_GDESC_P(type) \
+ ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
+#define GOT_TLS_GD_ANY_P(type) \
+ (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
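+
+/* GOT_TLS_GD (2) and GOT_TLS_GDESC (4) are chosen so that they can be
+   OR'ed together (giving 6) when a symbol is accessed through both the
+   GD and the GDesc model, without colliding with GOT_NORMAL (1) or
+   GOT_TLS_IE (3); the *_P predicates above decode that encoding.  */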
unsigned char tls_type;
+
+ /* Offset of the GOTPLT entry reserved for the TLS descriptor,
+ starting at the end of the jump table. */
+ bfd_vma tlsdesc_got;
};
#define elf64_x86_64_hash_entry(ent) \
/* tls_type for each local got entry. */
char *local_got_tls_type;
+
+ /* GOTPLT entries for TLS descriptors. */
+ bfd_vma *local_tlsdesc_gotent;
};
#define elf64_x86_64_tdata(abfd) \
#define elf64_x86_64_local_got_tls_type(abfd) \
(elf64_x86_64_tdata (abfd)->local_got_tls_type)
+#define elf64_x86_64_local_tlsdesc_gotent(abfd) \
+ (elf64_x86_64_tdata (abfd)->local_tlsdesc_gotent)
/* x86-64 ELF linker hash table. */
asection *sdynbss;
asection *srelbss;
+ /* The offset into splt of the PLT entry for the TLS descriptor
+ resolver. Special values are 0, if not necessary (or not found
+ to be necessary yet), and -1 if needed but not determined
+ yet. */
+ bfd_vma tlsdesc_plt;
+ /* The offset into sgot of the GOT entry used by the PLT entry
+ above. */
+ bfd_vma tlsdesc_got;
+
union {
bfd_signed_vma refcount;
bfd_vma offset;
} tls_ld_got;
+ /* The amount of space used by the jump slots in the GOT. */
+ bfd_vma sgotplt_jump_table_size;
+
/* Small local sym to section mapping cache. */
struct sym_sec_cache sym_sec;
};
#define elf64_x86_64_hash_table(p) \
((struct elf64_x86_64_link_hash_table *) ((p)->hash))
+#define elf64_x86_64_compute_jump_table_size(htab) \
+ ((htab)->srelplt->reloc_count * GOT_ENTRY_SIZE)
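+
+/* Every jump slot allocated in .got.plt has a matching reloc in
+   .rela.plt, so reloc_count * GOT_ENTRY_SIZE is the size of the jump
+   table; GOT slots for TLS descriptors are placed after it.  */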
+
/* Create an entry in an x86-64 ELF linker hash table. */
static struct bfd_hash_entry *
eh = (struct elf64_x86_64_link_hash_entry *) entry;
eh->dyn_relocs = NULL;
eh->tls_type = GOT_UNKNOWN;
+ eh->tlsdesc_got = (bfd_vma) -1;
}
return entry;
if (ret == NULL)
return NULL;
- if (! _bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc))
+ if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc,
+ sizeof (struct elf64_x86_64_link_hash_entry)))
{
free (ret);
return NULL;
ret->sdynbss = NULL;
ret->srelbss = NULL;
ret->sym_sec.abfd = NULL;
+ ret->tlsdesc_plt = 0;
+ ret->tlsdesc_got = 0;
ret->tls_ld_got.refcount = 0;
+ ret->sgotplt_jump_table_size = 0;
return &ret->elf.root;
}
/* Copy the extra info we tack onto an elf_link_hash_entry. */
static void
-elf64_x86_64_copy_indirect_symbol (const struct elf_backend_data *bed,
+elf64_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
struct elf_link_hash_entry *dir,
struct elf_link_hash_entry *ind)
{
struct elf64_x86_64_dyn_relocs **pp;
struct elf64_x86_64_dyn_relocs *p;
- if (ind->root.type == bfd_link_hash_indirect)
- abort ();
-
- /* Add reloc counts against the weak sym to the strong sym
+ /* Add reloc counts against the indirect sym to the direct sym
list. Merge any entries against the same section. */
for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
{
dir->pointer_equality_needed |= ind->pointer_equality_needed;
}
else
- _bfd_elf_link_hash_copy_indirect (bed, dir, ind);
+ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
static bfd_boolean
elf64_x86_64_mkobject (bfd *abfd)
{
- bfd_size_type amt = sizeof (struct elf64_x86_64_obj_tdata);
- abfd->tdata.any = bfd_zalloc (abfd, amt);
if (abfd->tdata.any == NULL)
- return FALSE;
- return TRUE;
+ {
+ bfd_size_type amt = sizeof (struct elf64_x86_64_obj_tdata);
+ abfd->tdata.any = bfd_zalloc (abfd, amt);
+ if (abfd->tdata.any == NULL)
+ return FALSE;
+ }
+ return bfd_elf_mkobject (abfd);
}
static bfd_boolean
return TRUE;
}
-static int
-elf64_x86_64_tls_transition (struct bfd_link_info *info, int r_type, int is_local)
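+
+/* Helper unions: an opcode pattern filled in byte by byte can be
+   compared as a single integer against the value that bfd_get_16 or
+   bfd_get_32 reads from the section contents.  */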
+typedef union
+ {
+ unsigned char c[2];
+ uint16_t i;
+ }
+x86_64_opcode16;
+
+typedef union
+ {
+ unsigned char c[4];
+ uint32_t i;
+ }
+x86_64_opcode32;
+
+/* Return TRUE if the TLS access code sequence supports transition
+ from R_TYPE. */
+
+static bfd_boolean
+elf64_x86_64_check_tls_transition (bfd *abfd, asection *sec,
+ bfd_byte *contents,
+ Elf_Internal_Shdr *symtab_hdr,
+ struct elf_link_hash_entry **sym_hashes,
+ unsigned int r_type,
+ const Elf_Internal_Rela *rel,
+ const Elf_Internal_Rela *relend)
{
- if (info->shared)
- return r_type;
+ unsigned int val;
+ unsigned long r_symndx;
+ struct elf_link_hash_entry *h;
+ bfd_vma offset;
+ /* Get the section contents. */
+ if (contents == NULL)
+ {
+ if (elf_section_data (sec)->this_hdr.contents != NULL)
+ contents = elf_section_data (sec)->this_hdr.contents;
+ else
+ {
+ /* FIXME: How to better handle error condition? */
+ if (!bfd_malloc_and_get_section (abfd, sec, &contents))
+ return FALSE;
+
+ /* Cache the section contents for elf_link_input_bfd. */
+ elf_section_data (sec)->this_hdr.contents = contents;
+ }
+ }
+
+ offset = rel->r_offset;
switch (r_type)
{
case R_X86_64_TLSGD:
+ case R_X86_64_TLSLD:
+ if ((rel + 1) >= relend)
+ return FALSE;
+
+ if (r_type == R_X86_64_TLSGD)
+ {
+ /* Check transition from GD access model. Only
+ .byte 0x66; leaq foo@tlsgd(%rip), %rdi
+ .word 0x6666; rex64; call __tls_get_addr
+ can transition to a different access model. */
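+ /* r_offset points at the 32-bit @tlsgd operand, so the four bytes
+ before it must be 0x66 0x48 0x8d 0x3d (data16; REX.W leaq into
+ %rdi with a RIP-relative ModRM) and the four bytes at
+ r_offset + 4 must be 0x66 0x66 0x48 0xe8 (two data16 prefixes,
+ rex64 and the call opcode); together with the two 32-bit
+ operands this is the 16-byte sequence the GD rewrites replace. */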
+
+ static x86_64_opcode32 leaq = { { 0x66, 0x48, 0x8d, 0x3d } },
+ call = { { 0x66, 0x66, 0x48, 0xe8 } };
+ if (offset < 4
+ || (offset + 12) > sec->size
+ || bfd_get_32 (abfd, contents + offset - 4) != leaq.i
+ || bfd_get_32 (abfd, contents + offset + 4) != call.i)
+ return FALSE;
+ }
+ else
+ {
+ /* Check transition from LD access model. Only
+ leaq foo@tlsld(%rip), %rdi;
+ call __tls_get_addr
+ can transition to a different access model. */
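+ /* Here r_offset points at the 32-bit @tlsld operand: the three
+ bytes before it must be 0x48 0x8d 0x3d (leaq into %rdi with a
+ RIP-relative ModRM) and the byte at r_offset + 4 must be 0xe8,
+ the opcode of the following call. */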
+
+ static x86_64_opcode32 ld = { { 0x48, 0x8d, 0x3d, 0xe8 } };
+ x86_64_opcode32 op;
+
+ if (offset < 3 || (offset + 9) > sec->size)
+ return FALSE;
+
+ op.i = bfd_get_32 (abfd, contents + offset - 3);
+ op.c[3] = bfd_get_8 (abfd, contents + offset + 4);
+ if (op.i != ld.i)
+ return FALSE;
+ }
+
+ r_symndx = ELF64_R_SYM (rel[1].r_info);
+ if (r_symndx < symtab_hdr->sh_info)
+ return FALSE;
+
+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
+ return (h != NULL
+ && h->root.root.string != NULL
+ && (ELF64_R_TYPE (rel[1].r_info) == R_X86_64_PC32
+ || ELF64_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)
+ && (strcmp (h->root.root.string, "__tls_get_addr") == 0));
+
case R_X86_64_GOTTPOFF:
- if (is_local)
- return R_X86_64_TPOFF32;
- return R_X86_64_GOTTPOFF;
+ /* Check transition from IE access model:
+ movq foo@gottpoff(%rip), %reg
+ addq foo@gottpoff(%rip), %reg
+ */
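+ /* The byte at r_offset - 3 is the REX prefix (0x48, or 0x4c when
+ the destination is %r8..%r15), r_offset - 2 is the opcode (0x8b
+ movq or 0x03 addq), and r_offset - 1 is the ModRM byte, which
+ must select RIP-relative addressing: mod = 00, r/m = 101, i.e.
+ (modrm & 0xc7) == 5. */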
+
+ if (offset < 3 || (offset + 4) > sec->size)
+ return FALSE;
+
+ val = bfd_get_8 (abfd, contents + offset - 3);
+ if (val != 0x48 && val != 0x4c)
+ return FALSE;
+
+ val = bfd_get_8 (abfd, contents + offset - 2);
+ if (val != 0x8b && val != 0x03)
+ return FALSE;
+
+ val = bfd_get_8 (abfd, contents + offset - 1);
+ return (val & 0xc7) == 5;
+
+ case R_X86_64_GOTPC32_TLSDESC:
+ /* Check transition from GDesc access model:
+ leaq x@tlsdesc(%rip), %rax
+
+ Make sure it's a leaq adding rip to a 32-bit offset
+ into any register, although it's probably almost always
+ going to be rax. */
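+ /* (rex & 0xfb) == 0x48 accepts 0x48 (REX.W) and 0x4c (REX.WR, for
+ an %r8..%r15 destination), the opcode must be 0x8d (lea), and the
+ ModRM byte must select RIP-relative addressing (mod = 00,
+ r/m = 101). */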
+
+ if (offset < 3 || (offset + 4) > sec->size)
+ return FALSE;
+
+ val = bfd_get_8 (abfd, contents + offset - 3);
+ if ((val & 0xfb) != 0x48)
+ return FALSE;
+
+ if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
+ return FALSE;
+
+ val = bfd_get_8 (abfd, contents + offset - 1);
+ return (val & 0xc7) == 0x05;
+
+ case R_X86_64_TLSDESC_CALL:
+ /* Check transition from GDesc access model:
+ call *x@tlsdesc(%rax)
+ */
+ if (offset + 2 <= sec->size)
+ {
+ /* Make sure that it's a call *x@tlsdesc(%rax). */
+ static x86_64_opcode16 call = { { 0xff, 0x10 } };
+ return bfd_get_16 (abfd, contents + offset) == call.i;
+ }
+
+ return FALSE;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return TRUE if the TLS access transition is OK or no transition
+ will be performed. Update R_TYPE if there is a transition. */
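+
+/* Transition summary (see the switch below): when linking an
+   executable, GD, GDesc and IE accesses to a symbol local to the
+   output relax to LE (R_X86_64_TPOFF32) and to IE (R_X86_64_GOTTPOFF)
+   otherwise, while LD relaxes to LE; at relocation time, GD and GDesc
+   accesses whose GOT slot was allocated as IE are further relaxed to
+   IE.  */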
+
+static bfd_boolean
+elf64_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
+ asection *sec, bfd_byte *contents,
+ Elf_Internal_Shdr *symtab_hdr,
+ struct elf_link_hash_entry **sym_hashes,
+ unsigned int *r_type, int tls_type,
+ const Elf_Internal_Rela *rel,
+ const Elf_Internal_Rela *relend,
+ struct elf_link_hash_entry *h)
+{
+ unsigned int from_type = *r_type;
+ unsigned int to_type = from_type;
+ bfd_boolean check = TRUE;
+
+ switch (from_type)
+ {
+ case R_X86_64_TLSGD:
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_TLSDESC_CALL:
+ case R_X86_64_GOTTPOFF:
+ if (!info->shared)
+ {
+ if (h == NULL)
+ to_type = R_X86_64_TPOFF32;
+ else
+ to_type = R_X86_64_GOTTPOFF;
+ }
+
+ /* When we are called from elf64_x86_64_relocate_section,
+ CONTENTS isn't NULL and there may be additional transitions
+ based on TLS_TYPE. */
+ if (contents != NULL)
+ {
+ unsigned int new_to_type = to_type;
+
+ if (!info->shared
+ && h != NULL
+ && h->dynindx == -1
+ && tls_type == GOT_TLS_IE)
+ new_to_type = R_X86_64_TPOFF32;
+
+ if (to_type == R_X86_64_TLSGD
+ || to_type == R_X86_64_GOTPC32_TLSDESC
+ || to_type == R_X86_64_TLSDESC_CALL)
+ {
+ if (tls_type == GOT_TLS_IE)
+ new_to_type = R_X86_64_GOTTPOFF;
+ }
+
+ /* We checked the transition before when we were called from
+ elf64_x86_64_check_relocs. We only want to check the new
+ transition which hasn't been checked before. */
+ check = new_to_type != to_type && from_type == to_type;
+ to_type = new_to_type;
+ }
+
+ break;
+
case R_X86_64_TLSLD:
- return R_X86_64_TPOFF32;
+ if (!info->shared)
+ to_type = R_X86_64_TPOFF32;
+ break;
+
+ default:
+ return TRUE;
}
- return r_type;
+ /* Return TRUE if there is no transition. */
+ if (from_type == to_type)
+ return TRUE;
+
+ /* Check if the transition can be performed. */
+ if (check
+ && ! elf64_x86_64_check_tls_transition (abfd, sec, contents,
+ symtab_hdr, sym_hashes,
+ from_type, rel, relend))
+ {
+ reloc_howto_type *from, *to;
+
+ from = elf64_x86_64_rtype_to_howto (abfd, from_type);
+ to = elf64_x86_64_rtype_to_howto (abfd, to_type);
+
+ (*_bfd_error_handler)
+ (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
+ "in section `%A' failed"),
+ abfd, sec, from->name, to->name,
+ h ? h->root.root.string : "a local symbol",
+ (unsigned long) rel->r_offset);
+ bfd_set_error (bfd_error_bad_value);
+ return FALSE;
+ }
+
+ *r_type = to_type;
+ return TRUE;
}
/* Look through the relocs for a section during the first phase, and
linkage table, and dynamic reloc sections. */
static bfd_boolean
-elf64_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec,
+elf64_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
+ asection *sec,
const Elf_Internal_Rela *relocs)
{
struct elf64_x86_64_link_hash_table *htab;
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
- r_type = elf64_x86_64_tls_transition (info, r_type, h == NULL);
+ if (! elf64_x86_64_tls_transition (info, abfd, sec, NULL,
+ symtab_hdr, sym_hashes,
+ &r_type, GOT_UNKNOWN,
+ rel, rel_end, h))
+ return FALSE;
+
switch (r_type)
{
case R_X86_64_TLSLD:
case R_X86_64_GOT32:
case R_X86_64_GOTPCREL:
case R_X86_64_TLSGD:
+ case R_X86_64_GOT64:
+ case R_X86_64_GOTPCREL64:
+ case R_X86_64_GOTPLT64:
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_TLSDESC_CALL:
/* This symbol requires a global offset table entry. */
{
int tls_type, old_tls_type;
default: tls_type = GOT_NORMAL; break;
case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_TLSDESC_CALL:
+ tls_type = GOT_TLS_GDESC; break;
}
if (h != NULL)
{
+ if (r_type == R_X86_64_GOTPLT64)
+ {
+ /* This relocation indicates that we also need
+ a PLT entry, as this is a function. We don't need
+ a PLT entry for local symbols. */
+ h->needs_plt = 1;
+ h->plt.refcount += 1;
+ }
h->got.refcount += 1;
old_tls_type = elf64_x86_64_hash_entry (h)->tls_type;
}
bfd_size_type size;
size = symtab_hdr->sh_info;
- size *= sizeof (bfd_signed_vma) + sizeof (char);
+ size *= sizeof (bfd_signed_vma)
+ + sizeof (bfd_vma) + sizeof (char);
local_got_refcounts = ((bfd_signed_vma *)
bfd_zalloc (abfd, size));
if (local_got_refcounts == NULL)
return FALSE;
elf_local_got_refcounts (abfd) = local_got_refcounts;
+ elf64_x86_64_local_tlsdesc_gotent (abfd)
+ = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
elf64_x86_64_local_got_tls_type (abfd)
- = (char *) (local_got_refcounts + symtab_hdr->sh_info);
+ = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
}
local_got_refcounts[r_symndx] += 1;
old_tls_type
/* If a TLS symbol is accessed using IE at least once,
there is no point to use dynamic model for it. */
if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
- && (old_tls_type != GOT_TLS_GD || tls_type != GOT_TLS_IE))
+ && (! GOT_TLS_GD_ANY_P (old_tls_type)
+ || tls_type != GOT_TLS_IE))
{
- if (old_tls_type == GOT_TLS_IE && tls_type == GOT_TLS_GD)
+ if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
tls_type = old_tls_type;
+ else if (GOT_TLS_GD_ANY_P (old_tls_type)
+ && GOT_TLS_GD_ANY_P (tls_type))
+ tls_type |= old_tls_type;
else
{
(*_bfd_error_handler)
- (_("%B: %s' accessed both as normal and thread local symbol"),
+ (_("%B: '%s' accessed both as normal and thread local symbol"),
abfd, h ? h->root.root.string : "<local>");
return FALSE;
}
case R_X86_64_GOTOFF64:
case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPC64:
create_got:
if (htab->sgot == NULL)
{
h->plt.refcount += 1;
break;
+ case R_X86_64_PLTOFF64:
+ /* This tries to form the 'address' of a function relative
+ to GOT. For global symbols we need a PLT entry. */
+ if (h != NULL)
+ {
+ h->needs_plt = 1;
+ h->plt.refcount += 1;
+ }
+ goto create_got;
+
case R_X86_64_8:
case R_X86_64_16:
case R_X86_64_32:
&& (r_type != R_X86_64_PC32)
&& (r_type != R_X86_64_PC64))
|| (h != NULL
- && (! info->symbolic
+ && (! SYMBOLIC_BIND (info, h)
|| h->root.type == bfd_link_hash_defweak
|| !h->def_regular))))
|| (ELIMINATE_COPY_RELOCS
if (name == NULL)
return FALSE;
- if (strncmp (name, ".rela", 5) != 0
+ if (! CONST_STRNEQ (name, ".rela")
|| strcmp (bfd_get_section_name (abfd, sec),
name + 5) != 0)
{
}
else
{
+ void **vpp;
/* Track dynamic relocs needed for local syms too.
We really need local syms available to do this
easily. Oh well. */
if (s == NULL)
return FALSE;
- head = ((struct elf64_x86_64_dyn_relocs **)
- &elf_section_data (s)->local_dynrel);
+ /* Beware of type punned pointers vs strict aliasing
+ rules. */
+ vpp = &(elf_section_data (s)->local_dynrel);
+ head = (struct elf64_x86_64_dyn_relocs **)vpp;
}
p = *head;
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_X86_64_GNU_VTENTRY:
- if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
+ BFD_ASSERT (h != NULL);
+ if (h != NULL
+ && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
return FALSE;
break;
static asection *
elf64_x86_64_gc_mark_hook (asection *sec,
- struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info,
Elf_Internal_Rela *rel,
struct elf_link_hash_entry *h,
Elf_Internal_Sym *sym)
{
if (h != NULL)
- {
- switch (ELF64_R_TYPE (rel->r_info))
- {
- case R_X86_64_GNU_VTINHERIT:
- case R_X86_64_GNU_VTENTRY:
- break;
-
- default:
- switch (h->root.type)
- {
- case bfd_link_hash_defined:
- case bfd_link_hash_defweak:
- return h->root.u.def.section;
-
- case bfd_link_hash_common:
- return h->root.u.c.p->section;
-
- default:
- break;
- }
- }
- }
- else
- return bfd_section_from_elf_index (sec->owner, sym->st_shndx);
-
- return NULL;
+ switch (ELF64_R_TYPE (rel->r_info))
+ {
+ case R_X86_64_GNU_VTINHERIT:
+ case R_X86_64_GNU_VTENTRY:
+ return NULL;
+ }
+
+ return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}
/* Update the got entry reference counts for the section being removed. */
static bfd_boolean
elf64_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
- asection *sec, const Elf_Internal_Rela *relocs)
+ asection *sec,
+ const Elf_Internal_Rela *relocs)
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
}
r_type = ELF64_R_TYPE (rel->r_info);
- r_type = elf64_x86_64_tls_transition (info, r_type, h != NULL);
+ if (! elf64_x86_64_tls_transition (info, abfd, sec, NULL,
+ symtab_hdr, sym_hashes,
+ &r_type, GOT_UNKNOWN,
+ rel, relend, h))
+ return FALSE;
+
switch (r_type)
{
case R_X86_64_TLSLD:
break;
case R_X86_64_TLSGD:
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_TLSDESC_CALL:
case R_X86_64_GOTTPOFF:
case R_X86_64_GOT32:
case R_X86_64_GOTPCREL:
+ case R_X86_64_GOT64:
+ case R_X86_64_GOTPCREL64:
+ case R_X86_64_GOTPLT64:
if (h != NULL)
{
+ if (r_type == R_X86_64_GOTPLT64 && h->plt.refcount > 0)
+ h->plt.refcount -= 1;
if (h->got.refcount > 0)
h->got.refcount -= 1;
}
/* Fall thru */
case R_X86_64_PLT32:
+ case R_X86_64_PLTOFF64:
if (h != NULL)
{
if (h->plt.refcount > 0)
{
struct elf64_x86_64_link_hash_table *htab;
asection *s;
- unsigned int power_of_two;
/* If this is a function, put it in the procedure linkage table. We
will fill in the contents of the procedure linkage table later,
h->needs_copy = 1;
}
- /* We need to figure out the alignment required for this symbol. I
- have no idea how ELF linkers handle this. 16-bytes is the size
- of the largest type that requires hard alignment -- long double. */
- /* FIXME: This is VERY ugly. Should be fixed for all architectures using
- this construct. */
- power_of_two = bfd_log2 (h->size);
- if (power_of_two > 4)
- power_of_two = 4;
-
- /* Apply the required alignment. */
s = htab->sdynbss;
- s->size = BFD_ALIGN (s->size, (bfd_size_type) (1 << power_of_two));
- if (power_of_two > bfd_get_section_alignment (htab->elf.dynobj, s))
- {
- if (! bfd_set_section_alignment (htab->elf.dynobj, s, power_of_two))
- return FALSE;
- }
- /* Define the symbol as being at this point in the section. */
- h->root.u.def.section = s;
- h->root.u.def.value = s->size;
-
- /* Increment the section size to make room for the symbol. */
- s->size += h->size;
-
- return TRUE;
+ return _bfd_elf_adjust_dynamic_copy (h, s);
}
/* Allocate space in .plt, .got and associated reloc sections for
/* We also need to make an entry in the .rela.plt section. */
htab->srelplt->size += sizeof (Elf64_External_Rela);
+ htab->srelplt->reloc_count++;
}
else
{
h->needs_plt = 0;
}
+ eh = (struct elf64_x86_64_link_hash_entry *) h;
+ eh->tlsdesc_got = (bfd_vma) -1;
+
/* If R_X86_64_GOTTPOFF symbol is now local to the binary,
make it a R_X86_64_TPOFF32 requiring no GOT entry. */
if (h->got.refcount > 0
return FALSE;
}
- s = htab->sgot;
- h->got.offset = s->size;
- s->size += GOT_ENTRY_SIZE;
- /* R_X86_64_TLSGD needs 2 consecutive GOT slots. */
- if (tls_type == GOT_TLS_GD)
- s->size += GOT_ENTRY_SIZE;
+ if (GOT_TLS_GDESC_P (tls_type))
+ {
+ eh->tlsdesc_got = htab->sgotplt->size
+ - elf64_x86_64_compute_jump_table_size (htab);
+ htab->sgotplt->size += 2 * GOT_ENTRY_SIZE;
+ h->got.offset = (bfd_vma) -2;
+ }
+ if (! GOT_TLS_GDESC_P (tls_type)
+ || GOT_TLS_GD_P (tls_type))
+ {
+ s = htab->sgot;
+ h->got.offset = s->size;
+ s->size += GOT_ENTRY_SIZE;
+ if (GOT_TLS_GD_P (tls_type))
+ s->size += GOT_ENTRY_SIZE;
+ }
dyn = htab->elf.dynamic_sections_created;
/* R_X86_64_TLSGD needs one dynamic relocation if local symbol
and two if global.
R_X86_64_GOTTPOFF needs one dynamic relocation. */
- if ((tls_type == GOT_TLS_GD && h->dynindx == -1)
+ if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
|| tls_type == GOT_TLS_IE)
htab->srelgot->size += sizeof (Elf64_External_Rela);
- else if (tls_type == GOT_TLS_GD)
+ else if (GOT_TLS_GD_P (tls_type))
htab->srelgot->size += 2 * sizeof (Elf64_External_Rela);
- else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
- || h->root.type != bfd_link_hash_undefweak)
+ else if (! GOT_TLS_GDESC_P (tls_type)
+ && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
+ || h->root.type != bfd_link_hash_undefweak)
&& (info->shared
|| WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
htab->srelgot->size += sizeof (Elf64_External_Rela);
+ if (GOT_TLS_GDESC_P (tls_type))
+ {
+ htab->srelplt->size += sizeof (Elf64_External_Rela);
+ htab->tlsdesc_plt = (bfd_vma) -1;
+ }
}
else
h->got.offset = (bfd_vma) -1;
- eh = (struct elf64_x86_64_link_hash_entry *) h;
if (eh->dyn_relocs == NULL)
return TRUE;
/* Also discard relocs on undefined weak syms with non-default
visibility. */
- if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
+ if (eh->dyn_relocs != NULL
&& h->root.type == bfd_link_hash_undefweak)
- eh->dyn_relocs = NULL;
+ {
+ if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
+ eh->dyn_relocs = NULL;
+
+ /* Make sure undefined weak symbols are output as dynamic
+ symbols in PIEs. */
+ else if (h->dynindx == -1
+ && !h->forced_local)
+ {
+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
+ return FALSE;
+ }
+ }
}
else if (ELIMINATE_COPY_RELOCS)
{
bfd_signed_vma *local_got;
bfd_signed_vma *end_local_got;
char *local_tls_type;
+ bfd_vma *local_tlsdesc_gotent;
bfd_size_type locsymcount;
Elf_Internal_Shdr *symtab_hdr;
asection *srel;
{
struct elf64_x86_64_dyn_relocs *p;
- for (p = *((struct elf64_x86_64_dyn_relocs **)
- &elf_section_data (s)->local_dynrel);
+ for (p = (struct elf64_x86_64_dyn_relocs *)
+ (elf_section_data (s)->local_dynrel);
p != NULL;
p = p->next)
{
locsymcount = symtab_hdr->sh_info;
end_local_got = local_got + locsymcount;
local_tls_type = elf64_x86_64_local_got_tls_type (ibfd);
+ local_tlsdesc_gotent = elf64_x86_64_local_tlsdesc_gotent (ibfd);
s = htab->sgot;
srel = htab->srelgot;
- for (; local_got < end_local_got; ++local_got, ++local_tls_type)
+ for (; local_got < end_local_got;
+ ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
{
+ *local_tlsdesc_gotent = (bfd_vma) -1;
if (*local_got > 0)
{
- *local_got = s->size;
- s->size += GOT_ENTRY_SIZE;
- if (*local_tls_type == GOT_TLS_GD)
- s->size += GOT_ENTRY_SIZE;
+ if (GOT_TLS_GDESC_P (*local_tls_type))
+ {
+ *local_tlsdesc_gotent = htab->sgotplt->size
+ - elf64_x86_64_compute_jump_table_size (htab);
+ htab->sgotplt->size += 2 * GOT_ENTRY_SIZE;
+ *local_got = (bfd_vma) -2;
+ }
+ if (! GOT_TLS_GDESC_P (*local_tls_type)
+ || GOT_TLS_GD_P (*local_tls_type))
+ {
+ *local_got = s->size;
+ s->size += GOT_ENTRY_SIZE;
+ if (GOT_TLS_GD_P (*local_tls_type))
+ s->size += GOT_ENTRY_SIZE;
+ }
if (info->shared
- || *local_tls_type == GOT_TLS_GD
+ || GOT_TLS_GD_ANY_P (*local_tls_type)
|| *local_tls_type == GOT_TLS_IE)
- srel->size += sizeof (Elf64_External_Rela);
+ {
+ if (GOT_TLS_GDESC_P (*local_tls_type))
+ {
+ htab->srelplt->size += sizeof (Elf64_External_Rela);
+ htab->tlsdesc_plt = (bfd_vma) -1;
+ }
+ if (! GOT_TLS_GDESC_P (*local_tls_type)
+ || GOT_TLS_GD_P (*local_tls_type))
+ srel->size += sizeof (Elf64_External_Rela);
+ }
}
else
*local_got = (bfd_vma) -1;
sym dynamic relocs. */
elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, (PTR) info);
+ /* For every jump slot reserved in the sgotplt, reloc_count is
+ incremented. However, when we reserve space for TLS descriptors,
+ it's not incremented, so in order to compute the space reserved
+ for them, it suffices to multiply the reloc count by the jump
+ slot size. */
+ if (htab->srelplt)
+ htab->sgotplt_jump_table_size
+ = elf64_x86_64_compute_jump_table_size (htab);
+
+ if (htab->tlsdesc_plt)
+ {
+ /* If we're not using lazy TLS relocations, don't generate the
+ PLT and GOT entries they require. */
+ if ((info->flags & DF_BIND_NOW))
+ htab->tlsdesc_plt = 0;
+ else
+ {
+ htab->tlsdesc_got = htab->sgot->size;
+ htab->sgot->size += GOT_ENTRY_SIZE;
+ /* Reserve room for the initial entry.
+ FIXME: we could probably do away with it in this case. */
+ if (htab->splt->size == 0)
+ htab->splt->size += PLT_ENTRY_SIZE;
+ htab->tlsdesc_plt = htab->splt->size;
+ htab->splt->size += PLT_ENTRY_SIZE;
+ }
+ }
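+
+ /* The PLT slot reserved at tlsdesc_plt is filled in when the
+ dynamic sections are finished, with a pushq/jmpq pair that enters
+ the lazy TLS descriptor resolver through the GOT word reserved at
+ tlsdesc_got; DT_TLSDESC_PLT and DT_TLSDESC_GOT tell the dynamic
+ linker where both live. */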
+
/* We now have determined the sizes of the various dynamic sections.
Allocate memory for them. */
relocs = FALSE;
/* Strip this section if we don't need it; see the
comment below. */
}
- else if (strncmp (bfd_get_section_name (dynobj, s), ".rela", 5) == 0)
+ else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
{
if (s->size != 0 && s != htab->srelplt)
relocs = TRUE;
/* We use the reloc_count field as a counter if we need
to copy relocs into the output file. */
- s->reloc_count = 0;
+ if (s != htab->srelplt)
+ s->reloc_count = 0;
}
else
{
|| !add_dynamic_entry (DT_PLTREL, DT_RELA)
|| !add_dynamic_entry (DT_JMPREL, 0))
return FALSE;
+
+ if (htab->tlsdesc_plt
+ && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
+ || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
+ return FALSE;
}
if (relocs)
return TRUE;
}
+static bfd_boolean
+elf64_x86_64_always_size_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ asection *tls_sec = elf_hash_table (info)->tls_sec;
+
+ if (tls_sec)
+ {
+ struct elf_link_hash_entry *tlsbase;
+
+ tlsbase = elf_link_hash_lookup (elf_hash_table (info),
+ "_TLS_MODULE_BASE_",
+ FALSE, FALSE, FALSE);
+
+ if (tlsbase && tlsbase->type == STT_TLS)
+ {
+ struct bfd_link_hash_entry *bh = NULL;
+ const struct elf_backend_data *bed
+ = get_elf_backend_data (output_bfd);
+
+ if (!(_bfd_generic_link_add_one_symbol
+ (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
+ tls_sec, 0, NULL, FALSE,
+ bed->collect, &bh)))
+ return FALSE;
+ tlsbase = (struct elf_link_hash_entry *)bh;
+ tlsbase->def_regular = 1;
+ tlsbase->other = STV_HIDDEN;
+ (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
+ }
+ }
+
+ return TRUE;
+}
+
/* Return the base VMA address which should be subtracted from real addresses
when resolving @dtpoff relocation.
This is PT_TLS segment p_vaddr. */
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
bfd_vma *local_got_offsets;
+ bfd_vma *local_tlsdesc_gotents;
Elf_Internal_Rela *rel;
Elf_Internal_Rela *relend;
- if (info->relocatable)
- return TRUE;
-
htab = elf64_x86_64_hash_table (info);
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
local_got_offsets = elf_local_got_offsets (input_bfd);
+ local_tlsdesc_gotents = elf64_x86_64_local_tlsdesc_gotent (input_bfd);
rel = relocs;
relend = relocs + input_section->reloc_count;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
asection *sec;
- bfd_vma off;
+ bfd_vma off, offplt;
bfd_vma relocation;
bfd_boolean unresolved_reloc;
bfd_reloc_status_type r;
h, sec, relocation,
unresolved_reloc, warned);
}
+
+ if (sec != NULL && elf_discarded_section (sec))
+ {
+ /* For relocs against symbols from removed linkonce sections,
+ or sections discarded by a linker script, we just want the
+ section contents zeroed. Avoid any special processing. */
+ _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
+ rel->r_info = 0;
+ rel->r_addend = 0;
+ continue;
+ }
+
+ if (info->relocatable)
+ continue;
+
/* When generating a shared object, the relocations handled here are
copied into the output file to be resolved at run time. */
switch (r_type)
{
+ asection *base_got;
case R_X86_64_GOT32:
+ case R_X86_64_GOT64:
/* Relocation is to the entry for this symbol in the global
offset table. */
case R_X86_64_GOTPCREL:
- /* Use global offset table as symbol value. */
+ case R_X86_64_GOTPCREL64:
+ /* Use global offset table entry as symbol value. */
+ case R_X86_64_GOTPLT64:
+ /* This is the same as GOT64 for relocation purposes, but
+ indicates the existence of a PLT entry. The difficulty is
+ that, if this symbol got a PLT entry (it was global), we must
+ calculate the GOT slot offset from the PLT offset.
+ Additionally, if it is computed from the PLT entry, that GOT
+ offset is relative to .got.plt, not to .got. */
+ base_got = htab->sgot;
+
if (htab->sgot == NULL)
abort ();
bfd_boolean dyn;
off = h->got.offset;
+ if (h->needs_plt
+ && h->plt.offset != (bfd_vma)-1
+ && off == (bfd_vma)-1)
+ {
+ /* We can't use h->got.offset here to save
+ state, or even just remember the offset, as
+ finish_dynamic_symbol would use that as offset into
+ .got. */
+ bfd_vma plt_index = h->plt.offset / PLT_ENTRY_SIZE - 1;
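+ /* PLT entry 0 is the header, and the first three .got.plt slots
+ are reserved (the _DYNAMIC address plus two words used by the
+ lazy resolver), so jump slot N lives at .got.plt offset
+ (N + 3) * GOT_ENTRY_SIZE. */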
+ off = (plt_index + 3) * GOT_ENTRY_SIZE;
+ base_got = htab->sgotplt;
+ }
+
dyn = htab->elf.dynamic_sections_created;
if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
else
{
bfd_put_64 (output_bfd, relocation,
- htab->sgot->contents + off);
+ base_got->contents + off);
+ /* Note that this is harmless for the GOTPLT64 case,
+ as -1 | 1 still is -1. */
h->got.offset |= 1;
}
}
else
{
bfd_put_64 (output_bfd, relocation,
- htab->sgot->contents + off);
+ base_got->contents + off);
if (info->shared)
{
if (s == NULL)
abort ();
- outrel.r_offset = (htab->sgot->output_section->vma
- + htab->sgot->output_offset
+ outrel.r_offset = (base_got->output_section->vma
+ + base_got->output_offset
+ off);
outrel.r_info = ELF64_R_INFO (0, R_X86_64_RELATIVE);
outrel.r_addend = relocation;
if (off >= (bfd_vma) -2)
abort ();
- relocation = htab->sgot->output_section->vma
- + htab->sgot->output_offset + off;
- if (r_type != R_X86_64_GOTPCREL)
+ relocation = base_got->output_section->vma
+ + base_got->output_offset + off;
+ if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
relocation -= htab->sgotplt->output_section->vma
- htab->sgotplt->output_offset;
break;
case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPC64:
/* Use global offset table as symbol value. */
relocation = htab->sgotplt->output_section->vma
+ htab->sgotplt->output_offset;
unresolved_reloc = FALSE;
break;
+ case R_X86_64_PLTOFF64:
+ /* Relocation is PLT entry relative to GOT. For local
+ symbols it's the symbol itself relative to GOT. */
+ if (h != NULL
+ /* See PLT32 handling. */
+ && h->plt.offset != (bfd_vma) -1
+ && htab->splt != NULL)
+ {
+ relocation = (htab->splt->output_section->vma
+ + htab->splt->output_offset
+ + h->plt.offset);
+ unresolved_reloc = FALSE;
+ }
+
+ relocation -= htab->sgotplt->output_section->vma
+ + htab->sgotplt->output_offset;
+ break;
+
case R_X86_64_PLT32:
/* Relocation is to the entry for this symbol in the
procedure linkage table. */
/* FIXME: The ABI says the linker should make sure the value is
the same when it's zero-extended to 64 bits. */
- /* r_symndx will be zero only for relocs against symbols
- from removed linkonce sections, or sections discarded by
- a linker script. */
- if (r_symndx == 0
- || (input_section->flags & SEC_ALLOC) == 0)
+ if ((input_section->flags & SEC_ALLOC) == 0)
break;
if ((info->shared
|| r_type == R_X86_64_PC32
|| r_type == R_X86_64_PC64
|| !info->shared
- || !info->symbolic
+ || !SYMBOLIC_BIND (info, h)
|| !h->def_regular))
{
outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
{
asection *osec;
+ /* We are turning this relocation into one
+ against a section symbol. It would be
+ proper to subtract the symbol's value,
+ osec->vma, from the emitted reloc addend,
+ but ld.so expects buggy relocs. */
osec = sec->output_section;
sindx = elf_section_data (osec)->dynindx;
- BFD_ASSERT (sindx > 0);
+ if (sindx == 0)
+ {
+ asection *oi = htab->elf.text_index_section;
+ sindx = elf_section_data (oi)->dynindx;
+ }
+ BFD_ASSERT (sindx != 0);
}
outrel.r_info = ELF64_R_INFO (sindx, r_type);
break;
case R_X86_64_TLSGD:
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_TLSDESC_CALL:
case R_X86_64_GOTTPOFF:
- r_type = elf64_x86_64_tls_transition (info, r_type, h == NULL);
tls_type = GOT_UNKNOWN;
if (h == NULL && local_got_offsets)
tls_type = elf64_x86_64_local_got_tls_type (input_bfd) [r_symndx];
else if (h != NULL)
- {
- tls_type = elf64_x86_64_hash_entry (h)->tls_type;
- if (!info->shared && h->dynindx == -1 && tls_type == GOT_TLS_IE)
- r_type = R_X86_64_TPOFF32;
- }
- if (r_type == R_X86_64_TLSGD)
- {
- if (tls_type == GOT_TLS_IE)
- r_type = R_X86_64_GOTTPOFF;
- }
+ tls_type = elf64_x86_64_hash_entry (h)->tls_type;
+
+ if (! elf64_x86_64_tls_transition (info, input_bfd,
+ input_section, contents,
+ symtab_hdr, sym_hashes,
+ &r_type, tls_type, rel,
+ relend, h))
+ return FALSE;
if (r_type == R_X86_64_TPOFF32)
{
+ bfd_vma roff = rel->r_offset;
+
BFD_ASSERT (! unresolved_reloc);
+
if (ELF64_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
{
- unsigned int i;
- static unsigned char tlsgd[8]
- = { 0x66, 0x48, 0x8d, 0x3d, 0x66, 0x66, 0x48, 0xe8 };
-
/* GD->LE transition.
.byte 0x66; leaq foo@tlsgd(%rip), %rdi
- .word 0x6666; rex64; call __tls_get_addr@plt
+ .word 0x6666; rex64; call __tls_get_addr
Change it into:
movq %fs:0, %rax
leaq foo@tpoff(%rax), %rax */
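/* The replacement is 64 48 8b 04 25 <imm32> (movq %fs:0, %rax)
   followed by 48 8d 80 <disp32> (leaq foo@tpoff(%rax), %rax);
   the @tpoff value is patched in at r_offset + 8. */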
- BFD_ASSERT (rel->r_offset >= 4);
- for (i = 0; i < 4; i++)
- BFD_ASSERT (bfd_get_8 (input_bfd,
- contents + rel->r_offset - 4 + i)
- == tlsgd[i]);
- BFD_ASSERT (rel->r_offset + 12 <= input_section->size);
- for (i = 0; i < 4; i++)
- BFD_ASSERT (bfd_get_8 (input_bfd,
- contents + rel->r_offset + 4 + i)
- == tlsgd[i+4]);
- BFD_ASSERT (rel + 1 < relend);
- BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
- memcpy (contents + rel->r_offset - 4,
+ memcpy (contents + roff - 4,
"\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
16);
bfd_put_32 (output_bfd, tpoff (info, relocation),
- contents + rel->r_offset + 8);
- /* Skip R_X86_64_PLT32. */
+ contents + roff + 8);
+ /* Skip R_X86_64_PC32/R_X86_64_PLT32. */
rel++;
continue;
}
- else
+ else if (ELF64_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
+ {
+ /* GDesc -> LE transition.
+ It's originally something like:
+ leaq x@tlsdesc(%rip), %rax
+
+ Change it to:
+ movl $x@tpoff, %rax
+ */
+
+ unsigned int val, type, type2;
+
+ type = bfd_get_8 (input_bfd, contents + roff - 3);
+ type2 = bfd_get_8 (input_bfd, contents + roff - 2);
+ val = bfd_get_8 (input_bfd, contents + roff - 1);
+ bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
+ contents + roff - 3);
+ bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
+ bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
+ contents + roff - 1);
+ bfd_put_32 (output_bfd, tpoff (info, relocation),
+ contents + roff);
+ continue;
+ }
+ else if (ELF64_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
+ {
+ /* GDesc -> LE transition.
+ It's originally:
+ call *(%rax)
+ Turn it into:
+ xchg %ax,%ax. */
+ bfd_put_8 (output_bfd, 0x66, contents + roff);
+ bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ continue;
+ }
+ else if (ELF64_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
{
- unsigned int val, type, reg;
-
/* IE->LE transition:
Originally it can be one of:
movq foo@gottpoff(%rip), %reg
addq foo@gottpoff(%rip), %reg
We change it into:
movq $foo, %reg
leaq foo(%reg), %reg
addq $foo, %reg. */
- BFD_ASSERT (rel->r_offset >= 3);
- val = bfd_get_8 (input_bfd, contents + rel->r_offset - 3);
- BFD_ASSERT (val == 0x48 || val == 0x4c);
- type = bfd_get_8 (input_bfd, contents + rel->r_offset - 2);
- BFD_ASSERT (type == 0x8b || type == 0x03);
- reg = bfd_get_8 (input_bfd, contents + rel->r_offset - 1);
- BFD_ASSERT ((reg & 0xc7) == 5);
+
+ unsigned int val, type, reg;
+
+ val = bfd_get_8 (input_bfd, contents + roff - 3);
+ type = bfd_get_8 (input_bfd, contents + roff - 2);
+ reg = bfd_get_8 (input_bfd, contents + roff - 1);
reg >>= 3;
- BFD_ASSERT (rel->r_offset + 4 <= input_section->size);
if (type == 0x8b)
{
/* movq */
if (val == 0x4c)
bfd_put_8 (output_bfd, 0x49,
- contents + rel->r_offset - 3);
+ contents + roff - 3);
bfd_put_8 (output_bfd, 0xc7,
- contents + rel->r_offset - 2);
+ contents + roff - 2);
bfd_put_8 (output_bfd, 0xc0 | reg,
- contents + rel->r_offset - 1);
+ contents + roff - 1);
}
else if (reg == 4)
{
special */
if (val == 0x4c)
bfd_put_8 (output_bfd, 0x49,
- contents + rel->r_offset - 3);
+ contents + roff - 3);
bfd_put_8 (output_bfd, 0x81,
- contents + rel->r_offset - 2);
+ contents + roff - 2);
bfd_put_8 (output_bfd, 0xc0 | reg,
- contents + rel->r_offset - 1);
+ contents + roff - 1);
}
else
{
/* addq -> leaq */
if (val == 0x4c)
bfd_put_8 (output_bfd, 0x4d,
- contents + rel->r_offset - 3);
+ contents + roff - 3);
bfd_put_8 (output_bfd, 0x8d,
- contents + rel->r_offset - 2);
+ contents + roff - 2);
bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
- contents + rel->r_offset - 1);
+ contents + roff - 1);
}
bfd_put_32 (output_bfd, tpoff (info, relocation),
- contents + rel->r_offset);
+ contents + roff);
continue;
}
+ else
+ BFD_ASSERT (FALSE);
}
if (htab->sgot == NULL)
abort ();
if (h != NULL)
- off = h->got.offset;
+ {
+ off = h->got.offset;
+ offplt = elf64_x86_64_hash_entry (h)->tlsdesc_got;
+ }
else
{
if (local_got_offsets == NULL)
abort ();
off = local_got_offsets[r_symndx];
+ offplt = local_tlsdesc_gotents[r_symndx];
}
if ((off & 1) != 0)
Elf_Internal_Rela outrel;
bfd_byte *loc;
int dr_type, indx;
+ asection *sreloc;
if (htab->srelgot == NULL)
abort ();
+ indx = h && h->dynindx != -1 ? h->dynindx : 0;
+
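+ /* For a GDesc access, emit an R_X86_64_TLSDESC relocation against
+ the pair of .got.plt slots reserved for this symbol's descriptor;
+ it goes into .rela.plt so that, with lazy binding, it can be
+ resolved through DT_TLSDESC_PLT on first use. */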
+ if (GOT_TLS_GDESC_P (tls_type))
+ {
+ outrel.r_info = ELF64_R_INFO (indx, R_X86_64_TLSDESC);
+ BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
+ + 2 * GOT_ENTRY_SIZE <= htab->sgotplt->size);
+ outrel.r_offset = (htab->sgotplt->output_section->vma
+ + htab->sgotplt->output_offset
+ + offplt
+ + htab->sgotplt_jump_table_size);
+ sreloc = htab->srelplt;
+ loc = sreloc->contents;
+ loc += sreloc->reloc_count++
+ * sizeof (Elf64_External_Rela);
+ BFD_ASSERT (loc + sizeof (Elf64_External_Rela)
+ <= sreloc->contents + sreloc->size);
+ if (indx == 0)
+ outrel.r_addend = relocation - dtpoff_base (info);
+ else
+ outrel.r_addend = 0;
+ bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
+ }
+
+ sreloc = htab->srelgot;
+
outrel.r_offset = (htab->sgot->output_section->vma
+ htab->sgot->output_offset + off);
- indx = h && h->dynindx != -1 ? h->dynindx : 0;
- if (r_type == R_X86_64_TLSGD)
+ if (GOT_TLS_GD_P (tls_type))
dr_type = R_X86_64_DTPMOD64;
+ else if (GOT_TLS_GDESC_P (tls_type))
+ goto dr_done;
else
dr_type = R_X86_64_TPOFF64;
bfd_put_64 (output_bfd, 0, htab->sgot->contents + off);
outrel.r_addend = 0;
- if (dr_type == R_X86_64_TPOFF64 && indx == 0)
+ if ((dr_type == R_X86_64_TPOFF64
+ || dr_type == R_X86_64_TLSDESC) && indx == 0)
outrel.r_addend = relocation - dtpoff_base (info);
outrel.r_info = ELF64_R_INFO (indx, dr_type);
- loc = htab->srelgot->contents;
- loc += htab->srelgot->reloc_count++ * sizeof (Elf64_External_Rela);
+ loc = sreloc->contents;
+ loc += sreloc->reloc_count++ * sizeof (Elf64_External_Rela);
+ BFD_ASSERT (loc + sizeof (Elf64_External_Rela)
+ <= sreloc->contents + sreloc->size);
bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
- if (r_type == R_X86_64_TLSGD)
+ if (GOT_TLS_GD_P (tls_type))
{
if (indx == 0)
{
outrel.r_info = ELF64_R_INFO (indx,
R_X86_64_DTPOFF64);
outrel.r_offset += GOT_ENTRY_SIZE;
- htab->srelgot->reloc_count++;
+ sreloc->reloc_count++;
loc += sizeof (Elf64_External_Rela);
+ BFD_ASSERT (loc + sizeof (Elf64_External_Rela)
+ <= sreloc->contents + sreloc->size);
bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
}
}
+ dr_done:
if (h != NULL)
h->got.offset |= 1;
else
local_got_offsets[r_symndx] |= 1;
}
- if (off >= (bfd_vma) -2)
+ if (off >= (bfd_vma) -2
+ && ! GOT_TLS_GDESC_P (tls_type))
abort ();
if (r_type == ELF64_R_TYPE (rel->r_info))
{
- relocation = htab->sgot->output_section->vma
- + htab->sgot->output_offset + off;
+ if (r_type == R_X86_64_GOTPC32_TLSDESC
+ || r_type == R_X86_64_TLSDESC_CALL)
+ relocation = htab->sgotplt->output_section->vma
+ + htab->sgotplt->output_offset
+ + offplt + htab->sgotplt_jump_table_size;
+ else
+ relocation = htab->sgot->output_section->vma
+ + htab->sgot->output_offset + off;
unresolved_reloc = FALSE;
}
else
{
- unsigned int i;
- static unsigned char tlsgd[8]
- = { 0x66, 0x48, 0x8d, 0x3d, 0x66, 0x66, 0x48, 0xe8 };
-
- /* GD->IE transition.
- .byte 0x66; leaq foo@tlsgd(%rip), %rdi
- .word 0x6666; rex64; call __tls_get_addr@plt
- Change it into:
- movq %fs:0, %rax
- addq foo@gottpoff(%rip), %rax */
- BFD_ASSERT (rel->r_offset >= 4);
- for (i = 0; i < 4; i++)
- BFD_ASSERT (bfd_get_8 (input_bfd,
- contents + rel->r_offset - 4 + i)
- == tlsgd[i]);
- BFD_ASSERT (rel->r_offset + 12 <= input_section->size);
- for (i = 0; i < 4; i++)
- BFD_ASSERT (bfd_get_8 (input_bfd,
- contents + rel->r_offset + 4 + i)
- == tlsgd[i+4]);
- BFD_ASSERT (rel + 1 < relend);
- BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
- memcpy (contents + rel->r_offset - 4,
- "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
- 16);
-
- relocation = (htab->sgot->output_section->vma
- + htab->sgot->output_offset + off
- - rel->r_offset
- - input_section->output_section->vma
- - input_section->output_offset
- - 12);
- bfd_put_32 (output_bfd, relocation,
- contents + rel->r_offset + 8);
- /* Skip R_X86_64_PLT32. */
- rel++;
- continue;
+ bfd_vma roff = rel->r_offset;
+
+ if (ELF64_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
+ {
+ /* GD->IE transition.
+ .byte 0x66; leaq foo@tlsgd(%rip), %rdi
+ .word 0x6666; rex64; call __tls_get_addr@plt
+ Change it into:
+ movq %fs:0, %rax
+ addq foo@gottpoff(%rip), %rax */
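+ /* The replacement bytes are 64 48 8b 04 25 <imm32> (movq %fs:0,
+ %rax) followed by 48 03 05 <disp32> (addq foo@gottpoff(%rip),
+ %rax); the value stored at roff + 8 is the GOT entry address
+ relative to the end of the addq (roff + 12), hence the -12
+ below. */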
+ memcpy (contents + roff - 4,
+ "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
+ 16);
+
+ relocation = (htab->sgot->output_section->vma
+ + htab->sgot->output_offset + off
+ - roff
+ - input_section->output_section->vma
+ - input_section->output_offset
+ - 12);
+ bfd_put_32 (output_bfd, relocation,
+ contents + roff + 8);
+ /* Skip R_X86_64_PC32/R_X86_64_PLT32. */
+ rel++;
+ continue;
+ }
+ else if (ELF64_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
+ {
+ /* GDesc -> IE transition.
+ It's originally something like:
+ leaq x@tlsdesc(%rip), %rax
+
+ Change it to:
+ movq x@gottpoff(%rip), %rax # before xchg %ax,%ax
+ */
+
+ unsigned int val, type, type2;
+
+ type = bfd_get_8 (input_bfd, contents + roff - 3);
+ type2 = bfd_get_8 (input_bfd, contents + roff - 2);
+ val = bfd_get_8 (input_bfd, contents + roff - 1);
+
+ /* Now modify the instruction as appropriate. To
+ turn a leaq into a movq in the form we use it, it
+ suffices to change the second byte from 0x8d to
+ 0x8b. */
+ bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
+
+ bfd_put_32 (output_bfd,
+ htab->sgot->output_section->vma
+ + htab->sgot->output_offset + off
+ - rel->r_offset
+ - input_section->output_section->vma
+ - input_section->output_offset
+ - 4,
+ contents + roff);
+ continue;
+ }
+ else if (ELF64_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
+ {
+ /* GDesc -> IE transition.
+ It's originally:
+ call *(%rax)
+
+ Change it to:
+ xchg %ax,%ax. */
+
+ unsigned int val, type;
+
+ type = bfd_get_8 (input_bfd, contents + roff);
+ val = bfd_get_8 (input_bfd, contents + roff + 1);
+ bfd_put_8 (output_bfd, 0x66, contents + roff);
+ bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
+ continue;
+ }
+ else
+ BFD_ASSERT (FALSE);
}
break;
case R_X86_64_TLSLD:
- if (! info->shared)
+ if (! elf64_x86_64_tls_transition (info, input_bfd,
+ input_section, contents,
+ symtab_hdr, sym_hashes,
+ &r_type, GOT_UNKNOWN,
+ rel, relend, h))
+ return FALSE;
+
+ if (r_type != R_X86_64_TLSLD)
{
/* LD->LE transition:
- Ensure it is:
- leaq foo@tlsld(%rip), %rdi; call __tls_get_addr@plt.
+ leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
We change it into:
.word 0x6666; .byte 0x66; movq %fs:0, %rax. */
- BFD_ASSERT (rel->r_offset >= 3);
- BFD_ASSERT (bfd_get_8 (input_bfd, contents + rel->r_offset - 3)
- == 0x48);
- BFD_ASSERT (bfd_get_8 (input_bfd, contents + rel->r_offset - 2)
- == 0x8d);
- BFD_ASSERT (bfd_get_8 (input_bfd, contents + rel->r_offset - 1)
- == 0x3d);
- BFD_ASSERT (rel->r_offset + 9 <= input_section->size);
- BFD_ASSERT (bfd_get_8 (input_bfd, contents + rel->r_offset + 4)
- == 0xe8);
- BFD_ASSERT (rel + 1 < relend);
- BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
+
+ BFD_ASSERT (r_type == R_X86_64_TPOFF32);
memcpy (contents + rel->r_offset - 3,
"\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
- /* Skip R_X86_64_PLT32. */
+ /* Skip R_X86_64_PC32/R_X86_64_PLT32. */
rel++;
continue;
}
&& !((input_section->flags & SEC_DEBUGGING) != 0
&& h->def_dynamic))
(*_bfd_error_handler)
- (_("%B(%A+0x%lx): unresolvable relocation against symbol `%s'"),
+ (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
input_bfd,
input_section,
(long) rel->r_offset,
+ howto->name,
h->root.root.string);
r = _bfd_final_link_relocate (howto, input_bfd, input_section,
if (r == bfd_reloc_overflow)
{
- if (h != NULL
- && h->root.type == bfd_link_hash_undefweak
- && howto->pc_relative)
- /* Ignore reloc overflow on branches to undefweak syms. */
- continue;
-
if (! ((*info->callbacks->reloc_overflow)
(info, (h ? &h->root : NULL), name, howto->name,
(bfd_vma) 0, input_bfd, input_section,
}
if (h->got.offset != (bfd_vma) -1
- && elf64_x86_64_hash_entry (h)->tls_type != GOT_TLS_GD
+ && ! GOT_TLS_GD_ANY_P (elf64_x86_64_hash_entry (h)->tls_type)
&& elf64_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
{
Elf_Internal_Rela rela;
/* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. */
if (strcmp (h->root.root.string, "_DYNAMIC") == 0
- || strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
+ || h == htab->elf.hgot)
sym->st_shndx = SHN_ABS;
return TRUE;
dyn.d_un.d_val -= s->size;
}
break;
+
+ case DT_TLSDESC_PLT:
+ s = htab->splt;
+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
+ + htab->tlsdesc_plt;
+ break;
+
+ case DT_TLSDESC_GOT:
+ s = htab->sgot;
+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
+ + htab->tlsdesc_got;
+ break;
}
bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
elf_section_data (htab->splt->output_section)->this_hdr.sh_entsize =
PLT_ENTRY_SIZE;
+
+ if (htab->tlsdesc_plt)
+ {
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->sgot->contents + htab->tlsdesc_got);
+
+ memcpy (htab->splt->contents + htab->tlsdesc_plt,
+ elf64_x86_64_plt0_entry,
+ PLT_ENTRY_SIZE);
+
+ /* Add the offset for pushq GOT+8(%rip); since the
+ instruction uses 6 bytes, subtract this value. */
+ bfd_put_32 (output_bfd,
+ (htab->sgotplt->output_section->vma
+ + htab->sgotplt->output_offset
+ + 8
+ - htab->splt->output_section->vma
+ - htab->splt->output_offset
+ - htab->tlsdesc_plt
+ - 6),
+ htab->splt->contents + htab->tlsdesc_plt + 2);
+ /* Add the offset for jmp *GOT+TDG(%rip), where TDG stands for
+ htab->tlsdesc_got. The 12 is the offset to the end of
+ the instruction. */
+ bfd_put_32 (output_bfd,
+ (htab->sgot->output_section->vma
+ + htab->sgot->output_offset
+ + htab->tlsdesc_got
+ - htab->splt->output_section->vma
+ - htab->splt->output_offset
+ - htab->tlsdesc_plt
+ - 12),
+ htab->splt->contents + htab->tlsdesc_plt + 8);
+ }
}
}
}
else if (sym->st_shndx == SHN_X86_64_LCOMMON
&& (elf_section_flags (*oldsec) & SHF_X86_64_LARGE) == 0)
- *psec = *sec = bfd_com_section_ptr;
+ *psec = *sec = bfd_com_section_ptr;
}
return TRUE;
}
static int
-elf64_x86_64_additional_program_headers (bfd *abfd)
+elf64_x86_64_additional_program_headers (bfd *abfd,
+ struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
asection *s;
- int count = 0;
+ int count = 0;
/* Check to see if we need a large readonly segment. */
s = bfd_get_section_by_name (abfd, ".lrodata");
return count;
}
-static const struct bfd_elf_special_section
+/* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
+
+static bfd_boolean
+elf64_x86_64_hash_symbol (struct elf_link_hash_entry *h)
+{
+ if (h->plt.offset != (bfd_vma) -1
+ && !h->def_regular
+ && !h->pointer_equality_needed)
+ return FALSE;
+
+ return _bfd_elf_hash_symbol (h);
+}
+
+static const struct bfd_elf_special_section
elf64_x86_64_special_sections[]=
{
- { ".gnu.linkonce.lb", 16, -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
- { ".gnu.linkonce.lr", 16, -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
- { ".gnu.linkonce.lt", 16, -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
- { ".lbss", 5, -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
- { ".ldata", 6, -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
- { ".lrodata", 8, -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
- { NULL, 0, 0, 0, 0 }
+ { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
+ { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
+ { NULL, 0, 0, 0, 0 }
};
#define TARGET_LITTLE_SYM bfd_elf64_x86_64_vec
#define TARGET_LITTLE_NAME "elf64-x86-64"
#define ELF_ARCH bfd_arch_i386
#define ELF_MACHINE_CODE EM_X86_64
-#define ELF_MAXPAGESIZE 0x100000
+#define ELF_MAXPAGESIZE 0x200000
+#define ELF_MINPAGESIZE 0x1000
+#define ELF_COMMONPAGESIZE 0x1000
#define elf_backend_can_gc_sections 1
#define elf_backend_can_refcount 1
#define bfd_elf64_bfd_link_hash_table_create \
elf64_x86_64_link_hash_table_create
#define bfd_elf64_bfd_reloc_type_lookup elf64_x86_64_reloc_type_lookup
+#define bfd_elf64_bfd_reloc_name_lookup \
+ elf64_x86_64_reloc_name_lookup
#define elf_backend_adjust_dynamic_symbol elf64_x86_64_adjust_dynamic_symbol
#define elf_backend_check_relocs elf64_x86_64_check_relocs
#define elf_backend_reloc_type_class elf64_x86_64_reloc_type_class
#define elf_backend_relocate_section elf64_x86_64_relocate_section
#define elf_backend_size_dynamic_sections elf64_x86_64_size_dynamic_sections
+#define elf_backend_always_size_sections elf64_x86_64_always_size_sections
+#define elf_backend_init_index_section _bfd_elf_init_1_index_section
#define elf_backend_plt_sym_val elf64_x86_64_plt_sym_val
#define elf_backend_object_p elf64_x86_64_elf_object_p
#define bfd_elf64_mkobject elf64_x86_64_mkobject
elf64_x86_64_special_sections
#define elf_backend_additional_program_headers \
elf64_x86_64_additional_program_headers
+#define elf_backend_hash_symbol \
+ elf64_x86_64_hash_symbol
+
+#include "elf64-target.h"
+
+/* FreeBSD support. */
+
+#undef TARGET_LITTLE_SYM
+#define TARGET_LITTLE_SYM bfd_elf64_x86_64_freebsd_vec
+#undef TARGET_LITTLE_NAME
+#define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
+
+#undef ELF_OSABI
+#define ELF_OSABI ELFOSABI_FREEBSD
+
+#undef elf_backend_post_process_headers
+#define elf_backend_post_process_headers _bfd_elf_set_osabi
+
+#undef elf64_bed
+#define elf64_bed elf64_x86_64_fbsd_bed
#include "elf64-target.h"