X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=bfd%2Felf64-x86-64.c;h=63aff4630f5f00a7ac9e3d9be5473c0ae3e59d7e;hb=4f501a245f67d0b43f245b09515c87bfeec983ec;hp=364379aa701b89f9473934e1d16c3851794267b3;hpb=493f652c956f2037c1c638c1887b634d67da0835;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf64-x86-64.c b/bfd/elf64-x86-64.c index 364379aa70..63aff4630f 100644 --- a/bfd/elf64-x86-64.c +++ b/bfd/elf64-x86-64.c @@ -1,5 +1,5 @@ /* X86-64 specific support for ELF - Copyright (C) 2000-2016 Free Software Foundation, Inc. + Copyright (C) 2000-2017 Free Software Foundation, Inc. Contributed by Jan Hubicka . This file is part of BFD, the Binary File Descriptor library. @@ -19,15 +19,8 @@ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ -#include "sysdep.h" -#include "bfd.h" -#include "bfdlink.h" -#include "libbfd.h" -#include "elf-bfd.h" +#include "elfxx-x86.h" #include "elf-nacl.h" -#include "bfd_stdint.h" -#include "objalloc.h" -#include "hashtab.h" #include "dwarf2.h" #include "libiberty.h" @@ -47,9 +40,6 @@ relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE since they are the same. */ -#define ABI_64_P(abfd) \ - (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64) - /* The relocation "howto" table. Order of fields: type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow, special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */ @@ -285,8 +275,9 @@ elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type) { if (r_type >= (unsigned int) R_X86_64_standard) { - (*_bfd_error_handler) (_("%B: invalid relocation type %d"), - abfd, (int) r_type); + /* xgettext:c-format */ + _bfd_error_handler (_("%B: invalid relocation type %d"), + abfd, (int) r_type); r_type = R_X86_64_NONE; } i = r_type; @@ -527,39 +518,33 @@ elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, /* Functions for the x86-64 ELF linker. */ -/* The name of the dynamic interpreter. 
This is put in the .interp - section. */ - -#define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1" -#define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1" - -/* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid - copying dynamic variables from a shared lib into an app's dynbss - section, and instead use a dynamic relocation to point into the - shared lib. */ -#define ELIMINATE_COPY_RELOCS 1 - /* The size in bytes of an entry in the global offset table. */ #define GOT_ENTRY_SIZE 8 -/* The size in bytes of an entry in the procedure linkage table. */ +/* The size in bytes of an entry in the lazy procedure linkage table. */ -#define PLT_ENTRY_SIZE 16 +#define LAZY_PLT_ENTRY_SIZE 16 -/* The first entry in a procedure linkage table looks like this. See the - SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */ +/* The size in bytes of an entry in the non-lazy procedure linkage + table. */ -static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] = +#define NON_LAZY_PLT_ENTRY_SIZE 8 + +/* The first entry in a lazy procedure linkage table looks like this. + See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this + works. */ + +static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] = { 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */ 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */ }; -/* Subsequent entries in a procedure linkage table look like this. */ +/* Subsequent entries in a lazy procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] = { 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ @@ -569,59 +554,98 @@ static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = 0, 0, 0, 0 /* replaced with offset to start of .plt0. 
*/ }; -/* The first entry in a procedure linkage table with BND relocations +/* The first entry in a lazy procedure linkage table with BND prefix like this. */ -static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] = { 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */ 0x0f, 0x1f, 0 /* nopl (%rax) */ }; -/* Subsequent entries for legacy branches in a procedure linkage table - with BND relocations look like this. */ +/* Subsequent entries for branches with BND prefx in a lazy procedure + linkage table look like this. */ -static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] = { 0x68, 0, 0, 0, 0, /* pushq immediate */ - 0xe9, 0, 0, 0, 0, /* jmpq relative */ - 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */ + 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ + 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ }; -/* Subsequent entries for branches with BND prefx in a procedure linkage - table with BND relocations look like this. */ +/* The first entry in the IBT-enabled lazy procedure linkage table is the + the same as the lazy PLT with BND prefix so that bound registers are + preserved when control is passed to dynamic linker. Subsequent + entries for a IBT-enabled lazy procedure linkage table look like + this. */ -static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] = +static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = { + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 0x68, 0, 0, 0, 0, /* pushq immediate */ 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ - 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ + 0x90 /* nop */ }; -/* Entries for legacy branches in the second procedure linkage table - look like this. 
*/ +/* The first entry in the x32 IBT-enabled lazy procedure linkage table + is the same as the normal lazy PLT. Subsequent entries for an + x32 IBT-enabled lazy procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_legacy_plt2_entry[8] = +static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = { - 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ - 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ - 0x66, 0x90 /* xchg %ax,%ax */ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xe9, 0, 0, 0, 0, /* jmpq relative */ + 0x66, 0x90 /* xchg %ax,%ax */ }; -/* Entries for branches with BND prefix in the second procedure linkage - table look like this. */ +/* Entries in the non-lazey procedure linkage table look like this. */ -static const bfd_byte elf_x86_64_bnd_plt2_entry[8] = +static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = { - 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ - 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ - 0x90 /* nop */ + 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x66, 0x90 /* xchg %ax,%ax */ }; -/* .eh_frame covering the .plt section. */ +/* Entries for branches with BND prefix in the non-lazey procedure + linkage table look like this. */ + +static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = +{ + 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x90 /* nop */ +}; -static const bfd_byte elf_x86_64_eh_frame_plt[] = +/* Entries for branches with IBT-enabled in the non-lazey procedure + linkage table look like this. They have the same size as the lazy + PLT entry. 
*/ + +static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = +{ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */ +}; + +/* Entries for branches with IBT-enabled in the x32 non-lazey procedure + linkage table look like this. They have the same size as the lazy + PLT entry. */ + +static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = +{ + 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ + 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */ +}; + +/* .eh_frame covering the lazy .plt section. */ + +static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] = { #define PLT_CIE_LENGTH 20 #define PLT_FDE_LENGTH 36 @@ -658,40 +682,148 @@ static const bfd_byte elf_x86_64_eh_frame_plt[] = DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop }; -/* Architecture-specific backend data for x86-64. */ +/* .eh_frame covering the lazy BND .plt section. 
*/ -struct elf_x86_64_backend_data +static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] = +{ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, + + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; + +/* .eh_frame covering the lazy .plt section with IBT-enabled. */ + +static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] = { - /* Templates for the initial PLT entry and for subsequent entries. */ - const bfd_byte *plt0_entry; - const bfd_byte *plt_entry; - unsigned int plt_entry_size; /* Size of each PLT entry. 
*/ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, + + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; - /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */ - unsigned int plt0_got1_offset; - unsigned int plt0_got2_offset; +/* .eh_frame covering the x32 lazy .plt section with IBT-enabled. 
*/ + +static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] = +{ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, - /* Offset of the end of the PC-relative instruction containing - plt0_got2_offset. */ - unsigned int plt0_got2_insn_end; + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; - /* Offsets into plt_entry that are to be replaced with... */ - unsigned int plt_got_offset; /* ... address of this symbol in .got. */ - unsigned int plt_reloc_offset; /* ... offset into relocation table. */ - unsigned int plt_plt_offset; /* ... offset to start of .plt. */ +/* .eh_frame covering the non-lazy .plt section. */ - /* Length of the PC-relative instruction containing plt_got_offset. 
*/ - unsigned int plt_got_insn_size; +static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] = +{ +#define PLT_GOT_FDE_LENGTH 20 + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, - /* Offset of the end of the PC-relative jump to plt0_entry. */ - unsigned int plt_plt_insn_end; + PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* the start of non-lazy .plt goes here */ + 0, 0, 0, 0, /* non-lazy .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; - /* Offset into plt_entry where the initial value of the GOT entry points. */ - unsigned int plt_lazy_offset; +/* Architecture-specific backend data for x86-64. */ - /* .eh_frame covering the .plt section. */ - const bfd_byte *eh_frame_plt; - unsigned int eh_frame_plt_size; +struct elf_x86_64_backend_data +{ + /* Target system. */ + enum + { + is_normal, + is_nacl + } os; }; #define get_elf_x86_64_arch_data(bed) \ @@ -700,15 +832,13 @@ struct elf_x86_64_backend_data #define get_elf_x86_64_backend_data(abfd) \ get_elf_x86_64_arch_data (get_elf_backend_data (abfd)) -#define GET_PLT_ENTRY_SIZE(abfd) \ - get_elf_x86_64_backend_data (abfd)->plt_entry_size - /* These are the standard parameters. 
*/ -static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt = { - elf_x86_64_plt0_entry, /* plt0_entry */ - elf_x86_64_plt_entry, /* plt_entry */ - sizeof (elf_x86_64_plt_entry), /* plt_entry_size */ + elf_x86_64_lazy_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 2, /* plt0_got1_offset */ 8, /* plt0_got2_offset */ 12, /* plt0_got2_insn_end */ @@ -716,17 +846,31 @@ static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = 7, /* plt_reloc_offset */ 12, /* plt_plt_offset */ 6, /* plt_got_insn_size */ - PLT_ENTRY_SIZE, /* plt_plt_insn_end */ + LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */ 6, /* plt_lazy_offset */ - elf_x86_64_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt = + { + elf_x86_64_non_lazy_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */ + NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 2, /* plt_got_offset */ + 6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ }; -static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt = { - elf_x86_64_bnd_plt0_entry, /* plt0_entry */ - elf_x86_64_bnd_plt_entry, /* plt_entry */ - sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */ + elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */ + 
LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 2, /* plt0_got1_offset */ 1+8, /* plt0_got2_offset */ 1+12, /* plt0_got2_insn_end */ @@ -736,38 +880,95 @@ static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = 1+6, /* plt_got_insn_size */ 11, /* plt_plt_insn_end */ 0, /* plt_lazy_offset */ - elf_x86_64_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */ }; -#define elf_backend_arch_data &elf_x86_64_arch_bed +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt = + { + elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */ + NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 1+2, /* plt_got_offset */ + 1+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ + }; -/* Is a undefined weak symbol which is resolved to 0. Reference to an - undefined weak symbol is resolved to 0 when building executable if - it isn't dynamic and - 1. Has non-GOT/non-PLT relocations in text section. Or - 2. Has no GOT/PLT relocation. - */ -#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \ - ((EH)->elf.root.type == bfd_link_hash_undefweak \ - && bfd_link_executable (INFO) \ - && (elf_x86_64_hash_table (INFO)->interp == NULL \ - || !(GOT_RELOC) \ - || (EH)->has_non_got_reloc \ - || !(INFO)->dynamic_undefined_weak)) - -/* x86-64 ELF linker hash entry. 
*/ - -struct elf_x86_64_link_hash_entry -{ - struct elf_link_hash_entry elf; +static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt = + { + elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 2, /* plt0_got1_offset */ + 1+8, /* plt0_got2_offset */ + 1+12, /* plt0_got2_insn_end */ + 4+1+2, /* plt_got_offset */ + 4+1, /* plt_reloc_offset */ + 4+1+6, /* plt_plt_offset */ + 4+1+6, /* plt_got_insn_size */ + 4+1+5+5, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */ + elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt = + { + elf_x86_64_lazy_plt0_entry, /* plt0_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ + elf_x32_lazy_ibt_plt_entry, /* plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 2, /* plt0_got1_offset */ + 8, /* plt0_got2_offset */ + 12, /* plt0_got2_insn_end */ + 4+2, /* plt_got_offset */ + 4+1, /* plt_reloc_offset */ + 4+6, /* plt_plt_offset */ + 4+6, /* plt_got_insn_size */ + 4+5+5, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ + elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */ + elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ + sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt = + { + elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */ + elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 4+1+2, /* plt_got_offset */ + 4+1+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* 
eh_frame_plt_size */ + }; + +static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt = + { + elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */ + elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */ + LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ + 4+2, /* plt_got_offset */ + 4+6, /* plt_got_insn_size */ + elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = + { + is_normal /* os */ + }; - /* Track dynamic relocs copied for this symbol. */ - struct elf_dyn_relocs *dyn_relocs; +#define elf_backend_arch_data &elf_x86_64_arch_bed -#define GOT_UNKNOWN 0 -#define GOT_NORMAL 1 -#define GOT_TLS_GD 2 +/* Values in tls_type of x86 ELF linker hash entry. */ #define GOT_TLS_IE 3 #define GOT_TLS_GDESC 4 #define GOT_TLS_GD_BOTH_P(type) \ @@ -778,527 +979,57 @@ struct elf_x86_64_link_hash_entry ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type)) #define GOT_TLS_GD_ANY_P(type) \ (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type)) - unsigned char tls_type; - - /* TRUE if a weak symbol with a real definition needs a copy reloc. - When there is a weak symbol with a real definition, the processor - independent code will have arranged for us to see the real - definition first. We need to copy the needs_copy bit from the - real definition and check it when allowing copy reloc in PIE. */ - unsigned int needs_copy : 1; - - /* TRUE if symbol has at least one BND relocation. */ - unsigned int has_bnd_reloc : 1; - - /* TRUE if symbol has GOT or PLT relocations. */ - unsigned int has_got_reloc : 1; - - /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */ - unsigned int has_non_got_reloc : 1; - - /* Reference count of C/C++ function pointer relocations in read-write - section which can be resolved at run-time. */ - bfd_signed_vma func_pointer_refcount; - - /* Information about the GOT PLT entry. 
Filled when there are both - GOT and PLT relocations against the same function. */ - union gotplt_union plt_got; - - /* Information about the second PLT entry. Filled when has_bnd_reloc is - set. */ - union gotplt_union plt_bnd; - - /* Offset of the GOTPLT entry reserved for the TLS descriptor, - starting at the end of the jump table. */ - bfd_vma tlsdesc_got; -}; - -#define elf_x86_64_hash_entry(ent) \ - ((struct elf_x86_64_link_hash_entry *)(ent)) - -struct elf_x86_64_obj_tdata -{ - struct elf_obj_tdata root; - - /* tls_type for each local got entry. */ - char *local_got_tls_type; - - /* GOTPLT entries for TLS descriptors. */ - bfd_vma *local_tlsdesc_gotent; -}; - -#define elf_x86_64_tdata(abfd) \ - ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any) - -#define elf_x86_64_local_got_tls_type(abfd) \ - (elf_x86_64_tdata (abfd)->local_got_tls_type) - -#define elf_x86_64_local_tlsdesc_gotent(abfd) \ - (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent) #define is_x86_64_elf(bfd) \ (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ && elf_tdata (bfd) != NULL \ && elf_object_id (bfd) == X86_64_ELF_DATA) -static bfd_boolean -elf_x86_64_mkobject (bfd *abfd) -{ - return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata), - X86_64_ELF_DATA); -} - -/* x86-64 ELF linker hash table. */ - -struct elf_x86_64_link_hash_table -{ - struct elf_link_hash_table elf; - - /* Short-cuts to get to dynamic linker sections. */ - asection *interp; - asection *sdynbss; - asection *srelbss; - asection *plt_eh_frame; - asection *plt_bnd; - asection *plt_got; - - union - { - bfd_signed_vma refcount; - bfd_vma offset; - } tls_ld_got; - - /* The amount of space used by the jump slots in the GOT. */ - bfd_vma sgotplt_jump_table_size; - - /* Small local sym cache. 
*/ - struct sym_cache sym_cache; - - bfd_vma (*r_info) (bfd_vma, bfd_vma); - bfd_vma (*r_sym) (bfd_vma); - unsigned int pointer_r_type; - const char *dynamic_interpreter; - int dynamic_interpreter_size; - - /* _TLS_MODULE_BASE_ symbol. */ - struct bfd_link_hash_entry *tls_module_base; - - /* Used by local STT_GNU_IFUNC symbols. */ - htab_t loc_hash_table; - void * loc_hash_memory; - - /* The offset into splt of the PLT entry for the TLS descriptor - resolver. Special values are 0, if not necessary (or not found - to be necessary yet), and -1 if needed but not determined - yet. */ - bfd_vma tlsdesc_plt; - /* The offset into sgot of the GOT entry used by the PLT entry - above. */ - bfd_vma tlsdesc_got; - - /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */ - bfd_vma next_jump_slot_index; - /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */ - bfd_vma next_irelative_index; - - /* TRUE if there are dynamic relocs against IFUNC symbols that apply - to read-only sections. */ - bfd_boolean readonly_dynrelocs_against_ifunc; -}; - -/* Get the x86-64 ELF linker hash table from a link_info structure. */ - -#define elf_x86_64_hash_table(p) \ - (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \ - == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL) - #define elf_x86_64_compute_jump_table_size(htab) \ ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE) -/* Create an entry in an x86-64 ELF linker hash table. */ - -static struct bfd_hash_entry * -elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry, - struct bfd_hash_table *table, - const char *string) -{ - /* Allocate the structure if it has not already been allocated by a - subclass. */ - if (entry == NULL) - { - entry = (struct bfd_hash_entry *) - bfd_hash_allocate (table, - sizeof (struct elf_x86_64_link_hash_entry)); - if (entry == NULL) - return entry; - } - - /* Call the allocation method of the superclass. 
*/ - entry = _bfd_elf_link_hash_newfunc (entry, table, string); - if (entry != NULL) - { - struct elf_x86_64_link_hash_entry *eh; - - eh = (struct elf_x86_64_link_hash_entry *) entry; - eh->dyn_relocs = NULL; - eh->tls_type = GOT_UNKNOWN; - eh->needs_copy = 0; - eh->has_bnd_reloc = 0; - eh->has_got_reloc = 0; - eh->has_non_got_reloc = 0; - eh->func_pointer_refcount = 0; - eh->plt_bnd.offset = (bfd_vma) -1; - eh->plt_got.offset = (bfd_vma) -1; - eh->tlsdesc_got = (bfd_vma) -1; - } - - return entry; -} - -/* Compute a hash of a local hash entry. We use elf_link_hash_entry - for local symbol so that we can handle local STT_GNU_IFUNC symbols - as global symbol. We reuse indx and dynstr_index for local symbol - hash since they aren't used by global symbols in this backend. */ - -static hashval_t -elf_x86_64_local_htab_hash (const void *ptr) +static bfd_boolean +elf64_x86_64_elf_object_p (bfd *abfd) { - struct elf_link_hash_entry *h - = (struct elf_link_hash_entry *) ptr; - return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index); + /* Set the right machine number for an x86-64 elf64 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); + return TRUE; } -/* Compare local hash entries. */ - -static int -elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2) +static bfd_boolean +elf32_x86_64_elf_object_p (bfd *abfd) { - struct elf_link_hash_entry *h1 - = (struct elf_link_hash_entry *) ptr1; - struct elf_link_hash_entry *h2 - = (struct elf_link_hash_entry *) ptr2; - - return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index; + /* Set the right machine number for an x86-64 elf32 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); + return TRUE; } -/* Find and/or create a hash entry for local symbol. */ +/* Return TRUE if the TLS access code sequence support transition + from R_TYPE. 
*/ -static struct elf_link_hash_entry * -elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab, - bfd *abfd, const Elf_Internal_Rela *rel, - bfd_boolean create) +static bfd_boolean +elf_x86_64_check_tls_transition (bfd *abfd, + struct bfd_link_info *info, + asection *sec, + bfd_byte *contents, + Elf_Internal_Shdr *symtab_hdr, + struct elf_link_hash_entry **sym_hashes, + unsigned int r_type, + const Elf_Internal_Rela *rel, + const Elf_Internal_Rela *relend) { - struct elf_x86_64_link_hash_entry e, *ret; - asection *sec = abfd->sections; - hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id, - htab->r_sym (rel->r_info)); - void **slot; - - e.elf.indx = sec->id; - e.elf.dynstr_index = htab->r_sym (rel->r_info); - slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h, - create ? INSERT : NO_INSERT); - - if (!slot) - return NULL; + unsigned int val; + unsigned long r_symndx; + bfd_boolean largepic = FALSE; + struct elf_link_hash_entry *h; + bfd_vma offset; + struct elf_x86_link_hash_table *htab; + bfd_byte *call; + bfd_boolean indirect_call; - if (*slot) - { - ret = (struct elf_x86_64_link_hash_entry *) *slot; - return &ret->elf; - } - - ret = (struct elf_x86_64_link_hash_entry *) - objalloc_alloc ((struct objalloc *) htab->loc_hash_memory, - sizeof (struct elf_x86_64_link_hash_entry)); - if (ret) - { - memset (ret, 0, sizeof (*ret)); - ret->elf.indx = sec->id; - ret->elf.dynstr_index = htab->r_sym (rel->r_info); - ret->elf.dynindx = -1; - ret->func_pointer_refcount = 0; - ret->plt_got.offset = (bfd_vma) -1; - *slot = ret; - } - return &ret->elf; -} - -/* Destroy an X86-64 ELF linker hash table. 
*/ - -static void -elf_x86_64_link_hash_table_free (bfd *obfd) -{ - struct elf_x86_64_link_hash_table *htab - = (struct elf_x86_64_link_hash_table *) obfd->link.hash; - - if (htab->loc_hash_table) - htab_delete (htab->loc_hash_table); - if (htab->loc_hash_memory) - objalloc_free ((struct objalloc *) htab->loc_hash_memory); - _bfd_elf_link_hash_table_free (obfd); -} - -/* Create an X86-64 ELF linker hash table. */ - -static struct bfd_link_hash_table * -elf_x86_64_link_hash_table_create (bfd *abfd) -{ - struct elf_x86_64_link_hash_table *ret; - bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table); - - ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt); - if (ret == NULL) - return NULL; - - if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, - elf_x86_64_link_hash_newfunc, - sizeof (struct elf_x86_64_link_hash_entry), - X86_64_ELF_DATA)) - { - free (ret); - return NULL; - } - - if (ABI_64_P (abfd)) - { - ret->r_info = elf64_r_info; - ret->r_sym = elf64_r_sym; - ret->pointer_r_type = R_X86_64_64; - ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER; - ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER; - } - else - { - ret->r_info = elf32_r_info; - ret->r_sym = elf32_r_sym; - ret->pointer_r_type = R_X86_64_32; - ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER; - ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER; - } - - ret->loc_hash_table = htab_try_create (1024, - elf_x86_64_local_htab_hash, - elf_x86_64_local_htab_eq, - NULL); - ret->loc_hash_memory = objalloc_create (); - if (!ret->loc_hash_table || !ret->loc_hash_memory) - { - elf_x86_64_link_hash_table_free (abfd); - return NULL; - } - ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free; - - return &ret->elf.root; -} - -/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and - .rela.bss sections in DYNOBJ, and set up shortcuts to them in our - hash table. 
*/ - -static bfd_boolean -elf_x86_64_create_dynamic_sections (bfd *dynobj, - struct bfd_link_info *info) -{ - struct elf_x86_64_link_hash_table *htab; - - if (!_bfd_elf_create_dynamic_sections (dynobj, info)) - return FALSE; - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - /* Set the contents of the .interp section to the interpreter. */ - if (bfd_link_executable (info) && !info->nointerp) - { - asection *s = bfd_get_linker_section (dynobj, ".interp"); - if (s == NULL) - abort (); - s->size = htab->dynamic_interpreter_size; - s->contents = (unsigned char *) htab->dynamic_interpreter; - htab->interp = s; - } - - htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); - if (!htab->sdynbss) - abort (); - - if (bfd_link_executable (info)) - { - /* Always allow copy relocs for building executables. */ - asection *s = bfd_get_linker_section (dynobj, ".rela.bss"); - if (s == NULL) - { - const struct elf_backend_data *bed = get_elf_backend_data (dynobj); - s = bfd_make_section_anyway_with_flags (dynobj, - ".rela.bss", - (bed->dynamic_sec_flags - | SEC_READONLY)); - if (s == NULL - || ! bfd_set_section_alignment (dynobj, s, - bed->s->log_file_align)) - return FALSE; - } - htab->srelbss = s; - } - - if (!info->no_ld_generated_unwind_info - && htab->plt_eh_frame == NULL - && htab->elf.splt != NULL) - { - flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY - | SEC_HAS_CONTENTS | SEC_IN_MEMORY - | SEC_LINKER_CREATED); - htab->plt_eh_frame - = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags); - if (htab->plt_eh_frame == NULL - || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3)) - return FALSE; - } - return TRUE; -} - -/* Copy the extra info we tack onto an elf_link_hash_entry. 
*/ - -static void -elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *dir, - struct elf_link_hash_entry *ind) -{ - struct elf_x86_64_link_hash_entry *edir, *eind; - - edir = (struct elf_x86_64_link_hash_entry *) dir; - eind = (struct elf_x86_64_link_hash_entry *) ind; - - if (!edir->has_bnd_reloc) - edir->has_bnd_reloc = eind->has_bnd_reloc; - - if (!edir->has_got_reloc) - edir->has_got_reloc = eind->has_got_reloc; - - if (!edir->has_non_got_reloc) - edir->has_non_got_reloc = eind->has_non_got_reloc; - - if (eind->dyn_relocs != NULL) - { - if (edir->dyn_relocs != NULL) - { - struct elf_dyn_relocs **pp; - struct elf_dyn_relocs *p; - - /* Add reloc counts against the indirect sym to the direct sym - list. Merge any entries against the same section. */ - for (pp = &eind->dyn_relocs; (p = *pp) != NULL; ) - { - struct elf_dyn_relocs *q; - - for (q = edir->dyn_relocs; q != NULL; q = q->next) - if (q->sec == p->sec) - { - q->pc_count += p->pc_count; - q->count += p->count; - *pp = p->next; - break; - } - if (q == NULL) - pp = &p->next; - } - *pp = edir->dyn_relocs; - } - - edir->dyn_relocs = eind->dyn_relocs; - eind->dyn_relocs = NULL; - } - - if (ind->root.type == bfd_link_hash_indirect - && dir->got.refcount <= 0) - { - edir->tls_type = eind->tls_type; - eind->tls_type = GOT_UNKNOWN; - } - - if (ELIMINATE_COPY_RELOCS - && ind->root.type != bfd_link_hash_indirect - && dir->dynamic_adjusted) - { - /* If called to transfer flags for a weakdef during processing - of elf_adjust_dynamic_symbol, don't copy non_got_ref. - We clear it ourselves for ELIMINATE_COPY_RELOCS. 
*/ - dir->ref_dynamic |= ind->ref_dynamic; - dir->ref_regular |= ind->ref_regular; - dir->ref_regular_nonweak |= ind->ref_regular_nonweak; - dir->needs_plt |= ind->needs_plt; - dir->pointer_equality_needed |= ind->pointer_equality_needed; - } - else - { - if (eind->func_pointer_refcount > 0) - { - edir->func_pointer_refcount += eind->func_pointer_refcount; - eind->func_pointer_refcount = 0; - } - - _bfd_elf_link_hash_copy_indirect (info, dir, ind); - } -} - -static bfd_boolean -elf64_x86_64_elf_object_p (bfd *abfd) -{ - /* Set the right machine number for an x86-64 elf64 file. */ - bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); - return TRUE; -} - -static bfd_boolean -elf32_x86_64_elf_object_p (bfd *abfd) -{ - /* Set the right machine number for an x86-64 elf32 file. */ - bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); - return TRUE; -} - -/* Return TRUE if the TLS access code sequence support transition - from R_TYPE. */ - -static bfd_boolean -elf_x86_64_check_tls_transition (bfd *abfd, - struct bfd_link_info *info, - asection *sec, - bfd_byte *contents, - Elf_Internal_Shdr *symtab_hdr, - struct elf_link_hash_entry **sym_hashes, - unsigned int r_type, - const Elf_Internal_Rela *rel, - const Elf_Internal_Rela *relend) -{ - unsigned int val; - unsigned long r_symndx; - bfd_boolean largepic = FALSE; - struct elf_link_hash_entry *h; - bfd_vma offset; - struct elf_x86_64_link_hash_table *htab; - - /* Get the section contents. */ - if (contents == NULL) - { - if (elf_section_data (sec)->this_hdr.contents != NULL) - contents = elf_section_data (sec)->this_hdr.contents; - else - { - /* FIXME: How to better handle error condition? */ - if (!bfd_malloc_and_get_section (abfd, sec, &contents)) - return FALSE; - - /* Cache the section contents for elf_link_input_bfd. 
*/ - elf_section_data (sec)->this_hdr.contents = contents; - } - } - - htab = elf_x86_64_hash_table (info); - offset = rel->r_offset; - switch (r_type) + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); + offset = rel->r_offset; + switch (r_type) { case R_X86_64_TLSGD: case R_X86_64_TLSLD: @@ -1309,32 +1040,61 @@ elf_x86_64_check_tls_transition (bfd *abfd, { /* Check transition from GD access model. For 64bit, only .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr can transit to different access model. For 32bit, only leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr - can transit to different access model. For largepic + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr + can transit to different access model. For largepic, we also support: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax + or leaq foo@tlsgd(%rip), %rdi movabsq $__tls_get_addr@pltoff, %rax addq $rbx, %rax - call *%rax. 
*/ + call *%rax */ - static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 }; static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; if ((offset + 12) > sec->size) return FALSE; - if (memcmp (contents + offset + 4, call, 4) != 0) + call = contents + offset + 4; + if (call[0] != 0x66 + || !((call[1] == 0x48 + && call[2] == 0xff + && call[3] == 0x15) + || (call[1] == 0x48 + && call[2] == 0x67 + && call[3] == 0xe8) + || (call[1] == 0x66 + && call[2] == 0x48 + && call[3] == 0xe8))) { if (!ABI_64_P (abfd) || (offset + 19) > sec->size || offset < 3 - || memcmp (contents + offset - 3, leaq + 1, 3) != 0 - || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 - || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) - != 0) + || memcmp (call - 7, leaq + 1, 3) != 0 + || memcmp (call, "\x48\xb8", 2) != 0 + || call[11] != 0x01 + || call[13] != 0xff + || call[14] != 0xd0 + || !((call[10] == 0x48 && call[12] == 0xd8) + || (call[10] == 0x4c && call[12] == 0xf8))) return FALSE; largepic = TRUE; } @@ -1350,18 +1110,29 @@ elf_x86_64_check_tls_transition (bfd *abfd, || memcmp (contents + offset - 3, leaq + 1, 3) != 0) return FALSE; } + indirect_call = call[2] == 0xff; } else { /* Check transition from LD access model. Only leaq foo@tlsld(%rip), %rdi; - call __tls_get_addr + call __tls_get_addr@PLT + or + leaq foo@tlsld(%rip), %rdi; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr can transit to different access model. For largepic we also support: + leaq foo@tlsld(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax + or leaq foo@tlsld(%rip), %rdi movabsq $__tls_get_addr@pltoff, %rax addq $rbx, %rax - call *%rax. 
*/ + call *%rax */ static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; @@ -1371,16 +1142,23 @@ elf_x86_64_check_tls_transition (bfd *abfd, if (memcmp (contents + offset - 3, lea, 3) != 0) return FALSE; - if (0xe8 != *(contents + offset + 4)) + call = contents + offset + 4; + if (!(call[0] == 0xe8 + || (call[0] == 0xff && call[1] == 0x15) + || (call[0] == 0x67 && call[1] == 0xe8))) { if (!ABI_64_P (abfd) || (offset + 19) > sec->size - || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 - || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) - != 0) + || memcmp (call, "\x48\xb8", 2) != 0 + || call[11] != 0x01 + || call[13] != 0xff + || call[14] != 0xd0 + || !((call[10] == 0x48 && call[12] == 0xd8) + || (call[10] == 0x4c && call[12] == 0xf8))) return FALSE; largepic = TRUE; } + indirect_call = call[0] == 0xff; } r_symndx = htab->r_sym (rel[1].r_info); @@ -1388,16 +1166,16 @@ elf_x86_64_check_tls_transition (bfd *abfd, return FALSE; h = sym_hashes[r_symndx - symtab_hdr->sh_info]; - /* Use strncmp to check __tls_get_addr since __tls_get_addr - may be versioned. */ - return (h != NULL - && h->root.root.string != NULL - && (largepic - ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64 - : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 - || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)) - && (strncmp (h->root.root.string, - "__tls_get_addr", 14) == 0)); + if (h == NULL + || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr) + return FALSE; + else if (largepic) + return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64; + else if (indirect_call) + return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX; + else + return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 + || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32); case R_X86_64_GOTTPOFF: /* Check transition from IE access model: @@ -1460,8 +1238,8 @@ elf_x86_64_check_tls_transition (bfd *abfd, if (offset + 2 <= sec->size) { /* Make sure that it's a call *x@tlsdesc(%rax). 
*/ - static const unsigned char call[] = { 0xff, 0x10 }; - return memcmp (contents + offset, call, 2) == 0; + call = contents + offset; + return call[0] == 0xff && call[1] == 0x10; } return FALSE; @@ -1483,7 +1261,8 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, const Elf_Internal_Rela *rel, const Elf_Internal_Rela *relend, struct elf_link_hash_entry *h, - unsigned long r_symndx) + unsigned long r_symndx, + bfd_boolean from_relocate_section) { unsigned int from_type = *r_type; unsigned int to_type = from_type; @@ -1509,10 +1288,9 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, to_type = R_X86_64_GOTTPOFF; } - /* When we are called from elf_x86_64_relocate_section, - CONTENTS isn't NULL and there may be additional transitions - based on TLS_TYPE. */ - if (contents != NULL) + /* When we are called from elf_x86_64_relocate_section, there may + be additional transitions based on TLS_TYPE. */ + if (from_relocate_section) { unsigned int new_to_type = to_type; @@ -1568,9 +1346,9 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, name = h->root.root.string; else { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) name = "*unknown*"; else @@ -1583,11 +1361,11 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, } } - (*_bfd_error_handler) - (_("%B: TLS transition from %s to %s against `%s' at 0x%lx " + _bfd_error_handler + /* xgettext:c-format */ + (_("%B: TLS transition from %s to %s against `%s' at %#Lx " "in section `%A' failed"), - abfd, sec, from->name, to->name, name, - (unsigned long) rel->r_offset); + abfd, from->name, to->name, name, rel->r_offset, sec); bfd_set_error (bfd_error_bad_value); return FALSE; } @@ -1602,7 +1380,8 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, #define check_relocs_failed sec_flg1 static bfd_boolean 
-elf_x86_64_need_pic (bfd *input_bfd, asection *sec, +elf_x86_64_need_pic (struct bfd_link_info *info, + bfd *input_bfd, asection *sec, struct elf_link_hash_entry *h, Elf_Internal_Shdr *symtab_hdr, Elf_Internal_Sym *isym, @@ -1611,6 +1390,7 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, const char *v = ""; const char *und = ""; const char *pic = ""; + const char *object; const char *name; if (h) @@ -1628,7 +1408,10 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, v = _("protected symbol "); break; default: - v = _("symbol "); + if (((struct elf_x86_link_hash_entry *) h)->def_protected) + v = _("protected symbol "); + else + v = _("symbol "); pic = _("; recompile with -fPIC"); break; } @@ -1642,14 +1425,444 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, pic = _("; recompile with -fPIC"); } - (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can " - "not be used when making a shared object%s"), - input_bfd, howto->name, und, v, name, pic); + if (bfd_link_dll (info)) + object = _("a shared object"); + else if (bfd_link_pie (info)) + object = _("a PIE object"); + else + object = _("a PDE object"); + + /* xgettext:c-format */ + _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can " + "not be used when making %s%s"), + input_bfd, howto->name, und, v, name, + object, pic); bfd_set_error (bfd_error_bad_value); sec->check_relocs_failed = 1; return FALSE; } +/* With the local symbol, foo, we convert + mov foo@GOTPCREL(%rip), %reg + to + lea foo(%rip), %reg + and convert + call/jmp *foo@GOTPCREL(%rip) + to + nop call foo/jmp foo nop + When PIC is false, convert + test %reg, foo@GOTPCREL(%rip) + to + test $foo, %reg + and convert + binop foo@GOTPCREL(%rip), %reg + to + binop $foo, %reg + where binop is one of adc, add, and, cmp, or, sbb, sub, xor + instructions. 
*/ + +static bfd_boolean +elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, + bfd_byte *contents, + Elf_Internal_Rela *irel, + struct elf_link_hash_entry *h, + bfd_boolean *converted, + struct bfd_link_info *link_info) +{ + struct elf_x86_link_hash_table *htab; + bfd_boolean is_pic; + bfd_boolean require_reloc_pc32; + bfd_boolean relocx; + bfd_boolean to_reloc_pc32; + asection *tsec; + char symtype; + bfd_signed_vma raddend; + unsigned int opcode; + unsigned int modrm; + unsigned int r_type = ELF32_R_TYPE (irel->r_info); + unsigned int r_symndx; + bfd_vma toff; + bfd_vma roff = irel->r_offset; + + if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) + return TRUE; + + raddend = irel->r_addend; + /* Addend for 32-bit PC-relative relocation must be -4. */ + if (raddend != -4) + return TRUE; + + htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA); + is_pic = bfd_link_pic (link_info); + + relocx = (r_type == R_X86_64_GOTPCRELX + || r_type == R_X86_64_REX_GOTPCRELX); + + /* TRUE if we can convert only to R_X86_64_PC32. Enable it for + --no-relax. */ + require_reloc_pc32 + = link_info->disable_target_specific_optimizations > 1; + + r_symndx = htab->r_sym (irel->r_info); + + opcode = bfd_get_8 (abfd, contents + roff - 2); + + /* Convert mov to lea since it has been done for a while. */ + if (opcode != 0x8b) + { + /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX + for call, jmp or one of adc, add, and, cmp, or, sbb, sub, + test, xor instructions. */ + if (!relocx) + return TRUE; + } + + /* We convert only to R_X86_64_PC32: + 1. Branch. + 2. R_X86_64_GOTPCREL since we can't modify REX byte. + 3. require_reloc_pc32 is true. + 4. PIC. + */ + to_reloc_pc32 = (opcode == 0xff + || !relocx + || require_reloc_pc32 + || is_pic); + + /* Get the symbol referred to by the reloc. */ + if (h == NULL) + { + Elf_Internal_Sym *isym + = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx); + + /* Skip relocation against undefined symbols. 
*/ + if (isym->st_shndx == SHN_UNDEF) + return TRUE; + + symtype = ELF_ST_TYPE (isym->st_info); + + if (isym->st_shndx == SHN_ABS) + tsec = bfd_abs_section_ptr; + else if (isym->st_shndx == SHN_COMMON) + tsec = bfd_com_section_ptr; + else if (isym->st_shndx == SHN_X86_64_LCOMMON) + tsec = &_bfd_elf_large_com_section; + else + tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); + + toff = isym->st_value; + } + else + { + /* Undefined weak symbol is only bound locally in executable + and its reference is resolved as 0 without relocation + overflow. We can only perform this optimization for + GOTPCRELX relocations since we need to modify REX byte. + It is OK convert mov with R_X86_64_GOTPCREL to + R_X86_64_PC32. */ + if ((relocx || opcode == 0x8b) + && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info, + X86_64_ELF_DATA, + TRUE, + elf_x86_hash_entry (h))) + { + if (opcode == 0xff) + { + /* Skip for branch instructions since R_X86_64_PC32 + may overflow. */ + if (require_reloc_pc32) + return TRUE; + } + else if (relocx) + { + /* For non-branch instructions, we can convert to + R_X86_64_32/R_X86_64_32S since we know if there + is a REX byte. */ + to_reloc_pc32 = FALSE; + } + + /* Since we don't know the current PC when PIC is true, + we can't convert to R_X86_64_PC32. */ + if (to_reloc_pc32 && is_pic) + return TRUE; + + goto convert; + } + /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since + ld.so may use its link-time address. */ + else if (h->start_stop + || ((h->def_regular + || h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h != htab->elf.hdynamic + && SYMBOL_REFERENCES_LOCAL (link_info, h))) + { + /* bfd_link_hash_new or bfd_link_hash_undefined is + set by an assignment in a linker script in + bfd_elf_record_link_assignment. start_stop is set + on __start_SECNAME/__stop_SECNAME which mark section + SECNAME. 
*/ + if (h->start_stop + || (h->def_regular + && (h->root.type == bfd_link_hash_new + || h->root.type == bfd_link_hash_undefined + || ((h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h->root.u.def.section == bfd_und_section_ptr)))) + { + /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ + if (require_reloc_pc32) + return TRUE; + goto convert; + } + tsec = h->root.u.def.section; + toff = h->root.u.def.value; + symtype = h->type; + } + else + return TRUE; + } + + /* Don't convert GOTPCREL relocation against large section. */ + if (elf_section_data (tsec) != NULL + && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0) + return TRUE; + + /* We can only estimate relocation overflow for R_X86_64_PC32. */ + if (!to_reloc_pc32) + goto convert; + + if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE) + { + /* At this stage in linking, no SEC_MERGE symbol has been + adjusted, so all references to such symbols need to be + passed through _bfd_merged_section_offset. (Later, in + relocate_section, all SEC_MERGE symbols *except* for + section symbols have been adjusted.) + + gas may reduce relocations against symbols in SEC_MERGE + sections to a relocation against the section symbol when + the original addend was zero. When the reloc is against + a section symbol we should include the addend in the + offset passed to _bfd_merged_section_offset, since the + location of interest is the original symbol. On the + other hand, an access to "sym+addend" where "sym" is not + a section symbol should not include the addend; Such an + access is presumed to be an offset from "sym"; The + location of interest is just "sym". */ + if (symtype == STT_SECTION) + toff += raddend; + + toff = _bfd_merged_section_offset (abfd, &tsec, + elf_section_data (tsec)->sec_info, + toff); + + if (symtype != STT_SECTION) + toff += raddend; + } + else + toff += raddend; + + /* Don't convert if R_X86_64_PC32 relocation overflows. 
*/ + if (tsec->output_section == sec->output_section) + { + if ((toff - roff + 0x80000000) > 0xffffffff) + return TRUE; + } + else + { + bfd_signed_vma distance; + + /* At this point, we don't know the load addresses of TSEC + section nor SEC section. We estimate the distrance between + SEC and TSEC. We store the estimated distances in the + compressed_size field of the output section, which is only + used to decompress the compressed input section. */ + if (sec->output_section->compressed_size == 0) + { + asection *asect; + bfd_size_type size = 0; + for (asect = link_info->output_bfd->sections; + asect != NULL; + asect = asect->next) + /* Skip debug sections since compressed_size is used to + compress debug sections. */ + if ((asect->flags & SEC_DEBUGGING) == 0) + { + asection *i; + for (i = asect->map_head.s; + i != NULL; + i = i->map_head.s) + { + size = align_power (size, i->alignment_power); + size += i->size; + } + asect->compressed_size = size; + } + } + + /* Don't convert GOTPCREL relocations if TSEC isn't placed + after SEC. */ + distance = (tsec->output_section->compressed_size + - sec->output_section->compressed_size); + if (distance < 0) + return TRUE; + + /* Take PT_GNU_RELRO segment into account by adding + maxpagesize. */ + if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize + - roff + 0x80000000) > 0xffffffff) + return TRUE; + } + +convert: + if (opcode == 0xff) + { + /* We have "call/jmp *foo@GOTPCREL(%rip)". */ + unsigned int nop; + unsigned int disp; + bfd_vma nop_offset; + + /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to + R_X86_64_PC32. */ + modrm = bfd_get_8 (abfd, contents + roff - 1); + if (modrm == 0x25) + { + /* Convert to "jmp foo nop". 
*/ + modrm = 0xe9; + nop = NOP_OPCODE; + nop_offset = irel->r_offset + 3; + disp = bfd_get_32 (abfd, contents + irel->r_offset); + irel->r_offset -= 1; + bfd_put_32 (abfd, disp, contents + irel->r_offset); + } + else + { + struct elf_x86_link_hash_entry *eh + = (struct elf_x86_link_hash_entry *) h; + + /* Convert to "nop call foo". ADDR_PREFIX_OPCODE + is a nop prefix. */ + modrm = 0xe8; + /* To support TLS optimization, always use addr32 prefix for + "call *__tls_get_addr@GOTPCREL(%rip)". */ + if (eh && eh->tls_get_addr) + { + nop = 0x67; + nop_offset = irel->r_offset - 2; + } + else + { + nop = link_info->call_nop_byte; + if (link_info->call_nop_as_suffix) + { + nop_offset = irel->r_offset + 3; + disp = bfd_get_32 (abfd, contents + irel->r_offset); + irel->r_offset -= 1; + bfd_put_32 (abfd, disp, contents + irel->r_offset); + } + else + nop_offset = irel->r_offset - 2; + } + } + bfd_put_8 (abfd, nop, contents + nop_offset); + bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); + r_type = R_X86_64_PC32; + } + else + { + unsigned int rex; + unsigned int rex_mask = REX_R; + + if (r_type == R_X86_64_REX_GOTPCRELX) + rex = bfd_get_8 (abfd, contents + roff - 3); + else + rex = 0; + + if (opcode == 0x8b) + { + if (to_reloc_pc32) + { + /* Convert "mov foo@GOTPCREL(%rip), %reg" to + "lea foo(%rip), %reg". */ + opcode = 0x8d; + r_type = R_X86_64_PC32; + } + else + { + /* Convert "mov foo@GOTPCREL(%rip), %reg" to + "mov $foo, %reg". */ + opcode = 0xc7; + modrm = bfd_get_8 (abfd, contents + roff - 1); + modrm = 0xc0 | (modrm & 0x38) >> 3; + if ((rex & REX_W) != 0 + && ABI_64_P (link_info->output_bfd)) + { + /* Keep the REX_W bit in REX byte for LP64. */ + r_type = R_X86_64_32S; + goto rewrite_modrm_rex; + } + else + { + /* If the REX_W bit in REX byte isn't needed, + use R_X86_64_32 and clear the W bit to avoid + sign-extend imm32 to imm64. */ + r_type = R_X86_64_32; + /* Clear the W bit in REX byte. 
*/ + rex_mask |= REX_W; + goto rewrite_modrm_rex; + } + } + } + else + { + /* R_X86_64_PC32 isn't supported. */ + if (to_reloc_pc32) + return TRUE; + + modrm = bfd_get_8 (abfd, contents + roff - 1); + if (opcode == 0x85) + { + /* Convert "test %reg, foo@GOTPCREL(%rip)" to + "test $foo, %reg". */ + modrm = 0xc0 | (modrm & 0x38) >> 3; + opcode = 0xf7; + } + else + { + /* Convert "binop foo@GOTPCREL(%rip), %reg" to + "binop $foo, %reg". */ + modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); + opcode = 0x81; + } + + /* Use R_X86_64_32 with 32-bit operand to avoid relocation + overflow when sign-extending imm32 to imm64. */ + r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; + +rewrite_modrm_rex: + bfd_put_8 (abfd, modrm, contents + roff - 1); + + if (rex) + { + /* Move the R bit to the B bit in REX byte. */ + rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; + bfd_put_8 (abfd, rex, contents + roff - 3); + } + + /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ + irel->r_addend = 0; + } + + bfd_put_8 (abfd, opcode, contents + roff - 2); + } + + irel->r_info = htab->r_info (r_symndx, r_type); + + *converted = TRUE; + + return TRUE; +} + /* Look through the relocs for a section during the first phase, and calculate needed space in the global offset table, procedure linkage table, and dynamic reloc sections. */ @@ -1659,27 +1872,43 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec, const Elf_Internal_Rela *relocs) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; asection *sreloc; - bfd_boolean use_plt_got; + bfd_byte *contents; if (bfd_link_relocatable (info)) return TRUE; + /* Don't do anything special with non-loaded, non-alloced sections. + In particular, any relocs in such sections should not affect GOT + and PLT reference counting (ie. 
we don't allow them to create GOT + or PLT entries), there's no possibility or desire to optimize TLS + relocs, and there's not much point in propagating relocs to shared + libs that the dynamic linker won't relocate. */ + if ((sec->flags & SEC_ALLOC) == 0) + return TRUE; + BFD_ASSERT (is_x86_64_elf (abfd)); - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) { sec->check_relocs_failed = 1; return FALSE; } - use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed; + /* Get the section contents. */ + if (elf_section_data (sec)->this_hdr.contents != NULL) + contents = elf_section_data (sec)->this_hdr.contents; + else if (!bfd_malloc_and_get_section (abfd, sec, &contents)) + { + sec->check_relocs_failed = 1; + return FALSE; + } symtab_hdr = &elf_symtab_hdr (abfd); sym_hashes = elf_sym_hashes (abfd); @@ -1690,9 +1919,9 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, for (rel = relocs; rel < rel_end; rel++) { unsigned int r_type; - unsigned long r_symndx; + unsigned int r_symndx; struct elf_link_hash_entry *h; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_entry *eh; Elf_Internal_Sym *isym; const char *name; bfd_boolean size_reloc; @@ -1702,8 +1931,9 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) { - (*_bfd_error_handler) (_("%B: bad symbol index: %d"), - abfd, r_symndx); + /* xgettext:c-format */ + _bfd_error_handler (_("%B: bad symbol index: %d"), + abfd, r_symndx); goto error_return; } @@ -1718,12 +1948,14 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, /* Check relocation against local STT_GNU_IFUNC symbol. */ if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) { - h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, - TRUE); + h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel, + TRUE); if (h == NULL) goto error_return; /* Fake a STT_GNU_IFUNC symbol. 
*/ + h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr, + isym, NULL); h->type = STT_GNU_IFUNC; h->def_regular = 1; h->ref_regular = 1; @@ -1764,7 +1996,8 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against symbol `%s' isn't " "supported in x32 mode"), abfd, x86_64_elf_howto_table[r_type].name, name); @@ -1776,97 +2009,31 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (h != NULL) { - switch (r_type) - { - default: - break; - - case R_X86_64_PC32_BND: - case R_X86_64_PLT32_BND: - case R_X86_64_PC32: - case R_X86_64_PLT32: - case R_X86_64_32: - case R_X86_64_64: - /* MPX PLT is supported only if elf_x86_64_arch_bed - is used in 64-bit mode. */ - if (ABI_64_P (abfd) - && info->bndplt - && (get_elf_x86_64_backend_data (abfd) - == &elf_x86_64_arch_bed)) - { - elf_x86_64_hash_entry (h)->has_bnd_reloc = 1; - - /* Create the second PLT for Intel MPX support. 
*/ - if (htab->plt_bnd == NULL) - { - unsigned int plt_bnd_align; - const struct elf_backend_data *bed; - - bed = get_elf_backend_data (info->output_bfd); - BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8 - && (sizeof (elf_x86_64_bnd_plt2_entry) - == sizeof (elf_x86_64_legacy_plt2_entry))); - plt_bnd_align = 3; - - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - htab->plt_bnd - = bfd_make_section_anyway_with_flags (htab->elf.dynobj, - ".plt.bnd", - (bed->dynamic_sec_flags - | SEC_ALLOC - | SEC_CODE - | SEC_LOAD - | SEC_READONLY)); - if (htab->plt_bnd == NULL - || !bfd_set_section_alignment (htab->elf.dynobj, - htab->plt_bnd, - plt_bnd_align)) - goto error_return; - } - } - - case R_X86_64_32S: - case R_X86_64_PC64: - case R_X86_64_GOTPCREL: - case R_X86_64_GOTPCRELX: - case R_X86_64_REX_GOTPCRELX: - case R_X86_64_GOTPCREL64: - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - /* Create the ifunc sections for static executables. */ - if (h->type == STT_GNU_IFUNC - && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj, - info)) - goto error_return; - break; - } - /* It is referenced by a non-shared object. */ h->ref_regular = 1; - h->root.non_ir_ref = 1; + h->root.non_ir_ref_regular = 1; if (h->type == STT_GNU_IFUNC) elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc; } - if (! elf_x86_64_tls_transition (info, abfd, sec, NULL, + if (! 
elf_x86_64_tls_transition (info, abfd, sec, contents, symtab_hdr, sym_hashes, &r_type, GOT_UNKNOWN, - rel, rel_end, h, r_symndx)) + rel, rel_end, h, r_symndx, FALSE)) goto error_return; - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; switch (r_type) { case R_X86_64_TLSLD: - htab->tls_ld_got.refcount += 1; + htab->tls_ld_or_ldm_got.refcount += 1; goto create_got; case R_X86_64_TPOFF32: if (!bfd_link_executable (info) && ABI_64_P (abfd)) - return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym, + return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, &x86_64_elf_howto_table[r_type]); if (eh != NULL) eh->has_got_reloc = 1; @@ -1924,14 +2091,14 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (local_got_refcounts == NULL) goto error_return; elf_local_got_refcounts (abfd) = local_got_refcounts; - elf_x86_64_local_tlsdesc_gotent (abfd) + elf_x86_local_tlsdesc_gotent (abfd) = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info); - elf_x86_64_local_got_tls_type (abfd) + elf_x86_local_got_tls_type (abfd) = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info); } local_got_refcounts[r_symndx] += 1; old_tls_type - = elf_x86_64_local_got_tls_type (abfd) [r_symndx]; + = elf_x86_local_got_tls_type (abfd) [r_symndx]; } /* If a TLS symbol is accessed using IE at least once, @@ -1952,8 +2119,10 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) - (_("%B: '%s' accessed both as normal and thread local symbol"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%B: '%s' accessed both as normal and" + " thread local symbol"), abfd, name); bfd_set_error (bfd_error_bad_value); goto error_return; @@ -1965,7 +2134,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (eh != NULL) eh->tls_type = tls_type; else - elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type; + 
elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type; } } /* Fall through */ @@ -1976,14 +2145,6 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, create_got: if (eh != NULL) eh->has_got_reloc = 1; - if (htab->elf.sgot == NULL) - { - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - if (!_bfd_elf_create_got_section (htab->elf.dynobj, - info)) - goto error_return; - } break; case R_X86_64_PLT32: @@ -2023,6 +2184,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_32: if (!ABI_64_P (abfd)) goto pointer; + /* Fall through. */ case R_X86_64_8: case R_X86_64_16: case R_X86_64_32S: @@ -2036,9 +2198,8 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, && h != NULL && !h->def_regular && h->def_dynamic - && (sec->flags & SEC_READONLY) == 0)) - && (sec->flags & SEC_ALLOC) != 0) - return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym, + && (sec->flags & SEC_READONLY) == 0))) + return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, &x86_64_elf_howto_table[r_type]); /* Fall through. */ @@ -2051,15 +2212,12 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, pointer: if (eh != NULL && (sec->flags & SEC_CODE) != 0) eh->has_non_got_reloc = 1; - /* STT_GNU_IFUNC symbol must go through PLT even if it is - locally defined and undefined symbol may turn out to be - a STT_GNU_IFUNC symbol later. */ + /* We are called after all symbols have been resolved. Only + relocation against STT_GNU_IFUNC symbol must go through + PLT. */ if (h != NULL && (bfd_link_executable (info) - || ((h->type == STT_GNU_IFUNC - || h->root.type == bfd_link_hash_undefweak - || h->root.type == bfd_link_hash_undefined) - && SYMBOLIC_BIND (info, h)))) + || h->type == STT_GNU_IFUNC)) { /* If this reloc is in a read-only section, we might need a copy reloc. We can't check reliably at this @@ -2069,9 +2227,13 @@ pointer: adjust_dynamic_symbol. 
*/ h->non_got_ref = 1; - /* We may need a .plt entry if the function this reloc - refers to is in a shared lib. */ - h->plt.refcount += 1; + /* We may need a .plt entry if the symbol is a function + defined in a shared lib or is a STT_GNU_IFUNC function + referenced from the code or read-only section. */ + if (!h->def_regular + || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0) + h->plt.refcount += 1; + if (r_type == R_X86_64_PC32) { /* Since something like ".long foo - ." may be used @@ -2118,18 +2280,23 @@ do_size: If on the other hand, we are creating an executable, we may need to keep relocations for symbols satisfied by a dynamic library if we manage to avoid copy relocs for the - symbol. */ + symbol. + + Generate dynamic pointer relocation against STT_GNU_IFUNC + symbol in the non-code section. */ if ((bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 && (! IS_X86_64_PCREL_TYPE (r_type) || (h != NULL && (! (bfd_link_pie (info) || SYMBOLIC_BIND (info, h)) || h->root.type == bfd_link_hash_defweak || !h->def_regular)))) + || (h != NULL + && h->type == STT_GNU_IFUNC + && r_type == htab->pointer_r_type + && (sec->flags & SEC_CODE) == 0) || (ELIMINATE_COPY_RELOCS && !bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 && h != NULL && (h->root.type == bfd_link_hash_defweak || !h->def_regular))) @@ -2142,9 +2309,6 @@ do_size: this reloc. */ if (sreloc == NULL) { - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - sreloc = _bfd_elf_make_dynamic_reloc_section (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2, abfd, /*rela?*/ TRUE); @@ -2208,284 +2372,46 @@ do_size: case R_X86_64_GNU_VTINHERIT: if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) goto error_return; - break; - - /* This relocation describes which C++ vtable entries are actually - used. Record for later use during GC. 
*/ - case R_X86_64_GNU_VTENTRY: - BFD_ASSERT (h != NULL); - if (h != NULL - && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) - goto error_return; - break; - - default: - break; - } - - if (use_plt_got - && h != NULL - && h->plt.refcount > 0 - && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) - || h->got.refcount > 0) - && htab->plt_got == NULL) - { - /* Create the GOT procedure linkage table. */ - unsigned int plt_got_align; - const struct elf_backend_data *bed; - - bed = get_elf_backend_data (info->output_bfd); - BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8 - && (sizeof (elf_x86_64_bnd_plt2_entry) - == sizeof (elf_x86_64_legacy_plt2_entry))); - plt_got_align = 3; - - if (htab->elf.dynobj == NULL) - htab->elf.dynobj = abfd; - htab->plt_got - = bfd_make_section_anyway_with_flags (htab->elf.dynobj, - ".plt.got", - (bed->dynamic_sec_flags - | SEC_ALLOC - | SEC_CODE - | SEC_LOAD - | SEC_READONLY)); - if (htab->plt_got == NULL - || !bfd_set_section_alignment (htab->elf.dynobj, - htab->plt_got, - plt_got_align)) - goto error_return; - } - - if ((r_type == R_X86_64_GOTPCREL - || r_type == R_X86_64_GOTPCRELX - || r_type == R_X86_64_REX_GOTPCRELX) - && (h == NULL || h->type != STT_GNU_IFUNC)) - sec->need_convert_load = 1; - } - - return TRUE; - -error_return: - sec->check_relocs_failed = 1; - return FALSE; -} - -/* Return the section that should be marked against GC for a given - relocation. */ - -static asection * -elf_x86_64_gc_mark_hook (asection *sec, - struct bfd_link_info *info, - Elf_Internal_Rela *rel, - struct elf_link_hash_entry *h, - Elf_Internal_Sym *sym) -{ - if (h != NULL) - switch (ELF32_R_TYPE (rel->r_info)) - { - case R_X86_64_GNU_VTINHERIT: - case R_X86_64_GNU_VTENTRY: - return NULL; - } - - return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); -} - -/* Remove undefined weak symbol from the dynamic symbol table if it - is resolved to 0. 
*/ - -static bfd_boolean -elf_x86_64_fixup_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *h) -{ - if (h->dynindx != -1 - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, - elf_x86_64_hash_entry (h)->has_got_reloc, - elf_x86_64_hash_entry (h))) - { - h->dynindx = -1; - _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr, - h->dynstr_index); - } - return TRUE; -} - -/* Adjust a symbol defined by a dynamic object and referenced by a - regular object. The current definition is in some section of the - dynamic object, but we're not including those sections. We have to - change the definition to something the rest of the link can - understand. */ - -static bfd_boolean -elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, - struct elf_link_hash_entry *h) -{ - struct elf_x86_64_link_hash_table *htab; - asection *s; - struct elf_x86_64_link_hash_entry *eh; - struct elf_dyn_relocs *p; - - /* STT_GNU_IFUNC symbol must go through PLT. */ - if (h->type == STT_GNU_IFUNC) - { - /* All local STT_GNU_IFUNC references must be treate as local - calls via local PLT. */ - if (h->ref_regular - && SYMBOL_CALLS_LOCAL (info, h)) - { - bfd_size_type pc_count = 0, count = 0; - struct elf_dyn_relocs **pp; - - eh = (struct elf_x86_64_link_hash_entry *) h; - for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) - { - pc_count += p->pc_count; - p->count -= p->pc_count; - p->pc_count = 0; - count += p->count; - if (p->count == 0) - *pp = p->next; - else - pp = &p->next; - } - - if (pc_count || count) - { - h->needs_plt = 1; - h->non_got_ref = 1; - if (h->plt.refcount <= 0) - h->plt.refcount = 1; - else - h->plt.refcount += 1; - } - } - - if (h->plt.refcount <= 0) - { - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - return TRUE; - } - - /* If this is a function, put it in the procedure linkage table. We - will fill in the contents of the procedure linkage table later, - when we know the address of the .got section. 
*/ - if (h->type == STT_FUNC - || h->needs_plt) - { - if (h->plt.refcount <= 0 - || SYMBOL_CALLS_LOCAL (info, h) - || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT - && h->root.type == bfd_link_hash_undefweak)) - { - /* This case can occur if we saw a PLT32 reloc in an input - file, but the symbol was never referred to by a dynamic - object, or if all references were garbage collected. In - such a case, we don't actually need to build a procedure - linkage table, and we can just do a PC32 reloc instead. */ - h->plt.offset = (bfd_vma) -1; - h->needs_plt = 0; - } - - return TRUE; - } - else - /* It's possible that we incorrectly decided a .plt reloc was - needed for an R_X86_64_PC32 reloc to a non-function sym in - check_relocs. We can't decide accurately between function and - non-function syms in check-relocs; Objects loaded later in - the link may change h->type. So fix it now. */ - h->plt.offset = (bfd_vma) -1; - - /* If this is a weak symbol, and there is a real definition, the - processor independent code will have arranged for us to see the - real definition first, and we can just use the same value. */ - if (h->u.weakdef != NULL) - { - BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined - || h->u.weakdef->root.type == bfd_link_hash_defweak); - h->root.u.def.section = h->u.weakdef->root.u.def.section; - h->root.u.def.value = h->u.weakdef->root.u.def.value; - if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) - { - eh = (struct elf_x86_64_link_hash_entry *) h; - h->non_got_ref = h->u.weakdef->non_got_ref; - eh->needs_copy = h->u.weakdef->needs_copy; - } - return TRUE; - } - - /* This is a reference to a symbol defined by a dynamic object which - is not a function. */ - - /* If we are creating a shared library, we must presume that the - only references to the symbol are via the global offset table. - For such cases we need not do anything here; the relocations will - be handled correctly by relocate_section. 
*/ - if (!bfd_link_executable (info)) - return TRUE; - - /* If there are no references to this symbol that do not use the - GOT, we don't need to generate a copy reloc. */ - if (!h->non_got_ref) - return TRUE; + break; - /* If -z nocopyreloc was given, we won't generate them either. */ - if (info->nocopyreloc) - { - h->non_got_ref = 0; - return TRUE; - } + /* This relocation describes which C++ vtable entries are actually + used. Record for later use during GC. */ + case R_X86_64_GNU_VTENTRY: + BFD_ASSERT (h != NULL); + if (h != NULL + && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) + goto error_return; + break; - if (ELIMINATE_COPY_RELOCS) - { - eh = (struct elf_x86_64_link_hash_entry *) h; - for (p = eh->dyn_relocs; p != NULL; p = p->next) - { - s = p->sec->output_section; - if (s != NULL && (s->flags & SEC_READONLY) != 0) - break; + default: + break; } - /* If we didn't find any dynamic relocs in read-only sections, then - we'll be keeping the dynamic relocs and avoiding the copy reloc. */ - if (p == NULL) - { - h->non_got_ref = 0; - return TRUE; - } + if ((r_type == R_X86_64_GOTPCREL + || r_type == R_X86_64_GOTPCRELX + || r_type == R_X86_64_REX_GOTPCRELX) + && (h == NULL || h->type != STT_GNU_IFUNC)) + sec->need_convert_load = 1; } - /* We must allocate the symbol in our .dynbss section, which will - become part of the .bss section of the executable. There will be - an entry for this symbol in the .dynsym section. The dynamic - object will contain position independent code, so all references - from the dynamic object to this symbol will go through the global - offset table. The dynamic linker will use the .dynsym entry to - determine the address it must put in the global offset table, so - both the dynamic object and the regular object will refer to the - same memory location for the variable. 
*/ - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker - to copy the initial value out of the dynamic object and into the - runtime process image. */ - if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) + if (elf_section_data (sec)->this_hdr.contents != contents) { - const struct elf_backend_data *bed; - bed = get_elf_backend_data (info->output_bfd); - htab->srelbss->size += bed->s->sizeof_rela; - h->needs_copy = 1; + if (!info->keep_memory) + free (contents); + else + { + /* Cache the section contents for elf_link_input_bfd. */ + elf_section_data (sec)->this_hdr.contents = contents; + } } - s = htab->sdynbss; + return TRUE; - return _bfd_elf_adjust_dynamic_copy (info, h, s); +error_return: + if (elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + sec->check_relocs_failed = 1; + return FALSE; } /* Allocate space in .plt, .got and associated reloc sections for @@ -2495,8 +2421,8 @@ static bfd_boolean elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) { struct bfd_link_info *info; - struct elf_x86_64_link_hash_table *htab; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_table *htab; + struct elf_x86_link_hash_entry *eh; struct elf_dyn_relocs *p; const struct elf_backend_data *bed; unsigned int plt_entry_size; @@ -2505,16 +2431,18 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) if (h->root.type == bfd_link_hash_indirect) return TRUE; - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; info = (struct bfd_link_info *) inf; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; bed = get_elf_backend_data (info->output_bfd); - plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); + + plt_entry_size = htab->plt.plt_entry_size; resolved_to_zero = 
UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + X86_64_ELF_DATA, eh->has_got_reloc, eh); @@ -2550,17 +2478,18 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) &eh->dyn_relocs, &htab->readonly_dynrelocs_against_ifunc, plt_entry_size, - plt_entry_size, - GOT_ENTRY_SIZE)) + (htab->plt.has_plt0 + * plt_entry_size), + GOT_ENTRY_SIZE, TRUE)) { - asection *s = htab->plt_bnd; + asection *s = htab->plt_second; if (h->plt.offset != (bfd_vma) -1 && s != NULL) { - /* Use the .plt.bnd section if it is created. */ - eh->plt_bnd.offset = s->size; + /* Use the second PLT section if it is created. */ + eh->plt_second.offset = s->size; - /* Make room for this entry in the .plt.bnd section. */ - s->size += sizeof (elf_x86_64_legacy_plt2_entry); + /* Make room for this entry in the second PLT section. */ + s->size += htab->non_lazy_plt->plt_entry_size; } return TRUE; @@ -2574,29 +2503,18 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) && (h->plt.refcount > eh->func_pointer_refcount || eh->plt_got.refcount > 0)) { - bfd_boolean use_plt_got; + bfd_boolean use_plt_got = eh->plt_got.refcount > 0; /* Clear the reference count of function pointer relocations if PLT is used. */ eh->func_pointer_refcount = 0; - if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) - { - /* Don't use the regular PLT for DF_BIND_NOW. */ - h->plt.offset = (bfd_vma) -1; - - /* Use the GOT PLT. */ - h->got.refcount = 1; - eh->plt_got.refcount = 1; - } - - use_plt_got = eh->plt_got.refcount > 0; - /* Make sure this symbol is output as a dynamic symbol. Undefined weak syms won't yet be marked as dynamic. */ if (h->dynindx == -1 && !h->forced_local - && !resolved_to_zero) + && !resolved_to_zero + && h->root.type == bfd_link_hash_undefweak) { if (! 
bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; @@ -2606,22 +2524,22 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) { asection *s = htab->elf.splt; - asection *bnd_s = htab->plt_bnd; + asection *second_s = htab->plt_second; asection *got_s = htab->plt_got; /* If this is the first .plt entry, make room for the special first entry. The .plt section is used by prelink to undo prelinking for dynamic relocations. */ if (s->size == 0) - s->size = plt_entry_size; + s->size = htab->plt.has_plt0 * plt_entry_size; if (use_plt_got) eh->plt_got.offset = got_s->size; else { h->plt.offset = s->size; - if (bnd_s) - eh->plt_bnd.offset = bnd_s->size; + if (second_s) + eh->plt_second.offset = second_s->size; } /* If this symbol is not defined in a regular file, and we are @@ -2641,12 +2559,12 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) } else { - if (bnd_s) + if (second_s) { - /* We need to make a call to the entry of the second - PLT instead of regular PLT entry. */ - h->root.u.def.section = bnd_s; - h->root.u.def.value = eh->plt_bnd.offset; + /* We need to make a call to the entry of the + second PLT instead of regular PLT entry. */ + h->root.u.def.section = second_s; + h->root.u.def.value = eh->plt_second.offset; } else { @@ -2658,12 +2576,12 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) /* Make room for this entry. 
*/ if (use_plt_got) - got_s->size += sizeof (elf_x86_64_legacy_plt2_entry); + got_s->size += htab->non_lazy_plt->plt_entry_size; else { s->size += plt_entry_size; - if (bnd_s) - bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry); + if (second_s) + second_s->size += htab->non_lazy_plt->plt_entry_size; /* We also need to make an entry in the .got.plt section, which will be placed in the .got section by the linker @@ -2702,7 +2620,7 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) if (h->got.refcount > 0 && bfd_link_executable (info) && h->dynindx == -1 - && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE) + && elf_x86_hash_entry (h)->tls_type == GOT_TLS_IE) { h->got.offset = (bfd_vma) -1; } @@ -2710,13 +2628,14 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) { asection *s; bfd_boolean dyn; - int tls_type = elf_x86_64_hash_entry (h)->tls_type; + int tls_type = elf_x86_hash_entry (h)->tls_type; /* Make sure this symbol is output as a dynamic symbol. Undefined weak syms won't yet be marked as dynamic. */ if (h->dynindx == -1 && !h->forced_local - && !resolved_to_zero) + && !resolved_to_zero + && h->root.type == bfd_link_hash_undefweak) { if (! bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; @@ -2853,6 +2772,7 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) if (h->dynindx == -1 && ! h->forced_local && ! resolved_to_zero + && h->root.type == bfd_link_hash_undefweak && ! bfd_elf_link_record_dynamic_symbol (info, h)) return FALSE; @@ -2902,511 +2822,108 @@ elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf) return elf_x86_64_allocate_dynrelocs (h, inf); } -/* Find any dynamic relocs that apply to read-only sections. */ - -static bfd_boolean -elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h, - void * inf) -{ - struct elf_x86_64_link_hash_entry *eh; - struct elf_dyn_relocs *p; - - /* Skip local IFUNC symbols. 
*/ - if (h->forced_local && h->type == STT_GNU_IFUNC) - return TRUE; - - eh = (struct elf_x86_64_link_hash_entry *) h; - for (p = eh->dyn_relocs; p != NULL; p = p->next) - { - asection *s = p->sec->output_section; - - if (s != NULL && (s->flags & SEC_READONLY) != 0) - { - struct bfd_link_info *info = (struct bfd_link_info *) inf; - - info->flags |= DF_TEXTREL; - - if ((info->warn_shared_textrel && bfd_link_pic (info)) - || info->error_textrel) - info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"), - p->sec->owner, h->root.root.string, - p->sec); - - /* Not an error, just cut short the traversal. */ - return FALSE; - } - } - return TRUE; -} - -/* With the local symbol, foo, we convert - mov foo@GOTPCREL(%rip), %reg - to - lea foo(%rip), %reg - and convert - call/jmp *foo@GOTPCREL(%rip) - to - nop call foo/jmp foo nop - When PIC is false, convert - test %reg, foo@GOTPCREL(%rip) - to - test $foo, %reg - and convert - binop foo@GOTPCREL(%rip), %reg - to - binop $foo, %reg - where binop is one of adc, add, and, cmp, or, sbb, sub, xor - instructions. */ +/* Convert load via the GOT slot to load immediate. */ static bfd_boolean elf_x86_64_convert_load (bfd *abfd, asection *sec, struct bfd_link_info *link_info) -{ - Elf_Internal_Shdr *symtab_hdr; - Elf_Internal_Rela *internal_relocs; - Elf_Internal_Rela *irel, *irelend; - bfd_byte *contents; - struct elf_x86_64_link_hash_table *htab; - bfd_boolean changed_contents; - bfd_boolean changed_relocs; - bfd_signed_vma *local_got_refcounts; - bfd_vma maxpagesize; - bfd_boolean is_pic; - bfd_boolean require_reloc_pc32; - - /* Don't even try to convert non-ELF outputs. */ - if (!is_elf_hash_table (link_info->hash)) - return FALSE; - - /* Nothing to do if there is no need or no output. 
*/ - if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC) - || sec->need_convert_load == 0 - || bfd_is_abs_section (sec->output_section)) - return TRUE; - - symtab_hdr = &elf_tdata (abfd)->symtab_hdr; - - /* Load the relocations for this section. */ - internal_relocs = (_bfd_elf_link_read_relocs - (abfd, sec, NULL, (Elf_Internal_Rela *) NULL, - link_info->keep_memory)); - if (internal_relocs == NULL) - return FALSE; - - htab = elf_x86_64_hash_table (link_info); - changed_contents = FALSE; - changed_relocs = FALSE; - local_got_refcounts = elf_local_got_refcounts (abfd); - maxpagesize = get_elf_backend_data (abfd)->maxpagesize; - - /* Get the section contents. */ - if (elf_section_data (sec)->this_hdr.contents != NULL) - contents = elf_section_data (sec)->this_hdr.contents; - else - { - if (!bfd_malloc_and_get_section (abfd, sec, &contents)) - goto error_return; - } - - is_pic = bfd_link_pic (link_info); - - /* TRUE if we can convert only to R_X86_64_PC32. Enable it for - --no-relax. */ - require_reloc_pc32 - = link_info->disable_target_specific_optimizations > 1; - - irelend = internal_relocs + sec->reloc_count; - for (irel = internal_relocs; irel < irelend; irel++) - { - unsigned int r_type = ELF32_R_TYPE (irel->r_info); - unsigned int r_symndx = htab->r_sym (irel->r_info); - unsigned int indx; - struct elf_link_hash_entry *h; - asection *tsec; - char symtype; - bfd_vma toff, roff; - bfd_signed_vma raddend; - unsigned int opcode; - unsigned int modrm; - bfd_boolean relocx; - bfd_boolean to_reloc_pc32; - - relocx = (r_type == R_X86_64_GOTPCRELX - || r_type == R_X86_64_REX_GOTPCRELX); - if (!relocx && r_type != R_X86_64_GOTPCREL) - continue; - - roff = irel->r_offset; - if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) - continue; - - raddend = irel->r_addend; - /* Addend for 32-bit PC-relative relocation must be -4. 
*/ - if (raddend != -4) - continue; - - opcode = bfd_get_8 (abfd, contents + roff - 2); - - /* Convert mov to lea since it has been done for a while. */ - if (opcode != 0x8b) - { - /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX - for call, jmp or one of adc, add, and, cmp, or, sbb, sub, - test, xor instructions. */ - if (!relocx) - continue; - } - - /* We convert only to R_X86_64_PC32: - 1. Branch. - 2. R_X86_64_GOTPCREL since we can't modify REX byte. - 3. require_reloc_pc32 is true. - 4. PIC. - */ - to_reloc_pc32 = (opcode == 0xff - || !relocx - || require_reloc_pc32 - || is_pic); - - /* Get the symbol referred to by the reloc. */ - if (r_symndx < symtab_hdr->sh_info) - { - Elf_Internal_Sym *isym; - - isym = bfd_sym_from_r_symndx (&htab->sym_cache, - abfd, r_symndx); - - symtype = ELF_ST_TYPE (isym->st_info); - - /* STT_GNU_IFUNC must keep GOTPCREL relocations and skip - relocation against undefined symbols. */ - if (symtype == STT_GNU_IFUNC || isym->st_shndx == SHN_UNDEF) - continue; - - if (isym->st_shndx == SHN_ABS) - tsec = bfd_abs_section_ptr; - else if (isym->st_shndx == SHN_COMMON) - tsec = bfd_com_section_ptr; - else if (isym->st_shndx == SHN_X86_64_LCOMMON) - tsec = &_bfd_elf_large_com_section; - else - tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); - - h = NULL; - toff = isym->st_value; - } - else - { - indx = r_symndx - symtab_hdr->sh_info; - h = elf_sym_hashes (abfd)[indx]; - BFD_ASSERT (h != NULL); - - while (h->root.type == bfd_link_hash_indirect - || h->root.type == bfd_link_hash_warning) - h = (struct elf_link_hash_entry *) h->root.u.i.link; - - /* STT_GNU_IFUNC must keep GOTPCREL relocations. We also - avoid optimizing GOTPCREL relocations againt _DYNAMIC - since ld.so may use its link-time address. */ - if (h->type == STT_GNU_IFUNC) - continue; - - /* Undefined weak symbol is only bound locally in executable - and its reference is resolved as 0 without relocation - overflow. 
We can only perform this optimization for - GOTPCRELX relocations since we need to modify REX byte. - It is OK convert mov with R_X86_64_GOTPCREL to - R_X86_64_PC32. */ - if ((relocx || opcode == 0x8b) - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info, - TRUE, - elf_x86_64_hash_entry (h))) - { - if (opcode == 0xff) - { - /* Skip for branch instructions since R_X86_64_PC32 - may overflow. */ - if (require_reloc_pc32) - continue; - } - else if (relocx) - { - /* For non-branch instructions, we can convert to - R_X86_64_32/R_X86_64_32S since we know if there - is a REX byte. */ - to_reloc_pc32 = FALSE; - } - - /* Since we don't know the current PC when PIC is true, - we can't convert to R_X86_64_PC32. */ - if (to_reloc_pc32 && is_pic) - continue; - - goto convert; - } - else if ((h->def_regular - || h->root.type == bfd_link_hash_defined - || h->root.type == bfd_link_hash_defweak) - && h != htab->elf.hdynamic - && SYMBOL_REFERENCES_LOCAL (link_info, h)) - { - /* bfd_link_hash_new or bfd_link_hash_undefined is - set by an assignment in a linker script in - bfd_elf_record_link_assignment. */ - if (h->def_regular - && (h->root.type == bfd_link_hash_new - || h->root.type == bfd_link_hash_undefined)) - { - /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ - if (require_reloc_pc32) - continue; - goto convert; - } - tsec = h->root.u.def.section; - toff = h->root.u.def.value; - symtype = h->type; - } - else - continue; - } - - /* We can only estimate relocation overflow for R_X86_64_PC32. */ - if (!to_reloc_pc32) - goto convert; - - if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE) - { - /* At this stage in linking, no SEC_MERGE symbol has been - adjusted, so all references to such symbols need to be - passed through _bfd_merged_section_offset. (Later, in - relocate_section, all SEC_MERGE symbols *except* for - section symbols have been adjusted.) 
- - gas may reduce relocations against symbols in SEC_MERGE - sections to a relocation against the section symbol when - the original addend was zero. When the reloc is against - a section symbol we should include the addend in the - offset passed to _bfd_merged_section_offset, since the - location of interest is the original symbol. On the - other hand, an access to "sym+addend" where "sym" is not - a section symbol should not include the addend; Such an - access is presumed to be an offset from "sym"; The - location of interest is just "sym". */ - if (symtype == STT_SECTION) - toff += raddend; - - toff = _bfd_merged_section_offset (abfd, &tsec, - elf_section_data (tsec)->sec_info, - toff); - - if (symtype != STT_SECTION) - toff += raddend; - } - else - toff += raddend; - - /* Don't convert if R_X86_64_PC32 relocation overflows. */ - if (tsec->output_section == sec->output_section) - { - if ((toff - roff + 0x80000000) > 0xffffffff) - continue; - } - else - { - bfd_signed_vma distance; - - /* At this point, we don't know the load addresses of TSEC - section nor SEC section. We estimate the distrance between - SEC and TSEC. We store the estimated distances in the - compressed_size field of the output section, which is only - used to decompress the compressed input section. */ - if (sec->output_section->compressed_size == 0) - { - asection *asect; - bfd_size_type size = 0; - for (asect = link_info->output_bfd->sections; - asect != NULL; - asect = asect->next) - /* Skip debug sections since compressed_size is used to - compress debug sections. */ - if ((asect->flags & SEC_DEBUGGING) == 0) - { - asection *i; - for (i = asect->map_head.s; - i != NULL; - i = i->map_head.s) - { - size = align_power (size, i->alignment_power); - size += i->size; - } - asect->compressed_size = size; - } - } - - /* Don't convert GOTPCREL relocations if TSEC isn't placed - after SEC. 
*/ - distance = (tsec->output_section->compressed_size - - sec->output_section->compressed_size); - if (distance < 0) - continue; - - /* Take PT_GNU_RELRO segment into account by adding - maxpagesize. */ - if ((toff + distance + maxpagesize - roff + 0x80000000) - > 0xffffffff) - continue; - } - -convert: - if (opcode == 0xff) - { - /* We have "call/jmp *foo@GOTPCREL(%rip)". */ - unsigned int nop; - unsigned int disp; - bfd_vma nop_offset; - - /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to - R_X86_64_PC32. */ - modrm = bfd_get_8 (abfd, contents + roff - 1); - if (modrm == 0x25) - { - /* Convert to "jmp foo nop". */ - modrm = 0xe9; - nop = NOP_OPCODE; - nop_offset = irel->r_offset + 3; - disp = bfd_get_32 (abfd, contents + irel->r_offset); - irel->r_offset -= 1; - bfd_put_32 (abfd, disp, contents + irel->r_offset); - } - else - { - /* Convert to "nop call foo". ADDR_PREFIX_OPCODE - is a nop prefix. */ - modrm = 0xe8; - nop = link_info->call_nop_byte; - if (link_info->call_nop_as_suffix) - { - nop_offset = irel->r_offset + 3; - disp = bfd_get_32 (abfd, contents + irel->r_offset); - irel->r_offset -= 1; - bfd_put_32 (abfd, disp, contents + irel->r_offset); - } - else - nop_offset = irel->r_offset - 2; - } - bfd_put_8 (abfd, nop, contents + nop_offset); - bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); - r_type = R_X86_64_PC32; - } - else - { - unsigned int rex; - unsigned int rex_mask = REX_R; - - if (r_type == R_X86_64_REX_GOTPCRELX) - rex = bfd_get_8 (abfd, contents + roff - 3); - else - rex = 0; - - if (opcode == 0x8b) - { - if (to_reloc_pc32) - { - /* Convert "mov foo@GOTPCREL(%rip), %reg" to - "lea foo(%rip), %reg". */ - opcode = 0x8d; - r_type = R_X86_64_PC32; - } - else - { - /* Convert "mov foo@GOTPCREL(%rip), %reg" to - "mov $foo, %reg". 
*/ - opcode = 0xc7; - modrm = bfd_get_8 (abfd, contents + roff - 1); - modrm = 0xc0 | (modrm & 0x38) >> 3; - if ((rex & REX_W) != 0 - && ABI_64_P (link_info->output_bfd)) - { - /* Keep the REX_W bit in REX byte for LP64. */ - r_type = R_X86_64_32S; - goto rewrite_modrm_rex; - } - else - { - /* If the REX_W bit in REX byte isn't needed, - use R_X86_64_32 and clear the W bit to avoid - sign-extend imm32 to imm64. */ - r_type = R_X86_64_32; - /* Clear the W bit in REX byte. */ - rex_mask |= REX_W; - goto rewrite_modrm_rex; - } - } - } - else - { - /* R_X86_64_PC32 isn't supported. */ - if (to_reloc_pc32) - continue; +{ + Elf_Internal_Shdr *symtab_hdr; + Elf_Internal_Rela *internal_relocs; + Elf_Internal_Rela *irel, *irelend; + bfd_byte *contents; + struct elf_x86_link_hash_table *htab; + bfd_boolean changed; + bfd_signed_vma *local_got_refcounts; - modrm = bfd_get_8 (abfd, contents + roff - 1); - if (opcode == 0x85) - { - /* Convert "test %reg, foo@GOTPCREL(%rip)" to - "test $foo, %reg". */ - modrm = 0xc0 | (modrm & 0x38) >> 3; - opcode = 0xf7; - } - else - { - /* Convert "binop foo@GOTPCREL(%rip), %reg" to - "binop $foo, %reg". */ - modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); - opcode = 0x81; - } + /* Don't even try to convert non-ELF outputs. */ + if (!is_elf_hash_table (link_info->hash)) + return FALSE; - /* Use R_X86_64_32 with 32-bit operand to avoid relocation - overflow when sign-extending imm32 to imm64. */ - r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; + /* Nothing to do if there is no need or no output. */ + if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC) + || sec->need_convert_load == 0 + || bfd_is_abs_section (sec->output_section)) + return TRUE; -rewrite_modrm_rex: - bfd_put_8 (abfd, modrm, contents + roff - 1); + symtab_hdr = &elf_tdata (abfd)->symtab_hdr; - if (rex) - { - /* Move the R bit to the B bit in REX byte. 
*/ - rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; - bfd_put_8 (abfd, rex, contents + roff - 3); - } + /* Load the relocations for this section. */ + internal_relocs = (_bfd_elf_link_read_relocs + (abfd, sec, NULL, (Elf_Internal_Rela *) NULL, + link_info->keep_memory)); + if (internal_relocs == NULL) + return FALSE; - /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ - irel->r_addend = 0; - } + changed = FALSE; + htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA); + local_got_refcounts = elf_local_got_refcounts (abfd); - bfd_put_8 (abfd, opcode, contents + roff - 2); - } + /* Get the section contents. */ + if (elf_section_data (sec)->this_hdr.contents != NULL) + contents = elf_section_data (sec)->this_hdr.contents; + else + { + if (!bfd_malloc_and_get_section (abfd, sec, &contents)) + goto error_return; + } - irel->r_info = htab->r_info (r_symndx, r_type); - changed_contents = TRUE; - changed_relocs = TRUE; + irelend = internal_relocs + sec->reloc_count; + for (irel = internal_relocs; irel < irelend; irel++) + { + unsigned int r_type = ELF32_R_TYPE (irel->r_info); + unsigned int r_symndx; + struct elf_link_hash_entry *h; + bfd_boolean converted; - if (h) + if (r_type != R_X86_64_GOTPCRELX + && r_type != R_X86_64_REX_GOTPCRELX + && r_type != R_X86_64_GOTPCREL) + continue; + + r_symndx = htab->r_sym (irel->r_info); + if (r_symndx < symtab_hdr->sh_info) + h = _bfd_elf_x86_get_local_sym_hash (htab, sec->owner, + (const Elf_Internal_Rela *) irel, + FALSE); + else { - if (h->got.refcount > 0) - h->got.refcount -= 1; + h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info]; + while (h->root.type == bfd_link_hash_indirect + || h->root.type == bfd_link_hash_warning) + h = (struct elf_link_hash_entry *) h->root.u.i.link; } - else + + /* STT_GNU_IFUNC must keep GOTPCREL relocations. 
*/ + if (h != NULL && h->type == STT_GNU_IFUNC) + continue; + + converted = FALSE; + if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h, + &converted, link_info)) + goto error_return; + + if (converted) { - if (local_got_refcounts != NULL - && local_got_refcounts[r_symndx] > 0) - local_got_refcounts[r_symndx] -= 1; + changed = converted; + if (h) + { + if (h->got.refcount > 0) + h->got.refcount -= 1; + } + else + { + if (local_got_refcounts != NULL + && local_got_refcounts[r_symndx] > 0) + local_got_refcounts[r_symndx] -= 1; + } } } if (contents != NULL && elf_section_data (sec)->this_hdr.contents != contents) { - if (!changed_contents && !link_info->keep_memory) + if (!changed && !link_info->keep_memory) free (contents); else { @@ -3417,7 +2934,7 @@ rewrite_modrm_rex: if (elf_section_data (sec)->relocs != internal_relocs) { - if (!changed_relocs) + if (!changed) free (internal_relocs); else elf_section_data (sec)->relocs = internal_relocs; @@ -3441,14 +2958,14 @@ static bfd_boolean elf_x86_64_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; bfd *dynobj; asection *s; bfd_boolean relocs; bfd *ibfd; const struct elf_backend_data *bed; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; bed = get_elf_backend_data (output_bfd); @@ -3502,6 +3019,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, info->flags |= DF_TEXTREL; if ((info->warn_shared_textrel && bfd_link_pic (info)) || info->error_textrel) + /* xgettext:c-format */ info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"), p->sec->owner, p->sec); } @@ -3516,8 +3034,8 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, symtab_hdr = &elf_symtab_hdr (ibfd); locsymcount = symtab_hdr->sh_info; end_local_got = local_got + locsymcount; - local_tls_type = elf_x86_64_local_got_tls_type (ibfd); - 
local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd); + local_tls_type = elf_x86_local_got_tls_type (ibfd); + local_tlsdesc_gotent = elf_x86_local_tlsdesc_gotent (ibfd); s = htab->elf.sgot; srel = htab->elf.srelgot; for (; local_got < end_local_got; @@ -3561,16 +3079,16 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, } } - if (htab->tls_ld_got.refcount > 0) + if (htab->tls_ld_or_ldm_got.refcount > 0) { /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD relocs. */ - htab->tls_ld_got.offset = htab->elf.sgot->size; + htab->tls_ld_or_ldm_got.offset = htab->elf.sgot->size; htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE; htab->elf.srelgot->size += bed->s->sizeof_rela; } else - htab->tls_ld_got.offset = -1; + htab->tls_ld_or_ldm_got.offset = -1; /* Allocate global sym .plt and .got entries, and space for global sym dynamic relocs. */ @@ -3612,9 +3130,9 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, /* Reserve room for the initial entry. FIXME: we could probably do away with it in this case. 
*/ if (htab->elf.splt->size == 0) - htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); + htab->elf.splt->size = htab->plt.plt_entry_size; htab->tlsdesc_plt = htab->elf.splt->size; - htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); + htab->elf.splt->size += htab->plt.plt_entry_size; } } @@ -3637,15 +3155,29 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, htab->elf.sgotplt->size = 0; } - if (htab->plt_eh_frame != NULL - && htab->elf.splt != NULL - && htab->elf.splt->size != 0 - && !bfd_is_abs_section (htab->elf.splt->output_section) - && _bfd_elf_eh_frame_present (info)) + if (_bfd_elf_eh_frame_present (info)) { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; + if (htab->plt_eh_frame != NULL + && htab->elf.splt != NULL + && htab->elf.splt->size != 0 + && !bfd_is_abs_section (htab->elf.splt->output_section)) + htab->plt_eh_frame->size = htab->plt.eh_frame_plt_size; + + if (htab->plt_got_eh_frame != NULL + && htab->plt_got != NULL + && htab->plt_got->size != 0 + && !bfd_is_abs_section (htab->plt_got->output_section)) + htab->plt_got_eh_frame->size + = htab->non_lazy_plt->eh_frame_plt_size; + + /* Unwind info for the second PLT and .plt.got sections are + identical. */ + if (htab->plt_second_eh_frame != NULL + && htab->plt_second != NULL + && htab->plt_second->size != 0 + && !bfd_is_abs_section (htab->plt_second->output_section)) + htab->plt_second_eh_frame->size + = htab->non_lazy_plt->eh_frame_plt_size; } /* We now have determined the sizes of the various dynamic sections. 
@@ -3661,10 +3193,13 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, || s == htab->elf.sgotplt || s == htab->elf.iplt || s == htab->elf.igotplt - || s == htab->plt_bnd + || s == htab->plt_second || s == htab->plt_got || s == htab->plt_eh_frame - || s == htab->sdynbss) + || s == htab->plt_got_eh_frame + || s == htab->plt_second_eh_frame + || s == htab->elf.sdynbss + || s == htab->elf.sdynrelro) { /* Strip this section if we don't need it; see the comment below. */ @@ -3717,15 +3252,34 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, if (htab->plt_eh_frame != NULL && htab->plt_eh_frame->contents != NULL) { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - memcpy (htab->plt_eh_frame->contents, - arch_data->eh_frame_plt, htab->plt_eh_frame->size); + htab->plt.eh_frame_plt, htab->plt_eh_frame->size); bfd_put_32 (dynobj, htab->elf.splt->size, htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET); } + if (htab->plt_got_eh_frame != NULL + && htab->plt_got_eh_frame->contents != NULL) + { + memcpy (htab->plt_got_eh_frame->contents, + htab->non_lazy_plt->eh_frame_plt, + htab->plt_got_eh_frame->size); + bfd_put_32 (dynobj, htab->plt_got->size, + (htab->plt_got_eh_frame->contents + + PLT_FDE_LEN_OFFSET)); + } + + if (htab->plt_second_eh_frame != NULL + && htab->plt_second_eh_frame->contents != NULL) + { + memcpy (htab->plt_second_eh_frame->contents, + htab->non_lazy_plt->eh_frame_plt, + htab->plt_second_eh_frame->size); + bfd_put_32 (dynobj, htab->plt_second->size, + (htab->plt_second_eh_frame->contents + + PLT_FDE_LEN_OFFSET)); + } + if (htab->elf.dynamic_sections_created) { /* Add some entries to the .dynamic section. We fill in the @@ -3748,21 +3302,21 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, relocation. 
*/ if (!add_dynamic_entry (DT_PLTGOT, 0)) return FALSE; + } - if (htab->elf.srelplt->size != 0) - { - if (!add_dynamic_entry (DT_PLTRELSZ, 0) - || !add_dynamic_entry (DT_PLTREL, DT_RELA) - || !add_dynamic_entry (DT_JMPREL, 0)) - return FALSE; - } - - if (htab->tlsdesc_plt - && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) - || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) + if (htab->elf.srelplt->size != 0) + { + if (!add_dynamic_entry (DT_PLTRELSZ, 0) + || !add_dynamic_entry (DT_PLTREL, DT_RELA) + || !add_dynamic_entry (DT_JMPREL, 0)) return FALSE; } + if (htab->tlsdesc_plt + && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) + || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) + return FALSE; + if (relocs) { if (!add_dynamic_entry (DT_RELA, 0) @@ -3774,7 +3328,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, then we need a DT_TEXTREL entry. */ if ((info->flags & DF_TEXTREL) == 0) elf_link_hash_traverse (&htab->elf, - elf_x86_64_readonly_dynrelocs, + _bfd_x86_elf_readonly_dynrelocs, info); if ((info->flags & DF_TEXTREL) != 0) @@ -3797,88 +3351,6 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, return TRUE; } -static bfd_boolean -elf_x86_64_always_size_sections (bfd *output_bfd, - struct bfd_link_info *info) -{ - asection *tls_sec = elf_hash_table (info)->tls_sec; - - if (tls_sec) - { - struct elf_link_hash_entry *tlsbase; - - tlsbase = elf_link_hash_lookup (elf_hash_table (info), - "_TLS_MODULE_BASE_", - FALSE, FALSE, FALSE); - - if (tlsbase && tlsbase->type == STT_TLS) - { - struct elf_x86_64_link_hash_table *htab; - struct bfd_link_hash_entry *bh = NULL; - const struct elf_backend_data *bed - = get_elf_backend_data (output_bfd); - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return FALSE; - - if (!(_bfd_generic_link_add_one_symbol - (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, - tls_sec, 0, NULL, FALSE, - bed->collect, &bh))) - return FALSE; - - htab->tls_module_base = bh; - - tlsbase = (struct elf_link_hash_entry *)bh; - tlsbase->def_regular = 1; - 
tlsbase->other = STV_HIDDEN; - tlsbase->root.linker_def = 1; - (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); - } - } - - return TRUE; -} - -/* _TLS_MODULE_BASE_ needs to be treated especially when linking - executables. Rather than setting it to the beginning of the TLS - section, we have to set it to the end. This function may be called - multiple times, it is idempotent. */ - -static void -elf_x86_64_set_tls_module_base (struct bfd_link_info *info) -{ - struct elf_x86_64_link_hash_table *htab; - struct bfd_link_hash_entry *base; - - if (!bfd_link_executable (info)) - return; - - htab = elf_x86_64_hash_table (info); - if (htab == NULL) - return; - - base = htab->tls_module_base; - if (base == NULL) - return; - - base->u.def.value = htab->elf.tls_size; -} - -/* Return the base VMA address which should be subtracted from real addresses - when resolving @dtpoff relocation. - This is PT_TLS segment p_vaddr. */ - -static bfd_vma -elf_x86_64_dtpoff_base (struct bfd_link_info *info) -{ - /* If tls_sec is NULL, we should have signalled an error already. */ - if (elf_hash_table (info)->tls_sec == NULL) - return 0; - return elf_hash_table (info)->tls_sec->vma; -} - /* Return the relocation value for @tpoff relocation if STT_TLS virtual address is ADDRESS. 
*/ @@ -3928,7 +3400,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, Elf_Internal_Sym *local_syms, asection **local_sections) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; Elf_Internal_Shdr *symtab_hdr; struct elf_link_hash_entry **sym_hashes; bfd_vma *local_got_offsets; @@ -3936,7 +3408,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, Elf_Internal_Rela *rel; Elf_Internal_Rela *wrel; Elf_Internal_Rela *relend; - const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); + unsigned int plt_entry_size; BFD_ASSERT (is_x86_64_elf (input_bfd)); @@ -3944,15 +3416,16 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (input_section->check_relocs_failed) return FALSE; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; + plt_entry_size = htab->plt.plt_entry_size; symtab_hdr = &elf_symtab_hdr (input_bfd); sym_hashes = elf_sym_hashes (input_bfd); local_got_offsets = elf_local_got_offsets (input_bfd); - local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd); + local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd); - elf_x86_64_set_tls_module_base (info); + _bfd_x86_elf_set_tls_module_base (info); rel = wrel = relocs; relend = relocs + input_section->reloc_count; @@ -3962,7 +3435,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, reloc_howto_type *howto; unsigned long r_symndx; struct elf_link_hash_entry *h; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_entry *eh; Elf_Internal_Sym *sym; asection *sec; bfd_vma off, offplt, plt_offset; @@ -3973,6 +3446,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, asection *base_got, *resolved_plt; bfd_vma st_size; bfd_boolean resolved_to_zero; + bfd_boolean relative_reloc; r_type = ELF32_R_TYPE (rel->r_info); if (r_type == (int) R_X86_64_GNU_VTINHERIT @@ -3984,13 +3458,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, } if (r_type >= (int) R_X86_64_standard) - { - 
(*_bfd_error_handler) - (_("%B: unrecognized relocation (0x%x) in section `%A'"), - input_bfd, input_section, r_type); - bfd_set_error (bfd_error_bad_value); - return FALSE; - } + return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); if (r_type != (int) R_X86_64_32 || ABI_64_P (output_bfd)) @@ -4016,8 +3484,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (!bfd_link_relocatable (info) && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) { - h = elf_x86_64_get_local_sym_hash (htab, input_bfd, - rel, FALSE); + h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd, + rel, FALSE); if (h == NULL) abort (); @@ -4081,7 +3549,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, } } - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it here if it is defined in a non-shared object. */ @@ -4101,16 +3569,93 @@ elf_x86_64_relocate_section (bfd *output_bfd, continue; abort (); } - else if (h->plt.offset == (bfd_vma) -1) - abort (); + + switch (r_type) + { + default: + break; + + case R_X86_64_GOTPCREL: + case R_X86_64_GOTPCRELX: + case R_X86_64_REX_GOTPCRELX: + case R_X86_64_GOTPCREL64: + base_got = htab->elf.sgot; + off = h->got.offset; + + if (base_got == NULL) + abort (); + + if (off == (bfd_vma) -1) + { + /* We can't use h->got.offset here to save state, or + even just remember the offset, as finish_dynamic_symbol + would use that as offset into .got. */ + + if (h->plt.offset == (bfd_vma) -1) + abort (); + + if (htab->elf.splt != NULL) + { + plt_index = (h->plt.offset / plt_entry_size + - htab->plt.has_plt0); + off = (plt_index + 3) * GOT_ENTRY_SIZE; + base_got = htab->elf.sgotplt; + } + else + { + plt_index = h->plt.offset / plt_entry_size; + off = plt_index * GOT_ENTRY_SIZE; + base_got = htab->elf.igotplt; + } + + if (h->dynindx == -1 + || h->forced_local + || info->symbolic) + { + /* This references the local defitionion. 
We must + initialize this entry in the global offset table. + Since the offset must always be a multiple of 8, + we use the least significant bit to record + whether we have initialized it already. + + When doing a dynamic link, we create a .rela.got + relocation entry to initialize the value. This + is done in the finish_dynamic_symbol routine. */ + if ((off & 1) != 0) + off &= ~1; + else + { + bfd_put_64 (output_bfd, relocation, + base_got->contents + off); + /* Note that this is harmless for the GOTPLT64 + case, as -1 | 1 still is -1. */ + h->got.offset |= 1; + } + } + } + + relocation = (base_got->output_section->vma + + base_got->output_offset + off); + + goto do_relocation; + } + + if (h->plt.offset == (bfd_vma) -1) + { + /* Handle static pointers of STT_GNU_IFUNC symbols. */ + if (r_type == htab->pointer_r_type + && (input_section->flags & SEC_CODE) == 0) + goto do_ifunc_pointer; + goto bad_ifunc_reloc; + } /* STT_GNU_IFUNC symbol must go through PLT. */ if (htab->elf.splt != NULL) { - if (htab->plt_bnd != NULL) + if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4130,15 +3675,17 @@ elf_x86_64_relocate_section (bfd *output_bfd, switch (r_type) { default: +bad_ifunc_reloc: if (h->root.root.string) name = h->root.root.string; else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' isn't handled by %s"), input_bfd, - howto->name, name, __FUNCTION__); + "symbol `%s' isn't supported"), input_bfd, + howto->name, name); bfd_set_error (bfd_error_bad_value); return FALSE; @@ -4152,6 +3699,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, goto do_relocation; /* FALLTHROUGH */ case R_X86_64_64: +do_ifunc_pointer: if (rel->r_addend != 0) { if (h->root.root.string) @@ -4159,17 +3707,20 @@ 
elf_x86_64_relocate_section (bfd *output_bfd, else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' has non-zero addend: %d"), + "symbol `%s' has non-zero addend: %Ld"), input_bfd, howto->name, name, rel->r_addend); bfd_set_error (bfd_error_bad_value); return FALSE; } /* Generate dynamic relcoation only when there is a - non-GOT reference in a shared object. */ - if (bfd_link_pic (info) && h->non_got_ref) + non-GOT reference in a shared object or there is no + PLT. */ + if ((bfd_link_pic (info) && h->non_got_ref) + || h->plt.offset == (bfd_vma) -1) { Elf_Internal_Rela outrel; asection *sreloc; @@ -4191,6 +3742,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, || h->forced_local || bfd_link_executable (info)) { + info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"), + h->root.root.string, + h->root.u.def.section->owner); + /* This symbol is resolved locally. */ outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE); outrel.r_addend = (h->root.u.def.value @@ -4203,7 +3758,16 @@ elf_x86_64_relocate_section (bfd *output_bfd, outrel.r_addend = 0; } - sreloc = htab->elf.irelifunc; + /* Dynamic relocations are stored in + 1. .rela.ifunc section in PIC object. + 2. .rela.got section in dynamic executable. + 3. .rela.iplt section in static executable. 
*/ + if (bfd_link_pic (info)) + sreloc = htab->elf.irelifunc; + else if (htab->elf.splt != NULL) + sreloc = htab->elf.srelgot; + else + sreloc = htab->elf.irelplt; elf_append_rela (output_bfd, sreloc, &outrel); /* If this reloc is against an external symbol, we @@ -4218,73 +3782,14 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PC32_BND: case R_X86_64_PC64: case R_X86_64_PLT32: - case R_X86_64_PLT32_BND: - goto do_relocation; - - case R_X86_64_GOTPCREL: - case R_X86_64_GOTPCRELX: - case R_X86_64_REX_GOTPCRELX: - case R_X86_64_GOTPCREL64: - base_got = htab->elf.sgot; - off = h->got.offset; - - if (base_got == NULL) - abort (); - - if (off == (bfd_vma) -1) - { - /* We can't use h->got.offset here to save state, or - even just remember the offset, as finish_dynamic_symbol - would use that as offset into .got. */ - - if (htab->elf.splt != NULL) - { - plt_index = h->plt.offset / plt_entry_size - 1; - off = (plt_index + 3) * GOT_ENTRY_SIZE; - base_got = htab->elf.sgotplt; - } - else - { - plt_index = h->plt.offset / plt_entry_size; - off = plt_index * GOT_ENTRY_SIZE; - base_got = htab->elf.igotplt; - } - - if (h->dynindx == -1 - || h->forced_local - || info->symbolic) - { - /* This references the local defitionion. We must - initialize this entry in the global offset table. - Since the offset must always be a multiple of 8, - we use the least significant bit to record - whether we have initialized it already. - - When doing a dynamic link, we create a .rela.got - relocation entry to initialize the value. This - is done in the finish_dynamic_symbol routine. */ - if ((off & 1) != 0) - off &= ~1; - else - { - bfd_put_64 (output_bfd, relocation, - base_got->contents + off); - /* Note that this is harmless for the GOTPLT64 - case, as -1 | 1 still is -1. 
*/ - h->got.offset |= 1; - } - } - } - - relocation = (base_got->output_section->vma - + base_got->output_offset + off); - + case R_X86_64_PLT32_BND: goto do_relocation; } } resolved_to_zero = (eh != NULL && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + X86_64_ELF_DATA, eh->has_got_reloc, eh)); @@ -4302,12 +3807,13 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_GOTPCREL64: /* Use global offset table entry as symbol value. */ case R_X86_64_GOTPLT64: - /* This is obsolete and treated the the same as GOT64. */ + /* This is obsolete and treated the same as GOT64. */ base_got = htab->elf.sgot; if (htab->elf.sgot == NULL) abort (); + relative_reloc = FALSE; if (h != NULL) { bfd_boolean dyn; @@ -4321,7 +3827,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, state, or even just remember the offset, as finish_dynamic_symbol would use that as offset into .got. */ - bfd_vma plt_index = h->plt.offset / plt_entry_size - 1; + bfd_vma plt_index = (h->plt.offset / plt_entry_size + - htab->plt.has_plt0); off = (plt_index + 3) * GOT_ENTRY_SIZE; base_got = htab->elf.sgotplt; } @@ -4354,6 +3861,17 @@ elf_x86_64_relocate_section (bfd *output_bfd, /* Note that this is harmless for the GOTPLT64 case, as -1 | 1 still is -1. */ h->got.offset |= 1; + + if (h->dynindx == -1 + && !h->forced_local + && h->root.type != bfd_link_hash_undefweak + && bfd_link_pic (info)) + { + /* If this symbol isn't dynamic in PIC, + generate R_X86_64_RELATIVE here. */ + eh->no_finish_dynamic_symbol = 1; + relative_reloc = TRUE; + } } } else @@ -4375,30 +3893,32 @@ elf_x86_64_relocate_section (bfd *output_bfd, { bfd_put_64 (output_bfd, relocation, base_got->contents + off); + local_got_offsets[r_symndx] |= 1; if (bfd_link_pic (info)) - { - asection *s; - Elf_Internal_Rela outrel; - - /* We need to generate a R_X86_64_RELATIVE reloc - for the dynamic linker. 
*/ - s = htab->elf.srelgot; - if (s == NULL) - abort (); - - outrel.r_offset = (base_got->output_section->vma - + base_got->output_offset - + off); - outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); - outrel.r_addend = relocation; - elf_append_rela (output_bfd, s, &outrel); - } - - local_got_offsets[r_symndx] |= 1; + relative_reloc = TRUE; } } + if (relative_reloc) + { + asection *s; + Elf_Internal_Rela outrel; + + /* We need to generate a R_X86_64_RELATIVE reloc + for the dynamic linker. */ + s = htab->elf.srelgot; + if (s == NULL) + abort (); + + outrel.r_offset = (base_got->output_section->vma + + base_got->output_offset + + off); + outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); + outrel.r_addend = relocation; + elf_append_rela (output_bfd, s, &outrel); + } + if (off >= (bfd_vma) -2) abort (); @@ -4443,8 +3963,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; } - (*_bfd_error_handler) - (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s" + " `%s' can not be used when making a shared object"), input_bfd, v, h->root.root.string); bfd_set_error (bfd_error_bad_value); return FALSE; @@ -4455,8 +3977,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, || h->type == STT_OBJECT) && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) { - (*_bfd_error_handler) - (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%B: relocation R_X86_64_GOTOFF64 against protected %s" + " `%s' can not be used when making a shared object"), input_bfd, h->type == STT_FUNC ? "function" : "data", h->root.root.string); @@ -4487,13 +4011,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, symbols it's the symbol itself relative to GOT. */ if (h != NULL /* See PLT32 handling. 
*/ - && h->plt.offset != (bfd_vma) -1 + && (h->plt.offset != (bfd_vma) -1 + || eh->plt_got.offset != (bfd_vma) -1) && htab->elf.splt != NULL) { - if (htab->plt_bnd != NULL) + if (eh->plt_got.offset != (bfd_vma) -1) + { + /* Use the GOT PLT. */ + resolved_plt = htab->plt_got; + plt_offset = eh->plt_got.offset; + } + else if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4533,10 +4064,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (h->plt.offset != (bfd_vma) -1) { - if (htab->plt_bnd != NULL) + if (htab->plt_second != NULL) { - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { @@ -4568,16 +4099,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PC32: case R_X86_64_PC32_BND: /* Don't complain about -fPIC if the symbol is undefined when - building executable unless it is unresolved weak symbol. */ + building executable unless it is unresolved weak symbol or + -z nocopyreloc is used. */ if ((input_section->flags & SEC_ALLOC) != 0 && (input_section->flags & SEC_READONLY) != 0 && h != NULL && ((bfd_link_executable (info) - && h->root.type == bfd_link_hash_undefweak - && !resolved_to_zero) - || (bfd_link_pic (info) - && !(bfd_link_pie (info) - && h->root.type == bfd_link_hash_undefined)))) + && ((h->root.type == bfd_link_hash_undefweak + && !resolved_to_zero) + || ((info->nocopyreloc + || (eh->def_protected + && elf_has_no_copy_on_protected (h->root.u.def.section->owner))) + && h->def_dynamic + && !(h->root.u.def.section->flags & SEC_CODE)))) + || bfd_link_dll (info))) { bfd_boolean fail = FALSE; bfd_boolean branch @@ -4589,7 +4124,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, { /* Symbol is referenced locally. Make sure it is defined locally or for a branch. 
*/ - fail = !h->def_regular && !branch; + fail = (!(h->def_regular || ELF_COMMON_DEF_P (h)) + && !branch); } else if (!(bfd_link_pie (info) && (h->needs_copy || eh->needs_copy))) @@ -4602,7 +4138,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, } if (fail) - return elf_x86_64_need_pic (input_bfd, input_section, + return elf_x86_64_need_pic (info, input_bfd, input_section, h, NULL, NULL, howto); } /* Fall through. */ @@ -4630,7 +4166,9 @@ direct: && (h->needs_copy || eh->needs_copy || h->root.type == bfd_link_hash_undefined) - && IS_X86_64_PCREL_TYPE (r_type)) + && (IS_X86_64_PCREL_TYPE (r_type) + || r_type == R_X86_64_SIZE32 + || r_type == R_X86_64_SIZE64)) && (h == NULL || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT && !resolved_to_zero) @@ -4719,22 +4257,13 @@ direct: else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - if (addend < 0) - (*_bfd_error_handler) - (_("%B: addend -0x%x in relocation %s against " - "symbol `%s' at 0x%lx in section `%A' is " - "out of range"), - input_bfd, input_section, addend, - howto->name, name, - (unsigned long) rel->r_offset); - else - (*_bfd_error_handler) - (_("%B: addend 0x%x in relocation %s against " - "symbol `%s' at 0x%lx in section `%A' is " - "out of range"), - input_bfd, input_section, addend, - howto->name, name, - (unsigned long) rel->r_offset); + _bfd_error_handler + /* xgettext:c-format */ + (_("%B: addend %s%#x in relocation %s against " + "symbol `%s' at %#Lx in section `%A' is " + "out of range"), + input_bfd, addend < 0 ? 
"-" : "", addend, + howto->name, name, rel->r_offset, input_section); bfd_set_error (bfd_error_bad_value); return FALSE; } @@ -4800,15 +4329,15 @@ direct: case R_X86_64_GOTTPOFF: tls_type = GOT_UNKNOWN; if (h == NULL && local_got_offsets) - tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx]; + tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx]; else if (h != NULL) - tls_type = elf_x86_64_hash_entry (h)->tls_type; + tls_type = elf_x86_hash_entry (h)->tls_type; if (! elf_x86_64_tls_transition (info, input_bfd, input_section, contents, symtab_hdr, sym_hashes, &r_type, tls_type, rel, - relend, h, r_symndx)) + relend, h, r_symndx, TRUE)) return FALSE; if (r_type == R_X86_64_TPOFF32) @@ -4820,39 +4349,53 @@ direct: if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) { /* GD->LE transition. For 64bit, change - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movq %fs:0, %rax - leaq foo@tpoff(%rax), %rax + movq %fs:0, %rax + leaq foo@tpoff(%rax), %rax For 32bit, change - leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movl %fs:0, %eax - leaq foo@tpoff(%rax), %rax + movl %fs:0, %eax + leaq foo@tpoff(%rax), %rax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %r15, %rax + call *%rax into: - movq %fs:0, %rax - leaq foo@tpoff(%rax), %rax - nopw 0x0(%rax,%rax,1) */ + movq 
%fs:0, %rax + leaq foo@tpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ int largepic = 0; - if (ABI_64_P (output_bfd) - && contents[roff + 5] == (bfd_byte) '\xb8') + if (ABI_64_P (output_bfd)) { - memcpy (contents + roff - 3, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" - "\0\0\0\0\x66\x0f\x1f\x44\0", 22); - largepic = 1; + if (contents[roff + 5] == 0xb8) + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + else + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", + 16); } - else if (ABI_64_P (output_bfd)) - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", - 16); else memcpy (contents + roff - 3, "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", @@ -4860,7 +4403,8 @@ direct: bfd_put_32 (output_bfd, elf_x86_64_tpoff (info, relocation), contents + roff + 8 + largepic); - /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ + /* Skip R_X86_64_PC32, R_X86_64_PLT32, + R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */ rel++; wrel++; continue; @@ -4984,7 +4528,7 @@ direct: if (h != NULL) { off = h->got.offset; - offplt = elf_x86_64_hash_entry (h)->tlsdesc_got; + offplt = elf_x86_hash_entry (h)->tlsdesc_got; } else { @@ -5019,7 +4563,7 @@ direct: + htab->sgotplt_jump_table_size); sreloc = htab->elf.srelplt; if (indx == 0) - outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); + outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); else outrel.r_addend = 0; elf_append_rela (output_bfd, sreloc, &outrel); @@ -5041,7 +4585,7 @@ direct: outrel.r_addend = 0; if ((dr_type == R_X86_64_TPOFF64 || dr_type == R_X86_64_TLSDESC) && indx == 0) - outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); + outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); outrel.r_info = htab->r_info (indx, dr_type); elf_append_rela (output_bfd, sreloc, &outrel); @@ -5052,7 +4596,7 @@ direct: { BFD_ASSERT (! 
unresolved_reloc); bfd_put_64 (output_bfd, - relocation - elf_x86_64_dtpoff_base (info), + relocation - _bfd_x86_elf_dtpoff_base (info), htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); } else @@ -5096,39 +4640,53 @@ direct: if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) { /* GD->IE transition. For 64bit, change - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@plt + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip + which may be converted to + addr32 call __tls_get_addr into: - movq %fs:0, %rax - addq foo@gottpoff(%rip), %rax + movq %fs:0, %rax + addq foo@gottpoff(%rip), %rax For 32bit, change - leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@plt + leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movl %fs:0, %eax - addq foo@gottpoff(%rip), %rax + movl %fs:0, %eax + addq foo@gottpoff(%rip), %rax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %r15, %rax + call *%rax into: - movq %fs:0, %rax - addq foo@gottpoff(%rax), %rax - nopw 0x0(%rax,%rax,1) */ + movq %fs:0, %rax + addq foo@gottpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ int largepic = 0; - if (ABI_64_P (output_bfd) - && contents[roff + 5] == (bfd_byte) '\xb8') + if (ABI_64_P (output_bfd)) { - memcpy (contents + roff - 3, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" - "\0\0\0\0\x66\x0f\x1f\x44\0", 22); - largepic = 1; + if (contents[roff + 5] == 0xb8) + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + 
else + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", + 16); } - else if (ABI_64_P (output_bfd)) - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", - 16); else memcpy (contents + roff - 3, "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", @@ -5195,40 +4753,65 @@ direct: if (! elf_x86_64_tls_transition (info, input_bfd, input_section, contents, symtab_hdr, sym_hashes, - &r_type, GOT_UNKNOWN, - rel, relend, h, r_symndx)) + &r_type, GOT_UNKNOWN, rel, + relend, h, r_symndx, TRUE)) return FALSE; if (r_type != R_X86_64_TLSLD) { /* LD->LE transition: - leaq foo@tlsld(%rip), %rdi; call __tls_get_addr. + leaq foo@tlsld(%rip), %rdi + call __tls_get_addr@PLT For 64bit, we change it into: - .word 0x6666; .byte 0x66; movq %fs:0, %rax. + .word 0x6666; .byte 0x66; movq %fs:0, %rax For 32bit, we change it into: - nopl 0x0(%rax); movl %fs:0, %eax. + nopl 0x0(%rax); movl %fs:0, %eax + Or + leaq foo@tlsld(%rip), %rdi; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr + For 64bit, we change it into: + .word 0x6666; .word 0x6666; movq %fs:0, %rax + For 32bit, we change it into: + nopw 0x0(%rax); movl %fs:0, %eax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax - into: - data32 data32 data32 nopw %cs:0x0(%rax,%rax,1) - movq %fs:0, %eax */ + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %rbx, %rax + call *%rax + into + data16 data16 data16 nopw %cs:0x0(%rax,%rax,1) + movq %fs:0, %eax */ BFD_ASSERT (r_type == R_X86_64_TPOFF32); - if (ABI_64_P (output_bfd) - && contents[rel->r_offset + 5] == (bfd_byte) '\xb8') - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" - "\x64\x48\x8b\x04\x25\0\0\0", 22); - else if (ABI_64_P (output_bfd)) - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + if (ABI_64_P (output_bfd)) + { + 
if (contents[rel->r_offset + 5] == 0xb8) + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" + "\x64\x48\x8b\x04\x25\0\0\0", 22); + else if (contents[rel->r_offset + 4] == 0xff + || contents[rel->r_offset + 4] == 0x67) + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", + 13); + else + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + } else - memcpy (contents + rel->r_offset - 3, - "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); - /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ + { + if (contents[rel->r_offset + 4] == 0xff) + memcpy (contents + rel->r_offset - 3, + "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", + 13); + else + memcpy (contents + rel->r_offset - 3, + "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); + } + /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX + and R_X86_64_PLTOFF64. */ rel++; wrel++; continue; @@ -5237,7 +4820,7 @@ direct: if (htab->elf.sgot == NULL) abort (); - off = htab->tls_ld_got.offset; + off = htab->tls_ld_or_ldm_got.offset; if (off & 1) off &= ~1; else @@ -5258,7 +4841,7 @@ direct: outrel.r_addend = 0; elf_append_rela (output_bfd, htab->elf.srelgot, &outrel); - htab->tls_ld_got.offset |= 1; + htab->tls_ld_or_ldm_got.offset |= 1; } relocation = htab->elf.sgot->output_section->vma + htab->elf.sgot->output_offset + off; @@ -5268,7 +4851,7 @@ direct: case R_X86_64_DTPOFF32: if (!bfd_link_executable (info) || (input_section->flags & SEC_CODE) == 0) - relocation -= elf_x86_64_dtpoff_base (info); + relocation -= _bfd_x86_elf_dtpoff_base (info); else relocation = elf_x86_64_tpoff (info, relocation); break; @@ -5281,7 +4864,7 @@ direct: case R_X86_64_DTPOFF64: BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); - relocation -= elf_x86_64_dtpoff_base (info); + relocation -= _bfd_x86_elf_dtpoff_base (info); break; default: @@ -5297,14 +4880,29 @@ direct: && _bfd_elf_section_offset (output_bfd, info, input_section, 
rel->r_offset) != (bfd_vma) -1) { - (*_bfd_error_handler) - (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), - input_bfd, - input_section, - (long) rel->r_offset, - howto->name, - h->root.root.string); - return FALSE; + switch (r_type) + { + case R_X86_64_32S: + sec = h->root.u.def.section; + if ((info->nocopyreloc + || (eh->def_protected + && elf_has_no_copy_on_protected (h->root.u.def.section->owner))) + && !(h->root.u.def.section->flags & SEC_CODE)) + return elf_x86_64_need_pic (info, input_bfd, input_section, + h, NULL, NULL, howto); + /* Fall through. */ + + default: + _bfd_error_handler + /* xgettext:c-format */ + (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"), + input_bfd, + input_section, + rel->r_offset, + howto->name, + h->root.root.string); + return FALSE; + } } do_relocation: @@ -5331,19 +4929,16 @@ check_relocation_error: } if (r == bfd_reloc_overflow) - { - if (! ((*info->callbacks->reloc_overflow) - (info, (h ? &h->root : NULL), name, howto->name, - (bfd_vma) 0, input_bfd, input_section, - rel->r_offset))) - return FALSE; - } + (*info->callbacks->reloc_overflow) + (info, (h ? 
&h->root : NULL), name, howto->name, + (bfd_vma) 0, input_bfd, input_section, rel->r_offset); else { - (*_bfd_error_handler) - (_("%B(%A+0x%lx): reloc against `%s': error %d"), + _bfd_error_handler + /* xgettext:c-format */ + (_("%B(%A+%#Lx): reloc against `%s': error %d"), input_bfd, input_section, - (long) rel->r_offset, name, (int) r); + rel->r_offset, name, (int) r); return FALSE; } } @@ -5384,37 +4979,34 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, struct elf_link_hash_entry *h, Elf_Internal_Sym *sym) { - struct elf_x86_64_link_hash_table *htab; - const struct elf_x86_64_backend_data *abed; - bfd_boolean use_plt_bnd; - struct elf_x86_64_link_hash_entry *eh; + struct elf_x86_link_hash_table *htab; + bfd_boolean use_plt_second; + struct elf_x86_link_hash_entry *eh; bfd_boolean local_undefweak; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; - /* Use MPX backend data in case of BND relocation. Use .plt_bnd - section only if there is .plt section. */ - use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL; - abed = (use_plt_bnd - ? &elf_x86_64_bnd_arch_bed - : get_elf_x86_64_backend_data (output_bfd)); + /* Use the second PLT section only if there is .plt section. */ + use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL; - eh = (struct elf_x86_64_link_hash_entry *) h; + eh = (struct elf_x86_link_hash_entry *) h; + if (eh->no_finish_dynamic_symbol) + abort (); /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for resolved undefined weak symbols in executable so that their references have value 0 at run-time. 
*/ local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + X86_64_ELF_DATA, eh->has_got_reloc, eh); if (h->plt.offset != (bfd_vma) -1) { bfd_vma plt_index; - bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset; - bfd_vma plt_plt_insn_end, plt_got_insn_size; + bfd_vma got_offset, plt_offset; Elf_Internal_Rela rela; bfd_byte *loc; asection *plt, *gotplt, *relplt, *resolved_plt; @@ -5461,60 +5053,30 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, if (plt == htab->elf.splt) { - got_offset = h->plt.offset / abed->plt_entry_size - 1; + got_offset = (h->plt.offset / htab->plt.plt_entry_size + - htab->plt.has_plt0); got_offset = (got_offset + 3) * GOT_ENTRY_SIZE; } else { - got_offset = h->plt.offset / abed->plt_entry_size; + got_offset = h->plt.offset / htab->plt.plt_entry_size; got_offset = got_offset * GOT_ENTRY_SIZE; } - plt_plt_insn_end = abed->plt_plt_insn_end; - plt_plt_offset = abed->plt_plt_offset; - plt_got_insn_size = abed->plt_got_insn_size; - plt_got_offset = abed->plt_got_offset; - if (use_plt_bnd) + /* Fill in the entry in the procedure linkage table. */ + memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry, + htab->plt.plt_entry_size); + if (use_plt_second) { - /* Use the second PLT with BND relocations. */ - const bfd_byte *plt_entry, *plt2_entry; - - if (eh->has_bnd_reloc) - { - plt_entry = elf_x86_64_bnd_plt_entry; - plt2_entry = elf_x86_64_bnd_plt2_entry; - } - else - { - plt_entry = elf_x86_64_legacy_plt_entry; - plt2_entry = elf_x86_64_legacy_plt2_entry; - - /* Subtract 1 since there is no BND prefix. */ - plt_plt_insn_end -= 1; - plt_plt_offset -= 1; - plt_got_insn_size -= 1; - plt_got_offset -= 1; - } + memcpy (htab->plt_second->contents + eh->plt_second.offset, + htab->non_lazy_plt->plt_entry, + htab->non_lazy_plt->plt_entry_size); - BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry) - == sizeof (elf_x86_64_legacy_plt_entry)); - - /* Fill in the entry in the procedure linkage table. 
*/ - memcpy (plt->contents + h->plt.offset, - plt_entry, sizeof (elf_x86_64_legacy_plt_entry)); - /* Fill in the entry in the second PLT. */ - memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset, - plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry)); - - resolved_plt = htab->plt_bnd; - plt_offset = eh->plt_bnd.offset; + resolved_plt = htab->plt_second; + plt_offset = eh->plt_second.offset; } else { - /* Fill in the entry in the procedure linkage table. */ - memcpy (plt->contents + h->plt.offset, abed->plt_entry, - abed->plt_entry_size); - resolved_plt = plt; plt_offset = h->plt.offset; } @@ -5529,15 +5091,17 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, - resolved_plt->output_section->vma - resolved_plt->output_offset - plt_offset - - plt_got_insn_size); + - htab->plt.plt_got_insn_size); /* Check PC-relative offset overflow in PLT entry. */ if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, plt_got_pcrel_offset, - resolved_plt->contents + plt_offset + plt_got_offset); + (resolved_plt->contents + plt_offset + + htab->plt.plt_got_offset)); /* Fill in the entry in the global offset table, initially this points to the second part of the PLT entry. Leave the entry @@ -5545,11 +5109,12 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, against undefined weak symbol in PIE. */ if (!local_undefweak) { - bfd_put_64 (output_bfd, (plt->output_section->vma - + plt->output_offset - + h->plt.offset - + abed->plt_lazy_offset), - gotplt->contents + got_offset); + if (htab->plt.has_plt0) + bfd_put_64 (output_bfd, (plt->output_section->vma + + plt->output_offset + + h->plt.offset + + htab->lazy_plt->plt_lazy_offset), + gotplt->contents + got_offset); /* Fill in the entry in the .rela.plt section. 
*/ rela.r_offset = (gotplt->output_section->vma @@ -5561,6 +5126,10 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, && h->def_regular && h->type == STT_GNU_IFUNC)) { + info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"), + h->root.root.string, + h->root.u.def.section->owner); + /* If an STT_GNU_IFUNC symbol is locally defined, generate R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */ rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE); @@ -5577,24 +5146,28 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, plt_index = htab->next_jump_slot_index++; } - /* Don't fill PLT entry for static executables. */ - if (plt == htab->elf.splt) + /* Don't fill the second and third slots in PLT entry for + static executables nor without PLT0. */ + if (plt == htab->elf.splt && htab->plt.has_plt0) { - bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end; + bfd_vma plt0_offset + = h->plt.offset + htab->lazy_plt->plt_plt_insn_end; /* Put relocation index. */ bfd_put_32 (output_bfd, plt_index, (plt->contents + h->plt.offset - + abed->plt_reloc_offset)); + + htab->lazy_plt->plt_reloc_offset)); /* Put offset for jmp .PLT0 and check for overflow. We don't check relocation index for overflow since branch displacement will overflow first. 
*/ if (plt0_offset > 0x80000000) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, - plt0_offset, - plt->contents + h->plt.offset + plt_plt_offset); + (plt->contents + h->plt.offset + + htab->lazy_plt->plt_plt_offset)); } bed = get_elf_backend_data (output_bfd); @@ -5604,11 +5177,10 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, } else if (eh->plt_got.offset != (bfd_vma) -1) { - bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size; + bfd_vma got_offset, plt_offset; asection *plt, *got; bfd_boolean got_after_plt; int32_t got_pcrel_offset; - const bfd_byte *got_plt_entry; /* Set the entry in the GOT procedure linkage table. */ plt = htab->plt_got; @@ -5616,30 +5188,18 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, got_offset = h->got.offset; if (got_offset == (bfd_vma) -1 - || h->type == STT_GNU_IFUNC + || (h->type == STT_GNU_IFUNC && h->def_regular) || plt == NULL || got == NULL) abort (); - /* Use the second PLT entry template for the GOT PLT since they + /* Use the non-lazy PLT entry template for the GOT PLT since they are the identical. */ - plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size; - plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset; - if (eh->has_bnd_reloc) - got_plt_entry = elf_x86_64_bnd_plt2_entry; - else - { - got_plt_entry = elf_x86_64_legacy_plt2_entry; - - /* Subtract 1 since there is no BND prefix. */ - plt_got_insn_size -= 1; - plt_got_offset -= 1; - } - /* Fill in the entry in the GOT procedure linkage table. */ plt_offset = eh->plt_got.offset; memcpy (plt->contents + plt_offset, - got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry)); + htab->non_lazy_plt->plt_entry, + htab->non_lazy_plt->plt_entry_size); /* Put offset the PC-relative instruction referring to the GOT entry, subtracting the size of that instruction. 
*/ @@ -5649,17 +5209,19 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, - plt->output_section->vma - plt->output_offset - plt_offset - - plt_got_insn_size); + - htab->non_lazy_plt->plt_got_insn_size); /* Check PC-relative offset overflow in GOT PLT entry. */ got_after_plt = got->output_section->vma > plt->output_section->vma; if ((got_after_plt && got_pcrel_offset < 0) || (!got_after_plt && got_pcrel_offset > 0)) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, got_pcrel_offset, - plt->contents + plt_offset + plt_got_offset); + (plt->contents + plt_offset + + htab->non_lazy_plt->plt_got_offset)); } if (!local_undefweak @@ -5683,11 +5245,12 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, /* Don't generate dynamic GOT relocation against undefined weak symbol in executable. */ if (h->got.offset != (bfd_vma) -1 - && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type) - && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE + && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type) + && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE && !local_undefweak) { Elf_Internal_Rela rela; + asection *relgot = htab->elf.srelgot; /* This symbol has an entry in the global offset table. Set it up. */ @@ -5706,7 +5269,32 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, if (h->def_regular && h->type == STT_GNU_IFUNC) { - if (bfd_link_pic (info)) + if (h->plt.offset == (bfd_vma) -1) + { + /* STT_GNU_IFUNC is referenced without PLT. */ + if (htab->elf.splt == NULL) + { + /* use .rel[a].iplt section to store .got relocations + in static executable. 
*/ + relgot = htab->elf.irelplt; + } + if (SYMBOL_REFERENCES_LOCAL (info, h)) + { + info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"), + output_bfd, + h->root.root.string, + h->root.u.def.section->owner); + + rela.r_info = htab->r_info (0, + R_X86_64_IRELATIVE); + rela.r_addend = (h->root.u.def.value + + h->root.u.def.section->output_section->vma + + h->root.u.def.section->output_offset); + } + else + goto do_glob_dat; + } + else if (bfd_link_pic (info)) { /* Generate R_X86_64_GLOB_DAT. */ goto do_glob_dat; @@ -5714,6 +5302,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, else { asection *plt; + bfd_vma plt_offset; if (!h->pointer_equality_needed) abort (); @@ -5721,10 +5310,19 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, /* For non-shared object, we can't use .got.plt, which contains the real function addres if we need pointer equality. We load the GOT entry with the PLT entry. */ - plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; + if (htab->plt_second != NULL) + { + plt = htab->plt_second; + plt_offset = eh->plt_second.offset; + } + else + { + plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; + plt_offset = h->plt.offset; + } bfd_put_64 (output_bfd, (plt->output_section->vma + plt->output_offset - + h->plt.offset), + + plt_offset), htab->elf.sgot->contents + h->got.offset); return TRUE; } @@ -5750,19 +5348,21 @@ do_glob_dat: rela.r_addend = 0; } - elf_append_rela (output_bfd, htab->elf.srelgot, &rela); + elf_append_rela (output_bfd, relgot, &rela); } if (h->needs_copy) { Elf_Internal_Rela rela; + asection *s; /* This symbol needs a copy reloc. Set it up. 
*/ if (h->dynindx == -1 || (h->root.type != bfd_link_hash_defined && h->root.type != bfd_link_hash_defweak) - || htab->srelbss == NULL) + || htab->elf.srelbss == NULL + || htab->elf.sreldynrelro == NULL) abort (); rela.r_offset = (h->root.u.def.value @@ -5770,7 +5370,11 @@ do_glob_dat: + h->root.u.def.section->output_offset); rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); rela.r_addend = 0; - elf_append_rela (output_bfd, htab->srelbss, &rela); + if (h->root.u.def.section == htab->elf.sdynrelro) + s = htab->elf.sreldynrelro; + else + s = htab->elf.srelbss; + elf_append_rela (output_bfd, s, &rela); } return TRUE; @@ -5788,7 +5392,7 @@ elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) = (struct bfd_link_info *) inf; return elf_x86_64_finish_dynamic_symbol (info->output_bfd, - info, h, NULL); + info, h, NULL); } /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry @@ -5807,7 +5411,7 @@ elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh, return TRUE; return elf_x86_64_finish_dynamic_symbol (info->output_bfd, - info, h, NULL); + info, h, NULL); } /* Used to decide how to sort relocs in an optimal manner for the @@ -5820,7 +5424,8 @@ elf_x86_64_reloc_type_class (const struct bfd_link_info *info, { bfd *abfd = info->output_bfd; const struct elf_backend_data *bed = get_elf_backend_data (abfd); - struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info); + struct elf_x86_link_hash_table *htab + = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab->elf.dynsym != NULL && htab->elf.dynsym->contents != NULL) @@ -5828,19 +5433,24 @@ elf_x86_64_reloc_type_class (const struct bfd_link_info *info, /* Check relocation against STT_GNU_IFUNC symbol if there are dynamic symbols. 
*/ unsigned long r_symndx = htab->r_sym (rela->r_info); - Elf_Internal_Sym sym; - if (!bed->s->swap_symbol_in (abfd, - (htab->elf.dynsym->contents - + r_symndx * bed->s->sizeof_sym), - 0, &sym)) - abort (); + if (r_symndx != STN_UNDEF) + { + Elf_Internal_Sym sym; + if (!bed->s->swap_symbol_in (abfd, + (htab->elf.dynsym->contents + + r_symndx * bed->s->sizeof_sym), + 0, &sym)) + abort (); - if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) - return reloc_class_ifunc; + if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) + return reloc_class_ifunc; + } } switch ((int) ELF32_R_TYPE (rela->r_info)) { + case R_X86_64_IRELATIVE: + return reloc_class_ifunc; case R_X86_64_RELATIVE: case R_X86_64_RELATIVE64: return reloc_class_relative; @@ -5859,21 +5469,14 @@ static bfd_boolean elf_x86_64_finish_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info) { - struct elf_x86_64_link_hash_table *htab; + struct elf_x86_link_hash_table *htab; bfd *dynobj; asection *sdyn; - const struct elf_x86_64_backend_data *abed; - htab = elf_x86_64_hash_table (info); + htab = elf_x86_hash_table (info, X86_64_ELF_DATA); if (htab == NULL) return FALSE; - /* Use MPX backend data in case of BND relocation. Use .plt_bnd - section only if there is .plt section. */ - abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL - ? &elf_x86_64_bnd_arch_bed - : get_elf_x86_64_backend_data (output_bfd)); - dynobj = htab->elf.dynobj; sdyn = bfd_get_linker_section (dynobj, ".dynamic"); @@ -5916,21 +5519,6 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, dyn.d_un.d_val = s->size; break; - case DT_RELASZ: - /* The procedure linkage table relocs (DT_JMPREL) should - not be included in the overall relocs (DT_RELA). - Therefore, we override the DT_RELASZ entry here to - make it not include the JMPREL relocs. Since the - linker script arranges for .rela.plt to follow all - other relocation sections, we don't have to worry - about changing the DT_RELA entry. 
*/ - if (htab->elf.srelplt != NULL) - { - s = htab->elf.srelplt->output_section; - dyn.d_un.d_val -= s->size; - } - break; - case DT_TLSDESC_PLT: s = htab->elf.splt; dyn.d_un.d_ptr = s->output_section->vma + s->output_offset @@ -5947,104 +5535,119 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon); } - /* Fill in the special first entry in the procedure linkage table. */ if (htab->elf.splt && htab->elf.splt->size > 0) { - /* Fill in the first entry in the procedure linkage table. */ - memcpy (htab->elf.splt->contents, - abed->plt0_entry, abed->plt_entry_size); - /* Add offset for pushq GOT+8(%rip), since the instruction - uses 6 bytes subtract this value. */ - bfd_put_32 (output_bfd, - (htab->elf.sgotplt->output_section->vma - + htab->elf.sgotplt->output_offset - + 8 - - htab->elf.splt->output_section->vma - - htab->elf.splt->output_offset - - 6), - htab->elf.splt->contents + abed->plt0_got1_offset); - /* Add offset for the PC-relative instruction accessing GOT+16, - subtracting the offset to the end of that instruction. */ - bfd_put_32 (output_bfd, - (htab->elf.sgotplt->output_section->vma - + htab->elf.sgotplt->output_offset - + 16 - - htab->elf.splt->output_section->vma - - htab->elf.splt->output_offset - - abed->plt0_got2_insn_end), - htab->elf.splt->contents + abed->plt0_got2_offset); - elf_section_data (htab->elf.splt->output_section) - ->this_hdr.sh_entsize = abed->plt_entry_size; + ->this_hdr.sh_entsize = htab->plt.plt_entry_size; - if (htab->tlsdesc_plt) + if (htab->plt.has_plt0) { - bfd_put_64 (output_bfd, (bfd_vma) 0, - htab->elf.sgot->contents + htab->tlsdesc_got); - - memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, - abed->plt0_entry, abed->plt_entry_size); - - /* Add offset for pushq GOT+8(%rip), since the - instruction uses 6 bytes subtract this value. */ + /* Fill in the special first entry in the procedure linkage + table. 
*/ + memcpy (htab->elf.splt->contents, + htab->lazy_plt->plt0_entry, + htab->lazy_plt->plt0_entry_size); + /* Add offset for pushq GOT+8(%rip), since the instruction + uses 6 bytes subtract this value. */ bfd_put_32 (output_bfd, (htab->elf.sgotplt->output_section->vma + htab->elf.sgotplt->output_offset + 8 - htab->elf.splt->output_section->vma - htab->elf.splt->output_offset - - htab->tlsdesc_plt - 6), - htab->elf.splt->contents - + htab->tlsdesc_plt + abed->plt0_got1_offset); - /* Add offset for the PC-relative instruction accessing GOT+TDG, - where TGD stands for htab->tlsdesc_got, subtracting the offset - to the end of that instruction. */ + (htab->elf.splt->contents + + htab->lazy_plt->plt0_got1_offset)); + /* Add offset for the PC-relative instruction accessing + GOT+16, subtracting the offset to the end of that + instruction. */ bfd_put_32 (output_bfd, - (htab->elf.sgot->output_section->vma - + htab->elf.sgot->output_offset - + htab->tlsdesc_got + (htab->elf.sgotplt->output_section->vma + + htab->elf.sgotplt->output_offset + + 16 - htab->elf.splt->output_section->vma - htab->elf.splt->output_offset - - htab->tlsdesc_plt - - abed->plt0_got2_insn_end), - htab->elf.splt->contents - + htab->tlsdesc_plt + abed->plt0_got2_offset); + - htab->lazy_plt->plt0_got2_insn_end), + (htab->elf.splt->contents + + htab->lazy_plt->plt0_got2_offset)); + + if (htab->tlsdesc_plt) + { + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->elf.sgot->contents + htab->tlsdesc_got); + + memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, + htab->lazy_plt->plt0_entry, + htab->lazy_plt->plt0_entry_size); + + /* Add offset for pushq GOT+8(%rip), since the + instruction uses 6 bytes subtract this value. 
*/ + bfd_put_32 (output_bfd, + (htab->elf.sgotplt->output_section->vma + + htab->elf.sgotplt->output_offset + + 8 + - htab->elf.splt->output_section->vma + - htab->elf.splt->output_offset + - htab->tlsdesc_plt + - 6), + (htab->elf.splt->contents + + htab->tlsdesc_plt + + htab->lazy_plt->plt0_got1_offset)); + /* Add offset for the PC-relative instruction accessing + GOT+TDG, where TDG stands for htab->tlsdesc_got, + subtracting the offset to the end of that + instruction. */ + bfd_put_32 (output_bfd, + (htab->elf.sgot->output_section->vma + + htab->elf.sgot->output_offset + + htab->tlsdesc_got + - htab->elf.splt->output_section->vma + - htab->elf.splt->output_offset + - htab->tlsdesc_plt + - htab->lazy_plt->plt0_got2_insn_end), + (htab->elf.splt->contents + + htab->tlsdesc_plt + + htab->lazy_plt->plt0_got2_offset)); + } } } - } - if (htab->plt_bnd != NULL) - elf_section_data (htab->plt_bnd->output_section) - ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry); + if (htab->plt_got != NULL && htab->plt_got->size > 0) + elf_section_data (htab->plt_got->output_section) + ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size; - if (htab->elf.sgotplt) + if (htab->plt_second != NULL && htab->plt_second->size > 0) + elf_section_data (htab->plt_second->output_section) + ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size; + } + + /* GOT is always created in setup_gnu_properties. But it may not be + needed. */ + if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0) { if (bfd_is_abs_section (htab->elf.sgotplt->output_section)) { - (*_bfd_error_handler) + _bfd_error_handler (_("discarded output section: `%A'"), htab->elf.sgotplt); return FALSE; } - /* Fill in the first three entries in the global offset table. */ - if (htab->elf.sgotplt->size > 0) - { - /* Set the first entry in the global offset table to the address of - the dynamic section. 
*/ - if (sdyn == NULL) - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents); - else - bfd_put_64 (output_bfd, - sdyn->output_section->vma + sdyn->output_offset, - htab->elf.sgotplt->contents); - /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE); - bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2); - } - - elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize = - GOT_ENTRY_SIZE; + /* Set the first entry in the global offset table to the address of + the dynamic section. */ + if (sdyn == NULL) + bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents); + else + bfd_put_64 (output_bfd, + sdyn->output_section->vma + sdyn->output_offset, + htab->elf.sgotplt->contents); + /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->elf.sgotplt->contents + GOT_ENTRY_SIZE); + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2); + + elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize + = GOT_ENTRY_SIZE; } /* Adjust .eh_frame for .plt section. */ @@ -6074,15 +5677,66 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, } } + /* Adjust .eh_frame for .plt.got section. 
*/ + if (htab->plt_got_eh_frame != NULL + && htab->plt_got_eh_frame->contents != NULL) + { + if (htab->plt_got != NULL + && htab->plt_got->size != 0 + && (htab->plt_got->flags & SEC_EXCLUDE) == 0 + && htab->plt_got->output_section != NULL + && htab->plt_got_eh_frame->output_section != NULL) + { + bfd_vma plt_start = htab->plt_got->output_section->vma; + bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma + + htab->plt_got_eh_frame->output_offset + + PLT_FDE_START_OFFSET; + bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, + htab->plt_got_eh_frame->contents + + PLT_FDE_START_OFFSET); + } + if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME) + { + if (! _bfd_elf_write_section_eh_frame (output_bfd, info, + htab->plt_got_eh_frame, + htab->plt_got_eh_frame->contents)) + return FALSE; + } + } + + /* Adjust .eh_frame for the second PLT section. */ + if (htab->plt_second_eh_frame != NULL + && htab->plt_second_eh_frame->contents != NULL) + { + if (htab->plt_second != NULL + && htab->plt_second->size != 0 + && (htab->plt_second->flags & SEC_EXCLUDE) == 0 + && htab->plt_second->output_section != NULL + && htab->plt_second_eh_frame->output_section != NULL) + { + bfd_vma plt_start = htab->plt_second->output_section->vma; + bfd_vma eh_frame_start + = (htab->plt_second_eh_frame->output_section->vma + + htab->plt_second_eh_frame->output_offset + + PLT_FDE_START_OFFSET); + bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, + htab->plt_second_eh_frame->contents + + PLT_FDE_START_OFFSET); + } + if (htab->plt_second_eh_frame->sec_info_type + == SEC_INFO_TYPE_EH_FRAME) + { + if (! _bfd_elf_write_section_eh_frame (output_bfd, info, + htab->plt_second_eh_frame, + htab->plt_second_eh_frame->contents)) + return FALSE; + } + } + if (htab->elf.sgot && htab->elf.sgot->size > 0) elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE; - /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. 
*/ - htab_traverse (htab->loc_hash_table, - elf_x86_64_finish_local_dynamic_symbol, - info); - /* Fill PLT entries for undefined weak symbols in PIE. */ if (bfd_link_pie (info)) bfd_hash_traverse (&info->hash->table, @@ -6092,126 +5746,231 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, return TRUE; } -/* Return an array of PLT entry symbol values. */ +/* Fill PLT/GOT entries and allocate dynamic relocations for local + STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table. + It has to be done before elf_link_sort_relocs is called so that + dynamic relocations are properly sorted. */ -static bfd_vma * -elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt, - asection *relplt) +static bfd_boolean +elf_x86_64_output_arch_local_syms + (bfd *output_bfd ATTRIBUTE_UNUSED, + struct bfd_link_info *info, + void *flaginfo ATTRIBUTE_UNUSED, + int (*func) (void *, const char *, + Elf_Internal_Sym *, + asection *, + struct elf_link_hash_entry *) ATTRIBUTE_UNUSED) { - bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); - arelent *p; - long count, i; - bfd_vma *plt_sym_val; - bfd_vma plt_offset; - bfd_byte *plt_contents; - const struct elf_x86_64_backend_data *bed; - Elf_Internal_Shdr *hdr; - asection *plt_bnd; - - /* Get the .plt section contents. PLT passed down may point to the - .plt.bnd section. Make sure that PLT always points to the .plt - section. */ - plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd"); - if (plt_bnd) - { - if (plt != plt_bnd) - abort (); - plt = bfd_get_section_by_name (abfd, ".plt"); - if (plt == NULL) - abort (); - bed = &elf_x86_64_bnd_arch_bed; - } - else - bed = get_elf_x86_64_backend_data (abfd); + struct elf_x86_link_hash_table *htab + = elf_x86_hash_table (info, X86_64_ELF_DATA); + if (htab == NULL) + return FALSE; + + /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. 
*/ + htab_traverse (htab->loc_hash_table, + elf_x86_64_finish_local_dynamic_symbol, + info); + + return TRUE; +} + +/* Forward declaration. */ +static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt; - plt_contents = (bfd_byte *) bfd_malloc (plt->size); - if (plt_contents == NULL) - return NULL; - if (!bfd_get_section_contents (abfd, (asection *) plt, - plt_contents, 0, plt->size)) +/* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all + dynamic relocations. */ + +static long +elf_x86_64_get_synthetic_symtab (bfd *abfd, + long symcount ATTRIBUTE_UNUSED, + asymbol **syms ATTRIBUTE_UNUSED, + long dynsymcount, + asymbol **dynsyms, + asymbol **ret) +{ + long count, i, n; + int j; + bfd_byte *plt_contents; + long relsize; + const struct elf_x86_lazy_plt_layout *lazy_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_plt; + const struct elf_x86_lazy_plt_layout *lazy_bnd_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt; + const struct elf_x86_lazy_plt_layout *lazy_ibt_plt; + const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt; + asection *plt; + enum elf_x86_plt_type plt_type; + struct elf_x86_plt plts[] = { -bad_return: - free (plt_contents); - return NULL; - } + { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 }, + { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }, + { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 }, + { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 }, + { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 } + }; - slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; - if (! 
(*slurp_relocs) (abfd, relplt, dynsyms, TRUE)) - goto bad_return; + *ret = NULL; - hdr = &elf_section_data (relplt)->this_hdr; - count = relplt->size / hdr->sh_entsize; + if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) + return 0; - plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count); - if (plt_sym_val == NULL) - goto bad_return; + if (dynsymcount <= 0) + return 0; - for (i = 0; i < count; i++) - plt_sym_val[i] = -1; + relsize = bfd_get_dynamic_reloc_upper_bound (abfd); + if (relsize <= 0) + return -1; - plt_offset = bed->plt_entry_size; - p = relplt->relocation; - for (i = 0; i < count; i++, p++) + if (get_elf_x86_64_backend_data (abfd)->os == is_normal) + { + lazy_plt = &elf_x86_64_lazy_plt; + non_lazy_plt = &elf_x86_64_non_lazy_plt; + lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt; + non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt; + if (ABI_64_P (abfd)) + { + lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; + non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; + } + else + { + lazy_ibt_plt = &elf_x32_lazy_ibt_plt; + non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; + } + } + else { - long reloc_index; + lazy_plt = &elf_x86_64_nacl_plt; + non_lazy_plt = NULL; + lazy_bnd_plt = NULL; + non_lazy_bnd_plt = NULL; + lazy_ibt_plt = NULL; + non_lazy_ibt_plt = NULL; + } - /* Skip unknown relocation. */ - if (p->howto == NULL) + count = 0; + for (j = 0; plts[j].name != NULL; j++) + { + plt = bfd_get_section_by_name (abfd, plts[j].name); + if (plt == NULL || plt->size == 0) continue; - if (p->howto->type != R_X86_64_JUMP_SLOT - && p->howto->type != R_X86_64_IRELATIVE) - continue; + /* Get the PLT section contents. */ + plt_contents = (bfd_byte *) bfd_malloc (plt->size); + if (plt_contents == NULL) + break; + if (!bfd_get_section_contents (abfd, (asection *) plt, + plt_contents, 0, plt->size)) + { + free (plt_contents); + break; + } - reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset - + bed->plt_reloc_offset)); - if (reloc_index < count) + /* Check what kind of PLT it is. 
*/
+      plt_type = plt_unknown;
+      if (plts[j].type == plt_unknown
+	  && (plt->size >= (lazy_plt->plt_entry_size
+			    + lazy_plt->plt_entry_size)))
 	{
-	  if (plt_bnd)
+	  /* Match lazy PLT first.  Need to check the first two
+	     instructions.   */
+	  if ((memcmp (plt_contents, lazy_plt->plt0_entry,
+		       lazy_plt->plt0_got1_offset) == 0)
+	      && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
+			  2) == 0))
+	    plt_type = plt_lazy;
+	  else if (lazy_bnd_plt != NULL
+		   && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
+			       lazy_bnd_plt->plt0_got1_offset) == 0)
+		   && (memcmp (plt_contents + 6,
+			       lazy_bnd_plt->plt0_entry + 6, 3) == 0))
 	    {
-	      /* This is the index in .plt section.  */
-	      long plt_index = plt_offset / bed->plt_entry_size;
-	      /* Store VMA + the offset in .plt.bnd section.  */
-	      plt_sym_val[reloc_index] =
-		(plt_bnd->vma
-		 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
+	      plt_type = plt_lazy | plt_second;
+	      /* The first entry in the lazy IBT PLT is the same as the
+		 lazy BND PLT.  */
+	      if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
+			   lazy_ibt_plt->plt_entry,
+			   lazy_ibt_plt->plt_got_offset) == 0))
+		lazy_plt = lazy_ibt_plt;
+	      else
+		lazy_plt = lazy_bnd_plt;
 	    }
-	  else
-	    plt_sym_val[reloc_index] = plt->vma + plt_offset;
 	}
-      plt_offset += bed->plt_entry_size;
-      /* PR binutils/18437: Skip extra relocations in the .rela.plt
-	 section.  */
-      if (plt_offset >= plt->size)
-	break;
-    }
+      if (non_lazy_plt != NULL
+	  && (plt_type == plt_unknown || plt_type == plt_non_lazy)
+	  && plt->size >= non_lazy_plt->plt_entry_size)
+	{
+	  /* Match non-lazy PLT.  */
+	  if (memcmp (plt_contents, non_lazy_plt->plt_entry,
+		      non_lazy_plt->plt_got_offset) == 0)
+	    plt_type = plt_non_lazy;
+	}
 
-      free (plt_contents);
+      if (plt_type == plt_unknown || plt_type == plt_second)
+	{
+	  if (non_lazy_bnd_plt != NULL
+	      && plt->size >= non_lazy_bnd_plt->plt_entry_size
+	      && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
+			  non_lazy_bnd_plt->plt_got_offset) == 0))
+	    {
+	      /* Match BND PLT. 
*/ + plt_type = plt_second; + non_lazy_plt = non_lazy_bnd_plt; + } + else if (non_lazy_ibt_plt != NULL + && plt->size >= non_lazy_ibt_plt->plt_entry_size + && (memcmp (plt_contents, + non_lazy_ibt_plt->plt_entry, + non_lazy_ibt_plt->plt_got_offset) == 0)) + { + /* Match IBT PLT. */ + plt_type = plt_second; + non_lazy_plt = non_lazy_ibt_plt; + } + } - return plt_sym_val; -} + if (plt_type == plt_unknown) + { + free (plt_contents); + continue; + } -/* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section - support. */ + plts[j].sec = plt; + plts[j].type = plt_type; -static long -elf_x86_64_get_synthetic_symtab (bfd *abfd, - long symcount, - asymbol **syms, - long dynsymcount, - asymbol **dynsyms, - asymbol **ret) -{ - /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab - as PLT if it exists. */ - asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd"); - if (plt == NULL) - plt = bfd_get_section_by_name (abfd, ".plt"); - return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms, - dynsymcount, dynsyms, ret, - plt, - elf_x86_64_get_plt_sym_val); + if ((plt_type & plt_lazy)) + { + plts[j].plt_got_offset = lazy_plt->plt_got_offset; + plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size; + plts[j].plt_entry_size = lazy_plt->plt_entry_size; + /* Skip PLT0 in lazy PLT. */ + i = 1; + } + else + { + plts[j].plt_got_offset = non_lazy_plt->plt_got_offset; + plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size; + plts[j].plt_entry_size = non_lazy_plt->plt_entry_size; + i = 0; + } + + /* Skip lazy PLT when the second PLT is used. */ + if (plt_type == (plt_lazy | plt_second)) + plts[j].count = 0; + else + { + n = plt->size / plts[j].plt_entry_size; + plts[j].count = n; + count += n - i; + } + + plts[j].contents = plt_contents; + } + + return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize, + (bfd_vma) 0, plts, dynsyms, + ret); } /* Handle an x86-64 specific section when reading an object file. 
This @@ -6236,7 +5995,7 @@ elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, static bfd_boolean elf_x86_64_add_symbol_hook (bfd *abfd, - struct bfd_link_info *info, + struct bfd_link_info *info ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, const char **namep ATTRIBUTE_UNUSED, flagword *flagsp ATTRIBUTE_UNUSED, @@ -6265,12 +6024,6 @@ elf_x86_64_add_symbol_hook (bfd *abfd, return TRUE; } - if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE - && (abfd->flags & DYNAMIC) == 0 - && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) - elf_tdata (info->output_bfd)->has_gnu_symbols - |= elf_gnu_symbol_unique; - return TRUE; } @@ -6389,19 +6142,6 @@ elf_x86_64_additional_program_headers (bfd *abfd, return count; } -/* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */ - -static bfd_boolean -elf_x86_64_hash_symbol (struct elf_link_hash_entry *h) -{ - if (h->plt.offset != (bfd_vma) -1 - && !h->def_regular - && !h->pointer_equality_needed) - return FALSE; - - return _bfd_elf_hash_symbol (h); -} - /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */ static bfd_boolean @@ -6413,8 +6153,54 @@ elf_x86_64_relocs_compatible (const bfd_target *input, && _bfd_elf_relocs_compatible (input, output)); } +/* Set up x86-64 GNU properties. Return the first relocatable ELF input + with GNU properties if found. Otherwise, return NULL. 
*/ + +static bfd * +elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info) +{ + struct elf_x86_plt_layout_table plt_layout; + + plt_layout.is_vxworks = FALSE; + if (get_elf_x86_64_backend_data (info->output_bfd)->os == is_normal) + { + if (info->bndplt) + { + plt_layout.lazy_plt = &elf_x86_64_lazy_bnd_plt; + plt_layout.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt; + } + else + { + plt_layout.lazy_plt = &elf_x86_64_lazy_plt; + plt_layout.non_lazy_plt = &elf_x86_64_non_lazy_plt; + } + + if (ABI_64_P (info->output_bfd)) + { + plt_layout.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; + plt_layout.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; + } + else + { + plt_layout.lazy_ibt_plt = &elf_x32_lazy_ibt_plt; + plt_layout.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; + } + plt_layout.normal_target = TRUE; + } + else + { + plt_layout.lazy_plt = &elf_x86_64_nacl_plt; + plt_layout.non_lazy_plt = NULL; + plt_layout.lazy_ibt_plt = NULL; + plt_layout.non_lazy_ibt_plt = NULL; + plt_layout.normal_target = FALSE; + } + + return _bfd_x86_elf_link_setup_gnu_properties (info, &plt_layout); +} + static const struct bfd_elf_special_section - elf_x86_64_special_sections[]= +elf_x86_64_special_sections[]= { { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, @@ -6443,23 +6229,22 @@ static const struct bfd_elf_special_section #define elf_backend_rela_normal 1 #define elf_backend_plt_alignment 4 #define elf_backend_extern_protected_data 1 +#define elf_backend_caches_rawsize 1 +#define elf_backend_dtrel_excludes_plt 1 +#define elf_backend_want_dynrelro 1 #define elf_info_to_howto elf_x86_64_info_to_howto -#define bfd_elf64_bfd_link_hash_table_create \ - elf_x86_64_link_hash_table_create #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup #define bfd_elf64_bfd_reloc_name_lookup \ elf_x86_64_reloc_name_lookup -#define 
elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible #define elf_backend_check_relocs elf_x86_64_check_relocs -#define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol -#define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections +#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol -#define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook +#define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo #ifdef CORE_HEADER @@ -6468,10 +6253,8 @@ static const struct bfd_elf_special_section #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class #define elf_backend_relocate_section elf_x86_64_relocate_section #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections -#define elf_backend_always_size_sections elf_x86_64_always_size_sections #define elf_backend_init_index_section _bfd_elf_init_1_index_section #define elf_backend_object_p elf64_x86_64_elf_object_p -#define bfd_elf64_mkobject elf_x86_64_mkobject #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab #define elf_backend_section_from_shdr \ @@ -6495,12 +6278,8 @@ static const struct bfd_elf_special_section elf_x86_64_special_sections #define elf_backend_additional_program_headers \ elf_x86_64_additional_program_headers -#define elf_backend_hash_symbol \ - elf_x86_64_hash_symbol -#define elf_backend_omit_section_dynsym \ - ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true) -#define elf_backend_fixup_symbol \ - elf_x86_64_fixup_symbol +#define elf_backend_setup_gnu_properties \ + elf_x86_64_link_setup_gnu_properties #include 
"elf64-target.h" @@ -6630,11 +6409,11 @@ static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ /* 32 bytes of nop to pad out to the standard size. */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x66, /* excess data32 prefix */ + 0x66, /* excess data16 prefix */ 0x90 /* nop */ }; @@ -6646,7 +6425,7 @@ static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 0x41, 0xff, 0xe3, /* jmpq *%r11 */ /* 15-byte nop sequence to pad out to the next 32-byte boundary. */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ /* Lazy GOT entries point here (32-byte aligned). */ @@ -6656,7 +6435,7 @@ static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ /* 22 bytes of nop to pad out to the standard size. 
*/ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ }; @@ -6702,9 +6481,10 @@ static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] = DW_CFA_nop, DW_CFA_nop }; -static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = +static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt = { elf_x86_64_nacl_plt0_entry, /* plt0_entry */ + NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */ elf_x86_64_nacl_plt_entry, /* plt_entry */ NACL_PLT_ENTRY_SIZE, /* plt_entry_size */ 2, /* plt0_got1_offset */ @@ -6716,8 +6496,15 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = 7, /* plt_got_insn_size */ 42, /* plt_plt_insn_end */ 32, /* plt_lazy_offset */ + elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */ + elf_x86_64_nacl_plt_entry, /* pic_plt_entry */ elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */ + sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */ + }; + +static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = + { + is_nacl /* os */ }; #undef elf_backend_arch_data @@ -6751,14 +6538,10 @@ elf32_x86_64_nacl_elf_object_p (bfd *abfd) #undef elf32_bed #define elf32_bed elf32_x86_64_nacl_bed -#define bfd_elf32_bfd_link_hash_table_create \ - elf_x86_64_link_hash_table_create #define bfd_elf32_bfd_reloc_type_lookup \ elf_x86_64_reloc_type_lookup #define bfd_elf32_bfd_reloc_name_lookup \ elf_x86_64_reloc_name_lookup -#define bfd_elf32_mkobject \ - elf_x86_64_mkobject #define bfd_elf32_get_synthetic_symtab \ elf_x86_64_get_synthetic_symtab