X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=bfd%2Felf64-x86-64.c;h=5d3a65379c7180cbcfc26a4ed167801410bd29c9;hb=5430098f1807e084fe4ff5057040d68435f3d8a2;hp=4899f9858a6c5e184b0476f55b861be7aced0d5a;hpb=e66cdd681f47dc51beaeee3d813f1c9cba27dedf;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf64-x86-64.c b/bfd/elf64-x86-64.c index 4899f9858a..5d3a65379c 100644 --- a/bfd/elf64-x86-64.c +++ b/bfd/elf64-x86-64.c @@ -1,5 +1,5 @@ /* X86-64 specific support for ELF - Copyright (C) 2000-2016 Free Software Foundation, Inc. + Copyright (C) 2000-2017 Free Software Foundation, Inc. Contributed by Jan Hubicka . This file is part of BFD, the Binary File Descriptor library. @@ -285,8 +285,9 @@ elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type) { if (r_type >= (unsigned int) R_X86_64_standard) { - (*_bfd_error_handler) (_("%B: invalid relocation type %d"), - abfd, (int) r_type); + /* xgettext:c-format */ + _bfd_error_handler (_("%B: invalid relocation type %d"), + abfd, (int) r_type); r_type = R_X86_64_NONE; } i = r_type; @@ -658,6 +659,68 @@ static const bfd_byte elf_x86_64_eh_frame_plt[] = DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop }; +/* .eh_frame covering the BND .plt section. */ + +static const bfd_byte elf_x86_64_eh_frame_bnd_plt[] = +{ + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, + + PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ + 0, 0, 0, 0, /* .plt size goes here */ + 0, /* Augmentation size */ + DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ + DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ + DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ + DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ + DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ + 11, /* Block length */ + DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ + DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ + DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge, + DW_OP_lit3, DW_OP_shl, DW_OP_plus, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; + +/* .eh_frame covering the .plt.got section. */ + +static const bfd_byte elf_x86_64_eh_frame_plt_got[] = +{ +#define PLT_GOT_FDE_LENGTH 20 + PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ + 0, 0, 0, 0, /* CIE ID */ + 1, /* CIE version */ + 'z', 'R', 0, /* Augmentation string */ + 1, /* Code alignment factor */ + 0x78, /* Data alignment factor */ + 16, /* Return address column */ + 1, /* Augmentation size */ + DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ + DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ + DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ + DW_CFA_nop, DW_CFA_nop, + + PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */ + PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ + 0, 0, 0, 0, /* the start of .plt.got goes here */ + 0, 0, 0, 0, /* .plt.got size goes here */ + 0, /* Augmentation size */ + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, + DW_CFA_nop, DW_CFA_nop, DW_CFA_nop +}; + /* Architecture-specific backend data for x86-64. 
*/ struct elf_x86_64_backend_data @@ -692,6 +755,10 @@ struct elf_x86_64_backend_data /* .eh_frame covering the .plt section. */ const bfd_byte *eh_frame_plt; unsigned int eh_frame_plt_size; + + /* .eh_frame covering the .plt.got section. */ + const bfd_byte *eh_frame_plt_got; + unsigned int eh_frame_plt_got_size; }; #define get_elf_x86_64_arch_data(bed) \ @@ -720,6 +787,8 @@ static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = 6, /* plt_lazy_offset */ elf_x86_64_eh_frame_plt, /* eh_frame_plt */ sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_eh_frame_plt_got, /* eh_frame_plt_got */ + sizeof (elf_x86_64_eh_frame_plt_got), /* eh_frame_plt_got_size */ }; static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = @@ -736,8 +805,10 @@ static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = 1+6, /* plt_got_insn_size */ 11, /* plt_plt_insn_end */ 0, /* plt_lazy_offset */ - elf_x86_64_eh_frame_plt, /* eh_frame_plt */ - sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + elf_x86_64_eh_frame_bnd_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_bnd_plt), /* eh_frame_plt_size */ + elf_x86_64_eh_frame_plt_got, /* eh_frame_plt_got */ + sizeof (elf_x86_64_eh_frame_plt_got), /* eh_frame_plt_got_size */ }; #define elf_backend_arch_data &elf_x86_64_arch_bed @@ -748,11 +819,11 @@ static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = 1. Has non-GOT/non-PLT relocations in text section. Or 2. Has no GOT/PLT relocation. */ -#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, EH) \ +#define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \ ((EH)->elf.root.type == bfd_link_hash_undefweak \ && bfd_link_executable (INFO) \ && (elf_x86_64_hash_table (INFO)->interp == NULL \ - || !(EH)->has_got_reloc \ + || !(GOT_RELOC) \ || (EH)->has_non_got_reloc \ || !(INFO)->dynamic_undefined_weak)) @@ -796,6 +867,11 @@ struct elf_x86_64_link_hash_entry /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */ unsigned int has_non_got_reloc : 1; + /* 0: symbol isn't __tls_get_addr. + 1: symbol is __tls_get_addr. + 2: symbol is unknown. */ + unsigned int tls_get_addr : 2; + /* Reference count of C/C++ function pointer relocations in read-write section which can be resolved at run-time. */ bfd_signed_vma func_pointer_refcount; @@ -856,11 +932,11 @@ struct elf_x86_64_link_hash_table /* Short-cuts to get to dynamic linker sections. */ asection *interp; - asection *sdynbss; - asection *srelbss; asection *plt_eh_frame; asection *plt_bnd; + asection *plt_bnd_eh_frame; asection *plt_got; + asection *plt_got_eh_frame; union { @@ -946,6 +1022,7 @@ elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry, eh->has_bnd_reloc = 0; eh->has_got_reloc = 0; eh->has_non_got_reloc = 0; + eh->tls_get_addr = 2; eh->func_pointer_refcount = 0; eh->plt_bnd.offset = (bfd_vma) -1; eh->plt_got.offset = (bfd_vma) -1; @@ -1109,27 +1186,15 @@ elf_x86_64_create_dynamic_sections (bfd *dynobj, if (htab == NULL) return FALSE; - htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); - if (!htab->sdynbss) - abort (); - - if (bfd_link_executable (info)) + /* Set the contents of the .interp section to the interpreter. */ + if (bfd_link_executable (info) && !info->nointerp) { - /* Always allow copy relocs for building executables. 
*/ - asection *s = bfd_get_linker_section (dynobj, ".rela.bss"); + asection *s = bfd_get_linker_section (dynobj, ".interp"); if (s == NULL) - { - const struct elf_backend_data *bed = get_elf_backend_data (dynobj); - s = bfd_make_section_anyway_with_flags (dynobj, - ".rela.bss", - (bed->dynamic_sec_flags - | SEC_READONLY)); - if (s == NULL - || ! bfd_set_section_alignment (dynobj, s, - bed->s->log_file_align)) - return FALSE; - } - htab->srelbss = s; + abort (); + s->size = htab->dynamic_interpreter_size; + s->contents = (unsigned char *) htab->dynamic_interpreter; + htab->interp = s; } if (!info->no_ld_generated_unwind_info @@ -1142,9 +1207,21 @@ elf_x86_64_create_dynamic_sections (bfd *dynobj, htab->plt_eh_frame = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags); if (htab->plt_eh_frame == NULL - || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3)) + || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, + ABI_64_P (dynobj) ? 3 : 2)) return FALSE; } + + /* Align .got section to its entry size. */ + if (htab->elf.sgot != NULL + && !bfd_set_section_alignment (dynobj, htab->elf.sgot, 3)) + return FALSE; + + /* Align .got.plt section to its entry size. */ + if (htab->elf.sgotplt != NULL + && !bfd_set_section_alignment (dynobj, htab->elf.sgotplt, 3)) + return FALSE; + return TRUE; } @@ -1160,14 +1237,9 @@ elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, edir = (struct elf_x86_64_link_hash_entry *) dir; eind = (struct elf_x86_64_link_hash_entry *) ind; - if (!edir->has_bnd_reloc) - edir->has_bnd_reloc = eind->has_bnd_reloc; - - if (!edir->has_got_reloc) - edir->has_got_reloc = eind->has_got_reloc; - - if (!edir->has_non_got_reloc) - edir->has_non_got_reloc = eind->has_non_got_reloc; + edir->has_bnd_reloc |= eind->has_bnd_reloc; + edir->has_got_reloc |= eind->has_got_reloc; + edir->has_non_got_reloc |= eind->has_non_got_reloc; if (eind->dyn_relocs != NULL) { @@ -1214,7 +1286,8 @@ elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, /* If called to transfer flags for a weakdef during processing of elf_adjust_dynamic_symbol, don't copy non_got_ref. We clear it ourselves for ELIMINATE_COPY_RELOCS. */ - dir->ref_dynamic |= ind->ref_dynamic; + if (dir->versioned != versioned_hidden) + dir->ref_dynamic |= ind->ref_dynamic; dir->ref_regular |= ind->ref_regular; dir->ref_regular_nonweak |= ind->ref_regular_nonweak; dir->needs_plt |= ind->needs_plt; @@ -1268,22 +1341,8 @@ elf_x86_64_check_tls_transition (bfd *abfd, struct elf_link_hash_entry *h; bfd_vma offset; struct elf_x86_64_link_hash_table *htab; - - /* Get the section contents. */ - if (contents == NULL) - { - if (elf_section_data (sec)->this_hdr.contents != NULL) - contents = elf_section_data (sec)->this_hdr.contents; - else - { - /* FIXME: How to better handle error condition? */ - if (!bfd_malloc_and_get_section (abfd, sec, &contents)) - return FALSE; - - /* Cache the section contents for elf_link_input_bfd. */ - elf_section_data (sec)->this_hdr.contents = contents; - } - } + bfd_byte *call; + bfd_boolean indirect_call, tls_get_addr; htab = elf_x86_64_hash_table (info); offset = rel->r_offset; @@ -1298,32 +1357,61 @@ elf_x86_64_check_tls_transition (bfd *abfd, { /* Check transition from GD access model. 
For 64bit, only .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr can transit to different access model. For 32bit, only leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr - can transit to different access model. For largepic + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr + can transit to different access model. For largepic, we also support: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax + or leaq foo@tlsgd(%rip), %rdi movabsq $__tls_get_addr@pltoff, %rax addq $rbx, %rax - call *%rax. */ + call *%rax */ - static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 }; static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; if ((offset + 12) > sec->size) return FALSE; - if (memcmp (contents + offset + 4, call, 4) != 0) + call = contents + offset + 4; + if (call[0] != 0x66 + || !((call[1] == 0x48 + && call[2] == 0xff + && call[3] == 0x15) + || (call[1] == 0x48 + && call[2] == 0x67 + && call[3] == 0xe8) + || (call[1] == 0x66 + && call[2] == 0x48 + && call[3] == 0xe8))) { if (!ABI_64_P (abfd) || (offset + 19) > sec->size || offset < 3 - || memcmp (contents + offset - 3, leaq + 1, 3) != 0 - || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 - || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) - != 0) + || memcmp (call - 7, leaq + 1, 3) != 0 + || memcmp (call, "\x48\xb8", 2) != 0 + || call[11] != 0x01 + || call[13] != 0xff + || call[14] != 0xd0 + || !((call[10] == 0x48 && call[12] == 0xd8) + || (call[10] == 0x4c && call[12] == 0xf8))) return FALSE; largepic = TRUE; } @@ -1339,18 +1427,29 @@ elf_x86_64_check_tls_transition (bfd *abfd, || memcmp (contents + offset - 3, leaq + 1, 3) != 0) return FALSE; } + indirect_call = call[2] == 0xff; } else { /* Check transition from LD access model. Only leaq foo@tlsld(%rip), %rdi; - call __tls_get_addr + call __tls_get_addr@PLT + or + leaq foo@tlsld(%rip), %rdi; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr can transit to different access model. For largepic we also support: + leaq foo@tlsld(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $r15, %rax + call *%rax + or leaq foo@tlsld(%rip), %rdi movabsq $__tls_get_addr@pltoff, %rax addq $rbx, %rax - call *%rax. 
*/ + call *%rax */ static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; @@ -1360,33 +1459,60 @@ elf_x86_64_check_tls_transition (bfd *abfd, if (memcmp (contents + offset - 3, lea, 3) != 0) return FALSE; - if (0xe8 != *(contents + offset + 4)) + call = contents + offset + 4; + if (!(call[0] == 0xe8 + || (call[0] == 0xff && call[1] == 0x15) + || (call[0] == 0x67 && call[1] == 0xe8))) { if (!ABI_64_P (abfd) || (offset + 19) > sec->size - || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 - || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) - != 0) + || memcmp (call, "\x48\xb8", 2) != 0 + || call[11] != 0x01 + || call[13] != 0xff + || call[14] != 0xd0 + || !((call[10] == 0x48 && call[12] == 0xd8) + || (call[10] == 0x4c && call[12] == 0xf8))) return FALSE; largepic = TRUE; } + indirect_call = call[0] == 0xff; } r_symndx = htab->r_sym (rel[1].r_info); if (r_symndx < symtab_hdr->sh_info) return FALSE; + tls_get_addr = FALSE; h = sym_hashes[r_symndx - symtab_hdr->sh_info]; - /* Use strncmp to check __tls_get_addr since __tls_get_addr - may be versioned. */ - return (h != NULL - && h->root.root.string != NULL - && (largepic - ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64 - : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 - || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)) - && (strncmp (h->root.root.string, - "__tls_get_addr", 14) == 0)); + if (h != NULL && h->root.root.string != NULL) + { + struct elf_x86_64_link_hash_entry *eh + = (struct elf_x86_64_link_hash_entry *) h; + tls_get_addr = eh->tls_get_addr == 1; + if (eh->tls_get_addr > 1) + { + /* Use strncmp to check __tls_get_addr since + __tls_get_addr may be versioned. */ + if (strncmp (h->root.root.string, "__tls_get_addr", 14) + == 0) + { + eh->tls_get_addr = 1; + tls_get_addr = TRUE; + } + else + eh->tls_get_addr = 0; + } + } + + if (!tls_get_addr) + return FALSE; + else if (largepic) + return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64; + else if (indirect_call) + return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX; + else + return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 + || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32); case R_X86_64_GOTTPOFF: /* Check transition from IE access model: @@ -1449,8 +1575,8 @@ elf_x86_64_check_tls_transition (bfd *abfd, if (offset + 2 <= sec->size) { /* Make sure that it's a call *x@tlsdesc(%rax). */ - static const unsigned char call[] = { 0xff, 0x10 }; - return memcmp (contents + offset, call, 2) == 0; + call = contents + offset; + return call[0] == 0xff && call[1] == 0x10; } return FALSE; @@ -1472,7 +1598,8 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, const Elf_Internal_Rela *rel, const Elf_Internal_Rela *relend, struct elf_link_hash_entry *h, - unsigned long r_symndx) + unsigned long r_symndx, + bfd_boolean from_relocate_section) { unsigned int from_type = *r_type; unsigned int to_type = from_type; @@ -1498,10 +1625,9 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, to_type = R_X86_64_GOTTPOFF; } - /* When we are called from elf_x86_64_relocate_section, - CONTENTS isn't NULL and there may be additional transitions - based on TLS_TYPE. */ - if (contents != NULL) + /* When we are called from elf_x86_64_relocate_section, there may + be additional transitions based on TLS_TYPE. 
*/ + if (from_relocate_section) { unsigned int new_to_type = to_type; @@ -1572,7 +1698,8 @@ elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, } } - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: TLS transition from %s to %s against `%s' at 0x%lx " "in section `%A' failed"), abfd, sec, from->name, to->name, name, @@ -1631,14 +1758,431 @@ elf_x86_64_need_pic (bfd *input_bfd, asection *sec, pic = _("; recompile with -fPIC"); } - (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can " - "not be used when making a shared object%s"), - input_bfd, howto->name, und, v, name, pic); + /* xgettext:c-format */ + _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can " + "not be used when making a shared object%s"), + input_bfd, howto->name, und, v, name, pic); bfd_set_error (bfd_error_bad_value); sec->check_relocs_failed = 1; return FALSE; } +/* With the local symbol, foo, we convert + mov foo@GOTPCREL(%rip), %reg + to + lea foo(%rip), %reg + and convert + call/jmp *foo@GOTPCREL(%rip) + to + nop call foo/jmp foo nop + When PIC is false, convert + test %reg, foo@GOTPCREL(%rip) + to + test $foo, %reg + and convert + binop foo@GOTPCREL(%rip), %reg + to + binop $foo, %reg + where binop is one of adc, add, and, cmp, or, sbb, sub, xor + instructions. */ + +static bfd_boolean +elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec, + bfd_byte *contents, + Elf_Internal_Rela *irel, + struct elf_link_hash_entry *h, + bfd_boolean *converted, + struct bfd_link_info *link_info) +{ + struct elf_x86_64_link_hash_table *htab; + bfd_boolean is_pic; + bfd_boolean require_reloc_pc32; + bfd_boolean relocx; + bfd_boolean to_reloc_pc32; + asection *tsec; + char symtype; + bfd_signed_vma raddend; + unsigned int opcode; + unsigned int modrm; + unsigned int r_type = ELF32_R_TYPE (irel->r_info); + unsigned int r_symndx; + bfd_vma toff; + bfd_vma roff = irel->r_offset; + + if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) + return TRUE; + + raddend = irel->r_addend; + /* Addend for 32-bit PC-relative relocation must be -4. */ + if (raddend != -4) + return TRUE; + + htab = elf_x86_64_hash_table (link_info); + is_pic = bfd_link_pic (link_info); + + relocx = (r_type == R_X86_64_GOTPCRELX + || r_type == R_X86_64_REX_GOTPCRELX); + + /* TRUE if we can convert only to R_X86_64_PC32. Enable it for + --no-relax. */ + require_reloc_pc32 + = link_info->disable_target_specific_optimizations > 1; + + r_symndx = htab->r_sym (irel->r_info); + + opcode = bfd_get_8 (abfd, contents + roff - 2); + + /* Convert mov to lea since it has been done for a while. */ + if (opcode != 0x8b) + { + /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX + for call, jmp or one of adc, add, and, cmp, or, sbb, sub, + test, xor instructions. */ + if (!relocx) + return TRUE; + } + + /* We convert only to R_X86_64_PC32: + 1. Branch. + 2. R_X86_64_GOTPCREL since we can't modify REX byte. + 3. require_reloc_pc32 is true. + 4. PIC. + */ + to_reloc_pc32 = (opcode == 0xff + || !relocx + || require_reloc_pc32 + || is_pic); + + /* Get the symbol referred to by the reloc. */ + if (h == NULL) + { + Elf_Internal_Sym *isym + = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx); + + /* Skip relocation against undefined symbols. 
*/ + if (isym->st_shndx == SHN_UNDEF) + return TRUE; + + symtype = ELF_ST_TYPE (isym->st_info); + + if (isym->st_shndx == SHN_ABS) + tsec = bfd_abs_section_ptr; + else if (isym->st_shndx == SHN_COMMON) + tsec = bfd_com_section_ptr; + else if (isym->st_shndx == SHN_X86_64_LCOMMON) + tsec = &_bfd_elf_large_com_section; + else + tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); + + toff = isym->st_value; + } + else + { + /* Undefined weak symbol is only bound locally in executable + and its reference is resolved as 0 without relocation + overflow. We can only perform this optimization for + GOTPCRELX relocations since we need to modify REX byte. + It is OK convert mov with R_X86_64_GOTPCREL to + R_X86_64_PC32. */ + if ((relocx || opcode == 0x8b) + && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info, + TRUE, + elf_x86_64_hash_entry (h))) + { + if (opcode == 0xff) + { + /* Skip for branch instructions since R_X86_64_PC32 + may overflow. */ + if (require_reloc_pc32) + return TRUE; + } + else if (relocx) + { + /* For non-branch instructions, we can convert to + R_X86_64_32/R_X86_64_32S since we know if there + is a REX byte. */ + to_reloc_pc32 = FALSE; + } + + /* Since we don't know the current PC when PIC is true, + we can't convert to R_X86_64_PC32. */ + if (to_reloc_pc32 && is_pic) + return TRUE; + + goto convert; + } + /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since + ld.so may use its link-time address. */ + else if ((h->def_regular + || h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h != htab->elf.hdynamic + && SYMBOL_REFERENCES_LOCAL (link_info, h)) + { + /* bfd_link_hash_new or bfd_link_hash_undefined is + set by an assignment in a linker script in + bfd_elf_record_link_assignment. */ + if (h->def_regular + && (h->root.type == bfd_link_hash_new + || h->root.type == bfd_link_hash_undefined + || ((h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && h->root.u.def.section == bfd_und_section_ptr))) + { + /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ + if (require_reloc_pc32) + return TRUE; + goto convert; + } + tsec = h->root.u.def.section; + toff = h->root.u.def.value; + symtype = h->type; + } + else + return TRUE; + } + + /* Don't convert GOTPCREL relocation against large section. */ + if (elf_section_data (tsec) != NULL + && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0) + return TRUE; + + /* We can only estimate relocation overflow for R_X86_64_PC32. */ + if (!to_reloc_pc32) + goto convert; + + if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE) + { + /* At this stage in linking, no SEC_MERGE symbol has been + adjusted, so all references to such symbols need to be + passed through _bfd_merged_section_offset. (Later, in + relocate_section, all SEC_MERGE symbols *except* for + section symbols have been adjusted.) + + gas may reduce relocations against symbols in SEC_MERGE + sections to a relocation against the section symbol when + the original addend was zero. When the reloc is against + a section symbol we should include the addend in the + offset passed to _bfd_merged_section_offset, since the + location of interest is the original symbol. On the + other hand, an access to "sym+addend" where "sym" is not + a section symbol should not include the addend; Such an + access is presumed to be an offset from "sym"; The + location of interest is just "sym". 
*/ + if (symtype == STT_SECTION) + toff += raddend; + + toff = _bfd_merged_section_offset (abfd, &tsec, + elf_section_data (tsec)->sec_info, + toff); + + if (symtype != STT_SECTION) + toff += raddend; + } + else + toff += raddend; + + /* Don't convert if R_X86_64_PC32 relocation overflows. */ + if (tsec->output_section == sec->output_section) + { + if ((toff - roff + 0x80000000) > 0xffffffff) + return TRUE; + } + else + { + bfd_signed_vma distance; + + /* At this point, we don't know the load addresses of TSEC + section nor SEC section. We estimate the distrance between + SEC and TSEC. We store the estimated distances in the + compressed_size field of the output section, which is only + used to decompress the compressed input section. */ + if (sec->output_section->compressed_size == 0) + { + asection *asect; + bfd_size_type size = 0; + for (asect = link_info->output_bfd->sections; + asect != NULL; + asect = asect->next) + /* Skip debug sections since compressed_size is used to + compress debug sections. */ + if ((asect->flags & SEC_DEBUGGING) == 0) + { + asection *i; + for (i = asect->map_head.s; + i != NULL; + i = i->map_head.s) + { + size = align_power (size, i->alignment_power); + size += i->size; + } + asect->compressed_size = size; + } + } + + /* Don't convert GOTPCREL relocations if TSEC isn't placed + after SEC. */ + distance = (tsec->output_section->compressed_size + - sec->output_section->compressed_size); + if (distance < 0) + return TRUE; + + /* Take PT_GNU_RELRO segment into account by adding + maxpagesize. */ + if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize + - roff + 0x80000000) > 0xffffffff) + return TRUE; + } + +convert: + if (opcode == 0xff) + { + /* We have "call/jmp *foo@GOTPCREL(%rip)". */ + unsigned int nop; + unsigned int disp; + bfd_vma nop_offset; + + /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to + R_X86_64_PC32. */ + modrm = bfd_get_8 (abfd, contents + roff - 1); + if (modrm == 0x25) + { + /* Convert to "jmp foo nop". */ + modrm = 0xe9; + nop = NOP_OPCODE; + nop_offset = irel->r_offset + 3; + disp = bfd_get_32 (abfd, contents + irel->r_offset); + irel->r_offset -= 1; + bfd_put_32 (abfd, disp, contents + irel->r_offset); + } + else + { + struct elf_x86_64_link_hash_entry *eh + = (struct elf_x86_64_link_hash_entry *) h; + + /* Convert to "nop call foo". ADDR_PREFIX_OPCODE + is a nop prefix. */ + modrm = 0xe8; + /* To support TLS optimization, always use addr32 prefix for + "call *__tls_get_addr@GOTPCREL(%rip)". */ + if (eh && eh->tls_get_addr == 1) + { + nop = 0x67; + nop_offset = irel->r_offset - 2; + } + else + { + nop = link_info->call_nop_byte; + if (link_info->call_nop_as_suffix) + { + nop_offset = irel->r_offset + 3; + disp = bfd_get_32 (abfd, contents + irel->r_offset); + irel->r_offset -= 1; + bfd_put_32 (abfd, disp, contents + irel->r_offset); + } + else + nop_offset = irel->r_offset - 2; + } + } + bfd_put_8 (abfd, nop, contents + nop_offset); + bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); + r_type = R_X86_64_PC32; + } + else + { + unsigned int rex; + unsigned int rex_mask = REX_R; + + if (r_type == R_X86_64_REX_GOTPCRELX) + rex = bfd_get_8 (abfd, contents + roff - 3); + else + rex = 0; + + if (opcode == 0x8b) + { + if (to_reloc_pc32) + { + /* Convert "mov foo@GOTPCREL(%rip), %reg" to + "lea foo(%rip), %reg". */ + opcode = 0x8d; + r_type = R_X86_64_PC32; + } + else + { + /* Convert "mov foo@GOTPCREL(%rip), %reg" to + "mov $foo, %reg". 
*/ + opcode = 0xc7; + modrm = bfd_get_8 (abfd, contents + roff - 1); + modrm = 0xc0 | (modrm & 0x38) >> 3; + if ((rex & REX_W) != 0 + && ABI_64_P (link_info->output_bfd)) + { + /* Keep the REX_W bit in REX byte for LP64. */ + r_type = R_X86_64_32S; + goto rewrite_modrm_rex; + } + else + { + /* If the REX_W bit in REX byte isn't needed, + use R_X86_64_32 and clear the W bit to avoid + sign-extend imm32 to imm64. */ + r_type = R_X86_64_32; + /* Clear the W bit in REX byte. */ + rex_mask |= REX_W; + goto rewrite_modrm_rex; + } + } + } + else + { + /* R_X86_64_PC32 isn't supported. */ + if (to_reloc_pc32) + return TRUE; + + modrm = bfd_get_8 (abfd, contents + roff - 1); + if (opcode == 0x85) + { + /* Convert "test %reg, foo@GOTPCREL(%rip)" to + "test $foo, %reg". */ + modrm = 0xc0 | (modrm & 0x38) >> 3; + opcode = 0xf7; + } + else + { + /* Convert "binop foo@GOTPCREL(%rip), %reg" to + "binop $foo, %reg". */ + modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); + opcode = 0x81; + } + + /* Use R_X86_64_32 with 32-bit operand to avoid relocation + overflow when sign-extending imm32 to imm64. */ + r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; + +rewrite_modrm_rex: + bfd_put_8 (abfd, modrm, contents + roff - 1); + + if (rex) + { + /* Move the R bit to the B bit in REX byte. */ + rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; + bfd_put_8 (abfd, rex, contents + roff - 3); + } + + /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ + irel->r_addend = 0; + } + + bfd_put_8 (abfd, opcode, contents + roff - 2); + } + + irel->r_info = htab->r_info (r_symndx, r_type); + + *converted = TRUE; + + return TRUE; +} + /* Look through the relocs for a section during the first phase, and calculate needed space in the global offset table, procedure linkage table, and dynamic reloc sections. */ @@ -1654,16 +2198,38 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, const Elf_Internal_Rela *rel; const Elf_Internal_Rela *rel_end; asection *sreloc; + bfd_byte *contents; bfd_boolean use_plt_got; if (bfd_link_relocatable (info)) return TRUE; + /* Don't do anything special with non-loaded, non-alloced sections. + In particular, any relocs in such sections should not affect GOT + and PLT reference counting (ie. we don't allow them to create GOT + or PLT entries), there's no possibility or desire to optimize TLS + relocs, and there's not much point in propagating relocs to shared + libs that the dynamic linker won't relocate. */ + if ((sec->flags & SEC_ALLOC) == 0) + return TRUE; + BFD_ASSERT (is_x86_64_elf (abfd)); htab = elf_x86_64_hash_table (info); if (htab == NULL) - return FALSE; + { + sec->check_relocs_failed = 1; + return FALSE; + } + + /* Get the section contents. 
*/ + if (elf_section_data (sec)->this_hdr.contents != NULL) + contents = elf_section_data (sec)->this_hdr.contents; + else if (!bfd_malloc_and_get_section (abfd, sec, &contents)) + { + sec->check_relocs_failed = 1; + return FALSE; + } use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed; @@ -1688,9 +2254,10 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) { - (*_bfd_error_handler) (_("%B: bad symbol index: %d"), - abfd, r_symndx); - return FALSE; + /* xgettext:c-format */ + _bfd_error_handler (_("%B: bad symbol index: %d"), + abfd, r_symndx); + goto error_return; } if (r_symndx < symtab_hdr->sh_info) @@ -1699,7 +2266,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, isym = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx); if (isym == NULL) - return FALSE; + goto error_return; /* Check relocation against local STT_GNU_IFUNC symbol. */ if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) @@ -1707,7 +2274,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, TRUE); if (h == NULL) - return FALSE; + goto error_return; /* Fake a STT_GNU_IFUNC symbol. */ h->type = STT_GNU_IFUNC; @@ -1750,12 +2317,13 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against symbol `%s' isn't " "supported in x32 mode"), abfd, x86_64_elf_howto_table[r_type].name, name); bfd_set_error (bfd_error_bad_value); - return FALSE; + goto error_return; } break; } @@ -1776,23 +2344,21 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, /* MPX PLT is supported only if elf_x86_64_arch_bed is used in 64-bit mode. */ if (ABI_64_P (abfd) - && info->bndplt - && (get_elf_x86_64_backend_data (abfd) - == &elf_x86_64_arch_bed)) + && info->bndplt + && (get_elf_x86_64_backend_data (abfd) + == &elf_x86_64_arch_bed)) { elf_x86_64_hash_entry (h)->has_bnd_reloc = 1; /* Create the second PLT for Intel MPX support. */ if (htab->plt_bnd == NULL) { - unsigned int plt_bnd_align; const struct elf_backend_data *bed; bed = get_elf_backend_data (info->output_bfd); BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8 && (sizeof (elf_x86_64_bnd_plt2_entry) == sizeof (elf_x86_64_legacy_plt2_entry))); - plt_bnd_align = 3; if (htab->elf.dynobj == NULL) htab->elf.dynobj = abfd; @@ -1807,10 +2373,28 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (htab->plt_bnd == NULL || !bfd_set_section_alignment (htab->elf.dynobj, htab->plt_bnd, - plt_bnd_align)) - return FALSE; + 3)) + goto error_return; + } + + if (!info->no_ld_generated_unwind_info + && htab->plt_bnd_eh_frame == NULL) + { + flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY + | SEC_HAS_CONTENTS | SEC_IN_MEMORY + | SEC_LINKER_CREATED); + htab->plt_bnd_eh_frame + = bfd_make_section_anyway_with_flags (htab->elf.dynobj, + ".eh_frame", + flags); + if (htab->plt_bnd_eh_frame == NULL + || !bfd_set_section_alignment (htab->elf.dynobj, + htab->plt_bnd_eh_frame, + 3)) + goto error_return; } } + /* Fall through. 
*/ case R_X86_64_32S: case R_X86_64_PC64: @@ -1824,7 +2408,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, if (h->type == STT_GNU_IFUNC && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info)) - return FALSE; + goto error_return; break; } @@ -1837,11 +2421,11 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, |= elf_gnu_symbol_ifunc; } - if (! elf_x86_64_tls_transition (info, abfd, sec, NULL, + if (! elf_x86_64_tls_transition (info, abfd, sec, contents, symtab_hdr, sym_hashes, &r_type, GOT_UNKNOWN, - rel, rel_end, h, r_symndx)) - return FALSE; + rel, rel_end, h, r_symndx, FALSE)) + goto error_return; eh = (struct elf_x86_64_link_hash_entry *) h; switch (r_type) @@ -1908,7 +2492,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, local_got_refcounts = ((bfd_signed_vma *) bfd_zalloc (abfd, size)); if (local_got_refcounts == NULL) - return FALSE; + goto error_return; elf_local_got_refcounts (abfd) = local_got_refcounts; elf_x86_64_local_tlsdesc_gotent (abfd) = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info); @@ -1938,11 +2522,12 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, else name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: '%s' accessed both as normal and thread local symbol"), abfd, name); bfd_set_error (bfd_error_bad_value); - return FALSE; + goto error_return; } } @@ -1968,7 +2553,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, htab->elf.dynobj = abfd; if (!_bfd_elf_create_got_section (htab->elf.dynobj, info)) - return FALSE; + goto error_return; } break; @@ -2009,6 +2594,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_32: if (!ABI_64_P (abfd)) goto pointer; + /* Fall through. */ case R_X86_64_8: case R_X86_64_16: case R_X86_64_32S: @@ -2022,8 +2608,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, && h != NULL && !h->def_regular && h->def_dynamic - && (sec->flags & SEC_READONLY) == 0)) - && (sec->flags & SEC_ALLOC) != 0) + && (sec->flags & SEC_READONLY) == 0))) return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym, &x86_64_elf_howto_table[r_type]); /* Fall through. */ @@ -2037,15 +2622,12 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, pointer: if (eh != NULL && (sec->flags & SEC_CODE) != 0) eh->has_non_got_reloc = 1; - /* STT_GNU_IFUNC symbol must go through PLT even if it is - locally defined and undefined symbol may turn out to be - a STT_GNU_IFUNC symbol later. */ + /* We are called after all symbols have been resolved. Only + relocation against STT_GNU_IFUNC symbol must go through + PLT. */ if (h != NULL && (bfd_link_executable (info) - || ((h->type == STT_GNU_IFUNC - || h->root.type == bfd_link_hash_undefweak - || h->root.type == bfd_link_hash_undefined) - && SYMBOLIC_BIND (info, h)))) + || h->type == STT_GNU_IFUNC)) { /* If this reloc is in a read-only section, we might need a copy reloc. We can't check reliably at this @@ -2055,9 +2637,13 @@ pointer: adjust_dynamic_symbol. */ h->non_got_ref = 1; - /* We may need a .plt entry if the function this reloc - refers to is in a shared lib. */ - h->plt.refcount += 1; + /* We may need a .plt entry if the symbol is a function + defined in a shared lib or is a STT_GNU_IFUNC function + referenced from the code or read-only section. 
*/ + if (!h->def_regular + || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0) + h->plt.refcount += 1; + if (r_type == R_X86_64_PC32) { /* Since something like ".long foo - ." may be used @@ -2104,18 +2690,23 @@ do_size: If on the other hand, we are creating an executable, we may need to keep relocations for symbols satisfied by a dynamic library if we manage to avoid copy relocs for the - symbol. */ + symbol. + + Generate dynamic pointer relocation against STT_GNU_IFUNC + symbol in the non-code section. */ if ((bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 && (! IS_X86_64_PCREL_TYPE (r_type) || (h != NULL && (! (bfd_link_pie (info) || SYMBOLIC_BIND (info, h)) || h->root.type == bfd_link_hash_defweak || !h->def_regular)))) + || (h != NULL + && h->type == STT_GNU_IFUNC + && r_type == htab->pointer_r_type + && (sec->flags & SEC_CODE) == 0) || (ELIMINATE_COPY_RELOCS && !bfd_link_pic (info) - && (sec->flags & SEC_ALLOC) != 0 && h != NULL && (h->root.type == bfd_link_hash_defweak || !h->def_regular))) @@ -2136,7 +2727,7 @@ do_size: abfd, /*rela?*/ TRUE); if (sreloc == NULL) - return FALSE; + goto error_return; } /* If this is a global symbol, we count the number of @@ -2154,7 +2745,7 @@ do_size: isym = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx); if (isym == NULL) - return FALSE; + goto error_return; s = bfd_section_from_elf_index (abfd, isym->st_shndx); if (s == NULL) @@ -2174,7 +2765,7 @@ do_size: p = ((struct elf_dyn_relocs *) bfd_alloc (htab->elf.dynobj, amt)); if (p == NULL) - return FALSE; + goto error_return; p->next = *head; *head = p; p->sec = sec; @@ -2193,7 +2784,7 @@ do_size: Reconstruct it for later use during GC. */ case R_X86_64_GNU_VTINHERIT: if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) - return FALSE; + goto error_return; break; /* This relocation describes which C++ vtable entries are actually @@ -2202,7 +2793,7 @@ do_size: BFD_ASSERT (h != NULL); if (h != NULL && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) - return FALSE; + goto error_return; break; default: @@ -2240,7 +2831,25 @@ do_size: || !bfd_set_section_alignment (htab->elf.dynobj, htab->plt_got, plt_got_align)) - return FALSE; + goto error_return; + + if (!info->no_ld_generated_unwind_info + && htab->plt_got_eh_frame == NULL + && get_elf_x86_64_backend_data (abfd)->eh_frame_plt_got != NULL) + { + flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY + | SEC_HAS_CONTENTS | SEC_IN_MEMORY + | SEC_LINKER_CREATED); + htab->plt_got_eh_frame + = bfd_make_section_anyway_with_flags (htab->elf.dynobj, + ".eh_frame", + flags); + if (htab->plt_got_eh_frame == NULL + || !bfd_set_section_alignment (htab->elf.dynobj, + htab->plt_got_eh_frame, + ABI_64_P (htab->elf.dynobj) ? 3 : 2)) + goto error_return; + } } if ((r_type == R_X86_64_GOTPCREL @@ -2250,7 +2859,24 @@ do_size: sec->need_convert_load = 1; } + if (elf_section_data (sec)->this_hdr.contents != contents) + { + if (!info->keep_memory) + free (contents); + else + { + /* Cache the section contents for elf_link_input_bfd. 
*/ + elf_section_data (sec)->this_hdr.contents = contents; + } + } + return TRUE; + +error_return: + if (elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + sec->check_relocs_failed = 1; + return FALSE; } /* Return the section that should be marked against GC for a given @@ -2283,6 +2909,7 @@ elf_x86_64_fixup_symbol (struct bfd_link_info *info, { if (h->dynindx != -1 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + elf_x86_64_hash_entry (h)->has_got_reloc, elf_x86_64_hash_entry (h))) { h->dynindx = -1; @@ -2303,7 +2930,7 @@ elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, struct elf_link_hash_entry *h) { struct elf_x86_64_link_hash_table *htab; - asection *s; + asection *s, *srel; struct elf_x86_64_link_hash_entry *eh; struct elf_dyn_relocs *p; @@ -2333,12 +2960,17 @@ elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, if (pc_count || count) { - h->needs_plt = 1; h->non_got_ref = 1; - if (h->plt.refcount <= 0) - h->plt.refcount = 1; - else - h->plt.refcount += 1; + if (pc_count) + { + /* Increment PLT reference count only for PC-relative + references. */ + h->needs_plt = 1; + if (h->plt.refcount <= 0) + h->plt.refcount = 1; + else + h->plt.refcount += 1; + } } } @@ -2456,16 +3088,24 @@ elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker to copy the initial value out of the dynamic object and into the runtime process image. */ + if ((h->root.u.def.section->flags & SEC_READONLY) != 0) + { + s = htab->elf.sdynrelro; + srel = htab->elf.sreldynrelro; + } + else + { + s = htab->elf.sdynbss; + srel = htab->elf.srelbss; + } if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) { const struct elf_backend_data *bed; bed = get_elf_backend_data (info->output_bfd); - htab->srelbss->size += bed->s->sizeof_rela; + srel->size += bed->s->sizeof_rela; h->needs_copy = 1; } - s = htab->sdynbss; - return _bfd_elf_adjust_dynamic_copy (info, h, s); } @@ -2495,7 +3135,9 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) bed = get_elf_backend_data (info->output_bfd); plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); - resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh); + resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + eh->has_got_reloc, + eh); /* We can't use the GOT PLT if pointer equality is needed since finish_dynamic_symbol won't clear symbol value and the dynamic @@ -2530,7 +3172,7 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) &htab->readonly_dynrelocs_against_ifunc, plt_entry_size, plt_entry_size, - GOT_ENTRY_SIZE)) + GOT_ENTRY_SIZE, TRUE)) { asection *s = htab->plt_bnd; if (h->plt.offset != (bfd_vma) -1 && s != NULL) @@ -2907,6 +3549,7 @@ elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h, if ((info->warn_shared_textrel && bfd_link_pic (info)) || info->error_textrel) + /* xgettext:c-format */ info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"), p->sec->owner, h->root.root.string, p->sec); @@ -2918,24 +3561,7 @@ elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h, return TRUE; } -/* With the local symbol, foo, we convert - mov foo@GOTPCREL(%rip), %reg - to - lea foo(%rip), %reg - and convert - call/jmp *foo@GOTPCREL(%rip) - to - nop call foo/jmp foo nop - When PIC is false, convert - test %reg, foo@GOTPCREL(%rip) - to - test $foo, %reg - and convert - binop foo@GOTPCREL(%rip), %reg - to - binop $foo, %reg - where binop is one 
of adc, add, and, cmp, or, sbb, sub, xor - instructions. */ +/* Convert load via the GOT slot to load immediate. */ static bfd_boolean elf_x86_64_convert_load (bfd *abfd, asection *sec, @@ -2946,12 +3572,8 @@ elf_x86_64_convert_load (bfd *abfd, asection *sec, Elf_Internal_Rela *irel, *irelend; bfd_byte *contents; struct elf_x86_64_link_hash_table *htab; - bfd_boolean changed_contents; - bfd_boolean changed_relocs; + bfd_boolean changed; bfd_signed_vma *local_got_refcounts; - bfd_vma maxpagesize; - bfd_boolean is_pic; - bfd_boolean require_reloc_pc32; /* Don't even try to convert non-ELF outputs. */ if (!is_elf_hash_table (link_info->hash)) @@ -2972,11 +3594,9 @@ elf_x86_64_convert_load (bfd *abfd, asection *sec, if (internal_relocs == NULL) return FALSE; + changed = FALSE; htab = elf_x86_64_hash_table (link_info); - changed_contents = FALSE; - changed_relocs = FALSE; local_got_refcounts = elf_local_got_refcounts (abfd); - maxpagesize = get_elf_backend_data (abfd)->maxpagesize; /* Get the section contents. */ if (elf_section_data (sec)->this_hdr.contents != NULL) @@ -2987,401 +3607,62 @@ elf_x86_64_convert_load (bfd *abfd, asection *sec, goto error_return; } - is_pic = bfd_link_pic (link_info); - - /* TRUE if we can convert only to R_X86_64_PC32. Enable it for - --no-relax. */ - require_reloc_pc32 - = link_info->disable_target_specific_optimizations > 1; - irelend = internal_relocs + sec->reloc_count; for (irel = internal_relocs; irel < irelend; irel++) { unsigned int r_type = ELF32_R_TYPE (irel->r_info); - unsigned int r_symndx = htab->r_sym (irel->r_info); - unsigned int indx; + unsigned int r_symndx; struct elf_link_hash_entry *h; - asection *tsec; - char symtype; - bfd_vma toff, roff; - bfd_signed_vma raddend; - unsigned int opcode; - unsigned int modrm; - bfd_boolean relocx; - bfd_boolean to_reloc_pc32; - - relocx = (r_type == R_X86_64_GOTPCRELX - || r_type == R_X86_64_REX_GOTPCRELX); - if (!relocx && r_type != R_X86_64_GOTPCREL) - continue; - - roff = irel->r_offset; - if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) - continue; + bfd_boolean converted; - raddend = irel->r_addend; - /* Addend for 32-bit PC-relative relocation must be -4. */ - if (raddend != -4) + if (r_type != R_X86_64_GOTPCRELX + && r_type != R_X86_64_REX_GOTPCRELX + && r_type != R_X86_64_GOTPCREL) continue; - opcode = bfd_get_8 (abfd, contents + roff - 2); - - /* Convert mov to lea since it has been done for a while. */ - if (opcode != 0x8b) - { - /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX - for call, jmp or one of adc, add, and, cmp, or, sbb, sub, - test, xor instructions. */ - if (!relocx) - continue; - } - - /* We convert only to R_X86_64_PC32: - 1. Branch. - 2. R_X86_64_GOTPCREL since we can't modify REX byte. - 3. require_reloc_pc32 is true. - 4. PIC. - */ - to_reloc_pc32 = (opcode == 0xff - || !relocx - || require_reloc_pc32 - || is_pic); - - /* Get the symbol referred to by the reloc. */ + r_symndx = htab->r_sym (irel->r_info); if (r_symndx < symtab_hdr->sh_info) - { - Elf_Internal_Sym *isym; - - isym = bfd_sym_from_r_symndx (&htab->sym_cache, - abfd, r_symndx); - - symtype = ELF_ST_TYPE (isym->st_info); - - /* STT_GNU_IFUNC must keep GOTPCREL relocations and skip - relocation against undefined symbols. 
*/ - if (symtype == STT_GNU_IFUNC || isym->st_shndx == SHN_UNDEF) - continue; - - if (isym->st_shndx == SHN_ABS) - tsec = bfd_abs_section_ptr; - else if (isym->st_shndx == SHN_COMMON) - tsec = bfd_com_section_ptr; - else if (isym->st_shndx == SHN_X86_64_LCOMMON) - tsec = &_bfd_elf_large_com_section; - else - tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); - - h = NULL; - toff = isym->st_value; - } + h = elf_x86_64_get_local_sym_hash (htab, sec->owner, + (const Elf_Internal_Rela *) irel, + FALSE); else { - indx = r_symndx - symtab_hdr->sh_info; - h = elf_sym_hashes (abfd)[indx]; - BFD_ASSERT (h != NULL); - + h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info]; while (h->root.type == bfd_link_hash_indirect || h->root.type == bfd_link_hash_warning) h = (struct elf_link_hash_entry *) h->root.u.i.link; - - /* STT_GNU_IFUNC must keep GOTPCREL relocations. We also - avoid optimizing GOTPCREL relocations againt _DYNAMIC - since ld.so may use its link-time address. */ - if (h->type == STT_GNU_IFUNC) - continue; - - /* Undefined weak symbol is only bound locally in executable - and its reference is resolved as 0 without relocation - overflow. We can only perform this optimization for - GOTPCRELX relocations since we need to modify REX byte. - It is OK convert mov with R_X86_64_GOTPCREL to - R_X86_64_PC32. */ - if ((relocx || opcode == 0x8b) - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info, - elf_x86_64_hash_entry (h))) - { - if (opcode == 0xff) - { - /* Skip for branch instructions since R_X86_64_PC32 - may overflow. */ - if (require_reloc_pc32) - continue; - } - else if (relocx) - { - /* For non-branch instructions, we can convert to - R_X86_64_32/R_X86_64_32S since we know if there - is a REX byte. */ - to_reloc_pc32 = FALSE; - } - - /* Since we don't know the current PC when PIC is true, - we can't convert to R_X86_64_PC32. */ - if (to_reloc_pc32 && is_pic) - continue; - - goto convert; - } - else if ((h->def_regular - || h->root.type == bfd_link_hash_defined - || h->root.type == bfd_link_hash_defweak) - && h != htab->elf.hdynamic - && SYMBOL_REFERENCES_LOCAL (link_info, h)) - { - /* bfd_link_hash_new or bfd_link_hash_undefined is - set by an assignment in a linker script in - bfd_elf_record_link_assignment. */ - if (h->def_regular - && (h->root.type == bfd_link_hash_new - || h->root.type == bfd_link_hash_undefined)) - { - /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ - if (require_reloc_pc32) - continue; - goto convert; - } - tsec = h->root.u.def.section; - toff = h->root.u.def.value; - symtype = h->type; - } - else - continue; - } - - /* We can only estimate relocation overflow for R_X86_64_PC32. */ - if (!to_reloc_pc32) - goto convert; - - if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE) - { - /* At this stage in linking, no SEC_MERGE symbol has been - adjusted, so all references to such symbols need to be - passed through _bfd_merged_section_offset. (Later, in - relocate_section, all SEC_MERGE symbols *except* for - section symbols have been adjusted.) - - gas may reduce relocations against symbols in SEC_MERGE - sections to a relocation against the section symbol when - the original addend was zero. When the reloc is against - a section symbol we should include the addend in the - offset passed to _bfd_merged_section_offset, since the - location of interest is the original symbol. 
On the - other hand, an access to "sym+addend" where "sym" is not - a section symbol should not include the addend; Such an - access is presumed to be an offset from "sym"; The - location of interest is just "sym". */ - if (symtype == STT_SECTION) - toff += raddend; - - toff = _bfd_merged_section_offset (abfd, &tsec, - elf_section_data (tsec)->sec_info, - toff); - - if (symtype != STT_SECTION) - toff += raddend; } - else - toff += raddend; - - /* Don't convert if R_X86_64_PC32 relocation overflows. */ - if (tsec->output_section == sec->output_section) - { - if ((toff - roff + 0x80000000) > 0xffffffff) - continue; - } - else - { - bfd_signed_vma distance; - - /* At this point, we don't know the load addresses of TSEC - section nor SEC section. We estimate the distrance between - SEC and TSEC. We store the estimated distances in the - compressed_size field of the output section, which is only - used to decompress the compressed input section. */ - if (sec->output_section->compressed_size == 0) - { - asection *asect; - bfd_size_type size = 0; - for (asect = link_info->output_bfd->sections; - asect != NULL; - asect = asect->next) - { - asection *i; - for (i = asect->map_head.s; - i != NULL; - i = i->map_head.s) - { - size = align_power (size, i->alignment_power); - size += i->size; - } - asect->compressed_size = size; - } - } - - /* Don't convert GOTPCREL relocations if TSEC isn't placed - after SEC. */ - distance = (tsec->output_section->compressed_size - - sec->output_section->compressed_size); - if (distance < 0) - continue; - /* Take PT_GNU_RELRO segment into account by adding - maxpagesize. */ - if ((toff + distance + maxpagesize - roff + 0x80000000) - > 0xffffffff) - continue; - } + /* STT_GNU_IFUNC must keep GOTPCREL relocations. */ + if (h != NULL && h->type == STT_GNU_IFUNC) + continue; -convert: - if (opcode == 0xff) - { - /* We have "call/jmp *foo@GOTPCREL(%rip)". */ - unsigned int nop; - unsigned int disp; - bfd_vma nop_offset; + converted = FALSE; + if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h, + &converted, link_info)) + goto error_return; - /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to - R_X86_64_PC32. */ - modrm = bfd_get_8 (abfd, contents + roff - 1); - if (modrm == 0x25) - { - /* Convert to "jmp foo nop". */ - modrm = 0xe9; - nop = NOP_OPCODE; - nop_offset = irel->r_offset + 3; - disp = bfd_get_32 (abfd, contents + irel->r_offset); - irel->r_offset -= 1; - bfd_put_32 (abfd, disp, contents + irel->r_offset); - } - else - { - /* Convert to "nop call foo". ADDR_PREFIX_OPCODE - is a nop prefix. */ - modrm = 0xe8; - nop = link_info->call_nop_byte; - if (link_info->call_nop_as_suffix) - { - nop_offset = irel->r_offset + 3; - disp = bfd_get_32 (abfd, contents + irel->r_offset); - irel->r_offset -= 1; - bfd_put_32 (abfd, disp, contents + irel->r_offset); - } - else - nop_offset = irel->r_offset - 2; - } - bfd_put_8 (abfd, nop, contents + nop_offset); - bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); - r_type = R_X86_64_PC32; - } - else + if (converted) { - unsigned int rex; - unsigned int rex_mask = REX_R; - - if (r_type == R_X86_64_REX_GOTPCRELX) - rex = bfd_get_8 (abfd, contents + roff - 3); - else - rex = 0; - - if (opcode == 0x8b) + changed = converted; + if (h) { - if (to_reloc_pc32) - { - /* Convert "mov foo@GOTPCREL(%rip), %reg" to - "lea foo(%rip), %reg". */ - opcode = 0x8d; - r_type = R_X86_64_PC32; - } - else - { - /* Convert "mov foo@GOTPCREL(%rip), %reg" to - "mov $foo, %reg". 
*/ - opcode = 0xc7; - modrm = bfd_get_8 (abfd, contents + roff - 1); - modrm = 0xc0 | (modrm & 0x38) >> 3; - if ((rex & REX_W) != 0 - && ABI_64_P (link_info->output_bfd)) - { - /* Keep the REX_W bit in REX byte for LP64. */ - r_type = R_X86_64_32S; - goto rewrite_modrm_rex; - } - else - { - /* If the REX_W bit in REX byte isn't needed, - use R_X86_64_32 and clear the W bit to avoid - sign-extend imm32 to imm64. */ - r_type = R_X86_64_32; - /* Clear the W bit in REX byte. */ - rex_mask |= REX_W; - goto rewrite_modrm_rex; - } - } + if (h->got.refcount > 0) + h->got.refcount -= 1; } else { - /* R_X86_64_PC32 isn't supported. */ - if (to_reloc_pc32) - continue; - - modrm = bfd_get_8 (abfd, contents + roff - 1); - if (opcode == 0x85) - { - /* Convert "test %reg, foo@GOTPCREL(%rip)" to - "test $foo, %reg". */ - modrm = 0xc0 | (modrm & 0x38) >> 3; - opcode = 0xf7; - } - else - { - /* Convert "binop foo@GOTPCREL(%rip), %reg" to - "binop $foo, %reg". */ - modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); - opcode = 0x81; - } - - /* Use R_X86_64_32 with 32-bit operand to avoid relocation - overflow when sign-extending imm32 to imm64. */ - r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; - -rewrite_modrm_rex: - bfd_put_8 (abfd, modrm, contents + roff - 1); - - if (rex) - { - /* Move the R bit to the B bit in REX byte. */ - rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; - bfd_put_8 (abfd, rex, contents + roff - 3); - } - - /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ - irel->r_addend = 0; + if (local_got_refcounts != NULL + && local_got_refcounts[r_symndx] > 0) + local_got_refcounts[r_symndx] -= 1; } - - bfd_put_8 (abfd, opcode, contents + roff - 2); - } - - irel->r_info = htab->r_info (r_symndx, r_type); - changed_contents = TRUE; - changed_relocs = TRUE; - - if (h) - { - if (h->got.refcount > 0) - h->got.refcount -= 1; - } - else - { - if (local_got_refcounts != NULL - && local_got_refcounts[r_symndx] > 0) - local_got_refcounts[r_symndx] -= 1; } } if (contents != NULL && elf_section_data (sec)->this_hdr.contents != contents) { - if (!changed_contents && !link_info->keep_memory) + if (!changed && !link_info->keep_memory) free (contents); else { @@ -3392,7 +3673,7 @@ rewrite_modrm_rex: if (elf_section_data (sec)->relocs != internal_relocs) { - if (!changed_relocs) + if (!changed) free (internal_relocs); else elf_section_data (sec)->relocs = internal_relocs; @@ -3422,6 +3703,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, bfd_boolean relocs; bfd *ibfd; const struct elf_backend_data *bed; + const struct elf_x86_64_backend_data *arch_data; htab = elf_x86_64_hash_table (info); if (htab == NULL) @@ -3432,20 +3714,6 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, if (dynobj == NULL) abort (); - if (htab->elf.dynamic_sections_created) - { - /* Set the contents of the .interp section to the interpreter. */ - if (bfd_link_executable (info) && !info->nointerp) - { - s = bfd_get_linker_section (dynobj, ".interp"); - if (s == NULL) - abort (); - s->size = htab->dynamic_interpreter_size; - s->contents = (unsigned char *) htab->dynamic_interpreter; - htab->interp = s; - } - } - /* Set up .got offsets for local syms, and space for local dynamic relocs. 
*/ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) @@ -3491,6 +3759,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, info->flags |= DF_TEXTREL; if ((info->warn_shared_textrel && bfd_link_pic (info)) || info->error_textrel) + /* xgettext:c-format */ info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"), p->sec->owner, p->sec); } @@ -3626,15 +3895,31 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, htab->elf.sgotplt->size = 0; } - if (htab->plt_eh_frame != NULL - && htab->elf.splt != NULL - && htab->elf.splt->size != 0 - && !bfd_is_abs_section (htab->elf.splt->output_section) - && _bfd_elf_eh_frame_present (info)) + arch_data = (htab->plt_bnd != NULL + ? &elf_x86_64_bnd_arch_bed + : get_elf_x86_64_arch_data (bed)); + + if (_bfd_elf_eh_frame_present (info)) { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; + if (htab->plt_eh_frame != NULL + && htab->elf.splt != NULL + && htab->elf.splt->size != 0 + && !bfd_is_abs_section (htab->elf.splt->output_section)) + htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; + + if (htab->plt_got_eh_frame != NULL + && htab->plt_got != NULL + && htab->plt_got->size != 0 + && !bfd_is_abs_section (htab->plt_got->output_section)) + htab->plt_got_eh_frame->size = arch_data->eh_frame_plt_got_size; + + /* Unwind info for .plt.bnd and .plt.got sections are + identical. */ + if (htab->plt_bnd_eh_frame != NULL + && htab->plt_bnd != NULL + && htab->plt_bnd->size != 0 + && !bfd_is_abs_section (htab->plt_bnd->output_section)) + htab->plt_bnd_eh_frame->size = arch_data->eh_frame_plt_got_size; } /* We now have determined the sizes of the various dynamic sections. @@ -3653,7 +3938,10 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, || s == htab->plt_bnd || s == htab->plt_got || s == htab->plt_eh_frame - || s == htab->sdynbss) + || s == htab->plt_got_eh_frame + || s == htab->plt_bnd_eh_frame + || s == htab->elf.sdynbss + || s == htab->elf.sdynrelro) { /* Strip this section if we don't need it; see the comment below. */ @@ -3706,15 +3994,34 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, if (htab->plt_eh_frame != NULL && htab->plt_eh_frame->contents != NULL) { - const struct elf_x86_64_backend_data *arch_data - = get_elf_x86_64_arch_data (bed); - memcpy (htab->plt_eh_frame->contents, arch_data->eh_frame_plt, htab->plt_eh_frame->size); bfd_put_32 (dynobj, htab->elf.splt->size, htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET); } + if (htab->plt_got_eh_frame != NULL + && htab->plt_got_eh_frame->contents != NULL) + { + memcpy (htab->plt_got_eh_frame->contents, + arch_data->eh_frame_plt_got, + htab->plt_got_eh_frame->size); + bfd_put_32 (dynobj, htab->plt_got->size, + (htab->plt_got_eh_frame->contents + + PLT_FDE_LEN_OFFSET)); + } + + if (htab->plt_bnd_eh_frame != NULL + && htab->plt_bnd_eh_frame->contents != NULL) + { + memcpy (htab->plt_bnd_eh_frame->contents, + arch_data->eh_frame_plt_got, + htab->plt_bnd_eh_frame->size); + bfd_put_32 (dynobj, htab->plt_bnd->size, + (htab->plt_bnd_eh_frame->contents + + PLT_FDE_LEN_OFFSET)); + } + if (htab->elf.dynamic_sections_created) { /* Add some entries to the .dynamic section. 
We fill in the @@ -3974,7 +4281,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, if (r_type >= (int) R_X86_64_standard) { - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: unrecognized relocation (0x%x) in section `%A'"), input_bfd, input_section, r_type); bfd_set_error (bfd_error_bad_value); @@ -4090,8 +4398,84 @@ elf_x86_64_relocate_section (bfd *output_bfd, continue; abort (); } - else if (h->plt.offset == (bfd_vma) -1) - abort (); + + switch (r_type) + { + default: + break; + + case R_X86_64_GOTPCREL: + case R_X86_64_GOTPCRELX: + case R_X86_64_REX_GOTPCRELX: + case R_X86_64_GOTPCREL64: + base_got = htab->elf.sgot; + off = h->got.offset; + + if (base_got == NULL) + abort (); + + if (off == (bfd_vma) -1) + { + /* We can't use h->got.offset here to save state, or + even just remember the offset, as finish_dynamic_symbol + would use that as offset into .got. */ + + if (h->plt.offset == (bfd_vma) -1) + abort (); + + if (htab->elf.splt != NULL) + { + plt_index = h->plt.offset / plt_entry_size - 1; + off = (plt_index + 3) * GOT_ENTRY_SIZE; + base_got = htab->elf.sgotplt; + } + else + { + plt_index = h->plt.offset / plt_entry_size; + off = plt_index * GOT_ENTRY_SIZE; + base_got = htab->elf.igotplt; + } + + if (h->dynindx == -1 + || h->forced_local + || info->symbolic) + { + /* This references the local defitionion. We must + initialize this entry in the global offset table. + Since the offset must always be a multiple of 8, + we use the least significant bit to record + whether we have initialized it already. + + When doing a dynamic link, we create a .rela.got + relocation entry to initialize the value. This + is done in the finish_dynamic_symbol routine. */ + if ((off & 1) != 0) + off &= ~1; + else + { + bfd_put_64 (output_bfd, relocation, + base_got->contents + off); + /* Note that this is harmless for the GOTPLT64 + case, as -1 | 1 still is -1. */ + h->got.offset |= 1; + } + } + } + + relocation = (base_got->output_section->vma + + base_got->output_offset + off); + + goto do_relocation; + } + + if (h->plt.offset == (bfd_vma) -1) + { + /* Handle static pointers of STT_GNU_IFUNC symbols. */ + if (r_type == htab->pointer_r_type + && (input_section->flags & SEC_CODE) == 0) + goto do_ifunc_pointer; + goto bad_ifunc_reloc; + } /* STT_GNU_IFUNC symbol must go through PLT. 
*/ if (htab->elf.splt != NULL) @@ -4119,15 +4503,17 @@ elf_x86_64_relocate_section (bfd *output_bfd, switch (r_type) { default: +bad_ifunc_reloc: if (h->root.root.string) name = h->root.root.string; else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against STT_GNU_IFUNC " - "symbol `%s' isn't handled by %s"), input_bfd, - howto->name, name, __FUNCTION__); + "symbol `%s' isn't supported"), input_bfd, + howto->name, name); bfd_set_error (bfd_error_bad_value); return FALSE; @@ -4141,6 +4527,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, goto do_relocation; /* FALLTHROUGH */ case R_X86_64_64: +do_ifunc_pointer: if (rel->r_addend != 0) { if (h->root.root.string) @@ -4148,7 +4535,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, else name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation %s against STT_GNU_IFUNC " "symbol `%s' has non-zero addend: %d"), input_bfd, howto->name, name, rel->r_addend); @@ -4157,8 +4545,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, } /* Generate dynamic relcoation only when there is a - non-GOT reference in a shared object. */ - if (bfd_link_pic (info) && h->non_got_ref) + non-GOT reference in a shared object or there is no + PLT. */ + if ((bfd_link_pic (info) && h->non_got_ref) + || h->plt.offset == (bfd_vma) -1) { Elf_Internal_Rela outrel; asection *sreloc; @@ -4192,7 +4582,16 @@ elf_x86_64_relocate_section (bfd *output_bfd, outrel.r_addend = 0; } - sreloc = htab->elf.irelifunc; + /* Dynamic relocations are stored in + 1. .rela.ifunc section in PIC object. + 2. .rela.got section in dynamic executable. + 3. .rela.iplt section in static executable. */ + if (bfd_link_pic (info)) + sreloc = htab->elf.irelifunc; + else if (htab->elf.splt != NULL) + sreloc = htab->elf.srelgot; + else + sreloc = htab->elf.irelplt; elf_append_rela (output_bfd, sreloc, &outrel); /* If this reloc is against an external symbol, we @@ -4209,71 +4608,13 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PLT32: case R_X86_64_PLT32_BND: goto do_relocation; - - case R_X86_64_GOTPCREL: - case R_X86_64_GOTPCRELX: - case R_X86_64_REX_GOTPCRELX: - case R_X86_64_GOTPCREL64: - base_got = htab->elf.sgot; - off = h->got.offset; - - if (base_got == NULL) - abort (); - - if (off == (bfd_vma) -1) - { - /* We can't use h->got.offset here to save state, or - even just remember the offset, as finish_dynamic_symbol - would use that as offset into .got. */ - - if (htab->elf.splt != NULL) - { - plt_index = h->plt.offset / plt_entry_size - 1; - off = (plt_index + 3) * GOT_ENTRY_SIZE; - base_got = htab->elf.sgotplt; - } - else - { - plt_index = h->plt.offset / plt_entry_size; - off = plt_index * GOT_ENTRY_SIZE; - base_got = htab->elf.igotplt; - } - - if (h->dynindx == -1 - || h->forced_local - || info->symbolic) - { - /* This references the local defitionion. We must - initialize this entry in the global offset table. - Since the offset must always be a multiple of 8, - we use the least significant bit to record - whether we have initialized it already. - - When doing a dynamic link, we create a .rela.got - relocation entry to initialize the value. This - is done in the finish_dynamic_symbol routine. */ - if ((off & 1) != 0) - off &= ~1; - else - { - bfd_put_64 (output_bfd, relocation, - base_got->contents + off); - /* Note that this is harmless for the GOTPLT64 - case, as -1 | 1 still is -1. 
*/ - h->got.offset |= 1; - } - } - } - - relocation = (base_got->output_section->vma - + base_got->output_offset + off); - - goto do_relocation; } } resolved_to_zero = (eh != NULL - && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh)); + && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + eh->has_got_reloc, + eh)); /* When generating a shared object, the relocations handled here are copied into the output file to be resolved at run time. */ @@ -4430,7 +4771,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; } - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"), input_bfd, v, h->root.root.string); bfd_set_error (bfd_error_bad_value); @@ -4442,7 +4784,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, || h->type == STT_OBJECT) && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) { - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"), input_bfd, h->type == STT_FUNC ? "function" : "data", @@ -4474,10 +4817,17 @@ elf_x86_64_relocate_section (bfd *output_bfd, symbols it's the symbol itself relative to GOT. */ if (h != NULL /* See PLT32 handling. */ - && h->plt.offset != (bfd_vma) -1 + && (h->plt.offset != (bfd_vma) -1 + || eh->plt_got.offset != (bfd_vma) -1) && htab->elf.splt != NULL) { - if (htab->plt_bnd != NULL) + if (eh->plt_got.offset != (bfd_vma) -1) + { + /* Use the GOT PLT. */ + resolved_plt = htab->plt_got; + plt_offset = eh->plt_got.offset; + } + else if (htab->plt_bnd != NULL) { resolved_plt = htab->plt_bnd; plt_offset = eh->plt_bnd.offset; @@ -4576,7 +4926,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, { /* Symbol is referenced locally. Make sure it is defined locally or for a branch. */ - fail = !h->def_regular && !branch; + fail = (!(h->def_regular || ELF_COMMON_DEF_P (h)) + && !branch); } else if (!(bfd_link_pie (info) && (h->needs_copy || eh->needs_copy))) @@ -4617,7 +4968,9 @@ direct: && (h->needs_copy || eh->needs_copy || h->root.type == bfd_link_hash_undefined) - && IS_X86_64_PCREL_TYPE (r_type)) + && (IS_X86_64_PCREL_TYPE (r_type) + || r_type == R_X86_64_SIZE32 + || r_type == R_X86_64_SIZE64)) && (h == NULL || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT && !resolved_to_zero) @@ -4707,7 +5060,8 @@ direct: name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL); if (addend < 0) - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: addend -0x%x in relocation %s against " "symbol `%s' at 0x%lx in section `%A' is " "out of range"), @@ -4715,7 +5069,8 @@ direct: howto->name, name, (unsigned long) rel->r_offset); else - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B: addend 0x%x in relocation %s against " "symbol `%s' at 0x%lx in section `%A' is " "out of range"), @@ -4795,7 +5150,7 @@ direct: input_section, contents, symtab_hdr, sym_hashes, &r_type, tls_type, rel, - relend, h, r_symndx)) + relend, h, r_symndx, TRUE)) return FALSE; if (r_type == R_X86_64_TPOFF32) @@ -4807,39 +5162,53 @@ direct: if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) { /* GD->LE transition. 
For 64bit, change - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movq %fs:0, %rax - leaq foo@tpoff(%rax), %rax + movq %fs:0, %rax + leaq foo@tpoff(%rax), %rax For 32bit, change - leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr + leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movl %fs:0, %eax - leaq foo@tpoff(%rax), %rax + movl %fs:0, %eax + leaq foo@tpoff(%rax), %rax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %r15, %rax + call *%rax into: - movq %fs:0, %rax - leaq foo@tpoff(%rax), %rax - nopw 0x0(%rax,%rax,1) */ + movq %fs:0, %rax + leaq foo@tpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ int largepic = 0; - if (ABI_64_P (output_bfd) - && contents[roff + 5] == (bfd_byte) '\xb8') + if (ABI_64_P (output_bfd)) { - memcpy (contents + roff - 3, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" - "\0\0\0\0\x66\x0f\x1f\x44\0", 22); - largepic = 1; + if (contents[roff + 5] == 0xb8) + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + else + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", + 16); } - else if (ABI_64_P (output_bfd)) - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", - 16); else memcpy (contents + roff - 3, "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", @@ -4847,7 +5216,8 @@ direct: bfd_put_32 (output_bfd, elf_x86_64_tpoff (info, relocation), contents + roff + 8 + largepic); - /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ + /* Skip R_X86_64_PC32, R_X86_64_PLT32, + R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */ rel++; wrel++; continue; @@ -5083,39 +5453,53 @@ direct: if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) { /* GD->IE transition. 
For 64bit, change - .byte 0x66; leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@plt + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + .byte 0x66; leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64 + call *__tls_get_addr@GOTPCREL(%rip + which may be converted to + addr32 call __tls_get_addr into: - movq %fs:0, %rax - addq foo@gottpoff(%rip), %rax + movq %fs:0, %rax + addq foo@gottpoff(%rip), %rax For 32bit, change - leaq foo@tlsgd(%rip), %rdi - .word 0x6666; rex64; call __tls_get_addr@plt + leaq foo@tlsgd(%rip), %rdi + .word 0x6666; rex64; call __tls_get_addr@PLT + or + leaq foo@tlsgd(%rip), %rdi + .byte 0x66; rex64; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr into: - movl %fs:0, %eax - addq foo@gottpoff(%rip), %rax + movl %fs:0, %eax + addq foo@gottpoff(%rip), %rax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %r15, %rax + call *%rax into: - movq %fs:0, %rax - addq foo@gottpoff(%rax), %rax - nopw 0x0(%rax,%rax,1) */ + movq %fs:0, %rax + addq foo@gottpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ int largepic = 0; - if (ABI_64_P (output_bfd) - && contents[roff + 5] == (bfd_byte) '\xb8') + if (ABI_64_P (output_bfd)) { - memcpy (contents + roff - 3, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" - "\0\0\0\0\x66\x0f\x1f\x44\0", 22); - largepic = 1; + if (contents[roff + 5] == 0xb8) + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + else + memcpy (contents + roff - 4, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", + 16); } - else if (ABI_64_P (output_bfd)) - memcpy (contents + roff - 4, - "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", - 16); else memcpy (contents + roff - 3, "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", @@ -5182,40 +5566,65 @@ direct: if (! elf_x86_64_tls_transition (info, input_bfd, input_section, contents, symtab_hdr, sym_hashes, - &r_type, GOT_UNKNOWN, - rel, relend, h, r_symndx)) + &r_type, GOT_UNKNOWN, rel, + relend, h, r_symndx, TRUE)) return FALSE; if (r_type != R_X86_64_TLSLD) { /* LD->LE transition: - leaq foo@tlsld(%rip), %rdi; call __tls_get_addr. + leaq foo@tlsld(%rip), %rdi + call __tls_get_addr@PLT + For 64bit, we change it into: + .word 0x6666; .byte 0x66; movq %fs:0, %rax + For 32bit, we change it into: + nopl 0x0(%rax); movl %fs:0, %eax + Or + leaq foo@tlsld(%rip), %rdi; + call *__tls_get_addr@GOTPCREL(%rip) + which may be converted to + addr32 call __tls_get_addr For 64bit, we change it into: - .word 0x6666; .byte 0x66; movq %fs:0, %rax. + .word 0x6666; .word 0x6666; movq %fs:0, %rax For 32bit, we change it into: - nopl 0x0(%rax); movl %fs:0, %eax. 
+ nopw 0x0(%rax); movl %fs:0, %eax For largepic, change: - leaq foo@tlsgd(%rip), %rdi - movabsq $__tls_get_addr@pltoff, %rax - addq %rbx, %rax - call *%rax - into: - data32 data32 data32 nopw %cs:0x0(%rax,%rax,1) - movq %fs:0, %eax */ + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %rbx, %rax + call *%rax + into + data16 data16 data16 nopw %cs:0x0(%rax,%rax,1) + movq %fs:0, %eax */ BFD_ASSERT (r_type == R_X86_64_TPOFF32); - if (ABI_64_P (output_bfd) - && contents[rel->r_offset + 5] == (bfd_byte) '\xb8') - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" - "\x64\x48\x8b\x04\x25\0\0\0", 22); - else if (ABI_64_P (output_bfd)) - memcpy (contents + rel->r_offset - 3, - "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + if (ABI_64_P (output_bfd)) + { + if (contents[rel->r_offset + 5] == 0xb8) + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" + "\x64\x48\x8b\x04\x25\0\0\0", 22); + else if (contents[rel->r_offset + 4] == 0xff + || contents[rel->r_offset + 4] == 0x67) + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", + 13); + else + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); + } else - memcpy (contents + rel->r_offset - 3, - "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); - /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ + { + if (contents[rel->r_offset + 4] == 0xff) + memcpy (contents + rel->r_offset - 3, + "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", + 13); + else + memcpy (contents + rel->r_offset - 3, + "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); + } + /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX + and R_X86_64_PLTOFF64. */ rel++; wrel++; continue; @@ -5284,7 +5693,8 @@ direct: && _bfd_elf_section_offset (output_bfd, info, input_section, rel->r_offset) != (bfd_vma) -1) { - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), input_bfd, input_section, @@ -5318,16 +5728,13 @@ check_relocation_error: } if (r == bfd_reloc_overflow) - { - if (! ((*info->callbacks->reloc_overflow) - (info, (h ? &h->root : NULL), name, howto->name, - (bfd_vma) 0, input_bfd, input_section, - rel->r_offset))) - return FALSE; - } + (*info->callbacks->reloc_overflow) + (info, (h ? &h->root : NULL), name, howto->name, + (bfd_vma) 0, input_bfd, input_section, rel->r_offset); else { - (*_bfd_error_handler) + _bfd_error_handler + /* xgettext:c-format */ (_("%B(%A+0x%lx): reloc against `%s': error %d"), input_bfd, input_section, (long) rel->r_offset, name, (int) r); @@ -5393,7 +5800,9 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for resolved undefined weak symbols in executable so that their references have value 0 at run-time. */ - local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh); + local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, + eh->has_got_reloc, + eh); if (h->plt.offset != (bfd_vma) -1) { @@ -5518,6 +5927,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, /* Check PC-relative offset overflow in PLT entry. 
*/ if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); @@ -5576,6 +5986,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, check relocation index for overflow since branch displacement will overflow first. */ if (plt0_offset > 0x80000000) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"), output_bfd, h->root.root.string); bfd_put_32 (output_bfd, - plt0_offset, @@ -5640,6 +6051,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, got_after_plt = got->output_section->vma > plt->output_section->vma; if ((got_after_plt && got_pcrel_offset < 0) || (!got_after_plt && got_pcrel_offset > 0)) + /* xgettext:c-format */ info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"), output_bfd, h->root.root.string); @@ -5673,6 +6085,7 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, && !local_undefweak) { Elf_Internal_Rela rela; + asection *relgot = htab->elf.srelgot; /* This symbol has an entry in the global offset table. Set it up. */ @@ -5691,7 +6104,27 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, if (h->def_regular && h->type == STT_GNU_IFUNC) { - if (bfd_link_pic (info)) + if (h->plt.offset == (bfd_vma) -1) + { + /* STT_GNU_IFUNC is referenced without PLT. */ + if (htab->elf.splt == NULL) + { + /* use .rel[a].iplt section to store .got relocations + in static executable. */ + relgot = htab->elf.irelplt; + } + if (SYMBOL_REFERENCES_LOCAL (info, h)) + { + rela.r_info = htab->r_info (0, + R_X86_64_IRELATIVE); + rela.r_addend = (h->root.u.def.value + + h->root.u.def.section->output_section->vma + + h->root.u.def.section->output_offset); + } + else + goto do_glob_dat; + } + else if (bfd_link_pic (info)) { /* Generate R_X86_64_GLOB_DAT. */ goto do_glob_dat; @@ -5735,19 +6168,21 @@ do_glob_dat: rela.r_addend = 0; } - elf_append_rela (output_bfd, htab->elf.srelgot, &rela); + elf_append_rela (output_bfd, relgot, &rela); } if (h->needs_copy) { Elf_Internal_Rela rela; + asection *s; /* This symbol needs a copy reloc. Set it up. */ if (h->dynindx == -1 || (h->root.type != bfd_link_hash_defined && h->root.type != bfd_link_hash_defweak) - || htab->srelbss == NULL) + || htab->elf.srelbss == NULL + || htab->elf.sreldynrelro == NULL) abort (); rela.r_offset = (h->root.u.def.value @@ -5755,7 +6190,11 @@ do_glob_dat: + h->root.u.def.section->output_offset); rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); rela.r_addend = 0; - elf_append_rela (output_bfd, htab->srelbss, &rela); + if (h->root.u.def.section == htab->elf.sdynrelro) + s = htab->elf.sreldynrelro; + else + s = htab->elf.srelbss; + elf_append_rela (output_bfd, s, &rela); } return TRUE; @@ -5813,19 +6252,24 @@ elf_x86_64_reloc_type_class (const struct bfd_link_info *info, /* Check relocation against STT_GNU_IFUNC symbol if there are dynamic symbols. 
*/ unsigned long r_symndx = htab->r_sym (rela->r_info); - Elf_Internal_Sym sym; - if (!bed->s->swap_symbol_in (abfd, - (htab->elf.dynsym->contents - + r_symndx * bed->s->sizeof_sym), - 0, &sym)) - abort (); + if (r_symndx != STN_UNDEF) + { + Elf_Internal_Sym sym; + if (!bed->s->swap_symbol_in (abfd, + (htab->elf.dynsym->contents + + r_symndx * bed->s->sizeof_sym), + 0, &sym)) + abort (); - if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) - return reloc_class_ifunc; + if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) + return reloc_class_ifunc; + } } switch ((int) ELF32_R_TYPE (rela->r_info)) { + case R_X86_64_IRELATIVE: + return reloc_class_ifunc; case R_X86_64_RELATIVE: case R_X86_64_RELATIVE64: return reloc_class_relative; @@ -5901,21 +6345,6 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, dyn.d_un.d_val = s->size; break; - case DT_RELASZ: - /* The procedure linkage table relocs (DT_JMPREL) should - not be included in the overall relocs (DT_RELA). - Therefore, we override the DT_RELASZ entry here to - make it not include the JMPREL relocs. Since the - linker script arranges for .rela.plt to follow all - other relocation sections, we don't have to worry - about changing the DT_RELA entry. */ - if (htab->elf.srelplt != NULL) - { - s = htab->elf.srelplt->output_section; - dyn.d_un.d_val -= s->size; - } - break; - case DT_TLSDESC_PLT: s = htab->elf.splt; dyn.d_un.d_ptr = s->output_section->vma + s->output_offset @@ -6007,7 +6436,7 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, { if (bfd_is_abs_section (htab->elf.sgotplt->output_section)) { - (*_bfd_error_handler) + _bfd_error_handler (_("discarded output section: `%A'"), htab->elf.sgotplt); return FALSE; } @@ -6059,15 +6488,64 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, } } + /* Adjust .eh_frame for .plt.got section. */ + if (htab->plt_got_eh_frame != NULL + && htab->plt_got_eh_frame->contents != NULL) + { + if (htab->plt_got != NULL + && htab->plt_got->size != 0 + && (htab->plt_got->flags & SEC_EXCLUDE) == 0 + && htab->plt_got->output_section != NULL + && htab->plt_got_eh_frame->output_section != NULL) + { + bfd_vma plt_start = htab->plt_got->output_section->vma; + bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma + + htab->plt_got_eh_frame->output_offset + + PLT_FDE_START_OFFSET; + bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, + htab->plt_got_eh_frame->contents + + PLT_FDE_START_OFFSET); + } + if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME) + { + if (! _bfd_elf_write_section_eh_frame (output_bfd, info, + htab->plt_got_eh_frame, + htab->plt_got_eh_frame->contents)) + return FALSE; + } + } + + /* Adjust .eh_frame for .plt.bnd section. */ + if (htab->plt_bnd_eh_frame != NULL + && htab->plt_bnd_eh_frame->contents != NULL) + { + if (htab->plt_bnd != NULL + && htab->plt_bnd->size != 0 + && (htab->plt_bnd->flags & SEC_EXCLUDE) == 0 + && htab->plt_bnd->output_section != NULL + && htab->plt_bnd_eh_frame->output_section != NULL) + { + bfd_vma plt_start = htab->plt_bnd->output_section->vma; + bfd_vma eh_frame_start = htab->plt_bnd_eh_frame->output_section->vma + + htab->plt_bnd_eh_frame->output_offset + + PLT_FDE_START_OFFSET; + bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, + htab->plt_bnd_eh_frame->contents + + PLT_FDE_START_OFFSET); + } + if (htab->plt_bnd_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME) + { + if (! 
_bfd_elf_write_section_eh_frame (output_bfd, info, + htab->plt_bnd_eh_frame, + htab->plt_bnd_eh_frame->contents)) + return FALSE; + } + } + if (htab->elf.sgot && htab->elf.sgot->size > 0) elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE; - /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ - htab_traverse (htab->loc_hash_table, - elf_x86_64_finish_local_dynamic_symbol, - info); - /* Fill PLT entries for undefined weak symbols in PIE. */ if (bfd_link_pie (info)) bfd_hash_traverse (&info->hash->table, @@ -6077,6 +6555,33 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, return TRUE; } +/* Fill PLT/GOT entries and allocate dynamic relocations for local + STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table. + It has to be done before elf_link_sort_relocs is called so that + dynamic relocations are properly sorted. */ + +static bfd_boolean +elf_x86_64_output_arch_local_syms + (bfd *output_bfd ATTRIBUTE_UNUSED, + struct bfd_link_info *info, + void *flaginfo ATTRIBUTE_UNUSED, + int (*func) (void *, const char *, + Elf_Internal_Sym *, + asection *, + struct elf_link_hash_entry *) ATTRIBUTE_UNUSED) +{ + struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info); + if (htab == NULL) + return FALSE; + + /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ + htab_traverse (htab->loc_hash_table, + elf_x86_64_finish_local_dynamic_symbol, + info); + + return TRUE; +} + /* Return an array of PLT entry symbol values. */ static bfd_vma * @@ -6221,7 +6726,7 @@ elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, static bfd_boolean elf_x86_64_add_symbol_hook (bfd *abfd, - struct bfd_link_info *info, + struct bfd_link_info *info ATTRIBUTE_UNUSED, Elf_Internal_Sym *sym, const char **namep ATTRIBUTE_UNUSED, flagword *flagsp ATTRIBUTE_UNUSED, @@ -6250,12 +6755,6 @@ elf_x86_64_add_symbol_hook (bfd *abfd, return TRUE; } - if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE - && (abfd->flags & DYNAMIC) == 0 - && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) - elf_tdata (info->output_bfd)->has_gnu_symbols - |= elf_gnu_symbol_unique; - return TRUE; } @@ -6398,8 +6897,80 @@ elf_x86_64_relocs_compatible (const bfd_target *input, && _bfd_elf_relocs_compatible (input, output)); } +/* Parse x86-64 GNU properties. */ + +static enum elf_property_kind +elf_x86_64_parse_gnu_properties (bfd *abfd, unsigned int type, + bfd_byte *ptr, unsigned int datasz) +{ + elf_property *prop; + + switch (type) + { + case GNU_PROPERTY_X86_ISA_1_USED: + case GNU_PROPERTY_X86_ISA_1_NEEDED: + if (datasz != 4) + { + _bfd_error_handler + ((type == GNU_PROPERTY_X86_ISA_1_USED + ? _("error: %B: ") + : _("error: %B: ")), + abfd, datasz); + return property_corrupt; + } + prop = _bfd_elf_get_property (abfd, type, datasz); + prop->u.number = bfd_h_get_32 (abfd, ptr); + prop->pr_kind = property_number; + break; + + default: + return property_ignored; + } + + return property_number; +} + +/* Merge x86-64 GNU property BPROP with APROP. If APROP isn't NULL, + return TRUE if APROP is updated. Otherwise, return TRUE if BPROP + should be merged with ABFD. */ + +static bfd_boolean +elf_x86_64_merge_gnu_properties (bfd *abfd ATTRIBUTE_UNUSED, + elf_property *aprop, + elf_property *bprop) +{ + unsigned int number; + bfd_boolean updated = FALSE; + unsigned int pr_type = aprop != NULL ? 
aprop->pr_type : bprop->pr_type; + + switch (pr_type) + { + case GNU_PROPERTY_X86_ISA_1_USED: + case GNU_PROPERTY_X86_ISA_1_NEEDED: + if (aprop != NULL && bprop != NULL) + { + number = aprop->u.number; + aprop->u.number = number | bprop->u.number; + updated = number != (unsigned int) aprop->u.number; + } + else + { + /* Return TRUE if APROP is NULL to indicate that BPROP should + be added to ABFD. */ + updated = aprop == NULL; + } + break; + + default: + /* Never should happen. */ + abort (); + } + + return updated; +} + static const struct bfd_elf_special_section - elf_x86_64_special_sections[]= +elf_x86_64_special_sections[]= { { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, @@ -6428,6 +6999,9 @@ static const struct bfd_elf_special_section #define elf_backend_rela_normal 1 #define elf_backend_plt_alignment 4 #define elf_backend_extern_protected_data 1 +#define elf_backend_caches_rawsize 1 +#define elf_backend_dtrel_excludes_plt 1 +#define elf_backend_want_dynrelro 1 #define elf_info_to_howto elf_x86_64_info_to_howto @@ -6444,6 +7018,7 @@ static const struct bfd_elf_special_section #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol +#define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo @@ -6486,6 +7061,10 @@ static const struct bfd_elf_special_section ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true) #define elf_backend_fixup_symbol \ elf_x86_64_fixup_symbol +#define elf_backend_parse_gnu_properties \ + elf_x86_64_parse_gnu_properties +#define elf_backend_merge_gnu_properties \ + elf_x86_64_merge_gnu_properties #include "elf64-target.h" @@ -6549,18 +7128,18 @@ static const struct bfd_elf_special_section #define elf_backend_strtab_flags SHF_STRINGS static bfd_boolean -elf64_x86_64_set_special_info_link (const bfd *ibfd ATTRIBUTE_UNUSED, - bfd *obfd ATTRIBUTE_UNUSED, - const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED, - Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED) +elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED, + bfd *obfd ATTRIBUTE_UNUSED, + const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED, + Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED) { /* PR 19938: FIXME: Need to add code for setting the sh_info and sh_link fields of Solaris specific section types. */ return FALSE; } -#undef elf_backend_set_special_section_info_and_link -#define elf_backend_set_special_section_info_and_link elf64_x86_64_set_special_info_link +#undef elf_backend_copy_special_section_fields +#define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields #include "elf64-target.h" @@ -6594,7 +7173,7 @@ elf64_x86_64_nacl_elf_object_p (bfd *abfd) #undef elf_backend_want_plt_sym #define elf_backend_want_plt_sym 0 #undef elf_backend_strtab_flags -#undef elf_backend_set_special_section_info_and_link +#undef elf_backend_copy_special_section_fields /* NaCl uses substantially different PLT entries for the same effects. 
*/ @@ -6615,11 +7194,11 @@ static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ /* 32 bytes of nop to pad out to the standard size. */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ - 0x66, /* excess data32 prefix */ + 0x66, /* excess data16 prefix */ 0x90 /* nop */ }; @@ -6631,7 +7210,7 @@ static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 0x41, 0xff, 0xe3, /* jmpq *%r11 */ /* 15-byte nop sequence to pad out to the next 32-byte boundary. */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ /* Lazy GOT entries point here (32-byte aligned). */ @@ -6641,7 +7220,7 @@ static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ /* 22 bytes of nop to pad out to the standard size. */ - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ }; @@ -6703,6 +7282,8 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = 32, /* plt_lazy_offset */ elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */ + NULL, /* eh_frame_plt_got */ + 0, /* eh_frame_plt_got_size */ }; #undef elf_backend_arch_data
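
For readers following the .plt.got and .plt.bnd unwind-info hunks above: elf_x86_64_size_dynamic_sections copies the pre-built .eh_frame template into the new sections and stores the size of the covered PLT section at PLT_FDE_LEN_OFFSET, and elf_x86_64_finish_dynamic_sections later stores the PC-relative distance from the FDE's initial-location field to the start of the covered section at PLT_FDE_START_OFFSET. The standalone sketch below models only that byte-patching step; the buffer, offsets and addresses are illustrative stand-ins, not the real BFD objects or the actual PLT_FDE_* values.

/* Sketch of the FDE patching done for the .plt.got/.plt.bnd unwind info.
   Hypothetical offsets and addresses; the real code uses bfd_put_32 and
   bfd_put_signed_32 on the section contents.  */
#include <stdint.h>
#include <stdio.h>

/* x86-64 ELF data is little-endian, so mimic bfd_put_32 for that case.  */
static void
put_le32 (uint8_t *buf, uint32_t val)
{
  buf[0] = val & 0xff;
  buf[1] = (val >> 8) & 0xff;
  buf[2] = (val >> 16) & 0xff;
  buf[3] = (val >> 24) & 0xff;
}

int
main (void)
{
  uint8_t eh_frame[64] = { 0 };     /* Stand-in for the copied template.  */
  unsigned fde_len_offset = 4;      /* Hypothetical PLT_FDE_LEN_OFFSET.    */
  unsigned fde_start_offset = 8;    /* Hypothetical PLT_FDE_START_OFFSET.  */

  uint32_t plt_got_size = 0x20;     /* Example size of .plt.got.           */
  uint32_t plt_start = 0x401020;    /* Example start of .plt.got.          */
  uint32_t field_addr = 0x4860b8;   /* Example address of the FDE field.   */

  /* Record how many bytes of code the FDE covers ...  */
  put_le32 (eh_frame + fde_len_offset, plt_got_size);
  /* ... and the PC-relative offset from the field to that code.  */
  put_le32 (eh_frame + fde_start_offset,
            (uint32_t) (plt_start - field_addr));

  printf ("size field 0x%x, pc-relative start %d\n",
          plt_got_size, (int32_t) (plt_start - field_addr));
  return 0;
}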
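
The new elf_x86_64_parse_gnu_properties and elf_x86_64_merge_gnu_properties hooks treat GNU_PROPERTY_X86_ISA_1_USED and GNU_PROPERTY_X86_ISA_1_NEEDED as 32-bit bitmasks and merge them by bitwise OR, reporting whether the existing property actually changed. A minimal model of that merge rule follows, using a made-up struct rather than the real elf_property type; the bit values are examples only.

/* Simplified model of the ISA-property merge: output is the bitwise OR of
   the two inputs, and the caller learns whether anything was added.  */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct isa_property
{
  uint32_t isa_bits;    /* Bitmask of ISA features seen in one input.  */
};

/* Merge B into A; return true if A was modified.  */
static bool
merge_isa_property (struct isa_property *a, const struct isa_property *b)
{
  uint32_t old = a->isa_bits;
  a->isa_bits = old | b->isa_bits;
  return a->isa_bits != old;
}

int
main (void)
{
  struct isa_property out = { 0x3 };   /* Features recorded so far.      */
  struct isa_property in  = { 0x6 };   /* Features from the next input.  */

  bool updated = merge_isa_property (&out, &in);
  printf ("merged bits: 0x%x, updated: %s\n",
          out.isa_bits, updated ? "yes" : "no");
  return 0;
}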