+ if (bed->target_id == X86_64_ELF_DATA)
+ {
+ ret->sizeof_reloc = sizeof (Elf32_External_Rela);
+ ret->pointer_r_type = R_X86_64_32;
+ ret->dynamic_interpreter = ELFX32_DYNAMIC_INTERPRETER;
+ ret->dynamic_interpreter_size
+ = sizeof ELFX32_DYNAMIC_INTERPRETER;
+ }
+ else
+ {
+ ret->is_reloc_section = elf_i386_is_reloc_section;
+ ret->dt_reloc = DT_REL;
+ ret->dt_reloc_sz = DT_RELSZ;
+ ret->dt_reloc_ent = DT_RELENT;
+ ret->sizeof_reloc = sizeof (Elf32_External_Rel);
+ ret->got_entry_size = 4;
+ ret->pcrel_plt = FALSE;
+ ret->pointer_r_type = R_386_32;
+ ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
+ ret->dynamic_interpreter_size
+ = sizeof ELF32_DYNAMIC_INTERPRETER;
+ ret->tls_get_addr = "___tls_get_addr";
+ }
+ }
+
+ ret->loc_hash_table = htab_try_create (1024,
+ _bfd_x86_elf_local_htab_hash,
+ _bfd_x86_elf_local_htab_eq,
+ NULL);
+ ret->loc_hash_memory = objalloc_create ();
+ if (!ret->loc_hash_table || !ret->loc_hash_memory)
+ {
+ elf_x86_link_hash_table_free (abfd);
+ return NULL;
+ }
+ ret->elf.root.hash_table_free = elf_x86_link_hash_table_free;
+
+ return &ret->elf.root;
+}
+
+/* qsort-style comparator: order relocation entries by ascending
+ address. Returns -1, 0 or 1 as the usual three-way contract. */
+
+int
+_bfd_x86_elf_compare_relocs (const void *ap, const void *bp)
+{
+ const arelent *ra = * (const arelent **) ap;
+ const arelent *rb = * (const arelent **) bp;
+
+ if (ra->address < rb->address)
+ return -1;
+ if (ra->address > rb->address)
+ return 1;
+ return 0;
+}
+
+/* Mark the symbol NAME as locally defined by the linker when it is
+ referenced but not defined by any relocatable object file. */
+
+static void
+elf_x86_linker_defined (struct bfd_link_info *info, const char *name)
+{
+ struct elf_link_hash_entry *h
+ = elf_link_hash_lookup (elf_hash_table (info), name,
+ FALSE, FALSE, FALSE);
+
+ if (h == NULL)
+ return;
+
+ /* Resolve any chain of indirect symbols first. */
+ while (h->root.type == bfd_link_hash_indirect)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ switch (h->root.type)
+ {
+ case bfd_link_hash_new:
+ case bfd_link_hash_undefined:
+ case bfd_link_hash_undefweak:
+ case bfd_link_hash_common:
+ break;
+ default:
+ /* Already defined: only take it over when the sole definition
+ comes from a shared object. */
+ if (h->def_regular || !h->def_dynamic)
+ return;
+ break;
+ }
+
+ elf_x86_hash_entry (h)->local_ref = 2;
+ elf_x86_hash_entry (h)->linker_def = 1;
+}
+
+/* Give the linker-defined symbol NAME hidden treatment when it was
+ declared with hidden or internal visibility. */
+
+static void
+elf_x86_hide_linker_defined (struct bfd_link_info *info,
+ const char *name)
+{
+ unsigned int visibility;
+ struct elf_link_hash_entry *h
+ = elf_link_hash_lookup (elf_hash_table (info), name,
+ FALSE, FALSE, FALSE);
+
+ if (h == NULL)
+ return;
+
+ /* Skip over any indirect symbols. */
+ while (h->root.type == bfd_link_hash_indirect)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ visibility = ELF_ST_VISIBILITY (h->other);
+ if (visibility == STV_INTERNAL || visibility == STV_HIDDEN)
+ _bfd_elf_link_hash_hide_symbol (info, h, TRUE);
+}
+
+/* x86 wrapper around the generic ELF check_relocs hook: before
+ delegating, flag __tls_get_addr references and pre-mark the
+ symbols the linker itself will define later. */
+
+bfd_boolean
+_bfd_x86_elf_link_check_relocs (bfd *abfd, struct bfd_link_info *info)
+{
+ if (!bfd_link_relocatable (info))
+ {
+ /* Check for __tls_get_addr reference. */
+ struct elf_x86_link_hash_table *htab;
+ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
+ htab = elf_x86_hash_table (info, bed->target_id);
+ if (htab)
+ {
+ struct elf_link_hash_entry *h;
+
+ /* htab->tls_get_addr holds the target-specific name
+ ("___tls_get_addr" or "__tls_get_addr"). */
+ h = elf_link_hash_lookup (elf_hash_table (info),
+ htab->tls_get_addr,
+ FALSE, FALSE, FALSE);
+ if (h != NULL)
+ {
+ elf_x86_hash_entry (h)->tls_get_addr = 1;
+
+ /* Check the versioned __tls_get_addr symbol. */
+ while (h->root.type == bfd_link_hash_indirect)
+ {
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ elf_x86_hash_entry (h)->tls_get_addr = 1;
+ }
+ }
+
+ /* "__ehdr_start" will be defined by linker as a hidden symbol
+ later if it is referenced and not defined. */
+ elf_x86_linker_defined (info, "__ehdr_start");
+
+ if (bfd_link_executable (info))
+ {
+ /* References to __bss_start, _end and _edata should be
+ locally resolved within executables. */
+ elf_x86_linker_defined (info, "__bss_start");
+ elf_x86_linker_defined (info, "_end");
+ elf_x86_linker_defined (info, "_edata");
+ }
+ else
+ {
+ /* Hide hidden __bss_start, _end and _edata in shared
+ libraries. */
+ elf_x86_hide_linker_defined (info, "__bss_start");
+ elf_x86_hide_linker_defined (info, "_end");
+ elf_x86_hide_linker_defined (info, "_edata");
+ }
+ }
+ }
+
+ /* Invoke the regular ELF backend linker to do all the work. */
+ return _bfd_elf_link_check_relocs (abfd, info);
+}
+
+/* Return TRUE if the relocation REL in INPUT_SECTION is permitted.
+ When REL is against a non-preemptible absolute symbol under PIC,
+ only a whitelist of absolute/GOTPCREL relocation types is allowed;
+ disallowed ones produce a fatal diagnostic. On success against an
+ absolute symbol, *NO_DYNRELOC_P is set to TRUE to suppress the
+ dynamic relocation that would otherwise be emitted. */
+
+bfd_boolean
+_bfd_elf_x86_valid_reloc_p (asection *input_section,
+ struct bfd_link_info *info,
+ struct elf_x86_link_hash_table *htab,
+ const Elf_Internal_Rela *rel,
+ struct elf_link_hash_entry *h,
+ Elf_Internal_Sym *sym,
+ Elf_Internal_Shdr *symtab_hdr,
+ bfd_boolean *no_dynreloc_p)
+{
+ bfd_boolean valid_p = TRUE;
+
+ *no_dynreloc_p = FALSE;
+
+ /* Check if relocation against non-preemptible absolute symbol is
+ valid in PIC. FIXME: Can't use SYMBOL_REFERENCES_LOCAL_P since
+ it may call _bfd_elf_link_hide_sym_by_version and result in
+ ld-elfvers/ vers21 test failure. */
+ if (bfd_link_pic (info)
+ && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
+ {
+ const struct elf_backend_data *bed;
+ unsigned int r_type;
+ Elf_Internal_Rela irel;
+
+ /* Skip non-absolute symbol. */
+ if (h)
+ {
+ if (!ABS_SYMBOL_P (h))
+ return valid_p;
+ }
+ else if (sym->st_shndx != SHN_ABS)
+ return valid_p;
+
+ bed = get_elf_backend_data (input_section->owner);
+ r_type = ELF32_R_TYPE (rel->r_info);
+ irel = *rel;
+
+ /* Only allow relocations against absolute symbol, which can be
+ resolved as absolute value + addend. GOTPCREL relocations
+ are allowed since absolute value + addend is stored in the
+ GOT slot. */
+ if (bed->target_id == X86_64_ELF_DATA)
+ {
+ /* Strip the internal marker used for converted GOTPCREL
+ relocations before comparing against real types. */
+ r_type &= ~R_X86_64_converted_reloc_bit;
+ valid_p = (r_type == R_X86_64_64
+ || r_type == R_X86_64_32
+ || r_type == R_X86_64_32S
+ || r_type == R_X86_64_16
+ || r_type == R_X86_64_8
+ || r_type == R_X86_64_GOTPCREL
+ || r_type == R_X86_64_GOTPCRELX
+ || r_type == R_X86_64_REX_GOTPCRELX);
+ if (!valid_p)
+ {
+ /* Rebuild r_info with the unconverted type so the
+ diagnostic below names the real relocation. */
+ unsigned int r_symndx = htab->r_sym (rel->r_info);
+ irel.r_info = htab->r_info (r_symndx, r_type);
+ }
+ }
+ else
+ valid_p = (r_type == R_386_32
+ || r_type == R_386_16
+ || r_type == R_386_8);
+
+ if (valid_p)
+ *no_dynreloc_p = TRUE;
+ else
+ {
+ const char *name;
+ arelent internal_reloc;
+
+ /* Recover the howto so the error message can print the
+ relocation's name. */
+ if (!bed->elf_info_to_howto (input_section->owner,
+ &internal_reloc, &irel)
+ || internal_reloc.howto == NULL)
+ abort ();
+
+ if (h)
+ name = h->root.root.string;
+ else
+ name = bfd_elf_sym_name (input_section->owner, symtab_hdr,
+ sym, NULL);
+ info->callbacks->einfo
+ /* xgettext:c-format */
+ (_("%F%P: %pB: relocation %s against absolute symbol "
+ "`%s' in section `%pA' is disallowed\n"),
+ input_section->owner, internal_reloc.howto->name, name,
+ input_section);
+ bfd_set_error (bfd_error_bad_value);
+ }
+ }
+
+ return valid_p;
+}
+
+/* Set the sizes of the dynamic sections. Walks every input bfd to
+ lay out local GOT/TLS entries and dynamic relocs, sizes the PLT,
+ GOT and eh_frame sections, allocates their contents, and adds the
+ required .dynamic entries. */
+
+bfd_boolean
+_bfd_x86_elf_size_dynamic_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ struct elf_x86_link_hash_table *htab;
+ bfd *dynobj;
+ asection *s;
+ bfd_boolean relocs;
+ bfd *ibfd;
+ const struct elf_backend_data *bed
+ = get_elf_backend_data (output_bfd);
+
+ htab = elf_x86_hash_table (info, bed->target_id);
+ if (htab == NULL)
+ return FALSE;
+ dynobj = htab->elf.dynobj;
+ if (dynobj == NULL)
+ abort ();
+
+ /* Set up .got offsets for local syms, and space for local dynamic
+ relocs. */
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
+ {
+ bfd_signed_vma *local_got;
+ bfd_signed_vma *end_local_got;
+ char *local_tls_type;
+ bfd_vma *local_tlsdesc_gotent;
+ bfd_size_type locsymcount;
+ Elf_Internal_Shdr *symtab_hdr;
+ asection *srel;
+
+ if (! is_x86_elf (ibfd, htab))
+ continue;
+
+ /* First pass: account for local dynamic relocs recorded per
+ input section during check_relocs. */
+ for (s = ibfd->sections; s != NULL; s = s->next)
+ {
+ struct elf_dyn_relocs *p;
+
+ for (p = ((struct elf_dyn_relocs *)
+ elf_section_data (s)->local_dynrel);
+ p != NULL;
+ p = p->next)
+ {
+ if (!bfd_is_abs_section (p->sec)
+ && bfd_is_abs_section (p->sec->output_section))
+ {
+ /* Input section has been discarded, either because
+ it is a copy of a linkonce section or due to
+ linker script /DISCARD/, so we'll be discarding
+ the relocs too. */
+ }
+ else if (htab->elf.target_os == is_vxworks
+ && strcmp (p->sec->output_section->name,
+ ".tls_vars") == 0)
+ {
+ /* Relocations in vxworks .tls_vars sections are
+ handled specially by the loader. */
+ }
+ else if (p->count != 0)
+ {
+ srel = elf_section_data (p->sec)->sreloc;
+ srel->size += p->count * htab->sizeof_reloc;
+ if ((p->sec->output_section->flags & SEC_READONLY) != 0
+ && (info->flags & DF_TEXTREL) == 0)
+ {
+ /* Dynamic relocs in read-only sections require
+ DT_TEXTREL; warn if the user asked for it. */
+ info->flags |= DF_TEXTREL;
+ if (bfd_link_textrel_check (info))
+ /* xgettext:c-format */
+ info->callbacks->einfo
+ (_("%P: %pB: warning: relocation "
+ "in read-only section `%pA'\n"),
+ p->sec->owner, p->sec);
+ }
+ }
+ }
+ }
+
+ local_got = elf_local_got_refcounts (ibfd);
+ if (!local_got)
+ continue;
+
+ /* Second pass: assign GOT slots (and TLS descriptor slots) to
+ local symbols with positive refcounts. */
+ symtab_hdr = &elf_symtab_hdr (ibfd);
+ locsymcount = symtab_hdr->sh_info;
+ end_local_got = local_got + locsymcount;
+ local_tls_type = elf_x86_local_got_tls_type (ibfd);
+ local_tlsdesc_gotent = elf_x86_local_tlsdesc_gotent (ibfd);
+ s = htab->elf.sgot;
+ srel = htab->elf.srelgot;
+ for (; local_got < end_local_got;
+ ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
+ {
+ *local_tlsdesc_gotent = (bfd_vma) -1;
+ if (*local_got > 0)
+ {
+ if (GOT_TLS_GDESC_P (*local_tls_type))
+ {
+ /* TLS descriptors live in .got.plt, past the jump
+ table; two GOT-entry-sized words each. */
+ *local_tlsdesc_gotent = htab->elf.sgotplt->size
+ - elf_x86_compute_jump_table_size (htab);
+ htab->elf.sgotplt->size += 2 * htab->got_entry_size;
+ *local_got = (bfd_vma) -2;
+ }
+ if (! GOT_TLS_GDESC_P (*local_tls_type)
+ || GOT_TLS_GD_P (*local_tls_type))
+ {
+ *local_got = s->size;
+ s->size += htab->got_entry_size;
+ /* GD and IE-both need a second GOT entry. */
+ if (GOT_TLS_GD_P (*local_tls_type)
+ || *local_tls_type == GOT_TLS_IE_BOTH)
+ s->size += htab->got_entry_size;
+ }
+ if ((bfd_link_pic (info) && *local_tls_type != GOT_ABS)
+ || GOT_TLS_GD_ANY_P (*local_tls_type)
+ || (*local_tls_type & GOT_TLS_IE))
+ {
+ if (*local_tls_type == GOT_TLS_IE_BOTH)
+ srel->size += 2 * htab->sizeof_reloc;
+ else if (GOT_TLS_GD_P (*local_tls_type)
+ || ! GOT_TLS_GDESC_P (*local_tls_type))
+ srel->size += htab->sizeof_reloc;
+ if (GOT_TLS_GDESC_P (*local_tls_type))
+ {
+ htab->elf.srelplt->size += htab->sizeof_reloc;
+ /* -1 marks tlsdesc_plt as needed; its real value
+ is filled in below. */
+ if (bed->target_id == X86_64_ELF_DATA)
+ htab->elf.tlsdesc_plt = (bfd_vma) -1;
+ }
+ }
+ }
+ else
+ *local_got = (bfd_vma) -1;
+ }
+ }
+
+ if (htab->tls_ld_or_ldm_got.refcount > 0)
+ {
+ /* Allocate 2 got entries and 1 dynamic reloc for R_386_TLS_LDM
+ or R_X86_64_TLSLD relocs. */
+ htab->tls_ld_or_ldm_got.offset = htab->elf.sgot->size;
+ htab->elf.sgot->size += 2 * htab->got_entry_size;
+ htab->elf.srelgot->size += htab->sizeof_reloc;
+ }
+ else
+ htab->tls_ld_or_ldm_got.offset = -1;
+
+ /* Allocate global sym .plt and .got entries, and space for global
+ sym dynamic relocs. */
+ elf_link_hash_traverse (&htab->elf, elf_x86_allocate_dynrelocs,
+ info);
+
+ /* Allocate .plt and .got entries, and space for local symbols. */
+ htab_traverse (htab->loc_hash_table, elf_x86_allocate_local_dynreloc,
+ info);
+
+ /* For every jump slot reserved in the sgotplt, reloc_count is
+ incremented. However, when we reserve space for TLS descriptors,
+ it's not incremented, so in order to compute the space reserved
+ for them, it suffices to multiply the reloc count by the jump
+ slot size.
+
+ PR ld/13302: We start next_irelative_index at the end of .rela.plt
+ so that R_{386,X86_64}_IRELATIVE entries come last. */
+ if (htab->elf.srelplt)
+ {
+ htab->next_tls_desc_index = htab->elf.srelplt->reloc_count;
+ htab->sgotplt_jump_table_size
+ = elf_x86_compute_jump_table_size (htab);
+ htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
+ }
+ else if (htab->elf.irelplt)
+ htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
+
+ if (htab->elf.tlsdesc_plt)
+ {
+ /* NB: tlsdesc_plt is set only for x86-64. If we're not using
+ lazy TLS relocations, don't generate the PLT and GOT entries
+ they require. */
+ if ((info->flags & DF_BIND_NOW))
+ htab->elf.tlsdesc_plt = 0;
+ else
+ {
+ htab->elf.tlsdesc_got = htab->elf.sgot->size;
+ htab->elf.sgot->size += htab->got_entry_size;
+ /* Reserve room for the initial entry.
+ FIXME: we could probably do away with it in this case. */
+ if (htab->elf.splt->size == 0)
+ htab->elf.splt->size = htab->plt.plt_entry_size;
+ htab->elf.tlsdesc_plt = htab->elf.splt->size;
+ htab->elf.splt->size += htab->plt.plt_entry_size;
+ }
+ }
+
+ if (htab->elf.sgotplt)
+ {
+ /* Don't allocate .got.plt section if there are no GOT nor PLT
+ entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
+ if ((htab->elf.hgot == NULL
+ || !htab->got_referenced)
+ && (htab->elf.sgotplt->size == bed->got_header_size)
+ && (htab->elf.splt == NULL
+ || htab->elf.splt->size == 0)
+ && (htab->elf.sgot == NULL
+ || htab->elf.sgot->size == 0)
+ && (htab->elf.iplt == NULL
+ || htab->elf.iplt->size == 0)
+ && (htab->elf.igotplt == NULL
+ || htab->elf.igotplt->size == 0))
+ {
+ htab->elf.sgotplt->size = 0;
+ /* Solaris requires to keep _GLOBAL_OFFSET_TABLE_ even if it
+ isn't used. */
+ if (htab->elf.hgot != NULL
+ && htab->elf.target_os != is_solaris)
+ {
+ /* Remove the unused _GLOBAL_OFFSET_TABLE_ from symbol
+ table. */
+ htab->elf.hgot->root.type = bfd_link_hash_undefined;
+ htab->elf.hgot->root.u.undef.abfd
+ = htab->elf.hgot->root.u.def.section->owner;
+ htab->elf.hgot->root.linker_def = 0;
+ htab->elf.hgot->ref_regular = 0;
+ htab->elf.hgot->def_regular = 0;
+ }
+ }
+ }
+
+ if (_bfd_elf_eh_frame_present (info))
+ {
+ if (htab->plt_eh_frame != NULL
+ && htab->elf.splt != NULL
+ && htab->elf.splt->size != 0
+ && !bfd_is_abs_section (htab->elf.splt->output_section))
+ htab->plt_eh_frame->size = htab->plt.eh_frame_plt_size;
+
+ if (htab->plt_got_eh_frame != NULL
+ && htab->plt_got != NULL
+ && htab->plt_got->size != 0
+ && !bfd_is_abs_section (htab->plt_got->output_section))
+ htab->plt_got_eh_frame->size
+ = htab->non_lazy_plt->eh_frame_plt_size;
+
+ /* Unwind info for the second PLT and .plt.got sections are
+ identical. */
+ if (htab->plt_second_eh_frame != NULL
+ && htab->plt_second != NULL
+ && htab->plt_second->size != 0
+ && !bfd_is_abs_section (htab->plt_second->output_section))
+ htab->plt_second_eh_frame->size
+ = htab->non_lazy_plt->eh_frame_plt_size;
+ }
+
+ /* We now have determined the sizes of the various dynamic sections.
+ Allocate memory for them. */
+ relocs = FALSE;
+ for (s = dynobj->sections; s != NULL; s = s->next)
+ {
+ bfd_boolean strip_section = TRUE;
+
+ if ((s->flags & SEC_LINKER_CREATED) == 0)
+ continue;
+
+ if (s == htab->elf.splt
+ || s == htab->elf.sgot)
+ {
+ /* Strip this section if we don't need it; see the
+ comment below. */
+ /* We'd like to strip these sections if they aren't needed, but if
+ we've exported dynamic symbols from them we must leave them.
+ It's too late to tell BFD to get rid of the symbols. */
+
+ if (htab->elf.hplt != NULL)
+ strip_section = FALSE;
+ }
+ else if (s == htab->elf.sgotplt
+ || s == htab->elf.iplt
+ || s == htab->elf.igotplt
+ || s == htab->plt_second
+ || s == htab->plt_got
+ || s == htab->plt_eh_frame
+ || s == htab->plt_got_eh_frame
+ || s == htab->plt_second_eh_frame
+ || s == htab->elf.sdynbss
+ || s == htab->elf.sdynrelro)
+ {
+ /* Strip these too. */
+ }
+ else if (htab->is_reloc_section (bfd_section_name (s)))
+ {
+ if (s->size != 0
+ && s != htab->elf.srelplt
+ && s != htab->srelplt2)
+ relocs = TRUE;
+
+ /* We use the reloc_count field as a counter if we need
+ to copy relocs into the output file. */
+ if (s != htab->elf.srelplt)
+ s->reloc_count = 0;
+ }
+ else
+ {
+ /* It's not one of our sections, so don't allocate space. */
+ continue;
+ }
+
+ if (s->size == 0)
+ {
+ /* If we don't need this section, strip it from the
+ output file. This is mostly to handle .rel.bss and
+ .rel.plt. We must create both sections in
+ create_dynamic_sections, because they must be created
+ before the linker maps input sections to output
+ sections. The linker does that before
+ adjust_dynamic_symbol is called, and it is that
+ function which decides whether anything needs to go
+ into these sections. */
+ if (strip_section)
+ s->flags |= SEC_EXCLUDE;
+ continue;
+ }
+
+ if ((s->flags & SEC_HAS_CONTENTS) == 0)
+ continue;
+
+ /* NB: Initially, the iplt section has minimal alignment to
+ avoid moving dot of the following section backwards when
+ it is empty. Update its section alignment now since it
+ is non-empty. */
+ if (s == htab->elf.iplt)
+ bfd_set_section_alignment (s, htab->plt.iplt_alignment);
+
+ /* Allocate memory for the section contents. We use bfd_zalloc
+ here in case unused entries are not reclaimed before the
+ section's contents are written out. This should not happen,
+ but this way if it does, we get a R_386_NONE or R_X86_64_NONE
+ reloc instead of garbage. */
+ s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
+ if (s->contents == NULL)
+ return FALSE;
+ }
+
+ /* Fill in the eh_frame templates now that the PLT sizes are known;
+ the FDE length field records the covered PLT's size. */
+ if (htab->plt_eh_frame != NULL
+ && htab->plt_eh_frame->contents != NULL)
+ {
+ memcpy (htab->plt_eh_frame->contents,
+ htab->plt.eh_frame_plt,
+ htab->plt_eh_frame->size);
+ bfd_put_32 (dynobj, htab->elf.splt->size,
+ htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
+ }
+
+ if (htab->plt_got_eh_frame != NULL
+ && htab->plt_got_eh_frame->contents != NULL)
+ {
+ memcpy (htab->plt_got_eh_frame->contents,
+ htab->non_lazy_plt->eh_frame_plt,
+ htab->plt_got_eh_frame->size);
+ bfd_put_32 (dynobj, htab->plt_got->size,
+ (htab->plt_got_eh_frame->contents
+ + PLT_FDE_LEN_OFFSET));
+ }
+
+ if (htab->plt_second_eh_frame != NULL
+ && htab->plt_second_eh_frame->contents != NULL)
+ {
+ memcpy (htab->plt_second_eh_frame->contents,
+ htab->non_lazy_plt->eh_frame_plt,
+ htab->plt_second_eh_frame->size);
+ bfd_put_32 (dynobj, htab->plt_second->size,
+ (htab->plt_second_eh_frame->contents
+ + PLT_FDE_LEN_OFFSET));
+ }
+
+ if (htab->elf.dynamic_sections_created)
+ {
+ /* Add some entries to the .dynamic section. We fill in the
+ values later, in elf_{i386,x86_64}_finish_dynamic_sections,
+ but we must add the entries now so that we get the correct
+ size for the .dynamic section. The DT_DEBUG entry is filled
+ in by the dynamic linker and used by the debugger. */
+#define add_dynamic_entry(TAG, VAL) \
+ _bfd_elf_add_dynamic_entry (info, TAG, VAL)
+
+ if (bfd_link_executable (info))
+ {
+ if (!add_dynamic_entry (DT_DEBUG, 0))
+ return FALSE;
+ }
+
+ if (htab->elf.splt->size != 0)
+ {
+ /* DT_PLTGOT is used by prelink even if there is no PLT
+ relocation. */
+ if (!add_dynamic_entry (DT_PLTGOT, 0))
+ return FALSE;
+ }
+
+ if (htab->elf.srelplt->size != 0)
+ {
+ if (!add_dynamic_entry (DT_PLTRELSZ, 0)
+ || !add_dynamic_entry (DT_PLTREL, htab->dt_reloc)
+ || !add_dynamic_entry (DT_JMPREL, 0))
+ return FALSE;
+ }
+
+ if (htab->elf.tlsdesc_plt
+ && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
+ || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
+ return FALSE;
+
+ if (relocs)
+ {
+ if (!add_dynamic_entry (htab->dt_reloc, 0)
+ || !add_dynamic_entry (htab->dt_reloc_sz, 0)
+ || !add_dynamic_entry (htab->dt_reloc_ent,
+ htab->sizeof_reloc))
+ return FALSE;
+
+ /* If any dynamic relocs apply to a read-only section,
+ then we need a DT_TEXTREL entry. */
+ if ((info->flags & DF_TEXTREL) == 0)
+ elf_link_hash_traverse (&htab->elf,
+ _bfd_elf_maybe_set_textrel, info);
+
+ if ((info->flags & DF_TEXTREL) != 0)
+ {
+ if (htab->elf.ifunc_resolvers)
+ info->callbacks->einfo
+ (_("%P: warning: GNU indirect functions with DT_TEXTREL "
+ "may result in a segfault at runtime; recompile with %s\n"),
+ bfd_link_dll (info) ? "-fPIC" : "-fPIE");
+
+ if (!add_dynamic_entry (DT_TEXTREL, 0))
+ return FALSE;
+ }
+ }
+ if (htab->elf.target_os == is_vxworks
+ && !elf_vxworks_add_dynamic_entries (output_bfd, info))
+ return FALSE;
+ }
+#undef add_dynamic_entry
+
+ return TRUE;
+}
+
+/* Finish up the x86 dynamic sections. */
+
+struct elf_x86_link_hash_table *
+_bfd_x86_elf_finish_dynamic_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ struct elf_x86_link_hash_table *htab;
+ const struct elf_backend_data *bed;
+ bfd *dynobj;
+ asection *sdyn;
+ bfd_byte *dyncon, *dynconend;
+ bfd_size_type sizeof_dyn;
+
+ bed = get_elf_backend_data (output_bfd);
+ htab = elf_x86_hash_table (info, bed->target_id);
+ if (htab == NULL)
+ return htab;
+
+ dynobj = htab->elf.dynobj;
+ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
+
+ /* GOT is always created in setup_gnu_properties. But it may not be
+ needed. .got.plt section may be needed for static IFUNC. */
+ if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0)
+ {
+ bfd_vma dynamic_addr;
+
+ if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
+ {
+ _bfd_error_handler
+ (_("discarded output section: `%pA'"), htab->elf.sgotplt);
+ return NULL;
+ }
+
+ elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize
+ = htab->got_entry_size;
+
+ dynamic_addr = (sdyn == NULL
+ ? (bfd_vma) 0
+ : sdyn->output_section->vma + sdyn->output_offset);
+
+ /* Set the first entry in the global offset table to the address
+ of the dynamic section. Write GOT[1] and GOT[2], needed for
+ the dynamic linker. */
+ if (htab->got_entry_size == 8)
+ {
+ bfd_put_64 (output_bfd, dynamic_addr,
+ htab->elf.sgotplt->contents);
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgotplt->contents + 8);
+ bfd_put_64 (output_bfd, (bfd_vma) 0,
+ htab->elf.sgotplt->contents + 8*2);
+ }
+ else
+ {
+ bfd_put_32 (output_bfd, dynamic_addr,
+ htab->elf.sgotplt->contents);
+ bfd_put_32 (output_bfd, 0,
+ htab->elf.sgotplt->contents + 4);
+ bfd_put_32 (output_bfd, 0,
+ htab->elf.sgotplt->contents + 4*2);
+ }
+ }
+
+ if (!htab->elf.dynamic_sections_created)
+ return htab;
+
+ if (sdyn == NULL || htab->elf.sgot == NULL)
+ abort ();
+
+ sizeof_dyn = bed->s->sizeof_dyn;
+ dyncon = sdyn->contents;
+ dynconend = sdyn->contents + sdyn->size;
+ for (; dyncon < dynconend; dyncon += sizeof_dyn)
+ {
+ Elf_Internal_Dyn dyn;
+ asection *s;
+
+ (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
+
+ switch (dyn.d_tag)
+ {
+ default:
+ if (htab->elf.target_os == is_vxworks
+ && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
+ break;
+ continue;
+
+ case DT_PLTGOT:
+ s = htab->elf.sgotplt;
+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
+ break;
+
+ case DT_JMPREL:
+ dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
+ break;
+
+ case DT_PLTRELSZ:
+ s = htab->elf.srelplt->output_section;
+ dyn.d_un.d_val = s->size;
+ break;
+
+ case DT_TLSDESC_PLT:
+ s = htab->elf.splt;
+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
+ + htab->elf.tlsdesc_plt;
+ break;
+
+ case DT_TLSDESC_GOT:
+ s = htab->elf.sgot;
+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
+ + htab->elf.tlsdesc_got;
+ break;
+ }
+
+ (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
+ }
+
+ if (htab->plt_got != NULL && htab->plt_got->size > 0)
+ elf_section_data (htab->plt_got->output_section)
+ ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
+
+ if (htab->plt_second != NULL && htab->plt_second->size > 0)
+ elf_section_data (htab->plt_second->output_section)
+ ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
+
+ /* Adjust .eh_frame for .plt section. */
+ if (htab->plt_eh_frame != NULL
+ && htab->plt_eh_frame->contents != NULL)
+ {
+ if (htab->elf.splt != NULL
+ && htab->elf.splt->size != 0
+ && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
+ && htab->elf.splt->output_section != NULL
+ && htab->plt_eh_frame->output_section != NULL)