+ toff = isym->st_value;
+ }
+ else
+ {
+ /* Undefined weak symbol is only bound locally in executable
+ and its reference is resolved as 0 without relocation
+ overflow. We can only perform this optimization for
+ GOTPCRELX relocations since we need to modify REX byte.
+ It is OK to convert mov with R_X86_64_GOTPCREL to
+ R_X86_64_PC32. */
+ if ((relocx || opcode == 0x8b)
+ && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
+ X86_64_ELF_DATA,
+ TRUE,
+ elf_x86_hash_entry (h)))
+ {
+ if (opcode == 0xff)
+ {
+ /* Skip for branch instructions since R_X86_64_PC32
+ may overflow. */
+ if (require_reloc_pc32)
+ return TRUE;
+ }
+ else if (relocx)
+ {
+ /* For non-branch instructions, we can convert to
+ R_X86_64_32/R_X86_64_32S since we know if there
+ is a REX byte. */
+ to_reloc_pc32 = FALSE;
+ }
+
+ /* Since we don't know the current PC when PIC is true,
+ we can't convert to R_X86_64_PC32. */
+ if (to_reloc_pc32 && is_pic)
+ return TRUE;
+
+ goto convert;
+ }
+ /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
+ ld.so may use its link-time address. */
+ else if (h->start_stop
+ || ((h->def_regular
+ || h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h != htab->elf.hdynamic
+ && SYMBOL_REFERENCES_LOCAL (link_info, h)))
+ {
+ /* bfd_link_hash_new or bfd_link_hash_undefined is
+ set by an assignment in a linker script in
+ bfd_elf_record_link_assignment. start_stop is set
+ on __start_SECNAME/__stop_SECNAME which mark section
+ SECNAME. */
+ if (h->start_stop
+ || (h->def_regular
+ && (h->root.type == bfd_link_hash_new
+ || h->root.type == bfd_link_hash_undefined
+ || ((h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h->root.u.def.section == bfd_und_section_ptr))))
+ {
+ /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
+ if (require_reloc_pc32)
+ return TRUE;
+ goto convert;
+ }
+ tsec = h->root.u.def.section;
+ toff = h->root.u.def.value;
+ symtype = h->type;
+ }
+ else
+ return TRUE;
+ }
+
+ /* Don't convert GOTPCREL relocation against large section. */
+ if (elf_section_data (tsec) != NULL
+ && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
+ return TRUE;
+
+ /* We can only estimate relocation overflow for R_X86_64_PC32. */
+ if (!to_reloc_pc32)
+ goto convert;
+
+ if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
+ {
+ /* At this stage in linking, no SEC_MERGE symbol has been
+ adjusted, so all references to such symbols need to be
+ passed through _bfd_merged_section_offset. (Later, in
+ relocate_section, all SEC_MERGE symbols *except* for
+ section symbols have been adjusted.)
+
+ gas may reduce relocations against symbols in SEC_MERGE
+ sections to a relocation against the section symbol when
+ the original addend was zero. When the reloc is against
+ a section symbol we should include the addend in the
+ offset passed to _bfd_merged_section_offset, since the
+ location of interest is the original symbol. On the
+ other hand, an access to "sym+addend" where "sym" is not
+ a section symbol should not include the addend; Such an
+ access is presumed to be an offset from "sym"; The
+ location of interest is just "sym". */
+ if (symtype == STT_SECTION)
+ toff += raddend;
+
+ toff = _bfd_merged_section_offset (abfd, &tsec,
+ elf_section_data (tsec)->sec_info,
+ toff);
+
+ if (symtype != STT_SECTION)
+ toff += raddend;
+ }
+ else
+ toff += raddend;
+
+ /* Don't convert if R_X86_64_PC32 relocation overflows. */
+ if (tsec->output_section == sec->output_section)
+ {
+ if ((toff - roff + 0x80000000) > 0xffffffff)
+ return TRUE;
+ }
+ else
+ {
+ bfd_signed_vma distance;
+
+ /* At this point, we don't know the load addresses of TSEC
+ section nor SEC section. We estimate the distance between
+ SEC and TSEC. We store the estimated distances in the
+ compressed_size field of the output section, which is only
+ used to decompress the compressed input section. */
+ if (sec->output_section->compressed_size == 0)
+ {
+ asection *asect;
+ bfd_size_type size = 0;
+ for (asect = link_info->output_bfd->sections;
+ asect != NULL;
+ asect = asect->next)
+ /* Skip debug sections since compressed_size is used to
+ compress debug sections. */
+ if ((asect->flags & SEC_DEBUGGING) == 0)
+ {
+ asection *i;
+ for (i = asect->map_head.s;
+ i != NULL;
+ i = i->map_head.s)
+ {
+ size = align_power (size, i->alignment_power);
+ size += i->size;
+ }
+ asect->compressed_size = size;
+ }
+ }
+
+ /* Don't convert GOTPCREL relocations if TSEC isn't placed
+ after SEC. */
+ distance = (tsec->output_section->compressed_size
+ - sec->output_section->compressed_size);
+ if (distance < 0)
+ return TRUE;
+
+ /* Take PT_GNU_RELRO segment into account by adding
+ maxpagesize. */
+ if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
+ - roff + 0x80000000) > 0xffffffff)
+ return TRUE;
+ }
+
+convert:
+ if (opcode == 0xff)
+ {
+ /* We have "call/jmp *foo@GOTPCREL(%rip)". */
+ unsigned int nop;
+ unsigned int disp;
+ bfd_vma nop_offset;
+
+ /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
+ R_X86_64_PC32. */
+ modrm = bfd_get_8 (abfd, contents + roff - 1);
+ if (modrm == 0x25)
+ {
+ /* Convert to "jmp foo nop". */
+ modrm = 0xe9;
+ nop = NOP_OPCODE;
+ nop_offset = irel->r_offset + 3;
+ disp = bfd_get_32 (abfd, contents + irel->r_offset);
+ irel->r_offset -= 1;
+ bfd_put_32 (abfd, disp, contents + irel->r_offset);
+ }
+ else
+ {
+ struct elf_x86_link_hash_entry *eh
+ = (struct elf_x86_link_hash_entry *) h;
+
+ /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
+ is a nop prefix. */
+ modrm = 0xe8;
+ /* To support TLS optimization, always use addr32 prefix for
+ "call *__tls_get_addr@GOTPCREL(%rip)". */
+ if (eh && eh->tls_get_addr)
+ {
+ nop = 0x67;
+ nop_offset = irel->r_offset - 2;
+ }
+ else
+ {
+ nop = link_info->call_nop_byte;
+ if (link_info->call_nop_as_suffix)
+ {
+ nop_offset = irel->r_offset + 3;
+ disp = bfd_get_32 (abfd, contents + irel->r_offset);
+ irel->r_offset -= 1;
+ bfd_put_32 (abfd, disp, contents + irel->r_offset);
+ }
+ else
+ nop_offset = irel->r_offset - 2;
+ }
+ }
+ bfd_put_8 (abfd, nop, contents + nop_offset);
+ bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
+ r_type = R_X86_64_PC32;
+ }
+ else
+ {
+ unsigned int rex;
+ unsigned int rex_mask = REX_R;
+
+ if (r_type == R_X86_64_REX_GOTPCRELX)
+ rex = bfd_get_8 (abfd, contents + roff - 3);
+ else
+ rex = 0;
+
+ if (opcode == 0x8b)
+ {
+ if (to_reloc_pc32)
+ {
+ /* Convert "mov foo@GOTPCREL(%rip), %reg" to
+ "lea foo(%rip), %reg". */
+ opcode = 0x8d;
+ r_type = R_X86_64_PC32;
+ }
+ else
+ {
+ /* Convert "mov foo@GOTPCREL(%rip), %reg" to
+ "mov $foo, %reg". */
+ opcode = 0xc7;
+ modrm = bfd_get_8 (abfd, contents + roff - 1);
+ modrm = 0xc0 | (modrm & 0x38) >> 3;
+ if ((rex & REX_W) != 0
+ && ABI_64_P (link_info->output_bfd))
+ {
+ /* Keep the REX_W bit in REX byte for LP64. */
+ r_type = R_X86_64_32S;
+ goto rewrite_modrm_rex;
+ }
+ else
+ {
+ /* If the REX_W bit in REX byte isn't needed,
+ use R_X86_64_32 and clear the W bit to avoid
+ sign-extend imm32 to imm64. */
+ r_type = R_X86_64_32;
+ /* Clear the W bit in REX byte. */
+ rex_mask |= REX_W;
+ goto rewrite_modrm_rex;
+ }
+ }
+ }
+ else
+ {
+ /* R_X86_64_PC32 isn't supported. */
+ if (to_reloc_pc32)
+ return TRUE;
+
+ modrm = bfd_get_8 (abfd, contents + roff - 1);
+ if (opcode == 0x85)
+ {
+ /* Convert "test %reg, foo@GOTPCREL(%rip)" to
+ "test $foo, %reg". */
+ modrm = 0xc0 | (modrm & 0x38) >> 3;
+ opcode = 0xf7;
+ }
+ else
+ {
+ /* Convert "binop foo@GOTPCREL(%rip), %reg" to
+ "binop $foo, %reg". */
+ modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
+ opcode = 0x81;
+ }
+
+ /* Use R_X86_64_32 with 32-bit operand to avoid relocation
+ overflow when sign-extending imm32 to imm64. */
+ r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
+
+rewrite_modrm_rex:
+ bfd_put_8 (abfd, modrm, contents + roff - 1);
+
+ if (rex)
+ {
+ /* Move the R bit to the B bit in REX byte. */
+ rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
+ bfd_put_8 (abfd, rex, contents + roff - 3);
+ }
+
+ /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
+ irel->r_addend = 0;
+ }
+
+ bfd_put_8 (abfd, opcode, contents + roff - 2);
+ }
+
+ irel->r_info = htab->r_info (r_symndx, r_type);
+
+ *converted = TRUE;
+
+ return TRUE;
+}
+
+/* Look through the relocs for a section during the first phase, and
+ calculate needed space in the global offset table, procedure
+ linkage table, and dynamic reloc sections. */
+
+static bfd_boolean
+elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
+ asection *sec,
+ const Elf_Internal_Rela *relocs)
+{
+ struct elf_x86_link_hash_table *htab;
+ Elf_Internal_Shdr *symtab_hdr;
+ struct elf_link_hash_entry **sym_hashes;
+ const Elf_Internal_Rela *rel;
+ const Elf_Internal_Rela *rel_end;
+ asection *sreloc;
+ bfd_byte *contents;
+
+ if (bfd_link_relocatable (info))
+ return TRUE;
+
+ /* Don't do anything special with non-loaded, non-alloced sections.
+ In particular, any relocs in such sections should not affect GOT
+ and PLT reference counting (ie. we don't allow them to create GOT
+ or PLT entries), there's no possibility or desire to optimize TLS
+ relocs, and there's not much point in propagating relocs to shared
+ libs that the dynamic linker won't relocate. */
+ if ((sec->flags & SEC_ALLOC) == 0)
+ return TRUE;
+
+ BFD_ASSERT (is_x86_64_elf (abfd));
+
+ htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
+ if (htab == NULL)
+ {
+ sec->check_relocs_failed = 1;
+ return FALSE;
+ }
+
+ /* Get the section contents. */
+ if (elf_section_data (sec)->this_hdr.contents != NULL)
+ contents = elf_section_data (sec)->this_hdr.contents;
+ else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
+ {
+ sec->check_relocs_failed = 1;
+ return FALSE;
+ }
+
+ symtab_hdr = &elf_symtab_hdr (abfd);
+ sym_hashes = elf_sym_hashes (abfd);
+
+ sreloc = NULL;
+
+ rel_end = relocs + sec->reloc_count;
+ for (rel = relocs; rel < rel_end; rel++)
+ {
+ unsigned int r_type;
+ unsigned int r_symndx;
+ struct elf_link_hash_entry *h;
+ struct elf_x86_link_hash_entry *eh;
+ Elf_Internal_Sym *isym;
+ const char *name;
+ bfd_boolean size_reloc;
+
+ r_symndx = htab->r_sym (rel->r_info);
+ r_type = ELF32_R_TYPE (rel->r_info);
+
+ if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
+ {
+ /* xgettext:c-format */
+ _bfd_error_handler (_("%B: bad symbol index: %d"),
+ abfd, r_symndx);
+ goto error_return;
+ }
+
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ /* A local symbol. */
+ isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+ abfd, r_symndx);
+ if (isym == NULL)
+ goto error_return;
+
+ /* Check relocation against local STT_GNU_IFUNC symbol. */
+ if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
+ {
+ h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
+ TRUE);
+ if (h == NULL)
+ goto error_return;
+
+ /* Fake a STT_GNU_IFUNC symbol. */
+ h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
+ isym, NULL);
+ h->type = STT_GNU_IFUNC;
+ h->def_regular = 1;