X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gold%2Fx86_64.cc;h=e61d7d1bf590f8e30ea8ff07f1427a173a10bfc7;hb=bb593acb76bc52b23ddbad3f9b5199be26879da5;hp=802d4984e72f10931e88db68e9b9e9aed17f6c1e;hpb=b3705d2a514a4235bd08c02f708f02b1c9285019;p=deliverable%2Fbinutils-gdb.git diff --git a/gold/x86_64.cc b/gold/x86_64.cc index 802d4984e7..e61d7d1bf5 100644 --- a/gold/x86_64.cc +++ b/gold/x86_64.cc @@ -1,6 +1,6 @@ // x86_64.cc -- x86_64 target support for gold. -// Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc. +// Copyright 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. // Written by Ian Lance Taylor . // This file is part of gold. @@ -25,6 +25,7 @@ #include #include "elfcpp.h" +#include "dwarf.h" #include "parameters.h" #include "reloc.h" #include "x86_64.h" @@ -39,13 +40,200 @@ #include "tls.h" #include "freebsd.h" #include "gc.h" +#include "icf.h" namespace { using namespace gold; -class Output_data_plt_x86_64; +// A class to handle the PLT data. + +class Output_data_plt_x86_64 : public Output_section_data +{ + public: + typedef Output_data_reloc Reloc_section; + + Output_data_plt_x86_64(Layout* layout, Output_data_got<64, false>* got, + Output_data_space* got_plt, + Output_data_space* got_irelative) + : Output_section_data(16), layout_(layout), tlsdesc_rel_(NULL), + irelative_rel_(NULL), got_(got), got_plt_(got_plt), + got_irelative_(got_irelative), count_(0), irelative_count_(0), + tlsdesc_got_offset_(-1U), free_list_() + { this->init(layout); } + + Output_data_plt_x86_64(Layout* layout, Output_data_got<64, false>* got, + Output_data_space* got_plt, + Output_data_space* got_irelative, + unsigned int plt_count) + : Output_section_data((plt_count + 1) * plt_entry_size, 16, false), + layout_(layout), tlsdesc_rel_(NULL), irelative_rel_(NULL), got_(got), + got_plt_(got_plt), got_irelative_(got_irelative), count_(plt_count), + irelative_count_(0), tlsdesc_got_offset_(-1U), free_list_() + { + this->init(layout); + + // Initialize the free list and reserve the first entry. + this->free_list_.init((plt_count + 1) * plt_entry_size, false); + this->free_list_.remove(0, plt_entry_size); + } + + // Initialize the PLT section. + void + init(Layout* layout); + + // Add an entry to the PLT. + void + add_entry(Symbol_table*, Layout*, Symbol* gsym); + + // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. + unsigned int + add_local_ifunc_entry(Symbol_table* symtab, Layout*, + Sized_relobj_file<64, false>* relobj, + unsigned int local_sym_index); + + // Add the relocation for a PLT entry. + void + add_relocation(Symbol_table*, Layout*, Symbol* gsym, + unsigned int got_offset); + + // Add the reserved TLSDESC_PLT entry to the PLT. + void + reserve_tlsdesc_entry(unsigned int got_offset) + { this->tlsdesc_got_offset_ = got_offset; } + + // Return true if a TLSDESC_PLT entry has been reserved. + bool + has_tlsdesc_entry() const + { return this->tlsdesc_got_offset_ != -1U; } + + // Return the GOT offset for the reserved TLSDESC_PLT entry. + unsigned int + get_tlsdesc_got_offset() const + { return this->tlsdesc_got_offset_; } + + // Return the offset of the reserved TLSDESC_PLT entry. + unsigned int + get_tlsdesc_plt_offset() const + { return (this->count_ + this->irelative_count_ + 1) * plt_entry_size; } + + // Return the .rela.plt section data. + Reloc_section* + rela_plt() + { return this->rel_; } + + // Return where the TLSDESC relocations should go. 
+ Reloc_section* + rela_tlsdesc(Layout*); + + // Return where the IRELATIVE relocations should go in the PLT + // relocations. + Reloc_section* + rela_irelative(Symbol_table*, Layout*); + + // Return whether we created a section for IRELATIVE relocations. + bool + has_irelative_section() const + { return this->irelative_rel_ != NULL; } + + // Return the number of PLT entries. + unsigned int + entry_count() const + { return this->count_ + this->irelative_count_; } + + // Return the offset of the first non-reserved PLT entry. + static unsigned int + first_plt_entry_offset() + { return plt_entry_size; } + + // Return the size of a PLT entry. + static unsigned int + get_plt_entry_size() + { return plt_entry_size; } + + // Reserve a slot in the PLT for an existing symbol in an incremental update. + void + reserve_slot(unsigned int plt_index) + { + this->free_list_.remove((plt_index + 1) * plt_entry_size, + (plt_index + 2) * plt_entry_size); + } + + // Return the PLT address to use for a global symbol. + uint64_t + address_for_global(const Symbol*); + + // Return the PLT address to use for a local symbol. + uint64_t + address_for_local(const Relobj*, unsigned int symndx); + + protected: + void + do_adjust_output_section(Output_section* os); + + // Write to a map file. + void + do_print_to_mapfile(Mapfile* mapfile) const + { mapfile->print_output_data(this, _("** PLT")); } + + private: + // The size of an entry in the PLT. + static const int plt_entry_size = 16; + + // The first entry in the PLT. + // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same + // procedure linkage table for both programs and shared objects." + static const unsigned char first_plt_entry[plt_entry_size]; + + // Other entries in the PLT for an executable. + static const unsigned char plt_entry[plt_entry_size]; + + // The reserved TLSDESC entry in the PLT for an executable. + static const unsigned char tlsdesc_plt_entry[plt_entry_size]; + + // The .eh_frame unwind information for the PLT. + static const int plt_eh_frame_cie_size = 16; + static const int plt_eh_frame_fde_size = 32; + static const unsigned char plt_eh_frame_cie[plt_eh_frame_cie_size]; + static const unsigned char plt_eh_frame_fde[plt_eh_frame_fde_size]; + + // Set the final size. + void + set_final_data_size(); + + // Write out the PLT data. + void + do_write(Output_file*); + + // A pointer to the Layout class, so that we can find the .dynamic + // section when we write out the GOT PLT section. + Layout* layout_; + // The reloc section. + Reloc_section* rel_; + // The TLSDESC relocs, if necessary. These must follow the regular + // PLT relocs. + Reloc_section* tlsdesc_rel_; + // The IRELATIVE relocs, if necessary. These must follow the + // regular PLT relocations and the TLSDESC relocations. + Reloc_section* irelative_rel_; + // The .got section. + Output_data_got<64, false>* got_; + // The .got.plt section. + Output_data_space* got_plt_; + // The part of the .got.plt section used for IRELATIVE relocs. + Output_data_space* got_irelative_; + // The number of PLT entries. + unsigned int count_; + // Number of PLT entries with R_X86_64_IRELATIVE relocs. These + // follow the regular PLT entries. + unsigned int irelative_count_; + // Offset of the reserved TLSDESC_GOT entry when needed. + unsigned int tlsdesc_got_offset_; + // List of available regions within the section, for incremental + // update links. + Free_list free_list_; +}; // The x86_64 target class. 
// See the ABI at @@ -54,7 +242,7 @@ class Output_data_plt_x86_64; // http://people.redhat.com/drepper/tls.pdf // http://www.lsd.ic.unicamp.br/~oliva/writeups/TLS/RFC-TLSDESC-x86.txt -class Target_x86_64 : public Target_freebsd<64, false> +class Target_x86_64 : public Sized_target<64, false> { public: // In the x86_64 ABI (p 68), it says "The AMD64 ABI architectures @@ -62,10 +250,11 @@ class Target_x86_64 : public Target_freebsd<64, false> typedef Output_data_reloc Reloc_section; Target_x86_64() - : Target_freebsd<64, false>(&x86_64_info), - got_(NULL), plt_(NULL), got_plt_(NULL), global_offset_table_(NULL), - rela_dyn_(NULL), copy_relocs_(elfcpp::R_X86_64_COPY), dynbss_(NULL), - got_mod_index_offset_(-1U), tlsdesc_reloc_info_(), + : Sized_target<64, false>(&x86_64_info), + got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL), + got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL), + rela_irelative_(NULL), copy_relocs_(elfcpp::R_X86_64_COPY), + dynbss_(NULL), got_mod_index_offset_(-1U), tlsdesc_reloc_info_(), tls_base_symbol_defined_(false) { } @@ -77,7 +266,7 @@ class Target_x86_64 : public Target_freebsd<64, false> void gc_process_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -91,7 +280,7 @@ class Target_x86_64 : public Target_freebsd<64, false> void scan_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -127,7 +316,7 @@ class Target_x86_64 : public Target_freebsd<64, false> void scan_relocatable_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -176,7 +365,30 @@ class Target_x86_64 : public Target_freebsd<64, false> uint64_t do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const; - // Adjust -fstack-split code which calls non-stack-split code. + // Return the PLT section. + uint64_t + do_plt_address_for_global(const Symbol* gsym) const + { return this->plt_section()->address_for_global(gsym); } + + uint64_t + do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const + { return this->plt_section()->address_for_local(relobj, symndx); } + + // This function should be defined in targets that can use relocation + // types to determine (implemented in local_reloc_may_be_function_pointer + // and global_reloc_may_be_function_pointer) + // if a function's pointer is taken. ICF uses this in safe mode to only + // fold those functions whose pointer is defintely not taken. For x86_64 + // pie binaries, safe ICF cannot be done by looking at relocation types. + bool + do_can_check_for_function_pointers() const + { return !parameters->options().pie(); } + + // Return the base for a DW_EH_PE_datarel encoding. + uint64_t + do_ehframe_datarel_base() const; + + // Adjust -fsplit-stack code which calls non-split-stack code. void do_calls_non_split(Relobj* object, unsigned int shndx, section_offset_type fnoffset, section_size_type fnsize, @@ -185,15 +397,77 @@ class Target_x86_64 : public Target_freebsd<64, false> // Return the size of the GOT section. 
section_size_type - got_size() + got_size() const { gold_assert(this->got_ != NULL); return this->got_->data_size(); } + // Return the number of entries in the GOT. + unsigned int + got_entry_count() const + { + if (this->got_ == NULL) + return 0; + return this->got_size() / 8; + } + + // Return the number of entries in the PLT. + unsigned int + plt_entry_count() const; + + // Return the offset of the first non-reserved PLT entry. + unsigned int + first_plt_entry_offset() const; + + // Return the size of each PLT entry. + unsigned int + plt_entry_size() const; + + // Create the GOT section for an incremental update. + Output_data_got<64, false>* + init_got_plt_for_update(Symbol_table* symtab, + Layout* layout, + unsigned int got_count, + unsigned int plt_count); + + // Reserve a GOT entry for a local symbol, and regenerate any + // necessary dynamic relocations. + void + reserve_local_got_entry(unsigned int got_index, + Sized_relobj<64, false>* obj, + unsigned int r_sym, + unsigned int got_type); + + // Reserve a GOT entry for a global symbol, and regenerate any + // necessary dynamic relocations. + void + reserve_global_got_entry(unsigned int got_index, Symbol* gsym, + unsigned int got_type); + + // Register an existing PLT entry for a global symbol. + void + register_global_plt_entry(Symbol_table*, Layout*, unsigned int plt_index, + Symbol* gsym); + + // Force a COPY relocation for a given symbol. + void + emit_copy_reloc(Symbol_table*, Symbol*, Output_section*, off_t); + + // Apply an incremental relocation. + void + apply_relocation(const Relocate_info<64, false>* relinfo, + elfcpp::Elf_types<64>::Elf_Addr r_offset, + unsigned int r_type, + elfcpp::Elf_types<64>::Elf_Swxword r_addend, + const Symbol* gsym, + unsigned char* view, + elfcpp::Elf_types<64>::Elf_Addr address, + section_size_type view_size); + // Add a new reloc argument, returning the index in the vector. 
size_t - add_tlsdesc_info(Sized_relobj<64, false>* object, unsigned int r_sym) + add_tlsdesc_info(Sized_relobj_file<64, false>* object, unsigned int r_sym) { this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym)); return this->tlsdesc_reloc_info_.size() - 1; @@ -208,9 +482,12 @@ class Target_x86_64 : public Target_freebsd<64, false> : issued_non_pic_error_(false) { } + static inline int + get_reference_flags(unsigned int r_type); + inline void local(Symbol_table* symtab, Layout* layout, Target_x86_64* target, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, Output_section* output_section, const elfcpp::Rela<64, false>& reloc, unsigned int r_type, @@ -218,22 +495,49 @@ class Target_x86_64 : public Target_freebsd<64, false> inline void global(Symbol_table* symtab, Layout* layout, Target_x86_64* target, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, Output_section* output_section, const elfcpp::Rela<64, false>& reloc, unsigned int r_type, Symbol* gsym); + inline bool + local_reloc_may_be_function_pointer(Symbol_table* symtab, Layout* layout, + Target_x86_64* target, + Sized_relobj_file<64, false>* object, + unsigned int data_shndx, + Output_section* output_section, + const elfcpp::Rela<64, false>& reloc, + unsigned int r_type, + const elfcpp::Sym<64, false>& lsym); + + inline bool + global_reloc_may_be_function_pointer(Symbol_table* symtab, Layout* layout, + Target_x86_64* target, + Sized_relobj_file<64, false>* object, + unsigned int data_shndx, + Output_section* output_section, + const elfcpp::Rela<64, false>& reloc, + unsigned int r_type, + Symbol* gsym); + private: static void - unsupported_reloc_local(Sized_relobj<64, false>*, unsigned int r_type); + unsupported_reloc_local(Sized_relobj_file<64, false>*, unsigned int r_type); static void - unsupported_reloc_global(Sized_relobj<64, false>*, unsigned int r_type, + unsupported_reloc_global(Sized_relobj_file<64, false>*, unsigned int r_type, Symbol*); void - check_non_pic(Relobj*, unsigned int r_type); + check_non_pic(Relobj*, unsigned int r_type, Symbol*); + + inline bool + possible_function_pointer_reloc(unsigned int r_type); + + bool + reloc_needs_plt_for_ifunc(Sized_relobj_file<64, false>*, + unsigned int r_type); // Whether we have issued an error about a non-PIC compilation. bool issued_non_pic_error_; @@ -244,7 +548,7 @@ class Target_x86_64 : public Target_freebsd<64, false> { public: Relocate() - : skip_call_tls_get_addr_(false), saw_tls_block_reloc_(false) + : skip_call_tls_get_addr_(false) { } ~Relocate() @@ -335,12 +639,6 @@ class Target_x86_64 : public Target_freebsd<64, false> // This is set if we should skip the next reloc, which should be a // PLT32 reloc against ___tls_get_addr. bool skip_call_tls_get_addr_; - - // This is set if we see a relocation which could load the address - // of the TLS block. Whether we see such a relocation determines - // how we handle the R_X86_64_DTPOFF32 relocation, which is used - // in debugging sections. - bool saw_tls_block_reloc_; }; // A class which returns the size required for a relocation type, @@ -369,6 +667,14 @@ class Target_x86_64 : public Target_freebsd<64, false> return this->got_plt_; } + // Get the GOT section for TLSDESC entries. + Output_data_got<64, false>* + got_tlsdesc_section() const + { + gold_assert(this->got_tlsdesc_ != NULL); + return this->got_tlsdesc_; + } + // Create the PLT section. 
void make_plt_section(Symbol_table* symtab, Layout* layout); @@ -377,6 +683,12 @@ class Target_x86_64 : public Target_freebsd<64, false> void make_plt_entry(Symbol_table*, Layout*, Symbol*); + // Create a PLT entry for a local STT_GNU_IFUNC symbol. + void + make_local_ifunc_plt_entry(Symbol_table*, Layout*, + Sized_relobj_file<64, false>* relobj, + unsigned int local_sym_index); + // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. void define_tls_base_symbol(Symbol_table*, Layout*); @@ -388,7 +700,7 @@ class Target_x86_64 : public Target_freebsd<64, false> // Create a GOT entry for the TLS module index. unsigned int got_mod_index_entry(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object); + Sized_relobj_file<64, false>* object); // Get the PLT section. Output_data_plt_x86_64* @@ -406,10 +718,14 @@ class Target_x86_64 : public Target_freebsd<64, false> Reloc_section* rela_tlsdesc_section(Layout*) const; + // Get the section to use for IRELATIVE relocations. + Reloc_section* + rela_irelative_section(Layout*); + // Add a potential copy relocation. void copy_reloc(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int shndx, Output_section* output_section, Symbol* sym, const elfcpp::Rela<64, false>& reloc) { @@ -423,6 +739,10 @@ class Target_x86_64 : public Target_freebsd<64, false> // general Target structure. static const Target::Target_info x86_64_info; + // The types of GOT entries needed for this platform. + // These values are exposed to the ABI in an incremental link. + // Do not renumber existing values without changing the version + // number of the .gnu_incremental_inputs section. enum Got_type { GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol @@ -436,12 +756,12 @@ class Target_x86_64 : public Target_freebsd<64, false> // R_X86_64_TLSDESC against a local symbol. struct Tlsdesc_info { - Tlsdesc_info(Sized_relobj<64, false>* a_object, unsigned int a_r_sym) + Tlsdesc_info(Sized_relobj_file<64, false>* a_object, unsigned int a_r_sym) : object(a_object), r_sym(a_r_sym) { } // The object in which the local symbol is defined. - Sized_relobj<64, false>* object; + Sized_relobj_file<64, false>* object; // The local symbol index in the object. unsigned int r_sym; }; @@ -452,10 +772,16 @@ class Target_x86_64 : public Target_freebsd<64, false> Output_data_plt_x86_64* plt_; // The GOT PLT section. Output_data_space* got_plt_; + // The GOT section for IRELATIVE relocations. + Output_data_space* got_irelative_; + // The GOT section for TLSDESC relocations. + Output_data_got<64, false>* got_tlsdesc_; // The _GLOBAL_OFFSET_TABLE_ symbol. Symbol* global_offset_table_; // The dynamic reloc section. Reloc_section* rela_dyn_; + // The section to use for IRELATIVE relocs. + Reloc_section* rela_irelative_; // Relocs saved to avoid a COPY reloc. Copy_relocs copy_relocs_; // Space for variables copied with a COPY reloc. @@ -479,6 +805,7 @@ const Target::Target_info Target_x86_64::x86_64_info = false, // has_resolve true, // has_code_fill true, // is_default_stack_executable + true, // can_icf_inline_merge_sections '\0', // wrap_char "/lib/ld64.so.1", // program interpreter 0x400000, // default_text_segment_address @@ -496,7 +823,7 @@ const Target::Target_info Target_x86_64::x86_64_info = // we handle the SHF_X86_64_LARGE. 
void -Target_x86_64::do_new_output_section(Output_section *os) const +Target_x86_64::do_new_output_section(Output_section* os) const { if ((os->flags() & elfcpp::SHF_X86_64_LARGE) != 0) os->set_is_large_section(); @@ -511,27 +838,39 @@ Target_x86_64::got_section(Symbol_table* symtab, Layout* layout) { gold_assert(symtab != NULL && layout != NULL); + // When using -z now, we can treat .got.plt as a relro section. + // Without -z now, it is modified after program startup by lazy + // PLT relocations. + bool is_got_plt_relro = parameters->options().now(); + Output_section_order got_order = (is_got_plt_relro + ? ORDER_RELRO + : ORDER_RELRO_LAST); + Output_section_order got_plt_order = (is_got_plt_relro + ? ORDER_RELRO + : ORDER_NON_RELRO_FIRST); + this->got_ = new Output_data_got<64, false>(); - Output_section* os; - os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, - (elfcpp::SHF_ALLOC - | elfcpp::SHF_WRITE), - this->got_, false, true, true, - false); + layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_, got_order, true); this->got_plt_ = new Output_data_space(8, "** GOT PLT"); - os = layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, - (elfcpp::SHF_ALLOC - | elfcpp::SHF_WRITE), - this->got_plt_, false, false, - false, true); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_plt_, got_plt_order, + is_got_plt_relro); // The first three entries are reserved. this->got_plt_->set_current_data_size(3 * 8); - // Those bytes can go into the relro segment. - layout->increase_relro(3 * 8); + if (!is_got_plt_relro) + { + // Those bytes can go into the relro segment. + layout->increase_relro(3 * 8); + } // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT. this->global_offset_table_ = @@ -542,6 +881,24 @@ Target_x86_64::got_section(Symbol_table* symtab, Layout* layout) elfcpp::STB_LOCAL, elfcpp::STV_HIDDEN, 0, false, false); + + // If there are any IRELATIVE relocations, they get GOT entries + // in .got.plt after the jump slot entries. + this->got_irelative_ = new Output_data_space(8, "** GOT IRELATIVE PLT"); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_irelative_, + got_plt_order, is_got_plt_relro); + + // If there are any TLSDESC relocations, they get GOT entries in + // .got.plt after the jump slot and IRELATIVE entries. + this->got_tlsdesc_ = new Output_data_got<64, false>(); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_tlsdesc_, + got_plt_order, is_got_plt_relro); } return this->got_; @@ -557,116 +914,49 @@ Target_x86_64::rela_dyn_section(Layout* layout) gold_assert(layout != NULL); this->rela_dyn_ = new Reloc_section(parameters->options().combreloc()); layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, - elfcpp::SHF_ALLOC, this->rela_dyn_, true, - false, false, false); + elfcpp::SHF_ALLOC, this->rela_dyn_, + ORDER_DYNAMIC_RELOCS, false); } return this->rela_dyn_; } -// A class to handle the PLT data. +// Get the section to use for IRELATIVE relocs, creating it if +// necessary. These go in .rela.dyn, but only after all other dynamic +// relocations. They need to follow the other dynamic relocations so +// that they can refer to global variables initialized by those +// relocs. 
-class Output_data_plt_x86_64 : public Output_section_data +Target_x86_64::Reloc_section* +Target_x86_64::rela_irelative_section(Layout* layout) { - public: - typedef Output_data_reloc Reloc_section; - - Output_data_plt_x86_64(Layout*, Output_data_got<64, false>*, - Output_data_space*); - - // Add an entry to the PLT. - void - add_entry(Symbol* gsym); - - // Add the reserved TLSDESC_PLT entry to the PLT. - void - reserve_tlsdesc_entry(unsigned int got_offset) - { this->tlsdesc_got_offset_ = got_offset; } - - // Return true if a TLSDESC_PLT entry has been reserved. - bool - has_tlsdesc_entry() const - { return this->tlsdesc_got_offset_ != -1U; } - - // Return the GOT offset for the reserved TLSDESC_PLT entry. - unsigned int - get_tlsdesc_got_offset() const - { return this->tlsdesc_got_offset_; } - - // Return the offset of the reserved TLSDESC_PLT entry. - unsigned int - get_tlsdesc_plt_offset() const - { return (this->count_ + 1) * plt_entry_size; } - - // Return the .rela.plt section data. - const Reloc_section* - rela_plt() const - { return this->rel_; } - - // Return where the TLSDESC relocations should go. - Reloc_section* - rela_tlsdesc(Layout*); - - protected: - void - do_adjust_output_section(Output_section* os); - - // Write to a map file. - void - do_print_to_mapfile(Mapfile* mapfile) const - { mapfile->print_output_data(this, _("** PLT")); } - - private: - // The size of an entry in the PLT. - static const int plt_entry_size = 16; - - // The first entry in the PLT. - // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same - // procedure linkage table for both programs and shared objects." - static unsigned char first_plt_entry[plt_entry_size]; - - // Other entries in the PLT for an executable. - static unsigned char plt_entry[plt_entry_size]; - - // The reserved TLSDESC entry in the PLT for an executable. - static unsigned char tlsdesc_plt_entry[plt_entry_size]; - - // Set the final size. - void - set_final_data_size(); - - // Write out the PLT data. - void - do_write(Output_file*); - - // The reloc section. - Reloc_section* rel_; - // The TLSDESC relocs, if necessary. These must follow the regular - // PLT relocs. - Reloc_section* tlsdesc_rel_; - // The .got section. - Output_data_got<64, false>* got_; - // The .got.plt section. - Output_data_space* got_plt_; - // The number of PLT entries. - unsigned int count_; - // Offset of the reserved TLSDESC_GOT entry when needed. - unsigned int tlsdesc_got_offset_; -}; + if (this->rela_irelative_ == NULL) + { + // Make sure we have already created the dynamic reloc section. + this->rela_dyn_section(layout); + this->rela_irelative_ = new Reloc_section(false); + layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, + elfcpp::SHF_ALLOC, this->rela_irelative_, + ORDER_DYNAMIC_RELOCS, false); + gold_assert(this->rela_dyn_->output_section() + == this->rela_irelative_->output_section()); + } + return this->rela_irelative_; +} -// Create the PLT section. The ordinary .got section is an argument, -// since we need to refer to the start. We also create our own .got -// section just for PLT entries. +// Initialize the PLT section. 
-Output_data_plt_x86_64::Output_data_plt_x86_64(Layout* layout, - Output_data_got<64, false>* got, - Output_data_space* got_plt) - : Output_section_data(8), tlsdesc_rel_(NULL), got_(got), got_plt_(got_plt), - count_(0), tlsdesc_got_offset_(-1U) +void +Output_data_plt_x86_64::init(Layout* layout) { this->rel_ = new Reloc_section(false); layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, - elfcpp::SHF_ALLOC, this->rel_, true, - false, false, false); + elfcpp::SHF_ALLOC, this->rel_, + ORDER_DYNAMIC_PLT_RELOCS, false); + + // Add unwind information if requested. + if (parameters->options().ld_generated_unwind_info()) + layout->add_eh_frame_for_plt(this, plt_eh_frame_cie, plt_eh_frame_cie_size, + plt_eh_frame_fde, plt_eh_frame_fde_size); } void @@ -678,33 +968,128 @@ Output_data_plt_x86_64::do_adjust_output_section(Output_section* os) // Add an entry to the PLT. void -Output_data_plt_x86_64::add_entry(Symbol* gsym) +Output_data_plt_x86_64::add_entry(Symbol_table* symtab, Layout* layout, + Symbol* gsym) { gold_assert(!gsym->has_plt_offset()); - // Note that when setting the PLT offset we skip the initial - // reserved PLT entry. - gsym->set_plt_offset((this->count_ + 1) * plt_entry_size); + unsigned int plt_index; + off_t plt_offset; + section_offset_type got_offset; - ++this->count_; + unsigned int* pcount; + unsigned int offset; + unsigned int reserved; + Output_data_space* got; + if (gsym->type() == elfcpp::STT_GNU_IFUNC + && gsym->can_use_relative_reloc(false)) + { + pcount = &this->irelative_count_; + offset = 0; + reserved = 0; + got = this->got_irelative_; + } + else + { + pcount = &this->count_; + offset = 1; + reserved = 3; + got = this->got_plt_; + } - section_offset_type got_offset = this->got_plt_->current_data_size(); + if (!this->is_data_size_valid()) + { + // Note that when setting the PLT offset for a non-IRELATIVE + // entry we skip the initial reserved PLT entry. + plt_index = *pcount + offset; + plt_offset = plt_index * plt_entry_size; - // Every PLT entry needs a GOT entry which points back to the PLT - // entry (this will be changed by the dynamic linker, normally - // lazily when the function is called). - this->got_plt_->set_current_data_size(got_offset + 8); + ++*pcount; + + got_offset = (plt_index - offset + reserved) * 8; + gold_assert(got_offset == got->current_data_size()); + + // Every PLT entry needs a GOT entry which points back to the PLT + // entry (this will be changed by the dynamic linker, normally + // lazily when the function is called). + got->set_current_data_size(got_offset + 8); + } + else + { + // FIXME: This is probably not correct for IRELATIVE relocs. + + // For incremental updates, find an available slot. + plt_offset = this->free_list_.allocate(plt_entry_size, plt_entry_size, 0); + if (plt_offset == -1) + gold_fallback(_("out of patch space (PLT);" + " relink with --incremental-full")); + + // The GOT and PLT entries have a 1-1 correspondance, so the GOT offset + // can be calculated from the PLT index, adjusting for the three + // reserved entries at the beginning of the GOT. + plt_index = plt_offset / plt_entry_size - 1; + got_offset = (plt_index - offset + reserved) * 8; + } + + gsym->set_plt_offset(plt_offset); // Every PLT entry needs a reloc. - gsym->set_needs_dynsym_entry(); - this->rel_->add_global(gsym, elfcpp::R_X86_64_JUMP_SLOT, this->got_plt_, - got_offset, 0); + this->add_relocation(symtab, layout, gsym, got_offset); // Note that we don't need to save the symbol. 
The contents of the // PLT are independent of which symbols are used. The symbols only // appear in the relocations. } +// Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return +// the PLT offset. + +unsigned int +Output_data_plt_x86_64::add_local_ifunc_entry( + Symbol_table* symtab, + Layout* layout, + Sized_relobj_file<64, false>* relobj, + unsigned int local_sym_index) +{ + unsigned int plt_offset = this->irelative_count_ * plt_entry_size; + ++this->irelative_count_; + + section_offset_type got_offset = this->got_irelative_->current_data_size(); + + // Every PLT entry needs a GOT entry which points back to the PLT + // entry. + this->got_irelative_->set_current_data_size(got_offset + 8); + + // Every PLT entry needs a reloc. + Reloc_section* rela = this->rela_irelative(symtab, layout); + rela->add_symbolless_local_addend(relobj, local_sym_index, + elfcpp::R_X86_64_IRELATIVE, + this->got_irelative_, got_offset, 0); + + return plt_offset; +} + +// Add the relocation for a PLT entry. + +void +Output_data_plt_x86_64::add_relocation(Symbol_table* symtab, Layout* layout, + Symbol* gsym, unsigned int got_offset) +{ + if (gsym->type() == elfcpp::STT_GNU_IFUNC + && gsym->can_use_relative_reloc(false)) + { + Reloc_section* rela = this->rela_irelative(symtab, layout); + rela->add_symbolless_global_addend(gsym, elfcpp::R_X86_64_IRELATIVE, + this->got_irelative_, got_offset, 0); + } + else + { + gsym->set_needs_dynsym_entry(); + this->rel_->add_global(gsym, elfcpp::R_X86_64_JUMP_SLOT, this->got_plt_, + got_offset, 0); + } +} + // Return where the TLSDESC relocations should go, creating it if // necessary. These follow the JUMP_SLOT relocations. @@ -716,18 +1101,79 @@ Output_data_plt_x86_64::rela_tlsdesc(Layout* layout) this->tlsdesc_rel_ = new Reloc_section(false); layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, elfcpp::SHF_ALLOC, this->tlsdesc_rel_, - true, false, false, false); - gold_assert(this->tlsdesc_rel_->output_section() == - this->rel_->output_section()); + ORDER_DYNAMIC_PLT_RELOCS, false); + gold_assert(this->tlsdesc_rel_->output_section() + == this->rel_->output_section()); } return this->tlsdesc_rel_; } +// Return where the IRELATIVE relocations should go in the PLT. These +// follow the JUMP_SLOT and the TLSDESC relocations. + +Output_data_plt_x86_64::Reloc_section* +Output_data_plt_x86_64::rela_irelative(Symbol_table* symtab, Layout* layout) +{ + if (this->irelative_rel_ == NULL) + { + // Make sure we have a place for the TLSDESC relocations, in + // case we see any later on. + this->rela_tlsdesc(layout); + this->irelative_rel_ = new Reloc_section(false); + layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA, + elfcpp::SHF_ALLOC, this->irelative_rel_, + ORDER_DYNAMIC_PLT_RELOCS, false); + gold_assert(this->irelative_rel_->output_section() + == this->rel_->output_section()); + + if (parameters->doing_static_link()) + { + // A statically linked executable will only have a .rela.plt + // section to hold R_X86_64_IRELATIVE relocs for + // STT_GNU_IFUNC symbols. The library will use these + // symbols to locate the IRELATIVE relocs at program startup + // time. 
+ symtab->define_in_output_data("__rela_iplt_start", NULL, + Symbol_table::PREDEFINED, + this->irelative_rel_, 0, 0, + elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, + elfcpp::STV_HIDDEN, 0, false, true); + symtab->define_in_output_data("__rela_iplt_end", NULL, + Symbol_table::PREDEFINED, + this->irelative_rel_, 0, 0, + elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL, + elfcpp::STV_HIDDEN, 0, true, true); + } + } + return this->irelative_rel_; +} + +// Return the PLT address to use for a global symbol. + +uint64_t +Output_data_plt_x86_64::address_for_global(const Symbol* gsym) +{ + uint64_t offset = 0; + if (gsym->type() == elfcpp::STT_GNU_IFUNC + && gsym->can_use_relative_reloc(false)) + offset = (this->count_ + 1) * plt_entry_size; + return this->address() + offset; +} + +// Return the PLT address to use for a local symbol. These are always +// IRELATIVE relocs. + +uint64_t +Output_data_plt_x86_64::address_for_local(const Relobj*, unsigned int) +{ + return this->address() + (this->count_ + 1) * plt_entry_size; +} + // Set the final size. void Output_data_plt_x86_64::set_final_data_size() { - unsigned int count = this->count_; + unsigned int count = this->count_ + this->irelative_count_; if (this->has_tlsdesc_entry()) ++count; this->set_data_size((count + 1) * plt_entry_size); @@ -735,7 +1181,7 @@ Output_data_plt_x86_64::set_final_data_size() // The first entry in the PLT for an executable. -unsigned char Output_data_plt_x86_64::first_plt_entry[plt_entry_size] = +const unsigned char Output_data_plt_x86_64::first_plt_entry[plt_entry_size] = { // From AMD64 ABI Draft 0.98, page 76 0xff, 0x35, // pushq contents of memory address @@ -747,7 +1193,7 @@ unsigned char Output_data_plt_x86_64::first_plt_entry[plt_entry_size] = // Subsequent entries in the PLT for an executable. -unsigned char Output_data_plt_x86_64::plt_entry[plt_entry_size] = +const unsigned char Output_data_plt_x86_64::plt_entry[plt_entry_size] = { // From AMD64 ABI Draft 0.98, page 76 0xff, 0x25, // jmpq indirect @@ -760,7 +1206,7 @@ unsigned char Output_data_plt_x86_64::plt_entry[plt_entry_size] = // The reserved TLSDESC entry in the PLT for an executable. -unsigned char Output_data_plt_x86_64::tlsdesc_plt_entry[plt_entry_size] = +const unsigned char Output_data_plt_x86_64::tlsdesc_plt_entry[plt_entry_size] = { // From Alexandre Oliva, "Thread-Local Storage Descriptors for IA32 // and AMD64/EM64T", Version 0.9.4 (2005-10-10). @@ -772,6 +1218,54 @@ unsigned char Output_data_plt_x86_64::tlsdesc_plt_entry[plt_entry_size] = 0x40, 0 }; +// The .eh_frame unwind information for the PLT. + +const unsigned char +Output_data_plt_x86_64::plt_eh_frame_cie[plt_eh_frame_cie_size] = +{ + 1, // CIE version. + 'z', // Augmentation: augmentation size included. + 'R', // Augmentation: FDE encoding included. + '\0', // End of augmentation string. + 1, // Code alignment factor. + 0x78, // Data alignment factor. + 16, // Return address column. + 1, // Augmentation size. + (elfcpp::DW_EH_PE_pcrel // FDE encoding. + | elfcpp::DW_EH_PE_sdata4), + elfcpp::DW_CFA_def_cfa, 7, 8, // DW_CFA_def_cfa: r7 (rsp) ofs 8. + elfcpp::DW_CFA_offset + 16, 1,// DW_CFA_offset: r16 (rip) at cfa-8. + elfcpp::DW_CFA_nop, // Align to 16 bytes. + elfcpp::DW_CFA_nop +}; + +const unsigned char +Output_data_plt_x86_64::plt_eh_frame_fde[plt_eh_frame_fde_size] = +{ + 0, 0, 0, 0, // Replaced with offset to .plt. + 0, 0, 0, 0, // Replaced with size of .plt. + 0, // Augmentation size. + elfcpp::DW_CFA_def_cfa_offset, 16, // DW_CFA_def_cfa_offset: 16. 
+ elfcpp::DW_CFA_advance_loc + 6, // Advance 6 to __PLT__ + 6. + elfcpp::DW_CFA_def_cfa_offset, 24, // DW_CFA_def_cfa_offset: 24. + elfcpp::DW_CFA_advance_loc + 10, // Advance 10 to __PLT__ + 16. + elfcpp::DW_CFA_def_cfa_expression, // DW_CFA_def_cfa_expression. + 11, // Block length. + elfcpp::DW_OP_breg7, 8, // Push %rsp + 8. + elfcpp::DW_OP_breg16, 0, // Push %rip. + elfcpp::DW_OP_lit15, // Push 0xf. + elfcpp::DW_OP_and, // & (%rip & 0xf). + elfcpp::DW_OP_lit11, // Push 0xb. + elfcpp::DW_OP_ge, // >= ((%rip & 0xf) >= 0xb) + elfcpp::DW_OP_lit3, // Push 3. + elfcpp::DW_OP_shl, // << (((%rip & 0xf) >= 0xb) << 3) + elfcpp::DW_OP_plus, // + ((((%rip&0xf)>=0xb)<<3)+%rsp+8 + elfcpp::DW_CFA_nop, // Align to 32 bytes. + elfcpp::DW_CFA_nop, + elfcpp::DW_CFA_nop, + elfcpp::DW_CFA_nop +}; + // Write out the PLT. This uses the hand-coded instructions above, // and adjusts them as needed. This is specified by the AMD64 ABI. @@ -784,8 +1278,12 @@ Output_data_plt_x86_64::do_write(Output_file* of) unsigned char* const oview = of->get_output_view(offset, oview_size); const off_t got_file_offset = this->got_plt_->offset(); + gold_assert(parameters->incremental_update() + || (got_file_offset + this->got_plt_->data_size() + == this->got_irelative_->offset())); const section_size_type got_size = - convert_to_section_size_type(this->got_plt_->data_size()); + convert_to_section_size_type(this->got_plt_->data_size() + + this->got_irelative_->data_size()); unsigned char* const got_view = of->get_output_view(got_file_offset, got_size); @@ -812,12 +1310,20 @@ Output_data_plt_x86_64::do_write(Output_file* of) unsigned char* got_pov = got_view; - memset(got_pov, 0, 24); - got_pov += 24; + // The first entry in the GOT is the address of the .dynamic section + // aka the PT_DYNAMIC segment. The next two entries are reserved. + // We saved space for them when we created the section in + // Target_x86_64::got_section. + Output_section* dynamic = this->layout_->dynamic_section(); + uint32_t dynamic_addr = dynamic == NULL ? 0 : dynamic->address(); + elfcpp::Swap<64, false>::writeval(got_pov, dynamic_addr); + got_pov += 8; + memset(got_pov, 0, 16); + got_pov += 16; unsigned int plt_offset = plt_entry_size; unsigned int got_offset = 24; - const unsigned int count = this->count_; + const unsigned int count = this->count_ + this->irelative_count_; for (unsigned int plt_index = 0; plt_index < count; ++plt_index, @@ -837,74 +1343,312 @@ Output_data_plt_x86_64::do_write(Output_file* of) elfcpp::Swap<32, false>::writeval(pov + 12, - (plt_offset + plt_entry_size)); - // Set the entry in the GOT. - elfcpp::Swap<64, false>::writeval(got_pov, plt_address + plt_offset + 6); - } + // Set the entry in the GOT. + elfcpp::Swap<64, false>::writeval(got_pov, plt_address + plt_offset + 6); + } + + if (this->has_tlsdesc_entry()) + { + // Set and adjust the reserved TLSDESC PLT entry. + unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset(); + memcpy(pov, tlsdesc_plt_entry, plt_entry_size); + elfcpp::Swap_unaligned<32, false>::writeval(pov + 2, + (got_address + 8 + - (plt_address + plt_offset + + 6))); + elfcpp::Swap_unaligned<32, false>::writeval(pov + 8, + (got_base + + tlsdesc_got_offset + - (plt_address + plt_offset + + 12))); + pov += plt_entry_size; + } + + gold_assert(static_cast(pov - oview) == oview_size); + gold_assert(static_cast(got_pov - got_view) == got_size); + + of->write_output_view(offset, oview_size, oview); + of->write_output_view(got_file_offset, got_size, got_view); +} + +// Create the PLT section. 
+ +void +Target_x86_64::make_plt_section(Symbol_table* symtab, Layout* layout) +{ + if (this->plt_ == NULL) + { + // Create the GOT sections first. + this->got_section(symtab, layout); + + this->plt_ = new Output_data_plt_x86_64(layout, this->got_, + this->got_plt_, + this->got_irelative_); + layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_EXECINSTR), + this->plt_, ORDER_PLT, false); + + // Make the sh_info field of .rela.plt point to .plt. + Output_section* rela_plt_os = this->plt_->rela_plt()->output_section(); + rela_plt_os->set_info_section(this->plt_->output_section()); + } +} + +// Return the section for TLSDESC relocations. + +Target_x86_64::Reloc_section* +Target_x86_64::rela_tlsdesc_section(Layout* layout) const +{ + return this->plt_section()->rela_tlsdesc(layout); +} + +// Create a PLT entry for a global symbol. + +void +Target_x86_64::make_plt_entry(Symbol_table* symtab, Layout* layout, + Symbol* gsym) +{ + if (gsym->has_plt_offset()) + return; + + if (this->plt_ == NULL) + this->make_plt_section(symtab, layout); + + this->plt_->add_entry(symtab, layout, gsym); +} + +// Make a PLT entry for a local STT_GNU_IFUNC symbol. + +void +Target_x86_64::make_local_ifunc_plt_entry(Symbol_table* symtab, Layout* layout, + Sized_relobj_file<64, false>* relobj, + unsigned int local_sym_index) +{ + if (relobj->local_has_plt_offset(local_sym_index)) + return; + if (this->plt_ == NULL) + this->make_plt_section(symtab, layout); + unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout, + relobj, + local_sym_index); + relobj->set_local_plt_offset(local_sym_index, plt_offset); +} + +// Return the number of entries in the PLT. + +unsigned int +Target_x86_64::plt_entry_count() const +{ + if (this->plt_ == NULL) + return 0; + return this->plt_->entry_count(); +} + +// Return the offset of the first non-reserved PLT entry. + +unsigned int +Target_x86_64::first_plt_entry_offset() const +{ + return Output_data_plt_x86_64::first_plt_entry_offset(); +} + +// Return the size of each PLT entry. + +unsigned int +Target_x86_64::plt_entry_size() const +{ + return Output_data_plt_x86_64::get_plt_entry_size(); +} + +// Create the GOT and PLT sections for an incremental update. + +Output_data_got<64, false>* +Target_x86_64::init_got_plt_for_update(Symbol_table* symtab, + Layout* layout, + unsigned int got_count, + unsigned int plt_count) +{ + gold_assert(this->got_ == NULL); + + this->got_ = new Output_data_got<64, false>(got_count * 8); + layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_, ORDER_RELRO_LAST, + true); + + // Add the three reserved entries. + this->got_plt_ = new Output_data_space((plt_count + 3) * 8, 8, "** GOT PLT"); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + (elfcpp::SHF_ALLOC + | elfcpp::SHF_WRITE), + this->got_plt_, ORDER_NON_RELRO_FIRST, + false); + + // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT. + this->global_offset_table_ = + symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL, + Symbol_table::PREDEFINED, + this->got_plt_, + 0, 0, elfcpp::STT_OBJECT, + elfcpp::STB_LOCAL, + elfcpp::STV_HIDDEN, 0, + false, false); + + // If there are any TLSDESC relocations, they get GOT entries in + // .got.plt after the jump slot entries. + // FIXME: Get the count for TLSDESC entries. 
+ this->got_tlsdesc_ = new Output_data_got<64, false>(0); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE, + this->got_tlsdesc_, + ORDER_NON_RELRO_FIRST, false); + + // If there are any IRELATIVE relocations, they get GOT entries in + // .got.plt after the jump slot and TLSDESC entries. + this->got_irelative_ = new Output_data_space(0, 8, "** GOT IRELATIVE PLT"); + layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, + elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE, + this->got_irelative_, + ORDER_NON_RELRO_FIRST, false); + + // Create the PLT section. + this->plt_ = new Output_data_plt_x86_64(layout, this->got_, this->got_plt_, + this->got_irelative_, plt_count); + layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS, + elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR, + this->plt_, ORDER_PLT, false); - if (this->has_tlsdesc_entry()) - { - // Set and adjust the reserved TLSDESC PLT entry. - unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset(); - memcpy(pov, tlsdesc_plt_entry, plt_entry_size); - elfcpp::Swap_unaligned<32, false>::writeval(pov + 2, - (got_address + 8 - - (plt_address + plt_offset - + 6))); - elfcpp::Swap_unaligned<32, false>::writeval(pov + 8, - (got_base - + tlsdesc_got_offset - - (plt_address + plt_offset - + 12))); - pov += plt_entry_size; - } + // Make the sh_info field of .rela.plt point to .plt. + Output_section* rela_plt_os = this->plt_->rela_plt()->output_section(); + rela_plt_os->set_info_section(this->plt_->output_section()); - gold_assert(static_cast(pov - oview) == oview_size); - gold_assert(static_cast(got_pov - got_view) == got_size); + // Create the rela_dyn section. + this->rela_dyn_section(layout); - of->write_output_view(offset, oview_size, oview); - of->write_output_view(got_file_offset, got_size, got_view); + return this->got_; } -// Create the PLT section. +// Reserve a GOT entry for a local symbol, and regenerate any +// necessary dynamic relocations. void -Target_x86_64::make_plt_section(Symbol_table* symtab, Layout* layout) +Target_x86_64::reserve_local_got_entry( + unsigned int got_index, + Sized_relobj<64, false>* obj, + unsigned int r_sym, + unsigned int got_type) { - if (this->plt_ == NULL) - { - // Create the GOT sections first. 
- this->got_section(symtab, layout); + unsigned int got_offset = got_index * 8; + Reloc_section* rela_dyn = this->rela_dyn_section(NULL); - this->plt_ = new Output_data_plt_x86_64(layout, this->got_, - this->got_plt_); - layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS, - (elfcpp::SHF_ALLOC - | elfcpp::SHF_EXECINSTR), - this->plt_, false, false, false, false); + this->got_->reserve_local(got_index, obj, r_sym, got_type); + switch (got_type) + { + case GOT_TYPE_STANDARD: + if (parameters->options().output_is_position_independent()) + rela_dyn->add_local_relative(obj, r_sym, elfcpp::R_X86_64_RELATIVE, + this->got_, got_offset, 0, false); + break; + case GOT_TYPE_TLS_OFFSET: + rela_dyn->add_local(obj, r_sym, elfcpp::R_X86_64_TPOFF64, + this->got_, got_offset, 0); + break; + case GOT_TYPE_TLS_PAIR: + this->got_->reserve_slot(got_index + 1); + rela_dyn->add_local(obj, r_sym, elfcpp::R_X86_64_DTPMOD64, + this->got_, got_offset, 0); + break; + case GOT_TYPE_TLS_DESC: + gold_fatal(_("TLS_DESC not yet supported for incremental linking")); + // this->got_->reserve_slot(got_index + 1); + // rela_dyn->add_target_specific(elfcpp::R_X86_64_TLSDESC, arg, + // this->got_, got_offset, 0); + break; + default: + gold_unreachable(); } } -// Return the section for TLSDESC relocations. +// Reserve a GOT entry for a global symbol, and regenerate any +// necessary dynamic relocations. -Target_x86_64::Reloc_section* -Target_x86_64::rela_tlsdesc_section(Layout* layout) const +void +Target_x86_64::reserve_global_got_entry(unsigned int got_index, Symbol* gsym, + unsigned int got_type) { - return this->plt_section()->rela_tlsdesc(layout); + unsigned int got_offset = got_index * 8; + Reloc_section* rela_dyn = this->rela_dyn_section(NULL); + + this->got_->reserve_global(got_index, gsym, got_type); + switch (got_type) + { + case GOT_TYPE_STANDARD: + if (!gsym->final_value_is_known()) + { + if (gsym->is_from_dynobj() + || gsym->is_undefined() + || gsym->is_preemptible() + || gsym->type() == elfcpp::STT_GNU_IFUNC) + rela_dyn->add_global(gsym, elfcpp::R_X86_64_GLOB_DAT, + this->got_, got_offset, 0); + else + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_RELATIVE, + this->got_, got_offset, 0); + } + break; + case GOT_TYPE_TLS_OFFSET: + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_TPOFF64, + this->got_, got_offset, 0); + break; + case GOT_TYPE_TLS_PAIR: + this->got_->reserve_slot(got_index + 1); + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_DTPMOD64, + this->got_, got_offset, 0); + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_DTPOFF64, + this->got_, got_offset + 8, 0); + break; + case GOT_TYPE_TLS_DESC: + this->got_->reserve_slot(got_index + 1); + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_TLSDESC, + this->got_, got_offset, 0); + break; + default: + gold_unreachable(); + } } -// Create a PLT entry for a global symbol. +// Register an existing PLT entry for a global symbol. 
void -Target_x86_64::make_plt_entry(Symbol_table* symtab, Layout* layout, - Symbol* gsym) +Target_x86_64::register_global_plt_entry(Symbol_table* symtab, + Layout* layout, + unsigned int plt_index, + Symbol* gsym) { - if (gsym->has_plt_offset()) - return; + gold_assert(this->plt_ != NULL); + gold_assert(!gsym->has_plt_offset()); - if (this->plt_ == NULL) - this->make_plt_section(symtab, layout); + this->plt_->reserve_slot(plt_index); + + gsym->set_plt_offset((plt_index + 1) * this->plt_entry_size()); + + unsigned int got_offset = (plt_index + 3) * 8; + this->plt_->add_relocation(symtab, layout, gsym, got_offset); +} + +// Force a COPY relocation for a given symbol. - this->plt_->add_entry(gsym); +void +Target_x86_64::emit_copy_reloc( + Symbol_table* symtab, Symbol* sym, Output_section* os, off_t offset) +{ + this->copy_relocs_.emit_copy_reloc(symtab, + symtab->get_sized_symbol<64>(sym), + os, + offset, + this->rela_dyn_section(NULL)); } // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. @@ -957,7 +1701,7 @@ Target_x86_64::reserve_tlsdesc_entries(Symbol_table* symtab, unsigned int Target_x86_64::got_mod_index_entry(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object) + Sized_relobj_file<64, false>* object) { if (this->got_mod_index_offset_ == -1U) { @@ -1028,11 +1772,79 @@ Target_x86_64::optimize_tls_reloc(bool is_final, int r_type) } } +// Get the Reference_flags for a particular relocation. + +int +Target_x86_64::Scan::get_reference_flags(unsigned int r_type) +{ + switch (r_type) + { + case elfcpp::R_X86_64_NONE: + case elfcpp::R_X86_64_GNU_VTINHERIT: + case elfcpp::R_X86_64_GNU_VTENTRY: + case elfcpp::R_X86_64_GOTPC32: + case elfcpp::R_X86_64_GOTPC64: + // No symbol reference. + return 0; + + case elfcpp::R_X86_64_64: + case elfcpp::R_X86_64_32: + case elfcpp::R_X86_64_32S: + case elfcpp::R_X86_64_16: + case elfcpp::R_X86_64_8: + return Symbol::ABSOLUTE_REF; + + case elfcpp::R_X86_64_PC64: + case elfcpp::R_X86_64_PC32: + case elfcpp::R_X86_64_PC16: + case elfcpp::R_X86_64_PC8: + case elfcpp::R_X86_64_GOTOFF64: + return Symbol::RELATIVE_REF; + + case elfcpp::R_X86_64_PLT32: + case elfcpp::R_X86_64_PLTOFF64: + return Symbol::FUNCTION_CALL | Symbol::RELATIVE_REF; + + case elfcpp::R_X86_64_GOT64: + case elfcpp::R_X86_64_GOT32: + case elfcpp::R_X86_64_GOTPCREL64: + case elfcpp::R_X86_64_GOTPCREL: + case elfcpp::R_X86_64_GOTPLT64: + // Absolute in GOT. + return Symbol::ABSOLUTE_REF; + + case elfcpp::R_X86_64_TLSGD: // Global-dynamic + case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url) + case elfcpp::R_X86_64_TLSDESC_CALL: + case elfcpp::R_X86_64_TLSLD: // Local-dynamic + case elfcpp::R_X86_64_DTPOFF32: + case elfcpp::R_X86_64_DTPOFF64: + case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec + case elfcpp::R_X86_64_TPOFF32: // Local-exec + return Symbol::TLS_REF; + + case elfcpp::R_X86_64_COPY: + case elfcpp::R_X86_64_GLOB_DAT: + case elfcpp::R_X86_64_JUMP_SLOT: + case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: + case elfcpp::R_X86_64_TPOFF64: + case elfcpp::R_X86_64_DTPMOD64: + case elfcpp::R_X86_64_TLSDESC: + case elfcpp::R_X86_64_SIZE32: + case elfcpp::R_X86_64_SIZE64: + default: + // Not expected. We will give an error later. + return 0; + } +} + // Report an unsupported relocation against a local symbol. 
void -Target_x86_64::Scan::unsupported_reloc_local(Sized_relobj<64, false>* object, - unsigned int r_type) +Target_x86_64::Scan::unsupported_reloc_local( + Sized_relobj_file<64, false>* object, + unsigned int r_type) { gold_error(_("%s: unsupported reloc %u against local symbol"), object->name().c_str(), r_type); @@ -1044,26 +1856,56 @@ Target_x86_64::Scan::unsupported_reloc_local(Sized_relobj<64, false>* object, // Here we know the section is allocated, but we don't know that it is // read-only. But we check for all the relocation types which the // glibc dynamic linker supports, so it seems appropriate to issue an -// error even if the section is not read-only. +// error even if the section is not read-only. If GSYM is not NULL, +// it is the symbol the relocation is against; if it is NULL, the +// relocation is against a local symbol. void -Target_x86_64::Scan::check_non_pic(Relobj* object, unsigned int r_type) +Target_x86_64::Scan::check_non_pic(Relobj* object, unsigned int r_type, + Symbol* gsym) { switch (r_type) { - // These are the relocation types supported by glibc for x86_64. + // These are the relocation types supported by glibc for x86_64 + // which should always work. case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: case elfcpp::R_X86_64_GLOB_DAT: case elfcpp::R_X86_64_JUMP_SLOT: case elfcpp::R_X86_64_DTPMOD64: case elfcpp::R_X86_64_DTPOFF64: case elfcpp::R_X86_64_TPOFF64: case elfcpp::R_X86_64_64: - case elfcpp::R_X86_64_32: - case elfcpp::R_X86_64_PC32: case elfcpp::R_X86_64_COPY: return; + // glibc supports these reloc types, but they can overflow. + case elfcpp::R_X86_64_PC32: + // A PC relative reference is OK against a local symbol or if + // the symbol is defined locally. + if (gsym == NULL + || (!gsym->is_from_dynobj() + && !gsym->is_undefined() + && !gsym->is_preemptible())) + return; + /* Fall through. */ + case elfcpp::R_X86_64_32: + if (this->issued_non_pic_error_) + return; + gold_assert(parameters->options().output_is_position_independent()); + if (gsym == NULL) + object->error(_("requires dynamic R_X86_64_32 reloc which may " + "overflow at runtime; recompile with -fPIC")); + else + object->error(_("requires dynamic %s reloc against '%s' which may " + "overflow at runtime; recompile with -fPIC"), + (r_type == elfcpp::R_X86_64_32 + ? "R_X86_64_32" + : "R_X86_64_PC32"), + gsym->name()); + this->issued_non_pic_error_ = true; + return; + default: // This prevents us from issuing more than one error per reloc // section. But we can still wind up issuing more than one @@ -1071,8 +1913,9 @@ Target_x86_64::Scan::check_non_pic(Relobj* object, unsigned int r_type) if (this->issued_non_pic_error_) return; gold_assert(parameters->options().output_is_position_independent()); - object->error(_("requires unsupported dynamic reloc; " - "recompile with -fPIC")); + object->error(_("requires unsupported dynamic reloc %u; " + "recompile with -fPIC"), + r_type); this->issued_non_pic_error_ = true; return; @@ -1081,24 +1924,47 @@ Target_x86_64::Scan::check_non_pic(Relobj* object, unsigned int r_type) } } +// Return whether we need to make a PLT entry for a relocation of the +// given type against a STT_GNU_IFUNC symbol. 
+ +bool +Target_x86_64::Scan::reloc_needs_plt_for_ifunc( + Sized_relobj_file<64, false>* object, + unsigned int r_type) +{ + int flags = Scan::get_reference_flags(r_type); + if (flags & Symbol::TLS_REF) + gold_error(_("%s: unsupported TLS reloc %u for IFUNC symbol"), + object->name().c_str(), r_type); + return flags != 0; +} + // Scan a relocation for a local symbol. inline void Target_x86_64::Scan::local(Symbol_table* symtab, Layout* layout, Target_x86_64* target, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, Output_section* output_section, const elfcpp::Rela<64, false>& reloc, unsigned int r_type, const elfcpp::Sym<64, false>& lsym) { + // A local STT_GNU_IFUNC symbol may require a PLT entry. + bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC; + if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type)) + { + unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info()); + target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym); + } + switch (r_type) { case elfcpp::R_X86_64_NONE: - case elfcpp::R_386_GNU_VTINHERIT: - case elfcpp::R_386_GNU_VTENTRY: + case elfcpp::R_X86_64_GNU_VTINHERIT: + case elfcpp::R_X86_64_GNU_VTENTRY: break; case elfcpp::R_X86_64_64: @@ -1112,11 +1978,11 @@ Target_x86_64::Scan::local(Symbol_table* symtab, { unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info()); Reloc_section* rela_dyn = target->rela_dyn_section(layout); - rela_dyn->add_local_relative(object, r_sym, - elfcpp::R_X86_64_RELATIVE, - output_section, data_shndx, - reloc.get_r_offset(), - reloc.get_r_addend()); + rela_dyn->add_local_relative(object, r_sym, + elfcpp::R_X86_64_RELATIVE, + output_section, data_shndx, + reloc.get_r_offset(), + reloc.get_r_addend(), is_ifunc); } break; @@ -1130,7 +1996,7 @@ Target_x86_64::Scan::local(Symbol_table* symtab, // because that is always a 64-bit relocation. if (parameters->options().output_is_position_independent()) { - this->check_non_pic(object, r_type); + this->check_non_pic(object, r_type, NULL); Reloc_section* rela_dyn = target->rela_dyn_section(layout); unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info()); @@ -1187,7 +2053,16 @@ Target_x86_64::Scan::local(Symbol_table* symtab, // The symbol requires a GOT entry. Output_data_got<64, false>* got = target->got_section(symtab, layout); unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info()); - if (got->add_local(object, r_sym, GOT_TYPE_STANDARD)) + + // For a STT_GNU_IFUNC symbol we want the PLT offset. That + // lets function pointers compare correctly with shared + // libraries. Otherwise we would need an IRELATIVE reloc. + bool is_new; + if (is_ifunc) + is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD); + else + is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD); + if (is_new) { // If we are generating a shared object, we need to add a // dynamic relocation for this symbol's GOT entry. @@ -1196,12 +2071,16 @@ Target_x86_64::Scan::local(Symbol_table* symtab, Reloc_section* rela_dyn = target->rela_dyn_section(layout); // R_X86_64_RELATIVE assumes a 64-bit relocation. 
if (r_type != elfcpp::R_X86_64_GOT32) - rela_dyn->add_local_relative( - object, r_sym, elfcpp::R_X86_64_RELATIVE, got, - object->local_got_offset(r_sym, GOT_TYPE_STANDARD), 0); + { + unsigned int got_offset = + object->local_got_offset(r_sym, GOT_TYPE_STANDARD); + rela_dyn->add_local_relative(object, r_sym, + elfcpp::R_X86_64_RELATIVE, + got, got_offset, 0, is_ifunc); + } else { - this->check_non_pic(object, r_type); + this->check_non_pic(object, r_type, NULL); gold_assert(lsym.get_st_type() != elfcpp::STT_SECTION); rela_dyn->add_local( @@ -1219,6 +2098,7 @@ Target_x86_64::Scan::local(Symbol_table* symtab, case elfcpp::R_X86_64_GLOB_DAT: case elfcpp::R_X86_64_JUMP_SLOT: case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: // These are outstanding tls relocs, which are unexpected when linking case elfcpp::R_X86_64_TPOFF64: case elfcpp::R_X86_64_DTPMOD64: @@ -1274,9 +2154,13 @@ Target_x86_64::Scan::local(Symbol_table* symtab, // Create reserved PLT and GOT entries for the resolver. target->reserve_tlsdesc_entries(symtab, layout); - // Generate a double GOT entry with an R_X86_64_TLSDESC reloc. - Output_data_got<64, false>* got - = target->got_section(symtab, layout); + // Generate a double GOT entry with an + // R_X86_64_TLSDESC reloc. The R_X86_64_TLSDESC reloc + // is resolved lazily, so the GOT entry needs to be in + // an area in .got.plt, not .got. Call got_section to + // make sure the section has been created. + target->got_section(symtab, layout); + Output_data_got<64, false>* got = target->got_tlsdesc_section(); unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info()); if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC)) { @@ -1356,32 +2240,109 @@ Target_x86_64::Scan::local(Symbol_table* symtab, // Report an unsupported relocation against a global symbol. void -Target_x86_64::Scan::unsupported_reloc_global(Sized_relobj<64, false>* object, - unsigned int r_type, - Symbol* gsym) +Target_x86_64::Scan::unsupported_reloc_global( + Sized_relobj_file<64, false>* object, + unsigned int r_type, + Symbol* gsym) { gold_error(_("%s: unsupported reloc %u against global symbol %s"), object->name().c_str(), r_type, gsym->demangled_name().c_str()); } +// Returns true if this relocation type could be that of a function pointer. +inline bool +Target_x86_64::Scan::possible_function_pointer_reloc(unsigned int r_type) +{ + switch (r_type) + { + case elfcpp::R_X86_64_64: + case elfcpp::R_X86_64_32: + case elfcpp::R_X86_64_32S: + case elfcpp::R_X86_64_16: + case elfcpp::R_X86_64_8: + case elfcpp::R_X86_64_GOT64: + case elfcpp::R_X86_64_GOT32: + case elfcpp::R_X86_64_GOTPCREL64: + case elfcpp::R_X86_64_GOTPCREL: + case elfcpp::R_X86_64_GOTPLT64: + { + return true; + } + } + return false; +} + +// For safe ICF, scan a relocation for a local symbol to check if it +// corresponds to a function pointer being taken. In that case mark +// the function whose pointer was taken as not foldable. + +inline bool +Target_x86_64::Scan::local_reloc_may_be_function_pointer( + Symbol_table* , + Layout* , + Target_x86_64* , + Sized_relobj_file<64, false>* , + unsigned int , + Output_section* , + const elfcpp::Rela<64, false>& , + unsigned int r_type, + const elfcpp::Sym<64, false>&) +{ + // When building a shared library, do not fold any local symbols as it is + // not possible to distinguish pointer taken versus a call by looking at + // the relocation types. 
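// [Editorial sketch -- not part of the patch.]  Why the GOT entry for an
// IFUNC symbol wants the PLT address: function-pointer equality must hold
// across module boundaries, so the executable and a shared library that
// both take the address of the same IFUNC have to observe one value.
// (Assumed setup: 'my_func' is a global IFUNC defined in the executable,
// and a shared library exports address_of_my_func().)

extern int my_func(int);
extern int (*address_of_my_func(void))(int);   // defined in the library

bool address_is_consistent(void)
{
  // Required by the C/C++ object model; resolving both sides to the PLT
  // address is what keeps it true for IFUNC symbols.
  return address_of_my_func() == &my_func;
}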
+ return (parameters->options().shared() + || possible_function_pointer_reloc(r_type)); +} + +// For safe ICF, scan a relocation for a global symbol to check if it +// corresponds to a function pointer being taken. In that case mark +// the function whose pointer was taken as not foldable. + +inline bool +Target_x86_64::Scan::global_reloc_may_be_function_pointer( + Symbol_table*, + Layout* , + Target_x86_64* , + Sized_relobj_file<64, false>* , + unsigned int , + Output_section* , + const elfcpp::Rela<64, false>& , + unsigned int r_type, + Symbol* gsym) +{ + // When building a shared library, do not fold symbols whose visibility + // is hidden, internal or protected. + return ((parameters->options().shared() + && (gsym->visibility() == elfcpp::STV_INTERNAL + || gsym->visibility() == elfcpp::STV_PROTECTED + || gsym->visibility() == elfcpp::STV_HIDDEN)) + || possible_function_pointer_reloc(r_type)); +} + // Scan a relocation for a global symbol. inline void Target_x86_64::Scan::global(Symbol_table* symtab, Layout* layout, Target_x86_64* target, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, Output_section* output_section, const elfcpp::Rela<64, false>& reloc, unsigned int r_type, Symbol* gsym) { + // A STT_GNU_IFUNC symbol may require a PLT entry. + if (gsym->type() == elfcpp::STT_GNU_IFUNC + && this->reloc_needs_plt_for_ifunc(object, r_type)) + target->make_plt_entry(symtab, layout, gsym); + switch (r_type) { case elfcpp::R_X86_64_NONE: - case elfcpp::R_386_GNU_VTINHERIT: - case elfcpp::R_386_GNU_VTENTRY: + case elfcpp::R_X86_64_GNU_VTINHERIT: + case elfcpp::R_X86_64_GNU_VTENTRY: break; case elfcpp::R_X86_64_64: @@ -1402,25 +2363,46 @@ Target_x86_64::Scan::global(Symbol_table* symtab, gsym->set_needs_dynsym_value(); } // Make a dynamic relocation if necessary. - if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF)) + if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type))) { if (gsym->may_need_copy_reloc()) { target->copy_reloc(symtab, layout, object, data_shndx, output_section, gsym, reloc); } + else if (r_type == elfcpp::R_X86_64_64 + && gsym->type() == elfcpp::STT_GNU_IFUNC + && gsym->can_use_relative_reloc(false) + && !gsym->is_from_dynobj() + && !gsym->is_undefined() + && !gsym->is_preemptible()) + { + // Use an IRELATIVE reloc for a locally defined + // STT_GNU_IFUNC symbol. This makes a function + // address in a PIE executable match the address in a + // shared library that it links against. 
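// [Editorial sketch -- not part of the patch.]  What the two checks above
// protect under --icf=safe: if a relocation could be materializing a
// function's address, folding that function would let byte-identical
// functions compare equal.  For example:

static int add_one(int x) { return x + 1; }
static int succ(int x)    { return x + 1; }   // byte-identical to add_one

bool provably_distinct(void)
{
  int (*p)(int) = add_one;   // address taken: a possible_function_pointer_reloc
  int (*q)(int) = succ;      // ditto
  return p != q;             // must stay true, so neither may be folded
}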
+ Reloc_section* rela_dyn = + target->rela_irelative_section(layout); + unsigned int r_type = elfcpp::R_X86_64_IRELATIVE; + rela_dyn->add_symbolless_global_addend(gsym, r_type, + output_section, object, + data_shndx, + reloc.get_r_offset(), + reloc.get_r_addend()); + } else if (r_type == elfcpp::R_X86_64_64 && gsym->can_use_relative_reloc(false)) { Reloc_section* rela_dyn = target->rela_dyn_section(layout); - rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_RELATIVE, - output_section, object, - data_shndx, reloc.get_r_offset(), - reloc.get_r_addend()); + rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_RELATIVE, + output_section, object, + data_shndx, + reloc.get_r_offset(), + reloc.get_r_addend()); } else { - this->check_non_pic(object, r_type); + this->check_non_pic(object, r_type, gsym); Reloc_section* rela_dyn = target->rela_dyn_section(layout); rela_dyn->add_global(gsym, r_type, output_section, object, data_shndx, reloc.get_r_offset(), @@ -1439,10 +2421,7 @@ Target_x86_64::Scan::global(Symbol_table* symtab, if (gsym->needs_plt_entry()) target->make_plt_entry(symtab, layout, gsym); // Make a dynamic relocation if necessary. - int flags = Symbol::NON_PIC_REF; - if (gsym->is_func()) - flags |= Symbol::FUNCTION_CALL; - if (gsym->needs_dynamic_reloc(flags)) + if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type))) { if (gsym->may_need_copy_reloc()) { @@ -1451,7 +2430,7 @@ Target_x86_64::Scan::global(Symbol_table* symtab, } else { - this->check_non_pic(object, r_type); + this->check_non_pic(object, r_type, gsym); Reloc_section* rela_dyn = target->rela_dyn_section(layout); rela_dyn->add_global(gsym, r_type, output_section, object, data_shndx, reloc.get_r_offset(), @@ -1470,23 +2449,64 @@ Target_x86_64::Scan::global(Symbol_table* symtab, // The symbol requires a GOT entry. Output_data_got<64, false>* got = target->got_section(symtab, layout); if (gsym->final_value_is_known()) - got->add_global(gsym, GOT_TYPE_STANDARD); + { + // For a STT_GNU_IFUNC symbol we want the PLT address. + if (gsym->type() == elfcpp::STT_GNU_IFUNC) + got->add_global_plt(gsym, GOT_TYPE_STANDARD); + else + got->add_global(gsym, GOT_TYPE_STANDARD); + } else { // If this symbol is not fully resolved, we need to add a // dynamic relocation for it. Reloc_section* rela_dyn = target->rela_dyn_section(layout); - if (gsym->is_from_dynobj() - || gsym->is_undefined() - || gsym->is_preemptible()) + + // Use a GLOB_DAT rather than a RELATIVE reloc if: + // + // 1) The symbol may be defined in some other module. + // + // 2) We are building a shared library and this is a + // protected symbol; using GLOB_DAT means that the dynamic + // linker can use the address of the PLT in the main + // executable when appropriate so that function address + // comparisons work. + // + // 3) This is a STT_GNU_IFUNC symbol in position dependent + // code, again so that function address comparisons work. 
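// [Editorial sketch -- not the gold or glibc implementation.]  What the
// add_symbolless_global_addend() call above ultimately emits, from the
// dynamic linker's point of view: an Elf64_Rela with no symbol, whose
// addend is the IFUNC resolver; applying it means calling the resolver
// and storing its return value in the relocated slot.

#include <elf.h>

static void apply_one_irelative(const Elf64_Rela* r, Elf64_Addr load_bias)
{
  // r->r_info == ELF64_R_INFO(0, R_X86_64_IRELATIVE): no symbol index.
  typedef Elf64_Addr (*resolver_fn)(void);
  resolver_fn resolver = (resolver_fn)(load_bias + r->r_addend);
  *(Elf64_Addr*)(load_bias + r->r_offset) = resolver();
}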
+ if (gsym->is_from_dynobj() + || gsym->is_undefined() + || gsym->is_preemptible() + || (gsym->visibility() == elfcpp::STV_PROTECTED + && parameters->options().shared()) + || (gsym->type() == elfcpp::STT_GNU_IFUNC + && parameters->options().output_is_position_independent())) got->add_global_with_rela(gsym, GOT_TYPE_STANDARD, rela_dyn, elfcpp::R_X86_64_GLOB_DAT); else { - if (got->add_global(gsym, GOT_TYPE_STANDARD)) - rela_dyn->add_global_relative( - gsym, elfcpp::R_X86_64_RELATIVE, got, - gsym->got_offset(GOT_TYPE_STANDARD), 0); + // For a STT_GNU_IFUNC symbol we want to write the PLT + // offset into the GOT, so that function pointer + // comparisons work correctly. + bool is_new; + if (gsym->type() != elfcpp::STT_GNU_IFUNC) + is_new = got->add_global(gsym, GOT_TYPE_STANDARD); + else + { + is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD); + // Tell the dynamic linker to use the PLT address + // when resolving relocations. + if (gsym->is_from_dynobj() + && !parameters->options().shared()) + gsym->set_needs_dynsym_value(); + } + if (is_new) + { + unsigned int got_off = gsym->got_offset(GOT_TYPE_STANDARD); + rela_dyn->add_global_relative(gsym, + elfcpp::R_X86_64_RELATIVE, + got, got_off, 0); + } } } // For GOTPLT64, we also need a PLT entry (but only if the @@ -1529,6 +2549,7 @@ Target_x86_64::Scan::global(Symbol_table* symtab, case elfcpp::R_X86_64_GLOB_DAT: case elfcpp::R_X86_64_JUMP_SLOT: case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: // These are outstanding tls relocs, which are unexpected when linking case elfcpp::R_X86_64_TPOFF64: case elfcpp::R_X86_64_DTPMOD64: @@ -1584,10 +2605,14 @@ Target_x86_64::Scan::global(Symbol_table* symtab, // Create reserved PLT and GOT entries for the resolver. target->reserve_tlsdesc_entries(symtab, layout); - // Create a double GOT entry with an R_X86_64_TLSDESC reloc. - Output_data_got<64, false>* got - = target->got_section(symtab, layout); - Reloc_section *rt = target->rela_tlsdesc_section(layout); + // Create a double GOT entry with an R_X86_64_TLSDESC + // reloc. The R_X86_64_TLSDESC reloc is resolved + // lazily, so the GOT entry needs to be in an area in + // .got.plt, not .got. Call got_section to make sure + // the section has been created. 
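// [Editorial sketch -- not part of the patch.]  The "double GOT entry"
// created for TLSDESC is a TLS descriptor: a two-word object that the
// lazily-resolved R_X86_64_TLSDESC relocation fills in at run time.
// Roughly the shape glibc gives it:

#include <stddef.h>

struct tlsdesc
{
  ptrdiff_t (*entry)(struct tlsdesc*);   // called through x@TLSCALL(%rax)
  void* arg;                             // resolver-private data / TP offset
};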
+ target->got_section(symtab, layout); + Output_data_got<64, false>* got = target->got_tlsdesc_section(); + Reloc_section* rt = target->rela_tlsdesc_section(layout); got->add_global_pair_with_rela(gsym, GOT_TYPE_TLS_DESC, rt, elfcpp::R_X86_64_TLSDESC, 0); } @@ -1661,7 +2686,7 @@ Target_x86_64::Scan::global(Symbol_table* symtab, void Target_x86_64::gc_process_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -1678,7 +2703,8 @@ Target_x86_64::gc_process_relocs(Symbol_table* symtab, } gold::gc_process_relocs<64, false, Target_x86_64, elfcpp::SHT_RELA, - Target_x86_64::Scan>( + Target_x86_64::Scan, + Target_x86_64::Relocatable_size_for_reloc>( symtab, layout, this, @@ -1697,7 +2723,7 @@ Target_x86_64::gc_process_relocs(Symbol_table* symtab, void Target_x86_64::scan_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -1774,6 +2800,47 @@ Target_x86_64::do_finalize_sections( uint64_t data_size = this->got_plt_->current_data_size(); symtab->get_sized_symbol<64>(sym)->set_symsize(data_size); } + + if (parameters->doing_static_link() + && (this->plt_ == NULL || !this->plt_->has_irelative_section())) + { + // If linking statically, make sure that the __rela_iplt symbols + // were defined if necessary, even if we didn't create a PLT. + static const Define_symbol_in_segment syms[] = + { + { + "__rela_iplt_start", // name + elfcpp::PT_LOAD, // segment_type + elfcpp::PF_W, // segment_flags_set + elfcpp::PF(0), // segment_flags_clear + 0, // value + 0, // size + elfcpp::STT_NOTYPE, // type + elfcpp::STB_GLOBAL, // binding + elfcpp::STV_HIDDEN, // visibility + 0, // nonvis + Symbol::SEGMENT_START, // offset_from_base + true // only_if_ref + }, + { + "__rela_iplt_end", // name + elfcpp::PT_LOAD, // segment_type + elfcpp::PF_W, // segment_flags_set + elfcpp::PF(0), // segment_flags_clear + 0, // value + 0, // size + elfcpp::STT_NOTYPE, // type + elfcpp::STB_GLOBAL, // binding + elfcpp::STV_HIDDEN, // visibility + 0, // nonvis + Symbol::SEGMENT_START, // offset_from_base + true // only_if_ref + } + }; + + symtab->define_symbols(layout, 2, syms, + layout->script_options()->saw_sections_clause()); + } } // Perform a relocation. @@ -1808,20 +2875,28 @@ Target_x86_64::Relocate::relocate(const Relocate_info<64, false>* relinfo, } } - // Pick the value to use for symbols defined in shared objects. + const Sized_relobj_file<64, false>* object = relinfo->object; + + // Pick the value to use for symbols defined in the PLT. 
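// [Editorial sketch -- mirrors what glibc's startup code does; the symbol
// names are the ones defined by do_finalize_sections above.]  The
// __rela_iplt_start/__rela_iplt_end symbols exist so that a statically
// linked, non-PIE program can apply its own IRELATIVE relocations before
// main() runs, since no dynamic linker will do it:

#include <elf.h>

extern const Elf64_Rela __rela_iplt_start[];
extern const Elf64_Rela __rela_iplt_end[];

static void apply_static_irelative_relocs(void)
{
  for (const Elf64_Rela* r = __rela_iplt_start; r != __rela_iplt_end; ++r)
    if (ELF64_R_TYPE(r->r_info) == R_X86_64_IRELATIVE)
      {
        // Absolute addresses: no load bias in a non-PIE static link.
        Elf64_Addr (*resolver)(void) = (Elf64_Addr (*)(void)) r->r_addend;
        *(Elf64_Addr*) r->r_offset = resolver();
      }
}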
Symbol_value<64> symval; if (gsym != NULL - && gsym->use_plt_offset(r_type == elfcpp::R_X86_64_PC64 - || r_type == elfcpp::R_X86_64_PC32 - || r_type == elfcpp::R_X86_64_PC16 - || r_type == elfcpp::R_X86_64_PC8)) + && gsym->use_plt_offset(Scan::get_reference_flags(r_type))) { - symval.set_output_value(target->plt_section()->address() + symval.set_output_value(target->plt_address_for_global(gsym) + gsym->plt_offset()); psymval = &symval; } + else if (gsym == NULL && psymval->is_ifunc_symbol()) + { + unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info()); + if (object->local_has_plt_offset(r_sym)) + { + symval.set_output_value(target->plt_address_for_local(object, r_sym) + + object->local_plt_offset(r_sym)); + psymval = &symval; + } + } - const Sized_relobj<64, false>* object = relinfo->object; const elfcpp::Elf_Xword addend = rela.get_r_addend(); // Get the GOT offset if needed. @@ -1859,8 +2934,8 @@ Target_x86_64::Relocate::relocate(const Relocate_info<64, false>* relinfo, switch (r_type) { case elfcpp::R_X86_64_NONE: - case elfcpp::R_386_GNU_VTINHERIT: - case elfcpp::R_386_GNU_VTENTRY: + case elfcpp::R_X86_64_GNU_VTINHERIT: + case elfcpp::R_X86_64_GNU_VTENTRY: break; case elfcpp::R_X86_64_64: @@ -1997,6 +3072,7 @@ Target_x86_64::Relocate::relocate(const Relocate_info<64, false>* relinfo, case elfcpp::R_X86_64_GLOB_DAT: case elfcpp::R_X86_64_JUMP_SLOT: case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: // These are outstanding tls relocs, which are unexpected when linking case elfcpp::R_X86_64_TPOFF64: case elfcpp::R_X86_64_DTPMOD64: @@ -2047,23 +3123,39 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, { Output_segment* tls_segment = relinfo->layout->tls_segment(); - const Sized_relobj<64, false>* object = relinfo->object; + const Sized_relobj_file<64, false>* object = relinfo->object; const elfcpp::Elf_Xword addend = rela.get_r_addend(); + elfcpp::Shdr<64, false> data_shdr(relinfo->data_shdr); + bool is_executable = (data_shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) != 0; elfcpp::Elf_types<64>::Elf_Addr value = psymval->value(relinfo->object, 0); const bool is_final = (gsym == NULL ? !parameters->options().shared() : gsym->final_value_is_known()); - const tls::Tls_optimization optimized_type + tls::Tls_optimization optimized_type = Target_x86_64::optimize_tls_reloc(is_final, r_type); switch (r_type) { case elfcpp::R_X86_64_TLSGD: // Global-dynamic - this->saw_tls_block_reloc_ = true; + if (!is_executable && optimized_type == tls::TLSOPT_TO_LE) + { + // If this code sequence is used in a non-executable section, + // we will not optimize the R_X86_64_DTPOFF32/64 relocation, + // on the assumption that it's being used by itself in a debug + // section. Therefore, in the unlikely event that the code + // sequence appears in a non-executable section, we simply + // leave it unoptimized. 
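// [Editorial sketch -- not part of the patch.]  The general-dynamic
// sequence the R_X86_64_TLSGD case above rewrites comes from ordinary
// -fPIC code such as:

extern __thread int counter;
int bump(void) { return ++counter; }

// The compiler emits an R_X86_64_TLSGD reference plus a call to
// __tls_get_addr; when gold can resolve the symbol in the final
// executable, tls_gd_to_le() replaces that pair with a direct
// %fs-relative access and the call disappears.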
+ optimized_type = tls::TLSOPT_NONE; + } if (optimized_type == tls::TLSOPT_TO_LE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } this->tls_gd_to_le(relinfo, relnum, tls_segment, rela, r_type, value, view, view_size); @@ -2089,7 +3181,12 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, } if (optimized_type == tls::TLSOPT_TO_IE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } value = target->got_plt_section()->address() + got_offset; this->tls_gd_to_ie(relinfo, relnum, tls_segment, rela, r_type, value, view, address, view_size); @@ -2111,10 +3208,19 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url) case elfcpp::R_X86_64_TLSDESC_CALL: - this->saw_tls_block_reloc_ = true; + if (!is_executable && optimized_type == tls::TLSOPT_TO_LE) + { + // See above comment for R_X86_64_TLSGD. + optimized_type = tls::TLSOPT_NONE; + } if (optimized_type == tls::TLSOPT_TO_LE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } this->tls_desc_gd_to_le(relinfo, relnum, tls_segment, rela, r_type, value, view, view_size); @@ -2125,22 +3231,36 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, unsigned int got_type = (optimized_type == tls::TLSOPT_TO_IE ? GOT_TYPE_TLS_OFFSET : GOT_TYPE_TLS_DESC); - unsigned int got_offset; + unsigned int got_offset = 0; + if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC + && optimized_type == tls::TLSOPT_NONE) + { + // We created GOT entries in the .got.tlsdesc portion of + // the .got.plt section, but the offset stored in the + // symbol is the offset within .got.tlsdesc. + got_offset = (target->got_size() + + target->got_plt_section()->data_size()); + } if (gsym != NULL) { gold_assert(gsym->has_got_offset(got_type)); - got_offset = gsym->got_offset(got_type) - target->got_size(); + got_offset += gsym->got_offset(got_type) - target->got_size(); } else { unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info()); gold_assert(object->local_has_got_offset(r_sym, got_type)); - got_offset = (object->local_got_offset(r_sym, got_type) - - target->got_size()); + got_offset += (object->local_got_offset(r_sym, got_type) + - target->got_size()); } if (optimized_type == tls::TLSOPT_TO_IE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } value = target->got_plt_section()->address() + got_offset; this->tls_desc_gd_to_ie(relinfo, relnum, tls_segment, rela, r_type, value, view, address, @@ -2165,10 +3285,19 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, break; case elfcpp::R_X86_64_TLSLD: // Local-dynamic - this->saw_tls_block_reloc_ = true; + if (!is_executable && optimized_type == tls::TLSOPT_TO_LE) + { + // See above comment for R_X86_64_TLSGD. 
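// [Editorial sketch -- not part of the patch.]  The GOTPC32_TLSDESC and
// TLSDESC_CALL relocations handled above are produced by the descriptor
// TLS dialect, e.g. compiling with -fPIC -mtls-dialect=gnu2:

extern __thread int cache_slot;
int read_slot(void) { return cache_slot; }

// which emits the lazily-bound sequence
//   leaq  cache_slot@TLSDESC(%rip), %rax     # R_X86_64_GOTPC32_TLSDESC
//   call *cache_slot@TLSCALL(%rax)           # R_X86_64_TLSDESC_CALL
// The two GOT words allocated in .got.tlsdesc are the descriptor that
// the indirect call goes through.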
+ optimized_type = tls::TLSOPT_NONE; + } if (optimized_type == tls::TLSOPT_TO_LE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } this->tls_ld_to_le(relinfo, relnum, tls_segment, rela, r_type, value, view, view_size); break; @@ -2190,38 +3319,49 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, break; case elfcpp::R_X86_64_DTPOFF32: - if (optimized_type == tls::TLSOPT_TO_LE) - { - // This relocation type is used in debugging information. - // In that case we need to not optimize the value. If we - // haven't seen a TLSLD reloc, then we assume we should not - // optimize this reloc. - if (this->saw_tls_block_reloc_) + // This relocation type is used in debugging information. + // In that case we need to not optimize the value. If the + // section is not executable, then we assume we should not + // optimize this reloc. See comments above for R_X86_64_TLSGD, + // R_X86_64_GOTPC32_TLSDESC, R_X86_64_TLSDESC_CALL, and + // R_X86_64_TLSLD. + if (optimized_type == tls::TLSOPT_TO_LE && is_executable) + { + if (tls_segment == NULL) { - gold_assert(tls_segment != NULL); - value -= tls_segment->memsz(); + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; } - } + value -= tls_segment->memsz(); + } Relocate_functions<64, false>::rela32(view, value, addend); break; case elfcpp::R_X86_64_DTPOFF64: - if (optimized_type == tls::TLSOPT_TO_LE) - { - // See R_X86_64_DTPOFF32, just above, for why we test this. - if (this->saw_tls_block_reloc_) + // See R_X86_64_DTPOFF32, just above, for why we check for is_executable. + if (optimized_type == tls::TLSOPT_TO_LE && is_executable) + { + if (tls_segment == NULL) { - gold_assert(tls_segment != NULL); - value -= tls_segment->memsz(); + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; } - } + value -= tls_segment->memsz(); + } Relocate_functions<64, false>::rela64(view, value, addend); break; case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec if (optimized_type == tls::TLSOPT_TO_LE) { - gold_assert(tls_segment != NULL); + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } Target_x86_64::Relocate::tls_ie_to_le(relinfo, relnum, tls_segment, rela, r_type, value, view, view_size); @@ -2256,6 +3396,12 @@ Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo, break; case elfcpp::R_X86_64_TPOFF32: // Local-exec + if (tls_segment == NULL) + { + gold_assert(parameters->errors()->error_count() > 0 + || issue_undefined_symbol_error(gsym)); + return; + } value -= tls_segment->memsz(); Relocate_functions<64, false>::rela32(view, value, addend); break; @@ -2527,6 +3673,32 @@ Target_x86_64::relocate_section( reloc_symbol_changes); } +// Apply an incremental relocation. Incremental relocations always refer +// to global symbols. 
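// [Editorial sketch -- not part of the patch.]  The
// "value -= tls_segment->memsz()" adjustments above implement the x86-64
// local-exec rule (TLS variant II): %fs points at the end of the static
// TLS block, so a variable's offset from the thread pointer is its offset
// within the block minus the block's total size -- a small negative number.

#include <stdint.h>

static int64_t local_exec_tpoff(uint64_t offset_in_tls_block,
                                uint64_t tls_block_memsz)
{
  return (int64_t)(offset_in_tls_block - tls_block_memsz);
}

// e.g. a variable 0x10 bytes into a 0x100-byte TLS segment is addressed
// as %fs:-0xf0.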
+ +void +Target_x86_64::apply_relocation( + const Relocate_info<64, false>* relinfo, + elfcpp::Elf_types<64>::Elf_Addr r_offset, + unsigned int r_type, + elfcpp::Elf_types<64>::Elf_Swxword r_addend, + const Symbol* gsym, + unsigned char* view, + elfcpp::Elf_types<64>::Elf_Addr address, + section_size_type view_size) +{ + gold::apply_relocation<64, false, Target_x86_64, Target_x86_64::Relocate>( + relinfo, + this, + r_offset, + r_type, + r_addend, + gsym, + view, + address, + view_size); +} + // Return the size of a relocation while scanning during a relocatable // link. @@ -2538,8 +3710,8 @@ Target_x86_64::Relocatable_size_for_reloc::get_size_for_reloc( switch (r_type) { case elfcpp::R_X86_64_NONE: - case elfcpp::R_386_GNU_VTINHERIT: - case elfcpp::R_386_GNU_VTENTRY: + case elfcpp::R_X86_64_GNU_VTINHERIT: + case elfcpp::R_X86_64_GNU_VTENTRY: case elfcpp::R_X86_64_TLSGD: // Global-dynamic case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url) case elfcpp::R_X86_64_TLSDESC_CALL: @@ -2581,6 +3753,7 @@ Target_x86_64::Relocatable_size_for_reloc::get_size_for_reloc( case elfcpp::R_X86_64_GLOB_DAT: case elfcpp::R_X86_64_JUMP_SLOT: case elfcpp::R_X86_64_RELATIVE: + case elfcpp::R_X86_64_IRELATIVE: // These are outstanding tls relocs, which are unexpected when linking case elfcpp::R_X86_64_TPOFF64: case elfcpp::R_X86_64_DTPMOD64: @@ -2601,7 +3774,7 @@ Target_x86_64::Relocatable_size_for_reloc::get_size_for_reloc( void Target_x86_64::scan_relocatable_relocs(Symbol_table* symtab, Layout* layout, - Sized_relobj<64, false>* object, + Sized_relobj_file<64, false>* object, unsigned int data_shndx, unsigned int sh_type, const unsigned char* prelocs, @@ -2674,7 +3847,7 @@ uint64_t Target_x86_64::do_dynsym_value(const Symbol* gsym) const { gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset()); - return this->plt_section()->address() + gsym->plt_offset(); + return this->plt_address_for_global(gsym) + gsym->plt_offset(); } // Return a string used to fill a code section with nops to take up @@ -2759,8 +3932,23 @@ Target_x86_64::do_reloc_addend(void* arg, unsigned int r_type, return psymval->value(ti.object, 0); } +// Return the value to use for the base of a DW_EH_PE_datarel offset +// in an FDE. Solaris and SVR4 use DW_EH_PE_datarel because their +// assembler can not write out the difference between two labels in +// different sections, so instead of using a pc-relative value they +// use an offset from the GOT. + +uint64_t +Target_x86_64::do_ehframe_datarel_base() const +{ + gold_assert(this->global_offset_table_ != NULL); + Symbol* sym = this->global_offset_table_; + Sized_symbol<64>* ssym = static_cast*>(sym); + return ssym->value(); +} + // FNOFFSET in section SHNDX in OBJECT is the start of a function -// compiled with -fstack-split. The function calls non-stack-split +// compiled with -fsplit-stack. The function calls non-split-stack // code. We have to change the function so that it always ensures // that it has enough stack space to run some random function. @@ -2827,7 +4015,7 @@ class Target_selector_x86_64 : public Target_selector_freebsd public: Target_selector_x86_64() : Target_selector_freebsd(elfcpp::EM_X86_64, 64, false, "elf64-x86-64", - "elf64-x86-64-freebsd") + "elf64-x86-64-freebsd", "elf_x86_64") { } Target*