diff --git a/gold/powerpc.cc b/gold/powerpc.cc
index 71e38f9609220b001de92c8645265e487e4e41e0..1e95d5b24529ee066130aaf53bbc469b09be0327 100644
@@ -1,6 +1,6 @@
 // powerpc.cc -- powerpc target support for gold.
 
-// Copyright (C) 2008-2015 Free Software Foundation, Inc.
+// Copyright (C) 2008-2016 Free Software Foundation, Inc.
 // Written by David S. Miller <davem@davemloft.net>
 //        and David Edelsohn <edelsohn@gnu.org>
 
@@ -663,6 +663,21 @@ class Target_powerpc : public Sized_target<size, big_endian>
                          const unsigned char* plocal_symbols,
                          Relocatable_relocs*);
 
+  // Scan the relocs for --emit-relocs.
+  void
+  emit_relocs_scan(Symbol_table* symtab,
+                  Layout* layout,
+                  Sized_relobj_file<size, big_endian>* object,
+                  unsigned int data_shndx,
+                  unsigned int sh_type,
+                  const unsigned char* prelocs,
+                  size_t reloc_count,
+                  Output_section* output_section,
+                  bool needs_special_offset_handling,
+                  size_t local_symbol_count,
+                  const unsigned char* plocal_syms,
+                  Relocatable_relocs* rr);
+
   // Emit relocations for a section.
   void
   relocate_relocs(const Relocate_info<size, big_endian>*,
@@ -1105,19 +1120,6 @@ class Target_powerpc : public Sized_target<size, big_endian>
     }
   };
 
-  // A class which returns the size required for a relocation type,
-  // used while scanning relocs during a relocatable link.
-  class Relocatable_size_for_reloc
-  {
-   public:
-    unsigned int
-    get_size_for_reloc(unsigned int, Relobj*)
-    {
-      gold_unreachable();
-      return 0;
-    }
-  };
-
   // Optimize the TLS relocation type based on what we know about the
   // symbol.  IS_FINAL is true if the final address of this symbol is
   // known at link time.
@@ -2437,9 +2439,8 @@ class Stub_control
   // the stubbed branches.
   Stub_control(int32_t size, bool no_size_errors)
     : state_(NO_GROUP), stub_group_size_(abs(size)),
-      stub14_group_size_(abs(size) >> 10),
       stubs_always_before_branch_(size < 0),
-      suppress_size_errors_(no_size_errors),
+      suppress_size_errors_(no_size_errors), group_size_(0),
       group_end_addr_(0), owner_(NULL), output_section_(NULL)
   {
   }
@@ -2477,24 +2478,28 @@ class Stub_control
 
   State state_;
   uint32_t stub_group_size_;
-  uint32_t stub14_group_size_;
   bool stubs_always_before_branch_;
   bool suppress_size_errors_;
+  // Current max size of group.  Starts at stub_group_size_ but is
+  // reduced to stub_group_size_/1024 on seeing a section with
+  // external conditional branches.
+  uint32_t group_size_;
   uint64_t group_end_addr_;
+  // owner_ and output_section_ specify the section to which stubs are
+  // attached.  The stubs are placed at the end of this section.
   const Output_section::Input_section* owner_;
   Output_section* output_section_;
 };
 
 // Return true iff input section can be handled by current stub
-// group.
+// group.  Sections are presented to this function in reverse order,
+// so the first section is the tail of the group.
 
 bool
 Stub_control::can_add_to_stub_group(Output_section* o,
                                    const Output_section::Input_section* i,
                                    bool has14)
 {
-  uint32_t group_size
-    = has14 ? this->stub14_group_size_ : this->stub_group_size_;
   bool whole_sec = o->order() == ORDER_INIT || o->order() == ORDER_FINI;
   uint64_t this_size;
   uint64_t start_addr = o->address();
@@ -2508,46 +2513,90 @@ Stub_control::can_add_to_stub_group(Output_section* o,
       start_addr += i->relobj()->output_section_offset(i->shndx());
       this_size = i->data_size();
     }
-  uint64_t end_addr = start_addr + this_size;
-  bool toobig = this_size > group_size;
 
-  if (toobig && !this->suppress_size_errors_)
+  uint32_t group_size = this->stub_group_size_;
+  if (has14)
+    this->group_size_ = group_size = group_size >> 10;
+
+  if (this_size > group_size && !this->suppress_size_errors_)
     gold_warning(_("%s:%s exceeds group size"),
                 i->relobj()->name().c_str(),
                 i->relobj()->section_name(i->shndx()).c_str());
 
-  if (this->state_ != HAS_STUB_SECTION
-      && (!whole_sec || this->output_section_ != o)
-      && (this->state_ == NO_GROUP
-         || this->group_end_addr_ - end_addr < group_size))
-    {
-      this->owner_ = i;
-      this->output_section_ = o;
-    }
+  gold_debug(DEBUG_TARGET, "maybe add%s %s:%s size=%#llx total=%#llx",
+            has14 ? " 14bit" : "",
+            i->relobj()->name().c_str(),
+            i->relobj()->section_name(i->shndx()).c_str(),
+            (long long) this_size,
+            (long long) this->group_end_addr_ - start_addr);
 
-  if (this->state_ == NO_GROUP)
-    {
-      this->state_ = FINDING_STUB_SECTION;
-      this->group_end_addr_ = end_addr;
-    }
-  else if (this->group_end_addr_ - start_addr < group_size)
-    ;
-  // Adding this section would make the group larger than GROUP_SIZE.
-  else if (this->state_ == FINDING_STUB_SECTION
-          && !this->stubs_always_before_branch_
-          && !toobig)
+  uint64_t end_addr = start_addr + this_size;
+  if (this->state_ == HAS_STUB_SECTION)
     {
-      // But wait, there's more!  Input sections up to GROUP_SIZE
-      // bytes before the stub table can be handled by it too.
-      this->state_ = HAS_STUB_SECTION;
-      this->group_end_addr_ = end_addr;
+      // Can we add this section, which is before the stubs, to the
+      // group?
+      if (this->group_end_addr_ - start_addr <= this->group_size_)
+       return true;
     }
   else
     {
-      this->state_ = NO_GROUP;
-      return false;
+      // Stubs are added at the end of "owner_".
+      // The current section can always be the stub owner, except when
+      // whole_sec is true and the current section isn't the last of
+      // the pasted sections.  (This restriction for the whole_sec
+      // case is just to simplify the corner case mentioned in
+      // group_sections.)
+      // Note that "owner_" itself is not necessarily part of the
+      // group of sections served by these stubs!
+      if (!whole_sec || this->output_section_ != o)
+       {
+         this->owner_ = i;
+         this->output_section_ = o;
+       }
+
+      if (this->state_ == FINDING_STUB_SECTION)
+       {
+         if (this->group_end_addr_ - start_addr <= this->group_size_)
+           return true;
+         // The group after the stubs has reached maximum size.
+         // Now see about adding sections before the stubs to the
+         // group.  If the current section has a 14-bit branch and
+         // the group after the stubs exceeds group_size_ (because
+         // they didn't have 14-bit branches), don't add sections
+         // before the stubs:  The size of stubs for such a large
+         // group may exceed the reach of a 14-bit branch.
+         if (!this->stubs_always_before_branch_
+             && this_size <= this->group_size_
+             && this->group_end_addr_ - end_addr <= this->group_size_)
+           {
+             gold_debug(DEBUG_TARGET, "adding before stubs");
+             this->state_ = HAS_STUB_SECTION;
+             this->group_end_addr_ = end_addr;
+             return true;
+           }
+       }
+      else if (this->state_ == NO_GROUP)
+       {
+         // Only here on very first use of Stub_control
+         this->state_ = FINDING_STUB_SECTION;
+         this->group_size_ = group_size;
+         this->group_end_addr_ = end_addr;
+         return true;
+       }
+      else
+       gold_unreachable();
     }
-  return true;
+
+  gold_debug(DEBUG_TARGET, "nope, didn't fit\n");
+
+  // The section fails to fit in the current group.  Set up a few
+  // things for the next group.  owner_ and output_section_ will be
+  // set later after we've retrieved those values for the current
+  // group.
+  this->state_ = FINDING_STUB_SECTION;
+  this->group_size_ = group_size;
+  this->group_end_addr_ = end_addr;
+  return false;
 }
 
 // Look over all the input sections, deciding where to place stubs.
@@ -2885,7 +2934,7 @@ Target_powerpc<size, big_endian>::do_relax(int pass,
        }
       this->stub_tables_.clear();
       this->stub_group_size_ = this->stub_group_size_ / 4 * 3;
-      gold_info(_("%s: stub group size is too large; retrying with %d"),
+      gold_info(_("%s: stub group size is too large; retrying with %#x"),
                program_name, this->stub_group_size_);
       this->group_sections(layout, task, true);
     }
@@ -2980,7 +3029,13 @@ Target_powerpc<size, big_endian>::do_relax(int pass,
              Stub_table<size, big_endian>* stub_table
                = static_cast<Stub_table<size, big_endian>*>(
                    i->relaxed_input_section());
-             off += stub_table->set_address_and_size(os, off);
+             Address stub_table_size = stub_table->set_address_and_size(os, off);
+             off += stub_table_size;
+             // After a few iterations, set the current stub table
+             // size as the minimum size threshold, so that in later
+             // passes this stub table can only grow in size.
+             if (pass >= 4)
+               stub_table->set_min_size_threshold(stub_table_size);
            }
          else
            off += i->data_size();
@@ -3632,8 +3687,8 @@ class Stub_table : public Output_relaxed_input_section
       targ_(targ), plt_call_stubs_(), long_branch_stubs_(),
       orig_data_size_(owner->current_data_size()),
       plt_size_(0), last_plt_size_(0),
-      branch_size_(0), last_branch_size_(0), eh_frame_added_(false),
-      need_save_res_(false)
+      branch_size_(0), last_branch_size_(0), min_size_threshold_(0),
+      eh_frame_added_(false), need_save_res_(false)
   {
     this->set_output_section(output_section);
 
@@ -3724,6 +3779,11 @@ class Stub_table : public Output_relaxed_input_section
       off = align_address(off, this->stub_align());
     // Include original section size and alignment padding in size
     my_size += off - start_off;
+    // Ensure the new size is no smaller than the minimum size
+    // threshold.  The alignment requirement is included in "my_size",
+    // so increasing "my_size" here does not invalidate alignment.
+    if (my_size < this->min_size_threshold_)
+      my_size = this->min_size_threshold_;
     this->reset_address_and_file_offset();
     this->set_current_data_size(my_size);
     this->set_address_and_file_offset(os->address() + start_off,
@@ -3749,6 +3809,9 @@ class Stub_table : public Output_relaxed_input_section
   plt_size() const
   { return this->plt_size_; }
 
+  void set_min_size_threshold(Address min_size)
+  { this->min_size_threshold_ = min_size; }
+
   bool
   size_update()
   {
@@ -4013,6 +4076,13 @@ class Stub_table : public Output_relaxed_input_section
   section_size_type orig_data_size_;
   // size of stubs
   section_size_type plt_size_, last_plt_size_, branch_size_, last_branch_size_;
+  // Some rare cases (PR 20529) cause fluctuation in stub table size,
+  // which leads to an endless relax loop.  The fix is to allow only
+  // increases in stub table size after the first few iterations.
+  // This variable holds the minimum possible size of a stub table;
+  // it is zero for the first few iterations, then increases
+  // monotonically.
+  Address min_size_threshold_;
   // Whether .eh_frame info has been created for this stub section.
   bool eh_frame_added_;
   // Set if this stub group needs a copy of out-of-line register
@@ -6022,7 +6092,7 @@ Target_powerpc<size, big_endian>::Scan::global(
          ppc_object->set_opd_discard(reloc.get_r_offset());
          break;
        }
-      // Fall thru
+      // Fall through.
     case elfcpp::R_PPC64_UADDR64:
     case elfcpp::R_POWERPC_ADDR32:
     case elfcpp::R_POWERPC_UADDR32:
@@ -6129,7 +6199,7 @@ Target_powerpc<size, big_endian>::Scan::global(
                      || gsym->is_preemptible())))
            target->make_plt_entry(symtab, layout, gsym);
        }
-      // Fall thru
+      // Fall through.
 
     case elfcpp::R_PPC64_REL64:
     case elfcpp::R_POWERPC_REL32:
@@ -6430,7 +6500,9 @@ Target_powerpc<size, big_endian>::gc_process_relocs(
     const unsigned char* plocal_symbols)
 {
   typedef Target_powerpc<size, big_endian> Powerpc;
   typedef typename Target_powerpc<size, big_endian>::Scan Scan;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+
   Powerpc_relobj<size, big_endian>* ppc_object
     = static_cast<Powerpc_relobj<size, big_endian>*>(object);
   if (size == 64)
@@ -6460,8 +6532,7 @@ Target_powerpc<size, big_endian>::gc_process_relocs(
       return;
     }
 
-  gold::gc_process_relocs<size, big_endian, Powerpc, elfcpp::SHT_RELA, Scan,
-                         typename Target_powerpc::Relocatable_size_for_reloc>(
+  gold::gc_process_relocs<size, big_endian, Powerpc, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -6707,7 +6778,8 @@ Target_powerpc<size, big_endian>::scan_relocs(
     const unsigned char* plocal_symbols)
 {
   typedef Target_powerpc<size, big_endian> Powerpc;
   typedef typename Target_powerpc<size, big_endian>::Scan Scan;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
 
   if (sh_type == elfcpp::SHT_REL)
     {
@@ -6716,7 +6788,7 @@ Target_powerpc<size, big_endian>::scan_relocs(
       return;
     }
 
-  gold::scan_relocs<size, big_endian, Powerpc, elfcpp::SHT_RELA, Scan>(
+  gold::scan_relocs<size, big_endian, Powerpc, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -7517,6 +7589,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size != 64)
        // R_PPC_TLSGD, R_PPC_TLSLD, R_PPC_EMB_RELST_LO, R_PPC_EMB_RELST_HI
        break;
+      // Fall through.
     case elfcpp::R_POWERPC_TPREL16:
     case elfcpp::R_POWERPC_TPREL16_LO:
     case elfcpp::R_POWERPC_TPREL16_HI:
@@ -7540,6 +7613,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
        // R_PPC_EMB_NADDR32, R_PPC_EMB_NADDR16, R_PPC_EMB_NADDR16_LO
        // R_PPC_EMB_NADDR16_HI, R_PPC_EMB_NADDR16_HA, R_PPC_EMB_SDAI16
        break;
+      // Fall through.
     case elfcpp::R_POWERPC_DTPREL16:
     case elfcpp::R_POWERPC_DTPREL16_LO:
     case elfcpp::R_POWERPC_DTPREL16_HI:
@@ -7568,6 +7642,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
     case elfcpp::R_POWERPC_ADDR14_BRTAKEN:
     case elfcpp::R_POWERPC_REL14_BRTAKEN:
       branch_bit = 1 << 21;
+      // Fall through.
     case elfcpp::R_POWERPC_ADDR14_BRNTAKEN:
     case elfcpp::R_POWERPC_REL14_BRNTAKEN:
       {
@@ -7723,6 +7798,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
              && preloc != NULL
              && target->abiversion() >= 2
              && !parameters->options().output_is_position_independent()
+             && rela.get_r_addend() == d_offset + 4
              && gsym != NULL
              && strcmp(gsym->name(), ".TOC.") == 0)
            {
@@ -7931,6 +8007,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
          maybe_dq_reloc = true;
          break;
        }
+      // Fall through.
     case elfcpp::R_POWERPC_ADDR16:
     case elfcpp::R_POWERPC_REL16:
     case elfcpp::R_PPC64_TOC16:
@@ -7965,6 +8042,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_MRKREF, R_PPC_EMB_RELST_LO, R_PPC_EMB_RELST_HA
        goto unsupp;
+      // Fall through.
     case elfcpp::R_POWERPC_ADDR16_HI:
     case elfcpp::R_POWERPC_REL16_HI:
     case elfcpp::R_PPC64_TOC16_HI:
@@ -7985,6 +8063,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_RELSEC16, R_PPC_EMB_RELST_HI, R_PPC_EMB_BIT_FLD
        goto unsupp;
+      // Fall through.
     case elfcpp::R_POWERPC_ADDR16_HA:
     case elfcpp::R_POWERPC_REL16_HA:
     case elfcpp::R_PPC64_TOC16_HA:
@@ -8007,6 +8086,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_NADDR16_LO
        goto unsupp;
+      // Fall through.
     case elfcpp::R_PPC64_ADDR16_HIGHER:
     case elfcpp::R_PPC64_TPREL16_HIGHER:
       Reloc::addr16_hi2(view, value);
@@ -8016,6 +8096,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_NADDR16_HI
        goto unsupp;
+      // Fall through.
     case elfcpp::R_PPC64_ADDR16_HIGHERA:
     case elfcpp::R_PPC64_TPREL16_HIGHERA:
       Reloc::addr16_ha2(view, value);
@@ -8025,6 +8106,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_NADDR16_HA
        goto unsupp;
+      // Fall through.
     case elfcpp::R_PPC64_ADDR16_HIGHEST:
     case elfcpp::R_PPC64_TPREL16_HIGHEST:
       Reloc::addr16_hi3(view, value);
@@ -8034,6 +8116,7 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_SDAI16
        goto unsupp;
+      // Fall through.
     case elfcpp::R_PPC64_ADDR16_HIGHESTA:
     case elfcpp::R_PPC64_TPREL16_HIGHESTA:
       Reloc::addr16_ha3(view, value);
@@ -8044,11 +8127,13 @@ Target_powerpc<size, big_endian>::Relocate::relocate(
       if (size == 32)
        // R_PPC_EMB_NADDR32, R_PPC_EMB_NADDR16
        goto unsupp;
+      // Fall through.
     case elfcpp::R_PPC64_TPREL16_DS:
     case elfcpp::R_PPC64_TPREL16_LO_DS:
       if (size == 32)
        // R_PPC_TLSGD, R_PPC_TLSLD
        break;
+      // Fall through.
     case elfcpp::R_PPC64_ADDR16_DS:
     case elfcpp::R_PPC64_ADDR16_LO_DS:
     case elfcpp::R_PPC64_TOC16_DS:
@@ -8177,11 +8262,13 @@ Target_powerpc<size, big_endian>::relocate_section(
   typedef typename Target_powerpc<size, big_endian>::Relocate Powerpc_relocate;
   typedef typename Target_powerpc<size, big_endian>::Relocate_comdat_behavior
     Powerpc_comdat_behavior;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
 
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_section<size, big_endian, Powerpc, elfcpp::SHT_RELA,
-                        Powerpc_relocate, Powerpc_comdat_behavior>(
+  gold::relocate_section<size, big_endian, Powerpc, Powerpc_relocate,
+                        Powerpc_comdat_behavior, Classify_reloc>(
     relinfo,
     this,
     prelocs,
@@ -8194,9 +8281,26 @@ Target_powerpc<size, big_endian>::relocate_section(
     reloc_symbol_changes);
 }
 
+template<int size, bool big_endian>
 class Powerpc_scan_relocatable_reloc
 {
 public:
+  typedef typename Reloc_types<elfcpp::SHT_RELA, size, big_endian>::Reloc
+      Reltype;
+  static const int reloc_size =
+      Reloc_types<elfcpp::SHT_RELA, size, big_endian>::reloc_size;
+  static const int sh_type = elfcpp::SHT_RELA;
+
+  // Return the symbol referred to by the relocation.
+  static inline unsigned int
+  get_r_sym(const Reltype* reloc)
+  { return elfcpp::elf_r_sym<size>(reloc->get_r_info()); }
+
+  // Return the type of the relocation.
+  static inline unsigned int
+  get_r_type(const Reltype* reloc)
+  { return elfcpp::elf_r_type<size>(reloc->get_r_info()); }
+
   // Return the strategy to use for a local symbol which is not a
   // section symbol, given the relocation type.
   inline Relocatable_relocs::Reloc_strategy
@@ -8244,10 +8348,11 @@ Target_powerpc<size, big_endian>::scan_relocatable_relocs(
     const unsigned char* plocal_symbols,
     Relocatable_relocs* rr)
 {
+  typedef Powerpc_scan_relocatable_reloc<size, big_endian> Scan_strategy;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::scan_relocatable_relocs<size, big_endian, elfcpp::SHT_RELA,
-                               Powerpc_scan_relocatable_reloc>(
+  gold::scan_relocatable_relocs<size, big_endian, Scan_strategy>(
     symtab,
     layout,
     object,
@@ -8261,6 +8366,45 @@ Target_powerpc<size, big_endian>::scan_relocatable_relocs(
     rr);
 }
 
+// Scan the relocs for --emit-relocs.
+
+template<int size, bool big_endian>
+void
+Target_powerpc<size, big_endian>::emit_relocs_scan(
+    Symbol_table* symtab,
+    Layout* layout,
+    Sized_relobj_file<size, big_endian>* object,
+    unsigned int data_shndx,
+    unsigned int sh_type,
+    const unsigned char* prelocs,
+    size_t reloc_count,
+    Output_section* output_section,
+    bool needs_special_offset_handling,
+    size_t local_symbol_count,
+    const unsigned char* plocal_syms,
+    Relocatable_relocs* rr)
+{
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+  typedef gold::Default_emit_relocs_strategy<Classify_reloc>
+      Emit_relocs_strategy;
+
+  gold_assert(sh_type == elfcpp::SHT_RELA);
+
+  gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
+    symtab,
+    layout,
+    object,
+    data_shndx,
+    prelocs,
+    reloc_count,
+    output_section,
+    needs_special_offset_handling,
+    local_symbol_count,
+    plocal_syms,
+    rr);
+}
+
 // Emit relocations for a section.
 // This is a modified version of the function by the same name in
 // target-reloc.h.  Using relocate_special_relocatable for