+protected:
+ // Abstract method to be implemented by sub-classes.
+ virtual void
+ do_write(unsigned char*, section_size_type) = 0;
+
+private:
+ // The last insn of a stub is a jump to destination insn. This field records
+ // the destination address.
+ AArch64_address destination_address_;
+ // The stub offset. Note this has different interpretations between a
+ // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
+ // beginning of the containing stub_table, whereas for Erratum_stub, this is
+ // the offset from the end of reloc_stubs.
+ section_offset_type offset_;
+ // Stub type.
+ const int type_;
+}; // End of "Stub_base".
+
+
+// Erratum stub class. An erratum stub differs from a reloc stub in that for
+// each erratum occurrence, we generate an erratum stub. We never share erratum
+// stubs, whereas for reloc stubs, different branch insns share a single reloc
+// stub as long as the branch targets are the same. (More to the point, reloc
+// stubs can be shared because they're used to reach a specific target, whereas
+// erratum stubs branch back to the original control flow.)
+
+template<int size, bool big_endian>
+class Erratum_stub : public Stub_base<size, big_endian>
+{
+public:
+ typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
+ typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
+ typedef AArch64_insn_utilities<big_endian> Insn_utilities;
+ typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
+
+ // Required start-address alignment for an erratum stub. (Defined out of
+ // line, after the class.)
+ static const int STUB_ADDR_ALIGN;
+
+ // Sentinel meaning "erratum_insn_ has not been recorded yet".
+ static const Insntype invalid_insn = static_cast<Insntype>(-1);
+
+ // Construct a stub of kind TYPE for the erratum found in RELOBJ, at
+ // section index SHNDX, section offset SH_OFFSET. The erratum insn and
+ // its address start out invalid; they are filled in later via
+ // set_erratum_insn/set_erratum_address.
+ Erratum_stub(The_aarch64_relobj* relobj, int type,
+ unsigned shndx, unsigned int sh_offset)
+ : Stub_base<size, big_endian>(type), relobj_(relobj),
+ shndx_(shndx), sh_offset_(sh_offset),
+ erratum_insn_(invalid_insn),
+ erratum_address_(this->invalid_address)
+ {}
+
+ ~Erratum_stub() {}
+
+ // Return the object that contains the erratum.
+ The_aarch64_relobj*
+ relobj()
+ { return this->relobj_; }
+
+ // Get section index of the erratum.
+ unsigned int
+ shndx() const
+ { return this->shndx_; }
+
+ // Get section offset of the erratum.
+ unsigned int
+ sh_offset() const
+ { return this->sh_offset_; }
+
+ // Get the erratum insn. This is the insn located at erratum_insn_address.
+ // Asserts that the insn has been recorded first.
+ Insntype
+ erratum_insn() const
+ {
+ gold_assert(this->erratum_insn_ != this->invalid_insn);
+ return this->erratum_insn_;
+ }
+
+ // Set the insn that the erratum happens to.
+ void
+ set_erratum_insn(Insntype insn)
+ { this->erratum_insn_ = insn; }
+
+ // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
+ // relocation spot, in this case, the erratum_insn_ recorded at scanning phase
+ // is no longer the one we want to write out to the stub, update erratum_insn_
+ // with relocated version. Also note that in this case xn must not be "PC", so
+ // it is safe to move the erratum insn from the origin place to the stub. For
+ // 835769, the erratum insn is multiply-accumulate insn, which could not be a
+ // relocation spot (assertion added though).
+ void
+ update_erratum_insn(Insntype insn)
+ {
+ gold_assert(this->erratum_insn_ != this->invalid_insn);
+ switch (this->type())
+ {
+ case ST_E_843419:
+ // Relocation must not change the insn kind or its Rd/Rn operands;
+ // only the immediate field may differ from the recorded insn.
+ gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
+ gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
+ gold_assert(Insn_utilities::aarch64_rd(insn) ==
+ Insn_utilities::aarch64_rd(this->erratum_insn()));
+ gold_assert(Insn_utilities::aarch64_rn(insn) ==
+ Insn_utilities::aarch64_rn(this->erratum_insn()));
+ // Update plain ld/st insn with relocated insn.
+ this->erratum_insn_ = insn;
+ break;
+ case ST_E_835769:
+ // A mac insn cannot be a relocation spot, so the relocated insn
+ // must equal the recorded one.
+ gold_assert(insn == this->erratum_insn());
+ break;
+ default:
+ gold_unreachable();
+ }
+ }
+
+
+ // Return the address where an erratum must be done. Asserts that the
+ // address has been recorded first.
+ AArch64_address
+ erratum_address() const
+ {
+ gold_assert(this->erratum_address_ != this->invalid_address);
+ return this->erratum_address_;
+ }
+
+ // Set the address where an erratum must be done.
+ void
+ set_erratum_address(AArch64_address addr)
+ { this->erratum_address_ = addr; }
+
+ // Comparator used to group Erratum_stubs in a set by (obj, shndx,
+ // sh_offset). We do not include 'type' in the calculation, because there is
+ // at most one stub type at (obj, shndx, sh_offset).
+ bool
+ operator<(const Erratum_stub<size, big_endian>& k) const
+ {
+ if (this == &k)
+ return false;
+ // We group stubs by relobj.
+ if (this->relobj_ != k.relobj_)
+ return this->relobj_ < k.relobj_;
+ // Then by section index.
+ if (this->shndx_ != k.shndx_)
+ return this->shndx_ < k.shndx_;
+ // Lastly by section offset.
+ return this->sh_offset_ < k.sh_offset_;
+ }
+
+ // Mark this stub as dropped by clearing relobj_, after which
+ // is_invalidated_erratum_stub() returns true. May only be done once.
+ void
+ invalidate_erratum_stub()
+ {
+ gold_assert(this->relobj_ != NULL);
+ this->relobj_ = NULL;
+ }
+
+ // Whether this stub has been invalidated (its relobj_ cleared).
+ bool
+ is_invalidated_erratum_stub()
+ { return this->relobj_ == NULL; }
+
+protected:
+ // Write the stub contents out to the view. (Implementation follows the
+ // class definition.)
+ virtual void
+ do_write(unsigned char*, section_size_type);
+
+private:
+ // The object that needs to be fixed.
+ The_aarch64_relobj* relobj_;
+ // The shndx in the object that needs to be fixed.
+ const unsigned int shndx_;
+ // The section offset in the object that needs to be fixed.
+ const unsigned int sh_offset_;
+ // The insn to be fixed.
+ Insntype erratum_insn_;
+ // The address of the above insn.
+ AArch64_address erratum_address_;
+}; // End of "Erratum_stub".
+
+
+// Erratum sub class to wrap additional info needed by 843419. In fixing this
+// erratum, we may choose to replace 'adrp' with 'adr', in this case, we need
+// adrp's code position (two or three insns before erratum insn itself).
+
+template<int size, bool big_endian>
+class E843419_stub : public Erratum_stub<size, big_endian>
+{
+public:
+ typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
+
+ // Construct an 843419 stub for the erratum at (RELOBJ, SHNDX, SH_OFFSET),
+ // additionally recording ADRP_SH_OFFSET, the section offset of the
+ // associated "adrp" insn.
+ E843419_stub(AArch64_relobj<size, big_endian>* relobj,
+ unsigned int shndx, unsigned int sh_offset,
+ unsigned int adrp_sh_offset)
+ : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
+ adrp_sh_offset_(adrp_sh_offset)
+ {}
+
+ // Return the section offset of the associated "adrp" insn.
+ unsigned int
+ adrp_sh_offset() const
+ { return this->adrp_sh_offset_; }
+
+private:
+ // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
+ // we can obtain it from its parent.)
+ const unsigned int adrp_sh_offset_;
+};
+
+
+// Erratum stubs are aligned to 4 bytes, matching the size of the 32-bit
+// insns written out by Erratum_stub::do_write.
+template<int size, bool big_endian>
+const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
+
+// Comparator used in set definition.
+template<int size, bool big_endian>
+struct Erratum_stub_less
+{
+ // Order by the pointed-to stubs' operator<, i.e. by
+ // (relobj, shndx, sh_offset).
+ bool
+ operator()(const Erratum_stub<size, big_endian>* s1,
+ const Erratum_stub<size, big_endian>* s2) const
+ { return *s1 < *s2; }
+};
+
+// Erratum_stub implementation for writing stub to output file.
+
+template<int size, bool big_endian>
+void
+Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
+{
+ typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
+ const Insntype* insns = this->insns();
+ uint32_t num_insns = this->insn_num();
+ Insntype* ip = reinterpret_cast<Insntype*>(view);
+ // For current implemented erratum 843419 and 835769, the first insn in the
+ // stub is always a copy of the problematic insn (in 843419, the mem access
+ // insn, in 835769, the mac insn), followed by a jump-back.
+ elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
+ // Slot 0 was just overwritten with erratum_insn(), so copy the remaining
+ // template insns starting at index 1.
+ for (uint32_t i = 1; i < num_insns; ++i)
+ elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
+}
+
+
+// Reloc stub class.
+
+template<int size, bool big_endian>
+class Reloc_stub : public Stub_base<size, big_endian>
+{
+ public:
+ typedef Reloc_stub<size, big_endian> This;
+ typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
+
+ // Branch range. This is used to calculate the section group size, as well as
+ // determine whether a stub is needed.
+ static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
+ static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
+
+ // Constant used to determine if an offset fits in the adrp instruction
+ // encoding.
+ static const int MAX_ADRP_IMM = (1 << 20) - 1;
+ static const int MIN_ADRP_IMM = -(1 << 20);
+
+ static const int BYTES_PER_INSN = 4;
+ static const int STUB_ADDR_ALIGN;
+
+ // Determine whether the offset fits in the jump/branch instruction.
+ static bool
+ aarch64_valid_branch_offset_p(int64_t offset)
+ { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
+
+ // Determine whether the offset fits in the adrp immediate field.
+ static bool
+ aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
+ {
+ typedef AArch64_relocate_functions<size, big_endian> Reloc;
+ int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
+ return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
+ }
+
+ // Determine the stub type for a certain relocation or ST_NONE, if no stub is
+ // needed.
+ static int
+ stub_type_for_reloc(unsigned int r_type, AArch64_address address,
+ AArch64_address target);
+
+ Reloc_stub(int type)
+ : Stub_base<size, big_endian>(type)
+ { }
+
+ ~Reloc_stub()
+ { }
+