+// Set, or unset, the architecture of the Tag_also_compatible_with attribute.
+// The tag is removed if ARCH is -1.
+// This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::set_secondary_compatible_arch(
+ Attributes_section_data* pasd,
+ int arch)
+{
+ Object_attribute* known_attributes =
+ pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
+
+ if (arch == -1)
+ {
+ known_attributes[elfcpp::Tag_also_compatible_with].set_string_value("");
+ return;
+ }
+
+ // Note: the tag and its argument below are uleb128 values, though
+ // currently-defined values fit in one byte for each.
+ char sv[3];
+ sv[0] = elfcpp::Tag_CPU_arch;
+ gold_assert(arch != 0);
+ sv[1] = arch;
+ sv[2] = '\0';
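+
+  // For example, for ARCH == TAG_CPU_ARCH_V6_M (11), SV holds the bytes
+  // { Tag_CPU_arch (6), 11, 0 }: the tag and its argument as single-byte
+  // uleb128 values, followed by the string terminator.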
+
+ known_attributes[elfcpp::Tag_also_compatible_with].set_string_value(sv);
+}
+
+// Combine two values for Tag_CPU_arch, taking secondary compatibility tags
+// into account.
+// This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
+
+template<bool big_endian>
+int
+Target_arm<big_endian>::tag_cpu_arch_combine(
+ const char* name,
+ int oldtag,
+ int* secondary_compat_out,
+ int newtag,
+ int secondary_compat)
+{
+#define T(X) elfcpp::TAG_CPU_ARCH_##X
+ static const int v6t2[] =
+ {
+ T(V6T2), // PRE_V4.
+ T(V6T2), // V4.
+ T(V6T2), // V4T.
+ T(V6T2), // V5T.
+ T(V6T2), // V5TE.
+ T(V6T2), // V5TEJ.
+ T(V6T2), // V6.
+ T(V7), // V6KZ.
+ T(V6T2) // V6T2.
+ };
+ static const int v6k[] =
+ {
+ T(V6K), // PRE_V4.
+ T(V6K), // V4.
+ T(V6K), // V4T.
+ T(V6K), // V5T.
+ T(V6K), // V5TE.
+ T(V6K), // V5TEJ.
+ T(V6K), // V6.
+ T(V6KZ), // V6KZ.
+ T(V7), // V6T2.
+ T(V6K) // V6K.
+ };
+ static const int v7[] =
+ {
+ T(V7), // PRE_V4.
+ T(V7), // V4.
+ T(V7), // V4T.
+ T(V7), // V5T.
+ T(V7), // V5TE.
+ T(V7), // V5TEJ.
+ T(V7), // V6.
+ T(V7), // V6KZ.
+ T(V7), // V6T2.
+ T(V7), // V6K.
+ T(V7) // V7.
+ };
+ static const int v6_m[] =
+ {
+ -1, // PRE_V4.
+ -1, // V4.
+ T(V6K), // V4T.
+ T(V6K), // V5T.
+ T(V6K), // V5TE.
+ T(V6K), // V5TEJ.
+ T(V6K), // V6.
+ T(V6KZ), // V6KZ.
+ T(V7), // V6T2.
+ T(V6K), // V6K.
+ T(V7), // V7.
+ T(V6_M) // V6_M.
+ };
+ static const int v6s_m[] =
+ {
+ -1, // PRE_V4.
+ -1, // V4.
+ T(V6K), // V4T.
+ T(V6K), // V5T.
+ T(V6K), // V5TE.
+ T(V6K), // V5TEJ.
+ T(V6K), // V6.
+ T(V6KZ), // V6KZ.
+ T(V7), // V6T2.
+ T(V6K), // V6K.
+ T(V7), // V7.
+ T(V6S_M), // V6_M.
+ T(V6S_M) // V6S_M.
+ };
+ static const int v7e_m[] =
+ {
+ -1, // PRE_V4.
+ -1, // V4.
+ T(V7E_M), // V4T.
+ T(V7E_M), // V5T.
+ T(V7E_M), // V5TE.
+ T(V7E_M), // V5TEJ.
+ T(V7E_M), // V6.
+ T(V7E_M), // V6KZ.
+ T(V7E_M), // V6T2.
+ T(V7E_M), // V6K.
+ T(V7E_M), // V7.
+ T(V7E_M), // V6_M.
+ T(V7E_M), // V6S_M.
+ T(V7E_M) // V7E_M.
+ };
+ static const int v8[] =
+ {
+ T(V8), // PRE_V4.
+ T(V8), // V4.
+ T(V8), // V4T.
+ T(V8), // V5T.
+ T(V8), // V5TE.
+ T(V8), // V5TEJ.
+ T(V8), // V6.
+ T(V8), // V6KZ.
+ T(V8), // V6T2.
+ T(V8), // V6K.
+ T(V8), // V7.
+ T(V8), // V6_M.
+ T(V8), // V6S_M.
+ T(V8), // V7E_M.
+ T(V8) // V8.
+ };
+ static const int v4t_plus_v6_m[] =
+ {
+ -1, // PRE_V4.
+ -1, // V4.
+ T(V4T), // V4T.
+ T(V5T), // V5T.
+ T(V5TE), // V5TE.
+ T(V5TEJ), // V5TEJ.
+ T(V6), // V6.
+ T(V6KZ), // V6KZ.
+ T(V6T2), // V6T2.
+ T(V6K), // V6K.
+ T(V7), // V7.
+ T(V6_M), // V6_M.
+ T(V6S_M), // V6S_M.
+ T(V7E_M), // V7E_M.
+ T(V8), // V8.
+ T(V4T_PLUS_V6_M) // V4T plus V6_M.
+ };
+ static const int* comb[] =
+ {
+ v6t2,
+ v6k,
+ v7,
+ v6_m,
+ v6s_m,
+ v7e_m,
+ v8,
+ // Pseudo-architecture.
+ v4t_plus_v6_m
+ };
+
+ // Check we've not got a higher architecture than we know about.
+
+ if (oldtag > elfcpp::MAX_TAG_CPU_ARCH || newtag > elfcpp::MAX_TAG_CPU_ARCH)
+ {
+ gold_error(_("%s: unknown CPU architecture"), name);
+ return -1;
+ }
+
+ // Override old tag if we have a Tag_also_compatible_with on the output.
+
+ if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
+ || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
+ oldtag = T(V4T_PLUS_V6_M);
+
+ // And override the new tag if we have a Tag_also_compatible_with on the
+ // input.
+
+ if ((newtag == T(V6_M) && secondary_compat == T(V4T))
+ || (newtag == T(V4T) && secondary_compat == T(V6_M)))
+ newtag = T(V4T_PLUS_V6_M);
+
+ // Architectures before V6KZ add features monotonically.
+ int tagh = std::max(oldtag, newtag);
+ if (tagh <= elfcpp::TAG_CPU_ARCH_V6KZ)
+ return tagh;
+
+ int tagl = std::min(oldtag, newtag);
+ int result = comb[tagh - T(V6T2)][tagl];
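+
+  // For example, merging OLDTAG == T(V6K) (9) with NEWTAG == T(V6_M) (11)
+  // gives tagh == 11, so comb[11 - T(V6T2)] selects the v6_m row and
+  // v6_m[9] yields T(V6K) as the combined architecture.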
+
+ // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
+ // as the canonical version.
+ if (result == T(V4T_PLUS_V6_M))
+ {
+ result = T(V4T);
+ *secondary_compat_out = T(V6_M);
+ }
+ else
+ *secondary_compat_out = -1;
+
+ if (result == -1)
+ {
+ gold_error(_("%s: conflicting CPU architectures %d/%d"),
+ name, oldtag, newtag);
+ return -1;
+ }
+
+ return result;
+#undef T
+}
+
+// Helper to print AEABI enum tag value.
+
+template<bool big_endian>
+std::string
+Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
+{
+ static const char* aeabi_enum_names[] =
+ { "", "variable-size", "32-bit", "" };
+ const size_t aeabi_enum_names_size =
+ sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
+
+ if (value < aeabi_enum_names_size)
+ return std::string(aeabi_enum_names[value]);
+ else
+ {
+ char buffer[100];
+ sprintf(buffer, "<unknown value %u>", value);
+ return std::string(buffer);
+ }
+}
+
+// Return the string value to store in TAG_CPU_name.
+
+template<bool big_endian>
+std::string
+Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
+{
+ static const char* name_table[] = {
+ // These aren't real CPU names, but we can't guess
+ // that from the architecture version alone.
+ "Pre v4",
+ "ARM v4",
+ "ARM v4T",
+ "ARM v5T",
+ "ARM v5TE",
+ "ARM v5TEJ",
+ "ARM v6",
+ "ARM v6KZ",
+ "ARM v6T2",
+ "ARM v6K",
+ "ARM v7",
+ "ARM v6-M",
+ "ARM v6S-M",
+ "ARM v7E-M",
+ "ARM v8"
+ };
+ const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
+
+ if (value < name_table_size)
+ return std::string(name_table[value]);
+ else
+ {
+ char buffer[100];
+ sprintf(buffer, "<unknown CPU value %u>", value);
+ return std::string(buffer);
+ }
+}
+
+// Query attributes object to see if integer divide instructions may be
+// present in an object.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::attributes_accept_div(int arch, int profile,
+ const Object_attribute* div_attr)
+{
+ switch (div_attr->int_value())
+ {
+ case 0:
+ // Integer divide allowed if instruction contained in
+ // architecture.
+ if (arch == elfcpp::TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
+ return true;
+ else if (arch >= elfcpp::TAG_CPU_ARCH_V7E_M)
+ return true;
+ else
+ return false;
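+      // (SDIV/UDIV are part of the v7-R and v7-M profiles but not of
+      // plain v7-A, which is why the profile check above is needed.)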
+
+ case 1:
+ // Integer divide explicitly prohibited.
+ return false;
+
+ default:
+ // Unrecognised case - treat as allowing divide everywhere.
+ case 2:
+ // Integer divide allowed in ARM state.
+ return true;
+ }
+}
+
+// Query attributes object to see if integer divide instructions are
+// forbidden to be in the object. This is not the inverse of
+// attributes_accept_div.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::attributes_forbid_div(const Object_attribute* div_attr)
+{
+ return div_attr->int_value() == 1;
+}
+
+// Merge object attributes from input file called NAME with those of the
+// output. The input object attributes are in the object pointed by PASD.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::merge_object_attributes(
+ const char* name,
+ const Attributes_section_data* pasd)
+{
+ // Return if there is no attributes section data.
+ if (pasd == NULL)
+ return;
+
+ // If output has no object attributes, just copy.
+ const int vendor = Object_attribute::OBJ_ATTR_PROC;
+ if (this->attributes_section_data_ == NULL)
+ {
+ this->attributes_section_data_ = new Attributes_section_data(*pasd);
+ Object_attribute* out_attr =
+ this->attributes_section_data_->known_attributes(vendor);
+
+      // We do not output objects with Tag_MPextension_use_legacy - we move
+      // the attribute's value to Tag_MPextension_use.
+ if (out_attr[elfcpp::Tag_MPextension_use_legacy].int_value() != 0)
+ {
+ if (out_attr[elfcpp::Tag_MPextension_use].int_value() != 0
+ && out_attr[elfcpp::Tag_MPextension_use_legacy].int_value()
+ != out_attr[elfcpp::Tag_MPextension_use].int_value())
+ {
+ gold_error(_("%s has both the current and legacy "
+ "Tag_MPextension_use attributes"),
+ name);
+ }
+
+ out_attr[elfcpp::Tag_MPextension_use] =
+ out_attr[elfcpp::Tag_MPextension_use_legacy];
+ out_attr[elfcpp::Tag_MPextension_use_legacy].set_type(0);
+ out_attr[elfcpp::Tag_MPextension_use_legacy].set_int_value(0);
+ }
+
+ return;
+ }
+
+ const Object_attribute* in_attr = pasd->known_attributes(vendor);
+ Object_attribute* out_attr =
+ this->attributes_section_data_->known_attributes(vendor);
+
+  // This needs to happen before Tag_ABI_FP_number_model is merged.
+ if (in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
+ != out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
+ {
+      // Ignore mismatches if the object doesn't use floating point.
+ if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ == elfcpp::AEABI_FP_number_model_none
+ || (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ != elfcpp::AEABI_FP_number_model_none
+ && out_attr[elfcpp::Tag_ABI_VFP_args].int_value()
+ == elfcpp::AEABI_VFP_args_compatible))
+ out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
+ in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
+ else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ != elfcpp::AEABI_FP_number_model_none
+ && in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
+ != elfcpp::AEABI_VFP_args_compatible
+ && parameters->options().warn_mismatch())
+ gold_error(_("%s uses VFP register arguments, output does not"),
+ name);
+ }
+
+ for (int i = 4; i < Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES; ++i)
+ {
+ // Merge this attribute with existing attributes.
+ switch (i)
+ {
+ case elfcpp::Tag_CPU_raw_name:
+ case elfcpp::Tag_CPU_name:
+ // These are merged after Tag_CPU_arch.
+ break;
+
+ case elfcpp::Tag_ABI_optimization_goals:
+ case elfcpp::Tag_ABI_FP_optimization_goals:
+ // Use the first value seen.
+ break;
+
+ case elfcpp::Tag_CPU_arch:
+ {
+	    unsigned int saved_out_attr = out_attr[i].int_value();
+ // Merge Tag_CPU_arch and Tag_also_compatible_with.
+ int secondary_compat =
+ this->get_secondary_compatible_arch(pasd);
+ int secondary_compat_out =
+ this->get_secondary_compatible_arch(
+ this->attributes_section_data_);
+ out_attr[i].set_int_value(
+ tag_cpu_arch_combine(name, out_attr[i].int_value(),
+ &secondary_compat_out,
+ in_attr[i].int_value(),
+ secondary_compat));
+ this->set_secondary_compatible_arch(this->attributes_section_data_,
+ secondary_compat_out);
+
+ // Merge Tag_CPU_name and Tag_CPU_raw_name.
+ if (out_attr[i].int_value() == saved_out_attr)
+ ; // Leave the names alone.
+ else if (out_attr[i].int_value() == in_attr[i].int_value())
+ {
+ // The output architecture has been changed to match the
+ // input architecture. Use the input names.
+ out_attr[elfcpp::Tag_CPU_name].set_string_value(
+ in_attr[elfcpp::Tag_CPU_name].string_value());
+ out_attr[elfcpp::Tag_CPU_raw_name].set_string_value(
+ in_attr[elfcpp::Tag_CPU_raw_name].string_value());
+ }
+ else
+ {
+ out_attr[elfcpp::Tag_CPU_name].set_string_value("");
+ out_attr[elfcpp::Tag_CPU_raw_name].set_string_value("");
+ }
+
+ // If we still don't have a value for Tag_CPU_name,
+ // make one up now. Tag_CPU_raw_name remains blank.
+ if (out_attr[elfcpp::Tag_CPU_name].string_value() == "")
+ {
+ const std::string cpu_name =
+ this->tag_cpu_name_value(out_attr[i].int_value());
+ // FIXME: If we see an unknown CPU, this will be set
+ // to "<unknown CPU n>", where n is the attribute value.
+ // This is different from BFD, which leaves the name alone.
+ out_attr[elfcpp::Tag_CPU_name].set_string_value(cpu_name);
+ }
+ }
+ break;
+
+ case elfcpp::Tag_ARM_ISA_use:
+ case elfcpp::Tag_THUMB_ISA_use:
+ case elfcpp::Tag_WMMX_arch:
+ case elfcpp::Tag_Advanced_SIMD_arch:
+ // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
+ case elfcpp::Tag_ABI_FP_rounding:
+ case elfcpp::Tag_ABI_FP_exceptions:
+ case elfcpp::Tag_ABI_FP_user_exceptions:
+ case elfcpp::Tag_ABI_FP_number_model:
+ case elfcpp::Tag_VFP_HP_extension:
+ case elfcpp::Tag_CPU_unaligned_access:
+ case elfcpp::Tag_T2EE_use:
+ case elfcpp::Tag_Virtualization_use:
+ case elfcpp::Tag_MPextension_use:
+ // Use the largest value specified.
+ if (in_attr[i].int_value() > out_attr[i].int_value())
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+
+ case elfcpp::Tag_ABI_align8_preserved:
+ case elfcpp::Tag_ABI_PCS_RO_data:
+ // Use the smallest value specified.
+ if (in_attr[i].int_value() < out_attr[i].int_value())
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+
+ case elfcpp::Tag_ABI_align8_needed:
+ if ((in_attr[i].int_value() > 0 || out_attr[i].int_value() > 0)
+ && (in_attr[elfcpp::Tag_ABI_align8_preserved].int_value() == 0
+ || (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
+ == 0)))
+ {
+ // This error message should be enabled once all non-conforming
+ // binaries in the toolchain have had the attributes set
+ // properly.
+ // gold_error(_("output 8-byte data alignment conflicts with %s"),
+ // name);
+ }
+ // Fall through.
+ case elfcpp::Tag_ABI_FP_denormal:
+ case elfcpp::Tag_ABI_PCS_GOT_use:
+ {
+ // These tags have 0 = don't care, 1 = strong requirement,
+ // 2 = weak requirement.
+ static const int order_021[3] = {0, 2, 1};
+
+ // Use the "greatest" from the sequence 0, 2, 1, or the largest
+ // value if greater than 2 (for future-proofing).
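+	    // For example, an input of 1 (strong) against an output of 2
+	    // (weak) gives order_021[1] == 2 > order_021[2] == 1, so the
+	    // strong requirement wins and the output becomes 1.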
+ if ((in_attr[i].int_value() > 2
+ && in_attr[i].int_value() > out_attr[i].int_value())
+ || (in_attr[i].int_value() <= 2
+ && out_attr[i].int_value() <= 2
+ && (order_021[in_attr[i].int_value()]
+ > order_021[out_attr[i].int_value()])))
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ }
+ break;
+
+ case elfcpp::Tag_CPU_arch_profile:
+ if (out_attr[i].int_value() != in_attr[i].int_value())
+ {
+ // 0 will merge with anything.
+ // 'A' and 'S' merge to 'A'.
+ // 'R' and 'S' merge to 'R'.
+ // 'M' and 'A|R|S' is an error.
+ if (out_attr[i].int_value() == 0
+ || (out_attr[i].int_value() == 'S'
+ && (in_attr[i].int_value() == 'A'
+ || in_attr[i].int_value() == 'R')))
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ else if (in_attr[i].int_value() == 0
+ || (in_attr[i].int_value() == 'S'
+ && (out_attr[i].int_value() == 'A'
+ || out_attr[i].int_value() == 'R')))
+ ; // Do nothing.
+ else if (parameters->options().warn_mismatch())
+ {
+ gold_error
+ (_("conflicting architecture profiles %c/%c"),
+ in_attr[i].int_value() ? in_attr[i].int_value() : '0',
+ out_attr[i].int_value() ? out_attr[i].int_value() : '0');
+ }
+ }
+ break;
+ case elfcpp::Tag_VFP_arch:
+ {
+ static const struct
+ {
+ int ver;
+ int regs;
+ } vfp_versions[7] =
+ {
+ {0, 0},
+ {1, 16},
+ {2, 16},
+ {3, 32},
+ {3, 16},
+ {4, 32},
+ {4, 16}
+ };
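+
+	    // For example, merging VFPv3-D16 (4, {3, 16}) with VFPv2
+	    // (2, {2, 16}) gives ver == 3 and regs == 16, which the loop
+	    // below maps back to attribute value 4.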
+
+ // Values greater than 6 aren't defined, so just pick the
+ // biggest.
+ if (in_attr[i].int_value() > 6
+ && in_attr[i].int_value() > out_attr[i].int_value())
+ {
+ *out_attr = *in_attr;
+ break;
+ }
+ // The output uses the superset of input features
+ // (ISA version) and registers.
+ int ver = std::max(vfp_versions[in_attr[i].int_value()].ver,
+ vfp_versions[out_attr[i].int_value()].ver);
+ int regs = std::max(vfp_versions[in_attr[i].int_value()].regs,
+ vfp_versions[out_attr[i].int_value()].regs);
+	    // This assumes that all possible supersets are also valid
+	    // options.
+ int newval;
+ for (newval = 6; newval > 0; newval--)
+ {
+ if (regs == vfp_versions[newval].regs
+ && ver == vfp_versions[newval].ver)
+ break;
+ }
+ out_attr[i].set_int_value(newval);
+ }
+ break;
+ case elfcpp::Tag_PCS_config:
+ if (out_attr[i].int_value() == 0)
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ else if (in_attr[i].int_value() != 0
+ && out_attr[i].int_value() != 0
+ && parameters->options().warn_mismatch())
+ {
+ // It's sometimes ok to mix different configs, so this is only
+ // a warning.
+ gold_warning(_("%s: conflicting platform configuration"), name);
+ }
+ break;
+ case elfcpp::Tag_ABI_PCS_R9_use:
+ if (in_attr[i].int_value() != out_attr[i].int_value()
+ && out_attr[i].int_value() != elfcpp::AEABI_R9_unused
+ && in_attr[i].int_value() != elfcpp::AEABI_R9_unused
+ && parameters->options().warn_mismatch())
+ {
+ gold_error(_("%s: conflicting use of R9"), name);
+ }
+ if (out_attr[i].int_value() == elfcpp::AEABI_R9_unused)
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+ case elfcpp::Tag_ABI_PCS_RW_data:
+ if (in_attr[i].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
+ && (in_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
+ != elfcpp::AEABI_R9_SB)
+ && (out_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
+ != elfcpp::AEABI_R9_unused)
+ && parameters->options().warn_mismatch())
+ {
+ gold_error(_("%s: SB relative addressing conflicts with use "
+ "of R9"),
+ name);
+ }
+ // Use the smallest value specified.
+ if (in_attr[i].int_value() < out_attr[i].int_value())
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+ case elfcpp::Tag_ABI_PCS_wchar_t:
+ if (out_attr[i].int_value()
+ && in_attr[i].int_value()
+ && out_attr[i].int_value() != in_attr[i].int_value()
+ && parameters->options().warn_mismatch()
+ && parameters->options().wchar_size_warning())
+ {
+ gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
+ "use %u-byte wchar_t; use of wchar_t values "
+ "across objects may fail"),
+ name, in_attr[i].int_value(),
+ out_attr[i].int_value());
+ }
+ else if (in_attr[i].int_value() && !out_attr[i].int_value())
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+ case elfcpp::Tag_ABI_enum_size:
+ if (in_attr[i].int_value() != elfcpp::AEABI_enum_unused)
+ {
+ if (out_attr[i].int_value() == elfcpp::AEABI_enum_unused
+ || out_attr[i].int_value() == elfcpp::AEABI_enum_forced_wide)
+ {
+ // The existing object is compatible with anything.
+ // Use whatever requirements the new object has.
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ }
+ else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
+ && out_attr[i].int_value() != in_attr[i].int_value()
+ && parameters->options().warn_mismatch()
+ && parameters->options().enum_size_warning())
+ {
+ unsigned int in_value = in_attr[i].int_value();
+ unsigned int out_value = out_attr[i].int_value();
+ gold_warning(_("%s uses %s enums yet the output is to use "
+ "%s enums; use of enum values across objects "
+ "may fail"),
+ name,
+ this->aeabi_enum_name(in_value).c_str(),
+ this->aeabi_enum_name(out_value).c_str());
+ }
+ }
+ break;
+ case elfcpp::Tag_ABI_VFP_args:
+ // Already done.
+ break;
+ case elfcpp::Tag_ABI_WMMX_args:
+ if (in_attr[i].int_value() != out_attr[i].int_value()
+ && parameters->options().warn_mismatch())
+ {
+ gold_error(_("%s uses iWMMXt register arguments, output does "
+ "not"),
+ name);
+ }
+ break;
+ case Object_attribute::Tag_compatibility:
+ // Merged in target-independent code.
+ break;
+ case elfcpp::Tag_ABI_HardFP_use:
+ // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
+ if ((in_attr[i].int_value() == 1 && out_attr[i].int_value() == 2)
+ || (in_attr[i].int_value() == 2 && out_attr[i].int_value() == 1))
+ out_attr[i].set_int_value(3);
+ else if (in_attr[i].int_value() > out_attr[i].int_value())
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+ case elfcpp::Tag_ABI_FP_16bit_format:
+ if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
+ {
+ if (in_attr[i].int_value() != out_attr[i].int_value()
+ && parameters->options().warn_mismatch())
+ gold_error(_("fp16 format mismatch between %s and output"),
+ name);
+ }
+ if (in_attr[i].int_value() != 0)
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ break;
+
+ case elfcpp::Tag_DIV_use:
+ {
+ // A value of zero on input means that the divide
+ // instruction may be used if available in the base
+ // architecture as specified via Tag_CPU_arch and
+ // Tag_CPU_arch_profile. A value of 1 means that the user
+ // did not want divide instructions. A value of 2
+ // explicitly means that divide instructions were allowed
+ // in ARM and Thumb state.
+ int arch = this->
+ get_aeabi_object_attribute(elfcpp::Tag_CPU_arch)->
+ int_value();
+ int profile = this->
+ get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile)->
+ int_value();
+ if (in_attr[i].int_value() == out_attr[i].int_value())
+ {
+ // Do nothing.
+ }
+ else if (attributes_forbid_div(&in_attr[i])
+ && !attributes_accept_div(arch, profile, &out_attr[i]))
+ out_attr[i].set_int_value(1);
+ else if (attributes_forbid_div(&out_attr[i])
+ && attributes_accept_div(arch, profile, &in_attr[i]))
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ else if (in_attr[i].int_value() == 2)
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ }
+ break;
+
+ case elfcpp::Tag_MPextension_use_legacy:
+ // We don't output objects with Tag_MPextension_use_legacy - we
+ // move the value to Tag_MPextension_use.
+ if (in_attr[i].int_value() != 0
+ && in_attr[elfcpp::Tag_MPextension_use].int_value() != 0)
+ {
+ if (in_attr[elfcpp::Tag_MPextension_use].int_value()
+ != in_attr[i].int_value())
+ {
+ gold_error(_("%s has both the current and legacy "
+ "Tag_MPextension_use attributes"),
+ name);
+ }
+ }
+
+ if (in_attr[i].int_value()
+ > out_attr[elfcpp::Tag_MPextension_use].int_value())
+ out_attr[elfcpp::Tag_MPextension_use] = in_attr[i];
+
+ break;
+
+ case elfcpp::Tag_nodefaults:
+ // This tag is set if it exists, but the value is unused (and is
+ // typically zero). We don't actually need to do anything here -
+ // the merge happens automatically when the type flags are merged
+ // below.
+ break;
+ case elfcpp::Tag_also_compatible_with:
+ // Already done in Tag_CPU_arch.
+ break;
+ case elfcpp::Tag_conformance:
+ // Keep the attribute if it matches. Throw it away otherwise.
+ // No attribute means no claim to conform.
+ if (in_attr[i].string_value() != out_attr[i].string_value())
+ out_attr[i].set_string_value("");
+ break;
+
+ default:
+ {
+ const char* err_object = NULL;
+
+	    // The "known_obj_attributes" table does contain some undefined
+	    // attributes.  Ensure that they are unused.
+ if (out_attr[i].int_value() != 0
+ || out_attr[i].string_value() != "")
+ err_object = "output";
+ else if (in_attr[i].int_value() != 0
+ || in_attr[i].string_value() != "")
+ err_object = name;
+
+ if (err_object != NULL
+ && parameters->options().warn_mismatch())
+ {
+ // Attribute numbers >=64 (mod 128) can be safely ignored.
+ if ((i & 127) < 64)
+ gold_error(_("%s: unknown mandatory EABI object attribute "
+ "%d"),
+ err_object, i);
+ else
+ gold_warning(_("%s: unknown EABI object attribute %d"),
+ err_object, i);
+ }
+
+ // Only pass on attributes that match in both inputs.
+ if (!in_attr[i].matches(out_attr[i]))
+ {
+ out_attr[i].set_int_value(0);
+ out_attr[i].set_string_value("");
+ }
+ }
+ }
+
+ // If out_attr was copied from in_attr then it won't have a type yet.
+ if (in_attr[i].type() && !out_attr[i].type())
+ out_attr[i].set_type(in_attr[i].type());
+ }
+
+ // Merge Tag_compatibility attributes and any common GNU ones.
+ this->attributes_section_data_->merge(name, pasd);
+
+ // Check for any attributes not known on ARM.
+ typedef Vendor_object_attributes::Other_attributes Other_attributes;
+ const Other_attributes* in_other_attributes = pasd->other_attributes(vendor);
+ Other_attributes::const_iterator in_iter = in_other_attributes->begin();
+ Other_attributes* out_other_attributes =
+ this->attributes_section_data_->other_attributes(vendor);
+ Other_attributes::iterator out_iter = out_other_attributes->begin();
+
+ while (in_iter != in_other_attributes->end()
+ || out_iter != out_other_attributes->end())
+ {
+ const char* err_object = NULL;
+ int err_tag = 0;
+
+ // The tags for each list are in numerical order.
+ // If the tags are equal, then merge.
+ if (out_iter != out_other_attributes->end()
+ && (in_iter == in_other_attributes->end()
+ || in_iter->first > out_iter->first))
+ {
+ // This attribute only exists in output. We can't merge, and we
+ // don't know what the tag means, so delete it.
+ err_object = "output";
+ err_tag = out_iter->first;
+ int saved_tag = out_iter->first;
+ delete out_iter->second;
+ out_other_attributes->erase(out_iter);
+ out_iter = out_other_attributes->upper_bound(saved_tag);
+ }
+ else if (in_iter != in_other_attributes->end()
+	       && (out_iter == out_other_attributes->end()
+ || in_iter->first < out_iter->first))
+ {
+ // This attribute only exists in input. We can't merge, and we
+ // don't know what the tag means, so ignore it.
+ err_object = name;
+ err_tag = in_iter->first;
+ ++in_iter;
+ }
+ else // The tags are equal.
+ {
+	  // At present, all attributes in the list are unknown, and
+	  // therefore can't be merged meaningfully.
+ err_object = "output";
+ err_tag = out_iter->first;
+
+ // Only pass on attributes that match in both inputs.
+ if (!in_iter->second->matches(*(out_iter->second)))
+ {
+ // No match. Delete the attribute.
+ int saved_tag = out_iter->first;
+ delete out_iter->second;
+ out_other_attributes->erase(out_iter);
+ out_iter = out_other_attributes->upper_bound(saved_tag);
+ }
+ else
+ {
+ // Matched. Keep the attribute and move to the next.
+ ++out_iter;
+ ++in_iter;
+ }
+ }
+
+ if (err_object && parameters->options().warn_mismatch())
+ {
+	  // Attribute numbers >=64 (mod 128) can be safely ignored.
+ if ((err_tag & 127) < 64)
+ {
+ gold_error(_("%s: unknown mandatory EABI object attribute %d"),
+ err_object, err_tag);
+ }
+ else
+ {
+ gold_warning(_("%s: unknown EABI object attribute %d"),
+ err_object, err_tag);
+ }
+ }
+ }
+}
+
+// Stub-generation methods for Target_arm.
+
+// Make a new Arm_input_section object.
+
+template<bool big_endian>
+Arm_input_section<big_endian>*
+Target_arm<big_endian>::new_arm_input_section(
+ Relobj* relobj,
+ unsigned int shndx)
+{
+ Section_id sid(relobj, shndx);
+
+ Arm_input_section<big_endian>* arm_input_section =
+ new Arm_input_section<big_endian>(relobj, shndx);
+ arm_input_section->init();
+
+ // Register new Arm_input_section in map for look-up.
+ std::pair<typename Arm_input_section_map::iterator, bool> ins =
+ this->arm_input_section_map_.insert(std::make_pair(sid, arm_input_section));
+
+  // Make sure that we have not already created another Arm_input_section
+  // for this input section.
+ gold_assert(ins.second);
+
+ return arm_input_section;
+}
+
+// Find the Arm_input_section object corresponding to the SHNDX-th input
+// section of RELOBJ.
+
+template<bool big_endian>
+Arm_input_section<big_endian>*
+Target_arm<big_endian>::find_arm_input_section(
+ Relobj* relobj,
+ unsigned int shndx) const
+{
+ Section_id sid(relobj, shndx);
+ typename Arm_input_section_map::const_iterator p =
+ this->arm_input_section_map_.find(sid);
+ return (p != this->arm_input_section_map_.end()) ? p->second : NULL;
+}
+
+// Make a new stub table.
+
+template<bool big_endian>
+Stub_table<big_endian>*
+Target_arm<big_endian>::new_stub_table(Arm_input_section<big_endian>* owner)
+{
+ Stub_table<big_endian>* stub_table =
+ new Stub_table<big_endian>(owner);
+ this->stub_tables_.push_back(stub_table);
+
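+  // Place the stub table immediately after the data of its owner section.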
+ stub_table->set_address(owner->address() + owner->data_size());
+ stub_table->set_file_offset(owner->offset() + owner->data_size());
+ stub_table->finalize_data_size();
+
+ return stub_table;
+}
+
+// Scan a relocation for stub generation.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::scan_reloc_for_stub(
+ const Relocate_info<32, big_endian>* relinfo,
+ unsigned int r_type,
+ const Sized_symbol<32>* gsym,
+ unsigned int r_sym,
+ const Symbol_value<32>* psymval,
+ elfcpp::Elf_types<32>::Elf_Swxword addend,
+ Arm_address address)
+{
+ const Arm_relobj<big_endian>* arm_relobj =
+ Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
+
+ bool target_is_thumb;
+ Symbol_value<32> symval;
+ if (gsym != NULL)
+ {
+ // This is a global symbol. Determine if we use PLT and if the
+ // final target is THUMB.
+ if (gsym->use_plt_offset(Scan::get_reference_flags(r_type)))
+ {
+ // This uses a PLT, change the symbol value.
+ symval.set_output_value(this->plt_address_for_global(gsym));
+ psymval = &symval;
+ target_is_thumb = false;
+ }
+ else if (gsym->is_undefined())
+	// There is no need to generate a stub if the symbol is undefined.
+ return;
+ else
+ {
+ target_is_thumb =
+ ((gsym->type() == elfcpp::STT_ARM_TFUNC)
+ || (gsym->type() == elfcpp::STT_FUNC
+ && !gsym->is_undefined()
+ && ((psymval->value(arm_relobj, 0) & 1) != 0)));
+ }
+ }
+ else
+ {
+ // This is a local symbol. Determine if the final target is THUMB.
+ target_is_thumb = arm_relobj->local_symbol_is_thumb_function(r_sym);
+ }
+
+ // Strip LSB if this points to a THUMB target.
+ const Arm_reloc_property* reloc_property =
+ arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
+ gold_assert(reloc_property != NULL);
+ if (target_is_thumb
+ && reloc_property->uses_thumb_bit()
+ && ((psymval->value(arm_relobj, 0) & 1) != 0))
+ {
+ Arm_address stripped_value =
+ psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
+ symval.set_output_value(stripped_value);
+ psymval = &symval;
+ }
+
+ // Get the symbol value.
+ Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);
+
+ // Owing to pipelining, the PC relative branches below actually skip
+ // two instructions when the branch offset is 0.
+ Arm_address destination;
+ switch (r_type)
+ {
+ case elfcpp::R_ARM_CALL:
+ case elfcpp::R_ARM_JUMP24:
+ case elfcpp::R_ARM_PLT32:
+ // ARM branches.
+ destination = value + addend + 8;
+ break;
+ case elfcpp::R_ARM_THM_CALL:
+ case elfcpp::R_ARM_THM_XPC22:
+ case elfcpp::R_ARM_THM_JUMP24:
+ case elfcpp::R_ARM_THM_JUMP19:
+ // THUMB branches.
+ destination = value + addend + 4;
+ break;
+ default:
+ gold_unreachable();
+ }
+
+ Reloc_stub* stub = NULL;
+ Stub_type stub_type =
+ Reloc_stub::stub_type_for_reloc(r_type, address, destination,
+ target_is_thumb);
+ if (stub_type != arm_stub_none)
+ {
+ // Try looking up an existing stub from a stub table.
+ Stub_table<big_endian>* stub_table =
+ arm_relobj->stub_table(relinfo->data_shndx);
+ gold_assert(stub_table != NULL);
+
+ // Locate stub by destination.
+ Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);
+
+      // Create a stub if there is not one already.
+ stub = stub_table->find_reloc_stub(stub_key);
+ if (stub == NULL)
+ {
+	  // Create a new stub and add it to the stub table.
+ stub = this->stub_factory().make_reloc_stub(stub_type);
+ stub_table->add_reloc_stub(stub, stub_key);
+ }
+
+ // Record the destination address.
+ stub->set_destination_address(destination
+ | (target_is_thumb ? 1 : 0));
+ }
+
+ // For Cortex-A8, we need to record a relocation at 4K page boundary.
+ if (this->fix_cortex_a8_
+ && (r_type == elfcpp::R_ARM_THM_JUMP24
+ || r_type == elfcpp::R_ARM_THM_JUMP19
+ || r_type == elfcpp::R_ARM_THM_CALL
+ || r_type == elfcpp::R_ARM_THM_XPC22)
+ && (address & 0xfffU) == 0xffeU)
+ {
+ // Found a candidate. Note we haven't checked the destination is
+ // within 4K here: if we do so (and don't create a record) we can't
+ // tell that a branch should have been relocated when scanning later.
+ this->cortex_a8_relocs_info_[address] =
+ new Cortex_a8_reloc(stub, r_type,
+ destination | (target_is_thumb ? 1 : 0));
+ }
+}
+
+// This function scans a relocation section for stub generation.
+
+// BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
+// SHT_REL or SHT_RELA.
+
+// PRELOCS points to the relocation data. RELOC_COUNT is the number
+// of relocs. OUTPUT_SECTION is the output section.
+// NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
+// mapped to output offsets.
+
+// VIEW is the section data, VIEW_ADDRESS is its memory address, and
+// VIEW_SIZE is the size. These refer to the input section, unless
+// NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
+// the output section.
+
+template<bool big_endian>
+template<int sh_type>
+void inline
+Target_arm<big_endian>::scan_reloc_section_for_stubs(
+ const Relocate_info<32, big_endian>* relinfo,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ bool needs_special_offset_handling,
+ const unsigned char* view,
+ elfcpp::Elf_types<32>::Elf_Addr view_address,
+ section_size_type)
+{
+ typedef typename Reloc_types<sh_type, 32, big_endian>::Reloc Reltype;
+ const int reloc_size =
+ Reloc_types<sh_type, 32, big_endian>::reloc_size;
+
+ Arm_relobj<big_endian>* arm_object =
+ Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
+ unsigned int local_count = arm_object->local_symbol_count();
+
+ gold::Default_comdat_behavior default_comdat_behavior;
+ Comdat_behavior comdat_behavior = CB_UNDETERMINED;
+
+ for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
+ {
+ Reltype reloc(prelocs);
+
+ typename elfcpp::Elf_types<32>::Elf_WXword r_info = reloc.get_r_info();
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
+ unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
+
+ r_type = this->get_real_reloc_type(r_type);
+
+ // Only a few relocation types need stubs.
+ if ((r_type != elfcpp::R_ARM_CALL)
+ && (r_type != elfcpp::R_ARM_JUMP24)
+ && (r_type != elfcpp::R_ARM_PLT32)
+ && (r_type != elfcpp::R_ARM_THM_CALL)
+ && (r_type != elfcpp::R_ARM_THM_XPC22)
+ && (r_type != elfcpp::R_ARM_THM_JUMP24)
+ && (r_type != elfcpp::R_ARM_THM_JUMP19)
+ && (r_type != elfcpp::R_ARM_V4BX))
+ continue;
+
+ section_offset_type offset =
+ convert_to_section_size_type(reloc.get_r_offset());
+
+ if (needs_special_offset_handling)
+ {
+ offset = output_section->output_offset(relinfo->object,
+ relinfo->data_shndx,
+ offset);
+ if (offset == -1)
+ continue;
+ }
+
+ // Create a v4bx stub if --fix-v4bx-interworking is used.
+ if (r_type == elfcpp::R_ARM_V4BX)
+ {
+ if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING)
+ {
+ // Get the BX instruction.
+ typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
+ const Valtype* wv =
+ reinterpret_cast<const Valtype*>(view + offset);
+ elfcpp::Elf_types<32>::Elf_Swxword insn =
+ elfcpp::Swap<32, big_endian>::readval(wv);
+ const uint32_t reg = (insn & 0xf);
+
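+	      // No stub is needed when the operand register is PC (0xf).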
+ if (reg < 0xf)
+ {
+ // Try looking up an existing stub from a stub table.
+ Stub_table<big_endian>* stub_table =
+ arm_object->stub_table(relinfo->data_shndx);
+ gold_assert(stub_table != NULL);
+
+ if (stub_table->find_arm_v4bx_stub(reg) == NULL)
+ {
+		      // Create a new stub and add it to the stub table.
+ Arm_v4bx_stub* stub =
+ this->stub_factory().make_arm_v4bx_stub(reg);
+ gold_assert(stub != NULL);
+ stub_table->add_arm_v4bx_stub(stub);
+ }
+ }
+ }
+ continue;
+ }
+
+ // Get the addend.
+ Stub_addend_reader<sh_type, big_endian> stub_addend_reader;
+ elfcpp::Elf_types<32>::Elf_Swxword addend =
+ stub_addend_reader(r_type, view + offset, reloc);
+
+ const Sized_symbol<32>* sym;
+
+ Symbol_value<32> symval;
+ const Symbol_value<32> *psymval;
+ bool is_defined_in_discarded_section;
+ unsigned int shndx;
+ const Symbol* gsym = NULL;
+ if (r_sym < local_count)
+ {
+ sym = NULL;
+ psymval = arm_object->local_symbol(r_sym);
+
+ // If the local symbol belongs to a section we are discarding,
+ // and that section is a debug section, try to find the
+ // corresponding kept section and map this symbol to its
+ // counterpart in the kept section. The symbol must not
+ // correspond to a section we are folding.
+ bool is_ordinary;
+ shndx = psymval->input_shndx(&is_ordinary);
+ is_defined_in_discarded_section =
+ (is_ordinary
+ && shndx != elfcpp::SHN_UNDEF
+ && !arm_object->is_section_included(shndx)
+ && !relinfo->symtab->is_section_folded(arm_object, shndx));
+
+ // We need to compute the would-be final value of this local
+ // symbol.
+ if (!is_defined_in_discarded_section)
+ {
+ typedef Sized_relobj_file<32, big_endian> ObjType;
+ if (psymval->is_section_symbol())
+ symval.set_is_section_symbol();
+ typename ObjType::Compute_final_local_value_status status =
+ arm_object->compute_final_local_value(r_sym, psymval, &symval,
+ relinfo->symtab);
+ if (status == ObjType::CFLV_OK)
+ {
+ // Currently we cannot handle a branch to a target in
+ // a merged section. If this is the case, issue an error
+ // and also free the merge symbol value.
+ if (!symval.has_output_value())
+ {
+ const std::string& section_name =
+ arm_object->section_name(shndx);
+ arm_object->error(_("cannot handle branch to local %u "
+ "in a merged section %s"),
+ r_sym, section_name.c_str());
+ }
+ psymval = &symval;
+ }
+ else
+ {
+ // We cannot determine the final value.
+ continue;
+ }
+ }
+ }
+ else
+ {
+ gsym = arm_object->global_symbol(r_sym);
+ gold_assert(gsym != NULL);
+ if (gsym->is_forwarder())
+ gsym = relinfo->symtab->resolve_forwards(gsym);
+
+ sym = static_cast<const Sized_symbol<32>*>(gsym);
+ if (sym->has_symtab_index() && sym->symtab_index() != -1U)
+ symval.set_output_symtab_index(sym->symtab_index());
+ else
+ symval.set_no_output_symtab_entry();
+
+ // We need to compute the would-be final value of this global
+ // symbol.
+ const Symbol_table* symtab = relinfo->symtab;
+ const Sized_symbol<32>* sized_symbol =
+ symtab->get_sized_symbol<32>(gsym);
+ Symbol_table::Compute_final_value_status status;
+ Arm_address value =
+ symtab->compute_final_value<32>(sized_symbol, &status);
+
+	  // Skip this if the symbol has no output section.
+ if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
+ continue;
+ symval.set_output_value(value);
+
+ if (gsym->type() == elfcpp::STT_TLS)
+ symval.set_is_tls_symbol();
+ else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
+ symval.set_is_ifunc_symbol();
+ psymval = &symval;
+
+ is_defined_in_discarded_section =
+ (gsym->is_defined_in_discarded_section()
+ && gsym->is_undefined());
+ shndx = 0;
+ }
+
+ Symbol_value<32> symval2;
+ if (is_defined_in_discarded_section)
+ {
+ std::string name = arm_object->section_name(relinfo->data_shndx);
+
+ if (comdat_behavior == CB_UNDETERMINED)
+ comdat_behavior = default_comdat_behavior.get(name.c_str());
+
+ if (comdat_behavior == CB_PRETEND)
+ {
+ // FIXME: This case does not work for global symbols.
+ // We have no place to store the original section index.
+ // Fortunately this does not matter for comdat sections,
+ // only for sections explicitly discarded by a linker
+ // script.
+ bool found;
+ typename elfcpp::Elf_types<32>::Elf_Addr value =
+ arm_object->map_to_kept_section(shndx, name, &found);
+ if (found)
+ symval2.set_output_value(value + psymval->input_value());
+ else
+ symval2.set_output_value(0);
+ }
+ else
+ {
+ if (comdat_behavior == CB_ERROR)
+ issue_discarded_error(relinfo, i, offset, r_sym, gsym);
+ symval2.set_output_value(0);
+ }
+ symval2.set_no_output_symtab_entry();
+ psymval = &symval2;
+ }
+
+      // If the symbol is a section symbol, we don't know the actual type of
+      // the destination.  Give up.
+ if (psymval->is_section_symbol())
+ continue;
+
+ this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
+ addend, view_address + offset);
+ }
+}
+
+// Scan an input section for stub generation.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::scan_section_for_stubs(
+ const Relocate_info<32, big_endian>* relinfo,
+ unsigned int sh_type,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ bool needs_special_offset_handling,
+ const unsigned char* view,
+ Arm_address view_address,
+ section_size_type view_size)
+{
+ if (sh_type == elfcpp::SHT_REL)
+ this->scan_reloc_section_for_stubs<elfcpp::SHT_REL>(
+ relinfo,
+ prelocs,
+ reloc_count,
+ output_section,
+ needs_special_offset_handling,
+ view,
+ view_address,
+ view_size);
+ else if (sh_type == elfcpp::SHT_RELA)
+ // We do not support RELA type relocations yet. This is provided for
+ // completeness.
+ this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
+ relinfo,
+ prelocs,
+ reloc_count,
+ output_section,
+ needs_special_offset_handling,
+ view,
+ view_address,
+ view_size);
+ else
+ gold_unreachable();
+}
+
+// Group input sections for stub generation.
+//
+// We group input sections in an output section so that the total size,
+// including any padding space due to alignment, is smaller than GROUP_SIZE,
+// unless the only input section in a group is already bigger than
+// GROUP_SIZE.  For each group, an ARM stub table is created and placed
+// after the last input section in the group.  If STUBS_ALWAYS_AFTER_BRANCH
+// is false, we further extend the group after the stub table.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::group_sections(
+ Layout* layout,
+ section_size_type group_size,
+ bool stubs_always_after_branch,
+ const Task* task)
+{
+  // Group input sections and insert stub tables.
+ Layout::Section_list section_list;
+  layout->get_executable_sections(&section_list);
+ for (Layout::Section_list::const_iterator p = section_list.begin();
+ p != section_list.end();
+ ++p)
+ {
+ Arm_output_section<big_endian>* output_section =
+ Arm_output_section<big_endian>::as_arm_output_section(*p);
+ output_section->group_sections(group_size, stubs_always_after_branch,
+ this, task);
+ }
+}
+
+// Relaxation hook. This is where we do stub generation.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::do_relax(
+ int pass,
+ const Input_objects* input_objects,
+ Symbol_table* symtab,
+ Layout* layout,
+ const Task* task)
+{
+ // No need to generate stubs if this is a relocatable link.
+ gold_assert(!parameters->options().relocatable());
+
+ // If this is the first pass, we need to group input sections into
+ // stub groups.
+ bool done_exidx_fixup = false;
+ typedef typename Stub_table_list::iterator Stub_table_iterator;
+ if (pass == 1)
+ {
+ // Determine the stub group size. The group size is the absolute
+ // value of the parameter --stub-group-size. If --stub-group-size
+ // is passed a negative value, we restrict stubs to be always after
+ // the stubbed branches.
+ int32_t stub_group_size_param =
+ parameters->options().stub_group_size();
+ bool stubs_always_after_branch = stub_group_size_param < 0;
+ section_size_type stub_group_size = abs(stub_group_size_param);
+
+ if (stub_group_size == 1)
+ {
+ // Default value.
+	  // The Thumb branch range of +-4MB has to be used as the default
+	  // maximum size (a given section can contain both ARM and Thumb
+	  // code, so the worst case has to be taken into account).  If we
+	  // are fixing Cortex-A8 errata, the branch range has to be even
+	  // smaller, since a wide conditional branch has a range of +-1MB
+	  // only.
+ //
+ // This value is 48K less than that, which allows for 4096
+ // 12-byte stubs. If we exceed that, then we will fail to link.
+ // The user will have to relink with an explicit group size
+ // option.
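+	  // (4 MB - 48 KB == 4194304 - 49152 == 4145152 bytes.)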
+ stub_group_size = 4145152;
+ }
+
+ // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
+ // page as the first half of a 32-bit branch straddling two 4K pages.
+ // This is a crude way of enforcing that. In addition, long conditional
+ // branches of THUMB-2 have a range of +-1M. If we are fixing cortex-A8
+ // erratum, limit the group size to (1M - 12k) to avoid unreachable
+ // cortex-A8 stubs from long conditional branches.
+ if (this->fix_cortex_a8_)
+ {
+ stubs_always_after_branch = true;
+ const section_size_type cortex_a8_group_size = 1024 * (1024 - 12);
+	  stub_group_size = std::min(stub_group_size, cortex_a8_group_size);
+ }
+
+ group_sections(layout, stub_group_size, stubs_always_after_branch, task);
+
+ // Also fix .ARM.exidx section coverage.
+ Arm_output_section<big_endian>* exidx_output_section = NULL;
+ for (Layout::Section_list::const_iterator p =
+ layout->section_list().begin();
+ p != layout->section_list().end();
+ ++p)
+ if ((*p)->type() == elfcpp::SHT_ARM_EXIDX)
+ {
+ if (exidx_output_section == NULL)
+ exidx_output_section =
+ Arm_output_section<big_endian>::as_arm_output_section(*p);
+ else
+ // We cannot handle this now.
+ gold_error(_("multiple SHT_ARM_EXIDX sections %s and %s in a "
+ "non-relocatable link"),
+ exidx_output_section->name(),
+ (*p)->name());
+ }
+
+ if (exidx_output_section != NULL)
+ {
+ this->fix_exidx_coverage(layout, input_objects, exidx_output_section,
+ symtab, task);
+ done_exidx_fixup = true;
+ }
+ }
+ else
+ {
+      // If this is not the first pass, addresses and file offsets have
+      // been reset at this point; set them here.
+ for (Stub_table_iterator sp = this->stub_tables_.begin();
+ sp != this->stub_tables_.end();
+ ++sp)
+ {
+ Arm_input_section<big_endian>* owner = (*sp)->owner();
+ off_t off = align_address(owner->original_size(),
+ (*sp)->addralign());
+ (*sp)->set_address_and_file_offset(owner->address() + off,
+ owner->offset() + off);
+ }
+ }
+
+ // The Cortex-A8 stubs are sensitive to layout of code sections. At the
+ // beginning of each relaxation pass, just blow away all the stubs.
+ // Alternatively, we could selectively remove only the stubs and reloc
+ // information for code sections that have moved since the last pass.
+ // That would require more book-keeping.
+ if (this->fix_cortex_a8_)
+ {
+ // Clear all Cortex-A8 reloc information.
+ for (typename Cortex_a8_relocs_info::const_iterator p =
+ this->cortex_a8_relocs_info_.begin();
+ p != this->cortex_a8_relocs_info_.end();
+ ++p)
+ delete p->second;
+ this->cortex_a8_relocs_info_.clear();
+
+ // Remove all Cortex-A8 stubs.
+ for (Stub_table_iterator sp = this->stub_tables_.begin();
+ sp != this->stub_tables_.end();
+ ++sp)
+ (*sp)->remove_all_cortex_a8_stubs();
+ }
+
+  // Scan relocs for relocation stubs.
+ for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
+ op != input_objects->relobj_end();
+ ++op)
+ {
+ Arm_relobj<big_endian>* arm_relobj =
+ Arm_relobj<big_endian>::as_arm_relobj(*op);
+ // Lock the object so we can read from it. This is only called
+ // single-threaded from Layout::finalize, so it is OK to lock.
+ Task_lock_obj<Object> tl(task, arm_relobj);
+ arm_relobj->scan_sections_for_stubs(this, symtab, layout);
+ }
+
+ // Check all stub tables to see if any of them have their data sizes
+  // or address alignments changed.  These are the only things that
+ // matter.
+ bool any_stub_table_changed = false;
+ Unordered_set<const Output_section*> sections_needing_adjustment;
+ for (Stub_table_iterator sp = this->stub_tables_.begin();
+ (sp != this->stub_tables_.end()) && !any_stub_table_changed;
+ ++sp)
+ {
+ if ((*sp)->update_data_size_and_addralign())
+ {
+ // Update data size of stub table owner.
+ Arm_input_section<big_endian>* owner = (*sp)->owner();
+ uint64_t address = owner->address();
+ off_t offset = owner->offset();
+ owner->reset_address_and_file_offset();
+ owner->set_address_and_file_offset(address, offset);
+
+ sections_needing_adjustment.insert(owner->output_section());
+ any_stub_table_changed = true;
+ }
+ }
+
+ // Output_section_data::output_section() returns a const pointer but we
+ // need to update output sections, so we record all output sections needing
+ // update above and scan the sections here to find out what sections need
+ // to be updated.
+ for (Layout::Section_list::const_iterator p = layout->section_list().begin();
+ p != layout->section_list().end();
+ ++p)
+ {
+ if (sections_needing_adjustment.find(*p)
+ != sections_needing_adjustment.end())
+ (*p)->set_section_offsets_need_adjustment();
+ }
+
+ // Stop relaxation if no EXIDX fix-up and no stub table change.
+ bool continue_relaxation = done_exidx_fixup || any_stub_table_changed;
+
+ // Finalize the stubs in the last relaxation pass.
+ if (!continue_relaxation)
+ {
+ for (Stub_table_iterator sp = this->stub_tables_.begin();
+ (sp != this->stub_tables_.end()) && !any_stub_table_changed;
+ ++sp)
+ (*sp)->finalize_stubs();
+
+ // Update output local symbol counts of objects if necessary.
+ for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
+ op != input_objects->relobj_end();
+ ++op)
+ {
+ Arm_relobj<big_endian>* arm_relobj =
+ Arm_relobj<big_endian>::as_arm_relobj(*op);
+
+ // Update output local symbol counts. We need to discard local
+ // symbols defined in parts of input sections that are discarded by
+ // relaxation.
+ if (arm_relobj->output_local_symbol_count_needs_update())
+ {
+ // We need to lock the object's file to update it.
+ Task_lock_obj<Object> tl(task, arm_relobj);
+ arm_relobj->update_output_local_symbol_count();
+ }
+ }
+ }
+
+ return continue_relaxation;
+}
+
+// Relocate a stub.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::relocate_stub(
+ Stub* stub,
+ const Relocate_info<32, big_endian>* relinfo,
+ Output_section* output_section,
+ unsigned char* view,
+ Arm_address address,
+ section_size_type view_size)
+{
+ Relocate relocate;
+ const Stub_template* stub_template = stub->stub_template();
+ for (size_t i = 0; i < stub_template->reloc_count(); i++)
+ {
+ size_t reloc_insn_index = stub_template->reloc_insn_index(i);
+ const Insn_template* insn = &stub_template->insns()[reloc_insn_index];
+
+ unsigned int r_type = insn->r_type();
+ section_size_type reloc_offset = stub_template->reloc_offset(i);
+ section_size_type reloc_size = insn->size();
+ gold_assert(reloc_offset + reloc_size <= view_size);
+
+ // This is the address of the stub destination.
+ Arm_address target = stub->reloc_target(i) + insn->reloc_addend();
+ Symbol_value<32> symval;
+ symval.set_output_value(target);
+
+ // Synthesize a fake reloc just in case. We don't have a symbol so
+ // we use 0.
+ unsigned char reloc_buffer[elfcpp::Elf_sizes<32>::rel_size];
+ memset(reloc_buffer, 0, sizeof(reloc_buffer));
+ elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
+ reloc_write.put_r_offset(reloc_offset);
+ reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
+
+ relocate.relocate(relinfo, elfcpp::SHT_REL, this, output_section,
+ this->fake_relnum_for_stubs, reloc_buffer,
+ NULL, &symval, view + reloc_offset,
+ address + reloc_offset, reloc_size);
+ }
+}
+
+// Determine whether an object attribute tag takes an integer, a
+// string or both.
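+//
+// By AEABI convention, tags below 32 take an integer and, for tags of 32
+// or more, odd-numbered tags take a string while even-numbered tags take
+// an integer; the special cases below handle the exceptions.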
+
+template<bool big_endian>
+int
+Target_arm<big_endian>::do_attribute_arg_type(int tag) const
+{
+ if (tag == Object_attribute::Tag_compatibility)
+ return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
+ | Object_attribute::ATTR_TYPE_FLAG_STR_VAL);
+ else if (tag == elfcpp::Tag_nodefaults)
+ return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
+ | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT);
+ else if (tag == elfcpp::Tag_CPU_raw_name || tag == elfcpp::Tag_CPU_name)
+ return Object_attribute::ATTR_TYPE_FLAG_STR_VAL;
+ else if (tag < 32)
+ return Object_attribute::ATTR_TYPE_FLAG_INT_VAL;
+ else
+ return ((tag & 1) != 0
+ ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
+ : Object_attribute::ATTR_TYPE_FLAG_INT_VAL);
+}
+
+// Reorder attributes.
+//
+// The ABI defines that Tag_conformance should be emitted first, and that
+// Tag_nodefaults should be second (if either is defined). This sets those
+// two positions, and bumps up the position of all the remaining tags to
+// compensate.
+
+template<bool big_endian>
+int
+Target_arm<big_endian>::do_attributes_order(int num) const
+{
+  // Reorder the known object attributes in output.  We want to move
+  // Tag_conformance to position 4 and Tag_nodefaults to position 5,
+  // and shift everything between 4 .. Tag_conformance - 1 to make room.
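+  // For example, with Tag_conformance == 67 and Tag_nodefaults == 64,
+  // positions 4 and 5 emit tags 67 and 64, positions 6 .. 65 emit tags
+  // 4 .. 63, and positions 66 and 67 emit tags 65 and 66.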
+ if (num == 4)
+ return elfcpp::Tag_conformance;
+ if (num == 5)
+ return elfcpp::Tag_nodefaults;
+ if ((num - 2) < elfcpp::Tag_nodefaults)
+ return num - 2;
+ if ((num - 1) < elfcpp::Tag_conformance)
+ return num - 1;
+ return num;
+}
+
+// Scan a span of THUMB code for Cortex-A8 erratum.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::scan_span_for_cortex_a8_erratum(
+ Arm_relobj<big_endian>* arm_relobj,
+ unsigned int shndx,
+ section_size_type span_start,
+ section_size_type span_end,
+ const unsigned char* view,
+ Arm_address address)
+{
+ // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
+ //
+ // The opcode is BLX.W, BL.W, B.W, Bcc.W
+ // The branch target is in the same 4KB region as the
+ // first half of the branch.
+ // The instruction before the branch is a 32-bit
+ // length non-branch instruction.
+ section_size_type i = span_start;
+ bool last_was_32bit = false;
+ bool last_was_branch = false;
+ while (i < span_end)
+ {
+ typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
+ const Valtype* wv = reinterpret_cast<const Valtype*>(view + i);
+ uint32_t insn = elfcpp::Swap<16, big_endian>::readval(wv);
+ bool is_blx = false, is_b = false;
+ bool is_bl = false, is_bcc = false;
+
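+      // The first halfword of a 32-bit Thumb-2 instruction has its top
+      // five bits set to 0b11101, 0b11110 or 0b11111.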
+ bool insn_32bit = (insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000;
+ if (insn_32bit)
+ {
+ // Load the rest of the insn (in manual-friendly order).
+ insn = (insn << 16) | elfcpp::Swap<16, big_endian>::readval(wv + 1);
+
+ // Encoding T4: B<c>.W.
+ is_b = (insn & 0xf800d000U) == 0xf0009000U;
+ // Encoding T1: BL<c>.W.
+ is_bl = (insn & 0xf800d000U) == 0xf000d000U;
+ // Encoding T2: BLX<c>.W.
+ is_blx = (insn & 0xf800d000U) == 0xf000c000U;
+ // Encoding T3: B<c>.W (not permitted in IT block).
+ is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
+ && (insn & 0x07f00000U) != 0x03800000U);
+ }
+
+ bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
+
+      // If this instruction is a 32-bit THUMB branch that crosses a 4K
+      // page boundary and follows a 32-bit non-branch instruction, we
+      // need to work around it.
+ if (is_32bit_branch
+ && ((address + i) & 0xfffU) == 0xffeU
+ && last_was_32bit
+ && !last_was_branch)
+ {
+ // Check to see if there is a relocation stub for this branch.
+ bool force_target_arm = false;
+ bool force_target_thumb = false;
+ const Cortex_a8_reloc* cortex_a8_reloc = NULL;
+ Cortex_a8_relocs_info::const_iterator p =
+ this->cortex_a8_relocs_info_.find(address + i);
+
+ if (p != this->cortex_a8_relocs_info_.end())
+ {
+ cortex_a8_reloc = p->second;
+ bool target_is_thumb = (cortex_a8_reloc->destination() & 1) != 0;
+
+ if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
+ && !target_is_thumb)
+ force_target_arm = true;
+ else if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
+ && target_is_thumb)
+ force_target_thumb = true;
+ }
+
+ off_t offset;
+ Stub_type stub_type = arm_stub_none;
+
+ // Check if we have an offending branch instruction.
+ uint16_t upper_insn = (insn >> 16) & 0xffffU;
+ uint16_t lower_insn = insn & 0xffffU;
+ typedef class Arm_relocate_functions<big_endian> RelocFuncs;
+
+ if (cortex_a8_reloc != NULL
+ && cortex_a8_reloc->reloc_stub() != NULL)
+ // We've already made a stub for this instruction, e.g.
+ // it's a long branch or a Thumb->ARM stub. Assume that
+ // stub will suffice to work around the A8 erratum (see
+ // setting of always_after_branch above).
+ ;
+ else if (is_bcc)
+ {
+ offset = RelocFuncs::thumb32_cond_branch_offset(upper_insn,
+ lower_insn);
+ stub_type = arm_stub_a8_veneer_b_cond;
+ }
+ else if (is_b || is_bl || is_blx)
+ {
+ offset = RelocFuncs::thumb32_branch_offset(upper_insn,
+ lower_insn);
+ if (is_blx)
+ offset &= ~3;
+
+ stub_type = (is_blx
+ ? arm_stub_a8_veneer_blx
+ : (is_bl
+ ? arm_stub_a8_veneer_bl
+ : arm_stub_a8_veneer_b));
+ }
+
+ if (stub_type != arm_stub_none)
+ {
+ Arm_address pc_for_insn = address + i + 4;
+
+ // The original instruction is a BL, but the target is
+ // an ARM instruction. If we were not making a stub,
+ // the BL would have been converted to a BLX. Use the
+ // BLX stub instead in that case.
+ if (this->may_use_v5t_interworking() && force_target_arm
+ && stub_type == arm_stub_a8_veneer_bl)
+ {
+ stub_type = arm_stub_a8_veneer_blx;
+ is_blx = true;
+ is_bl = false;
+ }
+ // Conversely, if the original instruction was
+ // BLX but the target is Thumb mode, use the BL stub.
+ else if (force_target_thumb
+ && stub_type == arm_stub_a8_veneer_blx)
+ {
+ stub_type = arm_stub_a8_veneer_bl;
+ is_blx = false;
+ is_bl = true;
+ }
+
+ if (is_blx)
+ pc_for_insn &= ~3;
+
+ // If we found a relocation, use the proper destination,
+ // not the offset in the (unrelocated) instruction.
+ // Note this is always done if we switched the stub type above.
+ if (cortex_a8_reloc != NULL)
+ offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
+
+ Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
+
+	      // Add a new stub if the destination address is in the same
+	      // 4K page.
+ if (((address + i) & ~0xfffU) == (target & ~0xfffU))
+ {
+ Cortex_a8_stub* stub =
+ this->stub_factory_.make_cortex_a8_stub(stub_type,
+ arm_relobj, shndx,
+ address + i,
+ target, insn);
+ Stub_table<big_endian>* stub_table =
+ arm_relobj->stub_table(shndx);
+ gold_assert(stub_table != NULL);
+ stub_table->add_cortex_a8_stub(address + i, stub);
+ }
+ }
+ }
+
+ i += insn_32bit ? 4 : 2;
+ last_was_32bit = insn_32bit;
+ last_was_branch = is_32bit_branch;
+ }
+}
+
+// Apply the Cortex-A8 workaround.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::apply_cortex_a8_workaround(
+ const Cortex_a8_stub* stub,
+ Arm_address stub_address,
+ unsigned char* insn_view,
+ Arm_address insn_address)
+{
+ typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
+ Valtype* wv = reinterpret_cast<Valtype*>(insn_view);
+ Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
+ Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
+ off_t branch_offset = stub_address - (insn_address + 4);
+
+ typedef class Arm_relocate_functions<big_endian> RelocFuncs;
+ switch (stub->stub_template()->type())
+ {
+ case arm_stub_a8_veneer_b_cond:
+ // For a conditional branch, we rewrite it as an unconditional
+ // branch to the stub. We use the Thumb-2 encoding here.
+ upper_insn = 0xf000U;
+ lower_insn = 0xb800U;
+ // Fall through.
+ case arm_stub_a8_veneer_b:
+ case arm_stub_a8_veneer_bl:
+ case arm_stub_a8_veneer_blx:
+ if ((lower_insn & 0x5000U) == 0x4000U)
+ // For a BLX instruction, make sure that the relocation is
+ // rounded up to a word boundary. This follows the semantics of
+ // the instruction which specifies that bit 1 of the target
+ // address will come from bit 1 of the base address.
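+ // (For instance, a hypothetical offset of 0x1006 becomes
+ // ((0x1006 + 2) & ~3) == 0x1008.)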
+ branch_offset = (branch_offset + 2) & ~3;
+
+ // Put BRANCH_OFFSET back into the insn.
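+ // (The Thumb-2 branch encodings hold a signed 25-bit offset,
+ // which is what Bits<25> checks here.)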
+ gold_assert(!Bits<25>::has_overflow32(branch_offset));
+ upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
+ lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
+ break;
+
+ default:
+ gold_unreachable();
+ }
+
+ // Put the relocated value back in the object file:
+ elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
+ elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
+}
+
+// Target selector for ARM. Note this is never instantiated directly.
+// It's only used in Target_selector_arm_nacl, below.
+
+template<bool big_endian>
+class Target_selector_arm : public Target_selector
+{
+ public:
+ Target_selector_arm()
+ : Target_selector(elfcpp::EM_ARM, 32, big_endian,
+ (big_endian ? "elf32-bigarm" : "elf32-littlearm"),
+ (big_endian ? "armelfb" : "armelf"))
+ { }
+
+ Target*
+ do_instantiate_target()
+ { return new Target_arm<big_endian>(); }
+};
+
+// Fix .ARM.exidx section coverage.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::fix_exidx_coverage(
+ Layout* layout,
+ const Input_objects* input_objects,
+ Arm_output_section<big_endian>* exidx_section,
+ Symbol_table* symtab,
+ const Task* task)
+{
+ // We need to look at all the input sections in the output in
+ // ascending order of output address. We do that by building a list
+ // of output sections sorted by address and then walking the output
+ // sections in order. The input sections within an output section
+ // are already sorted by address.
+
+ typedef std::set<Output_section*, output_section_address_less_than>
+ Sorted_output_section_list;
+ Sorted_output_section_list sorted_output_sections;
+
+ // Collect the output sections containing the text sections that
+ // the EXIDX input sections point to.
+ for (Input_objects::Relobj_iterator p = input_objects->relobj_begin();
+ p != input_objects->relobj_end();
+ ++p)
+ {
+ Arm_relobj<big_endian>* arm_relobj =
+ Arm_relobj<big_endian>::as_arm_relobj(*p);
+ std::vector<unsigned int> shndx_list;
+ arm_relobj->get_exidx_shndx_list(&shndx_list);
+ for (size_t i = 0; i < shndx_list.size(); ++i)
+ {
+ const Arm_exidx_input_section* exidx_input_section =
+ arm_relobj->exidx_input_section_by_shndx(shndx_list[i]);
+ gold_assert(exidx_input_section != NULL);
+ if (!exidx_input_section->has_errors())
+ {
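+ // The sh_link field of an EXIDX section gives the index of the
+ // text section whose unwinding it describes.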
+ unsigned int text_shndx = exidx_input_section->link();
+ Output_section* os = arm_relobj->output_section(text_shndx);
+ if (os != NULL && (os->flags() & elfcpp::SHF_ALLOC) != 0)
+ sorted_output_sections.insert(os);
+ }
+ }
+ }
+
+ // Go over the output sections in ascending order of output addresses.
+ typedef typename Arm_output_section<big_endian>::Text_section_list
+ Text_section_list;
+ Text_section_list sorted_text_sections;
+ for (typename Sorted_output_section_list::iterator p =
+ sorted_output_sections.begin();
+ p != sorted_output_sections.end();
+ ++p)
+ {
+ Arm_output_section<big_endian>* arm_output_section =
+ Arm_output_section<big_endian>::as_arm_output_section(*p);
+ arm_output_section->append_text_sections_to_list(&sorted_text_sections);
+ }
+
+ exidx_section->fix_exidx_coverage(layout, sorted_text_sections, symtab,
+ merge_exidx_entries(), task);
+}
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::do_define_standard_symbols(
+ Symbol_table* symtab,
+ Layout* layout)
+{
+ // Handle the .ARM.exidx section.
+ Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
+
+ if (exidx_section != NULL)
+ {
+ // Create __exidx_start and __exidx_end symbols.
+ symtab->define_in_output_data("__exidx_start",
+ NULL, // version
+ Symbol_table::PREDEFINED,
+ exidx_section,
+ 0, // value
+ 0, // symsize
+ elfcpp::STT_NOTYPE,
+ elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN,
+ 0, // nonvis
+ false, // offset_is_from_end
+ true); // only_if_ref
+
+ symtab->define_in_output_data("__exidx_end",
+ NULL, // version
+ Symbol_table::PREDEFINED,
+ exidx_section,
+ 0, // value
+ 0, // symsize
+ elfcpp::STT_NOTYPE,
+ elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN,
+ 0, // nonvis
+ true, // offset_is_from_end
+ true); // only_if_ref
+ }
+ else
+ {
+ // Define __exidx_start and __exidx_end even when the .ARM.exidx
+ // section is missing, to match ld's behaviour.
+ symtab->define_as_constant("__exidx_start", NULL,
+ Symbol_table::PREDEFINED,
+ 0, 0, elfcpp::STT_OBJECT,
+ elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
+ true, false);
+ symtab->define_as_constant("__exidx_end", NULL,
+ Symbol_table::PREDEFINED,
+ 0, 0, elfcpp::STT_OBJECT,
+ elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
+ true, false);
+ }
+}
+
+// NaCl variant. It uses different PLT contents, laid out in the
+// 16-byte bundles that Native Client requires.
+
+template<bool big_endian>
+class Output_data_plt_arm_nacl;
+
+template<bool big_endian>
+class Target_arm_nacl : public Target_arm<big_endian>
+{
+ public:
+ Target_arm_nacl()
+ : Target_arm<big_endian>(&arm_nacl_info)
+ { }
+
+ protected:
+ virtual Output_data_plt_arm<big_endian>*
+ do_make_data_plt(
+ Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ { return new Output_data_plt_arm_nacl<big_endian>(
+ layout, got, got_plt, got_irelative); }
+
+ private:
+ static const Target::Target_info arm_nacl_info;
+};
+
+template<bool big_endian>
+const Target::Target_info Target_arm_nacl<big_endian>::arm_nacl_info =
+{
+ 32, // size
+ big_endian, // is_big_endian
+ elfcpp::EM_ARM, // machine_code
+ false, // has_make_symbol
+ false, // has_resolve
+ false, // has_code_fill
+ true, // is_default_stack_executable
+ false, // can_icf_inline_merge_sections
+ '\0', // wrap_char
+ "/lib/ld-nacl-arm.so.1", // dynamic_linker
+ 0x20000, // default_text_segment_address
+ 0x10000, // abi_pagesize (overridable by -z max-page-size)
+ 0x10000, // common_pagesize (overridable by -z common-page-size)
+ true, // isolate_execinstr
+ 0x10000000, // rosegment_gap
+ elfcpp::SHN_UNDEF, // small_common_shndx
+ elfcpp::SHN_UNDEF, // large_common_shndx
+ 0, // small_common_section_flags
+ 0, // large_common_section_flags
+ ".ARM.attributes", // attributes_section
+ "aeabi", // attributes_vendor
+ "_start", // entry_symbol_name
+ 32, // hash_entry_size
+ elfcpp::SHT_PROGBITS, // unwind_section_type
+};
+
+template<bool big_endian>
+class Output_data_plt_arm_nacl : public Output_data_plt_arm<big_endian>
+{
+ public:
+ Output_data_plt_arm_nacl(
+ Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_data_plt_arm<big_endian>(layout, 16, got, got_plt, got_irelative)
+ { }
+
+ protected:
+ // Return the offset of the first non-reserved PLT entry.
+ virtual unsigned int
+ do_first_plt_entry_offset() const
+ { return sizeof(first_plt_entry); }
+
+ // Return the size of a PLT entry.
+ virtual unsigned int
+ do_get_plt_entry_size() const
+ { return sizeof(plt_entry); }
+
+ virtual void
+ do_fill_first_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address);
+
+ virtual void
+ do_fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset);
+
+ private:
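+ // Encode the low 16 bits of VALUE into the immediate fields of an
+ // ARM MOVW instruction: bits [11:0] go to imm12 and bits [15:12]
+ // to imm4 (instruction bits 19:16).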
+ inline uint32_t arm_movw_immediate(uint32_t value)
+ {
+ return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
+ }
+
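+ // Likewise for MOVT, which takes the high 16 bits of VALUE: bits
+ // [27:16] go to imm12 and bits [31:28] to imm4.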
+ inline uint32_t arm_movt_immediate(uint32_t value)
+ {
+ return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
+ }
+
+ // Template for the first PLT entry.
+ static const uint32_t first_plt_entry[16];
+
+ // Template for subsequent PLT entries.
+ static const uint32_t plt_entry[4];
+};
+
+// The first entry in the PLT.
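+// Under NaCl's software fault isolation rules, ARM code is laid out
+// in 16-byte bundles, and every computed branch target is masked
+// with BIC instructions before the BX.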
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_nacl<big_endian>::first_plt_entry[16] =
+{
+ // First bundle:
+ 0xe300c000, // movw ip, #:lower16:&GOT[2]-.+8
+ 0xe340c000, // movt ip, #:upper16:&GOT[2]-.+8
+ 0xe08cc00f, // add ip, ip, pc
+ 0xe52dc008, // str ip, [sp, #-8]!
+ // Second bundle:
+ 0xe3ccc103, // bic ip, ip, #0xc0000000
+ 0xe59cc000, // ldr ip, [ip]
+ 0xe3ccc13f, // bic ip, ip, #0xc000000f
+ 0xe12fff1c, // bx ip
+ // Third bundle:
+ 0xe320f000, // nop
+ 0xe320f000, // nop
+ 0xe320f000, // nop
+ // .Lplt_tail:
+ 0xe50dc004, // str ip, [sp, #-4]
+ // Fourth bundle:
+ 0xe3ccc103, // bic ip, ip, #0xc0000000
+ 0xe59cc000, // ldr ip, [ip]
+ 0xe3ccc13f, // bic ip, ip, #0xc000000f
+ 0xe12fff1c, // bx ip
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_nacl<big_endian>::do_fill_first_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address)
+{
+ // Write the first PLT entry. All but the first two words are
+ // constants.
+ const size_t num_first_plt_words = (sizeof(first_plt_entry)
+ / sizeof(first_plt_entry[0]));
+
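+ // The movw/movt pair materializes &GOT[2] - pc: GOT[2] sits at
+ // GOT_ADDRESS + 8, and pc, as read by the 'add ip, ip, pc' at
+ // PLT_ADDRESS + 8, yields PLT_ADDRESS + 16.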
+ int32_t got_displacement = got_address + 8 - (plt_address + 16);
+
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 0, first_plt_entry[0] | arm_movw_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 4, first_plt_entry[1] | arm_movt_immediate (got_displacement));
+
+ for (size_t i = 2; i < num_first_plt_words; ++i)
+ elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
+}
+
+// Subsequent entries in the PLT.
+
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_nacl<big_endian>::plt_entry[4] =
+{
+ 0xe300c000, // movw ip, #:lower16:&GOT[n]-.+8
+ 0xe340c000, // movt ip, #:upper16:&GOT[n]-.+8
+ 0xe08cc00f, // add ip, ip, pc
+ 0xea000000, // b .Lplt_tail
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_nacl<big_endian>::do_fill_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset)
+{
+ // Calculate the displacement between the PLT slot and the
+ // common tail that's part of the special initial PLT slot.
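+ // .Lplt_tail is the twelfth word of the first PLT entry (offset
+ // 44); the B at the end of this slot sits at PLT_OFFSET + 12 and
+ // reads pc as PLT_OFFSET + 20, hence the sizeof terms below.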
+ int32_t tail_displacement = (plt_address + (11 * sizeof(uint32_t))
+ - (plt_address + plt_offset
+ + sizeof(plt_entry) + sizeof(uint32_t)));
+ gold_assert((tail_displacement & 3) == 0);
+ tail_displacement >>= 2;
+
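+ // The word displacement must fit in the B instruction's signed
+ // 24-bit immediate field.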
+ gold_assert ((tail_displacement & 0xff000000) == 0
+ || (-tail_displacement & 0xff000000) == 0);
+
+ // Calculate the displacement between the PLT slot and the entry
+ // in the GOT. The offset accounts for the value produced by
+ // adding to pc in the penultimate instruction of the PLT stub.
+ const int32_t got_displacement = (got_address + got_offset
+ - (plt_address + plt_offset
+ + sizeof(plt_entry)));
+
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 0, plt_entry[0] | arm_movw_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 4, plt_entry[1] | arm_movt_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 8, plt_entry[2]);
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 12, plt_entry[3] | (tail_displacement & 0x00ffffff));
+}
+
+// Target selectors.
+
+template<bool big_endian>
+class Target_selector_arm_nacl
+ : public Target_selector_nacl<Target_selector_arm<big_endian>,
+ Target_arm_nacl<big_endian> >
+{
+ public:
+ Target_selector_arm_nacl()
+ : Target_selector_nacl<Target_selector_arm<big_endian>,
+ Target_arm_nacl<big_endian> >(
+ "arm",
+ big_endian ? "elf32-bigarm-nacl" : "elf32-littlearm-nacl",
+ big_endian ? "armelfb_nacl" : "armelf_nacl")
+ { }