Skip relocations in non-loaded, non-alloced sections
diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c
index 8698fff93056ab183ffb19fad50a13a731de0019..8de01b46671cd2ac1cbd94ef4373db439a62a9c4 100644
@@ -3520,6 +3520,11 @@ using_thumb_only (struct elf32_arm_link_hash_table *globals)
 
   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
 
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
   if (arch == TAG_CPU_ARCH_V6_M
       || arch == TAG_CPU_ARCH_V6S_M
       || arch == TAG_CPU_ARCH_V7E_M
@@ -3535,9 +3540,25 @@ using_thumb_only (struct elf32_arm_link_hash_table *globals)
 static bfd_boolean
 using_thumb2 (struct elf32_arm_link_hash_table *globals)
 {
-  int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                      Tag_CPU_arch);
-  return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
+  int arch;
+  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+                                           Tag_THUMB_ISA_use);
+
+  if (thumb_isa)
+    return thumb_isa == 2;
+
+  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
+
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  return (arch == TAG_CPU_ARCH_V6T2
+         || arch == TAG_CPU_ARCH_V7
+         || arch == TAG_CPU_ARCH_V7E_M
+         || arch == TAG_CPU_ARCH_V8
+         || arch == TAG_CPU_ARCH_V8M_MAIN);
 }
 
 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
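For reference, the reworked using_thumb2 () now trusts the Tag_THUMB_ISA_use build attribute when it is present (per the ABI build attributes, 2 allows 32-bit Thumb, 1 means 16-bit Thumb only, 0 gives no information) and only falls back to the Tag_CPU_arch whitelist of V6T2, V7, V7E_M, V8 and V8M_MAIN. A minimal standalone sketch of that decision order, using stand-in constants rather than the definitions from include/elf/arm.h:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in constants for Tag_THUMB_ISA_use values; the real definitions live
   in include/elf/arm.h and are not reproduced here.  */
enum { THUMB_ISA_UNKNOWN = 0, THUMB_ISA_THUMB1 = 1, THUMB_ISA_THUMB2 = 2 };

/* Attribute-priority check mirroring the reworked using_thumb2 ():
   Tag_THUMB_ISA_use wins when present, Tag_CPU_arch only breaks ties.  */
static bool
sketch_using_thumb2 (int thumb_isa, bool cpu_arch_implies_thumb2)
{
  if (thumb_isa != THUMB_ISA_UNKNOWN)
    return thumb_isa == THUMB_ISA_THUMB2;
  return cpu_arch_implies_thumb2;
}

int
main (void)
{
  /* Thumb-1 only object on an arch that would otherwise imply Thumb-2.  */
  printf ("%d\n", sketch_using_thumb2 (THUMB_ISA_THUMB1, true));   /* 0 */
  /* No Thumb attribute: fall back to the architecture whitelist.  */
  printf ("%d\n", sketch_using_thumb2 (THUMB_ISA_UNKNOWN, true));  /* 1 */
  return 0;
}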
@@ -3742,19 +3763,16 @@ arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
 {
   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
                                             Tag_CPU_arch);
-  return arch == TAG_CPU_ARCH_V6T2
-        || arch == TAG_CPU_ARCH_V6K
-        || arch == TAG_CPU_ARCH_V7
-        || arch == TAG_CPU_ARCH_V7E_M;
-}
 
-static bfd_boolean
-arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
-{
-  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                            Tag_CPU_arch);
-  return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
-         || arch == TAG_CPU_ARCH_V7E_M);
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  return (arch == TAG_CPU_ARCH_V6T2
+         || arch == TAG_CPU_ARCH_V6K
+         || arch == TAG_CPU_ARCH_V7
+         || arch == TAG_CPU_ARCH_V8);
 }
 
 static bfd_boolean
@@ -4136,68 +4154,155 @@ elf32_arm_get_stub_entry (const asection *input_section,
   return stub_entry;
 }
 
-/* Find or create a stub section.  Returns a pointer to the stub section, and
-   the section to which the stub section will be attached (in *LINK_SEC_P).
+/* Whether veneers of type STUB_TYPE need to be in a dedicated output
+   section.  */
+
+static bfd_boolean
+arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  return FALSE;
+}
+
+/* Required alignment (as a power of 2) for the dedicated section holding
+   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
+   with input sections.  */
+
+static int
+arm_dedicated_stub_output_section_required_alignment
+  (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+  return 0;
+}
+
+/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
+   NULL if veneers of this type are interspersed with input sections.  */
+
+static const char *
+arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+  return NULL;
+}
+
+/* If veneers of type STUB_TYPE should go in a dedicated output section,
+   returns the address of the hash table field in HTAB holding a pointer to the
+   corresponding input section.  Otherwise, returns NULL.  */
+
+static asection **
+arm_dedicated_stub_input_section_ptr
+  (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
+   enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+  return NULL;
+}
+
+/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
+   is the section branching into the veneer; it may be NULL if the stub is to
+   go in a dedicated output section.  Returns a pointer to the stub section,
+   and the section to which the stub section will be attached (in *LINK_SEC_P).
    LINK_SEC_P may be NULL.  */
 
 static asection *
 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
-                                  struct elf32_arm_link_hash_table *htab)
+                                  struct elf32_arm_link_hash_table *htab,
+                                  enum elf32_arm_stub_type stub_type)
 {
-  asection *link_sec;
-  asection *stub_sec;
-  asection *out_sec;
-
-  link_sec = htab->stub_group[section->id].link_sec;
-  BFD_ASSERT (link_sec != NULL);
-  stub_sec = htab->stub_group[section->id].stub_sec;
+  asection *link_sec, *out_sec, **stub_sec_p;
+  const char *stub_sec_prefix;
+  bfd_boolean dedicated_output_section =
+    arm_dedicated_stub_output_section_required (stub_type);
+  int align;
 
-  if (stub_sec == NULL)
+  if (dedicated_output_section)
     {
-      stub_sec = htab->stub_group[link_sec->id].stub_sec;
-      if (stub_sec == NULL)
+      bfd *output_bfd = htab->obfd;
+      const char *out_sec_name =
+       arm_dedicated_stub_output_section_name (stub_type);
+      link_sec = NULL;
+      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+      stub_sec_prefix = out_sec_name;
+      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
+      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
+      if (out_sec == NULL)
        {
-         size_t namelen;
-         bfd_size_type len;
-         char *s_name;
-
-         namelen = strlen (link_sec->name);
-         len = namelen + sizeof (STUB_SUFFIX);
-         s_name = (char *) bfd_alloc (htab->stub_bfd, len);
-         if (s_name == NULL)
-           return NULL;
-
-         memcpy (s_name, link_sec->name, namelen);
-         memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
-         out_sec = link_sec->output_section;
-         stub_sec = (*htab->add_stub_section) (s_name, out_sec, link_sec,
-                                               htab->nacl_p ? 4 : 3);
-         if (stub_sec == NULL)
-           return NULL;
-         htab->stub_group[link_sec->id].stub_sec = stub_sec;
+         (*_bfd_error_handler) (_("No address assigned to the veneers output "
+                                  "section %s"), out_sec_name);
+         return NULL;
        }
-      htab->stub_group[section->id].stub_sec = stub_sec;
+    }
+  else
+    {
+      link_sec = htab->stub_group[section->id].link_sec;
+      BFD_ASSERT (link_sec != NULL);
+      stub_sec_p = &htab->stub_group[section->id].stub_sec;
+      if (*stub_sec_p == NULL)
+       stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
+      stub_sec_prefix = link_sec->name;
+      out_sec = link_sec->output_section;
+      align = htab->nacl_p ? 4 : 3;
+    }
+
+  if (*stub_sec_p == NULL)
+    {
+      size_t namelen;
+      bfd_size_type len;
+      char *s_name;
+
+      namelen = strlen (stub_sec_prefix);
+      len = namelen + sizeof (STUB_SUFFIX);
+      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
+      if (s_name == NULL)
+       return NULL;
+
+      memcpy (s_name, stub_sec_prefix, namelen);
+      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
+                                              align);
+      if (*stub_sec_p == NULL)
+       return NULL;
+
+      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
+                       | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
+                       | SEC_KEEP;
     }
 
+  if (!dedicated_output_section)
+    htab->stub_group[section->id].stub_sec = *stub_sec_p;
+
   if (link_sec_p)
     *link_sec_p = link_sec;
 
-  return stub_sec;
+  return *stub_sec_p;
 }
 
 /* Add a new stub entry to the stub hash.  Not all fields of the new
    stub entry are initialised.  */
 
 static struct elf32_arm_stub_hash_entry *
-elf32_arm_add_stub (const char *stub_name,
-                   asection *section,
-                   struct elf32_arm_link_hash_table *htab)
+elf32_arm_add_stub (const char *stub_name, asection *section,
+                   struct elf32_arm_link_hash_table *htab,
+                   enum elf32_arm_stub_type stub_type)
 {
   asection *link_sec;
   asection *stub_sec;
   struct elf32_arm_stub_hash_entry *stub_entry;
 
-  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
+  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
+                                               stub_type);
   if (stub_sec == NULL)
     return NULL;
 
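The stub section name is still built the same way as before the change: a prefix (now either the link section name or the dedicated output section name) is concatenated with STUB_SUFFIX, and copying sizeof (STUB_SUFFIX) bytes brings the terminating NUL along. A standalone sketch of the idiom, with malloc standing in for bfd_alloc and a ".stub" suffix assumed purely for illustration:

/* Standalone illustration of the prefix + STUB_SUFFIX concatenation used in
   elf32_arm_create_or_find_stub_sec ().  malloc replaces bfd_alloc and the
   suffix value is assumed here; the real one comes from elf32-arm.c.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STUB_SUFFIX ".stub"

static char *
make_stub_sec_name (const char *prefix)
{
  size_t namelen = strlen (prefix);
  /* sizeof (STUB_SUFFIX) includes the terminating NUL, so no "+ 1".  */
  char *s_name = malloc (namelen + sizeof (STUB_SUFFIX));

  if (s_name == NULL)
    return NULL;
  memcpy (s_name, prefix, namelen);
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
  return s_name;
}

int
main (void)
{
  char *name = make_stub_sec_name (".text.startup");
  printf ("%s\n", name);   /* .text.startup.stub */
  free (name);
  return 0;
}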
@@ -4348,6 +4453,18 @@ arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
   return FALSE;
 }
 
+/* Returns the padding needed for the dedicated section used by stubs of type
+   STUB_TYPE.  */
+
+static int
+arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  return 0;
+}
+
 static bfd_boolean
 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
                    void * in_arg)
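This getter rounds out a small hook family (arm_dedicated_stub_output_section_required, ..._name, ..._required_alignment, arm_dedicated_stub_input_section_ptr and this padding helper) that for now answers "no dedicated section, no padding" for every stub type. A hypothetical sketch of how two of the hooks could be specialised for a future stub type that does want its own output section; arm_stub_example_veneer and ".example.veneers" are invented for illustration and are not part of this patch:

/* Hypothetical specialisation sketch: assumes an arm_stub_example_veneer
   value has been added to enum elf32_arm_stub_type; the section name is
   made up for the example.  */
static bfd_boolean
arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_example_veneer:
      return TRUE;
    default:
      return FALSE;
    }
}

static const char *
arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_example_veneer:
      return ".example.veneers";
    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}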
@@ -5187,7 +5304,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
       return TRUE;
     }
 
-  stub_entry = elf32_arm_add_stub (stub_name, section, htab);
+  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
   if (stub_entry == NULL)
     {
       if (!sym_claimed)
@@ -5324,6 +5441,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
       bfd *input_bfd;
       unsigned int bfd_indx;
       asection *stub_sec;
+      enum elf32_arm_stub_type stub_type;
       bfd_boolean stub_changed = FALSE;
       unsigned prev_num_a8_fixes = num_a8_fixes;
 
@@ -5379,7 +5497,6 @@ elf32_arm_size_stubs (bfd *output_bfd,
              for (; irela < irelaend; irela++)
                {
                  unsigned int r_type, r_indx;
-                 enum elf32_arm_stub_type stub_type;
                  asection *sym_sec;
                  bfd_vma sym_value;
                  bfd_vma destination;
@@ -5693,14 +5810,34 @@ elf32_arm_size_stubs (bfd *output_bfd,
          stub_sec->size = 0;
        }
 
+      /* Compute stub section size, considering padding.  */
       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
+          stub_type++)
+       {
+         int size, padding;
+         asection **stub_sec_p;
+
+         padding = arm_dedicated_stub_section_padding (stub_type);
+         stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+         /* Skip if no stub input section or no stub section padding
+            required.  */
+         if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
+           continue;
+         /* Stub section padding required but no dedicated section.  */
+         BFD_ASSERT (stub_sec_p);
+
+         size = (*stub_sec_p)->size;
+         size = (size + padding - 1) & ~(padding - 1);
+         (*stub_sec_p)->size = size;
+       }
 
       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
       if (htab->fix_cortex_a8)
        for (i = 0; i < num_a8_fixes; i++)
          {
            stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
-                        a8_fixes[i].section, htab);
+                        a8_fixes[i].section, htab, a8_fixes[i].stub_type);
 
            if (stub_sec == NULL)
              return FALSE;
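The rounding above, size = (size + padding - 1) & ~(padding - 1), assumes the padding value is a power of two; since arm_dedicated_stub_section_padding () currently returns 0, the loop skips every stub type for now. A standalone sketch of the same round-up with a couple of worked values:

/* Standalone illustration of the power-of-two round-up used when padding
   dedicated stub sections; the values are examples, not taken from the
   patch.  */
#include <assert.h>
#include <stdio.h>

static unsigned int
round_up (unsigned int size, unsigned int padding)
{
  /* Only valid when PADDING is a power of two.  */
  assert (padding != 0 && (padding & (padding - 1)) == 0);
  return (size + padding - 1) & ~(padding - 1);
}

int
main (void)
{
  printf ("%u\n", round_up (10, 8));   /* 16 */
  printf ("%u\n", round_up (32, 32));  /* 32 */
  return 0;
}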
@@ -5798,7 +5935,8 @@ elf32_arm_build_stubs (struct bfd_link_info *info)
       if (!strstr (stub_sec->name, STUB_SUFFIX))
        continue;
 
-      /* Allocate memory to hold the linker stubs.  */
+      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
+        must at least be done for stub sections requiring padding.  */
       size = stub_sec->size;
       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
       if (stub_sec->contents == NULL && size != 0)
@@ -6491,6 +6629,37 @@ bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
 }
 
+/* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
+   ensures they are not marked for deletion by
+   strip_excluded_output_sections () when veneers are going to be created
+   later.  Not doing so would trigger an assert on empty section size in
+   lang_size_sections_1 ().  */
+
+void
+bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
+{
+  enum elf32_arm_stub_type stub_type;
+
+  /* If we are only performing a partial link,
+     there is no need to keep these sections.  */
+  if (bfd_link_relocatable (info))
+    return;
+
+  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
+    {
+      asection *out_sec;
+      const char *out_sec_name;
+
+      if (!arm_dedicated_stub_output_section_required (stub_type))
+       continue;
+
+      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
+      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
+      if (out_sec != NULL)
+       out_sec->flags |= SEC_KEEP;
+    }
+}
+
 /* Select a BFD to be used to hold the sections used by the glue code.
    This function is called from the linker scripts in ld/emultempl/
    {armelf/pe}.em.  */
@@ -8836,7 +9005,7 @@ elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
       if (!is_local)
        /* add r0,pc; ldr r0, [r0]  */
        insn = 0x44786800;
-      else if (arch_has_thumb2_nop (globals))
+      else if (using_thumb2 (globals))
        /* nop.w */
        insn = 0xf3af8000;
       else
@@ -9666,7 +9835,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
        if (h && h->root.type == bfd_link_hash_undefweak
            && plt_offset == (bfd_vma) -1)
          {
-           if (arch_has_thumb2_nop (globals))
+           if (thumb2)
              {
                bfd_put_16 (input_bfd, 0xf3af, hit_data);
                bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
@@ -11358,14 +11527,11 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
              && r_symndx != STN_UNDEF
              && bfd_is_und_section (sec)
              && ELF_ST_BIND (sym->st_info) != STB_WEAK)
-           {
-             if (!info->callbacks->undefined_symbol
-                 (info, bfd_elf_string_from_elf_section
-                  (input_bfd, symtab_hdr->sh_link, sym->st_name),
-                  input_bfd, input_section,
-                  rel->r_offset, TRUE))
-               return FALSE;
-           }
+           (*info->callbacks->undefined_symbol)
+             (info, bfd_elf_string_from_elf_section
+              (input_bfd, symtab_hdr->sh_link, sym->st_name),
+              input_bfd, input_section,
+              rel->r_offset, TRUE);
 
          if (globals->use_rel)
            {
@@ -11585,20 +11751,15 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
              /* If the overflowing reloc was to an undefined symbol,
                 we have already printed one error message and there
                 is no point complaining again.  */
-             if ((! h ||
-                  h->root.type != bfd_link_hash_undefined)
-                 && (!((*info->callbacks->reloc_overflow)
-                       (info, (h ? &h->root : NULL), name, howto->name,
-                        (bfd_vma) 0, input_bfd, input_section,
-                        rel->r_offset))))
-                 return FALSE;
+             if (!h || h->root.type != bfd_link_hash_undefined)
+               (*info->callbacks->reloc_overflow)
+                 (info, (h ? &h->root : NULL), name, howto->name,
+                  (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
              break;
 
            case bfd_reloc_undefined:
-             if (!((*info->callbacks->undefined_symbol)
-                   (info, name, input_bfd, input_section,
-                    rel->r_offset, TRUE)))
-               return FALSE;
+             (*info->callbacks->undefined_symbol)
+               (info, name, input_bfd, input_section, rel->r_offset, TRUE);
              break;
 
            case bfd_reloc_outofrange:
@@ -11619,10 +11780,8 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
 
            common_error:
              BFD_ASSERT (error_message != NULL);
-             if (!((*info->callbacks->reloc_dangerous)
-                   (info, error_message, input_bfd, input_section,
-                    rel->r_offset)))
-               return FALSE;
+             (*info->callbacks->reloc_dangerous)
+               (info, error_message, input_bfd, input_section, rel->r_offset);
              break;
            }
        }