Delete relocations associated with deleted exidx entries.
[deliverable/binutils-gdb.git] bfd/elf32-arm.c
index 834d61506526054c653a8fd70b36b453ac676293..c58b65d0461c65359e5c0cc28b7965868a211cb3 100644
@@ -2138,6 +2138,8 @@ typedef unsigned short int insn16;
 
 #define STUB_ENTRY_NAME   "__%s_veneer"
 
+#define CMSE_PREFIX "__acle_se_"
+
 /* The name of the dynamic interpreter.  This is put in the .interp
    section.  */
 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
@@ -2561,6 +2563,13 @@ static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
 };
 
+/* Stub used for transition to secure state (aka SG veneer).  */
+static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
+{
+  THUMB32_INSN (0xe97fe97f),           /* sg.  */
+  THUMB32_B_INSN (0xf000b800, -4),     /* b.w original_branch_dest.  */
+};
+
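For reference, the two template entries above expand to an 8-byte veneer: an SG instruction (0xe97fe97f) followed by a Thumb-2 B.W whose branch offset is later fixed up to reach the original branch destination. The sketch below is illustrative only (the helper is not part of BFD); it assumes a little-endian image, with each 32-bit Thumb instruction stored as two consecutive little-endian halfwords, most significant halfword first, as the stub emitter writes them on such targets.

#include <stdint.h>

/* Illustrative helper, not part of BFD: check whether the 8 bytes at P
   match the SG veneer template above, i.e. an SG instruction followed by
   a Thumb-2 B.W with an arbitrary branch offset.  */
int
looks_like_sg_veneer (const uint8_t *p)
{
  /* Reassemble each 32-bit Thumb instruction from its two halfwords.  */
  uint32_t insn1 = ((uint32_t) (p[0] | (p[1] << 8)) << 16) | (p[2] | (p[3] << 8));
  uint32_t insn2 = ((uint32_t) (p[4] | (p[5] << 8)) << 16) | (p[6] | (p[7] << 8));

  return insn1 == 0xe97fe97f                      /* sg */
         && (insn2 & 0xf800d000) == 0xf0009000;   /* b.w, offset bits masked */
}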
 
 /* Cortex-A8 erratum-workaround stubs.  */
 
@@ -2640,6 +2649,7 @@ static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
   DEF_STUB(long_branch_arm_nacl) \
   DEF_STUB(long_branch_arm_nacl_pic) \
+  DEF_STUB(cmse_branch_thumb_only) \
   DEF_STUB(a8_veneer_b_cond) \
   DEF_STUB(a8_veneer_b) \
   DEF_STUB(a8_veneer_bl) \
@@ -3134,6 +3144,14 @@ struct elf32_arm_link_hash_table
   /* True if the target uses REL relocations.  */
   int use_rel;
 
+  /* Nonzero if import library must be a secure gateway import library
+     as per ARMv8-M Security Extensions.  */
+  int cmse_implib;
+
+  /* The import library whose symbols' addresses must remain stable in
+     the generated import library.  */
+  bfd *in_implib_bfd;
+
   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
   bfd_vma next_tls_desc_index;
 
@@ -3192,6 +3210,13 @@ struct elf32_arm_link_hash_table
      information on stub grouping.  */
   struct map_stub *stub_group;
 
+  /* Input stub section holding secure gateway veneers.  */
+  asection *cmse_stub_sec;
+
+  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
+     start to be allocated.  */
+  bfd_vma new_cmse_stub_offset;
+
   /* Number of elements in stub_group.  */
   unsigned int top_id;
 
@@ -3340,12 +3365,16 @@ elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
    union and *ARM_PLT at the ARM-specific information.  */
 
 static bfd_boolean
-elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
+elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
+                       struct elf32_arm_link_hash_entry *h,
                        unsigned long r_symndx, union gotplt_union **root_plt,
                        struct arm_plt_info **arm_plt)
 {
   struct arm_local_iplt_info *local_iplt;
 
+  if (globals->root.splt == NULL && globals->root.iplt == NULL)
+    return FALSE;
+
   if (h != NULL)
     {
       *root_plt = &h->root.plt;
@@ -3439,7 +3468,7 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
       /* Initialize the local fields.  */
       eh = (struct elf32_arm_stub_hash_entry *) entry;
       eh->stub_sec = NULL;
-      eh->stub_offset = 0;
+      eh->stub_offset = (bfd_vma) -1;
       eh->source_value = 0;
       eh->target_value = 0;
       eh->target_section = NULL;
@@ -3447,7 +3476,7 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
       eh->stub_type = arm_stub_none;
       eh->stub_size = 0;
       eh->stub_template = NULL;
-      eh->stub_template_size = 0;
+      eh->stub_template_size = -1;
       eh->h = NULL;
       eh->id_sec = NULL;
       eh->output_name = NULL;
@@ -3826,6 +3855,7 @@ arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
     case arm_stub_long_branch_v4t_thumb_arm_pic:
     case arm_stub_long_branch_v4t_thumb_tls_pic:
     case arm_stub_long_branch_thumb_only_pic:
+    case arm_stub_cmse_branch_thumb_only:
       return TRUE;
     case arm_stub_none:
       BFD_FAIL ();
@@ -3897,8 +3927,9 @@ arm_type_of_stub (struct bfd_link_info *info,
      the address of the appropriate trampoline.  */
   if (r_type != R_ARM_TLS_CALL
       && r_type != R_ARM_THM_TLS_CALL
-      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
-                                &root_plt, &arm_plt)
+      && elf32_arm_get_plt_info (input_bfd, globals, hash,
+                                ELF32_R_SYM (rel->r_info), &root_plt,
+                                &arm_plt)
       && root_plt->offset != (bfd_vma) -1)
     {
       asection *splt;
@@ -4213,6 +4244,7 @@ elf32_arm_get_stub_entry (const asection *input_section,
      Stub names need to include a section id, as there may well be
      more than one stub used to reach say, printf, and we need to
      distinguish between them.  */
+  BFD_ASSERT (input_section->id <= htab->top_id);
   id_sec = htab->stub_group[input_section->id].link_sec;
 
   if (h != NULL && h->stub_cache != NULL
@@ -4250,7 +4282,16 @@ arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  return FALSE;
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return TRUE;
+
+    default:
+      return FALSE;
+    }
+
+  abort ();  /* Should be unreachable.  */
 }
 
 /* Required alignment (as a power of 2) for the dedicated section holding
@@ -4264,8 +4305,19 @@ arm_dedicated_stub_output_section_required_alignment
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
-  return 0;
+  switch (stub_type)
+    {
+    /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
+       boundary.  */
+    case arm_stub_cmse_branch_thumb_only:
+      return 5;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return 0;
+    }
+
+  abort ();  /* Should be unreachable.  */
 }
 
 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
@@ -4277,8 +4329,17 @@ arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
-  return NULL;
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return ".gnu.sgstubs";
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
+
+  abort ();  /* Should be unreachable.  */
 }
 
 /* If veneers of type STUB_TYPE should go in a dedicated output section,
@@ -4286,15 +4347,23 @@ arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
    corresponding input section.  Otherwise, returns NULL.  */
 
 static asection **
-arm_dedicated_stub_input_section_ptr
-  (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
-   enum elf32_arm_stub_type stub_type)
+arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
+                                     enum elf32_arm_stub_type stub_type)
 {
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
-  return NULL;
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return &htab->cmse_stub_sec;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
+
+  abort ();  /* Should be unreachable.  */
 }
 
 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
@@ -4333,6 +4402,7 @@ elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
     }
   else
     {
+      BFD_ASSERT (section->id <= htab->top_id);
       link_sec = htab->stub_group[section->id].link_sec;
       BFD_ASSERT (link_sec != NULL);
       stub_sec_p = &htab->stub_group[section->id].stub_sec;
@@ -4407,7 +4477,7 @@ elf32_arm_add_stub (const char *stub_name, asection *section,
     }
 
   stub_entry->stub_sec = stub_sec;
-  stub_entry->stub_offset = 0;
+  stub_entry->stub_offset = (bfd_vma) -1;
   stub_entry->id_sec = link_sec;
 
   return stub_entry;
@@ -4518,6 +4588,7 @@ arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
     case arm_stub_long_branch_thumb_only_pic:
     case arm_stub_long_branch_any_tls_pic:
     case arm_stub_long_branch_v4t_thumb_tls_pic:
+    case arm_stub_cmse_branch_thumb_only:
     case arm_stub_a8_veneer_blx:
       return 4;
 
@@ -4539,7 +4610,16 @@ arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  return FALSE;
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return TRUE;
+
+    default:
+      return FALSE;
+    }
+
+  abort ();  /* Should be unreachable.  */
 }
 
 /* Returns the padding needed for the dedicated section used stubs of type
@@ -4551,7 +4631,35 @@ arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
   if (stub_type >= max_stub_type)
     abort ();  /* Should be unreachable.  */
 
-  return 0;
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return 32;
+
+    default:
+      return 0;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* If veneers of type STUB_TYPE should go in a dedicated output section,
+   returns the address of the hash table field in HTAB holding the offset at
+   which new veneers should be laid out in the stub section.  */
+
+static bfd_vma*
+arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
+                               enum elf32_arm_stub_type stub_type)
+{
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return &htab->new_cmse_stub_offset;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
 }
 
 static bfd_boolean
@@ -4559,6 +4667,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
                    void * in_arg)
 {
 #define MAXRELOCS 3
+  bfd_boolean removed_sg_veneer;
   struct elf32_arm_stub_hash_entry *stub_entry;
   struct elf32_arm_link_hash_table *globals;
   struct bfd_link_info *info;
@@ -4573,6 +4682,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
   int stub_reloc_offset[MAXRELOCS] = {0, 0};
   int nrelocs = 0;
+  int just_allocated = 0;
 
   /* Massage our args to the form they really have.  */
   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
@@ -4589,8 +4699,12 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
     /* We have to do less-strictly-aligned fixes last.  */
     return TRUE;
 
-  /* Make a note of the offset within the stubs for this entry.  */
-  stub_entry->stub_offset = stub_sec->size;
+  /* Assign a slot at the end of section if none assigned yet.  */
+  if (stub_entry->stub_offset == (bfd_vma) -1)
+    {
+      stub_entry->stub_offset = stub_sec->size;
+      just_allocated = 1;
+    }
   loc = stub_sec->contents + stub_entry->stub_offset;
 
   stub_bfd = stub_sec->owner;
@@ -4664,7 +4778,8 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
        }
     }
 
-  stub_sec->size += size;
+  if (just_allocated)
+    stub_sec->size += size;
 
   /* Stub size has already been computed in arm_size_one_stub. Check
      consistency.  */
@@ -4674,9 +4789,11 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
     sym_value |= 1;
 
-  /* Assume there is at least one and at most MAXRELOCS entries to relocate
-     in each stub.  */
-  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
+  /* Assume non-empty slots have at least one and at most MAXRELOCS entries
+     to relocate in each stub.  */
+  removed_sg_veneer =
+    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
 
   for (i = 0; i < nrelocs; i++)
     {
@@ -4778,9 +4895,17 @@ arm_size_one_stub (struct bfd_hash_entry *gen_entry,
   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
                                      &template_size);
 
-  stub_entry->stub_size = size;
-  stub_entry->stub_template = template_sequence;
-  stub_entry->stub_template_size = template_size;
+  /* Initialized to -1.  A zero size indicates an empty slot full of zeros.  */
+  if (stub_entry->stub_template_size)
+    {
+      stub_entry->stub_size = size;
+      stub_entry->stub_template = template_sequence;
+      stub_entry->stub_template_size = template_size;
+    }
+
+  /* Already accounted for.  */
+  if (stub_entry->stub_offset != (bfd_vma) -1)
+    return TRUE;
 
   size = (size + 7) & ~7;
   stub_entry->stub_sec->size += size;
@@ -5345,10 +5470,10 @@ cortex_a8_erratum_scan (bfd *input_bfd,
    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
    TRUE and the stub entry is initialized.
 
-   Returns whether the stub could be successfully created or updated, or FALSE
-   if an error occured.  */
+   Returns the stub that was created or updated, or NULL if an error
+   occurred.  */
 
-static bfd_boolean
+static struct elf32_arm_stub_hash_entry *
 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
                       enum elf32_arm_stub_type stub_type, asection *section,
                       Elf_Internal_Rela *irela, asection *sym_sec,
@@ -5371,6 +5496,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
     {
       BFD_ASSERT (irela);
       BFD_ASSERT (section);
+      BFD_ASSERT (section->id <= htab->top_id);
 
       /* Support for grouping stub sections.  */
       id_sec = htab->stub_group[section->id].link_sec;
@@ -5379,7 +5505,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
                                       stub_type);
       if (!stub_name)
-       return FALSE;
+       return NULL;
     }
 
   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
@@ -5390,7 +5516,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
       if (!sym_claimed)
        free (stub_name);
       stub_entry->target_value = sym_value;
-      return TRUE;
+      return stub_entry;
     }
 
   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
@@ -5398,7 +5524,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
     {
       if (!sym_claimed)
        free (stub_name);
-      return FALSE;
+      return NULL;
     }
 
   stub_entry->target_value = sym_value;
@@ -5419,7 +5545,7 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
       if (stub_entry->output_name == NULL)
        {
          free (stub_name);
-         return FALSE;
+         return NULL;
        }
 
       /* For historical reasons, use the existing names for ARM-to-Thumb and
@@ -5439,9 +5565,485 @@ elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
     }
 
   *new_stub = TRUE;
+  return stub_entry;
+}
+
+/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
+   gateway veneer to transition from non-secure to secure state and create them
+   accordingly.
+
+   "ARMv8-M Security Extensions: Requirements on Development Tools" document
+   defines the conditions that govern Secure Gateway veneer creation for a
+   given symbol <SYM> as follows:
+   - it has function type
+   - it has non local binding
+   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
+     same type, binding and value as <SYM> (called normal symbol).
+   An entry function can handle secure state transition itself in which case
+   its special symbol would have a different value from the normal symbol.
+
+   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
+   entry mapping while HTAB gives the name to hash entry mapping.
+   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
+   created.
+
+   Returns FALSE if an error occurred (e.g. a stub could not be allocated),
+   TRUE otherwise.  */
+
+static bfd_boolean
+cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
+          obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
+          int *cmse_stub_created)
+{
+  const struct elf_backend_data *bed;
+  Elf_Internal_Shdr *symtab_hdr;
+  unsigned i, j, sym_count, ext_start;
+  Elf_Internal_Sym *cmse_sym, *local_syms;
+  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
+  enum arm_st_branch_type branch_type;
+  char *sym_name, *lsym_name;
+  bfd_vma sym_value;
+  asection *section;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
+
+  bed = get_elf_backend_data (input_bfd);
+  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+  ext_start = symtab_hdr->sh_info;
+  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+           && out_attr[Tag_CPU_arch_profile].i == 'M');
+
+  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
+  if (local_syms == NULL)
+    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+                                      symtab_hdr->sh_info, 0, NULL, NULL,
+                                      NULL);
+  if (symtab_hdr->sh_info && local_syms == NULL)
+    return FALSE;
+
+  /* Scan symbols.  */
+  for (i = 0; i < sym_count; i++)
+    {
+      cmse_invalid = FALSE;
+
+      if (i < ext_start)
+       {
+         cmse_sym = &local_syms[i];
+         /* Not a special symbol.  */
+         if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
+           continue;
+         sym_name = bfd_elf_string_from_elf_section (input_bfd,
+                                                     symtab_hdr->sh_link,
+                                                     cmse_sym->st_name);
+         /* Special symbol with local binding.  */
+         cmse_invalid = TRUE;
+       }
+      else
+       {
+         cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+         sym_name = (char *) cmse_hash->root.root.root.string;
+
+         /* Not a special symbol.  */
+         if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+           continue;
+
+         /* Special symbol has incorrect binding or type.  */
+         if ((cmse_hash->root.root.type != bfd_link_hash_defined
+              && cmse_hash->root.root.type != bfd_link_hash_defweak)
+             || cmse_hash->root.type != STT_FUNC)
+           cmse_invalid = TRUE;
+       }
+
+      if (!is_v8m)
+       {
+         (*_bfd_error_handler) (_("%B: Special symbol `%s' only allowed for "
+                                  "ARMv8-M architecture or later."),
+                                input_bfd, sym_name);
+         is_v8m = TRUE; /* Avoid multiple warnings.  */
+         ret = FALSE;
+       }
+
+      if (cmse_invalid)
+       {
+         (*_bfd_error_handler) (_("%B: invalid special symbol `%s'."),
+                                input_bfd, sym_name);
+         (*_bfd_error_handler) (_("It must be a global or weak function "
+                                  "symbol."));
+         ret = FALSE;
+         if (i < ext_start)
+           continue;
+       }
+
+      sym_name += strlen (CMSE_PREFIX);
+      hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
+
+      /* No associated normal symbol or it is neither global nor weak.  */
+      if (!hash
+         || (hash->root.root.type != bfd_link_hash_defined
+             && hash->root.root.type != bfd_link_hash_defweak)
+         || hash->root.type != STT_FUNC)
+       {
+         /* Initialize here to avoid warning about use of possibly
+            uninitialized variable.  */
+         j = 0;
+
+         if (!hash)
+           {
+             /* Searching for a normal symbol with local binding.  */
+             for (; j < ext_start; j++)
+               {
+                 lsym_name =
+                   bfd_elf_string_from_elf_section (input_bfd,
+                                                    symtab_hdr->sh_link,
+                                                    local_syms[j].st_name);
+                 if (!strcmp (sym_name, lsym_name))
+                   break;
+               }
+           }
+
+         if (hash || j < ext_start)
+           {
+             (*_bfd_error_handler)
+               (_("%B: invalid standard symbol `%s'."), input_bfd, sym_name);
+             (*_bfd_error_handler)
+               (_("It must be a global or weak function symbol."));
+           }
+         else
+           (*_bfd_error_handler)
+             (_("%B: absent standard symbol `%s'."), input_bfd, sym_name);
+         ret = FALSE;
+         if (!hash)
+           continue;
+       }
+
+      sym_value = hash->root.root.u.def.value;
+      section = hash->root.root.u.def.section;
+
+      if (cmse_hash->root.root.u.def.section != section)
+       {
+         (*_bfd_error_handler)
+           (_("%B: `%s' and its special symbol are in different sections."),
+            input_bfd, sym_name);
+         ret = FALSE;
+       }
+      if (cmse_hash->root.root.u.def.value != sym_value)
+       continue; /* Ignore: could be an entry function starting with SG.  */
+
+       /* If this section is a link-once section that will be discarded, then
+          don't create any stubs.  */
+      if (section->output_section == NULL)
+       {
+         (*_bfd_error_handler)
+           (_("%B: entry function `%s' not output."), input_bfd, sym_name);
+         continue;
+       }
+
+      if (hash->root.size == 0)
+       {
+         (*_bfd_error_handler)
+           (_("%B: entry function `%s' is empty."), input_bfd, sym_name);
+         ret = FALSE;
+       }
+
+      if (!ret)
+       continue;
+      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
+      stub_entry
+       = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+                                NULL, NULL, section, hash, sym_name,
+                                sym_value, branch_type, &new_stub);
+
+      if (stub_entry == NULL)
+        ret = FALSE;
+      else
+       {
+         BFD_ASSERT (new_stub);
+         (*cmse_stub_created)++;
+       }
+    }
+
+  if (!symtab_hdr->contents)
+    free (local_syms);
+  return ret;
+}
+
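For context, the symbol pairs cmse_scan pairs up are typically produced by the compiler rather than written by hand. A minimal sketch, assuming a CMSE-aware compiler such as GCC invoked with -mcmse (file and function names are illustrative):

/* secure_counter.c -- illustrative only.
   Built with e.g.:  arm-none-eabi-gcc -mcmse -mcpu=cortex-m33 -c secure_counter.c
   The resulting object normally contains two global function symbols with
   the same value, "get_counter" and "__acle_se_get_counter", which is the
   normal/special pair matched above via CMSE_PREFIX.  */

static int counter;

int __attribute__ ((cmse_nonsecure_entry))
get_counter (void)
{
  return counter;
}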
+/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
+   code entry function, i.e. can be called from non-secure code without using a
+   veneer.  */
+
+static bfd_boolean
+cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
+{
+  bfd_byte contents[4];
+  uint32_t first_insn;
+  asection *section;
+  file_ptr offset;
+  bfd *abfd;
+
+  /* Defined symbol of function type.  */
+  if (hash->root.root.type != bfd_link_hash_defined
+      && hash->root.root.type != bfd_link_hash_defweak)
+    return FALSE;
+  if (hash->root.type != STT_FUNC)
+    return FALSE;
+
+  /* Read first instruction.  */
+  section = hash->root.root.u.def.section;
+  abfd = section->owner;
+  offset = hash->root.root.u.def.value - section->vma;
+  if (!bfd_get_section_contents (abfd, section, contents, offset,
+                                sizeof (contents)))
+    return FALSE;
+
+  first_insn = bfd_get_32 (abfd, contents);
+
+  /* Starts with an SG instruction.  */
+  return first_insn == 0xe97fe97f;
+}
+
+/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
+   secure gateway veneer (i.e. the veneer was not in the input import library)
+   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
+
+static bfd_boolean
+arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
+{
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  struct bfd_link_info *info;
+
+  /* Massage our args to the form they really have.  */
+  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+  info = (struct bfd_link_info *) gen_info;
+
+  if (info->out_implib_bfd)
+    return TRUE;
+
+  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
+    return TRUE;
+
+  if (stub_entry->stub_offset == (bfd_vma) -1)
+    (*_bfd_error_handler) ("  %s", stub_entry->output_name);
+
   return TRUE;
 }
 
+/* Set the offset of each secure gateway veneer so that its address remains
+   identical to the one in the input import library referred to by
+   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
+   (present in the input import library but absent from the executable being
+   linked) or if new veneers appeared and there is no output import library
+   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
+   number of secure gateway veneers found in the input import library).
+
+   The function returns TRUE on success and FALSE if an error occurred.  On
+   success, *CMSE_STUB_CREATED gives the number of SG veneers created by both
+   cmse_scan and this function, and HTAB->new_cmse_stub_offset is set to the
+   offset just past the highest veneer found, so that new veneers are laid
+   out after the existing ones.  */
+
+static bfd_boolean
+set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
+                                 struct elf32_arm_link_hash_table *htab,
+                                 int *cmse_stub_created)
+{
+  long symsize;
+  char *sym_name;
+  flagword flags;
+  long i, symcount;
+  bfd *in_implib_bfd;
+  asection *stub_out_sec;
+  bfd_boolean ret = TRUE;
+  Elf_Internal_Sym *intsym;
+  const char *out_sec_name;
+  bfd_size_type cmse_stub_size;
+  asymbol **sympp = NULL, *sym;
+  struct elf32_arm_link_hash_entry *hash;
+  const insn_sequence *cmse_stub_template;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
+  bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
+  bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
+
+  /* No input secure gateway import library.  */
+  if (!htab->in_implib_bfd)
+    return TRUE;
+
+  in_implib_bfd = htab->in_implib_bfd;
+  if (!htab->cmse_implib)
+    {
+      (*_bfd_error_handler) (_("%B: --in-implib only supported for Secure "
+                              "Gateway import libraries."), in_implib_bfd);
+      return FALSE;
+    }
+
+  /* Get symbol table size.  */
+  symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
+  if (symsize < 0)
+    return FALSE;
+
+  /* Read in the input secure gateway import library's symbol table.  */
+  sympp = (asymbol **) xmalloc (symsize);
+  symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
+  if (symcount < 0)
+    {
+      ret = FALSE;
+      goto free_sym_buf;
+    }
+
+  htab->new_cmse_stub_offset = 0;
+  cmse_stub_size =
+    find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
+                                &cmse_stub_template,
+                                &cmse_stub_template_size);
+  out_sec_name =
+    arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
+  stub_out_sec =
+    bfd_get_section_by_name (htab->obfd, out_sec_name);
+  if (stub_out_sec != NULL)
+    cmse_stub_sec_vma = stub_out_sec->vma;
+
+  /* Set addresses of veneers mentioned in the input secure gateway import
+     library's symbol table.  */
+  for (i = 0; i < symcount; i++)
+    {
+      sym = sympp[i];
+      flags = sym->flags;
+      sym_name = (char *) bfd_asymbol_name (sym);
+      intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
+
+      if (sym->section != bfd_abs_section_ptr
+         || !(flags & (BSF_GLOBAL | BSF_WEAK))
+         || (flags & BSF_FUNCTION) != BSF_FUNCTION
+         || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
+             != ST_BRANCH_TO_THUMB))
+       {
+         (*_bfd_error_handler) (_("%B: invalid import library entry: `%s'."),
+                                in_implib_bfd, sym_name);
+         (*_bfd_error_handler) (_("Symbol should be absolute, global and "
+                                  "refer to Thumb functions."));
+         ret = FALSE;
+         continue;
+       }
+
+      veneer_value = bfd_asymbol_value (sym);
+      stub_offset = veneer_value - cmse_stub_sec_vma;
+      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
+                                        FALSE, FALSE);
+      hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
+
+      /* A stub entry should have been created by cmse_scan, or the symbol
+        should be that of a secure function callable from non-secure code.  */
+      if (!stub_entry && !hash)
+       {
+         bfd_boolean new_stub;
+
+         (*_bfd_error_handler)
+           (_("Entry function `%s' disappeared from secure code."), sym_name);
+         hash = (struct elf32_arm_link_hash_entry *)
+           elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
+         stub_entry
+           = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+                                    NULL, NULL, bfd_abs_section_ptr, hash,
+                                    sym_name, veneer_value,
+                                    ST_BRANCH_TO_THUMB, &new_stub);
+         if (stub_entry == NULL)
+           {
+             ret = FALSE;
+             continue;
+           }
+
+         BFD_ASSERT (new_stub);
+         new_cmse_stubs_created++;
+         (*cmse_stub_created)++;
+         stub_entry->stub_template_size = stub_entry->stub_size = 0;
+         stub_entry->stub_offset = stub_offset;
+       }
+      /* Symbol found is not callable from non-secure code.  */
+      else if (!stub_entry)
+       {
+         if (!cmse_entry_fct_p (hash))
+           {
+             (*_bfd_error_handler) (_("`%s' refers to a non entry function."),
+                                    sym_name);
+             ret = FALSE;
+           }
+         continue;
+       }
+      else
+       {
+         /* Only stubs for SG veneers should have been created.  */
+         BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+
+         /* Check visibility hasn't changed.  */
+         if (!!(flags & BSF_GLOBAL)
+             != (hash->root.root.type == bfd_link_hash_defined))
+           (*_bfd_error_handler)
+             (_("%B: visibility of symbol `%s' has changed."), in_implib_bfd,
+              sym_name);
+
+         stub_entry->stub_offset = stub_offset;
+       }
+
+      /* Size should match that of a SG veneer.  */
+      if (intsym->st_size != cmse_stub_size)
+       {
+         (*_bfd_error_handler) (_("%B: incorrect size for symbol `%s'."),
+                                in_implib_bfd, sym_name);
+         ret = FALSE;
+       }
+
+      /* Previous veneer address is before current SG veneer section.  */
+      if (veneer_value < cmse_stub_sec_vma)
+       {
+         /* Avoid offset underflow.  */
+         if (stub_entry)
+           stub_entry->stub_offset = 0;
+         stub_offset = 0;
+         ret = FALSE;
+       }
+
+      /* Complain if stub offset not a multiple of stub size.  */
+      if (stub_offset % cmse_stub_size)
+       {
+         (*_bfd_error_handler)
+           (_("Offset of veneer for entry function `%s' not a multiple of "
+              "its size."), sym_name);
+         ret = FALSE;
+       }
+
+      if (!ret)
+       continue;
+
+      new_cmse_stubs_created--;
+      if (veneer_value < cmse_stub_array_start)
+       cmse_stub_array_start = veneer_value;
+      next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
+      if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
+       htab->new_cmse_stub_offset = next_cmse_stub_offset;
+    }
+
+  if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
+    {
+      BFD_ASSERT (new_cmse_stubs_created > 0);
+      (*_bfd_error_handler)
+       (_("new entry function(s) introduced but no output import library "
+          "specified:"));
+      bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
+    }
+
+  if (cmse_stub_array_start != cmse_stub_sec_vma)
+    {
+      (*_bfd_error_handler)
+       (_("Start address of `%s' is different from previous link."),
+        out_sec_name);
+      ret = FALSE;
+    }
+
+free_sym_buf:
+  free (sympp);
+  return ret;
+}
+
 /* Determine and set the size of the stub section for a final link.
 
    The basic idea here is to examine all the relocations looking for
@@ -5458,8 +6060,11 @@ elf32_arm_size_stubs (bfd *output_bfd,
                                                      unsigned int),
                      void (*layout_sections_again) (void))
 {
+  bfd_boolean ret = TRUE;
+  obj_attribute *out_attr;
+  int cmse_stub_created = 0;
   bfd_size_type stub_group_size;
-  bfd_boolean stubs_always_after_branch;
+  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
   struct a8_erratum_fix *a8_fixes = NULL;
   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
@@ -5488,6 +6093,9 @@ elf32_arm_size_stubs (bfd *output_bfd,
   htab->layout_sections_again = layout_sections_again;
   stubs_always_after_branch = group_size < 0;
 
+  out_attr = elf_known_obj_attributes_proc (output_bfd);
+  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
+
   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
      as the first half of a 32-bit branch straddling two 4K pages.  This is a
      crude way of enforcing that.  */
@@ -5553,6 +6161,21 @@ elf32_arm_size_stubs (bfd *output_bfd,
          if (symtab_hdr->sh_info == 0)
            continue;
 
+         /* Limit the symbol scan to object files whose profile is
+            Microcontroller, so as not to hinder performance in the general
+            case.  */
+         if (m_profile && first_veneer_scan)
+           {
+             struct elf_link_hash_entry **sym_hashes;
+
+             sym_hashes = elf_sym_hashes (input_bfd);
+             if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
+                             &cmse_stub_created))
+               goto error_ret_free_local;
+
+             if (cmse_stub_created != 0)
+               stub_changed = TRUE;
+           }
+
          /* Walk over each section attached to the input bfd.  */
          for (section = input_bfd->sections;
               section != NULL;
@@ -5782,6 +6405,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
                  do
                    {
                      bfd_boolean new_stub;
+                     struct elf32_arm_stub_hash_entry *stub_entry;
 
                      /* Determine what (if any) linker stub is needed.  */
                      stub_type = arm_type_of_stub (info, section, irela,
@@ -5793,12 +6417,13 @@ elf32_arm_size_stubs (bfd *output_bfd,
 
                      /* We've either created a stub for this reloc already,
                         or we are about to.  */
-                     created_stub =
+                     stub_entry =
                        elf32_arm_create_stub (htab, stub_type, section, irela,
                                               sym_sec, hash,
                                               (char *) sym_name, sym_value,
                                               branch_type, &new_stub);
 
+                     created_stub = stub_entry != NULL;
                      if (!created_stub)
                        goto error_ret_free_internal;
                      else if (!new_stub)
@@ -5880,6 +6505,11 @@ elf32_arm_size_stubs (bfd *output_bfd,
            }
        }
 
+      if (first_veneer_scan
+         && !set_cmse_veneer_addr_from_implib (info, htab,
+                                               &cmse_stub_created))
+       ret = FALSE;
+
       if (prev_num_a8_fixes != num_a8_fixes)
        stub_changed = TRUE;
 
@@ -5899,6 +6529,24 @@ elf32_arm_size_stubs (bfd *output_bfd,
          stub_sec->size = 0;
        }
 
+      /* Add new SG veneers after those already in the input import
+        library.  */
+      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
+          stub_type++)
+       {
+         bfd_vma *start_offset_p;
+         asection **stub_sec_p;
+
+         start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+         stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+         if (start_offset_p == NULL)
+           continue;
+
+         BFD_ASSERT (stub_sec_p != NULL);
+         if (*stub_sec_p != NULL)
+           (*stub_sec_p)->size = *start_offset_p;
+       }
+
       /* Compute stub section size, considering padding.  */
       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
@@ -5939,6 +6587,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
 
       /* Ask the linker to do its stuff.  */
       (*htab->layout_sections_again) ();
+      first_veneer_scan = FALSE;
     }
 
   /* Add stubs for Cortex-A8 erratum fixes now.  */
@@ -5966,7 +6615,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
            }
 
          stub_entry->stub_sec = stub_sec;
-         stub_entry->stub_offset = 0;
+         stub_entry->stub_offset = (bfd_vma) -1;
          stub_entry->id_sec = link_sec;
          stub_entry->stub_type = a8_fixes[i].stub_type;
          stub_entry->source_value = a8_fixes[i].offset;
@@ -5994,7 +6643,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
       htab->a8_erratum_fixes = NULL;
       htab->num_a8_erratum_fixes = 0;
     }
-  return TRUE;
+  return ret;
 }
 
 /* Build all the stubs associated with the current output file.  The
@@ -6008,6 +6657,7 @@ elf32_arm_build_stubs (struct bfd_link_info *info)
 {
   asection *stub_sec;
   struct bfd_hash_table *table;
+  enum elf32_arm_stub_type stub_type;
   struct elf32_arm_link_hash_table *htab;
 
   htab = elf32_arm_hash_table (info);
@@ -6025,14 +6675,33 @@ elf32_arm_build_stubs (struct bfd_link_info *info)
        continue;
 
       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
-        must at least be done for stub section requiring padding.  */
+        must at least be done for stub sections requiring padding and for SG
+        veneers, to ensure that non-secure code branching to a removed SG
+        veneer causes an error.  */
       size = stub_sec->size;
       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
       if (stub_sec->contents == NULL && size != 0)
        return FALSE;
+
       stub_sec->size = 0;
     }
 
+  /* Add new SG veneers after those already in the input import library.  */
+  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
+    {
+      bfd_vma *start_offset_p;
+      asection **stub_sec_p;
+
+      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+      if (start_offset_p == NULL)
+       continue;
+
+      BFD_ASSERT (stub_sec_p != NULL);
+      if (*stub_sec_p != NULL)
+       (*stub_sec_p)->size = *start_offset_p;
+    }
+
   /* Build the stubs as directed by the stub hash table.  */
   table = &htab->stub_hash_table;
   bfd_hash_traverse (table, arm_build_one_stub, info);
@@ -7923,8 +8592,8 @@ bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
                     if the instruction is not the last instruction of
                     an IT block, we cannot create a jump there, so we
                     bail out.  */
-                   if ((is_ldm || is_vldm) &&
-                       stm32l4xx_need_create_replacing_stub
+                   if ((is_ldm || is_vldm)
+                       && stm32l4xx_need_create_replacing_stub
                        (insn, globals->stm32l4xx_fix))
                      {
                        if (is_not_last_in_it_block)
@@ -7972,8 +8641,8 @@ bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
                     There can be no nested IT blocks so an IT block
                     is naturally a new one for which it is worth
                     computing its size.  */
-                 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
-                   ((insn & 0x000f) != 0x0000);
+                 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
+                   && ((insn & 0x000f) != 0x0000);
                  /* If we have a new IT block we compute its size.  */
                  if (is_newitblock)
                    {
@@ -8008,17 +8677,9 @@ error_return:
 /* Set target relocation values needed during linking.  */
 
 void
-bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
+bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
                                 struct bfd_link_info *link_info,
-                                int target1_is_rel,
-                                char * target2_type,
-                                int fix_v4bx,
-                                int use_blx,
-                                bfd_arm_vfp11_fix vfp11_fix,
-                                bfd_arm_stm32l4xx_fix stm32l4xx_fix,
-                                int no_enum_warn, int no_wchar_warn,
-                                int pic_veneer, int fix_cortex_a8,
-                                int fix_arm1176)
+                                struct elf32_arm_params *params)
 {
   struct elf32_arm_link_hash_table *globals;
 
@@ -8026,29 +8687,33 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
   if (globals == NULL)
     return;
 
-  globals->target1_is_rel = target1_is_rel;
-  if (strcmp (target2_type, "rel") == 0)
+  globals->target1_is_rel = params->target1_is_rel;
+  if (strcmp (params->target2_type, "rel") == 0)
     globals->target2_reloc = R_ARM_REL32;
-  else if (strcmp (target2_type, "abs") == 0)
+  else if (strcmp (params->target2_type, "abs") == 0)
     globals->target2_reloc = R_ARM_ABS32;
-  else if (strcmp (target2_type, "got-rel") == 0)
+  else if (strcmp (params->target2_type, "got-rel") == 0)
     globals->target2_reloc = R_ARM_GOT_PREL;
   else
     {
       _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
-                         target2_type);
+                         params->target2_type);
     }
-  globals->fix_v4bx = fix_v4bx;
-  globals->use_blx |= use_blx;
-  globals->vfp11_fix = vfp11_fix;
-  globals->stm32l4xx_fix = stm32l4xx_fix;
-  globals->pic_veneer = pic_veneer;
-  globals->fix_cortex_a8 = fix_cortex_a8;
-  globals->fix_arm1176 = fix_arm1176;
+  globals->fix_v4bx = params->fix_v4bx;
+  globals->use_blx |= params->use_blx;
+  globals->vfp11_fix = params->vfp11_denorm_fix;
+  globals->stm32l4xx_fix = params->stm32l4xx_fix;
+  globals->pic_veneer = params->pic_veneer;
+  globals->fix_cortex_a8 = params->fix_cortex_a8;
+  globals->fix_arm1176 = params->fix_arm1176;
+  globals->cmse_implib = params->cmse_implib;
+  globals->in_implib_bfd = params->in_implib_bfd;
 
   BFD_ASSERT (is_arm_elf (output_bfd));
-  elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
-  elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
+  elf_arm_tdata (output_bfd)->no_enum_size_warning
+    = params->no_enum_size_warning;
+  elf_arm_tdata (output_bfd)->no_wchar_size_warning
+    = params->no_wchar_size_warning;
 }
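As a sketch of the caller side of this refactoring (the real caller lives in ld's ARM emulation; only fields referenced in the function above are shown, struct elf32_arm_params is assumed to be visible through bfd.h, and all other fields are left zeroed):

#include <string.h>
#include "bfd.h"
#include "bfdlink.h"

/* Hypothetical helper showing how a linker front end might drive the new
   single-parameter interface; the values chosen are illustrative.  */
static void
configure_arm_backend (bfd *output_bfd, struct bfd_link_info *link_info,
                       bfd *in_implib_bfd)
{
  struct elf32_arm_params params;

  memset (&params, 0, sizeof (params));
  params.target2_type = "rel";          /* Same choices as before: rel, abs, got-rel.  */
  params.fix_cortex_a8 = 1;
  params.cmse_implib = 1;               /* Secure gateway import library.  */
  params.in_implib_bfd = in_implib_bfd; /* May be NULL on a first link.  */

  bfd_elf32_arm_set_target_params (output_bfd, link_info, &params);
}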
 
 /* Replace the target offset of a Thumb bl or b.w instruction.  */
@@ -9280,7 +9945,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
      VALUE appropriately for relocations that we resolve at link time.  */
   has_iplt_entry = FALSE;
-  if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
+  if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
+                             &arm_plt)
       && root_plt->offset != (bfd_vma) -1)
     {
       plt_offset = root_plt->offset;
@@ -10400,9 +11066,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                {
                  if (dynreloc_st_type == STT_GNU_IFUNC)
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
-                 else if (bfd_link_pic (info) &&
-                          (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-                           || h->root.type != bfd_link_hash_undefweak))
+                 else if (bfd_link_pic (info)
+                          && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
+                              || h->root.type != bfd_link_hash_undefweak))
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
                  else
                    outrel.r_info = 0;
@@ -10434,8 +11100,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
        {
          bfd_vma off;
 
-         BFD_ASSERT (local_got_offsets != NULL &&
-                     local_got_offsets[r_symndx] != (bfd_vma) -1);
+         BFD_ASSERT (local_got_offsets != NULL
+                     && local_got_offsets[r_symndx] != (bfd_vma) -1);
 
          off = local_got_offsets[r_symndx];
 
@@ -13693,7 +14359,8 @@ elf32_arm_gc_sweep_hook (bfd *                     abfd,
        }
 
       if (may_need_local_target_p
-         && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
+         && elf32_arm_get_plt_info (abfd, globals, eh, r_symndx, &root_plt,
+                                    &arm_plt))
        {
          /* If PLT refcount book-keeping is wrong and too low, we'll
             see a zero value (going to -1) for the root PLT reference
@@ -14161,7 +14828,11 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
 }
 
 /* Unwinding tables are not referenced directly.  This pass marks them as
-   required if the corresponding code section is marked.  */
+   required if the corresponding code section is marked.  Similarly, ARMv8-M
+   secure entry functions can only be referenced by SG veneers, which are
+   created after the GC process.  They need to be marked in case they reside
+   in their own sections (as would be the case if code was compiled with
+   -ffunction-sections).  */
 
 static bfd_boolean
 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
@@ -14169,10 +14840,21 @@ elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
 {
   bfd *sub;
   Elf_Internal_Shdr **elf_shdrp;
-  bfd_boolean again;
+  asection *cmse_sec;
+  obj_attribute *out_attr;
+  Elf_Internal_Shdr *symtab_hdr;
+  unsigned i, sym_count, ext_start;
+  const struct elf_backend_data *bed;
+  struct elf_link_hash_entry **sym_hashes;
+  struct elf32_arm_link_hash_entry *cmse_hash;
+  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
 
   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
 
+  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
+  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+          && out_attr[Tag_CPU_arch_profile].i == 'M';
+
   /* Marking EH data may cause additional code sections to be marked,
      requiring multiple passes.  */
   again = TRUE;
@@ -14203,7 +14885,35 @@ elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
                    return FALSE;
                }
            }
+
+         /* Mark sections holding ARMv8-M secure entry functions.  We mark
+            all of them so there is no need for a second pass.  */
+         if (is_v8m && first_bfd_browse)
+           {
+             sym_hashes = elf_sym_hashes (sub);
+             bed = get_elf_backend_data (sub);
+             symtab_hdr = &elf_tdata (sub)->symtab_hdr;
+             sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+             ext_start = symtab_hdr->sh_info;
+
+             /* Scan symbols.  */
+             for (i = ext_start; i < sym_count; i++)
+               {
+                 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+
+                 /* Assume it is a special symbol.  If not, cmse_scan will
+                    warn about it and the user can do something about it.  */
+                 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+                   {
+                     cmse_sec = cmse_hash->root.root.u.def.section;
+                     if (!cmse_sec->gc_mark
+                         && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
+                       return FALSE;
+                   }
+               }
+           }
        }
+      first_bfd_browse = FALSE;
     }
 
   return TRUE;
@@ -15240,9 +15950,9 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
              || !add_dynamic_entry (DT_JMPREL, 0))
            return FALSE;
 
-         if (htab->dt_tlsdesc_plt &&
-               (!add_dynamic_entry (DT_TLSDESC_PLT,0)
-                || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
+         if (htab->dt_tlsdesc_plt
+             && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
+                 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
            return FALSE;
        }
 
@@ -16521,6 +17231,95 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
   return TRUE;
 }
 
+/* Filter normal symbols of CMSE entry functions of ABFD to include in
+   the import library.  All SYMCOUNT symbols of ABFD can be examined
+   from their pointers in SYMS.  Pointers of symbols to keep should be
+   stored contiguously at the beginning of that array.
+
+   Returns the number of symbols to keep.  */
+
+static unsigned int
+elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
+                              struct bfd_link_info *info,
+                              asymbol **syms, long symcount)
+{
+  size_t maxnamelen;
+  char *cmse_name;
+  long src_count, dst_count = 0;
+  struct elf32_arm_link_hash_table *htab;
+
+  htab = elf32_arm_hash_table (info);
+  if (!htab->stub_bfd || !htab->stub_bfd->sections)
+    symcount = 0;
+
+  maxnamelen = 128;
+  cmse_name = (char *) bfd_malloc (maxnamelen);
+  for (src_count = 0; src_count < symcount; src_count++)
+    {
+      struct elf32_arm_link_hash_entry *cmse_hash;
+      asymbol *sym;
+      flagword flags;
+      char *name;
+      size_t namelen;
+
+      sym = syms[src_count];
+      flags = sym->flags;
+      name = (char *) bfd_asymbol_name (sym);
+
+      if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
+       continue;
+      if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
+       continue;
+
+      namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
+      if (namelen > maxnamelen)
+       {
+         cmse_name = (char *)
+           bfd_realloc (cmse_name, namelen);
+         maxnamelen = namelen;
+       }
+      snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
+      cmse_hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
+
+      if (!cmse_hash
+         || (cmse_hash->root.root.type != bfd_link_hash_defined
+             && cmse_hash->root.root.type != bfd_link_hash_defweak)
+         || cmse_hash->root.type != STT_FUNC)
+       continue;
+
+      if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+       continue;
+
+      syms[dst_count++] = sym;
+    }
+  free (cmse_name);
+
+  syms[dst_count] = NULL;
+
+  return dst_count;
+}
+
+/* Filter symbols of ABFD to include in the import library.  All
+   SYMCOUNT symbols of ABFD can be examined from their pointers in
+   SYMS.  Pointers of symbols to keep should be stored contiguously at
+   the beginning of that array.
+
+   Returns the number of symbols to keep.  */
+
+static unsigned int
+elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
+                                struct bfd_link_info *info,
+                                asymbol **syms, long symcount)
+{
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+
+  if (globals->cmse_implib)
+    return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
+  else
+    return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
+}
+
 /* Allocate target specific section data.  */
 
 static bfd_boolean
@@ -17444,39 +18243,6 @@ stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
 /* End of stm32l4xx work-around.  */
 
 
-static void
-elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
-                         asection *output_sec, Elf_Internal_Rela *rel)
-{
-  BFD_ASSERT (output_sec && rel);
-  struct bfd_elf_section_reloc_data *output_reldata;
-  struct elf32_arm_link_hash_table *htab;
-  struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
-  Elf_Internal_Shdr *rel_hdr;
-
-
-  if (oesd->rel.hdr)
-    {
-      rel_hdr = oesd->rel.hdr;
-      output_reldata = &(oesd->rel);
-    }
-  else if (oesd->rela.hdr)
-    {
-      rel_hdr = oesd->rela.hdr;
-      output_reldata = &(oesd->rela);
-    }
-  else
-    {
-      abort ();
-    }
-
-  bfd_byte *erel = rel_hdr->contents;
-  erel += output_reldata->count * rel_hdr->sh_entsize;
-  htab = elf32_arm_hash_table (info);
-  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
-  output_reldata->count++;
-}
-
 /* Do code byteswapping.  Return FALSE afterwards so that the section is
    written out as normal.  */
 
@@ -17728,18 +18494,6 @@ elf32_arm_write_section (bfd *output_bfd,
                               adjust offset by hand.  */
                            prel31_offset = text_sec->output_offset
                                            + text_sec->size;
-
-                           /* New relocation entity.  */
-                           asection *text_out = text_sec->output_section;
-                           Elf_Internal_Rela rel;
-                           rel.r_addend = 0;
-                           rel.r_offset = exidx_offset;
-                           rel.r_info = ELF32_R_INFO (text_out->target_index,
-                                                      R_ARM_PREL31);
-
-                           elf32_arm_add_relocation (output_bfd, link_info,
-                                                     sec->output_section,
-                                                     &rel);
                          }
 
                        /* First address we can't unwind.  */
@@ -17855,6 +18609,9 @@ elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
 {
+  Elf_Internal_Shdr *symtab_hdr;
+  const char *name = NULL;
+
   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
     return FALSE;
   dst->st_target_internal = 0;
@@ -17883,6 +18640,13 @@ elf32_arm_swap_symbol_in (bfd * abfd,
   else
     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
 
+  /* Mark CMSE special symbols.  */
+  symtab_hdr = & elf_symtab_hdr (abfd);
+  if (symtab_hdr->sh_size)
+    name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
+  if (name && CONST_STRNEQ (name, CMSE_PREFIX))
+    ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
+
   return TRUE;
 }
 
@@ -18251,7 +19015,82 @@ elf32_arm_count_additional_relocs (asection *sec)
 {
   struct _arm_elf_section_data *arm_data;
   arm_data = get_arm_elf_section_data (sec);
-  return arm_data->additional_reloc_count;
+
+  return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
+}
+
+static unsigned int
+elf32_arm_count_output_relocs (struct bfd_link_info *  info,
+                              asection *              o,
+                              bfd_boolean             rela)
+{
+  struct bfd_elf_section_data *esdo;
+  struct bfd_link_order *p;
+  bfd_size_type count;
+
+  esdo = elf_section_data (o->output_section);
+  if (esdo->this_hdr.sh_type != SHT_ARM_EXIDX)
+    return _bfd_elf_default_count_output_relocs (info, o, rela);
+
+  /* PR 20595: Skip relocations for deleted exidx entries.  */
+  count = 0;
+  for (p = o->map_head.link_order; p != NULL; p = p->next)
+    {
+      struct _arm_elf_section_data *arm_data;
+      struct bfd_elf_section_data *esd;
+      arm_unwind_table_edit *edit_list;
+      Elf_Internal_Rela *relocs;
+      asection *sec;
+      bfd_size_type num_rel;
+      bfd_size_type num_rela;
+      unsigned int i;
+
+      if (p->type == bfd_section_reloc_link_order
+         || p->type == bfd_symbol_reloc_link_order)
+       {
+         count++;
+         continue;
+       }
+
+      sec = p->u.indirect.section;
+      arm_data = get_arm_elf_section_data (sec);
+      esd = &arm_data->elf;
+
+      if (arm_data->additional_reloc_count)
+       count += arm_data->additional_reloc_count;
+
+      edit_list = arm_data->u.exidx.unwind_edit_list;
+      if (!edit_list)
+       {
+         count += sec->reloc_count;
+         continue;
+       }
+
+      relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
+                                         info->keep_memory);
+      num_rel = esd->rel.hdr ? NUM_SHDR_ENTRIES (esd->rel.hdr) : 0;
+      num_rela = esd->rela.hdr ? NUM_SHDR_ENTRIES (esd->rela.hdr) : 0;
+      if (rela)
+       relocs += num_rel;
+
+      for (i = 0; i < (rela ? num_rela : num_rel); i++)
+       {
+         arm_unwind_table_edit *edit_node;
+         unsigned int index;
+
+         index = (relocs[i].r_offset - sec->vma) / 8;
+
+         /* Find the last edit whose index does not exceed this entry.  */
+         for (edit_node = edit_list;
+              edit_node->next && edit_node->next->index <= index;
+              edit_node = edit_node->next);
+
+         if (edit_node->type != DELETE_EXIDX_ENTRY
+             || edit_node->index != index)
+           count++;
+       }
+    }
+
+  return count;
 }
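elf32_arm_count_output_relocs above sizes the output relocation sections for SHT_ARM_EXIDX input by skipping relocations whose 8-byte entries the unwind-edit pass deleted (PR 20595). The per-relocation decision maps the relocation offset to an entry index and looks that index up in the section's edit list; a minimal sketch of that test, assuming the arm_unwind_table_edit list is sorted by ascending entry index as it is built earlier in this file, with an illustrative helper name:

/* Sketch: nonzero if the relocation at R_OFFSET inside an input
   .ARM.exidx section starting at SEC_VMA survives, given the section's
   unwind-edit list.  Each exidx entry is 8 bytes wide.  */
static int
exidx_reloc_survives (bfd_vma r_offset, bfd_vma sec_vma,
                      const arm_unwind_table_edit *edits)
{
  unsigned int entry = (r_offset - sec_vma) / 8;
  const arm_unwind_table_edit *e = edits;

  /* Find the last edit whose index does not exceed ENTRY.  */
  while (e->next && e->next->index <= entry)
    e = e->next;

  /* The relocation is dropped only when that edit deletes exactly ENTRY.  */
  return e->type != DELETE_EXIDX_ENTRY || e->index != entry;
}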
 
 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
@@ -18382,6 +19221,139 @@ elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
     sym->flags |= BSF_KEEP;
 }
 
+static bfd_boolean
+emit_relocs (bfd *                          output_bfd,
+            asection *                     input_section,
+            Elf_Internal_Shdr *            input_rel_hdr,
+            Elf_Internal_Rela *            internal_relocs,
+            struct elf_link_hash_entry **  rel_hash,
+            bfd_boolean (*                 fallback) (bfd *, asection *,
+                                                      Elf_Internal_Shdr *,
+                                                      Elf_Internal_Rela *,
+                                                      struct elf_link_hash_entry **))
+{
+  _arm_elf_section_data *arm_data;
+  struct bfd_elf_section_reloc_data *output_reldata;
+  Elf_Internal_Shdr *output_rel_hdr;
+  Elf_Internal_Rela *irela;
+  Elf_Internal_Rela *irelaend;
+  asection *output_section;
+  const struct elf_backend_data *bed;
+  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
+  struct bfd_elf_section_data *esdo;
+  arm_unwind_table_edit *edit_list, *edit_tail;
+  bfd_byte *erel;
+  bfd_vma offset;
+
+  arm_data = get_arm_elf_section_data (input_section);
+
+  if (!arm_data || arm_data->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
+    goto fallback_label;
+
+  edit_list = arm_data->u.exidx.unwind_edit_list;
+  edit_tail = arm_data->u.exidx.unwind_edit_tail;
+
+  if (!edit_list)
+    goto fallback_label;
+
+  output_section = input_section->output_section;
+  offset = output_section->vma + input_section->output_offset;
+
+  bed = get_elf_backend_data (output_bfd);
+  esdo = elf_section_data (output_section);
+  if (esdo->rel.hdr && esdo->rel.hdr->sh_entsize == input_rel_hdr->sh_entsize)
+    {
+      output_reldata = &esdo->rel;
+      swap_out = bed->s->swap_reloc_out;
+    }
+  else if (esdo->rela.hdr
+          && esdo->rela.hdr->sh_entsize == input_rel_hdr->sh_entsize)
+    {
+      output_reldata = &esdo->rela;
+      swap_out = bed->s->swap_reloca_out;
+    }
+  else
+    {
+      (*_bfd_error_handler)
+       (_("%B: relocation size mismatch in %B section %A"),
+        output_bfd, input_section->owner, input_section);
+      bfd_set_error (bfd_error_wrong_format);
+      return FALSE;
+    }
+
+  output_rel_hdr = output_reldata->hdr;
+  erel = output_rel_hdr->contents;
+  erel += output_reldata->count * input_rel_hdr->sh_entsize;
+
+  irela = internal_relocs;
+  irelaend = irela + (NUM_SHDR_ENTRIES (input_rel_hdr)
+                     * bed->s->int_rels_per_ext_rel);
+  while (irela < irelaend)
+    {
+      arm_unwind_table_edit *edit_node, *edit_next;
+      Elf_Internal_Rela rel;
+      bfd_vma bias;
+      bfd_vma index;
+
+      index = (irela->r_offset - offset) / 8;
+
+      bias = 0;
+      edit_node = edit_list;
+      for (edit_next = edit_list;
+          edit_next && edit_next->index <= index;
+          edit_next = edit_node->next)
+       {
+         bias++;
+         edit_node = edit_next;
+       }
+
+      if (edit_node->type != DELETE_EXIDX_ENTRY || edit_node->index != index)
+       {
+         rel.r_offset = irela->r_offset - bias * 8;
+         rel.r_info = irela->r_info;
+         rel.r_addend = irela->r_addend;
+
+         (*swap_out) (output_bfd, &rel, erel);
+         erel += output_rel_hdr->sh_entsize;
+         output_reldata->count++;
+       }
+
+      irela += bed->s->int_rels_per_ext_rel;
+    }
+
+  if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
+    {
+      /* Emit the R_ARM_PREL31 relocation for the CANTUNWIND terminator
+         entry appended at the end of this exidx section.  */
+      asection *text_sec = edit_tail->linked_section;
+      asection *text_out = text_sec->output_section;
+      bfd_vma exidx_offset = offset + input_section->size - 8;
+      Elf_Internal_Rela rel;
+
+      rel.r_addend = 0;
+      rel.r_offset = exidx_offset;
+      rel.r_info = ELF32_R_INFO (text_out->target_index, R_ARM_PREL31);
+      (*swap_out) (output_bfd, &rel, erel);
+      output_reldata->count++;
+    }
+
+  return TRUE;
+
+fallback_label:
+  return fallback (output_bfd, input_section, input_rel_hdr,
+                  internal_relocs, rel_hash);
+}
+
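In emit_relocs above, bias counts the edit nodes whose index is at or below a surviving relocation's entry index (for entries that keep their relocation these are the preceding deletions), and the output r_offset is lowered by 8 bytes per counted node so the relocation lands on the compacted section contents. A minimal sketch of just that adjustment, assuming the number of deleted entries ahead of the relocation has already been computed; names are illustrative:

/* Sketch: rewrite a surviving exidx relocation so that its offset
   accounts for DELETED_BEFORE earlier 8-byte entries removed from the
   same input section; the other fields are copied unchanged.  */
static Elf_Internal_Rela
adjust_surviving_reloc (const Elf_Internal_Rela *in,
                        unsigned int deleted_before)
{
  Elf_Internal_Rela out;

  out.r_offset = in->r_offset - (bfd_vma) deleted_before * 8;
  out.r_info   = in->r_info;
  out.r_addend = in->r_addend;
  return out;
}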
+static bfd_boolean
+elf32_arm_emit_relocs (bfd *                          output_bfd,
+                      asection *                     input_section,
+                      Elf_Internal_Shdr *            input_rel_hdr,
+                      Elf_Internal_Rela *            internal_relocs,
+                      struct elf_link_hash_entry **  rel_hash)
+{
+  return emit_relocs (output_bfd, input_section, input_rel_hdr, internal_relocs,
+                     rel_hash, _bfd_elf_link_output_relocs);
+}
+
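elf32_arm_emit_relocs above only specialises SHT_ARM_EXIDX sections that carry an edit list; everything else falls through to the generic _bfd_elf_link_output_relocs, and the VxWorks flavour later in this diff substitutes elf_vxworks_emit_relocs as the fallback. Before any relocation is written, emit_relocs picks the output REL or RELA bookkeeping whose entry size matches the incoming header and reports bfd_error_wrong_format on a mismatch; a minimal sketch of that selection in isolation, assuming the BFD section data types used in this file and an illustrative helper name:

/* Sketch: return the output section's relocation bookkeeping whose
   entry size matches INPUT_REL_HDR, or NULL on a size mismatch (the
   caller then reports bfd_error_wrong_format).  */
static struct bfd_elf_section_reloc_data *
pick_output_reldata (struct bfd_elf_section_data *esdo,
                     const Elf_Internal_Shdr *input_rel_hdr)
{
  if (esdo->rel.hdr
      && esdo->rel.hdr->sh_entsize == input_rel_hdr->sh_entsize)
    return &esdo->rel;
  if (esdo->rela.hdr
      && esdo->rela.hdr->sh_entsize == input_rel_hdr->sh_entsize)
    return &esdo->rela;
  return NULL;
}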
 #undef  elf_backend_copy_special_section_fields
 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
 
@@ -18412,6 +19384,7 @@ elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
 #define bfd_elf32_bfd_final_link               elf32_arm_final_link
 #define bfd_elf32_get_synthetic_symtab  elf32_arm_get_synthetic_symtab
 
+#define elf_backend_emit_relocs                        elf32_arm_emit_relocs
 #define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
 #define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
 #define elf_backend_gc_mark_extra_sections     elf32_arm_gc_mark_extra_sections
@@ -18437,9 +19410,11 @@ elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
 #define elf_backend_modify_segment_map         elf32_arm_modify_segment_map
 #define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
 #define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
+#define elf_backend_filter_implib_symbols      elf32_arm_filter_implib_symbols
 #define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
 #define elf_backend_add_symbol_hook            elf32_arm_add_symbol_hook
 #define elf_backend_count_additional_relocs    elf32_arm_count_additional_relocs
+#define elf_backend_count_output_relocs                elf32_arm_count_output_relocs
 #define elf_backend_symbol_processing          elf32_arm_backend_symbol_processing
 
 #define elf_backend_can_refcount       1
@@ -18465,9 +19440,9 @@ elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
 #define elf_backend_obj_attrs_order            elf32_arm_obj_attrs_order
 #define elf_backend_obj_attrs_handle_unknown   elf32_arm_obj_attrs_handle_unknown
 
-#undef elf_backend_section_flags
+#undef  elf_backend_section_flags
 #define elf_backend_section_flags              elf32_arm_section_flags
-#undef elf_backend_lookup_section_flags_hook
+#undef  elf_backend_lookup_section_flags_hook
 #define elf_backend_lookup_section_flags_hook   elf32_arm_lookup_section_flags
 
 #include "elf32-target.h"
@@ -18606,6 +19581,17 @@ elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
   elf_vxworks_final_write_processing (abfd, linker);
 }
 
+static bfd_boolean
+elf32_arm_vxworks_emit_relocs (bfd *                          output_bfd,
+                              asection *                     input_section,
+                              Elf_Internal_Shdr *            input_rel_hdr,
+                              Elf_Internal_Rela *            internal_relocs,
+                              struct elf_link_hash_entry **  rel_hash)
+{
+  return emit_relocs (output_bfd, input_section, input_rel_hdr, internal_relocs,
+                     rel_hash, elf_vxworks_emit_relocs);
+}
+
 #undef  elf32_bed
 #define elf32_bed elf32_arm_vxworks_bed
 
@@ -18614,7 +19600,7 @@ elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
 #undef  elf_backend_final_write_processing
 #define elf_backend_final_write_processing     elf32_arm_vxworks_final_write_processing
 #undef  elf_backend_emit_relocs
-#define elf_backend_emit_relocs                        elf_vxworks_emit_relocs
+#define elf_backend_emit_relocs                        elf32_arm_vxworks_emit_relocs
 
 #undef  elf_backend_may_use_rel_p
 #define elf_backend_may_use_rel_p      0
@@ -18977,7 +19963,8 @@ elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
 #define ELF_DYNAMIC_SEC_FLAGS \
   (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
 
-#undef elf_backend_emit_relocs
+#undef  elf_backend_emit_relocs
+#define elf_backend_emit_relocs                        elf32_arm_emit_relocs
 
 #undef  bfd_elf32_bfd_link_hash_table_create
 #define bfd_elf32_bfd_link_hash_table_create   elf32_arm_symbian_link_hash_table_create