+/* Comparison function for sorting/searching relocations relating to the
+ Cortex-A8 erratum fix. */
+
+static int
+a8_reloc_compare (const void *a, const void *b)
+{
+ const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
+ const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
+
+ if (ra->from < rb->from)
+ return -1;
+ else if (ra->from > rb->from)
+ return 1;
+ else
+ return 0;
+}
+
+static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
+ const char *, char **);
+
+/* Helper function to scan code for sequences which might trigger the Cortex-A8
+ branch/TLB erratum. Fill in the table described by A8_FIXES_P,
+ NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
+ otherwise. */
+
+static bfd_boolean
+cortex_a8_erratum_scan (bfd *input_bfd,
+ struct bfd_link_info *info,
+ struct a8_erratum_fix **a8_fixes_p,
+ unsigned int *num_a8_fixes_p,
+ unsigned int *a8_fix_table_size_p,
+ struct a8_erratum_reloc *a8_relocs,
+ unsigned int num_a8_relocs,
+ unsigned prev_num_a8_fixes,
+ bfd_boolean *stub_changed_p)
+{
+ asection *section;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
+ unsigned int num_a8_fixes = *num_a8_fixes_p;
+ unsigned int a8_fix_table_size = *a8_fix_table_size_p;
+
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ bfd_byte *contents = NULL;
+ struct _arm_elf_section_data *sec_data;
+ unsigned int span;
+ bfd_vma base_vma;
+
+ if (elf_section_type (section) != SHT_PROGBITS
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
+
+ base_vma = section->output_section->vma + section->output_offset;
+
+ if (elf_section_data (section)->this_hdr.contents != NULL)
+ contents = elf_section_data (section)->this_hdr.contents;
+ else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
+ return TRUE;
+
+ sec_data = elf32_arm_section_data (section);
+
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = (span == sec_data->mapcount - 1)
+ ? section->size : sec_data->map[span + 1].vma;
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+ bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
+
+ if (span_type != 't')
+ continue;
+
+ /* Span is entirely within a single 4KB region: skip scanning. */
+ if (((base_vma + span_start) & ~0xfff)
+ == ((base_vma + span_end) & ~0xfff))
+ continue;
+
+ /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
+
+ * The opcode is BLX.W, BL.W, B.W, Bcc.W
+ * The branch target is in the same 4KB region as the
+ first half of the branch.
+ * The instruction before the branch is a 32-bit
+ length non-branch instruction. */
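+
+ /* Illustrative example: a branch whose first halfword is at
+ address 0x2ffe has (address & 0xfff) == 0xffe, so its two
+ halfwords straddle the 0x2000/0x3000 page boundary; if it also
+ branches back into the 0x2000-0x2fff region and follows a
+ 32-bit non-branch instruction, it matches the conditions
+ tested below. */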
+ for (i = span_start; i < span_end;)
+ {
+ unsigned int insn = bfd_getl16 (&contents[i]);
+ bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
+ bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
+
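+ /* A halfword whose top five bits are 0b11101, 0b11110 or 0b11111
+ is the first half of a 32-bit Thumb-2 instruction; anything
+ else is a complete 16-bit instruction. */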
+ if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
+ insn_32bit = TRUE;
+
+ if (insn_32bit)
+ {
+ /* Load the rest of the insn (in manual-friendly order). */
+ insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
+
+ /* Encoding T4: B<c>.W. */
+ is_b = (insn & 0xf800d000) == 0xf0009000;
+ /* Encoding T1: BL<c>.W. */
+ is_bl = (insn & 0xf800d000) == 0xf000d000;
+ /* Encoding T2: BLX<c>.W. */
+ is_blx = (insn & 0xf800d000) == 0xf000c000;
+ /* Encoding T3: B<c>.W (not permitted in IT block). */
+ is_bcc = (insn & 0xf800d000) == 0xf0008000
+ && (insn & 0x07f00000) != 0x03800000;
+ }
+
+ is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
+
+ if (((base_vma + i) & 0xfff) == 0xffe
+ && insn_32bit
+ && is_32bit_branch
+ && last_was_32bit
+ && ! last_was_branch)
+ {
+ bfd_signed_vma offset;
+ bfd_boolean force_target_arm = FALSE;
+ bfd_boolean force_target_thumb = FALSE;
+ bfd_vma target;
+ enum elf32_arm_stub_type stub_type = arm_stub_none;
+ struct a8_erratum_reloc key, *found;
+
+ key.from = base_vma + i;
+ found = (struct a8_erratum_reloc *)
+ bsearch (&key, a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
+
+ if (found)
+ {
+ char *error_message = NULL;
+ struct elf_link_hash_entry *entry;
+
+ /* We don't care about the error returned from this
+ function, only whether there is glue or not. */
+ entry = find_thumb_glue (info, found->sym_name,
+ &error_message);
+
+ if (entry)
+ found->non_a8_stub = TRUE;
+
+ if (found->r_type == R_ARM_THM_CALL
+ && found->st_type != STT_ARM_TFUNC)
+ force_target_arm = TRUE;
+ else if (found->r_type == R_ARM_THM_CALL
+ && found->st_type == STT_ARM_TFUNC)
+ force_target_thumb = TRUE;
+ }
+
+ /* Check if we have an offending branch instruction. */
+
+ if (found && found->non_a8_stub)
+ /* We've already made a stub for this instruction, e.g.
+ it's a long branch or a Thumb->ARM stub. Assume that
+ stub will suffice to work around the A8 erratum (see
+ the setting of stubs_always_after_branch in
+ elf32_arm_size_stubs below). */
+ ;
+ else if (is_bcc)
+ {
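+ /* Reassemble the T3 branch offset from its scattered fields:
+ imm11 -> bits 1-11, imm6 -> bits 12-17, J1 -> bit 18,
+ J2 -> bit 19, S -> bit 20; then sign-extend from bit 20. */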
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3f0000) >> 4;
+ offset |= (insn & 0x2000) ? 0x40000 : 0;
+ offset |= (insn & 0x800) ? 0x80000 : 0;
+ offset |= (insn & 0x4000000) ? 0x100000 : 0;
+ if (offset & 0x100000)
+ offset |= ~ ((bfd_signed_vma) 0xfffff);
+ stub_type = arm_stub_a8_veneer_b_cond;
+ }
+ else if (is_b || is_bl || is_blx)
+ {
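+ /* The T1/T2/T4 encodings store the top two offset bits as
+ I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S). */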
+ int s = (insn & 0x4000000) != 0;
+ int j1 = (insn & 0x2000) != 0;
+ int j2 = (insn & 0x800) != 0;
+ int i1 = !(j1 ^ s);
+ int i2 = !(j2 ^ s);
+
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3ff0000) >> 4;
+ offset |= i2 << 22;
+ offset |= i1 << 23;
+ offset |= s << 24;
+ if (offset & 0x1000000)
+ offset |= ~ ((bfd_signed_vma) 0xffffff);
+
+ if (is_blx)
+ offset &= ~ ((bfd_signed_vma) 3);
+
+ stub_type = is_blx ? arm_stub_a8_veneer_blx :
+ is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
+ }
+
+ if (stub_type != arm_stub_none)
+ {
+ bfd_vma pc_for_insn = base_vma + i + 4;
+
+ /* The original instruction is a BL, but the target is
+ an ARM instruction. If we were not making a stub,
+ the BL would have been converted to a BLX. Use the
+ BLX stub instead in that case. */
+ if (htab->use_blx && force_target_arm
+ && stub_type == arm_stub_a8_veneer_bl)
+ {
+ stub_type = arm_stub_a8_veneer_blx;
+ is_blx = TRUE;
+ is_bl = FALSE;
+ }
+ /* Conversely, if the original instruction was
+ BLX but the target is Thumb mode, use the BL
+ stub. */
+ else if (force_target_thumb
+ && stub_type == arm_stub_a8_veneer_blx)
+ {
+ stub_type = arm_stub_a8_veneer_bl;
+ is_blx = FALSE;
+ is_bl = TRUE;
+ }
+
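+ /* BLX targets ARM state, so the branch base is Align(PC, 4). */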
+ if (is_blx)
+ pc_for_insn &= ~ ((bfd_vma) 3);
+
+ /* If we found a relocation, use the proper destination,
+ not the offset in the (unrelocated) instruction.
+ Note this is always done if we switched the stub type
+ above. */
+ if (found)
+ offset =
+ (bfd_signed_vma) (found->destination - pc_for_insn);
+
+ target = pc_for_insn + offset;
+
+ /* The BLX stub is ARM-mode code. Adjust the offset to
+ take the different PC value (+8 instead of +4) into
+ account. */
+ if (stub_type == arm_stub_a8_veneer_blx)
+ offset += 4;
+
+ if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
+ {
+ char *stub_name = NULL;
+
+ if (num_a8_fixes == a8_fix_table_size)
+ {
+ a8_fix_table_size *= 2;
+ a8_fixes = (struct a8_erratum_fix *)
+ bfd_realloc (a8_fixes,
+ sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ }
+
+ if (num_a8_fixes < prev_num_a8_fixes)
+ {
+ /* If we're doing a subsequent scan,
+ check if we've found the same fix as
+ before, and try to reuse the stub
+ name. */
+ stub_name = a8_fixes[num_a8_fixes].stub_name;
+ if ((a8_fixes[num_a8_fixes].section != section)
+ || (a8_fixes[num_a8_fixes].offset != i))
+ {
+ free (stub_name);
+ stub_name = NULL;
+ *stub_changed_p = TRUE;
+ }
+ }
+
+ if (!stub_name)
+ {
+ stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
+ if (stub_name != NULL)
+ sprintf (stub_name, "%x:%x", section->id, i);
+ }
+
+ a8_fixes[num_a8_fixes].input_bfd = input_bfd;
+ a8_fixes[num_a8_fixes].section = section;
+ a8_fixes[num_a8_fixes].offset = i;
+ a8_fixes[num_a8_fixes].addend = offset;
+ a8_fixes[num_a8_fixes].orig_insn = insn;
+ a8_fixes[num_a8_fixes].stub_name = stub_name;
+ a8_fixes[num_a8_fixes].stub_type = stub_type;
+
+ num_a8_fixes++;
+ }
+ }
+ }
+
+ i += insn_32bit ? 4 : 2;
+ last_was_32bit = insn_32bit;
+ last_was_branch = is_32bit_branch;
+ }
+ }
+
+ if (elf_section_data (section)->this_hdr.contents == NULL)
+ free (contents);
+ }
+
+ *a8_fixes_p = a8_fixes;
+ *num_a8_fixes_p = num_a8_fixes;
+ *a8_fix_table_size_p = a8_fix_table_size;
+
+ return FALSE;
+}
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
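+
+/* Note that stub sizing is iterative: adding a stub can move sections,
+ which can in turn push other branches out of range, so the main loop
+ below repeats until a pass adds no new stubs. */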
+
+bfd_boolean
+elf32_arm_size_stubs (bfd *output_bfd,
+ bfd *stub_bfd,
+ struct bfd_link_info *info,
+ bfd_signed_vma group_size,
+ asection * (*add_stub_section) (const char *, asection *),
+ void (*layout_sections_again) (void))
+{
+ bfd_size_type stub_group_size;
+ bfd_boolean stubs_always_after_branch;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = NULL;
+ unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
+ struct a8_erratum_reloc *a8_relocs = NULL;
+ unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
+
+ if (htab->fix_cortex_a8)
+ {
+ a8_fixes = (struct a8_erratum_fix *)
+ bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
+ a8_relocs = (struct a8_erratum_reloc *)
+ bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
+ }
+
+ /* Propagate mach to stub bfd, because it may not have been
+ finalized when we created stub_bfd. */
+ bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+ bfd_get_mach (output_bfd));
+
+ /* Stash our params away. */
+ htab->stub_bfd = stub_bfd;
+ htab->add_stub_section = add_stub_section;
+ htab->layout_sections_again = layout_sections_again;
+ stubs_always_after_branch = group_size < 0;
+
+ /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
+ as the first half of a 32-bit branch straddling two 4K pages. This is a
+ crude way of enforcing that. */
+ if (htab->fix_cortex_a8)
+ stubs_always_after_branch = 1;
+
+ if (group_size < 0)
+ stub_group_size = -group_size;
+ else
+ stub_group_size = group_size;
+
+ if (stub_group_size == 1)
+ {
+ /* Default values. */
+ /* The Thumb branch range of +-4MB has to be used as the default
+ maximum size (a given section can contain both ARM and Thumb
+ code, so the worst case has to be taken into account).
+
+ This value is 24K less than that (4194304 - 4170000 = 24304),
+ which allows for 2025 12-byte stubs. If we exceed that, then we
+ will fail to link. The user will have to relink with an
+ explicit group size option. */
+ stub_group_size = 4170000;
+ }
+
+ group_sections (htab, stub_group_size, stubs_always_after_branch);
+
+ /* If we're applying the Cortex-A8 fix, we need to determine the
+ program header size now, because we cannot change it later --
+ that could alter section placements. Note that the A8 erratum
+ fix ends up requiring the section addresses to remain unchanged
+ modulo the page size. That's something we cannot represent
+ inside BFD, and we don't want to force the section alignment to
+ be the page size. */
+ if (htab->fix_cortex_a8)
+ (*htab->layout_sections_again) ();
+
+ while (1)
+ {
+ bfd *input_bfd;
+ unsigned int bfd_indx;
+ asection *stub_sec;
+ bfd_boolean stub_changed = FALSE;
+ unsigned prev_num_a8_fixes = num_a8_fixes;
+
+ num_a8_fixes = 0;
+ for (input_bfd = info->input_bfds, bfd_indx = 0;
+ input_bfd != NULL;
+ input_bfd = input_bfd->link_next, bfd_indx++)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+ asection *section;
+ Elf_Internal_Sym *local_syms = NULL;
+
+ num_a8_relocs = 0;
+
+ /* We'll need the symbol table in a second. */
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ /* Walk over each section attached to the input bfd. */
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more
+ to do. */
+ if ((section->flags & SEC_RELOC) == 0
+ || section->reloc_count == 0
+ || (section->flags & SEC_CODE) == 0)
+ continue;
+
+ /* If this section is a link-once section that will be
+ discarded, then don't create any stubs. */
+ if (section->output_section == NULL
+ || section->output_section->owner != output_bfd)
+ continue;
+
+ /* Get the relocs. */
+ internal_relocs
+ = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
+ NULL, info->keep_memory);
+ if (internal_relocs == NULL)
+ goto error_ret_free_local;
+
+ /* Now examine each relocation. */
+ irela = internal_relocs;
+ irelaend = irela + section->reloc_count;
+ for (; irela < irelaend; irela++)
+ {
+ unsigned int r_type, r_indx;
+ enum elf32_arm_stub_type stub_type;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ asection *sym_sec;
+ bfd_vma sym_value;
+ bfd_vma destination;
+ struct elf32_arm_link_hash_entry *hash;
+ const char *sym_name;
+ char *stub_name;
+ const asection *id_sec;
+ unsigned char st_type;
+ bfd_boolean created_stub = FALSE;
+
+ r_type = ELF32_R_TYPE (irela->r_info);
+ r_indx = ELF32_R_SYM (irela->r_info);
+
+ if (r_type >= (unsigned int) R_ARM_max)
+ {
+ bfd_set_error (bfd_error_bad_value);
+ error_ret_free_internal:
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ goto error_ret_free_local;
+ }
+
+ /* Only look for stubs on branch instructions. */
+ if ((r_type != (unsigned int) R_ARM_CALL)
+ && (r_type != (unsigned int) R_ARM_THM_CALL)
+ && (r_type != (unsigned int) R_ARM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP19)
+ && (r_type != (unsigned int) R_ARM_THM_XPC22)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_PLT32))
+ continue;
+
+ /* Now determine the call target, its name, value,
+ section. */
+ sym_sec = NULL;
+ sym_value = 0;
+ destination = 0;
+ hash = NULL;
+ sym_name = NULL;
+ if (r_indx < symtab_hdr->sh_info)
+ {
+ /* It's a local symbol. */
+ Elf_Internal_Sym *sym;
+ Elf_Internal_Shdr *hdr;
+
+ if (local_syms == NULL)
+ {
+ local_syms
+ = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (local_syms == NULL)
+ local_syms
+ = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (local_syms == NULL)
+ goto error_ret_free_internal;
+ }
+
+ sym = local_syms + r_indx;
+ hdr = elf_elfsections (input_bfd)[sym->st_shndx];
+ sym_sec = hdr->bfd_section;
+ if (!sym_sec)
+ /* This is an undefined symbol. It can never
+ be resolved. */
+ continue;
+
+ if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
+ sym_value = sym->st_value;
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ st_type = ELF_ST_TYPE (sym->st_info);
+ sym_name
+ = bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name);
+ }
+ else
+ {
+ /* It's an external symbol. */
+ int e_indx;
+
+ e_indx = r_indx - symtab_hdr->sh_info;
+ hash = ((struct elf32_arm_link_hash_entry *)
+ elf_sym_hashes (input_bfd)[e_indx]);
+
+ while (hash->root.root.type == bfd_link_hash_indirect
+ || hash->root.root.type == bfd_link_hash_warning)
+ hash = ((struct elf32_arm_link_hash_entry *)
+ hash->root.root.u.i.link);
+
+ if (hash->root.root.type == bfd_link_hash_defined
+ || hash->root.root.type == bfd_link_hash_defweak)
+ {
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ sym_sec = hash->root.root.u.def.section;
+ sym_value = hash->root.root.u.def.value;
+
+ /* For a destination in a shared library,
+ use the PLT stub as target address to
+ decide whether a branch stub is
+ needed. */
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if (sym_sec->output_section != NULL)
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if ((hash->root.root.type == bfd_link_hash_undefined)
+ || (hash->root.root.type == bfd_link_hash_undefweak))
+ {
+ /* For a shared library, use the PLT stub as
+ target address to decide whether a long
+ branch stub is needed.
+ Undefined symbols in absolute code cannot be
+ handled. */
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else
+ continue;
+ }
+ else
+ {
+ bfd_set_error (bfd_error_bad_value);
+ goto error_ret_free_internal;
+ }
+ st_type = ELF_ST_TYPE (hash->root.type);
+ sym_name = hash->root.root.root.string;
+ }
+
+ do
+ {
+ /* Determine what (if any) linker stub is needed. */
+ stub_type = arm_type_of_stub (info, section, irela,
+ st_type, hash,
+ destination, sym_sec,
+ input_bfd, sym_name);
+ if (stub_type == arm_stub_none)
+ break;
+
+ /* Support for grouping stub sections. */
+ id_sec = htab->stub_group[section->id].link_sec;
+
+ /* Get the name of this stub. */
+ stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
+ irela);
+ if (!stub_name)
+ goto error_ret_free_internal;
+
+ /* We've either created a stub for this reloc already,
+ or we are about to. */
+ created_stub = TRUE;
+
+ stub_entry = arm_stub_hash_lookup
+ (&htab->stub_hash_table, stub_name,
+ FALSE, FALSE);
+ if (stub_entry != NULL)
+ {
+ /* The proper stub has already been created. */
+ free (stub_name);
+ stub_entry->target_value = sym_value;
+ break;
+ }
+
+ stub_entry = elf32_arm_add_stub (stub_name, section,
+ htab);
+ if (stub_entry == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry->target_value = sym_value;
+ stub_entry->target_section = sym_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = hash;
+ stub_entry->st_type = st_type;
+
+ if (sym_name == NULL)
+ sym_name = "unnamed";
+ stub_entry->output_name = (char *)
+ bfd_alloc (htab->stub_bfd,
+ sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
+ + strlen (sym_name));
+ if (stub_entry->output_name == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ /* For historical reasons, use the existing names for
+ ARM-to-Thumb and Thumb-to-ARM stubs. */
+ if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
+ || (r_type == (unsigned int) R_ARM_THM_JUMP24))
+ && st_type != STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
+ else if ( ((r_type == (unsigned int) R_ARM_CALL)
+ || (r_type == (unsigned int) R_ARM_JUMP24))
+ && st_type == STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
+ else
+ sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
+ sym_name);
+
+ stub_changed = TRUE;
+ }
+ while (0);
+
+ /* Look for relocations which might trigger the Cortex-A8
+ erratum. */
+ if (htab->fix_cortex_a8
+ && (r_type == (unsigned int) R_ARM_THM_JUMP24
+ || r_type == (unsigned int) R_ARM_THM_JUMP19
+ || r_type == (unsigned int) R_ARM_THM_CALL
+ || r_type == (unsigned int) R_ARM_THM_XPC22))
+ {
+ bfd_vma from = section->output_section->vma
+ + section->output_offset
+ + irela->r_offset;
+
+ if ((from & 0xfff) == 0xffe)
+ {
+ /* Found a candidate. Note we haven't checked the
+ destination is within 4K here: if we do so (and
+ don't create an entry in a8_relocs) we can't tell
+ that a branch should have been relocated when
+ scanning later. */
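+ /* (The first halfword of such a branch occupies the last
+ two bytes of a 4KB page, so a 32-bit branch there
+ straddles the page boundary.) */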
+ if (num_a8_relocs == a8_reloc_table_size)
+ {
+ a8_reloc_table_size *= 2;
+ a8_relocs = (struct a8_erratum_reloc *)
+ bfd_realloc (a8_relocs,
+ sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ a8_relocs[num_a8_relocs].from = from;
+ a8_relocs[num_a8_relocs].destination = destination;
+ a8_relocs[num_a8_relocs].r_type = r_type;
+ a8_relocs[num_a8_relocs].st_type = st_type;
+ a8_relocs[num_a8_relocs].sym_name = sym_name;
+ a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+
+ num_a8_relocs++;
+ }
+ }
+ }
+
+ /* We're done with the internal relocs, free them. */
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ }
+
+ /* Cache or free the symbol table we may have loaded above;
+ otherwise it would leak on every sizing pass. */
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+
+ if (htab->fix_cortex_a8)
+ {
+ /* Sort relocs which might apply to the Cortex-A8 erratum. */
+ qsort (a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
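+
+ /* cortex_a8_erratum_scan () looks candidates up with bsearch
+ using the same comparator, so the array must stay sorted by
+ the FROM address. */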
+
+ /* Scan for branches which might trigger the Cortex-A8 erratum. */
+ if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
+ &num_a8_fixes, &a8_fix_table_size,
+ a8_relocs, num_a8_relocs,
+ prev_num_a8_fixes, &stub_changed)
+ != 0)
+ goto error_ret_free_local;
+ }
+ }
+
+ if (prev_num_a8_fixes != num_a8_fixes)
+ stub_changed = TRUE;
+
+ if (!stub_changed)
+ break;
+
+ /* OK, we've added some stubs. Find out the new size of the
+ stub sections. */
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+
+ stub_sec->size = 0;
+ }
+
+ bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+
+ /* Add Cortex-A8 erratum veneers to stub section sizes too. */
+ if (htab->fix_cortex_a8)
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
+ a8_fixes[i].section, htab);
+
+ if (stub_sec == NULL)
+ goto error_ret_free_local;
+
+ stub_sec->size
+ += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
+ NULL);
+ }
+
+ /* Ask the linker to do its stuff. */
+ (*htab->layout_sections_again) ();
+ }
+
+ /* Add stubs for Cortex-A8 erratum fixes now. */
+ if (htab->fix_cortex_a8)
+ {
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ char *stub_name = a8_fixes[i].stub_name;
+ asection *section = a8_fixes[i].section;
+ unsigned int section_id = a8_fixes[i].section->id;
+ asection *link_sec = htab->stub_group[section_id].link_sec;
+ asection *stub_sec = htab->stub_group[section_id].stub_sec;
+ const insn_sequence *template_sequence;
+ int template_size, size = 0;
+
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ bfd_get_filename (section->owner),
+ stub_name);
+ return FALSE;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ stub_entry->stub_type = a8_fixes[i].stub_type;
+ stub_entry->target_section = a8_fixes[i].section;
+ stub_entry->target_value = a8_fixes[i].offset;
+ stub_entry->target_addend = a8_fixes[i].addend;
+ stub_entry->orig_insn = a8_fixes[i].orig_insn;
+ stub_entry->st_type = STT_ARM_TFUNC;
+
+ size = find_stub_size_and_template (a8_fixes[i].stub_type,
+ &template_sequence,
+ &template_size);
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template_sequence;
+ stub_entry->stub_template_size = template_size;
+ }
+
+ /* Stash the Cortex-A8 erratum fix array for use later in
+ elf32_arm_write_section(). */
+ htab->a8_erratum_fixes = a8_fixes;
+ htab->num_a8_erratum_fixes = num_a8_fixes;
+ }
+ else
+ {
+ htab->a8_erratum_fixes = NULL;
+ htab->num_a8_erratum_fixes = 0;
+ }
+ return TRUE;
+
+ error_ret_free_local:
+ return FALSE;
+}
+
+/* Build all the stubs associated with the current output file. The
+ stubs are kept in a hash table attached to the main linker hash
+ table. We also set up the .plt entries for statically linked PIC
+ functions here. This function is called via arm_elf_finish in the
+ linker. */
+
+bfd_boolean
+elf32_arm_build_stubs (struct bfd_link_info *info)
+{
+ asection *stub_sec;
+ struct bfd_hash_table *table;
+ struct elf32_arm_link_hash_table *htab;
+
+ htab = elf32_arm_hash_table (info);
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ bfd_size_type size;
+
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+
+ /* Allocate memory to hold the linker stubs. */
+ size = stub_sec->size;
+ stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
+ if (stub_sec->contents == NULL && size != 0)
+ return FALSE;
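+
+ /* Reset the size; it is rebuilt as each stub is emitted, so
+ while building it doubles as the running output offset
+ within the section. */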
+ stub_sec->size = 0;
+ }
+
+ /* Build the stubs as directed by the stub hash table. */
+ table = &htab->stub_hash_table;
+ bfd_hash_traverse (table, arm_build_one_stub, info);
+ if (htab->fix_cortex_a8)
+ {
+ /* Place the Cortex-A8 stubs last. */
+ htab->fix_cortex_a8 = -1;
+ bfd_hash_traverse (table, arm_build_one_stub, info);
+ }
+
+ return TRUE;
+}
+
+/* Locate the Thumb-encoded calling stub for NAME. */
+
+static struct elf_link_hash_entry *
+find_thumb_glue (struct bfd_link_info *link_info,
+ const char *name,
+ char **error_message)
+{
+ char *tmp_name;
+ struct elf_link_hash_entry *hash;
+ struct elf32_arm_link_hash_table *hash_table;
+
+ /* We need a pointer to the ARM ELF specific hash table. */
+ hash_table = elf32_arm_hash_table (link_info);
+
+ tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
+ + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
+
+ BFD_ASSERT (tmp_name);
+
+ sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
+
+ hash = elf_link_hash_lookup
+ (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+
+ if (hash == NULL
+ && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
+ tmp_name, name) == -1)
+ *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+
+ free (tmp_name);
+
+ return hash;
+}
+
+/* Locate the ARM-encoded calling stub for NAME. */
+
+static struct elf_link_hash_entry *
+find_arm_glue (struct bfd_link_info *link_info,
+ const char *name,
+ char **error_message)
+{
+ char *tmp_name;
+ struct elf_link_hash_entry *myh;
+ struct elf32_arm_link_hash_table *hash_table;
+
+ /* We need a pointer to the ARM ELF specific hash table. */
+ hash_table = elf32_arm_hash_table (link_info);
+
+ tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
+ + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+
+ BFD_ASSERT (tmp_name);
+
+ sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
+
+ myh = elf_link_hash_lookup
+ (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+
+ if (myh == NULL
+ && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
+ tmp_name, name) == -1)
+ *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+
+ free (tmp_name);
+
+ return myh;
+}
+
+/* ARM->Thumb glue (static images):
+
+ .arm
+ __func_from_arm:
+ ldr r12, __func_addr
+ bx r12
+ __func_addr:
+ .word func @ behave as if you saw an ARM_32 reloc.
+
+ (v5t static images)
+ .arm
+ __func_from_arm:
+ ldr pc, __func_addr
+ __func_addr:
+ .word func @ behave as if you saw an ARM_32 reloc.
+
+ (relocatable images)
+ .arm
+ __func_from_arm:
+ ldr r12, __func_offset
+ add r12, r12, pc
+ bx r12
+ __func_offset:
+ .word func - . */
+
+#define ARM2THUMB_STATIC_GLUE_SIZE 12
+static const insn32 a2t1_ldr_insn = 0xe59fc000; /* ldr r12, [pc] */
+static const insn32 a2t2_bx_r12_insn = 0xe12fff1c; /* bx r12 */
+static const insn32 a2t3_func_addr_insn = 0x00000001; /* .word func */
+
+#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
+static const insn32 a2t1v5_ldr_insn = 0xe51ff004; /* ldr pc, [pc, #-4] */
+static const insn32 a2t2v5_func_addr_insn = 0x00000001; /* .word func */
+
+#define ARM2THUMB_PIC_GLUE_SIZE 16
+static const insn32 a2t1p_ldr_insn = 0xe59fc004; /* ldr r12, [pc, #4] */
+static const insn32 a2t2p_add_pc_insn = 0xe08cc00f; /* add r12, r12, pc */
+static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c; /* bx r12 */
+
+/* Thumb->ARM:                            Thumb->(non-interworking aware) ARM
+
+   .thumb                                 .thumb
+   .align 2                               .align 2
+   __func_from_thumb:                     __func_from_thumb:
+   bx pc                                  push {r6, lr}
+   nop                                    ldr r6, __func_addr
+   .arm                                   mov lr, pc
+   b func                                 bx r6
+                                          .arm
+                                          ;; back_to_thumb
+                                          ldmia r13! {r6, lr}
+                                          bx lr
+                                          __func_addr:
+
+#define THUMB2ARM_GLUE_SIZE 8
+static const insn16 t2a1_bx_pc_insn = 0x4778; /* bx pc */
+static const insn16 t2a2_noop_insn = 0x46c0; /* nop (mov r8, r8) */
+static const insn32 t2a3_b_insn = 0xea000000; /* b func */
+
+#define VFP11_ERRATUM_VENEER_SIZE 8
+
+#define ARM_BX_VENEER_SIZE 12
+static const insn32 armbx1_tst_insn = 0xe3100001; /* tst rN, #1 */
+static const insn32 armbx2_moveq_insn = 0x01a0f000; /* moveq pc, rN */
+static const insn32 armbx3_bx_insn = 0xe12fff10; /* bx rN */
+
+#ifndef ELFARM_NABI_C_INCLUDED
+static void
+arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
+{
+ asection * s;
+ bfd_byte * contents;
+
+ if (size == 0)
+ {
+ /* Do not include empty glue sections in the output. */
+ if (abfd != NULL)
+ {
+ s = bfd_get_section_by_name (abfd, name);
+ if (s != NULL)
+ s->flags |= SEC_EXCLUDE;
+ }
+ return;
+ }
+
+ BFD_ASSERT (abfd != NULL);
+
+ s = bfd_get_section_by_name (abfd, name);
+ BFD_ASSERT (s != NULL);
+
+ contents = (bfd_byte *) bfd_alloc (abfd, size);
+
+ BFD_ASSERT (s->size == size);
+ s->contents = contents;
+}
+
+bfd_boolean
+bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
+{
+ struct elf32_arm_link_hash_table * globals;
+
+ globals = elf32_arm_hash_table (info);
+ BFD_ASSERT (globals != NULL);
+
+ arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+ globals->arm_glue_size,
+ ARM2THUMB_GLUE_SECTION_NAME);
+
+ arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+ globals->thumb_glue_size,
+ THUMB2ARM_GLUE_SECTION_NAME);
+
+ arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+ globals->vfp11_erratum_glue_size,
+ VFP11_ERRATUM_VENEER_SECTION_NAME);
+
+ arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+ globals->bx_glue_size,
+ ARM_BX_GLUE_SECTION_NAME);
+
+ return TRUE;
+}
+
+/* Allocate space and symbols for calling a Thumb function from ARM mode.
+ Returns the symbol identifying the stub. */
+