X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=bfd%2Felf32-arm.c;h=2d53304e4e3593e5f4798865e421145557a8f34c;hb=e1ec24c6f31afae5a5175abecdcd9fd4d7893c4b;hp=a47d02c020df4b666669b448dc90a1c351bbc749;hpb=d3626fb08bd9456f3252ed95d31f67f83c833cfd;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c index a47d02c020..2d53304e4e 100644 --- a/bfd/elf32-arm.c +++ b/bfd/elf32-arm.c @@ -1,6 +1,6 @@ /* 32-bit ELF support for ARM Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, - 2008 Free Software Foundation, Inc. + 2008, 2009 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. @@ -20,6 +20,8 @@ MA 02110-1301, USA. */ #include "sysdep.h" +#include + #include "bfd.h" #include "libiberty.h" #include "libbfd.h" @@ -61,6 +63,11 @@ static struct elf_backend_data elf32_arm_vxworks_bed; +static bfd_boolean elf32_arm_write_section (bfd *output_bfd, + struct bfd_link_info *link_info, + asection *sec, + bfd_byte *contents); + /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g. R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO in that slot. */ @@ -1881,7 +1888,8 @@ typedef unsigned short int insn16; interworkable. */ #define INTERWORK_FLAG(abfd) \ (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \ - || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK)) + || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \ + || ((abfd)->flags & BFD_LINKER_CREATED)) /* The linker script knows the section names for placement. The entry_names are used to do simple name mangling on the stubs. @@ -2018,11 +2026,15 @@ enum stub_insn_type DATA_TYPE }; -#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} -#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} -#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} -#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} -#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} +#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} +/* A bit of a hack. A Thumb conditional branch, in which the proper condition + is inserted in arm_build_one_stub(). */ +#define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1} +#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} +#define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)} +#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} +#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} +#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} typedef struct { @@ -2156,25 +2168,79 @@ static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] = DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ }; +/* Cortex-A8 erratum-workaround stubs. */ + +/* Stub used for conditional branches (which may be beyond +/-1MB away, so we + can't use a conditional branch to reach this stub). */ + +static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] = + { + THUMB16_BCOND_INSN(0xd001), /* b.n true. */ + THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */ + THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */ + }; + +/* Stub used for b.w and bl.w instructions. */ + +static const insn_sequence elf32_arm_stub_a8_veneer_b[] = + { + THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */ + }; + +static const insn_sequence elf32_arm_stub_a8_veneer_bl[] = + { + THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */ + }; + +/* Stub used for Thumb-2 blx.w instructions. 
We modified the original blx.w + instruction (which switches to ARM mode) to point to this stub. Jump to the + real destination using an ARM-mode branch. */ + +static const insn_sequence elf32_arm_stub_a8_veneer_blx[] = + { + ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */ + }; + /* Section name for stubs is the associated section name plus this string. */ #define STUB_SUFFIX ".stub" -enum elf32_arm_stub_type -{ +/* One entry per long/short branch stub defined above. */ +#define DEF_STUBS \ + DEF_STUB(long_branch_any_any) \ + DEF_STUB(long_branch_v4t_arm_thumb) \ + DEF_STUB(long_branch_thumb_only) \ + DEF_STUB(long_branch_v4t_thumb_thumb) \ + DEF_STUB(long_branch_v4t_thumb_arm) \ + DEF_STUB(short_branch_v4t_thumb_arm) \ + DEF_STUB(long_branch_any_arm_pic) \ + DEF_STUB(long_branch_any_thumb_pic) \ + DEF_STUB(long_branch_v4t_thumb_thumb_pic) \ + DEF_STUB(long_branch_v4t_arm_thumb_pic) \ + DEF_STUB(long_branch_v4t_thumb_arm_pic) \ + DEF_STUB(long_branch_thumb_only_pic) \ + DEF_STUB(a8_veneer_b_cond) \ + DEF_STUB(a8_veneer_b) \ + DEF_STUB(a8_veneer_bl) \ + DEF_STUB(a8_veneer_blx) + +#define DEF_STUB(x) arm_stub_##x, +enum elf32_arm_stub_type { arm_stub_none, - arm_stub_long_branch_any_any, - arm_stub_long_branch_v4t_arm_thumb, - arm_stub_long_branch_thumb_only, - arm_stub_long_branch_v4t_thumb_thumb, - arm_stub_long_branch_v4t_thumb_arm, - arm_stub_short_branch_v4t_thumb_arm, - arm_stub_long_branch_any_arm_pic, - arm_stub_long_branch_any_thumb_pic, - arm_stub_long_branch_v4t_arm_thumb_pic, - arm_stub_long_branch_v4t_thumb_arm_pic, - arm_stub_long_branch_thumb_only_pic, - arm_stub_long_branch_v4t_thumb_thumb_pic, + DEF_STUBS +}; +#undef DEF_STUB + +typedef struct +{ + const insn_sequence* template; + int template_size; +} stub_def; + +#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)}, +static const stub_def stub_definitions[] = { + {NULL, 0}, + DEF_STUBS }; struct elf32_arm_stub_hash_entry @@ -2193,6 +2259,13 @@ struct elf32_arm_stub_hash_entry bfd_vma target_value; asection *target_section; + /* Offset to apply to relocation referencing target_value. */ + bfd_vma target_addend; + + /* The instruction which caused this stub to be generated (only valid for + Cortex-A8 erratum workaround stubs at present). */ + unsigned long orig_insn; + /* The stub type. */ enum elf32_arm_stub_type stub_type; /* Its encoding size in bytes. */ @@ -2260,20 +2333,86 @@ typedef struct elf32_vfp11_erratum_list } elf32_vfp11_erratum_list; +typedef enum +{ + DELETE_EXIDX_ENTRY, + INSERT_EXIDX_CANTUNWIND_AT_END +} +arm_unwind_edit_type; + +/* A (sorted) list of edits to apply to an unwind table. */ +typedef struct arm_unwind_table_edit +{ + arm_unwind_edit_type type; + /* Note: we sometimes want to insert an unwind entry corresponding to a + section different from the one we're currently writing out, so record the + (text) section this edit relates to here. */ + asection *linked_section; + unsigned int index; + struct arm_unwind_table_edit *next; +} +arm_unwind_table_edit; + typedef struct _arm_elf_section_data { + /* Information about mapping symbols. */ struct bfd_elf_section_data elf; unsigned int mapcount; unsigned int mapsize; elf32_arm_section_map *map; + /* Information about CPU errata. */ unsigned int erratumcount; elf32_vfp11_erratum_list *erratumlist; + /* Information about unwind tables. */ + union + { + /* Unwind info attached to a text section. */ + struct + { + asection *arm_exidx_sec; + } text; + + /* Unwind info attached to an .ARM.exidx section. 
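*/

/* Editor's sketch (not part of the commit): a stand-alone model of the
   text/exidx association above.  Which arm of "u" is live is implied by
   the section's type (text section vs. SHT_ARM_EXIDX), so callers must
   know which side they hold before touching the union.  All types here
   are simplified stand-ins, not the real BFD structures.  */

#include <stddef.h>

struct model_section;

struct model_unwind_edit
{
  int type;                                /* DELETE_EXIDX_ENTRY etc.  */
  struct model_section *linked_section;
  unsigned int index;
  struct model_unwind_edit *next;
};

struct model_section
{
  int is_exidx;                            /* Discriminates "u".  */
  union
  {
    struct { struct model_section *arm_exidx_sec; } text;
    struct
    {
      struct model_unwind_edit *unwind_edit_list;
      struct model_unwind_edit *unwind_edit_tail;
    } exidx;
  } u;
};

/* Hop from a text section to its unwind edits, as the exidx fix-up
   code added later in this patch does with the real types.  */

static struct model_unwind_edit *
model_edits_for_text (struct model_section *text)
{
  struct model_section *exidx = text->u.text.arm_exidx_sec;
  return exidx ? exidx->u.exidx.unwind_edit_list : NULL;
}

/*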
*/ + struct + { + arm_unwind_table_edit *unwind_edit_list; + arm_unwind_table_edit *unwind_edit_tail; + } exidx; + } u; } _arm_elf_section_data; #define elf32_arm_section_data(sec) \ ((_arm_elf_section_data *) elf_section_data (sec)) +/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum. + These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs), + so may be created multiple times: we use an array of these entries whilst + relaxing which we can refresh easily, then create stubs for each potentially + erratum-triggering instruction once we've settled on a solution. */ + +struct a8_erratum_fix { + bfd *input_bfd; + asection *section; + bfd_vma offset; + bfd_vma addend; + unsigned long orig_insn; + char *stub_name; + enum elf32_arm_stub_type stub_type; +}; + +/* A table of relocs applied to branches which might trigger Cortex-A8 + erratum. */ + +struct a8_erratum_reloc { + bfd_vma from; + bfd_vma destination; + unsigned int r_type; + unsigned char st_type; + const char *sym_name; + bfd_boolean non_a8_stub; +}; + /* The size of the thread control block. */ #define TCB_SIZE 8 @@ -2405,6 +2544,12 @@ struct elf32_arm_link_hash_table veneers. */ bfd_size_type vfp11_erratum_glue_size; + /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This + holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and + elf32_arm_write_section(). */ + struct a8_erratum_fix *a8_erratum_fixes; + unsigned int num_a8_erratum_fixes; + /* An arbitrary input BFD chosen to hold the glue sections. */ bfd * bfd_of_glue_owner; @@ -2423,6 +2568,9 @@ struct elf32_arm_link_hash_table 2 = Generate v4 interworing stubs. */ int fix_v4bx; + /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */ + int fix_cortex_a8; + /* Nonzero if the ARM/Thumb BLX instructions are available for use. */ int use_blx; @@ -2470,8 +2618,8 @@ struct elf32_arm_link_hash_table bfd_vma offset; } tls_ldm_got; - /* Small local sym to section mapping cache. */ - struct sym_sec_cache sym_sec; + /* Small local sym cache. */ + struct sym_cache sym_cache; /* For convenience in allocate_dynrelocs. */ bfd * obfd; @@ -2600,15 +2748,9 @@ create_got_section (bfd *dynobj, struct bfd_link_info *info) if (!htab->sgot || !htab->sgotplt) abort (); - htab->srelgot = bfd_make_section_with_flags (dynobj, - RELOC_SECTION (htab, ".got"), - (SEC_ALLOC | SEC_LOAD - | SEC_HAS_CONTENTS - | SEC_IN_MEMORY - | SEC_LINKER_CREATED - | SEC_READONLY)); - if (htab->srelgot == NULL - || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2)) + htab->srelgot = bfd_get_section_by_name (dynobj, + RELOC_SECTION (htab, ".got")); + if (htab->srelgot == NULL) return FALSE; return TRUE; } @@ -2762,6 +2904,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; ret->vfp11_erratum_glue_size = 0; ret->num_vfp11_fixes = 0; + ret->fix_cortex_a8 = 0; ret->bfd_of_glue_owner = NULL; ret->byteswap_code = 0; ret->target1_is_rel = 0; @@ -2778,7 +2921,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) ret->vxworks_p = 0; ret->symbian_p = 0; ret->use_rel = 1; - ret->sym_sec.abfd = NULL; + ret->sym_cache.abfd = NULL; ret->obfd = abfd; ret->tls_ldm_got.refcount = 0; ret->stub_bfd = NULL; @@ -2915,14 +3058,15 @@ arm_type_of_stub (struct bfd_link_info *info, places. 
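*/

/* Editor's sketch (not part of the commit): the range test applied
   below, written out as a predicate.  The limits mirror the
   THM_MAX_*_BRANCH_OFFSET / THM2_MAX_*_BRANCH_OFFSET macros defined
   earlier in this file (Thumb-1 BL reaches roughly +-4MB, Thumb-2
   roughly +-16MB); the exact constants here are quoted from the
   architecture manuals and should be treated as illustrative.  */

#include <stdint.h>

static int
thumb_branch_needs_stub (int64_t branch_offset, int thumb2)
{
  /* Thumb-1 BL: 22-bit signed halfword offset.  */
  const int64_t t1_fwd = (1 << 22) - 2 + 4;
  const int64_t t1_bwd = -(1 << 22) + 4;
  /* Thumb-2 BL/B.W: 24-bit signed halfword offset.  */
  const int64_t t2_fwd = (1 << 24) - 2 + 4;
  const int64_t t2_bwd = -(1 << 24) + 4;

  if (thumb2)
    return branch_offset > t2_fwd || branch_offset < t2_bwd;
  return branch_offset > t1_fwd || branch_offset < t1_bwd;
}

/*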
*/ } - if (r_type == R_ARM_THM_CALL) + if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) { /* Handle cases where: - this call goes too far (different Thumb/Thumb2 max distance) - - it's a Thumb->Arm call and blx is not available. A stub is - needed in this case, but only if this call is not through a - PLT entry. Indeed, PLT stubs handle mode switching already. + - it's a Thumb->Arm call and blx is not available, or it's a + Thumb->Arm branch (not bl). A stub is needed in this case, + but only if this call is not through a PLT entry. Indeed, + PLT stubs handle mode switching already. */ if ((!thumb2 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET @@ -2931,7 +3075,8 @@ arm_type_of_stub (struct bfd_link_info *info, && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) || ((st_type != STT_ARM_TFUNC) - && ((r_type == R_ARM_THM_CALL) && !globals->use_blx) + && (((r_type == R_ARM_THM_CALL) && !globals->use_blx) + || (r_type == R_ARM_THM_JUMP24)) && !use_plt)) { if (st_type == STT_ARM_TFUNC) @@ -2941,14 +3086,19 @@ arm_type_of_stub (struct bfd_link_info *info, { stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? ((globals->use_blx) - /* V5T and above. */ + ? ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) + /* V5T and above. Stub starts with ARM code, so + we must be able to switch mode before + reaching it, which is only possible for 'bl' + (ie R_ARM_THM_CALL relocation). */ ? arm_stub_long_branch_any_thumb_pic /* On V4T, use Thumb code only. */ : arm_stub_long_branch_v4t_thumb_thumb_pic) /* non-PIC stubs. */ - : ((globals->use_blx) + : ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ ? arm_stub_long_branch_any_any /* V4T. */ @@ -2978,14 +3128,16 @@ arm_type_of_stub (struct bfd_link_info *info, stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? ((globals->use_blx) + ? ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ ? arm_stub_long_branch_any_arm_pic /* V4T PIC stub. */ : arm_stub_long_branch_v4t_thumb_arm_pic) /* non-PIC stubs. */ - : ((globals->use_blx) + : ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ ? arm_stub_long_branch_any_any /* V4T. */ @@ -2999,7 +3151,7 @@ arm_type_of_stub (struct bfd_link_info *info, } } } - else if (r_type == R_ARM_CALL) + else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32) { if (st_type == STT_ARM_TFUNC) { @@ -3019,7 +3171,9 @@ arm_type_of_stub (struct bfd_link_info *info, the mode change (bit 24 (H) of BLX encoding). */ if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2) || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET) - || !globals->use_blx) + || ((r_type == R_ARM_CALL) && !globals->use_blx) + || (r_type == R_ARM_JUMP24) + || (r_type == R_ARM_PLT32)) { stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ @@ -3140,17 +3294,16 @@ elf32_arm_get_stub_entry (const asection *input_section, return stub_entry; } -/* Add a new stub entry to the stub hash. Not all fields of the new - stub entry are initialised. */ +/* Find or create a stub section. Returns a pointer to the stub section, and + the section to which the stub section will be attached (in *LINK_SEC_P). + LINK_SEC_P may be NULL. 
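*/

/* Editor's sketch (not part of the commit): the per-section caching
   this helper relies on, reduced to its shape.  htab->stub_group is
   indexed by section id and a NULL stub_sec means "not created yet";
   the names below are illustrative stand-ins.  */

#include <stddef.h>

struct group_entry
{
  void *link_sec;                  /* Section the stubs attach to.  */
  void *stub_sec;                  /* Lazily created stub section.  */
};

static void *
get_stub_sec (struct group_entry *group, unsigned int sec_id,
              void *(*create) (void *link_sec))
{
  if (group[sec_id].stub_sec == NULL)
    group[sec_id].stub_sec = create (group[sec_id].link_sec);
  return group[sec_id].stub_sec;
}

/*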
*/ -static struct elf32_arm_stub_hash_entry * -elf32_arm_add_stub (const char *stub_name, - asection *section, - struct elf32_arm_link_hash_table *htab) +static asection * +elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, + struct elf32_arm_link_hash_table *htab) { asection *link_sec; asection *stub_sec; - struct elf32_arm_stub_hash_entry *stub_entry; link_sec = htab->stub_group[section->id].link_sec; stub_sec = htab->stub_group[section->id].stub_sec; @@ -3178,6 +3331,28 @@ elf32_arm_add_stub (const char *stub_name, } htab->stub_group[section->id].stub_sec = stub_sec; } + + if (link_sec_p) + *link_sec_p = link_sec; + + return stub_sec; +} + +/* Add a new stub entry to the stub hash. Not all fields of the new + stub entry are initialised. */ + +static struct elf32_arm_stub_hash_entry * +elf32_arm_add_stub (const char *stub_name, + asection *section, + struct elf32_arm_link_hash_table *htab) +{ + asection *link_sec; + asection *stub_sec; + struct elf32_arm_stub_hash_entry *stub_entry; + + stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab); + if (stub_sec == NULL) + return NULL; /* Enter this entry into the linker stub hash table. */ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, @@ -3223,10 +3398,16 @@ put_thumb_insn (struct elf32_arm_link_hash_table * htab, bfd_putb16 (val, ptr); } +static bfd_reloc_status_type elf32_arm_final_link_relocate + (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *, + Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *, + const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **); + static bfd_boolean arm_build_one_stub (struct bfd_hash_entry *gen_entry, void * in_arg) { +#define MAXRELOCS 2 struct elf32_arm_stub_hash_entry *stub_entry; struct bfd_link_info *info; struct elf32_arm_link_hash_table *htab; @@ -3240,8 +3421,9 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, const insn_sequence *template; int i; struct elf32_arm_link_hash_table * globals; - int stub_reloc_idx = -1; - int stub_reloc_offset = 0; + int stub_reloc_idx[MAXRELOCS] = {-1, -1}; + int stub_reloc_offset[MAXRELOCS] = {0, 0}; + int nrelocs = 0; /* Massage our args to the form they really have. */ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; @@ -3276,26 +3458,50 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, switch (template[i].type) { case THUMB16_TYPE: - put_thumb_insn (globals, stub_bfd, template[i].data, loc + size); - size += 2; + { + bfd_vma data = template[i].data; + if (template[i].reloc_addend != 0) + { + /* We've borrowed the reloc_addend field to mean we should + insert a condition code into this (Thumb-1 branch) + instruction. See THUMB16_BCOND_INSN. */ + BFD_ASSERT ((data & 0xff00) == 0xd000); + data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8; + } + put_thumb_insn (globals, stub_bfd, data, loc + size); + size += 2; + } break; + case THUMB32_TYPE: + put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff, + loc + size); + put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff, + loc + size + 2); + if (template[i].r_type != R_ARM_NONE) + { + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; + } + size += 4; + break; + case ARM_TYPE: put_arm_insn (globals, stub_bfd, template[i].data, loc + size); /* Handle cases where the target is encoded within the instruction. 
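*/

/* Editor's sketch (not part of the commit): the condition-code
   transplant performed for THUMB16_BCOND_INSN above, in isolation.
   A Thumb-2 Bcc.W keeps its condition in bits 25:22 of the combined
   32-bit encoding; a Thumb-1 Bcc keeps it in bits 11:8 of a 0xdNxx
   encoding, which is exactly the shift-and-mask used above.  */

#include <stdint.h>
#include <assert.h>

static uint16_t
transplant_cond (uint32_t orig_bcc_w, uint16_t thumb1_bcc_template)
{
  /* The template must be a Thumb-1 conditional branch (0xd0xx).  */
  assert ((thumb1_bcc_template & 0xff00) == 0xd000);
  return thumb1_bcc_template | (((orig_bcc_w >> 22) & 0xf) << 8);
}

/*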
*/ if (template[i].r_type == R_ARM_JUMP24) { - stub_reloc_idx = i; - stub_reloc_offset = size; + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; } size += 4; break; case DATA_TYPE: bfd_put_32 (stub_bfd, template[i].data, loc + size); - stub_reloc_idx = i; - stub_reloc_offset = size; + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; size += 4; break; @@ -3315,89 +3521,72 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, if (stub_entry->st_type == STT_ARM_TFUNC) sym_value |= 1; - /* Assume there is one and only one entry to relocate in each stub. */ - BFD_ASSERT (stub_reloc_idx != -1); + /* Assume there is at least one and at most MAXRELOCS entries to relocate + in each stub. */ + BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS); - _bfd_final_link_relocate (elf32_arm_howto_from_type (template[stub_reloc_idx].r_type), - stub_bfd, stub_sec, stub_sec->contents, - stub_entry->stub_offset + stub_reloc_offset, - sym_value, template[stub_reloc_idx].reloc_addend); + for (i = 0; i < nrelocs; i++) + if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24 + || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19 + || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL + || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22) + { + Elf_Internal_Rela rel; + bfd_boolean unresolved_reloc; + char *error_message; + int sym_flags + = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22) + ? STT_ARM_TFUNC : 0; + bfd_vma points_to = sym_value + stub_entry->target_addend; + + rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; + rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type); + rel.r_addend = template[stub_reloc_idx[i]].reloc_addend; + + if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) + /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] + template should refer back to the instruction after the original + branch. */ + points_to = sym_value; + + /* Note: _bfd_final_link_relocate doesn't handle these relocations + properly. We should probably use this function unconditionally, + rather than only for certain relocations listed in the enclosing + conditional, for the sake of consistency. */ + elf32_arm_final_link_relocate (elf32_arm_howto_from_type + (template[stub_reloc_idx[i]].r_type), + stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, + points_to, info, stub_entry->target_section, "", sym_flags, + (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc, + &error_message); + } + else + { + _bfd_final_link_relocate (elf32_arm_howto_from_type + (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec, + stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i], + sym_value + stub_entry->target_addend, + template[stub_reloc_idx[i]].reloc_addend); + } return TRUE; +#undef MAXRELOCS } -/* As above, but don't actually build the stub. Just bump offset so - we know stub section sizes. */ +/* Calculate the template, template size and instruction size for a stub. + Return value is the instruction size. 
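*/

/* Editor's sketch (not part of the commit): the sizing rule the loop
   below implements.  THUMB16 entries contribute 2 bytes; ARM, THUMB32
   and DATA entries contribute 4.  The enum values are stand-ins for
   the stub_insn_type values defined earlier.  */

#include <stddef.h>

enum model_insn_type { M_THUMB16, M_THUMB32, M_ARM, M_DATA };

static unsigned int
template_byte_size (const enum model_insn_type *types, size_t n)
{
  unsigned int size = 0;
  size_t i;

  for (i = 0; i < n; i++)
    size += (types[i] == M_THUMB16) ? 2 : 4;
  return size;
}

/*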
*/ -static bfd_boolean -arm_size_one_stub (struct bfd_hash_entry *gen_entry, - void * in_arg) +static unsigned int +find_stub_size_and_template (enum elf32_arm_stub_type stub_type, + const insn_sequence **stub_template, + int *stub_template_size) { - struct elf32_arm_stub_hash_entry *stub_entry; - struct elf32_arm_link_hash_table *htab; - const insn_sequence *template; - int template_size; - int size; - int i; + const insn_sequence *template = NULL; + int template_size = 0, i; + unsigned int size; - /* Massage our args to the form they really have. */ - stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; - htab = (struct elf32_arm_link_hash_table *) in_arg; - - switch (stub_entry->stub_type) - { - case arm_stub_long_branch_any_any: - template = elf32_arm_stub_long_branch_any_any; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_any); - break; - case arm_stub_long_branch_v4t_arm_thumb: - template = elf32_arm_stub_long_branch_v4t_arm_thumb; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_arm_thumb); - break; - case arm_stub_long_branch_thumb_only: - template = elf32_arm_stub_long_branch_thumb_only; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_thumb_only); - break; - case arm_stub_long_branch_v4t_thumb_thumb: - template = elf32_arm_stub_long_branch_v4t_thumb_thumb; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_thumb); - break; - case arm_stub_long_branch_v4t_thumb_arm: - template = elf32_arm_stub_long_branch_v4t_thumb_arm; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_arm); - break; - case arm_stub_short_branch_v4t_thumb_arm: - template = elf32_arm_stub_short_branch_v4t_thumb_arm; - template_size = ARRAY_SIZE (elf32_arm_stub_short_branch_v4t_thumb_arm); - break; - case arm_stub_long_branch_any_arm_pic: - template = elf32_arm_stub_long_branch_any_arm_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_arm_pic); - break; - case arm_stub_long_branch_any_thumb_pic: - template = elf32_arm_stub_long_branch_any_thumb_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_thumb_pic); - break; - case arm_stub_long_branch_v4t_arm_thumb_pic: - template = elf32_arm_stub_long_branch_v4t_arm_thumb_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_arm_thumb_pic); - break; - case arm_stub_long_branch_v4t_thumb_arm_pic: - template = elf32_arm_stub_long_branch_v4t_thumb_arm_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_arm_pic); - break; - case arm_stub_long_branch_thumb_only_pic: - template = elf32_arm_stub_long_branch_thumb_only_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_thumb_only_pic); - break; - case arm_stub_long_branch_v4t_thumb_thumb_pic: - template = elf32_arm_stub_long_branch_v4t_thumb_thumb_pic; - template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_thumb_pic); - break; - default: - BFD_FAIL (); - return FALSE; - } + template = stub_definitions[stub_type].template; + template_size = stub_definitions[stub_type].template_size; size = 0; for (i = 0; i < template_size; i++) @@ -3409,9 +3598,7 @@ arm_size_one_stub (struct bfd_hash_entry *gen_entry, break; case ARM_TYPE: - size += 4; - break; - + case THUMB32_TYPE: case DATA_TYPE: size += 4; break; @@ -3422,6 +3609,37 @@ arm_size_one_stub (struct bfd_hash_entry *gen_entry, } } + if (stub_template) + *stub_template = template; + + if (stub_template_size) + *stub_template_size = template_size; + + return size; +} + +/* As above, but don't actually build the stub. 
Just bump offset so + we know stub section sizes. */ + +static bfd_boolean +arm_size_one_stub (struct bfd_hash_entry *gen_entry, + void * in_arg) +{ + struct elf32_arm_stub_hash_entry *stub_entry; + struct elf32_arm_link_hash_table *htab; + const insn_sequence *template; + int template_size, size; + + /* Massage our args to the form they really have. */ + stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; + htab = (struct elf32_arm_link_hash_table *) in_arg; + + BFD_ASSERT((stub_entry->stub_type > arm_stub_none) + && stub_entry->stub_type < ARRAY_SIZE(stub_definitions)); + + size = find_stub_size_and_template (stub_entry->stub_type, &template, + &template_size); + stub_entry->stub_size = size; stub_entry->stub_template = template; stub_entry->stub_template_size = template_size; @@ -3638,6 +3856,293 @@ group_sections (struct elf32_arm_link_hash_table *htab, #undef NEXT_SEC } +/* Comparison function for sorting/searching relocations relating to Cortex-A8 + erratum fix. */ + +static int +a8_reloc_compare (const void *a, const void *b) +{ + const struct a8_erratum_reloc *ra = a, *rb = b; + + if (ra->from < rb->from) + return -1; + else if (ra->from > rb->from) + return 1; + else + return 0; +} + +static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *, + const char *, char **); + +/* Helper function to scan code for sequences which might trigger the Cortex-A8 + branch/TLB erratum. Fill in the table described by A8_FIXES_P, + NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false + otherwise. */ + +static bfd_boolean +cortex_a8_erratum_scan (bfd *input_bfd, + struct bfd_link_info *info, + struct a8_erratum_fix **a8_fixes_p, + unsigned int *num_a8_fixes_p, + unsigned int *a8_fix_table_size_p, + struct a8_erratum_reloc *a8_relocs, + unsigned int num_a8_relocs) +{ + asection *section; + struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); + struct a8_erratum_fix *a8_fixes = *a8_fixes_p; + unsigned int num_a8_fixes = *num_a8_fixes_p; + unsigned int a8_fix_table_size = *a8_fix_table_size_p; + + for (section = input_bfd->sections; + section != NULL; + section = section->next) + { + bfd_byte *contents = NULL; + struct _arm_elf_section_data *sec_data; + unsigned int span; + bfd_vma base_vma; + + if (elf_section_type (section) != SHT_PROGBITS + || (elf_section_flags (section) & SHF_EXECINSTR) == 0 + || (section->flags & SEC_EXCLUDE) != 0 + || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS) + || (section->output_section == bfd_abs_section_ptr)) + continue; + + base_vma = section->output_section->vma + section->output_offset; + + if (elf_section_data (section)->this_hdr.contents != NULL) + contents = elf_section_data (section)->this_hdr.contents; + else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) + return TRUE; + + sec_data = elf32_arm_section_data (section); + + for (span = 0; span < sec_data->mapcount; span++) + { + unsigned int span_start = sec_data->map[span].vma; + unsigned int span_end = (span == sec_data->mapcount - 1) + ? section->size : sec_data->map[span + 1].vma; + unsigned int i; + char span_type = sec_data->map[span].type; + bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE; + + if (span_type != 't') + continue; + + /* Span is entirely within a single 4KB region: skip scanning. 
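*/

/* Editor's sketch (not part of the commit): the two page tests this
   scan depends on, pulled out as predicates.  Two addresses share a
   4KB page exactly when they agree above bit 11, and the erratum is
   only of interest for a 32-bit branch whose first halfword occupies
   the last halfword of a page (page offset 0xffe).  */

#include <stdint.h>

static int
same_4k_page (uint64_t a, uint64_t b)
{
  return (a & ~(uint64_t) 0xfff) == (b & ~(uint64_t) 0xfff);
}

static int
last_halfword_of_page (uint64_t addr)
{
  return (addr & 0xfff) == 0xffe;
}

/*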
*/ + if (((base_vma + span_start) & ~0xfff) + == ((base_vma + span_end) & ~0xfff)) + continue; + + /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where: + + * The opcode is BLX.W, BL.W, B.W, Bcc.W + * The branch target is in the same 4KB region as the + first half of the branch. + * The instruction before the branch is a 32-bit + length non-branch instruction. */ + for (i = span_start; i < span_end;) + { + unsigned int insn = bfd_getl16 (&contents[i]); + bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE; + bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch; + + if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) + insn_32bit = TRUE; + + if (insn_32bit) + { + /* Load the rest of the insn (in manual-friendly order). */ + insn = (insn << 16) | bfd_getl16 (&contents[i + 2]); + + /* Encoding T4: B.W. */ + is_b = (insn & 0xf800d000) == 0xf0009000; + /* Encoding T1: BL.W. */ + is_bl = (insn & 0xf800d000) == 0xf000d000; + /* Encoding T2: BLX.W. */ + is_blx = (insn & 0xf800d000) == 0xf000c000; + /* Encoding T3: B.W (not permitted in IT block). */ + is_bcc = (insn & 0xf800d000) == 0xf0008000 + && (insn & 0x07f00000) != 0x03800000; + } + + is_32bit_branch = is_b || is_bl || is_blx || is_bcc; + + if (((base_vma + i) & 0xfff) == 0xffe + && insn_32bit + && is_32bit_branch + && last_was_32bit + && ! last_was_branch) + { + bfd_signed_vma offset; + bfd_boolean force_target_arm = FALSE; + bfd_boolean force_target_thumb = FALSE; + bfd_vma target; + enum elf32_arm_stub_type stub_type = arm_stub_none; + struct a8_erratum_reloc key, *found; + + key.from = base_vma + i; + found = bsearch (&key, a8_relocs, num_a8_relocs, + sizeof (struct a8_erratum_reloc), + &a8_reloc_compare); + + if (found) + { + char *error_message = NULL; + struct elf_link_hash_entry *entry; + + /* We don't care about the error returned from this + function, only if there is glue or not. */ + entry = find_thumb_glue (info, found->sym_name, + &error_message); + + if (entry) + found->non_a8_stub = TRUE; + + if (found->r_type == R_ARM_THM_CALL + && found->st_type != STT_ARM_TFUNC) + force_target_arm = TRUE; + else if (found->r_type == R_ARM_THM_CALL + && found->st_type == STT_ARM_TFUNC) + force_target_thumb = TRUE; + } + + /* Check if we have an offending branch instruction. */ + + if (found && found->non_a8_stub) + /* We've already made a stub for this instruction, e.g. + it's a long branch or a Thumb->ARM stub. Assume that + stub will suffice to work around the A8 erratum (see + setting of always_after_branch above). */ + ; + else if (is_bcc) + { + offset = (insn & 0x7ff) << 1; + offset |= (insn & 0x3f0000) >> 4; + offset |= (insn & 0x2000) ? 0x40000 : 0; + offset |= (insn & 0x800) ? 0x80000 : 0; + offset |= (insn & 0x4000000) ? 0x100000 : 0; + if (offset & 0x100000) + offset |= ~ ((bfd_signed_vma) 0xfffff); + stub_type = arm_stub_a8_veneer_b_cond; + } + else if (is_b || is_bl || is_blx) + { + int s = (insn & 0x4000000) != 0; + int j1 = (insn & 0x2000) != 0; + int j2 = (insn & 0x800) != 0; + int i1 = !(j1 ^ s); + int i2 = !(j2 ^ s); + + offset = (insn & 0x7ff) << 1; + offset |= (insn & 0x3ff0000) >> 4; + offset |= i2 << 22; + offset |= i1 << 23; + offset |= s << 24; + if (offset & 0x1000000) + offset |= ~ ((bfd_signed_vma) 0xffffff); + + if (is_blx) + offset &= ~ ((bfd_signed_vma) 3); + + stub_type = is_blx ? arm_stub_a8_veneer_blx : + is_bl ? 
arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b; + } + + if (stub_type != arm_stub_none) + { + bfd_vma pc_for_insn = base_vma + i + 4; + + /* The original instruction is a BL, but the target is + an ARM instruction. If we were not making a stub, + the BL would have been converted to a BLX. Use the + BLX stub instead in that case. */ + if (htab->use_blx && force_target_arm + && stub_type == arm_stub_a8_veneer_bl) + { + stub_type = arm_stub_a8_veneer_blx; + is_blx = TRUE; + is_bl = FALSE; + } + /* Conversely, if the original instruction was + BLX but the target is Thumb mode, use the BL + stub. */ + else if (force_target_thumb + && stub_type == arm_stub_a8_veneer_blx) + { + stub_type = arm_stub_a8_veneer_bl; + is_blx = FALSE; + is_bl = TRUE; + } + + if (is_blx) + pc_for_insn &= ~ ((bfd_vma) 3); + + /* If we found a relocation, use the proper destination, + not the offset in the (unrelocated) instruction. + Note this is always done if we switched the stub type + above. */ + if (found) + offset = + (bfd_signed_vma) (found->destination - pc_for_insn); + + target = pc_for_insn + offset; + + /* The BLX stub is ARM-mode code. Adjust the offset to + take the different PC value (+8 instead of +4) into + account. */ + if (stub_type == arm_stub_a8_veneer_blx) + offset += 4; + + if (((base_vma + i) & ~0xfff) == (target & ~0xfff)) + { + char *stub_name; + + if (num_a8_fixes == a8_fix_table_size) + { + a8_fix_table_size *= 2; + a8_fixes = bfd_realloc (a8_fixes, + sizeof (struct a8_erratum_fix) + * a8_fix_table_size); + } + + stub_name = bfd_malloc (8 + 1 + 8 + 1); + if (stub_name != NULL) + sprintf (stub_name, "%x:%x", section->id, i); + + a8_fixes[num_a8_fixes].input_bfd = input_bfd; + a8_fixes[num_a8_fixes].section = section; + a8_fixes[num_a8_fixes].offset = i; + a8_fixes[num_a8_fixes].addend = offset; + a8_fixes[num_a8_fixes].orig_insn = insn; + a8_fixes[num_a8_fixes].stub_name = stub_name; + a8_fixes[num_a8_fixes].stub_type = stub_type; + + num_a8_fixes++; + } + } + } + + i += insn_32bit ? 4 : 2; + last_was_32bit = insn_32bit; + last_was_branch = is_32bit_branch; + } + } + + if (elf_section_data (section)->this_hdr.contents == NULL) + free (contents); + } + + *a8_fixes_p = a8_fixes; + *num_a8_fixes_p = num_a8_fixes; + *a8_fix_table_size_p = a8_fix_table_size; + + return FALSE; +} + /* Determine and set the size of the stub section for a final link. The basic idea here is to examine all the relocations looking for @@ -3656,6 +4161,18 @@ elf32_arm_size_stubs (bfd *output_bfd, bfd_boolean stubs_always_after_branch; bfd_boolean stub_changed = 0; struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); + struct a8_erratum_fix *a8_fixes = NULL; + unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10; + struct a8_erratum_reloc *a8_relocs = NULL; + unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i; + + if (htab->fix_cortex_a8) + { + a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix) + * a8_fix_table_size); + a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc) + * a8_reloc_table_size); + } /* Propagate mach to stub bfd, because it may not have been finalized when we created stub_bfd. */ @@ -3667,6 +4184,13 @@ elf32_arm_size_stubs (bfd *output_bfd, htab->add_stub_section = add_stub_section; htab->layout_sections_again = layout_sections_again; stubs_always_after_branch = group_size < 0; + + /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page + as the first half of a 32-bit branch straddling two 4K pages. 
This is a + crude way of enforcing that. */ + if (htab->fix_cortex_a8) + stubs_always_after_branch = 1; + if (group_size < 0) stub_group_size = -group_size; else @@ -3694,6 +4218,8 @@ elf32_arm_size_stubs (bfd *output_bfd, unsigned int bfd_indx; asection *stub_sec; + num_a8_fixes = 0; + for (input_bfd = info->input_bfds, bfd_indx = 0; input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++) @@ -3702,6 +4228,8 @@ elf32_arm_size_stubs (bfd *output_bfd, asection *section; Elf_Internal_Sym *local_syms = NULL; + num_a8_relocs = 0; + /* We'll need the symbol table in a second. */ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; if (symtab_hdr->sh_info == 0) @@ -3750,6 +4278,7 @@ elf32_arm_size_stubs (bfd *output_bfd, char *stub_name; const asection *id_sec; unsigned char st_type; + bfd_boolean created_stub = FALSE; r_type = ELF32_R_TYPE (irela->r_info); r_indx = ELF32_R_SYM (irela->r_info); @@ -3763,9 +4292,14 @@ elf32_arm_size_stubs (bfd *output_bfd, goto error_ret_free_local; } - /* Only look for stubs on call instructions. */ + /* Only look for stubs on branch instructions. */ if ((r_type != (unsigned int) R_ARM_CALL) - && (r_type != (unsigned int) R_ARM_THM_CALL)) + && (r_type != (unsigned int) R_ARM_THM_CALL) + && (r_type != (unsigned int) R_ARM_JUMP24) + && (r_type != (unsigned int) R_ARM_THM_JUMP19) + && (r_type != (unsigned int) R_ARM_THM_XPC22) + && (r_type != (unsigned int) R_ARM_THM_JUMP24) + && (r_type != (unsigned int) R_ARM_PLT32)) continue; /* Now determine the call target, its name, value, @@ -3827,17 +4361,52 @@ elf32_arm_size_stubs (bfd *output_bfd, { sym_sec = hash->root.root.u.def.section; sym_value = hash->root.root.u.def.value; - if (sym_sec->output_section != NULL) + + struct elf32_arm_link_hash_table *globals = + elf32_arm_hash_table (info); + + /* For a destination in a shared library, + use the PLT stub as target address to + decide whether a branch stub is + needed. */ + if (globals->splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) -1) + { + sym_sec = globals->splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + sym_sec->output_section->vma); + } + else if (sym_sec->output_section != NULL) destination = (sym_value + irela->r_addend + sym_sec->output_offset + sym_sec->output_section->vma); } - else if (hash->root.root.type == bfd_link_hash_undefweak - || hash->root.root.type == bfd_link_hash_undefined) - /* For a shared library, these will need a PLT stub, - which is treated separately. - For absolute code, they cannot be handled. */ - continue; + else if ((hash->root.root.type == bfd_link_hash_undefined) + || (hash->root.root.type == bfd_link_hash_undefweak)) + { + /* For a shared library, use the PLT stub as + target address to decide whether a long + branch stub is needed. + For absolute code, they cannot be handled. */ + struct elf32_arm_link_hash_table *globals = + elf32_arm_hash_table (info); + + if (globals->splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) -1) + { + sym_sec = globals->splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + sym_sec->output_section->vma); + } + else + continue; + } else { bfd_set_error (bfd_error_bad_value); @@ -3847,79 +4416,146 @@ elf32_arm_size_stubs (bfd *output_bfd, sym_name = hash->root.root.root.string; } - /* Determine what (if any) linker stub is needed. 
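*/

/* Editor's note (not part of the commit): the replacement code below
   wraps stub creation in a "do { ... } while (0)" block purely so
   that "break" can abandon stub creation early while the Cortex-A8
   reloc recording that follows the block still runs.  A minimal shape
   of the idiom:  */

#include <stdio.h>

static void
idiom_demo (int need_stub, int already_have_stub)
{
  do
    {
      if (!need_stub)
        break;                  /* Nothing to do ...  */
      if (already_have_stub)
        break;                  /* ... or already done.  */
      puts ("create stub");
    }
  while (0);

  puts ("record Cortex-A8 candidate either way");
}

/*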
*/ - stub_type = arm_type_of_stub (info, section, irela, st_type, - hash, destination, sym_sec, - input_bfd, sym_name); - if (stub_type == arm_stub_none) - continue; - - /* Support for grouping stub sections. */ - id_sec = htab->stub_group[section->id].link_sec; - - /* Get the name of this stub. */ - stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela); - if (!stub_name) - goto error_ret_free_internal; - - stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, - stub_name, - FALSE, FALSE); - if (stub_entry != NULL) - { - /* The proper stub has already been created. */ - free (stub_name); - continue; - } - - stub_entry = elf32_arm_add_stub (stub_name, section, htab); - if (stub_entry == NULL) - { - free (stub_name); - goto error_ret_free_internal; - } - - stub_entry->target_value = sym_value; - stub_entry->target_section = sym_sec; - stub_entry->stub_type = stub_type; - stub_entry->h = hash; - stub_entry->st_type = st_type; - - if (sym_name == NULL) - sym_name = "unnamed"; - stub_entry->output_name - = bfd_alloc (htab->stub_bfd, - sizeof (THUMB2ARM_GLUE_ENTRY_NAME) - + strlen (sym_name)); - if (stub_entry->output_name == NULL) + do { - free (stub_name); - goto error_ret_free_internal; - } + /* Determine what (if any) linker stub is needed. */ + stub_type = arm_type_of_stub (info, section, irela, + st_type, hash, + destination, sym_sec, + input_bfd, sym_name); + if (stub_type == arm_stub_none) + break; + + /* Support for grouping stub sections. */ + id_sec = htab->stub_group[section->id].link_sec; + + /* Get the name of this stub. */ + stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, + irela); + if (!stub_name) + goto error_ret_free_internal; + + /* We've either created a stub for this reloc already, + or we are about to. */ + created_stub = TRUE; + + stub_entry = arm_stub_hash_lookup + (&htab->stub_hash_table, stub_name, + FALSE, FALSE); + if (stub_entry != NULL) + { + /* The proper stub has already been created. */ + free (stub_name); + break; + } - /* For historical reasons, use the existing names for - ARM-to-Thumb and Thumb-to-ARM stubs. */ - if (r_type == (unsigned int) R_ARM_THM_CALL - && st_type != STT_ARM_TFUNC) - sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, - sym_name); - else if (r_type == (unsigned int) R_ARM_CALL - && st_type == STT_ARM_TFUNC) - sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, - sym_name); - else - sprintf (stub_entry->output_name, STUB_ENTRY_NAME, - sym_name); + stub_entry = elf32_arm_add_stub (stub_name, section, + htab); + if (stub_entry == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } - stub_changed = TRUE; + stub_entry->target_value = sym_value; + stub_entry->target_section = sym_sec; + stub_entry->stub_type = stub_type; + stub_entry->h = hash; + stub_entry->st_type = st_type; + + if (sym_name == NULL) + sym_name = "unnamed"; + stub_entry->output_name + = bfd_alloc (htab->stub_bfd, + sizeof (THUMB2ARM_GLUE_ENTRY_NAME) + + strlen (sym_name)); + if (stub_entry->output_name == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } + + /* For historical reasons, use the existing names for + ARM-to-Thumb and Thumb-to-ARM stubs. 
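*/

/* Editor's sketch (not part of the commit): the growth discipline used
   for the a8_relocs and a8_fixes tables below, shown with plain
   realloc.  The capacity doubles when full (it must start non-zero;
   the patch starts both tables at 10), so appends are amortized O(1).
   Allocation failure is not checked, matching the patch.  */

#include <stdlib.h>

struct vec { void *data; size_t elt_size, used, cap; };

static void *
vec_append_slot (struct vec *v)
{
  if (v->used == v->cap)
    {
      v->cap *= 2;
      v->data = realloc (v->data, v->elt_size * v->cap);
    }
  return (char *) v->data + v->elt_size * v->used++;
}

/*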
*/ + if ( ((r_type == (unsigned int) R_ARM_THM_CALL) + || (r_type == (unsigned int) R_ARM_THM_JUMP24)) + && st_type != STT_ARM_TFUNC) + sprintf (stub_entry->output_name, + THUMB2ARM_GLUE_ENTRY_NAME, sym_name); + else if ( ((r_type == (unsigned int) R_ARM_CALL) + || (r_type == (unsigned int) R_ARM_JUMP24)) + && st_type == STT_ARM_TFUNC) + sprintf (stub_entry->output_name, + ARM2THUMB_GLUE_ENTRY_NAME, sym_name); + else + sprintf (stub_entry->output_name, STUB_ENTRY_NAME, + sym_name); + + stub_changed = TRUE; + } + while (0); + + /* Look for relocations which might trigger Cortex-A8 + erratum. */ + if (htab->fix_cortex_a8 + && (r_type == (unsigned int) R_ARM_THM_JUMP24 + || r_type == (unsigned int) R_ARM_THM_JUMP19 + || r_type == (unsigned int) R_ARM_THM_CALL + || r_type == (unsigned int) R_ARM_THM_XPC22)) + { + bfd_vma from = section->output_section->vma + + section->output_offset + + irela->r_offset; + + if ((from & 0xfff) == 0xffe) + { + /* Found a candidate. Note we haven't checked the + destination is within 4K here: if we do so (and + don't create an entry in a8_relocs) we can't tell + that a branch should have been relocated when + scanning later. */ + if (num_a8_relocs == a8_reloc_table_size) + { + a8_reloc_table_size *= 2; + a8_relocs = bfd_realloc (a8_relocs, + sizeof (struct a8_erratum_reloc) + * a8_reloc_table_size); + } + + a8_relocs[num_a8_relocs].from = from; + a8_relocs[num_a8_relocs].destination = destination; + a8_relocs[num_a8_relocs].r_type = r_type; + a8_relocs[num_a8_relocs].st_type = st_type; + a8_relocs[num_a8_relocs].sym_name = sym_name; + a8_relocs[num_a8_relocs].non_a8_stub = created_stub; + + num_a8_relocs++; + } + } } - /* We're done with the internal relocs, free them. */ - if (elf_section_data (section)->relocs == NULL) - free (internal_relocs); + /* We're done with the internal relocs, free them. */ + if (elf_section_data (section)->relocs == NULL) + free (internal_relocs); + } + + if (htab->fix_cortex_a8) + { + /* Sort relocs which might apply to Cortex-A8 erratum. */ + qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc), + &a8_reloc_compare); + + /* Scan for branches which might trigger Cortex-A8 erratum. */ + if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes, + &num_a8_fixes, &a8_fix_table_size, + a8_relocs, num_a8_relocs) != 0) + goto error_ret_free_local; } } + if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes) + stub_changed = TRUE; + if (!stub_changed) break; @@ -3928,15 +4564,90 @@ elf32_arm_size_stubs (bfd *output_bfd, for (stub_sec = htab->stub_bfd->sections; stub_sec != NULL; stub_sec = stub_sec->next) - stub_sec->size = 0; + { + /* Ignore non-stub sections. */ + if (!strstr (stub_sec->name, STUB_SUFFIX)) + continue; + + stub_sec->size = 0; + } bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab); + /* Add Cortex-A8 erratum veneers to stub section sizes too. */ + if (htab->fix_cortex_a8) + for (i = 0; i < num_a8_fixes; i++) + { + stub_sec = elf32_arm_create_or_find_stub_sec (NULL, + a8_fixes[i].section, htab); + + if (stub_sec == NULL) + goto error_ret_free_local; + + stub_sec->size + += find_stub_size_and_template (a8_fixes[i].stub_type, NULL, + NULL); + } + + /* Ask the linker to do its stuff. */ (*htab->layout_sections_again) (); stub_changed = FALSE; + prev_num_a8_fixes = num_a8_fixes; } + /* Add stubs for Cortex-A8 erratum fixes now. 
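*/

/* Editor's note (not part of the commit): the names looked up below
   were created during scanning with malloc (8 + 1 + 8 + 1) and
   sprintf ("%x:%x", ...): two 32-bit values print as at most eight
   hex digits each, plus the colon and the trailing NUL.  A
   self-contained equivalent:  */

#include <stdio.h>
#include <stdlib.h>

static char *
a8_stub_name (unsigned int section_id, unsigned int offset)
{
  char *name = malloc (8 + 1 + 8 + 1);

  if (name != NULL)
    sprintf (name, "%x:%x", section_id, offset);
  return name;
}

/*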
*/ + if (htab->fix_cortex_a8) + { + for (i = 0; i < num_a8_fixes; i++) + { + struct elf32_arm_stub_hash_entry *stub_entry; + char *stub_name = a8_fixes[i].stub_name; + asection *section = a8_fixes[i].section; + unsigned int section_id = a8_fixes[i].section->id; + asection *link_sec = htab->stub_group[section_id].link_sec; + asection *stub_sec = htab->stub_group[section_id].stub_sec; + const insn_sequence *template; + int template_size, size = 0; + + stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, + TRUE, FALSE); + if (stub_entry == NULL) + { + (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), + section->owner, + stub_name); + return FALSE; + } + + stub_entry->stub_sec = stub_sec; + stub_entry->stub_offset = 0; + stub_entry->id_sec = link_sec; + stub_entry->stub_type = a8_fixes[i].stub_type; + stub_entry->target_section = a8_fixes[i].section; + stub_entry->target_value = a8_fixes[i].offset; + stub_entry->target_addend = a8_fixes[i].addend; + stub_entry->orig_insn = a8_fixes[i].orig_insn; + stub_entry->st_type = STT_ARM_TFUNC; + + size = find_stub_size_and_template (a8_fixes[i].stub_type, &template, + &template_size); + + stub_entry->stub_size = size; + stub_entry->stub_template = template; + stub_entry->stub_template_size = template_size; + } + + /* Stash the Cortex-A8 erratum fix array for use later in + elf32_arm_write_section(). */ + htab->a8_erratum_fixes = a8_fixes; + htab->num_a8_erratum_fixes = num_a8_fixes; + } + else + { + htab->a8_erratum_fixes = NULL; + htab->num_a8_erratum_fixes = 0; + } return TRUE; error_ret_free_local: @@ -4126,7 +4837,16 @@ arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * na bfd_byte * contents; if (size == 0) - return; + { + /* Do not include empty glue sections in the output. */ + if (abfd != NULL) + { + s = bfd_get_section_by_name (abfd, name); + if (s != NULL) + s->flags |= SEC_EXCLUDE; + } + return; + } BFD_ASSERT (abfd != NULL); @@ -4238,86 +4958,6 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info, return myh; } -static void -record_thumb_to_arm_glue (struct bfd_link_info *link_info, - struct elf_link_hash_entry *h) -{ - const char *name = h->root.root.string; - asection *s; - char *tmp_name; - struct elf_link_hash_entry *myh; - struct bfd_link_hash_entry *bh; - struct elf32_arm_link_hash_table *hash_table; - bfd_vma val; - - hash_table = elf32_arm_hash_table (link_info); - - BFD_ASSERT (hash_table != NULL); - BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); - - s = bfd_get_section_by_name - (hash_table->bfd_of_glue_owner, THUMB2ARM_GLUE_SECTION_NAME); - - BFD_ASSERT (s != NULL); - - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); - - BFD_ASSERT (tmp_name); - - sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name); - - myh = elf_link_hash_lookup - (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); - - if (myh != NULL) - { - /* We've already seen this guy. */ - free (tmp_name); - return; - } - - /* The only trick here is using hash_table->thumb_glue_size as the value. - Even though the section isn't allocated yet, this is where we will be - putting it. The +1 on the value marks that the stub has not been - output yet - not that it is a Thumb function. */ - bh = NULL; - val = hash_table->thumb_glue_size + 1; - _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, - tmp_name, BSF_GLOBAL, s, val, - NULL, TRUE, FALSE, &bh); - - /* If we mark it 'Thumb', the disassembler will do a better job. 
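*/

/* Editor's note (not part of the commit): this function is being
   removed because Thumb->ARM glue is now produced by the generic stub
   machinery extended above (R_ARM_THM_JUMP24 now reaches
   arm_type_of_stub).  The 'Thumb' marking it refers to is the usual
   convention, also visible in arm_build_one_stub: a Thumb function
   symbol's address has bit 0 set.  */

#include <stdint.h>

static uint32_t
thumb_entry_address (uint32_t sym_value, int is_thumb_func)
{
  return is_thumb_func ? (sym_value | 1u) : sym_value;
}

/*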
*/ - myh = (struct elf_link_hash_entry *) bh; - myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC); - myh->forced_local = 1; - - free (tmp_name); - -#define CHANGE_TO_ARM "__%s_change_to_arm" -#define BACK_FROM_ARM "__%s_back_from_arm" - - /* Allocate another symbol to mark where we switch to Arm mode. */ - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (CHANGE_TO_ARM) + 1); - - BFD_ASSERT (tmp_name); - - sprintf (tmp_name, CHANGE_TO_ARM, name); - - bh = NULL; - val = hash_table->thumb_glue_size + 4, - _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, - tmp_name, BSF_LOCAL, s, val, - NULL, TRUE, FALSE, &bh); - - free (tmp_name); - - s->size += THUMB2ARM_GLUE_SIZE; - hash_table->thumb_glue_size += THUMB2ARM_GLUE_SIZE; -} - - /* Allocate space for ARMv4 BX veneers. */ static void @@ -4528,11 +5168,9 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, return val; } -/* Note: we do not include the flag SEC_LINKER_CREATED, as that - would prevent elf_link_input_bfd() from processing the contents - of the section. */ #define ARM_GLUE_SECTION_FLAGS \ - (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE | SEC_READONLY) + (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ + | SEC_READONLY | SEC_LINKER_CREATED) /* Create a fake section for use by the ARM backend of the linker. */ @@ -4571,10 +5209,6 @@ bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, if (info->relocatable) return TRUE; - /* Linker stubs don't need glue. */ - if (!strcmp (abfd->filename, "linker stubs")) - return TRUE; - return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) @@ -4693,9 +5327,6 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, /* These are the only relocation types we care about. */ if ( r_type != R_ARM_PC24 - && r_type != R_ARM_PLT32 - && r_type != R_ARM_JUMP24 - && r_type != R_ARM_THM_JUMP24 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2)) continue; @@ -4747,8 +5378,6 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, switch (r_type) { case R_ARM_PC24: - case R_ARM_PLT32: - case R_ARM_JUMP24: /* This one is a call from arm code. We need to look up the target of the call. If it is a thumb target, we insert glue. */ @@ -4756,16 +5385,6 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, record_arm_to_thumb_glue (link_info, h); break; - case R_ARM_THM_JUMP24: - /* This one is a call from thumb code. We look - up the target of the call. If it is not a thumb - target, we insert glue. */ - if (ELF_ST_TYPE (h->type) != STT_ARM_TFUNC - && !(globals->use_blx && r_type == R_ARM_THM_CALL) - && h->root.type != bfd_link_hash_undefweak) - record_thumb_to_arm_glue (link_info, h); - break; - default: abort (); } @@ -4846,6 +5465,28 @@ bfd_elf32_arm_init_maps (bfd *abfd) } +/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly + say what they wanted. */ + +void +bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); + + if (globals->fix_cortex_a8 == -1) + { + /* Turn on Cortex-A8 erratum workaround for ARMv7-A. 
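*/

/* Editor's sketch (not part of the commit): the tri-state convention
   assumed by the test above.  The linker front end presumably leaves
   fix_cortex_a8 at -1 ("auto") unless the user passed an explicit
   option, so auto-detection only runs in that case; an unrecorded
   architecture profile (0) is treated like 'A' to stay safe.  */

static int
resolve_a8_fix (int user_setting /* -1 auto, 0 off, 1 on */,
                int cpu_arch_is_v7, int profile)
{
  if (user_setting != -1)
    return user_setting;
  return cpu_arch_is_v7 && (profile == 'A' || profile == 0);
}

/*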
*/ + if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7 + && (out_attr[Tag_CPU_arch_profile].i == 'A' + || out_attr[Tag_CPU_arch_profile].i == 0)) + globals->fix_cortex_a8 = 1; + else + globals->fix_cortex_a8 = 0; + } +} + + void bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) { @@ -5455,7 +6096,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, int use_blx, bfd_arm_vfp11_fix vfp11_fix, int no_enum_warn, int no_wchar_warn, - int pic_veneer) + int pic_veneer, int fix_cortex_a8) { struct elf32_arm_link_hash_table *globals; @@ -5477,6 +6118,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, globals->use_blx |= use_blx; globals->vfp11_fix = vfp11_fix; globals->pic_veneer = pic_veneer; + globals->fix_cortex_a8 = fix_cortex_a8; BFD_ASSERT (is_arm_elf (output_bfd)); elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; @@ -6111,7 +6753,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, far away, in which case a long branch stub should be inserted. */ if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI - && r_type != R_ARM_CALL) + && r_type != R_ARM_CALL + && r_type != R_ARM_JUMP24 + && r_type != R_ARM_PLT32) && h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1) @@ -6279,7 +6923,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, input_bfd, h ? h->root.root.string : "(local)"); } - else if (r_type != R_ARM_CALL) + else if (r_type == R_ARM_PC24) { /* Check for Arm calling Thumb function. */ if (sym_flags == STT_ARM_TFUNC) @@ -6297,7 +6941,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Check if a stub has to be inserted because the destination is too far or we are changing mode. */ - if (r_type == R_ARM_CALL) + if ( r_type == R_ARM_CALL + || r_type == R_ARM_JUMP24 + || r_type == R_ARM_PLT32) { /* If the call goes through a PLT entry, make sure to check distance to the right destination address. */ @@ -6316,7 +6962,11 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET - || sym_flags == STT_ARM_TFUNC) + || ((sym_flags == STT_ARM_TFUNC) + && (((r_type == R_ARM_CALL) && !globals->use_blx) + || (r_type == R_ARM_JUMP24) + || (r_type == R_ARM_PLT32) )) + ) { /* The target is out of reach, so redirect the branch to the local stub for this function. */ @@ -6363,8 +7013,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, signed_addend >>= howto->rightshift; /* A branch to an undefined weak symbol is turned into a jump to - the next instruction. */ - if (h && h->root.type == bfd_link_hash_undefweak) + the next instruction unless a PLT entry will be created. */ + if (h && h->root.type == bfd_link_hash_undefweak + && !(splt != NULL && h->plt.offset != (bfd_vma) -1)) { value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000) | 0x0affffff; @@ -6381,16 +7032,17 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, value = (signed_addend & howto->dst_mask) | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask)); - /* Set the H bit in the BLX instruction. */ - if (sym_flags == STT_ARM_TFUNC) - { - if (addend) - value |= (1 << 24); - else - value &= ~(bfd_vma)(1 << 24); - } if (r_type == R_ARM_CALL) { + /* Set the H bit in the BLX instruction. */ + if (sym_flags == STT_ARM_TFUNC) + { + if (addend) + value |= (1 << 24); + else + value &= ~(bfd_vma)(1 << 24); + } + /* Select the correct instruction (BL or BLX). 
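*/

/* Editor's sketch (not part of the commit): why bit 24 (H) is set or
   cleared above.  An ARM-state BLX to Thumb code counts its 24-bit
   immediate in words, but Thumb destinations need only halfword
   alignment, so H supplies bit 1 of the branch target.  */

#include <stdint.h>

static uint32_t
arm_blx_set_h_bit (uint32_t insn, int target_bit1)
{
  if (target_bit1)
    return insn | (1u << 24);    /* Destination is word + 2.  */
  return insn & ~(1u << 24);     /* Destination is word aligned.  */
}

/*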
*/ /* Only if we are not handling a BL to a stub. In this case, mode switching is performed by the stub. */ @@ -6526,6 +7178,40 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, return bfd_reloc_ok; } + case R_ARM_THM_PC8: + /* PR 10073: This reloc is not generated by the GNU toolchain, + but it is supported for compatibility with third party libraries + generated by other compilers, specifically the ARM/IAR. */ + { + bfd_vma insn; + bfd_signed_vma relocation; + + insn = bfd_get_16 (input_bfd, hit_data); + + if (globals->use_rel) + addend = (insn & 0x00ff) << 2; + + relocation = value + addend; + relocation -= (input_section->output_section->vma + + input_section->output_offset + + rel->r_offset); + + value = abs (relocation); + + /* We do not check for overflow of this reloc. Although strictly + speaking this is incorrect, it appears to be necessary in order + to work with IAR generated relocs. Since GCC and GAS do not + generate R_ARM_THM_PC8 relocs, the lack of a check should not be + a problem for them. */ + value &= 0x3fc; + + insn = (insn & 0xff00) | (value >> 2); + + bfd_put_16 (input_bfd, insn, hit_data); + + return bfd_reloc_ok; + } + case R_ARM_THM_PC12: /* Corresponds to: ldr.w reg, [pc, #offset]. */ { @@ -6634,7 +7320,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Convert BL to BLX. */ lower_insn = (lower_insn & ~0x1000) | 0x0800; } - else if (r_type != R_ARM_THM_CALL) + else if (( r_type != R_ARM_THM_CALL) + && (r_type != R_ARM_THM_JUMP24)) { if (elf32_thumb_to_arm_stub (info, sym_name, input_bfd, output_bfd, input_section, @@ -6671,7 +7358,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, *unresolved_reloc_p = FALSE; } - if (r_type == R_ARM_THM_CALL) + if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) { /* Check if a stub has to be inserted because the destination is too far. */ @@ -6691,7 +7378,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, (thumb2 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) - || ((sym_flags != STT_ARM_TFUNC) && !globals->use_blx)) + || ((sym_flags != STT_ARM_TFUNC) + && (((r_type == R_ARM_THM_CALL) && !globals->use_blx) + || r_type == R_ARM_THM_JUMP24))) { /* The target is out of reach or we are changing modes, so redirect the branch to the local stub for this @@ -6705,7 +7394,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + stub_entry->stub_sec->output_section->vma); /* If this call becomes a call to Arm, force BLX. */ - if (globals->use_blx) + if (globals->use_blx && (r_type == R_ARM_THM_CALL)) { if ((stub_entry && !arm_stub_is_thumb (stub_entry->stub_type)) @@ -8214,6 +8903,307 @@ elf32_arm_relocate_section (bfd * output_bfd, return TRUE; } +/* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero, + adds the edit to the start of the list. (The list must be built in order of + ascending INDEX: the function's callers are primarily responsible for + maintaining that condition). 
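*/

/* Editor's sketch (not part of the commit): the head/tail list
   discipline in isolation.  Keeping a tail pointer makes each append
   O(1) while the list is built in ascending INDEX order; index 0
   prepends, which is why both ends must be maintained.  */

#include <stddef.h>

struct node { struct node *next; };

static void
list_append (struct node **head, struct node **tail, struct node *n)
{
  n->next = NULL;
  if (*tail)
    (*tail)->next = n;
  *tail = n;
  if (!*head)
    *head = n;
}

static void
list_prepend (struct node **head, struct node **tail, struct node *n)
{
  n->next = *head;
  if (!*tail)
    *tail = n;
  *head = n;
}

/*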
@@ -8214,6 +8903,307 @@ elf32_arm_relocate_section (bfd * output_bfd,
 
   return TRUE;
 }
 
+/* Add a new unwind edit to the list described by HEAD, TAIL.  If INDEX is
+   zero, adds the edit to the start of the list.  (The list must be built
+   in order of ascending INDEX: the function's callers are primarily
+   responsible for maintaining that condition.)  */
+
+static void
+add_unwind_table_edit (arm_unwind_table_edit **head,
+		       arm_unwind_table_edit **tail,
+		       arm_unwind_edit_type type,
+		       asection *linked_section,
+		       unsigned int index)
+{
+  arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
+
+  new_edit->type = type;
+  new_edit->linked_section = linked_section;
+  new_edit->index = index;
+
+  if (index > 0)
+    {
+      new_edit->next = NULL;
+
+      if (*tail)
+	(*tail)->next = new_edit;
+
+      (*tail) = new_edit;
+
+      if (!*head)
+	(*head) = new_edit;
+    }
+  else
+    {
+      new_edit->next = *head;
+
+      if (!*tail)
+	*tail = new_edit;
+
+      *head = new_edit;
+    }
+}
+
+static _arm_elf_section_data *get_arm_elf_section_data (asection *);
+
+/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
+static void
+adjust_exidx_size (asection *exidx_sec, int adjust)
+{
+  asection *out_sec;
+
+  if (!exidx_sec->rawsize)
+    exidx_sec->rawsize = exidx_sec->size;
+
+  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
+  out_sec = exidx_sec->output_section;
+  /* Adjust the size of the output section too.  */
+  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
+}
+
+/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
+static void
+insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
+{
+  struct _arm_elf_section_data *exidx_arm_data;
+
+  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
+  add_unwind_table_edit (
+    &exidx_arm_data->u.exidx.unwind_edit_list,
+    &exidx_arm_data->u.exidx.unwind_edit_tail,
+    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
+
+  adjust_exidx_size (exidx_sec, 8);
+}
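
insert_cantunwind_after queues an 8-byte table entry whose second word is the literal 0x1. For reference, the classification of .ARM.exidx second words that the scanning code below relies on, as a self-contained sketch (the enum and function names are illustrative, not BFD's):

    #include <stdint.h>

    /* Each .ARM.exidx entry is two 32-bit words: a prel31 offset to the
       function it covers, then either the literal 0x1 (EXIDX_CANTUNWIND),
       an inlined unwind opcode sequence (high bit set), or a prel31
       offset to an .ARM.extab entry (high bit clear).  */
    enum exidx_kind
    {
      EXIDX_CANTUNWIND_ENTRY,   /* Region cannot be unwound.  */
      EXIDX_INLINE_ENTRY,       /* Unwind opcodes inlined in the index.  */
      EXIDX_EXTAB_REF           /* Reference into .ARM.extab.  */
    };

    static enum exidx_kind
    classify_exidx_second_word (uint32_t second_word)
    {
      if (second_word == 0x1)
        return EXIDX_CANTUNWIND_ENTRY;
      if (second_word & 0x80000000u)
        return EXIDX_INLINE_ENTRY;
      return EXIDX_EXTAB_REF;
    }
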
+/* Scan .ARM.exidx tables, and create a list describing edits which should be
+   made to those tables, such that:
+
+     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
+     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
+	codes which have been inlined into the index).
+
+   The edits are applied when the tables are written
+   (in elf32_arm_write_section).  */
+
+bfd_boolean
+elf32_arm_fix_exidx_coverage (asection **text_section_order,
+			      unsigned int num_text_sections,
+			      struct bfd_link_info *info)
+{
+  bfd *inp;
+  unsigned int last_second_word = 0, i;
+  asection *last_exidx_sec = NULL;
+  asection *last_text_sec = NULL;
+  int last_unwind_type = -1;
+
+  /* Walk over all EXIDX sections, and create backlinks from the
+     corresponding text sections.  */
+  for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
+    {
+      asection *sec;
+
+      for (sec = inp->sections; sec != NULL; sec = sec->next)
+	{
+	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
+	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
+
+	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
+	    continue;
+
+	  if (elf_sec->linked_to)
+	    {
+	      Elf_Internal_Shdr *linked_hdr
+		= &elf_section_data (elf_sec->linked_to)->this_hdr;
+	      struct _arm_elf_section_data *linked_sec_arm_data
+		= get_arm_elf_section_data (linked_hdr->bfd_section);
+
+	      if (linked_sec_arm_data == NULL)
+		continue;
+
+	      /* Link this .ARM.exidx section back from the text section it
+		 describes.  */
+	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
+	    }
+	}
+    }
+
+  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
+     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes), and
+     add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
+
+  for (i = 0; i < num_text_sections; i++)
+    {
+      asection *sec = text_section_order[i];
+      asection *exidx_sec;
+      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
+      struct _arm_elf_section_data *exidx_arm_data;
+      bfd_byte *contents = NULL;
+      int deleted_exidx_bytes = 0;
+      bfd_vma j;
+      arm_unwind_table_edit *unwind_edit_head = NULL;
+      arm_unwind_table_edit *unwind_edit_tail = NULL;
+      Elf_Internal_Shdr *hdr;
+      bfd *ibfd;
+
+      if (arm_data == NULL)
+	continue;
+
+      exidx_sec = arm_data->u.text.arm_exidx_sec;
+      if (exidx_sec == NULL)
+	{
+	  /* Section has no unwind data.  */
+	  if (last_unwind_type == 0 || !last_exidx_sec)
+	    continue;
+
+	  /* Ignore zero sized sections.  */
+	  if (sec->size == 0)
+	    continue;
+
+	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
+	  last_unwind_type = 0;
+	  continue;
+	}
+
+      /* Skip /DISCARD/ sections.  */
+      if (bfd_is_abs_section (exidx_sec->output_section))
+	continue;
+
+      hdr = &elf_section_data (exidx_sec)->this_hdr;
+      if (hdr->sh_type != SHT_ARM_EXIDX)
+	continue;
+
+      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
+      if (exidx_arm_data == NULL)
+	continue;
+
+      ibfd = exidx_sec->owner;
+
+      if (hdr->contents != NULL)
+	contents = hdr->contents;
+      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
+	/* An error?  */
+	continue;
+
+      for (j = 0; j < hdr->sh_size; j += 8)
+	{
+	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
+	  int unwind_type;
+	  int elide = 0;
+
+	  /* An EXIDX_CANTUNWIND entry.  */
+	  if (second_word == 1)
+	    {
+	      if (last_unwind_type == 0)
+		elide = 1;
+	      unwind_type = 0;
+	    }
+	  /* Inlined unwinding data.  Merge if equal to previous.  */
+	  else if ((second_word & 0x80000000) != 0)
+	    {
+	      if (last_second_word == second_word && last_unwind_type == 1)
+		elide = 1;
+	      unwind_type = 1;
+	      last_second_word = second_word;
+	    }
+	  /* Normal table entry.  In theory we could merge these too,
+	     but duplicate entries are likely to be much less common.  */
+	  else
+	    unwind_type = 2;
+
+	  if (elide)
+	    {
+	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
+				     DELETE_EXIDX_ENTRY, NULL, j / 8);
+
+	      deleted_exidx_bytes += 8;
+	    }
+
+	  last_unwind_type = unwind_type;
+	}
+
+      /* Free contents if we allocated it ourselves.  */
+      if (contents != hdr->contents)
+	free (contents);
+
+      /* Record edits to be applied later (in elf32_arm_write_section).  */
+      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
+      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
+
+      if (deleted_exidx_bytes > 0)
+	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
+
+      last_exidx_sec = exidx_sec;
+      last_text_sec = sec;
+    }
+
+  /* Add terminating CANTUNWIND entry.  */
+  if (last_exidx_sec && last_unwind_type != 0)
+    insert_cantunwind_after (last_text_sec, last_exidx_sec);
+
+  return TRUE;
+}
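
A condensed model of the merging rules just shown, stripped of BFD types — illustrative only; mark_duplicate_exidx_entries is a hypothetical name and the real code also tracks the sections involved:

    #include <stdint.h>
    #include <stddef.h>

    /* Mark entry i for deletion when it repeats the unwind state of
       entry i-1: consecutive EXIDX_CANTUNWIND entries, or identical
       inlined opcode words.  .ARM.extab references are never merged.
       Returns the number of entries marked.  */
    static size_t
    mark_duplicate_exidx_entries (const uint32_t *second_words, size_t n,
                                  unsigned char *elide /* out: n flags */)
    {
      size_t i, deleted = 0;
      int last_type = -1;              /* -1 none, 0 cantunwind, 1 inline.  */
      uint32_t last_inline = 0;

      for (i = 0; i < n; i++)
        {
          uint32_t w = second_words[i];
          elide[i] = 0;
          if (w == 0x1)                          /* EXIDX_CANTUNWIND.  */
            {
              elide[i] = (last_type == 0);
              last_type = 0;
            }
          else if (w & 0x80000000u)              /* Inlined opcodes.  */
            {
              elide[i] = (last_type == 1 && last_inline == w);
              last_type = 1;
              last_inline = w;
            }
          else                                   /* .ARM.extab reference.  */
            last_type = 2;
          deleted += elide[i];
        }
      return deleted;
    }
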
+static bfd_boolean
+elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
+			       bfd *ibfd, const char *name)
+{
+  asection *sec, *osec;
+
+  sec = bfd_get_section_by_name (ibfd, name);
+  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
+    return TRUE;
+
+  osec = sec->output_section;
+  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
+    return TRUE;
+
+  if (! bfd_set_section_contents (obfd, osec, sec->contents,
+				  sec->output_offset, sec->size))
+    return FALSE;
+
+  return TRUE;
+}
+
+static bfd_boolean
+elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
+{
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+
+  /* Invoke the regular ELF backend linker to do all the work.  */
+  if (!bfd_elf_final_link (abfd, info))
+    return FALSE;
+
+  /* Write out any glue sections now that we have created all the
+     stubs.  */
+  if (globals->bfd_of_glue_owner != NULL)
+    {
+      if (! elf32_arm_output_glue_section (info, abfd,
+					   globals->bfd_of_glue_owner,
+					   ARM2THUMB_GLUE_SECTION_NAME))
+	return FALSE;
+
+      if (! elf32_arm_output_glue_section (info, abfd,
+					   globals->bfd_of_glue_owner,
+					   THUMB2ARM_GLUE_SECTION_NAME))
+	return FALSE;
+
+      if (! elf32_arm_output_glue_section (info, abfd,
+					   globals->bfd_of_glue_owner,
+					   VFP11_ERRATUM_VENEER_SECTION_NAME))
+	return FALSE;
+
+      if (! elf32_arm_output_glue_section (info, abfd,
+					   globals->bfd_of_glue_owner,
+					   ARM_BX_GLUE_SECTION_NAME))
+	return FALSE;
+    }
+
+  return TRUE;
+}
+
 /* Set the right machine number.  */
 
 static bfd_boolean
@@ -8605,6 +9595,12 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
   int i;
   bfd_boolean result = TRUE;
 
+  /* Skip the linker stubs file.  This preserves the previous behavior
+     of accepting unknown attributes in the first input file - but
+     is that a bug?  */
+  if (ibfd->flags & BFD_LINKER_CREATED)
+    return TRUE;
+
   if (!elf_known_obj_attributes_proc (obfd)[0].i)
     {
       /* This is the first object.  Copy the attributes.  */
@@ -9778,16 +10774,27 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
	      needs_plt = 1;
	      goto normal_reloc;
 
+	    case R_ARM_MOVW_ABS_NC:
+	    case R_ARM_MOVT_ABS:
+	    case R_ARM_THM_MOVW_ABS_NC:
+	    case R_ARM_THM_MOVT_ABS:
+	      if (info->shared)
+		{
+		  (*_bfd_error_handler)
+		    (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
+		     abfd, elf32_arm_howto_table_1[r_type].name,
+		     (h) ? h->root.root.string : "a local symbol");
+		  bfd_set_error (bfd_error_bad_value);
+		  return FALSE;
+		}
+
+	      /* Fall through.  */
	    case R_ARM_ABS32:
	    case R_ARM_ABS32_NOI:
	    case R_ARM_REL32:
	    case R_ARM_REL32_NOI:
-	    case R_ARM_MOVW_ABS_NC:
-	    case R_ARM_MOVT_ABS:
	    case R_ARM_MOVW_PREL_NC:
	    case R_ARM_MOVT_PREL:
-	    case R_ARM_THM_MOVW_ABS_NC:
-	    case R_ARM_THM_MOVT_ABS:
	    case R_ARM_THM_MOVW_PREL_NC:
	    case R_ARM_THM_MOVT_PREL:
	      needs_plt = 0;
@@ -9881,15 +10888,19 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
	  /* Track dynamic relocs needed for local syms too.
	     We really need local syms available to do this
	     easily.  Oh well.  */
-	  asection *s;
	  void *vpp;
+	  Elf_Internal_Sym *isym;
 
-	  s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
-					 sec, r_symndx);
-	  if (s == NULL)
+	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+					abfd, r_symndx);
+	  if (isym == NULL)
	    return FALSE;
 
+	  s = bfd_section_from_elf_index (abfd, isym->st_shndx);
+	  if (s == NULL)
+	    s = sec;
+
	  vpp = &elf_section_data (s)->local_dynrel;
	  head = (struct elf32_arm_relocs_copied **) vpp;
	}
@@ -10318,14 +11329,14 @@ allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
	{
	  h->root.u.def.section = s;
	  h->root.u.def.value = h->plt.offset;
-
-	  /* Make sure the function is not marked as Thumb, in case
-	     it is the target of an ABS32 relocation, which will
-	     point to the PLT entry.  */
-	  if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
-	    h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
	}
 
+      /* Make sure the function is not marked as Thumb, in case
+	 it is the target of an ABS32 relocation, which will
+	 point to the PLT entry.  */
+      if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
+	h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
+
+      /* Make room for this entry.  */
       s->size += htab->plt_entry_size;
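
On why the MOVW/MOVT absolute pairs are rejected for shared objects above: the 32-bit address is split across the immediates of two instructions, so fixing it up at load time would require patching code (a text relocation), defeating position independence. A sketch of the split, with hypothetical helper names; the imm4/imm12 packing follows the ARM MOVW/MOVT instruction encoding:

    #include <stdint.h>

    /* MOVW/MOVT encode a 16-bit immediate as imm4 (instruction bits
       19:16) and imm12 (bits 11:0).  */
    static uint32_t
    arm_movw_movt_imm (uint16_t imm16)
    {
      return ((uint32_t) (imm16 & 0xf000) << 4) | (imm16 & 0x0fff);
    }

    /* movw rd, #:lower16:addr ; movt rd, #:upper16:addr  */
    static void
    split_address (uint32_t addr, uint32_t *movw_imm, uint32_t *movt_imm)
    {
      *movw_imm = arm_movw_movt_imm (addr & 0xffff);   /* low half.   */
      *movt_imm = arm_movw_movt_imm (addr >> 16);      /* high half.  */
    }
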
@@ -10763,6 +11774,9 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				      ibfd->filename);
	}
 
+  /* Allocate space for the glue sections now that we've sized them.  */
+  bfd_elf32_arm_allocate_interworking_sections (info);
+
   /* The check_relocs and adjust_dynamic_symbol entry points have
      determined the sizes of the various dynamic sections.  Allocate
      memory for them.  */
@@ -11703,8 +12717,8 @@ typedef struct
   struct bfd_link_info *info;
   asection *sec;
   int sec_shndx;
-  bfd_boolean (*func) (void *, const char *, Elf_Internal_Sym *,
-		       asection *, struct elf_link_hash_entry *);
+  int (*func) (void *, const char *, Elf_Internal_Sym *,
+	       asection *, struct elf_link_hash_entry *);
 } output_arch_syminfo;
 
 enum map_symbol_type
@@ -11734,9 +12748,7 @@ elf32_arm_output_map_sym (output_arch_syminfo *osi,
   sym.st_other = 0;
   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
   sym.st_shndx = osi->sec_shndx;
-  if (!osi->func (osi->finfo, names[type], &sym, osi->sec, NULL))
-    return FALSE;
-  return TRUE;
+  return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
 }
 
 
@@ -11834,9 +12846,7 @@ elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
   sym.st_other = 0;
   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
   sym.st_shndx = osi->sec_shndx;
-  if (!osi->func (osi->finfo, name, &sym, osi->sec, NULL))
-    return FALSE;
-  return TRUE;
+  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
 }
 
 static bfd_boolean
@@ -11881,13 +12891,14 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry,
	return FALSE;
       break;
     case THUMB16_TYPE:
+    case THUMB32_TYPE:
       if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
				      stub_entry->stub_size))
	return FALSE;
       break;
     default:
       BFD_FAIL ();
-      return FALSE;
+      return 0;
     }
 
   prev_type = DATA_TYPE;
@@ -11901,6 +12912,7 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry,
	  break;
 
	case THUMB16_TYPE:
+	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;
 
@@ -11923,6 +12935,7 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry,
       switch (template[i].type)
	{
	case ARM_TYPE:
+	case THUMB32_TYPE:
	  size += 4;
	  break;
 
@@ -11949,10 +12962,10 @@ static bfd_boolean
 elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *finfo,
-				  bfd_boolean (*func) (void *, const char *,
-						       Elf_Internal_Sym *,
-						       asection *,
-						       struct elf_link_hash_entry *))
+				  int (*func) (void *, const char *,
+					       Elf_Internal_Sym *,
+					       asection *,
+					       struct elf_link_hash_entry *))
 {
   output_arch_syminfo osi;
   struct elf32_arm_link_hash_table *htab;
@@ -12119,6 +13132,149 @@ elf32_arm_compare_mapping (const void * a, const void * b)
     return 0;
 }
 
+/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
+
+static unsigned long
+offset_prel31 (unsigned long addr, bfd_vma offset)
+{
+  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
+}
+
+/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
+   relocations.  */
+
+static void
+copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
+{
+  unsigned long first_word = bfd_get_32 (output_bfd, from);
+  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
+
+  /* High bit of first word is supposed to be zero.  */
+  if ((first_word & 0x80000000ul) == 0)
+    first_word = offset_prel31 (first_word, offset);
+
+  /* If the high bit of the second word is clear, and the bit pattern is not
+     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
+  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
+    second_word = offset_prel31 (second_word, offset);
+
+  bfd_put_32 (output_bfd, first_word, to);
+  bfd_put_32 (output_bfd, second_word, to + 4);
+}
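
offset_prel31 adjusts an already-applied PREL31 field without touching bit 31, which carries other information. The inverse operation — decoding such a field back to an absolute address — looks roughly like this (a sketch; prel31_target is a hypothetical name, and it assumes the usual arithmetic right shift of negative values):

    #include <stdint.h>

    /* A prel31 field is a 31-bit signed offset stored in the low bits
       of a word, relative to the word's own address (PLACE).  Shifting
       left then arithmetically right sign-extends bit 30.  */
    static uint32_t
    prel31_target (uint32_t word, uint32_t place)
    {
      int32_t offset = (int32_t) (word << 1) >> 1;
      return place + (uint32_t) offset;
    }
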
+/* Data for make_branch_to_a8_stub().  */
+
+struct a8_branch_to_stub_data {
+  asection *writing_section;
+  bfd_byte *contents;
+};
+
+
+/* Helper to insert branches to Cortex-A8 erratum stubs in the right
+   places for a particular section.  */
+
+static bfd_boolean
+make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
+			void *in_arg)
+{
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  struct a8_branch_to_stub_data *data;
+  bfd_byte *contents;
+  unsigned long branch_insn;
+  bfd_vma veneered_insn_loc, veneer_entry_loc;
+  bfd_signed_vma branch_offset;
+  bfd *abfd;
+  unsigned int index;
+
+  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+  data = (struct a8_branch_to_stub_data *) in_arg;
+
+  if (stub_entry->target_section != data->writing_section
+      || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
+    return TRUE;
+
+  contents = data->contents;
+
+  veneered_insn_loc = stub_entry->target_section->output_section->vma
+		      + stub_entry->target_section->output_offset
+		      + stub_entry->target_value;
+
+  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
+		     + stub_entry->stub_sec->output_offset
+		     + stub_entry->stub_offset;
+
+  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
+    veneered_insn_loc &= ~3u;
+
+  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
+
+  abfd = stub_entry->target_section->owner;
+  index = stub_entry->target_value;
+
+  /* We attempt to avoid this condition by setting stubs_always_after_branch
+     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
+     This check is just to be on the safe side...  */
+  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
+    {
+      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
+			       "allocated in unsafe location"), abfd);
+      return FALSE;
+    }
+
+  switch (stub_entry->stub_type)
+    {
+    case arm_stub_a8_veneer_b:
+    case arm_stub_a8_veneer_b_cond:
+      branch_insn = 0xf0009000;
+      goto jump24;
+
+    case arm_stub_a8_veneer_blx:
+      branch_insn = 0xf000e800;
+      goto jump24;
+
+    case arm_stub_a8_veneer_bl:
+      {
+	unsigned int i1, j1, i2, j2, s;
+
+	branch_insn = 0xf000d000;
+
+      jump24:
+	if (branch_offset < -16777216 || branch_offset > 16777214)
+	  {
+	    /* There's not much we can do apart from complain if this
+	       happens.  */
+	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
+	    return FALSE;
+	  }
+
+	/* i1 = not(j1 eor s), so:
+	   not i1 = j1 eor s
+	   j1 = (not i1) eor s.  */
+
+	branch_insn |= (branch_offset >> 1) & 0x7ff;
+	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
+	i2 = (branch_offset >> 22) & 1;
+	i1 = (branch_offset >> 23) & 1;
+	s = (branch_offset >> 24) & 1;
+	j1 = (!i1) ^ s;
+	j2 = (!i2) ^ s;
+	branch_insn |= j2 << 11;
+	branch_insn |= j1 << 13;
+	branch_insn |= s << 26;
+      }
+      break;
+
+    default:
+      BFD_FAIL ();
+      return FALSE;
+    }
+
+  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
+  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
+
+  return TRUE;
+}
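
The S/I1/I2/J1/J2 shuffling above is the standard Thumb-2 24-bit branch immediate encoding. The same computation as a standalone function — a model of the code above, not a replacement for it; encode_thumb2_branch is a hypothetical name, and arithmetic right shift of negative offsets is assumed, as in the BFD code:

    #include <stdint.h>

    /* OPCODE is the instruction template (e.g. 0xf0009000 for B.W,
       0xf000d000 for BL, 0xf000e800 for BLX); OFFSET is the byte offset
       from the branch, in [-16777216, 16777214].  The immediate is split
       into S, imm10, imm11 and the inverted J1/J2 bits, where
       J = NOT(I) XOR S.  */
    static uint32_t
    encode_thumb2_branch (uint32_t opcode, int32_t offset)
    {
      uint32_t i2 = (offset >> 22) & 1;
      uint32_t i1 = (offset >> 23) & 1;
      uint32_t s  = (offset >> 24) & 1;
      uint32_t j1 = (!i1) ^ s;
      uint32_t j2 = (!i2) ^ s;

      return opcode
             | ((offset >> 1) & 0x7ff)           /* imm11, low halfword.   */
             | (((offset >> 12) & 0x3ff) << 16)  /* imm10, high halfword.  */
             | (j2 << 11)
             | (j1 << 13)
             | (s << 26);
    }
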
 /* Do code byteswapping.  Return FALSE afterwards so that the section is
    written out as normal.  */
 
@@ -12129,7 +13285,7 @@ elf32_arm_write_section (bfd *output_bfd,
			 asection *sec,
			 bfd_byte *contents)
 {
-  int mapcount, errcount;
+  unsigned int mapcount, errcount;
   _arm_elf_section_data *arm_data;
   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
   elf32_arm_section_map *map;
@@ -12138,7 +13294,7 @@ elf32_arm_write_section (bfd *output_bfd,
   bfd_vma end;
   bfd_vma offset = sec->output_section->vma + sec->output_offset;
   bfd_byte tmp;
-  int i;
+  unsigned int i;
 
   /* If this section has not been allocated an _arm_elf_section_data
      structure then we cannot record anything.  */
@@ -12225,6 +13381,106 @@ elf32_arm_write_section (bfd *output_bfd,
	}
     }
 
+  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
+    {
+      arm_unwind_table_edit *edit_node
+	= arm_data->u.exidx.unwind_edit_list;
+      /* Now, sec->size is the size of the section we will write.  The original
+	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
+	 markers) was sec->rawsize.  (If we performed no edits, rawsize will be
+	 zero, and we should use size instead.)  */
+      bfd_byte *edited_contents = bfd_malloc (sec->size);
+      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
+      unsigned int in_index, out_index;
+      bfd_vma add_to_offsets = 0;
+
+      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
+	{
+	  if (edit_node)
+	    {
+	      unsigned int edit_index = edit_node->index;
+
+	      if (in_index < edit_index && in_index * 8 < input_size)
+		{
+		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
+				    contents + in_index * 8, add_to_offsets);
+		  out_index++;
+		  in_index++;
+		}
+	      else if (in_index == edit_index
+		       || (in_index * 8 >= input_size
+			   && edit_index == UINT_MAX))
+		{
+		  switch (edit_node->type)
+		    {
+		    case DELETE_EXIDX_ENTRY:
+		      in_index++;
+		      add_to_offsets += 8;
+		      break;
+
+		    case INSERT_EXIDX_CANTUNWIND_AT_END:
+		      {
+			asection *text_sec = edit_node->linked_section;
+			bfd_vma text_offset = text_sec->output_section->vma
+					      + text_sec->output_offset
+					      + text_sec->size;
+			bfd_vma exidx_offset = offset + out_index * 8;
+			unsigned long prel31_offset;
+
+			/* Note: this is meant to be equivalent to an
+			   R_ARM_PREL31 relocation.  These synthetic
+			   EXIDX_CANTUNWIND markers are not relocated by the
+			   usual BFD method.  */
+			prel31_offset = (text_offset - exidx_offset)
+					& 0x7ffffffful;
+
+			/* First address we can't unwind.  */
+			bfd_put_32 (output_bfd, prel31_offset,
+				    &edited_contents[out_index * 8]);
+
+			/* Code for EXIDX_CANTUNWIND.  */
+			bfd_put_32 (output_bfd, 0x1,
+				    &edited_contents[out_index * 8 + 4]);
+
+			out_index++;
+			add_to_offsets -= 8;
+		      }
+		      break;
+		    }
+
+		  edit_node = edit_node->next;
+		}
+	    }
+	  else
+	    {
+	      /* No more edits, copy remaining entries verbatim.  */
+	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
+				contents + in_index * 8, add_to_offsets);
+	      out_index++;
+	      in_index++;
+	    }
+	}
+
+      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
+	bfd_set_section_contents (output_bfd, sec->output_section,
+				  edited_contents,
+				  (file_ptr) sec->output_offset, sec->size);
+
+      return TRUE;
+    }
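
The loop above interleaves verbatim copies with DELETE and INSERT edits drawn from a sorted list. Its control flow, reduced to a skeleton without the prel31 offset rewriting — illustrative names only, and the output buffer is assumed large enough for the edited table:

    #include <string.h>

    struct edit { unsigned index; int is_delete; struct edit *next; };

    /* Copy N_IN 8-byte entries from IN to OUT, consuming a sorted edit
       list E.  A delete skips an input entry; an insert (index == -1u)
       appends a placeholder entry after the input is exhausted, standing
       in for the EXIDX_CANTUNWIND pair.  Returns entries written.  */
    static unsigned
    apply_edits (const unsigned char *in, unsigned n_in,
                 unsigned char *out, const struct edit *e)
    {
      unsigned in_i = 0, out_i = 0;
      while (in_i < n_in || e)
        {
          if (e && (in_i == e->index
                    || (in_i >= n_in && e->index == (unsigned) -1)))
            {
              if (e->is_delete)
                in_i++;                              /* Drop this entry.  */
              else
                memset (out + 8 * out_i++, 0, 8);    /* Append a marker.  */
              e = e->next;
            }
          else if (in_i < n_in)
            memcpy (out + 8 * out_i++, in + 8 * in_i++, 8);
          else
            break;                                   /* Malformed list.  */
        }
      return out_i;
    }
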
+  /* Fix code to point to Cortex-A8 erratum stubs.  */
+  if (globals->fix_cortex_a8)
+    {
+      struct a8_branch_to_stub_data data;
+
+      data.writing_section = sec;
+      data.contents = contents;
+
+      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
+			 &data);
+    }
+
   if (mapcount == 0)
     return FALSE;
 
@@ -12501,6 +13757,7 @@ const struct elf_size_info elf32_arm_size_info =
 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
 #define bfd_elf32_close_and_cleanup		elf32_arm_close_and_cleanup
 #define bfd_elf32_bfd_free_cached_info		elf32_arm_bfd_free_cached_info
+#define bfd_elf32_bfd_final_link		elf32_arm_final_link
 
 #define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
 #define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
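
bfd_hash_traverse walks every stub entry, passing a caller-supplied context through its void * argument, which make_branch_to_a8_stub unpacks again — the same closure-via-struct pattern used throughout this file. The pattern in miniature, with entirely hypothetical names and no BFD dependencies:

    #include <stdio.h>

    struct visit_ctx { const char *section_name; int patched; };

    /* Callback: returns nonzero to keep traversing.  */
    static int
    visit_stub (const char *stub_name, void *arg)
    {
      struct visit_ctx *ctx = arg;
      printf ("patching %s against %s\n", stub_name, ctx->section_name);
      ctx->patched++;
      return 1;
    }

    int
    main (void)
    {
      const char *stubs[] = { "__a8_veneer_bl_0", "__a8_veneer_b_1" };
      struct visit_ctx ctx = { ".text", 0 };
      unsigned i;

      for (i = 0; i < sizeof stubs / sizeof stubs[0]; i++)
        if (!visit_stub (stubs[i], &ctx))
          break;
      return 0;
    }
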