X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=bfd%2Felf32-arm.c;h=f350dd910199c64ef9d4dbdb3f5541727e28bb5b;hb=2fd158eb7bd4059478086143dd58edcc5ea44864;hp=09c5aa44df85d819b02b90c5d03ab31ac2008a60;hpb=5c294fee9abb6bb259519d9cf164c34b81b83312;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c index 09c5aa44df..f350dd9101 100644 --- a/bfd/elf32-arm.c +++ b/bfd/elf32-arm.c @@ -1,5 +1,5 @@ /* 32-bit ELF support for ARM - Copyright (C) 1998-2014 Free Software Foundation, Inc. + Copyright (C) 1998-2015 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. @@ -79,7 +79,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] = /* No relocation. */ HOWTO (R_ARM_NONE, /* type */ 0, /* rightshift */ - 0, /* size (0 = byte, 1 = short, 2 = long) */ + 3, /* size (0 = byte, 1 = short, 2 = long) */ 0, /* bitsize */ FALSE, /* pc_relative */ 0, /* bitpos */ @@ -1606,7 +1606,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] = FALSE, /* pc_relative */ 0, /* bitpos */ complain_overflow_bitfield,/* complain_on_overflow */ - bfd_elf_generic_reloc, /* special_function */ + NULL, /* special_function */ "R_ARM_TLS_LE32", /* name */ TRUE, /* partial_inplace */ 0xffffffff, /* src_mask */ @@ -1689,6 +1689,60 @@ static reloc_howto_type elf32_arm_howto_table_1[] = 0x00000000, /* src_mask */ 0x00000000, /* dst_mask */ FALSE), /* pcrel_offset */ + EMPTY_HOWTO (130), + EMPTY_HOWTO (131), + HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G0_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G1_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G2_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. */ + HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */ + 0, /* rightshift. */ + 1, /* size (0 = byte, 1 = short, 2 = long). */ + 16, /* bitsize. */ + FALSE, /* pc_relative. */ + 0, /* bitpos. */ + complain_overflow_bitfield,/* complain_on_overflow. */ + bfd_elf_generic_reloc, /* special_function. */ + "R_ARM_THM_ALU_ABS_G3_NC",/* name. */ + FALSE, /* partial_inplace. */ + 0x00000000, /* src_mask. */ + 0x00000000, /* dst_mask. */ + FALSE), /* pcrel_offset. 
*/ }; /* 160 onwards: */ @@ -1889,7 +1943,11 @@ static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] = {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0}, {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1}, {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2}, - {BFD_RELOC_ARM_V4BX, R_ARM_V4BX} + {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC}, + {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC} }; static reloc_howto_type * @@ -2072,6 +2130,9 @@ typedef unsigned short int insn16; #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer" #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x" +#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer" +#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x" + #define ARM_BX_GLUE_SECTION_NAME ".v4_bx" #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d" @@ -2679,6 +2740,36 @@ typedef struct elf32_vfp11_erratum_list } elf32_vfp11_erratum_list; +/* Information about a STM32L4XX erratum veneer, or a branch to such a + veneer. */ +typedef enum +{ + STM32L4XX_ERRATUM_BRANCH_TO_VENEER, + STM32L4XX_ERRATUM_VENEER +} +elf32_stm32l4xx_erratum_type; + +typedef struct elf32_stm32l4xx_erratum_list +{ + struct elf32_stm32l4xx_erratum_list *next; + bfd_vma vma; + union + { + struct + { + struct elf32_stm32l4xx_erratum_list *veneer; + unsigned int insn; + } b; + struct + { + struct elf32_stm32l4xx_erratum_list *branch; + unsigned int id; + } v; + } u; + elf32_stm32l4xx_erratum_type type; +} +elf32_stm32l4xx_erratum_list; + typedef enum { DELETE_EXIDX_ENTRY, @@ -2709,6 +2800,9 @@ typedef struct _arm_elf_section_data /* Information about CPU errata. */ unsigned int erratumcount; elf32_vfp11_erratum_list *erratumlist; + unsigned int stm32l4xx_erratumcount; + elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist; + unsigned int additional_reloc_count; /* Information about unwind tables. */ union { @@ -2942,6 +3036,10 @@ struct elf32_arm_link_hash_table veneers. */ bfd_size_type vfp11_erratum_glue_size; + /* The size in bytes of the section containing glue for STM32L4XX erratum + veneers. */ + bfd_size_type stm32l4xx_erratum_glue_size; + /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and elf32_arm_write_section(). */ @@ -2982,6 +3080,13 @@ struct elf32_arm_link_hash_table /* Global counter for the number of fixes we have emitted. */ int num_vfp11_fixes; + /* What sort of code sequences we should look for which may trigger the + STM32L4XX erratum. */ + bfd_arm_stm32l4xx_fix stm32l4xx_fix; + + /* Global counter for the number of fixes we have emitted. */ + int num_stm32l4xx_fixes; + /* Nonzero to force PIC branch veneers. */ int pic_veneer; @@ -3061,14 +3166,50 @@ struct elf32_arm_link_hash_table struct map_stub *stub_group; /* Number of elements in stub_group. */ - int top_id; + unsigned int top_id; /* Assorted information used by elf32_arm_size_stubs. 
*/ unsigned int bfd_count; - int top_index; + unsigned int top_index; asection **input_list; }; +static inline int +ctz (unsigned int mask) +{ +#if GCC_VERSION >= 3004 + return __builtin_ctz (mask); +#else + unsigned int i; + + for (i = 0; i < 8 * sizeof (mask); i++) + { + if (mask & 0x1) + break; + mask = (mask >> 1); + } + return i; +#endif +} + +static inline int +popcount (unsigned int mask) +{ +#if GCC_VERSION >= 3004 + return __builtin_popcount (mask); +#else + unsigned int i, sum = 0; + + for (i = 0; i < 8 * sizeof (mask); i++) + { + if (mask & 0x1) + sum++; + mask = (mask >> 1); + } + return sum; +#endif +} + /* Create an entry in an ARM ELF linker hash table. */ static struct bfd_hash_entry * @@ -3363,20 +3504,23 @@ create_ifunc_sections (struct bfd_link_info *info) static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals) { - int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, - Tag_CPU_arch); - int profile; + int arch; + int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, + Tag_CPU_arch_profile); - if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M) - return TRUE; + if (profile) + return profile == 'M'; - if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M) - return FALSE; + arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch); - profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, - Tag_CPU_arch_profile); + if (arch == TAG_CPU_ARCH_V6_M + || arch == TAG_CPU_ARCH_V6S_M + || arch == TAG_CPU_ARCH_V7E_M + || arch == TAG_CPU_ARCH_V8M_BASE + || arch == TAG_CPU_ARCH_V8M_MAIN) + return TRUE; - return profile == 'M'; + return FALSE; } /* Determine if we're dealing with a Thumb-2 object. */ @@ -3409,7 +3553,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) return FALSE; htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); - if (!info->shared) + if (!bfd_link_pic (info)) htab->srelbss = bfd_get_linker_section (dynobj, RELOC_SECTION (htab, ".bss")); @@ -3418,7 +3562,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2)) return FALSE; - if (info->shared) + if (bfd_link_pic (info)) { htab->plt_header_size = 0; htab->plt_entry_size @@ -3452,7 +3596,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info) if (!htab->root.splt || !htab->root.srelplt || !htab->sdynbss - || (!info->shared && !htab->srelbss)) + || (!bfd_link_pic (info) && !htab->srelbss)) abort (); return TRUE; @@ -3559,6 +3703,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) } ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; + ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE; #ifdef FOUR_WORD_PLT ret->plt_header_size = 16; ret->plt_entry_size = 16; @@ -3746,7 +3891,7 @@ arm_type_of_stub (struct bfd_link_info *info, /* Thumb to thumb. */ if (!thumb_only) { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? ((globals->use_blx && (r_type == R_ARM_THM_CALL)) @@ -3768,7 +3913,7 @@ arm_type_of_stub (struct bfd_link_info *info, } else { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stub. */ ? arm_stub_long_branch_thumb_only_pic /* non-PIC stub. */ @@ -3789,7 +3934,7 @@ arm_type_of_stub (struct bfd_link_info *info, } stub_type = - (info->shared | globals->pic_veneer) + (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? 
(r_type == R_ARM_THM_TLS_CALL /* TLS PIC stubs. */ @@ -3843,7 +3988,7 @@ arm_type_of_stub (struct bfd_link_info *info, || (r_type == R_ARM_JUMP24) || (r_type == R_ARM_PLT32)) { - stub_type = (info->shared | globals->pic_veneer) + stub_type = (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? ((globals->use_blx) /* V5T and above. */ @@ -3866,7 +4011,7 @@ arm_type_of_stub (struct bfd_link_info *info, || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)) { stub_type = - (info->shared | globals->pic_veneer) + (bfd_link_pic (info) | globals->pic_veneer) /* PIC stubs. */ ? (r_type == R_ARM_TLS_CALL /* TLS PIC Stub. */ @@ -4088,6 +4233,26 @@ put_thumb_insn (struct elf32_arm_link_hash_table * htab, bfd_putb16 (val, ptr); } +/* Store a Thumb2 insn into an output section not processed by + elf32_arm_write_section. */ + +static void +put_thumb2_insn (struct elf32_arm_link_hash_table * htab, + bfd * output_bfd, bfd_vma val, void * ptr) +{ + /* T2 instructions are 16-bit streamed. */ + if (htab->byteswap_code != bfd_little_endian (output_bfd)) + { + bfd_putl16 ((val >> 16) & 0xffff, ptr); + bfd_putl16 ((val & 0xffff), ptr + 2); + } + else + { + bfd_putb16 ((val >> 16) & 0xffff, ptr); + bfd_putb16 ((val & 0xffff), ptr + 2); + } +} + /* If it's possible to change R_TYPE to a more efficient access model, return the new reloc type. */ @@ -4097,7 +4262,8 @@ elf32_arm_tls_transition (struct bfd_link_info *info, int r_type, { int is_local = (h == NULL); - if (info->shared || (h && h->root.type == bfd_link_hash_undefweak)) + if (bfd_link_pic (info) + || (h && h->root.type == bfd_link_hash_undefweak)) return r_type; /* We do not support relaxations for Old TLS models. */ @@ -4431,7 +4597,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd, { bfd *input_bfd; unsigned int bfd_count; - int top_id, top_index; + unsigned int top_id, top_index; asection *section; asection **input_list, **list; bfd_size_type amt; @@ -5717,6 +5883,8 @@ static const insn16 t2a2_noop_insn = 0x46c0; static const insn32 t2a3_b_insn = 0xea000000; #define VFP11_ERRATUM_VENEER_SIZE 8 +#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16 +#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24 #define ARM_BX_VENEER_SIZE 12 static const insn32 armbx1_tst_insn = 0xe3100001; @@ -5773,6 +5941,10 @@ bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info) globals->vfp11_erratum_glue_size, VFP11_ERRATUM_VENEER_SECTION_NAME); + arm_allocate_glue_section_space (globals->bfd_of_glue_owner, + globals->stm32l4xx_erratum_glue_size, + STM32L4XX_ERRATUM_VENEER_SECTION_NAME); + arm_allocate_glue_section_space (globals->bfd_of_glue_owner, globals->bx_glue_size, ARM_BX_GLUE_SECTION_NAME); @@ -5838,7 +6010,8 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info, free (tmp_name); - if (link_info->shared || globals->root.is_relocatable_executable + if (bfd_link_pic (link_info) + || globals->root.is_relocatable_executable || globals->pic_veneer) size = ARM2THUMB_PIC_GLUE_SIZE; else if (globals->use_blx) @@ -6063,6 +6236,125 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, return val; } +/* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode + veneers need to be handled because used only in Cortex-M. 
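+   (STM32L4XX devices are Cortex-M4 based, and Cortex-M cores execute
+   Thumb code only, so an ARM-mode veneer can never be needed.)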
*/ + +static bfd_vma +record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info, + elf32_stm32l4xx_erratum_list *branch, + bfd *branch_bfd, + asection *branch_sec, + unsigned int offset, + bfd_size_type veneer_size) +{ + asection *s; + struct elf32_arm_link_hash_table *hash_table; + char *tmp_name; + struct elf_link_hash_entry *myh; + struct bfd_link_hash_entry *bh; + bfd_vma val; + struct _arm_elf_section_data *sec_data; + elf32_stm32l4xx_erratum_list *newerr; + + hash_table = elf32_arm_hash_table (link_info); + BFD_ASSERT (hash_table != NULL); + BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); + + s = bfd_get_linker_section + (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME); + + BFD_ASSERT (s != NULL); + + sec_data = elf32_arm_section_data (s); + + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen + (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10); + + BFD_ASSERT (tmp_name); + + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME, + hash_table->num_stm32l4xx_fixes); + + myh = elf_link_hash_lookup + (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); + + BFD_ASSERT (myh == NULL); + + bh = NULL; + val = hash_table->stm32l4xx_erratum_glue_size; + _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, + tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val, + NULL, TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); + myh->forced_local = 1; + + /* Link veneer back to calling location. */ + sec_data->stm32l4xx_erratumcount += 1; + newerr = (elf32_stm32l4xx_erratum_list *) + bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list)); + + newerr->type = STM32L4XX_ERRATUM_VENEER; + newerr->vma = -1; + newerr->u.v.branch = branch; + newerr->u.v.id = hash_table->num_stm32l4xx_fixes; + branch->u.b.veneer = newerr; + + newerr->next = sec_data->stm32l4xx_erratumlist; + sec_data->stm32l4xx_erratumlist = newerr; + + /* A symbol for the return from the veneer. */ + sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r", + hash_table->num_stm32l4xx_fixes); + + myh = elf_link_hash_lookup + (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE); + + if (myh != NULL) + abort (); + + bh = NULL; + val = offset + 4; + _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL, + branch_sec, val, NULL, TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC); + myh->forced_local = 1; + + free (tmp_name); + + /* Generate a mapping symbol for the veneer section, and explicitly add an + entry for that symbol to the code/data map for the section. */ + if (hash_table->stm32l4xx_erratum_glue_size == 0) + { + bh = NULL; + /* Creates a THUMB symbol since there is no other choice. */ + _bfd_generic_link_add_one_symbol (link_info, + hash_table->bfd_of_glue_owner, "$t", + BSF_LOCAL, s, 0, NULL, + TRUE, FALSE, &bh); + + myh = (struct elf_link_hash_entry *) bh; + myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); + myh->forced_local = 1; + + /* The elf32_arm_init_maps function only cares about symbols from input + BFDs. We must make a note of this generated mapping symbol + ourselves so that code byteswapping works properly in + elf32_arm_write_section. */ + elf32_arm_section_map_add (s, 't', 0); + } + + s->size += veneer_size; + hash_table->stm32l4xx_erratum_glue_size += veneer_size; + hash_table->num_stm32l4xx_fixes++; + + /* The offset of the veneer. 
*/ + return val; +} + #define ARM_GLUE_SECTION_FLAGS \ (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ | SEC_READONLY | SEC_LINKER_CREATED) @@ -6108,15 +6400,26 @@ bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, struct bfd_link_info *info) { + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info); + bfd_boolean dostm32l4xx = globals + && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE; + bfd_boolean addglue; + /* If we are only performing a partial link do not bother adding the glue. */ - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; - return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) + addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME); + + if (!dostm32l4xx) + return addglue; + + return addglue + && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME); } /* Select a BFD to be used to hold the sections used by the glue code. @@ -6130,7 +6433,7 @@ bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info) /* If we are only performing a partial link do not bother getting a bfd to hold the glue. */ - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; /* Make sure we don't attach the glue sections to a dynamic object. */ @@ -6182,7 +6485,7 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, /* If we are only performing a partial link do not bother to construct any glue. */ - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return TRUE; /* Here we have a bfd that is to be included on the link. We have a @@ -6435,6 +6738,26 @@ bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; } +void +bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); + + if (globals == NULL) + return; + + /* We assume only Cortex-M4 may require the fix. */ + if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M + || out_attr[Tag_CPU_arch_profile].i != 'M') + { + if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE) + /* Give a warning, but do as the user requests anyway. */ + (*_bfd_error_handler) + (_("%B: warning: selected STM32L4XX erratum " + "workaround is not necessary for target architecture"), obfd); + } +} enum bfd_arm_vfp11_pipe { @@ -6746,7 +7069,7 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) /* If we are only performing a partial link do not bother to construct any glue. */ - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return TRUE; /* Skip if this bfd does not correspond to an ELF image. */ @@ -6932,7 +7255,7 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, struct elf32_arm_link_hash_table *globals; char *tmp_name; - if (link_info->relocatable) + if (bfd_link_relocatable (link_info)) return; /* Skip if this bfd does not correspond to an ELF image. */ @@ -7007,6 +7330,349 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, free (tmp_name); } +/* Find virtual-memory addresses for STM32L4XX erratum veneers and + return locations after sections have been laid out, using + specially-named symbols. 
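+   The veneer emitted for fix number N is named __stm32l4xx_veneer_N (N in
+   hexadecimal) and the matching return location in the original section
+   is named __stm32l4xx_veneer_N_r, as created by
+   record_stm32l4xx_erratum_veneer above.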
*/
+
+void
+bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
+					      struct bfd_link_info *link_info)
+{
+  asection *sec;
+  struct elf32_arm_link_hash_table *globals;
+  char *tmp_name;
+
+  if (bfd_link_relocatable (link_info))
+    return;
+
+  /* Skip if this bfd does not correspond to an ELF image.  */
+  if (! is_arm_elf (abfd))
+    return;
+
+  globals = elf32_arm_hash_table (link_info);
+  if (globals == NULL)
+    return;
+
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
+				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
+
+  for (sec = abfd->sections; sec != NULL; sec = sec->next)
+    {
+      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
+      elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
+
+      for (; errnode != NULL; errnode = errnode->next)
+	{
+	  struct elf_link_hash_entry *myh;
+	  bfd_vma vma;
+
+	  switch (errnode->type)
+	    {
+	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
+	      /* Find veneer symbol.  */
+	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
+		       errnode->u.b.veneer->u.v.id);
+
+	      myh = elf_link_hash_lookup
+		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
+
+	      if (myh == NULL)
+		{
+		  /* Report the missing veneer and skip it rather than
+		     dereferencing a failed lookup.  */
+		  (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
+					   "`%s'"), abfd, tmp_name);
+		  continue;
+		}
+
+	      vma = myh->root.u.def.section->output_section->vma
+		+ myh->root.u.def.section->output_offset
+		+ myh->root.u.def.value;
+
+	      errnode->u.b.veneer->vma = vma;
+	      break;
+
+	    case STM32L4XX_ERRATUM_VENEER:
+	      /* Find return location.  */
+	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
+		       errnode->u.v.id);
+
+	      myh = elf_link_hash_lookup
+		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
+
+	      if (myh == NULL)
+		{
+		  (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
+					   "`%s'"), abfd, tmp_name);
+		  continue;
+		}
+
+	      vma = myh->root.u.def.section->output_section->vma
+		+ myh->root.u.def.section->output_offset
+		+ myh->root.u.def.value;
+
+	      errnode->u.v.branch->vma = vma;
+	      break;
+
+	    default:
+	      abort ();
+	    }
+	}
+    }
+
+  free (tmp_name);
+}
+
+static inline bfd_boolean
+is_thumb2_ldmia (const insn32 insn)
+{
+  /* Encoding T2: LDM.W <Rn>{!}, <registers>
+     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
+  return (insn & 0xffd02000) == 0xe8900000;
+}
+
+static inline bfd_boolean
+is_thumb2_ldmdb (const insn32 insn)
+{
+  /* Encoding T1: LDMDB <Rn>{!}, <registers>
+     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
+  return (insn & 0xffd02000) == 0xe9100000;
+}
+
+static inline bfd_boolean
+is_thumb2_vldm (const insn32 insn)
+{
+  /* A6.5 Extension register load or store instruction
+     A7.7.229
+     We look only for the 32-bit registers case since DP (64-bit)
+     registers are not supported on STM32L4XX
+     Encoding T2 VLDM{mode} <Rn>{!}, <list>
+     <list> is consecutive 32-bit registers
+     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
+     if P==0 && U==1 && W==1 && Rn=1101 VPOP
+     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
+  return
+    ((insn & 0xfe100f00) == 0xec100a00)
+    && /* (IA without !).  */
+    (((((insn << 7) >> 28) & 0xd) == 0x4)
+     /* (IA with !), includes VPOP (when reg number is SP).  */
+     || ((((insn << 7) >> 28) & 0xd) == 0x5)
+     /* (DB with !).  */
+     || ((((insn << 7) >> 28) & 0xd) == 0x9));
+}
+
+/* STM32L4XX erratum: This function assumes that it receives an LDM or
+   VLDM opcode and:
+   - computes the number and the mode of memory accesses
+   - decides if the replacement should be done:
+     . replaces only if > 8-word accesses
+     . or (testing purposes only) replaces all accesses. 
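+     For instance, LDMIA r0!, {r1-r9} loads nine words and is replaced
+     under the DEFAULT policy, whereas an eight-register LDM is left
+     untouched; under the ALL policy every LDM/VLDM is replaced
+     regardless of its register count.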
*/ + +static bfd_boolean +stm32l4xx_need_create_replacing_stub (const insn32 insn, + bfd_arm_stm32l4xx_fix stm32l4xx_fix) +{ + int nb_regs = 0; + + /* The field encoding the register list is the same for both LDMIA + and LDMDB encodings. */ + if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn)) + nb_regs = popcount (insn & 0x0000ffff); + else if (is_thumb2_vldm (insn)) + nb_regs = (insn & 0xff); + + /* DEFAULT mode accounts for the real bug condition situation, + ALL mode inserts stubs for each LDM/VLDM instruction (testing). */ + return + (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_regs > 8 : + (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE; +} + +/* Look for potentially-troublesome code sequences which might trigger + the STM STM32L4XX erratum. */ + +bfd_boolean +bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd, + struct bfd_link_info *link_info) +{ + asection *sec; + bfd_byte *contents = NULL; + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + + if (globals == NULL) + return FALSE; + + /* If we are only performing a partial link do not bother + to construct any glue. */ + if (bfd_link_relocatable (link_info)) + return TRUE; + + /* Skip if this bfd does not correspond to an ELF image. */ + if (! is_arm_elf (abfd)) + return TRUE; + + if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE) + return TRUE; + + /* Skip this BFD if it corresponds to an executable or dynamic object. */ + if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0) + return TRUE; + + for (sec = abfd->sections; sec != NULL; sec = sec->next) + { + unsigned int i, span; + struct _arm_elf_section_data *sec_data; + + /* If we don't have executable progbits, we're not interested in this + section. Also skip if section is to be excluded. */ + if (elf_section_type (sec) != SHT_PROGBITS + || (elf_section_flags (sec) & SHF_EXECINSTR) == 0 + || (sec->flags & SEC_EXCLUDE) != 0 + || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS + || sec->output_section == bfd_abs_section_ptr + || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0) + continue; + + sec_data = elf32_arm_section_data (sec); + + if (sec_data->mapcount == 0) + continue; + + if (elf_section_data (sec)->this_hdr.contents != NULL) + contents = elf_section_data (sec)->this_hdr.contents; + else if (! bfd_malloc_and_get_section (abfd, sec, &contents)) + goto error_return; + + qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map), + elf32_arm_compare_mapping); + + for (span = 0; span < sec_data->mapcount; span++) + { + unsigned int span_start = sec_data->map[span].vma; + unsigned int span_end = (span == sec_data->mapcount - 1) + ? sec->size : sec_data->map[span + 1].vma; + char span_type = sec_data->map[span].type; + int itblock_current_pos = 0; + + /* Only Thumb2 mode need be supported with this CM4 specific + code, we should not encounter any arm mode eg span_type + != 'a'. */ + if (span_type != 't') + continue; + + for (i = span_start; i < span_end;) + { + unsigned int insn = bfd_get_16 (abfd, &contents[i]); + bfd_boolean insn_32bit = FALSE; + bfd_boolean is_ldm = FALSE; + bfd_boolean is_vldm = FALSE; + bfd_boolean is_not_last_in_it_block = FALSE; + + /* The first 16-bits of all 32-bit thumb2 instructions start + with opcode[15..13]=0b111 and the encoded op1 can be anything + except opcode[12..11]!=0b00. + See 32-bit Thumb instruction encoding. 
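+	     For example 0xe890, the first halfword of LDMIA.W, has
+	     opcode[15..13]=0b111 and opcode[12..11]=0b01 and therefore
+	     starts a 32-bit instruction, while 0xbf18 (IT NE) decodes
+	     as a self-contained 16-bit instruction.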
*/ + if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) + insn_32bit = TRUE; + + /* Compute the predicate that tells if the instruction + is concerned by the IT block + - Creates an error if there is a ldm that is not + last in the IT block thus cannot be replaced + - Otherwise we can create a branch at the end of the + IT block, it will be controlled naturally by IT + with the proper pseudo-predicate + - So the only interesting predicate is the one that + tells that we are not on the last item of an IT + block. */ + if (itblock_current_pos != 0) + is_not_last_in_it_block = !!--itblock_current_pos; + + if (insn_32bit) + { + /* Load the rest of the insn (in manual-friendly order). */ + insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]); + is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn); + is_vldm = is_thumb2_vldm (insn); + + /* Veneers are created for (v)ldm depending on + option flags and memory accesses conditions; but + if the instruction is not the last instruction of + an IT block, we cannot create a jump there, so we + bail out. */ + if ((is_ldm || is_vldm) && + stm32l4xx_need_create_replacing_stub + (insn, globals->stm32l4xx_fix)) + { + if (is_not_last_in_it_block) + { + (*_bfd_error_handler) + /* Note - overlong line used here to allow for translation. */ + (_("\ +%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n" + "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"), + abfd, sec, (long)i); + } + else + { + elf32_stm32l4xx_erratum_list *newerr = + (elf32_stm32l4xx_erratum_list *) + bfd_zmalloc + (sizeof (elf32_stm32l4xx_erratum_list)); + + elf32_arm_section_data (sec) + ->stm32l4xx_erratumcount += 1; + newerr->u.b.insn = insn; + /* We create only thumb branches. */ + newerr->type = + STM32L4XX_ERRATUM_BRANCH_TO_VENEER; + record_stm32l4xx_erratum_veneer + (link_info, newerr, abfd, sec, + i, + is_ldm ? + STM32L4XX_ERRATUM_LDM_VENEER_SIZE: + STM32L4XX_ERRATUM_VLDM_VENEER_SIZE); + newerr->vma = -1; + newerr->next = sec_data->stm32l4xx_erratumlist; + sec_data->stm32l4xx_erratumlist = newerr; + } + } + } + else + { + /* A7.7.37 IT p208 + IT blocks are only encoded in T1 + Encoding T1: IT{x{y{z}}} + 1 0 1 1 - 1 1 1 1 - firstcond - mask + if mask = '0000' then see 'related encodings' + We don't deal with UNPREDICTABLE, just ignore these. + There can be no nested IT blocks so an IT block + is naturally a new one for which it is worth + computing its size. */ + bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) && + ((insn & 0x000f) != 0x0000); + /* If we have a new IT block we compute its size. */ + if (is_newitblock) + { + /* Compute the number of instructions controlled + by the IT block, it will be used to decide + whether we are inside an IT block or not. */ + unsigned int mask = insn & 0x000f; + itblock_current_pos = 4 - ctz (mask); + } + } + + i += insn_32bit ? 4 : 2; + } + } + + if (contents != NULL + && elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + contents = NULL; + } + + return TRUE; + +error_return: + if (contents != NULL + && elf_section_data (sec)->this_hdr.contents != contents) + free (contents); + + return FALSE; +} /* Set target relocation values needed during linking. 
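   The stm32l4xx_fix argument added to this function below carries the
   policy chosen by the linker's STM32L4XX erratum option
   (--fix-stm32l4xx-629360) into the link hash table, alongside the
   existing vfp11_fix.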
*/ @@ -7018,6 +7684,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, int fix_v4bx, int use_blx, bfd_arm_vfp11_fix vfp11_fix, + bfd_arm_stm32l4xx_fix stm32l4xx_fix, int no_enum_warn, int no_wchar_warn, int pic_veneer, int fix_cortex_a8, int fix_arm1176) @@ -7043,6 +7710,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, globals->fix_v4bx = fix_v4bx; globals->use_blx |= use_blx; globals->vfp11_fix = vfp11_fix; + globals->stm32l4xx_fix = stm32l4xx_fix; globals->pic_veneer = pic_veneer; globals->fix_cortex_a8 = fix_cortex_a8; globals->fix_arm1176 = fix_arm1176; @@ -7220,7 +7888,8 @@ elf32_arm_create_thumb_stub (struct bfd_link_info * info, --my_offset; myh->root.u.def.value = my_offset; - if (info->shared || globals->root.is_relocatable_executable + if (bfd_link_pic (info) + || globals->root.is_relocatable_executable || globals->pic_veneer) { /* For relocatable objects we can't use absolute addresses, @@ -7671,7 +8340,7 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info, + root_plt->offset); ptr = splt->contents + root_plt->offset; - if (htab->vxworks_p && info->shared) + if (htab->vxworks_p && bfd_link_pic (info)) { unsigned int i; bfd_vma val; @@ -8234,18 +8903,6 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, if (r_type != howto->type) howto = elf32_arm_howto_from_type (r_type); - /* If the start address has been set, then set the EF_ARM_HASENTRY - flag. Setting this more than once is redundant, but the cost is - not too high, and it keeps the code simple. - - The test is done here, rather than somewhere else, because the - start address is only set just before the final link commences. - - Note - if the user deliberately sets a start address of 0, the - flag will not be set. */ - if (bfd_get_start_address (output_bfd) != 0) - elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY; - eh = (struct elf32_arm_link_hash_entry *) h; sgot = globals->root.sgot; local_got_offsets = elf_local_got_offsets (input_bfd); @@ -8395,7 +9052,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* When generating a shared object or relocatable executable, these relocations are copied into the output file to be resolved at run time. 
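	 (For example, an R_ARM_ABS32 against a preemptible symbol is
	 copied out against that symbol's dynamic index, while one that
	 binds locally can be emitted as an R_ARM_RELATIVE relocation
	 instead.)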
*/ - if ((info->shared || globals->root.is_relocatable_executable) + if ((bfd_link_pic (info) + || globals->root.is_relocatable_executable) && (input_section->flags & SEC_ALLOC) && !(globals->vxworks_p && strcmp (input_section->output_section->name, @@ -8416,6 +9074,21 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, Elf_Internal_Rela outrel; bfd_boolean skip, relocate; + if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI) + && !h->def_regular) + { + char *v = _("shared object"); + + if (bfd_link_executable (info)) + v = _("PIE executable"); + + (*_bfd_error_handler) + (_("%B: relocation %s against external or undefined symbol `%s'" + " can not be used when making a %s; recompile with -fPIC"), input_bfd, + elf32_arm_howto_table_1[r_type].name, h->root.root.string, v); + return bfd_reloc_notsupported; + } + *unresolved_reloc_p = FALSE; if (sreloc == NULL && globals->root.dynamic_sections_created) @@ -8445,8 +9118,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, memset (&outrel, 0, sizeof outrel); else if (h != NULL && h->dynindx != -1 - && (!info->shared - || !info->symbolic + && (!bfd_link_pic (info) + || !SYMBOLIC_BIND (info, h) || !h->def_regular)) outrel.r_info = ELF32_R_INFO (h->dynindx, r_type); else @@ -8809,7 +9482,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; if (value >= 0x1000) return bfd_reloc_overflow; @@ -8844,7 +9517,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; /* We do not check for overflow of this reloc. Although strictly speaking this is incorrect, it appears to be necessary in order @@ -8881,7 +9554,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, + input_section->output_offset + rel->r_offset); - value = abs (relocation); + value = relocation; if (value >= 0x1000) return bfd_reloc_overflow; @@ -9395,7 +10068,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { if (dynreloc_st_type == STT_GNU_IFUNC) outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE); - else if (info->shared && + else if (bfd_link_pic (info) && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE); @@ -9444,7 +10117,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, if (globals->use_rel) bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off); - if (info->shared || dynreloc_st_type == STT_GNU_IFUNC) + if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC) { Elf_Internal_Rela outrel; @@ -9493,7 +10166,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { /* If we don't know the module number, create a relocation for it. */ - if (info->shared) + if (bfd_link_pic (info)) { Elf_Internal_Rela outrel; @@ -9543,8 +10216,10 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, { bfd_boolean dyn; dyn = globals->root.dynamic_sections_created; - if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) - && (!info->shared + if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, + bfd_link_pic (info), + h) + && (!bfd_link_pic (info) || !SYMBOL_REFERENCES_LOCAL (info, h))) { *unresolved_reloc_p = FALSE; @@ -9581,7 +10256,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, now, and emit any relocations. If both an IE GOT and a GD GOT are necessary, we emit the GD first. 
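	 (A GD entry occupies two GOT words, the module index and the
	 offset within that module's TLS block, so the IE entry then
	 starts 8 bytes further on, matching the offplt + 8 bound
	 asserted below.)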
*/ - if ((info->shared || indx != 0) + if ((bfd_link_pic (info) || indx != 0) && (h == NULL || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) @@ -9597,7 +10272,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* We should have relaxed, unless this is an undefined weak symbol. */ BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak)) - || info->shared); + || bfd_link_pic (info)); BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8 <= globals->root.sgotplt->size); @@ -9872,7 +10547,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, } case R_ARM_TLS_LE32: - if (info->shared && !info->pie) + if (bfd_link_dll (info)) { (*_bfd_error_handler) (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"), @@ -10116,8 +10791,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_n, in encoded constant-with-rotation format. */ - g_n = calculate_group_reloc_mask (abs (signed_value), group, - &residual); + g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group, &residual); /* Check for overflow if required. */ if ((r_type == R_ARM_ALU_PC_G0 @@ -10130,7 +10805,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value, + howto->name); return bfd_reloc_overflow; } @@ -10210,15 +10886,16 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. */ if (residual >= 0x1000) { (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), - input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + input_bfd, input_section, + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10294,15 +10971,16 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. */ if (residual >= 0x100) { (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), - input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + input_bfd, input_section, + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10378,7 +11056,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Calculate the value of the relevant G_{n-1} to obtain the residual at that stage. */ - calculate_group_reloc_mask (abs (signed_value), group - 1, &residual); + calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value, + group - 1, &residual); /* Check for overflow. 
(The absolute value to go in the place must be divisible by four and, after having been divided by four, must @@ -10388,7 +11067,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, (*_bfd_error_handler) (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"), input_bfd, input_section, - (long) rel->r_offset, abs (signed_value), howto->name); + (long) rel->r_offset, labs (signed_value), howto->name); return bfd_reloc_overflow; } @@ -10406,6 +11085,33 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, } return bfd_reloc_ok; + case R_ARM_THM_ALU_ABS_G0_NC: + case R_ARM_THM_ALU_ABS_G1_NC: + case R_ARM_THM_ALU_ABS_G2_NC: + case R_ARM_THM_ALU_ABS_G3_NC: + { + const int shift_array[4] = {0, 8, 16, 24}; + bfd_vma insn = bfd_get_16 (input_bfd, hit_data); + bfd_vma addr = value; + int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC]; + + /* Compute address. */ + if (globals->use_rel) + signed_addend = insn & 0xff; + addr += signed_addend; + if (branch_type == ST_BRANCH_TO_THUMB) + addr |= 1; + /* Clean imm8 insn. */ + insn &= 0xff00; + /* And update with correct part of address. */ + insn |= (addr >> shift) & 0xff; + /* Update insn. */ + bfd_put_16 (input_bfd, insn, hit_data); + } + + *unresolved_reloc_p = FALSE; + return bfd_reloc_ok; + default: return bfd_reloc_notsupported; } @@ -10593,7 +11299,7 @@ elf32_arm_relocate_section (bfd * output_bfd, relocation = (sec->output_section->vma + sec->output_offset + sym->st_value); - if (!info->relocatable + if (!bfd_link_relocatable (info) && (sec->flags & SEC_MERGE) && ELF_ST_TYPE (sym->st_info) == STT_SECTION) { @@ -10700,7 +11406,7 @@ elf32_arm_relocate_section (bfd * output_bfd, RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, rel, 1, relend, howto, 0, contents); - if (info->relocatable) + if (bfd_link_relocatable (info)) { /* This is a relocatable link. We don't have to change anything, unless the reloc is against a section symbol, @@ -10917,6 +11623,8 @@ insert_cantunwind_after(asection *text_sec, asection *exidx_sec) &exidx_arm_data->u.exidx.unwind_edit_tail, INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX); + exidx_arm_data->additional_reloc_count++; + adjust_exidx_size(exidx_sec, 8); } @@ -11059,7 +11767,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, else unwind_type = 2; - if (elide) + if (elide && !bfd_link_relocatable (info)) { add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail, DELETE_EXIDX_ENTRY, NULL, j / 8); @@ -11086,7 +11794,8 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order, } /* Add terminating CANTUNWIND entry. */ - if (last_exidx_sec && last_unwind_type != 0) + if (!bfd_link_relocatable (info) && last_exidx_sec + && last_unwind_type != 0) insert_cantunwind_after(last_text_sec, last_exidx_sec); return TRUE; @@ -11128,7 +11837,7 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) /* Process stub sections (eg BE8 encoding, ...). */ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); - int i; + unsigned int i; for (i=0; itop_id; i++) { sec = htab->stub_group[i].stub_sec; @@ -11162,6 +11871,11 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) VFP11_ERRATUM_VENEER_SECTION_NAME)) return FALSE; + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + STM32L4XX_ERRATUM_VENEER_SECTION_NAME)) + return FALSE; + if (! 
elf32_arm_output_glue_section (info, abfd, globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME)) @@ -11564,6 +12278,47 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V8), /* V7E_M. */ T(V8) /* V8. */ }; + const int v8m_baseline[] = + { + -1, /* PRE_V4. */ + -1, /* V4. */ + -1, /* V4T. */ + -1, /* V5T. */ + -1, /* V5TE. */ + -1, /* V5TEJ. */ + -1, /* V6. */ + -1, /* V6KZ. */ + -1, /* V6T2. */ + -1, /* V6K. */ + -1, /* V7. */ + T(V8M_BASE), /* V6_M. */ + T(V8M_BASE), /* V6S_M. */ + -1, /* V7E_M. */ + -1, /* V8. */ + -1, + T(V8M_BASE) /* V8-M BASELINE. */ + }; + const int v8m_mainline[] = + { + -1, /* PRE_V4. */ + -1, /* V4. */ + -1, /* V4T. */ + -1, /* V5T. */ + -1, /* V5TE. */ + -1, /* V5TEJ. */ + -1, /* V6. */ + -1, /* V6KZ. */ + -1, /* V6T2. */ + -1, /* V6K. */ + T(V8M_MAIN), /* V7. */ + T(V8M_MAIN), /* V6_M. */ + T(V8M_MAIN), /* V6S_M. */ + T(V8M_MAIN), /* V7E_M. */ + -1, /* V8. */ + -1, + T(V8M_MAIN), /* V8-M BASELINE. */ + T(V8M_MAIN) /* V8-M MAINLINE. */ + }; const int v4t_plus_v6_m[] = { -1, /* PRE_V4. */ @@ -11581,6 +12336,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V6S_M), /* V6S_M. */ T(V7E_M), /* V7E_M. */ T(V8), /* V8. */ + -1, /* Unused. */ + T(V8M_BASE), /* V8-M BASELINE. */ + T(V8M_MAIN), /* V8-M MAINLINE. */ T(V4T_PLUS_V6_M) /* V4T plus V6_M. */ }; const int *comb[] = @@ -11592,6 +12350,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, v6s_m, v7e_m, v8, + NULL, + v8m_baseline, + v8m_mainline, /* Pseudo-architecture. */ v4t_plus_v6_m }; @@ -11624,7 +12385,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, if (tagh <= TAG_CPU_ARCH_V6KZ) return result; - result = comb[tagh - T(V6T2)][tagl]; + result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1; /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M) as the canonical version. */ @@ -11807,7 +12568,10 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) "ARM v7", "ARM v6-M", "ARM v6S-M", - "ARM v8" + "ARM v8", + "", + "ARM v8-M.baseline", + "ARM v8-M.mainline", }; /* Merge Tag_CPU_arch and Tag_also_compatible_with. */ @@ -11957,7 +12721,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch when it's 0. It might mean absence of FP hardware if - Tag_FP_arch is zero, otherwise it is effectively SP + DP. */ + Tag_FP_arch is zero. */ #define VFP_VERSION_COUNT 9 static const struct @@ -11999,7 +12763,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) } /* Both the input and the output have nonzero Tag_FP_arch. - So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */ + So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */ /* If both the input and the output have zero Tag_ABI_HardFP_use, do nothing. */ @@ -12007,10 +12771,10 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) && out_attr[Tag_ABI_HardFP_use].i == 0) ; /* If the input and the output have different Tag_ABI_HardFP_use, - the combination of them is 3 (SP & DP). */ + the combination of them is 0 (implied by Tag_FP_arch). */ else if (in_attr[Tag_ABI_HardFP_use].i != out_attr[Tag_ABI_HardFP_use].i) - out_attr[Tag_ABI_HardFP_use].i = 3; + out_attr[Tag_ABI_HardFP_use].i = 0; /* Now we can handle Tag_FP_arch. 
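	  When the inputs disagreed, Tag_ABI_HardFP_use was normalised to
	  0 just above (implied by Tag_FP_arch) rather than the old value
	  of 3 (SP and DP), so the Tag_FP_arch merged here is what
	  describes the floating-point capability.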
*/ @@ -12379,10 +13143,7 @@ elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr) if (flags & EF_ARM_RELEXEC) fprintf (file, _(" [relocatable executable]")); - if (flags & EF_ARM_HASENTRY) - fprintf (file, _(" [has entry point]")); - - flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY); + flags &= ~EF_ARM_RELEXEC; if (flags) fprintf (file, _("")); @@ -12448,7 +13209,7 @@ elf32_arm_gc_sweep_hook (bfd * abfd, const Elf_Internal_Rela *rel, *relend; struct elf32_arm_link_hash_table * globals; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; globals = elf32_arm_hash_table (info); @@ -12546,7 +13307,7 @@ elf32_arm_gc_sweep_hook (bfd * abfd, case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: /* Should the interworking branches be here also? */ - if ((info->shared || globals->root.is_relocatable_executable) + if ((bfd_link_pic (info) || globals->root.is_relocatable_executable) && (sec->flags & SEC_ALLOC) != 0) { if (h == NULL @@ -12644,7 +13405,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, bfd_boolean may_need_local_target_p; unsigned long nsyms; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; BFD_ASSERT (is_arm_elf (abfd)); @@ -12761,7 +13522,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, default: tls_type = GOT_NORMAL; break; } - if (!info->executable && (tls_type & GOT_TLS_IE)) + if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE)) info->flags |= DF_STATIC_TLS; if (h != NULL) @@ -12846,7 +13607,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_ARM_MOVT_ABS: case R_ARM_THM_MOVW_ABS_NC: case R_ARM_THM_MOVT_ABS: - if (info->shared) + if (bfd_link_pic (info)) { (*_bfd_error_handler) (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), @@ -12859,7 +13620,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, /* Fall through. */ case R_ARM_ABS32: case R_ARM_ABS32_NOI: - if (h != NULL && info->executable) + if (h != NULL && bfd_link_executable (info)) { h->pointer_equality_needed = 1; } @@ -12872,7 +13633,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_ARM_THM_MOVT_PREL: /* Should the interworking branches be listed here? */ - if ((info->shared || htab->root.is_relocatable_executable) + if ((bfd_link_pic (info) || htab->root.is_relocatable_executable) && (sec->flags & SEC_ALLOC) != 0) { if (h == NULL @@ -13315,7 +14076,7 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info, be handled correctly by relocate_section. Relocatable executables can reference data in shared objects directly, so we don't need to do anything here. */ - if (info->shared || globals->root.is_relocatable_executable) + if (bfd_link_pic (info) || globals->root.is_relocatable_executable) return TRUE; /* We must allocate the symbol in our .dynbss section, which will @@ -13397,7 +14158,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) h->got.refcount = 0; } - if (info->shared + if (bfd_link_pic (info) || eh->is_iplt || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) { @@ -13408,7 +14169,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) location in the .plt. This is required to make function pointers compare as equal between the normal executable and the shared library. */ - if (! info->shared + if (! 
bfd_link_pic (info) && !h->def_regular) { h->root.u.def.section = htab->root.splt; @@ -13423,7 +14184,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) /* VxWorks executables have a second set of relocations for each PLT entry. They go in a separate relocation section, which is processed by the kernel loader. */ - if (htab->vxworks_p && !info->shared) + if (htab->vxworks_p && !bfd_link_pic (info)) { /* There is a relocation for the initial PLT entry: an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */ @@ -13510,13 +14271,15 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) dyn = htab->root.dynamic_sections_created; indx = 0; - if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) - && (!info->shared + if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, + bfd_link_pic (info), + h) + && (!bfd_link_pic (info) || !SYMBOL_REFERENCES_LOCAL (info, h))) indx = h->dynindx; if (tls_type != GOT_NORMAL - && (info->shared || indx != 0) + && (bfd_link_pic (info) || indx != 0) && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT || h->root.type != bfd_link_hash_undefweak)) { @@ -13550,8 +14313,9 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) they all resolve dynamically instead. Reserve room for the GOT entry's R_ARM_IRELATIVE relocation. */ elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1); - else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT - || h->root.type != bfd_link_hash_undefweak)) + else if (bfd_link_pic (info) + && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak)) /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); } @@ -13601,7 +14365,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf) space for pc-relative relocs that have become local due to symbol visibility changes. */ - if (info->shared || htab->root.is_relocatable_executable) + if (bfd_link_pic (info) || htab->root.is_relocatable_executable) { /* Relocs that use pc_count are PC-relative forms, which will appear on something like ".long foo - ." or "movw REG, foo - .". We want @@ -13777,7 +14541,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, if (elf_hash_table (info)->dynamic_sections_created) { /* Set the contents of the .interp section to the interpreter. */ - if (info->executable) + if (bfd_link_executable (info) && !info->nointerp) { s = bfd_get_linker_section (dynobj, ".interp"); BFD_ASSERT (s != NULL); @@ -13929,13 +14693,13 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, && (local_iplt == NULL || local_iplt->arm.noncall_refcount == 0)) elf32_arm_allocate_irelocs (info, srel, 1); - else if (info->shared || output_bfd->flags & DYNAMIC) + else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC) { - if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC)) + if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)) || *local_tls_type & GOT_TLS_GD) elf32_arm_allocate_dynrelocs (info, srel, 1); - if (info->shared && *local_tls_type & GOT_TLS_GDESC) + if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC) { elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1); @@ -13954,7 +14718,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, for R_ARM_TLS_LDM32 relocations. 
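	 (A single 8-byte GOT slot, holding the module index and a zero
	 offset, is shared by all R_ARM_TLS_LDM32 uses; when linking PIC,
	 one dynamic relocation is reserved so that the loader can fill
	 in the module index.)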
*/ htab->tls_ldm_got.offset = htab->root.sgot->size; htab->root.sgot->size += 8; - if (info->shared) + if (bfd_link_pic (info)) elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1); } else @@ -13974,7 +14738,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, bfd_elf32_arm_init_maps (ibfd); if (!bfd_elf32_arm_process_before_allocation (ibfd, info) - || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)) + || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info) + || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info)) /* xgettext:c-format */ _bfd_error_handler (_("Errors encountered processing file %s"), ibfd->filename); @@ -14090,7 +14855,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, #define add_dynamic_entry(TAG, VAL) \ _bfd_elf_add_dynamic_entry (info, TAG, VAL) - if (info->executable) + if (bfd_link_executable (info)) { if (!add_dynamic_entry (DT_DEBUG, 0)) return FALSE; @@ -14158,7 +14923,7 @@ elf32_arm_always_size_sections (bfd *output_bfd, { asection *tls_sec; - if (info->relocatable) + if (bfd_link_relocatable (info)) return TRUE; tls_sec = elf_hash_table (info)->tls_sec; @@ -14223,12 +14988,16 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd, if (!h->def_regular) { /* Mark the symbol as undefined, rather than as defined in - the .plt section. Leave the value alone. */ + the .plt section. */ sym->st_shndx = SHN_UNDEF; - /* If the symbol is weak, we do need to clear the value. + /* If the symbol is weak we need to clear the value. Otherwise, the PLT entry would provide a definition for the symbol even if the symbol wasn't defined anywhere, - and so the symbol would never be NULL. */ + and so the symbol would never be NULL. Leave the value if + there were any relocations where pointer equality matters + (this is a clue for the dynamic linker, to make function + pointer comparisons work between an application and shared + library). */ if (!h->ref_regular_nonweak || !h->pointer_equality_needed) sym->st_value = 0; } @@ -14645,7 +15414,9 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info #endif } - if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0) + if (htab->vxworks_p + && !bfd_link_pic (info) + && htab->root.splt->size > 0) { /* Correct the .rel(a).plt.unloaded relocations. They will have incorrect symbol indexes. */ @@ -14703,6 +15474,7 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT { Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */ struct elf32_arm_link_hash_table *globals; + struct elf_segment_map *m; i_ehdrp = elf_elfheader (abfd); @@ -14728,6 +15500,26 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT else i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT; } + + /* Scan segment to set p_flags attribute if it contains only sections with + SHF_ARM_NOREAD flag. 
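+     Such a segment is mapped execute-only: p_flags is forced to PF_X
+     with neither PF_R nor PF_W set.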
*/ + for (m = elf_seg_map (abfd); m != NULL; m = m->next) + { + unsigned int j; + + if (m->count == 0) + continue; + for (j = 0; j < m->count; j++) + { + if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD)) + break; + } + if (j == m->count) + { + m->p_flags = PF_X; + m->p_flags_valid = 1; + } + } } static enum elf_reloc_type_class @@ -14779,6 +15571,10 @@ elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec) hdr->sh_type = SHT_ARM_EXIDX; hdr->sh_flags |= SHF_LINK_ORDER; } + + if (sec->flags & SEC_ELF_NOREAD) + hdr->sh_flags |= SHF_ARM_NOREAD; + return TRUE; } @@ -15164,7 +15960,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd, osi.sec->output_section); - if (info->shared || htab->root.is_relocatable_executable + if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->pic_veneer) size = ARM2THUMB_PIC_GLUE_SIZE; else if (htab->use_blx) @@ -15242,7 +16038,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd, if (htab->vxworks_p) { /* VxWorks shared libraries have no PLT header. */ - if (!info->shared) + if (!bfd_link_pic (info)) { if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0)) return FALSE; @@ -15520,6 +16316,764 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, return TRUE; } +/* Beginning of stm32l4xx work-around. */ + +/* Functions encoding instructions necessary for the emission of the + fix-stm32l4xx-629360. + Encoding is extracted from the + ARM (C) Architecture Reference Manual + ARMv7-A and ARMv7-R edition + ARM DDI 0406C.b (ID072512). */ + +static inline bfd_vma +create_instruction_branch_absolute (int branch_offset) +{ + /* A8.8.18 B (A8-334) + B target_address (Encoding T4). */ + /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */ + /* jump offset is: S:I1:I2:imm10:imm11:0. */ + /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */ + + int s = ((branch_offset & 0x1000000) >> 24); + int j1 = s ^ !((branch_offset & 0x800000) >> 23); + int j2 = s ^ !((branch_offset & 0x400000) >> 22); + + if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24)) + BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch."); + + bfd_vma patched_inst = 0xf0009000 + | s << 26 /* S. */ + | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */ + | j1 << 13 /* J1. */ + | j2 << 11 /* J2. */ + | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */ + + return patched_inst; +} + +static inline bfd_vma +create_instruction_ldmia (int base_reg, int wback, int reg_mask) +{ + /* A8.8.57 LDM/LDMIA/LDMFD (A8-396) + LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */ + bfd_vma patched_inst = 0xe8900000 + | (/*W=*/wback << 21) + | (base_reg << 16) + | (reg_mask & 0x0000ffff); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_ldmdb (int base_reg, int wback, int reg_mask) +{ + /* A8.8.60 LDMDB/LDMEA (A8-402) + LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */ + bfd_vma patched_inst = 0xe9100000 + | (/*W=*/wback << 21) + | (base_reg << 16) + | (reg_mask & 0x0000ffff); + + return patched_inst; +} + +static inline bfd_vma +create_instruction_mov (int target_reg, int source_reg) +{ + /* A8.8.103 MOV (register) (A8-486) + MOV Rd, Rm (Encoding T1). 
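+     For example, MOV r8, r1 encodes as 0x4600 | (1 << 7) | (1 << 3),
+     i.e. 0x4688: the D bit at position 7 supplies Rd[3], Rm goes in
+     bits [6:3] and Rd[2:0] in bits [2:0].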
*/
+  bfd_vma patched_inst = 0x4600
+    | (target_reg & 0x7)
+    | ((target_reg & 0x8) >> 3) << 7
+    | (source_reg << 3);
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_sub (int target_reg, int source_reg, int value)
+{
+  /* A8.8.221 SUB (immediate) (A8-708)
+     SUB Rd, Rn, #value (Encoding T3).  */
+  bfd_vma patched_inst = 0xf1a00000
+    | (target_reg << 8)
+    | (source_reg << 16)
+    | (/*S=*/0 << 20)
+    | ((value & 0x800) >> 11) << 26
+    | ((value & 0x700) >> 8) << 12
+    | (value & 0x0ff);
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_vldmia (int base_reg, int wback, int num_regs,
+			   int first_reg)
+{
+  /* A8.8.332 VLDM (A8-922)
+     VLDM{MODE} Rn{!}, {list} (Encoding T2).  */
+  bfd_vma patched_inst = 0xec900a00
+    | (/*W=*/wback << 21)
+    | (base_reg << 16)
+    | (num_regs & 0x000000ff)
+    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
+    | (first_reg & 0x00000001) << 22;
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_vldmdb (int base_reg, int num_regs, int first_reg)
+{
+  /* A8.8.332 VLDM (A8-922)
+     VLDM{MODE} Rn!, {list} (Encoding T2).  */
+  bfd_vma patched_inst = 0xed300a00
+    | (base_reg << 16)
+    | (num_regs & 0x000000ff)
+    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
+    | (first_reg & 0x00000001) << 22;
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_udf_w (int value)
+{
+  /* A8.8.247 UDF (A8-758)
+     Undefined (Encoding T2).  */
+  bfd_vma patched_inst = 0xf7f0a000
+    | (value & 0x00000fff)
+    | (value & 0x000f0000) << 16;
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_udf (int value)
+{
+  /* A8.8.247 UDF (A8-758)
+     Undefined (Encoding T1).  */
+  bfd_vma patched_inst = 0xde00
+    | (value & 0xff);
+
+  return patched_inst;
+}
+
+/* Functions writing an instruction in memory, returning the next
+   memory position to write to.  */
+
+static inline bfd_byte *
+push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
+		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
+{
+  put_thumb2_insn (htab, output_bfd, insn, pt);
+  return pt + 4;
+}
+
+static inline bfd_byte *
+push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
+		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
+{
+  put_thumb_insn (htab, output_bfd, insn, pt);
+  return pt + 2;
+}
+
+/* Function filling up a region in memory with T1 and T2 UDFs taking
+   care of alignment.  */
+
+static bfd_byte *
+stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
+			 bfd * output_bfd,
+			 const bfd_byte * const base_stub_contents,
+			 bfd_byte * const from_stub_contents,
+			 const bfd_byte * const end_stub_contents)
+{
+  bfd_byte *current_stub_contents = from_stub_contents;
+
+  /* Fill the remainder of the stub with deterministic contents: UDF
+     instructions.
+     Check whether realignment to a modulo-4 boundary is needed using a
+     T1 UDF first, so that T2 UDFs can be used from then on.  */
+  if ((current_stub_contents < end_stub_contents)
+      && !((current_stub_contents - base_stub_contents) % 2)
+      && ((current_stub_contents - base_stub_contents) % 4))
+    current_stub_contents =
+      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+			  create_instruction_udf (0));
+
+  for (; current_stub_contents < end_stub_contents;)
+    current_stub_contents =
+      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			  create_instruction_udf_w (0));
+
+  return current_stub_contents;
+}
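
/* For illustration only (not part of the patch): a standalone, host-side
   sketch that round-trips the Encoding T4 packing used by
   create_instruction_branch_absolute above.  It decodes S:I1:I2:imm10:imm11:0
   back into a byte offset and checks that it matches the input.  All
   _sketch names are invented for the example.  */
#include <assert.h>
#include <stdint.h>

static uint32_t
encode_t4_branch_sketch (int32_t off)
{
  uint32_t s  = (off >> 24) & 1;
  uint32_t j1 = s ^ !((off >> 23) & 1);
  uint32_t j2 = s ^ !((off >> 22) & 1);

  /* Same packing as the encoder above.  */
  return 0xf0009000u
    | s << 26
    | (((uint32_t) off >> 12) & 0x3ff) << 16
    | j1 << 13
    | j2 << 11
    | (((uint32_t) off >> 1) & 0x7ff);
}

static int32_t
decode_t4_branch_sketch (uint32_t insn)
{
  uint32_t s     = (insn >> 26) & 1;
  uint32_t j1    = (insn >> 13) & 1;
  uint32_t j2    = (insn >> 11) & 1;
  uint32_t i1    = !(j1 ^ s);           /* I1 = NOT (J1 EOR S).  */
  uint32_t i2    = !(j2 ^ s);           /* I2 = NOT (J2 EOR S).  */
  uint32_t imm10 = (insn >> 16) & 0x3ff;
  uint32_t imm11 = insn & 0x7ff;
  uint32_t raw   = s << 24 | i1 << 23 | i2 << 22 | imm10 << 12 | imm11 << 1;

  return (int32_t) (raw << 7) >> 7;     /* Sign-extend from bit 24.  */
}

int
main (void)
{
  int32_t offs[] = { 0, 4, -4, 0x123456, -0x654322 };
  unsigned i;

  for (i = 0; i < sizeof offs / sizeof *offs; i++)
    assert (decode_t4_branch_sketch (encode_t4_branch_sketch (offs[i]))
	    == offs[i]);
  return 0;
}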
+/* Functions writing the stream of instructions equivalent to the
+   derived sequence for ldmia, ldmdb, vldm respectively.  */
+
+static void
+stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
+				       bfd * output_bfd,
+				       const insn32 initial_insn,
+				       const bfd_byte *const initial_insn_addr,
+				       bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000F0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int nb_registers = popcount (insn_all_registers);
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmia (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of fewer than eight registers, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      if (!restore_pc)
+	current_stub_contents =
+	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			      create_instruction_branch_absolute
+			      (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+	stm32l4xx_fill_stub_udf (htab, output_bfd,
+				 base_stub_contents, current_stub_contents,
+				 base_stub_contents +
+				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] & reg_list[15] != 1.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+     - One with the 7 lowest registers (register mask 0x007F)
+       This LDM will finally contain between 2 and 7 registers
+     - One with the 7 highest registers (register mask 0xDF80)
+       This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with the
+     last LDM operation.
+     The usable register may be any general purpose register (that
+     excludes PC, SP, LR: register mask is 0x1FFF).  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (wback)
+    {
+      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (rn, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (rn, /*wback=*/1, insn_high_registers));
+      if (!restore_pc)
+	{
+	  /* B initial_insn_addr+4.  */
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_branch_absolute
+				(initial_insn_addr - current_stub_contents));
+	}
+    }
+  else /* if (!wback).  */
+    {
+      ri = rn;
+
+      /* If Rn is not part of the high-register-list, move it there.  */
+      if (!(insn_high_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the high-register-list that will be restored.  */
+	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+	  /* MOV Ri, Rn.  */
+	  current_stub_contents =
+	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+				create_instruction_mov (ri, rn));
+	}
+
+      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+
+      if (!restore_pc)
+	{
+	  /* B initial_insn_addr+4.  */
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_branch_absolute
+				(initial_insn_addr - current_stub_contents));
+	}
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
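
/* For illustration only (not part of the patch): a standalone sketch of
   the register-list split performed above.  A wide LDM list is cut with
   the masks 0x007F (R0-R6) and 0xDF80 (R7-R12, LR, PC); the sketch checks
   that the two sub-lists partition the original list and that each keeps
   at most eight registers, which is what makes the veneer immune to the
   erratum.  The _sketch names are invented; __builtin_popcount stands in
   for bfd's popcount helper.  */
#include <assert.h>

static void
split_ldm_list_sketch (unsigned reg_mask)
{
  unsigned low  = reg_mask & 0x007Fu;   /* 7 lowest registers.   */
  unsigned high = reg_mask & 0xDF80u;   /* 7 highest, minus SP.  */

  assert ((low | high) == reg_mask);    /* Nothing lost...       */
  assert ((low & high) == 0);           /* ... and no overlap.   */
  assert (__builtin_popcount (low) <= 7);
  assert (__builtin_popcount (high) <= 7);
}

int
main (void)
{
  /* R0-R8 plus R12: ten registers, enough to need the work-around.  */
  split_ldm_list_sketch (0x11FFu);
  /* R0-R10 plus LR: twelve registers.  */
  split_ldm_list_sketch (0x47FFu);
  return 0;
}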
+static void
+stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
+				       bfd * output_bfd,
+				       const insn32 initial_insn,
+				       const bfd_byte *const initial_insn_addr,
+				       bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000f0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  int nb_registers = popcount (insn_all_registers);
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of fewer than eight registers, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+	stm32l4xx_fill_stub_udf (htab, output_bfd,
+				 base_stub_contents, current_stub_contents,
+				 base_stub_contents +
+				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] & reg_list[15] != 1.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+     - One with the 7 lowest registers (register mask 0x007F)
+       This LDM will finally contain between 2 and 7 registers
+     - One with the 7 highest registers (register mask 0xDF80)
+       This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with
+     the last LDM operation.
+     The usable register may be any general purpose register (that
+     excludes PC, SP, LR: register mask is 0x1FFF).  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (!wback && !restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the low-register-list that will be restored.  */
+      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+			    create_instruction_mov (ri, rn));
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (wback && !restore_pc && !restore_rn)
+    {
+      /* LDMDB Rn!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (rn, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Rn!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (rn, /*wback=*/1, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Rn, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (rn, rn, (4 * nb_registers)));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+			    create_instruction_mov (ri, rn));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (!wback && !restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_low_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the low-register-list that will be restored.  */
+	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+	  /* MOV Ri, Rn.  */
+	  current_stub_contents =
+	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+				create_instruction_mov (ri, rn));
+	}
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmdb
+			    (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_high_registers & (1 << rn)))
+	{
+	  /* Choose a Ri in the high-register-list that will be restored.  */
+	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+	}
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_ldmia
+			    (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_rn)
+    {
+      /* The assembler should not have accepted this encoding.  */
+      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
+		  "undefined behavior.\n");
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
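
/* For illustration only (not part of the patch): how the spare register
   Ri is picked above.  ctz on (candidates & usable & ~(1 << rn)) returns
   the lowest-numbered register that is in the wanted sub-list, is a plain
   GPR (mask 0x1FFF excludes SP, LR, PC), and is not the base register
   itself.  __builtin_ctz stands in for bfd's ctz helper; the _sketch
   names are invented.  */
#include <assert.h>

static int
pick_spare_reg_sketch (unsigned candidates, int rn)
{
  unsigned usable = candidates & 0x1FFFu & ~(1u << rn);

  assert (usable != 0);            /* The >8-register precondition leaves
				      at least one candidate.  */
  return __builtin_ctz (usable);   /* Lowest set bit = lowest Ri.  */
}

int
main (void)
{
  /* High list R7-R12 with base register R8: R7 is the first choice.  */
  assert (pick_spare_reg_sketch (0x1F80u, 8) == 7);
  /* Same list with base register R7: fall through to R8.  */
  assert (pick_spare_reg_sketch (0x1F80u, 7) == 8);
  return 0;
}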
+static void
+stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
+				      bfd * output_bfd,
+				      const insn32 initial_insn,
+				      const bfd_byte *const initial_insn_addr,
+				      bfd_byte *const base_stub_contents)
+{
+  int num_regs = ((unsigned int) initial_insn << 24) >> 24;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_vldm (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of fewer than eight registers, which do not trigger the
+     hardware issue.  */
+  if (num_regs <= 8)
+    {
+      /* Untouched instruction.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+  else
+    {
+      bfd_boolean is_ia_nobang = /* (IA without !).  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
+      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
+      bfd_boolean is_db_bang = /* (DB with !).  */
+	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
+      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
+      /* d = UInt (Vd:D).  */
+      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
+	| (((unsigned int) initial_insn << 9) >> 31);
+
+      /* Compute the number of 8-register chunks needed to split.  */
+      int chunks = (num_regs % 8) ? (num_regs / 8 + 1) : (num_regs / 8);
+      int chunk;
+
+      /* The test coverage has been done assuming the following
+	 hypothesis that exactly one of the previous is_ predicates is
+	 true.  */
+      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
+		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
+
+      /* We treat the cutting of the register list in one pass for all
+	 cases, then we emit the adjustments:
+
+	 vldm rx, {...}
+	 -> vldm rx!, {8_words_or_less} for each needed 8_word
+	 -> sub rx, rx, #size (list)
+
+	 vldm rx!, {...}
+	 -> vldm rx!, {8_words_or_less} for each needed 8_word
+	 This also handles the VPOP instruction (when rx is SP)
+
+	 vldmdb rx!, {...}
+	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
+      for (chunk = 0; chunk < chunks; ++chunk)
+	{
+	  if (is_ia_nobang || is_ia_bang)
+	    {
+	      current_stub_contents =
+		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				    create_instruction_vldmia
+				    (base_reg,
+				     /*wback=*/1,
+				     chunks - (chunk + 1)
+				     ? 8 : num_regs - chunk * 8,
+				     first_reg + 8 * chunk));
+	    }
+	  else if (is_db_bang)
+	    {
+	      current_stub_contents =
+		push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				    create_instruction_vldmdb
+				    (base_reg,
+				     chunks - (chunk + 1)
+				     ? 8 : num_regs - chunk * 8,
+				     first_reg + 8 * chunk));
+	    }
+	}
+
+      /* Only this case requires the base register compensation
+	 subtract.  */
+      if (is_ia_nobang)
+	{
+	  current_stub_contents =
+	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+				create_instruction_sub
+				(base_reg, base_reg, 4 * num_regs));
+	}
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+			    create_instruction_branch_absolute
+			    (initial_insn_addr - current_stub_contents));
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+			     base_stub_contents, current_stub_contents,
+			     base_stub_contents +
+			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
+}
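
/* For illustration only (not part of the patch): the ceiling division
   used above to split a VLDM register list into transfers of at most
   eight registers, together with the size of the final chunk,
   num_regs - chunk * 8.  */
#include <assert.h>

static int
vldm_chunks_sketch (int num_regs)
{
  return (num_regs % 8) ? (num_regs / 8 + 1) : (num_regs / 8);
}

int
main (void)
{
  assert (vldm_chunks_sketch (9) == 2);    /* 8 + 1.      */
  assert (vldm_chunks_sketch (16) == 2);   /* 8 + 8.      */
  assert (vldm_chunks_sketch (17) == 3);   /* 8 + 8 + 1.  */

  /* Size of the last chunk for 17 registers: 17 - 2 * 8 == 1.  */
  assert (17 - (vldm_chunks_sketch (17) - 1) * 8 == 1);
  return 0;
}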
+static void
+stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
+				 bfd * output_bfd,
+				 const insn32 initial_insn,
+				 const bfd_byte *const initial_insn_addr,
+				 bfd_byte *const base_stub_contents)
+{
+  /* Dispatch to the relevant stub writer according to the kind of
+     offending load multiple.  */
+  if (is_thumb2_ldmia (initial_insn))
+    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
+					   initial_insn, initial_insn_addr,
+					   base_stub_contents);
+  else if (is_thumb2_ldmdb (initial_insn))
+    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
+					   initial_insn, initial_insn_addr,
+					   base_stub_contents);
+  else if (is_thumb2_vldm (initial_insn))
+    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
+					  initial_insn, initial_insn_addr,
+					  base_stub_contents);
+}
+
+/* End of stm32l4xx work-around.  */
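
/* For illustration only (not part of the patch): the SUB encoding used by
   the stubs above is T3 (modified immediate).  create_instruction_sub
   stores the raw value in the i:imm3:imm8 fields, which ThumbExpandImm
   interprets as the plain value only while it fits in eight bits.  The
   base-register compensations emitted here are at most 4 * 32 bytes, so
   the shortcut is safe; the sketch checks that bound.  The _sketch names
   are invented.  */
#include <assert.h>

static unsigned
sub_t3_immediate_fields_sketch (int value)
{
  /* Same field packing as create_instruction_sub above.  */
  return ((value & 0x800) >> 11) << 26
    | ((value & 0x700) >> 8) << 12
    | (value & 0x0ff);
}

int
main (void)
{
  int num_regs;

  /* For every register count a stub can compensate, i and imm3 stay
     zero, i.e. the modified immediate degenerates to a plain byte.  */
  for (num_regs = 1; num_regs <= 32; num_regs++)
    {
      unsigned fields = sub_t3_immediate_fields_sketch (4 * num_regs);

      assert ((fields & (1u << 26)) == 0);      /* i == 0.     */
      assert ((fields & (0x7u << 12)) == 0);    /* imm3 == 0.  */
      assert ((fields & 0xffu) == (unsigned) (4 * num_regs));
    }
  return 0;
}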
+/* Add relocation REL against section OUTPUT_SEC to the relocation data
+   that will be emitted for that section.  */
+
+static void
+elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
+			  asection *output_sec, Elf_Internal_Rela *rel)
+{
+  BFD_ASSERT (output_sec && rel);
+  struct bfd_elf_section_reloc_data *output_reldata;
+  struct elf32_arm_link_hash_table *htab;
+  struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
+  Elf_Internal_Shdr *rel_hdr;
+
+  if (oesd->rel.hdr)
+    {
+      rel_hdr = oesd->rel.hdr;
+      output_reldata = &(oesd->rel);
+    }
+  else if (oesd->rela.hdr)
+    {
+      rel_hdr = oesd->rela.hdr;
+      output_reldata = &(oesd->rela);
+    }
+  else
+    {
+      abort ();
+    }
+
+  bfd_byte *erel = rel_hdr->contents;
+  erel += output_reldata->count * rel_hdr->sh_entsize;
+  htab = elf32_arm_hash_table (info);
+  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
+  output_reldata->count++;
+}
+
 /* Do code byteswapping.  Return FALSE afterwards so that the section is
    written out as normal.
*/ @@ -15534,6 +17088,7 @@ elf32_arm_write_section (bfd *output_bfd, struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); elf32_arm_section_map *map; elf32_vfp11_erratum_list *errnode; + elf32_stm32l4xx_erratum_list *stm32l4xx_errnode; bfd_vma ptr; bfd_vma end; bfd_vma offset = sec->output_section->vma + sec->output_offset; @@ -15628,6 +17183,89 @@ elf32_arm_write_section (bfd *output_bfd, } } + if (arm_data->stm32l4xx_erratumcount != 0) + { + for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist; + stm32l4xx_errnode != 0; + stm32l4xx_errnode = stm32l4xx_errnode->next) + { + bfd_vma target = stm32l4xx_errnode->vma - offset; + + switch (stm32l4xx_errnode->type) + { + case STM32L4XX_ERRATUM_BRANCH_TO_VENEER: + { + unsigned int insn; + bfd_vma branch_to_veneer = + stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma; + + if ((signed) branch_to_veneer < -(1 << 24) + || (signed) branch_to_veneer >= (1 << 24)) + { + bfd_vma out_of_range = + ((signed) branch_to_veneer < -(1 << 24)) ? + - branch_to_veneer - (1 << 24) : + ((signed) branch_to_veneer >= (1 << 24)) ? + branch_to_veneer - (1 << 24) : 0; + + (*_bfd_error_handler) + (_("%B(%#x): error: Cannot create STM32L4XX veneer. " + "Jump out of range by %ld bytes. " + "Cannot encode branch instruction. "), + output_bfd, + (long) (stm32l4xx_errnode->vma - 4), + out_of_range); + continue; + } + + insn = create_instruction_branch_absolute + (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma); + + /* The instruction is before the label. */ + target -= 4; + + put_thumb2_insn (globals, output_bfd, + (bfd_vma) insn, contents + target); + } + break; + + case STM32L4XX_ERRATUM_VENEER: + { + bfd_byte * veneer; + bfd_byte * veneer_r; + unsigned int insn; + + veneer = contents + target; + veneer_r = veneer + + stm32l4xx_errnode->u.b.veneer->vma + - stm32l4xx_errnode->vma - 4; + + if ((signed) (veneer_r - veneer - + STM32L4XX_ERRATUM_VLDM_VENEER_SIZE > + STM32L4XX_ERRATUM_LDM_VENEER_SIZE ? + STM32L4XX_ERRATUM_VLDM_VENEER_SIZE : + STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24) + || (signed) (veneer_r - veneer) >= (1 << 24)) + { + (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX " + "veneer."), output_bfd); + continue; + } + + /* Original instruction. */ + insn = stm32l4xx_errnode->u.v.branch->u.b.insn; + + stm32l4xx_create_replacing_stub + (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer); + } + break; + + default: + abort (); + } + } + } + if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX) { arm_unwind_table_edit *edit_node @@ -15680,6 +17318,26 @@ elf32_arm_write_section (bfd *output_bfd, usual BFD method. */ prel31_offset = (text_offset - exidx_offset) & 0x7ffffffful; + if (bfd_link_relocatable (link_info)) + { + /* Here relocation for new EXIDX_CANTUNWIND is + created, so there is no need to + adjust offset by hand. */ + prel31_offset = text_sec->output_offset + + text_sec->size; + + /* New relocation entity. */ + asection *text_out = text_sec->output_section; + Elf_Internal_Rela rel; + rel.r_addend = 0; + rel.r_offset = exidx_offset; + rel.r_info = ELF32_R_INFO (text_out->target_index, + R_ARM_PREL31); + + elf32_arm_add_relocation (output_bfd, link_info, + sec->output_section, + &rel); + } /* First address we can't unwind. 
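*/

/* For illustration only (not part of the patch): a standalone sketch of
   the prel31 field written below.  A prel31 value is a 31-bit signed
   offset stored in the low 31 bits of a word; readers sign-extend it
   from bit 30.  The _sketch names are invented.  */
#include <assert.h>
#include <stdint.h>

static uint32_t
prel31_encode_sketch (int32_t offset)
{
  return (uint32_t) offset & 0x7fffffffu;
}

static int32_t
prel31_decode_sketch (uint32_t field)
{
  return (int32_t) (field << 1) >> 1;   /* Sign-extend from bit 30.  */
}

int
main (void)
{
  int32_t offsets[] = { 0, 0x1234, -0x1234, 0x3fffffff, -0x40000000 };
  unsigned i;

  for (i = 0; i < sizeof offsets / sizeof *offsets; i++)
    assert (prel31_decode_sketch (prel31_encode_sketch (offsets[i]))
	    == offsets[i]);
  return 0;
}

/* First address we can't unwind.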
*/ bfd_put_32 (output_bfd, prel31_offset, @@ -15724,8 +17382,8 @@ elf32_arm_write_section (bfd *output_bfd, data.writing_section = sec; data.contents = contents; - bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub, - &data); + bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub, + & data); } if (mapcount == 0) @@ -15924,7 +17582,7 @@ elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info, || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE) && (abfd->flags & DYNAMIC) == 0 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) - elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE; + elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any; if (elf32_arm_hash_table (info) == NULL) return FALSE; @@ -16167,6 +17825,41 @@ elf32_arm_get_synthetic_symtab (bfd *abfd, return n; } +static const struct bfd_elf_special_section +elf32_arm_special_sections[] = +{ +/* Catch sections with .text.noread prefix and apply allocate, execute and + noread section attributes. */ + { STRING_COMMA_LEN (".text.noread"), -2, SHT_PROGBITS, + SHF_ALLOC + SHF_EXECINSTR + SHF_ARM_NOREAD }, + { NULL, 0, 0, 0, 0 } +}; + +static bfd_boolean +elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr) +{ + if (hdr->sh_flags & SHF_ARM_NOREAD) + *flags |= SEC_ELF_NOREAD; + return TRUE; +} + +static flagword +elf32_arm_lookup_section_flags (char *flag_name) +{ + if (!strcmp (flag_name, "SHF_ARM_NOREAD")) + return SHF_ARM_NOREAD; + + return SEC_NO_FLAGS; +} + +static unsigned int +elf32_arm_count_additional_relocs (asection *sec) +{ + struct _arm_elf_section_data *arm_data; + arm_data = get_arm_elf_section_data (sec); + return arm_data->additional_reloc_count; +} + #define ELF_ARCH bfd_arch_arm #define ELF_TARGET_ID ARM_ELF_DATA #define ELF_MACHINE_CODE EM_ARM @@ -16221,6 +17914,7 @@ elf32_arm_get_synthetic_symtab (bfd *abfd, #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms #define elf_backend_begin_write_processing elf32_arm_begin_write_processing #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook +#define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs #define elf_backend_can_refcount 1 #define elf_backend_can_gc_sections 1 @@ -16232,6 +17926,7 @@ elf32_arm_get_synthetic_symtab (bfd *abfd, #define elf_backend_default_use_rela_p 0 #define elf_backend_got_header_size 12 +#define elf_backend_extern_protected_data 1 #undef elf_backend_obj_attrs_vendor #define elf_backend_obj_attrs_vendor "aeabi" @@ -16244,6 +17939,13 @@ elf32_arm_get_synthetic_symtab (bfd *abfd, #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown +#undef elf_backend_special_sections +#define elf_backend_special_sections elf32_arm_special_sections +#undef elf_backend_section_flags +#define elf_backend_section_flags elf32_arm_section_flags +#undef elf_backend_lookup_section_flags_hook +#define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags + #include "elf32-target.h" /* Native Client targets. */
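
/* For illustration only (not part of the patch): a standalone sketch of
   the SHF_ARM_NOREAD plumbing wired up above.  A flag-name string maps to
   the ELF section flag (as in elf32_arm_lookup_section_flags), an input
   section carrying that flag is marked SEC_ELF_NOREAD (as in
   elf32_arm_section_flags), and elf32_arm_fake_sections writes the bit
   back out.  Flag values and _sketch names are assumptions local to the
   example.  */
#include <assert.h>
#include <string.h>

#define SHF_ARM_NOREAD_SKETCH  0x20000000  /* Assumed flag value.    */
#define SEC_ELF_NOREAD_SKETCH  0x1         /* Internal marker here.  */

static unsigned
lookup_section_flag_sketch (const char *name)
{
  if (!strcmp (name, "SHF_ARM_NOREAD"))
    return SHF_ARM_NOREAD_SKETCH;
  return 0;
}

static unsigned
internal_flags_sketch (unsigned sh_flags)
{
  /* elf32_arm_section_flags direction: ELF flag -> internal flag.  */
  return (sh_flags & SHF_ARM_NOREAD_SKETCH) ? SEC_ELF_NOREAD_SKETCH : 0;
}

static unsigned
output_sh_flags_sketch (unsigned internal_flags)
{
  /* elf32_arm_fake_sections direction: internal flag -> ELF flag.  */
  return (internal_flags & SEC_ELF_NOREAD_SKETCH) ? SHF_ARM_NOREAD_SKETCH : 0;
}

int
main (void)
{
  unsigned shf = lookup_section_flag_sketch ("SHF_ARM_NOREAD");

  assert (shf == SHF_ARM_NOREAD_SKETCH);
  /* Round trip: the input flag survives to the output section header.  */
  assert (output_sh_flags_sketch (internal_flags_sketch (shf)) == shf);
  return 0;
}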