/* tc-arm.c -- Assemble for the ARM
- Copyright 1994-2013 Free Software Foundation, Inc.
+ Copyright (C) 1994-2015 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modified by David Taylor (dtaylor@armltd.co.uk)
Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
/* Warn on using deprecated features. */
static int warn_on_deprecated = TRUE;
+/* Understand CodeComposer Studio assembly syntax. */
+bfd_boolean codecomposer_syntax = FALSE;
/* Variables that we set while parsing command-line options. Once all
options have been read we re-process these values to set the real
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
+static const arm_feature_set fpu_vfp_ext_armv8xd =
+ ARM_FEATURE (0, FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
/* Must be long enough to hold any of the names in arm_cpus. */
static char selected_cpu_name[16];
+extern FLONUM_TYPE generic_floating_point_number;
+
/* Return TRUE if no cpu was selected on the command line. */
static bfd_boolean
no_cpu_selected (void)
#define LITERAL_MASK 0xf000f000
#define OPCODE_MASK 0xfe1fffff
#define V4_STR_BIT 0x00000020
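+/* Encoding bits (the D bit and the Vd field) that VLDR and VMOV share;
+   used below when rewriting a literal VLDR as a VMOV immediate.  */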
+#define VLDR_VMOV_SAME 0x0040f000
#define T2_SUBS_PC_LR 0xf3de8f00
struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
struct literal_pool * next;
+ unsigned int alignment;
} literal_pool;
/* Pointer to a linked list of literal pools. */
literal_pool * list_of_pools = NULL;
+typedef enum asmfunc_states
+{
+ OUTSIDE_ASMFUNC,
+ WAITING_ASMFUNC_NAME,
+ WAITING_ENDASMFUNC
+} asmfunc_states;
+
+static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
+
#ifdef OBJ_ELF
# define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. */
-const char comment_chars[] = "@";
+char arm_comment_chars[] = "@";
/* This array holds the chars that only start a comment at the beginning of
a line. If the line seems to have the form '# 123 filename'
/* Also note that comments like this one will always work. */
const char line_comment_chars[] = "#";
-const char line_separator_chars[] = ";";
+char arm_line_separator_chars[] = ";";
/* Chars that can be used to separate mant
from exp in floating point numbers. */
demand_empty_rest_of_line ();
}
+/* Directives: CodeComposer Studio. */
+
+/* .ref (for CodeComposer Studio syntax only). */
+static void
+s_ccs_ref (int unused ATTRIBUTE_UNUSED)
+{
+ if (codecomposer_syntax)
+ ignore_rest_of_line ();
+ else
+ as_bad (_(".ref pseudo-op only available with -mccs flag."));
+}
+
+/* If name is not NULL, it is used to mark the beginning of a function,
+   whereas if it is NULL it marks the end of the function.  */
+static void
+asmfunc_debug (const char * name)
+{
+ static const char * last_name = NULL;
+
+ if (name != NULL)
+ {
+ gas_assert (last_name == NULL);
+ last_name = name;
+
+ if (debug_type == DEBUG_STABS)
+ stabs_generate_asm_func (name, name);
+ }
+ else
+ {
+ gas_assert (last_name != NULL);
+
+ if (debug_type == DEBUG_STABS)
+ stabs_generate_asm_endfunc (last_name, last_name);
+
+ last_name = NULL;
+ }
+}
+
+static void
+s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
+{
+ if (codecomposer_syntax)
+ {
+ switch (asmfunc_state)
+ {
+ case OUTSIDE_ASMFUNC:
+ asmfunc_state = WAITING_ASMFUNC_NAME;
+ break;
+
+ case WAITING_ASMFUNC_NAME:
+ as_bad (_(".asmfunc repeated."));
+ break;
+
+ case WAITING_ENDASMFUNC:
+ as_bad (_(".asmfunc without function."));
+ break;
+ }
+ demand_empty_rest_of_line ();
+ }
+ else
+ as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
+}
+
+static void
+s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
+{
+ if (codecomposer_syntax)
+ {
+ switch (asmfunc_state)
+ {
+ case OUTSIDE_ASMFUNC:
+ as_bad (_(".endasmfunc without a .asmfunc."));
+ break;
+
+ case WAITING_ASMFUNC_NAME:
+ as_bad (_(".endasmfunc without function."));
+ break;
+
+ case WAITING_ENDASMFUNC:
+ asmfunc_state = OUTSIDE_ASMFUNC;
+ asmfunc_debug (NULL);
+ break;
+ }
+ demand_empty_rest_of_line ();
+ }
+ else
+ as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
+}
+
+static void
+s_ccs_def (int name)
+{
+ if (codecomposer_syntax)
+ s_globl (name);
+ else
+ as_bad (_(".def pseudo-op only available with -mccs flag."));
+}
+
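+/* A rough sketch of the directive sequence these handlers expect when
+   assembling with -mccs (the label name below is illustrative only):
+
+		.asmfunc
+	my_fn
+		mov	r0, #0
+		bx	lr
+		.endasmfunc  */
+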
/* Directives: Literal pools. */
static literal_pool *
pool->sub_section = now_subseg;
pool->next = list_of_pools;
pool->symbol = NULL;
+ pool->alignment = 2;
/* Add it to the list. */
list_of_pools = pool;
structure to the relevant literal pool. */
static int
-add_to_lit_pool (void)
+add_to_lit_pool (unsigned int nbytes)
{
+#define PADDING_SLOT 0x1
+#define LIT_ENTRY_SIZE_MASK 0xFF
literal_pool * pool;
- unsigned int entry;
+ unsigned int entry, pool_size = 0;
+ bfd_boolean padding_slot_p = FALSE;
+ unsigned imm1 = 0;
+ unsigned imm2 = 0;
+
+ if (nbytes == 8)
+ {
+ imm1 = inst.operands[1].imm;
+ imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
+ : inst.reloc.exp.X_unsigned ? 0
+ : ((bfd_int64_t) inst.operands[1].imm) >> 32);
+ if (target_big_endian)
+ {
+ imm1 = imm2;
+ imm2 = inst.operands[1].imm;
+ }
+ }
pool = find_or_make_literal_pool ();
/* Check if this literal value is already in the pool. */
for (entry = 0; entry < pool->next_free_entry; entry ++)
{
- if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
- && (inst.reloc.exp.X_op == O_constant)
- && (pool->literals[entry].X_add_number
- == inst.reloc.exp.X_add_number)
- && (pool->literals[entry].X_unsigned
- == inst.reloc.exp.X_unsigned))
+ if (nbytes == 4)
+ {
+ if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
+ && (inst.reloc.exp.X_op == O_constant)
+ && (pool->literals[entry].X_add_number
+ == inst.reloc.exp.X_add_number)
+ && (pool->literals[entry].X_md == nbytes)
+ && (pool->literals[entry].X_unsigned
+ == inst.reloc.exp.X_unsigned))
+ break;
+
+ if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
+ && (inst.reloc.exp.X_op == O_symbol)
+ && (pool->literals[entry].X_add_number
+ == inst.reloc.exp.X_add_number)
+ && (pool->literals[entry].X_add_symbol
+ == inst.reloc.exp.X_add_symbol)
+ && (pool->literals[entry].X_op_symbol
+ == inst.reloc.exp.X_op_symbol)
+ && (pool->literals[entry].X_md == nbytes))
+ break;
+ }
+ else if ((nbytes == 8)
+ && !(pool_size & 0x7)
+ && ((entry + 1) != pool->next_free_entry)
+ && (pool->literals[entry].X_op == O_constant)
+ && (pool->literals[entry].X_add_number == (offsetT) imm1)
+ && (pool->literals[entry].X_unsigned
+ == inst.reloc.exp.X_unsigned)
+ && (pool->literals[entry + 1].X_op == O_constant)
+ && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
+ && (pool->literals[entry + 1].X_unsigned
+ == inst.reloc.exp.X_unsigned))
break;
- if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
- && (inst.reloc.exp.X_op == O_symbol)
- && (pool->literals[entry].X_add_number
- == inst.reloc.exp.X_add_number)
- && (pool->literals[entry].X_add_symbol
- == inst.reloc.exp.X_add_symbol)
- && (pool->literals[entry].X_op_symbol
- == inst.reloc.exp.X_op_symbol))
+ padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
+ if (padding_slot_p && (nbytes == 4))
break;
+
+ pool_size += 4;
}
/* Do we need to create a new entry? */
return FAIL;
}
- pool->literals[entry] = inst.reloc.exp;
+ if (nbytes == 8)
+ {
+      /* For 8-byte entries, we align to an 8-byte boundary and split the
+	 value into two 4-byte entries, because on a 32-bit host an 8-byte
+	 constant is treated as a bignum and so lives in "generic_bignum",
+	 which would be overwritten by later assignments.
+
+	 We also need to make sure there is enough space for
+	 the split.
+
+	 We also check that the literal operand is a constant number.  */
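+      /* For example, when the pool already holds an odd number of words, a
+	 padding word is emitted first so that the two halves of the constant
+	 stay 8-byte aligned.  */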
+ if (!(inst.reloc.exp.X_op == O_constant
+ || inst.reloc.exp.X_op == O_big))
+ {
+ inst.error = _("invalid type for literal pool");
+ return FAIL;
+ }
+ else if (pool_size & 0x7)
+ {
+ if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
+ {
+ inst.error = _("literal pool overflow");
+ return FAIL;
+ }
+
+ pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry].X_add_number = 0;
+ pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
+ pool->next_free_entry += 1;
+ pool_size += 4;
+ }
+ else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
+ {
+ inst.error = _("literal pool overflow");
+ return FAIL;
+ }
+
+ pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry].X_op = O_constant;
+ pool->literals[entry].X_add_number = imm1;
+ pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
+ pool->literals[entry++].X_md = 4;
+ pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry].X_op = O_constant;
+ pool->literals[entry].X_add_number = imm2;
+ pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
+ pool->literals[entry].X_md = 4;
+ pool->alignment = 3;
+ pool->next_free_entry += 1;
+ }
+ else
+ {
+ pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry].X_md = 4;
+ }
+
#ifdef OBJ_ELF
/* PR ld/12974: Record the location of the first source line to reference
this entry in the literal pool. If it turns out during linking that the
#endif
pool->next_free_entry += 1;
}
+ else if (padding_slot_p)
+ {
+ pool->literals[entry] = inst.reloc.exp;
+ pool->literals[entry].X_md = nbytes;
+ }
inst.reloc.exp.X_op = O_symbol;
- inst.reloc.exp.X_add_number = ((int) entry) * 4;
+ inst.reloc.exp.X_add_number = pool_size;
inst.reloc.exp.X_add_symbol = pool->symbol;
return SUCCESS;
}
+bfd_boolean
+tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
+{
+ bfd_boolean ret = TRUE;
+
+ if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
+ {
+ const char *label = rest;
+
+ while (!is_end_of_line[(int) label[-1]])
+ --label;
+
+ if (*label == '.')
+ {
+ as_bad (_("Invalid label '%s'"), label);
+ ret = FALSE;
+ }
+
+ asmfunc_debug (label);
+
+ asmfunc_state = WAITING_ENDASMFUNC;
+ }
+
+ return ret;
+}
+
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value. That's what these functions do. */
valueT valu, /* Symbol value. */
fragS * frag) /* Associated fragment. */
{
- unsigned int name_length;
+ size_t name_length;
char * preserved_copy_of_name;
name_length = strlen (name) + 1; /* +1 for \0. */
#endif /* DEBUG_SYMS */
}
-
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
|| pool->next_free_entry == 0)
return;
- mapping_state (MAP_DATA);
-
/* Align pool as you have word accesses.
Only make a frag if we have to. */
if (!need_pass_2)
- frag_align (2, 0, 0);
+ frag_align (pool->alignment, 0, 0);
record_alignment (now_seg, 2);
+#ifdef OBJ_ELF
+ seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
+ make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
+#endif
sprintf (sym_name, "$$lit_\002%x", pool->id);
symbol_locate (pool->symbol, sym_name, now_seg,
dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
/* First output the expression in the instruction to the pool. */
- emit_expr (&(pool->literals[entry]), 4); /* .word */
+ emit_expr (&(pool->literals[entry]),
+ pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
}
/* Mark the pool as empty. */
memcpy (base, save_buf, p - base);
offset = nbytes - size;
- p = frag_more ((int) nbytes);
+ p = frag_more (nbytes);
+ memset (p, 0, nbytes);
fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
size, &exp, 0, (enum bfd_reloc_code_real) reloc);
}
s_arm_unwind_save_fpa (reg->number);
return;
- case REG_TYPE_RN: s_arm_unwind_save_core (); return;
+ case REG_TYPE_RN:
+ s_arm_unwind_save_core ();
+ return;
+
case REG_TYPE_VFD:
if (arch_v6)
s_arm_unwind_save_vfp_armv6 ();
else
s_arm_unwind_save_vfp ();
return;
- case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
- case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
+
+ case REG_TYPE_MMXWR:
+ s_arm_unwind_save_mmxwr ();
+ return;
+
+ case REG_TYPE_MMXWCG:
+ s_arm_unwind_save_mmxwcg ();
+ return;
default:
as_bad (_(".unwind_save does not support this kind of register"));
#ifdef TE_PE
{"secrel32", pe_directive_secrel, 0},
#endif
+
+ /* These are for compatibility with CodeComposer Studio. */
+ {"ref", s_ccs_ref, 0},
+ {"def", s_ccs_def, 0},
+ {"asmfunc", s_ccs_asmfunc, 0},
+ {"endasmfunc", s_ccs_endasmfunc, 0},
+
{ 0, 0, 0 }
};
\f
instructions. Puts the result directly in inst.operands[i]. */
static int
-parse_big_immediate (char **str, int i)
+parse_big_immediate (char **str, int i, expressionS *in_exp,
+ bfd_boolean allow_symbol_p)
{
expressionS exp;
+ expressionS *exp_p = in_exp ? in_exp : &exp;
char *ptr = *str;
- my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
+ my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
- if (exp.X_op == O_constant)
+ if (exp_p->X_op == O_constant)
{
- inst.operands[i].imm = exp.X_add_number & 0xffffffff;
+ inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
/* If we're on a 64-bit host, then a 64-bit number can be returned using
O_constant. We have to be careful not to break compilation for
32-bit X_add_number, though. */
- if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
+ if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
{
- /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
- inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
+ /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
+ inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
+ & 0xffffffff);
inst.operands[i].regisimm = 1;
}
}
- else if (exp.X_op == O_big
- && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
+ else if (exp_p->X_op == O_big
+ && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
{
unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
PR 11972: Bignums can now be sign-extended to the
size of a .octa so check that the out of range bits
are all zero or all one. */
- if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
+ if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
{
LITTLENUM_TYPE m = -1;
&& generic_bignum[parts * 2] != m)
return FAIL;
- for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
+ for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
if (generic_bignum[j] != generic_bignum[j-1])
return FAIL;
}
<< (LITTLENUM_NUMBER_OF_BITS * j);
inst.operands[i].regisimm = 1;
}
- else
+ else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
return FAIL;
*str = ptr;
return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
+
+/* Detect the presence of a floating point or integer zero constant,
+ i.e. #0.0 or #0. */
+
+static bfd_boolean
+parse_ifimm_zero (char **in)
+{
+ int error_code;
+
+ if (!is_immediate_prefix (**in))
+ return FALSE;
+
+ ++*in;
+
+ /* Accept #0x0 as a synonym for #0. */
+ if (strncmp (*in, "0x", 2) == 0)
+ {
+ int val;
+ if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
+ return FALSE;
+ return TRUE;
+ }
+
+ error_code = atof_generic (in, ".", EXP_CHARS,
+ &generic_floating_point_number);
+
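+  /* Accept only a positive zero: the parse must succeed, the sign must be
+     '+', and the value must have no significant littlenums (LOW > LEADER).  */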
+ if (!error_code
+ && generic_floating_point_number.sign == '+'
+ && (generic_floating_point_number.low
+ > generic_floating_point_number.leader))
+ return TRUE;
+
+ return FALSE;
+}
+
/* Parse an 8-bit "quarter-precision" floating point number of the form:
0baBbbbbbc defgh000 00000000 00000000.
The zero and minus-zero cases need special handling, since they can't be
inst.operands[i].reg = REG_PC;
inst.operands[i].isreg = 1;
inst.operands[i].preind = 1;
- }
- /* Otherwise a load-constant pseudo op, no special treatment needed here. */
- if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
+ if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
+ return PARSE_OPERAND_FAIL;
+ }
+ else if (parse_big_immediate (&p, i, &inst.reloc.exp,
+ /*allow_symbol_p=*/TRUE))
return PARSE_OPERAND_FAIL;
*str = p;
Case 10: VMOV.F32 <Sd>, #<imm>
Case 11: VMOV.F64 <Dd>, #<imm> */
inst.operands[i].immisfloat = 1;
- else if (parse_big_immediate (&ptr, i) == SUCCESS)
+ else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
+ == SUCCESS)
/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
;
OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
+ OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
OP_RR_RNSC, /* ARM reg or Neon scalar. */
OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
break;
+ case OP_RSVD_FI0:
+ {
+ po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
+ break;
+ try_ifimm0:
+ if (parse_ifimm_zero (&str))
+ inst.operands[i].imm = 0;
+ else
+ {
+ inst.error
+ = _("only floating point zero is allowed as immediate value");
+ goto failure;
+ }
+ }
+ break;
+
case OP_RR_RNSC:
{
po_scalar_or_goto (8, try_rr);
try_immbig:
/* There's a possibility of getting a 64-bit immediate here, so
we need special handling. */
- if (parse_big_immediate (&str, i) == FAIL)
+ if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
+ == FAIL)
{
inst.error = _("immediate value is out of range");
goto failure;
/* Functions for operand encoding. ARM, then Thumb. */
-#define rotate_left(v, n) (v << n | v >> (32 - n))
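+/* Rotate V left by N bits; both shift counts are masked so that a rotation
+   by 0 does not produce an undefined shift by 32.  */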
+#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
/* If VAL can be encoded in the immediate field of an ARM instruction,
return the encoded form. Otherwise, return FAIL. */
}
}
-/* inst.operands[i] was set up by parse_address. Encode it into an
- ARM-format instruction. Reject all forms which cannot be encoded
- into a coprocessor load/store instruction. If wb_ok is false,
- reject use of writeback; if unind_ok is false, reject use of
- unindexed addressing. If reloc_override is not 0, use it instead
- of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
- (in which case it is preserved). */
+/* Write immediate bits [7:0] to the following locations:
+
+ |28/24|23 19|18 16|15 4|3 0|
+ | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
+
+ This function is used by VMOV/VMVN/VORR/VBIC. */
+
+static void
+neon_write_immbits (unsigned immbits)
+{
+ inst.instruction |= immbits & 0xf;
+ inst.instruction |= ((immbits >> 4) & 0x7) << 16;
+ inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
+}
+
+/* Invert low-order SIZE bits of XHI:XLO. */
+
+static void
+neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
+{
+ unsigned immlo = xlo ? *xlo : 0;
+ unsigned immhi = xhi ? *xhi : 0;
+
+ switch (size)
+ {
+ case 8:
+ immlo = (~immlo) & 0xff;
+ break;
+
+ case 16:
+ immlo = (~immlo) & 0xffff;
+ break;
+
+ case 64:
+ immhi = (~immhi) & 0xffffffff;
+ /* fall through. */
+
+ case 32:
+ immlo = (~immlo) & 0xffffffff;
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (xlo)
+ *xlo = immlo;
+
+ if (xhi)
+ *xhi = immhi;
+}
+
+/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
+ A, B, C, D. */
static int
-encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
+neon_bits_same_in_bytes (unsigned imm)
{
- inst.instruction |= inst.operands[i].reg << 16;
+ return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
+ && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
+ && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
+ && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
+}
- gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
+/* For immediate of above form, return 0bABCD. */
- if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
+static unsigned
+neon_squash_bits (unsigned imm)
+{
+ return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
+ | ((imm & 0x01000000) >> 21);
+}
+
+/* Compress quarter-float representation to 0b...000 abcdefgh. */
+
+static unsigned
+neon_qfloat_bits (unsigned imm)
+{
+ return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
+}
+
+/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
+ the instruction. *OP is passed as the initial value of the op field, and
+ may be set to a different value depending on the constant (i.e.
+ "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
+ MVN). If the immediate looks like a repeated pattern then also
+ try smaller element sizes. */
+
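+/* For example, a 64-bit constant with IMMHI == IMMLO == 0x0000ab00 passes
+   the SIZE == 64 checks and is then handled by the SIZE >= 32 case, giving
+   *IMMBITS = 0xab and CMODE 0x2.  */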
+static int
+neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
+ unsigned *immbits, int *op, int size,
+ enum neon_el_type type)
+{
+ /* Only permit float immediates (including 0.0/-0.0) if the operand type is
+ float. */
+ if (type == NT_float && !float_p)
+ return FAIL;
+
+ if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
{
- gas_assert (!inst.operands[i].writeback);
- if (!unind_ok)
+ if (size != 32 || *op == 1)
+ return FAIL;
+ *immbits = neon_qfloat_bits (immlo);
+ return 0xf;
+ }
+
+ if (size == 64)
+ {
+ if (neon_bits_same_in_bytes (immhi)
+ && neon_bits_same_in_bytes (immlo))
{
- inst.error = _("instruction does not support unindexed addressing");
- return FAIL;
+ if (*op == 1)
+ return FAIL;
+ *immbits = (neon_squash_bits (immhi) << 4)
+ | neon_squash_bits (immlo);
+ *op = 1;
+ return 0xe;
}
- inst.instruction |= inst.operands[i].imm;
- inst.instruction |= INDEX_UP;
- return SUCCESS;
- }
- if (inst.operands[i].preind)
- inst.instruction |= PRE_INDEX;
+ if (immhi != immlo)
+ return FAIL;
+ }
- if (inst.operands[i].writeback)
+ if (size >= 32)
{
- if (inst.operands[i].reg == REG_PC)
+ if (immlo == (immlo & 0x000000ff))
{
- inst.error = _("pc may not be used with write-back");
- return FAIL;
+ *immbits = immlo;
+ return 0x0;
}
- if (!wb_ok)
+ else if (immlo == (immlo & 0x0000ff00))
{
- inst.error = _("instruction does not support writeback");
- return FAIL;
+ *immbits = immlo >> 8;
+ return 0x2;
}
- inst.instruction |= WRITE_BACK;
+ else if (immlo == (immlo & 0x00ff0000))
+ {
+ *immbits = immlo >> 16;
+ return 0x4;
+ }
+ else if (immlo == (immlo & 0xff000000))
+ {
+ *immbits = immlo >> 24;
+ return 0x6;
+ }
+ else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
+ {
+ *immbits = (immlo >> 8) & 0xff;
+ return 0xc;
+ }
+ else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
+ {
+ *immbits = (immlo >> 16) & 0xff;
+ return 0xd;
+ }
+
+ if ((immlo & 0xffff) != (immlo >> 16))
+ return FAIL;
+ immlo &= 0xffff;
}
- if (reloc_override)
- inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
- else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
- || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
- && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
+ if (size >= 16)
{
- if (thumb_mode)
- inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
- else
- inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
+ if (immlo == (immlo & 0x000000ff))
+ {
+ *immbits = immlo;
+ return 0x8;
+ }
+ else if (immlo == (immlo & 0x0000ff00))
+ {
+ *immbits = immlo >> 8;
+ return 0xa;
+ }
+
+ if ((immlo & 0xff) != (immlo >> 8))
+ return FAIL;
+ immlo &= 0xff;
}
- /* Prefer + for zero encoded value. */
- if (!inst.operands[i].negative)
- inst.instruction |= INDEX_UP;
+ if (immlo == (immlo & 0x000000ff))
+ {
+ /* Don't allow MVN with 8-bit immediate. */
+ if (*op == 1)
+ return FAIL;
+ *immbits = immlo;
+ return 0xe;
+ }
- return SUCCESS;
+ return FAIL;
}
+enum lit_type
+{
+ CONST_THUMB,
+ CONST_ARM,
+ CONST_VEC
+};
+
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
Determine whether it can be performed with a move instruction; if
it can, convert inst.instruction to that move instruction and
inst.operands[i] describes the destination register. */
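+/* For instance, "ldr r0, =1" can become "mov r0, #1", while
+   "ldr r0, =0x12345678" falls back to a literal-pool load; with CONST_VEC,
+   "vldr d0, =<imm>" may likewise become a "vmov" when the constant fits a
+   Neon immediate encoding.  */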
static bfd_boolean
-move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
+move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
unsigned long tbit;
+ bfd_boolean thumb_p = (t == CONST_THUMB);
+ bfd_boolean arm_p = (t == CONST_ARM);
+ bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;
if (thumb_p)
tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
inst.error = _("invalid pseudo operation");
return TRUE;
}
- if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
+ if (inst.reloc.exp.X_op != O_constant
+ && inst.reloc.exp.X_op != O_symbol
+ && inst.reloc.exp.X_op != O_big)
{
inst.error = _("constant expression expected");
return TRUE;
}
- if (inst.reloc.exp.X_op == O_constant)
+ if ((inst.reloc.exp.X_op == O_constant
+ || inst.reloc.exp.X_op == O_big)
+ && !inst.operands[i].issingle)
{
- if (thumb_p)
+ if (thumb_p && inst.reloc.exp.X_op == O_constant)
{
if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
{
return TRUE;
}
}
- else
+ else if (arm_p && inst.reloc.exp.X_op == O_constant)
{
int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
if (value != FAIL)
return TRUE;
}
}
+ else if (vec64_p)
+ {
+ int op = 0;
+ unsigned immbits = 0;
+ unsigned immlo = inst.operands[1].imm;
+ unsigned immhi = inst.operands[1].regisimm
+ ? inst.operands[1].reg
+ : inst.reloc.exp.X_unsigned
+ ? 0
+ : ((bfd_int64_t)((int) immlo)) >> 32;
+ int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
+ &op, 64, NT_invtype);
+
+ if (cmode == FAIL)
+ {
+ neon_invert_size (&immlo, &immhi, 64);
+ op = !op;
+ cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
+ &op, 64, NT_invtype);
+ }
+ if (cmode != FAIL)
+ {
+ inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
+ | (1 << 23)
+ | (cmode << 8)
+ | (op << 5)
+ | (1 << 4);
+ /* Fill other bits in vmov encoding for both thumb and arm. */
+ if (thumb_mode)
+ inst.instruction |= (0x7 << 29) | (0xF << 24);
+ else
+ inst.instruction |= (0xF << 28) | (0x1 << 25);
+ neon_write_immbits (immbits);
+ return TRUE;
+ }
+ }
+ }
+
+ if (add_to_lit_pool ((!inst.operands[i].isvec
+ || inst.operands[i].issingle) ? 4 : 8) == FAIL)
+ return TRUE;
+
+ inst.operands[1].reg = REG_PC;
+ inst.operands[1].isreg = 1;
+ inst.operands[1].preind = 1;
+ inst.reloc.pc_rel = 1;
+ inst.reloc.type = (thumb_p
+ ? BFD_RELOC_ARM_THUMB_OFFSET
+ : (mode_3
+ ? BFD_RELOC_ARM_HWLITERAL
+ : BFD_RELOC_ARM_LITERAL));
+ return FALSE;
+}
+
+/* inst.operands[i] was set up by parse_address. Encode it into an
+ ARM-format instruction. Reject all forms which cannot be encoded
+ into a coprocessor load/store instruction. If wb_ok is false,
+ reject use of writeback; if unind_ok is false, reject use of
+ unindexed addressing. If reloc_override is not 0, use it instead
+ of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
+ (in which case it is preserved). */
+
+static int
+encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
+{
+ if (!inst.operands[i].isreg)
+ {
+ gas_assert (inst.operands[0].isvec);
+ if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
+ return SUCCESS;
+ }
+
+ inst.instruction |= inst.operands[i].reg << 16;
+
+ gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
+
+ if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
+ {
+ gas_assert (!inst.operands[i].writeback);
+ if (!unind_ok)
+ {
+ inst.error = _("instruction does not support unindexed addressing");
+ return FAIL;
+ }
+ inst.instruction |= inst.operands[i].imm;
+ inst.instruction |= INDEX_UP;
+ return SUCCESS;
+ }
+
+ if (inst.operands[i].preind)
+ inst.instruction |= PRE_INDEX;
+
+ if (inst.operands[i].writeback)
+ {
+ if (inst.operands[i].reg == REG_PC)
+ {
+ inst.error = _("pc may not be used with write-back");
+ return FAIL;
+ }
+ if (!wb_ok)
+ {
+ inst.error = _("instruction does not support writeback");
+ return FAIL;
+ }
+ inst.instruction |= WRITE_BACK;
}
- if (add_to_lit_pool () == FAIL)
+ if (reloc_override)
+ inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
+ else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
+ || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
+ && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
{
- inst.error = _("literal pool insertion failed");
- return TRUE;
+ if (thumb_mode)
+ inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
+ else
+ inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
}
- inst.operands[1].reg = REG_PC;
- inst.operands[1].isreg = 1;
- inst.operands[1].preind = 1;
- inst.reloc.pc_rel = 1;
- inst.reloc.type = (thumb_p
- ? BFD_RELOC_ARM_THUMB_OFFSET
- : (mode_3
- ? BFD_RELOC_ARM_HWLITERAL
- : BFD_RELOC_ARM_LITERAL));
- return FALSE;
+
+ /* Prefer + for zero encoded value. */
+ if (!inst.operands[i].negative)
+ inst.instruction |= INDEX_UP;
+
+ return SUCCESS;
}
/* Functions for instruction encoding, sorted by sub-architecture.
{
inst.instruction |= inst.operands[0].reg << 12;
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
return;
encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
check_ldr_r15_aligned ();
constraint (inst.operands[0].reg == REG_PC, BAD_PC);
inst.instruction |= inst.operands[0].reg << 12;
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
+ if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
return;
encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
{
if (opcode <= 0xffff)
inst.instruction = THUMB_OP32 (opcode);
- if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
return;
}
if (inst.operands[1].isreg
inst.instruction = THUMB_OP16 (inst.instruction);
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
return;
constraint (!inst.operands[1].preind
|| inst.operands[1].shifted
|| Rn > 7 || Rm > 7)
narrow = FALSE;
- else if (inst.instruction == T_MNEM_cmn)
+ else if (inst.instruction == T_MNEM_cmn
+ || inst.instruction == T_MNEM_tst)
narrow = TRUE;
else if (THUMB_SETS_FLAGS (inst.instruction))
narrow = !in_it_block ();
_("expression too complex"));
mask = inst.operands[0].imm;
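+  /* An explicit ".w" qualifier (inst.size_req == 4) forces the wide encoding
+     even when the register mask would fit the 16-bit form.  */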
- if ((mask & ~0xff) == 0)
+ if (inst.size_req != 4 && (mask & ~0xff) == 0)
inst.instruction = THUMB_OP16 (inst.instruction) | mask;
- else if ((inst.instruction == T_MNEM_push
- && (mask & ~0xff) == 1 << REG_LR)
- || (inst.instruction == T_MNEM_pop
- && (mask & ~0xff) == 1 << REG_PC))
+ else if (inst.size_req != 4
+ && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
+ ? REG_LR : REG_PC)))
{
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= THUMB_PP_PC_LR;
{
enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
+ int imm = inst.operands[2].imm;
+
+ constraint (imm < 0 || (unsigned)imm >= et.size,
+ _("immediate out of range for shift"));
NEON_ENCODE (IMMED, inst);
- neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
+ neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
else
{
{
enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
+ int imm = inst.operands[2].imm;
+ constraint (imm < 0 || (unsigned)imm >= et.size,
+ _("immediate out of range for shift"));
NEON_ENCODE (IMMED, inst);
- neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
- inst.operands[2].imm);
+ neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
}
else
{
return FAIL;
}
-/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
- A, B, C, D. */
-
-static int
-neon_bits_same_in_bytes (unsigned imm)
-{
- return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
- && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
- && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
- && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
-}
-
-/* For immediate of above form, return 0bABCD. */
-
-static unsigned
-neon_squash_bits (unsigned imm)
-{
- return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
- | ((imm & 0x01000000) >> 21);
-}
-
-/* Compress quarter-float representation to 0b...000 abcdefgh. */
-
-static unsigned
-neon_qfloat_bits (unsigned imm)
-{
- return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
-}
-
-/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
- the instruction. *OP is passed as the initial value of the op field, and
- may be set to a different value depending on the constant (i.e.
- "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
- MVN). If the immediate looks like a repeated pattern then also
- try smaller element sizes. */
-
-static int
-neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
- unsigned *immbits, int *op, int size,
- enum neon_el_type type)
-{
- /* Only permit float immediates (including 0.0/-0.0) if the operand type is
- float. */
- if (type == NT_float && !float_p)
- return FAIL;
-
- if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
- {
- if (size != 32 || *op == 1)
- return FAIL;
- *immbits = neon_qfloat_bits (immlo);
- return 0xf;
- }
-
- if (size == 64)
- {
- if (neon_bits_same_in_bytes (immhi)
- && neon_bits_same_in_bytes (immlo))
- {
- if (*op == 1)
- return FAIL;
- *immbits = (neon_squash_bits (immhi) << 4)
- | neon_squash_bits (immlo);
- *op = 1;
- return 0xe;
- }
-
- if (immhi != immlo)
- return FAIL;
- }
-
- if (size >= 32)
- {
- if (immlo == (immlo & 0x000000ff))
- {
- *immbits = immlo;
- return 0x0;
- }
- else if (immlo == (immlo & 0x0000ff00))
- {
- *immbits = immlo >> 8;
- return 0x2;
- }
- else if (immlo == (immlo & 0x00ff0000))
- {
- *immbits = immlo >> 16;
- return 0x4;
- }
- else if (immlo == (immlo & 0xff000000))
- {
- *immbits = immlo >> 24;
- return 0x6;
- }
- else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
- {
- *immbits = (immlo >> 8) & 0xff;
- return 0xc;
- }
- else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
- {
- *immbits = (immlo >> 16) & 0xff;
- return 0xd;
- }
-
- if ((immlo & 0xffff) != (immlo >> 16))
- return FAIL;
- immlo &= 0xffff;
- }
-
- if (size >= 16)
- {
- if (immlo == (immlo & 0x000000ff))
- {
- *immbits = immlo;
- return 0x8;
- }
- else if (immlo == (immlo & 0x0000ff00))
- {
- *immbits = immlo >> 8;
- return 0xa;
- }
-
- if ((immlo & 0xff) != (immlo >> 8))
- return FAIL;
- immlo &= 0xff;
- }
-
- if (immlo == (immlo & 0x000000ff))
- {
- /* Don't allow MVN with 8-bit immediate. */
- if (*op == 1)
- return FAIL;
- *immbits = immlo;
- return 0xe;
- }
-
- return FAIL;
-}
-
-/* Write immediate bits [7:0] to the following locations:
-
- |28/24|23 19|18 16|15 4|3 0|
- | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
-
- This function is used by VMOV/VMVN/VORR/VBIC. */
-
-static void
-neon_write_immbits (unsigned immbits)
-{
- inst.instruction |= immbits & 0xf;
- inst.instruction |= ((immbits >> 4) & 0x7) << 16;
- inst.instruction |= ((immbits >> 7) & 0x1) << 24;
-}
-
-/* Invert low-order SIZE bits of XHI:XLO. */
-
-static void
-neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
-{
- unsigned immlo = xlo ? *xlo : 0;
- unsigned immhi = xhi ? *xhi : 0;
-
- switch (size)
- {
- case 8:
- immlo = (~immlo) & 0xff;
- break;
-
- case 16:
- immlo = (~immlo) & 0xffff;
- break;
-
- case 64:
- immhi = (~immhi) & 0xffffffff;
- /* fall through. */
-
- case 32:
- immlo = (~immlo) & 0xffffffff;
- break;
-
- default:
- abort ();
- }
-
- if (xlo)
- *xlo = immlo;
-
- if (xhi)
- *xhi = immhi;
-}
-
static void
do_neon_logic (void)
{
int sz, op;
int rm;
+ /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
+ D register operands. */
+ if (flavour == neon_cvt_flavour_s32_f64
+ || flavour == neon_cvt_flavour_u32_f64)
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
+ _(BAD_FPU));
+
set_it_insn_type (OUTSIDE_IT_INSN);
switch (flavour)
{
case neon_cvt_flavour_s32_f64:
sz = 1;
- op = 0;
+ op = 1;
break;
case neon_cvt_flavour_s32_f32:
sz = 0;
}
else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
{
+ /* The VCVTB and VCVTT instructions with D-register operands
+ don't work for SP only targets. */
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
+ _(BAD_FPU));
+
inst.error = NULL;
do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
}
else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
{
+ /* The VCVTB and VCVTT instructions with D-register operands
+ don't work for SP only targets. */
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
+ _(BAD_FPU));
+
inst.error = NULL;
do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
}
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
+ /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
+ D register operands. */
+ if (neon_shape_class[rs] == SC_DOUBLE)
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
+ _(BAD_FPU));
+
NEON_ENCODE (FPV8, inst);
if (rs == NS_FFF)
if (rs == NS_NULL)
return;
+ /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
+ D register operands. */
+ if (neon_shape_class[rs] == SC_DOUBLE)
+ constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
+ _(BAD_FPU));
+
et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
if (et.type != NT_invtype)
{
{ 0x4800, 0xf800, N_("Literal loads") },
{ 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
{ 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
+  /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
+     field in asm_opcode, which is what is available at the stage where
+     this check happens.  */
+ { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
{ 0, 0, NULL }
};
/* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
set those bits when Thumb-2 32-bit instructions are seen. ie.
anything other than bl/blx and v6-M instructions.
- This is overly pessimistic for relaxable instructions. */
- if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
- || inst.relax)
+ The impact of relaxable instructions will be considered later, once all
+ relaxation has been performed.  */
+ if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
&& !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
/* FP for ARMv8. */
#undef ARM_VARIANT
-#define ARM_VARIANT & fpu_vfp_ext_armv8
+#define ARM_VARIANT & fpu_vfp_ext_armv8xd
#undef THUMB_VARIANT
-#define THUMB_VARIANT & fpu_vfp_ext_armv8
+#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
- nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
- nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
+ nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
+ nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
fixp->fx_file = fragp->fr_file;
fixp->fx_line = fragp->fr_line;
fragp->fr_fix += fragp->fr_var;
+
+ /* Set whether we use thumb-2 ISA based on final relaxation results. */
+ if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
+ && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
+ ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
/* Return the size of a relaxable immediate operand instruction.
if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
{
- if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
+ if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
+ ? selected_cpu : arm_arch_none, arm_ext_v6t2))
{
narrow_noop = thumb_noop[1][target_big_endian];
noop = wide_thumb_noop[target_big_endian];
}
else
{
- noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
+ noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
+ ? selected_cpu : arm_arch_none,
+ arm_ext_v6k) != 0]
[target_big_endian];
noop_size = 4;
#ifdef OBJ_ELF
/* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
personality routine data. Returns zero, or the index table value for
- and inline entry. */
+ an inline entry. */
static valueT
create_unwind_entry (int have_data)
}
else
{
- gas_assert (unwind.personality_index == -1);
+ /* PR 16765: Missing or misplaced unwind directives can trigger this. */
+ if (unwind.personality_index != -1)
+ {
+ as_bad (_("attempt to recreate an unwind entry"));
+ return 1;
+ }
/* An extra byte is required for the opcode count. */
size = unwind.opcode_count + 1;
tc_arm_regname_to_dw2regnum (char *regname)
{
int reg = arm_reg_parse (®name, REG_TYPE_RN);
+ if (reg != FAIL)
+ return reg;
- if (reg == FAIL)
- return -1;
+ /* PR 16694: Allow VFP registers as well. */
+ reg = arm_reg_parse (®name, REG_TYPE_VFS);
+ if (reg != FAIL)
+ return 64 + reg;
- return reg;
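+  /* D0-D31 use DWARF register numbers 256-287 (the S registers above use
+     the obsolescent 64-95 range).  */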
+ reg = arm_reg_parse (®name, REG_TYPE_VFD);
+ if (reg != FAIL)
+ return reg + 256;
+
+ return -1;
}
#ifdef TE_PE
case BFD_RELOC_8:
if (fixP->fx_done || !seg->use_rela_p)
- md_number_to_chars (buf, value, 1);
+ *buf = value;
break;
case BFD_RELOC_16:
case BFD_RELOC_ARM_THM_TLS_CALL:
case BFD_RELOC_ARM_TLS_DESCSEQ:
case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
- S_SET_THREAD_LOCAL (fixP->fx_addsy);
- break;
-
case BFD_RELOC_ARM_TLS_GOTDESC:
case BFD_RELOC_ARM_TLS_GD32:
case BFD_RELOC_ARM_TLS_LE32:
case BFD_RELOC_ARM_TLS_LDM32:
case BFD_RELOC_ARM_TLS_LDO32:
S_SET_THREAD_LOCAL (fixP->fx_addsy);
- /* fall through */
+ break;
case BFD_RELOC_ARM_GOT32:
case BFD_RELOC_ARM_GOTOFF:
- if (fixP->fx_done || !seg->use_rela_p)
- md_number_to_chars (buf, 0, 4);
break;
case BFD_RELOC_ARM_GOT_PREL:
cons_fix_new_arm (fragS * frag,
int where,
int size,
- expressionS * exp)
+ expressionS * exp,
+ bfd_reloc_code_real_type reloc)
{
- bfd_reloc_code_real_type type;
int pcrel = 0;
/* Pick a reloc.
switch (size)
{
case 1:
- type = BFD_RELOC_8;
+ reloc = BFD_RELOC_8;
break;
case 2:
- type = BFD_RELOC_16;
+ reloc = BFD_RELOC_16;
break;
case 4:
default:
- type = BFD_RELOC_32;
+ reloc = BFD_RELOC_32;
break;
case 8:
- type = BFD_RELOC_64;
+ reloc = BFD_RELOC_64;
break;
}
if (exp->X_op == O_secrel)
{
exp->X_op = O_symbol;
- type = BFD_RELOC_32_SECREL;
+ reloc = BFD_RELOC_32_SECREL;
}
#endif
- fix_new_exp (frag, where, (int) size, exp, pcrel, type);
+ fix_new_exp (frag, where, size, exp, pcrel, reloc);
}
#if defined (OBJ_COFF)
ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
FPU_NONE, "Cortex-A5"),
- ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
- FPU_ARCH_NEON_VFP_V4,
+ ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
"Cortex-A7"),
ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
ARM_FEATURE (0, FPU_VFP_V3
ARM_FEATURE (0, FPU_VFP_V3
| FPU_NEON_EXT_V1),
"Cortex-A9"),
- ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
- FPU_ARCH_NEON_VFP_V4,
+ ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
"Cortex-A12"),
- ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
- FPU_ARCH_NEON_VFP_V4,
+ ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
"Cortex-A15"),
+ ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
+ "Cortex-A17"),
ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
"Cortex-A53"),
ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
FPU_ARCH_VFP_V3D16,
"Cortex-R7"),
+ ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M7"),
ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
/* Marvell processors. */
ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
FPU_ARCH_VFP_V3D16, NULL),
+ ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP
+ | ARM_EXT_SEC, 0),
+ FPU_ARCH_NEON_VFP_V4, NULL),
+ /* APM X-Gene family. */
+ ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ "APM X-Gene 1"),
+ ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ "APM X-Gene 2"),
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
/* The official spelling of the ARMv7 profile variants is the dashed form.
Accept the non-dashed form for compatibility with old toolchains. */
ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
+ ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
{
char *name;
size_t name_len;
- const arm_feature_set value;
+ const arm_feature_set merge_value;
+ const arm_feature_set clear_value;
const arm_feature_set allowed_archs;
};
/* The following table must be in alphabetical order with a NULL last entry.
*/
-#define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
+#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
- ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
+ ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (0, CRC_EXT_ARMV8),
+ ARM_FEATURE (ARM_EXT_V8, 0)),
ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
+ ARM_FEATURE (0, FPU_CRYPTO_ARMV8),
ARM_FEATURE (ARM_EXT_V8, 0)),
- ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
+ ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE (0, FPU_VFP_ARMV8),
ARM_FEATURE (ARM_EXT_V8, 0)),
ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
+ ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
- ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
- ARM_EXT_OPT ("iwmmxt2",
- ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
- ARM_EXT_OPT ("maverick",
- ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
+ ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT),
+ ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
+ ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2),
+ ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
+ ARM_EXT_OPT ("maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK),
+ ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
+ ARM_FEATURE (ARM_EXT_MP, 0),
ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
+ ARM_FEATURE(0, FPU_NEON_ARMV8),
ARM_FEATURE (ARM_EXT_V8, 0)),
ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
+ ARM_FEATURE (ARM_EXT_OS, 0),
ARM_FEATURE (ARM_EXT_V6M, 0)),
ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
+ ARM_FEATURE (ARM_EXT_SEC, 0),
ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
| ARM_EXT_DIV, 0),
+ ARM_FEATURE (ARM_EXT_VIRT, 0),
ARM_FEATURE (ARM_EXT_V7A, 0)),
- ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
- { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
+ ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE),
+ ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
+ { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_EXT_OPT
{"vfpv4", FPU_ARCH_VFP_V4},
{"vfpv4-d16", FPU_ARCH_VFP_V4D16},
{"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
+ {"fpv5-d16", FPU_ARCH_VFP_V5D16},
+ {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16},
{"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
{"fp-armv8", FPU_ARCH_VFP_ARMV8},
{"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
/* Add or remove the extension. */
if (adding_value)
- ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
+ ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
else
- ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
+ ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
break;
}
return ret;
}
+static bfd_boolean
+arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
+{
+ codecomposer_syntax = TRUE;
+ arm_comment_chars[0] = ';';
+ arm_line_separator_chars[0] = 0;
+ return TRUE;
+}
+
struct arm_long_option_table arm_long_opts[] =
{
{"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
#endif
{"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
arm_parse_it_mode, NULL},
+ {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
+ arm_ccs_mode, NULL},
{NULL, NULL, 0, NULL}
};
{11, ARM_ARCH_V6M},
{12, ARM_ARCH_V6SM},
{8, ARM_ARCH_V6T2},
- {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
+ {10, ARM_ARCH_V7VE},
{10, ARM_ARCH_V7R},
{10, ARM_ARCH_V7M},
{14, ARM_ARCH_V8A},
}
/* Set the public EABI object attributes. */
-static void
+void
aeabi_set_public_attributes (void)
{
int arch;
if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
+ selected_cpu = flags;
+
/* Allow the user to override the reported architecture. */
if (object_arch)
{
ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
/* Tag_VFP_arch. */
- if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
- aeabi_set_attribute_int (Tag_VFP_arch, 7);
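+  /* Tag_FP_arch value 7 denotes FP for ARMv8 with the full D0-D31 bank;
+     value 8 denotes the variant restricted to D0-D15.  */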
+ if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
+ aeabi_set_attribute_int (Tag_VFP_arch,
+ ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
+ ? 7 : 8);
else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
aeabi_set_attribute_int (Tag_VFP_arch,
ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
}
if (adding_value)
- ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
+ ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
+ opt->merge_value);
else
- ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
+ ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
mcpu_cpu_opt = &selected_cpu;
ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);