static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
+static const arm_feature_set arm_ext_v8r = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R);
#ifdef OBJ_ELF
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
#endif
ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
static const arm_feature_set arm_ext_crc =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
+static const arm_feature_set arm_ext_cde =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE);
+static const arm_feature_set arm_ext_cde0 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0);
+static const arm_feature_set arm_ext_cde1 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1);
+static const arm_feature_set arm_ext_cde2 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2);
+static const arm_feature_set arm_ext_cde3 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3);
+static const arm_feature_set arm_ext_cde4 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4);
+static const arm_feature_set arm_ext_cde5 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5);
+static const arm_feature_set arm_ext_cde6 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6);
+static const arm_feature_set arm_ext_cde7 =
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7);
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set fpu_any = FPU_ANY;
ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
static const arm_feature_set mve_fp_ext =
ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
+/* Note: This has more than one bit set, which means using it with
+   mark_feature_used (which returns TRUE if *any* of the bits are set in the
+   current cpu variant) can give surprising results. */
+static const arm_feature_set armv8m_fp =
+ ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16);
#ifdef OBJ_ELF
static const arm_feature_set fpu_vfp_fp16 =
ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
unsigned size;
};
-#define NEON_MAX_TYPE_ELS 4
+#define NEON_MAX_TYPE_ELS 5
struct neon_type
{
VPT_INSN, /* The VPT/VPST insn has been parsed. */
MVE_OUTSIDE_PRED_INSN, /* Instruction to indicate a MVE instruction without
a predication code. */
- MVE_UNPREDICABLE_INSN /* MVE instruction that is non-predicable. */
+ MVE_UNPREDICABLE_INSN, /* MVE instruction that is non-predicable. */
};
/* The maximum number of operands we need. */
[REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
[REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
[REG_TYPE_MQ] = N_("MVE vector register expected"),
- [REG_TYPE_RNB] = N_("")
+ [REG_TYPE_RNB] = ""
};
/* Some well known registers that we refer to directly elsewhere. */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
+#define BAD_NO_VPT _("instruction not allowed in VPT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_NOT_VPT _("instruction missing MVE vector predication code")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_RANGE _("branch out of range")
#define BAD_FP16 _("selected processor does not support fp16 instruction")
#define BAD_BF16 _("selected processor does not support bf16 instruction")
+#define BAD_CDE _("selected processor does not support cde instruction")
+#define BAD_CDE_COPROC _("coprocessor for insn is not enabled for cde")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
#define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
#define BAD_EL_TYPE _("bad element type for instruction")
#define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
-static struct hash_control * arm_ops_hsh;
-static struct hash_control * arm_cond_hsh;
-static struct hash_control * arm_vcond_hsh;
-static struct hash_control * arm_shift_hsh;
-static struct hash_control * arm_psr_hsh;
-static struct hash_control * arm_v7m_psr_hsh;
-static struct hash_control * arm_reg_hsh;
-static struct hash_control * arm_reloc_hsh;
-static struct hash_control * arm_barrier_opt_hsh;
+static htab_t arm_ops_hsh;
+static htab_t arm_cond_hsh;
+static htab_t arm_vcond_hsh;
+static htab_t arm_shift_hsh;
+static htab_t arm_psr_hsh;
+static htab_t arm_v7m_psr_hsh;
+static htab_t arm_reg_hsh;
+static htab_t arm_reloc_hsh;
+static htab_t arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity
As:
p++;
while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
- reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
+ reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start);
if (!reg)
return NULL;
return -1;
if ((r = (struct reloc_entry *)
- hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
+ str_hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
return -1;
*str = q + 1;
struct reg_entry *new_reg;
const char *name;
- if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
+ if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0)
{
if (new_reg->builtin)
as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
new_reg->builtin = FALSE;
new_reg->neon = NULL;
- if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
- abort ();
+ str_hash_insert (arm_reg_hsh, name, new_reg);
return new_reg;
}
if (*oldname == '\0')
return FALSE;
- old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
+ old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
if (!old)
{
as_warn (_("unknown register '%s' -- .req ignored"), oldname);
as_bad (_("invalid syntax for .unreq directive"));
else
{
- struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
+ struct reg_entry *reg = (struct reg_entry *) str_hash_find (arm_reg_hsh,
name);
if (!reg)
char * p;
char * nbuf;
- hash_delete (arm_reg_hsh, name, FALSE);
+ str_hash_delete (arm_reg_hsh, name);
free ((char *) reg->name);
- if (reg->neon)
- free (reg->neon);
+ free (reg->neon);
free (reg);
/* Also locate the all upper case and all lower case versions.
nbuf = strdup (name);
for (p = nbuf; *p; p++)
*p = TOUPPER (*p);
- reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
+ reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
if (reg)
{
- hash_delete (arm_reg_hsh, nbuf, FALSE);
+ str_hash_delete (arm_reg_hsh, nbuf);
free ((char *) reg->name);
- if (reg->neon)
- free (reg->neon);
+ free (reg->neon);
free (reg);
}
for (p = nbuf; *p; p++)
*p = TOLOWER (*p);
- reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
+ reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
if (reg)
{
- hash_delete (arm_reg_hsh, nbuf, FALSE);
+ str_hash_delete (arm_reg_hsh, nbuf);
free ((char *) reg->name);
- if (reg->neon)
- free (reg->neon);
+ free (reg->neon);
free (reg);
}
abort ();
}
- symbolP = symbol_new (symname, now_seg, value, frag);
+ symbolP = symbol_new (symname, now_seg, frag, value);
symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
switch (state)
memset (dummy_frag, 0, sizeof (fragS));
dummy_frag->fr_type = rs_fill;
dummy_frag->line = listing_tail;
- symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
+ symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
dummy_frag->fr_symbol = symbolP;
}
else
#endif
- symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
+ symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);
#ifdef OBJ_COFF
/* "set" symbols are local unless otherwise specified. */
if (pool->symbol == NULL)
{
pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
- (valueT) 0, &zero_address_frag);
+ &zero_address_frag, 0);
pool->id = latest_pool_num ++;
}
}
return;
-error:
+ error:
ignore_rest_of_line ();
}
op = 0xc700 | mask;
add_unwind_opcode (op, 2);
return;
-error:
+ error:
ignore_rest_of_line ();
}
as_warn (_("float16 format cannot be set more than once, ignoring."));
}
-cleanup:
+ cleanup:
*input_line_pointer = saved_char;
ignore_rest_of_line ();
}
return FAIL;
}
- shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
+ shift_name = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
p - *str);
if (shift_name == NULL)
|| strncasecmp (start, "psr", 3) == 0)
p = start + strcspn (start, "rR") + 1;
- psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
+ psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
p - start);
if (!psr)
goto unsupported_psr;
p += 4;
-check_suffix:
+ check_suffix:
if (*p == '_')
{
/* A suffix follows. */
}
else
{
- psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
+ psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
p - start);
if (!psr)
goto error;
n++;
}
- c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
+ c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n);
if (!c)
{
inst.error = _("condition required");
while (ISALPHA (*q))
q++;
- o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
+ o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p,
q - p);
if (!o)
return FAIL;
OP_RNDMQ, /* Neon double precision (0..31) or MVE vector register. */
OP_RNDMQR, /* Neon double precision (0..31), MVE vector or ARM register.
*/
+ OP_RNSDMQR, /* Neon single or double precision, MVE vector or ARM register.
+ */
OP_RNQ, /* Neon quad precision register */
OP_RNQMQ, /* Neon quad or MVE vector register. */
OP_RVSD, /* VFP single or double precision register */
OP_RIWG, /* iWMMXt wCG register */
OP_RXA, /* XScale accumulator register */
+ OP_RNSDMQ, /* Neon single, double or MVE vector register */
OP_RNSDQMQ, /* Neon single, double or quad register or MVE vector register
*/
OP_RNSDQMQR, /* Neon single, double or quad register, MVE vector register or
OP_I63s, /* -64 .. 63 */
OP_I64, /* 1 .. 64 */
OP_I64z, /* 0 .. 64 */
+ OP_I127, /* 0 .. 127 */
OP_I255, /* 0 .. 255 */
-
+ OP_I511, /* 0 .. 511 */
+ OP_I4095, /* 0 .. 4095 */
+ OP_I8191, /* 0 .. 8191 */
OP_I4b, /* immediate, prefix optional, 1 .. 4 */
OP_I7b, /* 0 .. 7 */
OP_I15b, /* 0 .. 15 */
OP_oRNSDQ, /* Optional single, double or quad precision vector register */
OP_oRNSDQMQ, /* Optional single, double or quad register or MVE vector
register. */
+  OP_oRNSDMQ, /* Optional single, double or MVE vector register. */
OP_oSHll, /* LSL immediate */
OP_oSHar, /* ASR immediate */
OP_oSHllar, /* LSL or ASR immediate */
case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
case OP_oRND:
+ case OP_RNSDMQR:
+ po_reg_or_goto (REG_TYPE_VFS, try_rndmqr);
+ break;
+ try_rndmqr:
case OP_RNDMQR:
po_reg_or_goto (REG_TYPE_RN, try_rndmq);
break;
case OP_RVSD_COND:
po_reg_or_goto (REG_TYPE_VFSD, try_cond);
break;
+ case OP_oRNSDMQ:
+ case OP_RNSDMQ:
+ po_reg_or_goto (REG_TYPE_NSD, try_mq2);
+ break;
+ try_mq2:
+ po_reg_or_fail (REG_TYPE_MQ);
+ break;
case OP_oRNSDQ:
case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
case OP_RNSDQMQR:
case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
+ case OP_I127: po_imm_or_fail ( 0, 127, FALSE); break;
case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
-
+ case OP_I511: po_imm_or_fail ( 0, 511, FALSE); break;
+ case OP_I4095: po_imm_or_fail ( 0, 4095, FALSE); break;
+ case OP_I8191: po_imm_or_fail ( 0, 8191, FALSE); break;
case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
case OP_oI7b:
case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
X(2, (Q, R), QUAD), \
X(2, (D, I), DOUBLE), \
X(2, (Q, I), QUAD), \
+ X(3, (P, F, I), SINGLE), \
+ X(3, (P, D, I), DOUBLE), \
+ X(3, (P, Q, I), QUAD), \
+ X(4, (P, F, F, I), SINGLE), \
+ X(4, (P, D, D, I), DOUBLE), \
+ X(4, (P, Q, Q, I), QUAD), \
+ X(5, (P, F, F, F, I), SINGLE), \
+ X(5, (P, D, D, D, I), DOUBLE), \
+ X(5, (P, Q, Q, Q, I), QUAD), \
X(3, (D, L, D), DOUBLE), \
X(2, (D, Q), MIXED), \
X(2, (Q, D), MIXED), \
#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D
+#define S5(A,B,C,D,E) NS_##A##B##C##D##E
#define X(N, L, C) S##N L
#undef S2
#undef S3
#undef S4
+#undef S5
enum neon_shape_class
{
SE_I,
SE_S,
SE_R,
- SE_L
+ SE_L,
+ SE_P
};
/* Register widths of above. */
0,
32,
32,
+ 0,
0
};
#define S2(A,B) { SE_##A, SE_##B }
#define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
+#define S5(A,B,C,D,E) { SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }
#define X(N, L, C) { N, S##N L }
#undef S2
#undef S3
#undef S4
+#undef S5
/* Bit masks used in type checking given instructions.
'N_EQK' means the type must be the same as (or based on in some way) the key
matches = 0;
break;
+ case SE_P:
case SE_L:
break;
}
{
const struct asm_opcode *opcode;
- opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
+ opcode = (const struct asm_opcode *) str_hash_find (arm_ops_hsh, opname);
if (!opcode)
abort ();
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
+#define LOW1(R) ((R) & 0x1)
+#define HI4(R) (((R) >> 1) & 0xf)
static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
- if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
- && inst.cond == COND_ALWAYS
+ if (inst.cond == COND_ALWAYS
&& ((unsigned)inst.instruction) == M_MNEM_vmullt)
{
if (rs == NS_QQQ)
{
-
- struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
- N_SUF_32 | N_F64 | N_P8
- | N_P16 | N_I_MVE | N_KEY);
- if (((et.type == NT_poly) && et.size == 8
- && ARM_CPU_IS_ANY (cpu_variant))
- || (et.type == NT_integer) || (et.type == NT_float))
+ if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
goto neon_vmul;
}
else
return;
-neon_vmul:
+ neon_vmul:
inst.instruction = N_MNEM_vmul;
inst.cond = 0xb;
if (thumb_mode)
return;
}
+ if ((rs == NS_FD || rs == NS_QQI) && mode == neon_cvt_mode_n
+ && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
+ {
+ /* We are dealing with vcvt with the 'ne' condition. */
+ inst.cond = 0x1;
+ inst.instruction = N_MNEM_vcvt;
+ do_neon_cvt_1 (neon_cvt_mode_z);
+ return;
+ }
+
/* VFP rather than Neon conversions. */
if (flavour >= neon_cvt_flavour_first_fp)
{
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
}
- else if (mode == neon_cvt_mode_n)
- {
- /* We are dealing with vcvt with the 'ne' condition. */
- inst.cond = 0x1;
- inst.instruction = N_MNEM_vcvt;
- do_neon_cvt_1 (neon_cvt_mode_z);
- return;
- }
/* fall through. */
case NS_DDI:
{
static void
do_neon_dyadic_long (void)
{
- enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
+  enum neon_shape rs = neon_select_shape (NS_QDD, NS_HHH, NS_FFF, NS_DDD,
+					   NS_NULL);
if (rs == NS_QDD)
{
if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
| ((elno & 0x1) << 3));
}
-bad_scalar:
+ bad_scalar:
first_error (_("scalar out of range for multiply instruction"));
return 0;
}
inst.instruction &= 0x00ffffff;
inst.instruction |= high8;
-#define LOW1(R) ((R) & 0x1)
-#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike the usual NEON three-same encoding, the encoding for Vn and Vm
     depends on whether the instruction is in Q form and whether Vm is a
     scalar indexed operand. */
}
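+/* Check that operand INDEX of the current instruction is a register valid
+   for a CDE general purpose operand: r0-r14 except r13, or APSR_nzcv
+   (parsed as r15 with the isvec flag), for the single register forms, and
+   an even register in r0-r10 for the dual register forms.  */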
+static void
+check_cde_operand (size_t index, int is_dual)
+{
+ unsigned Rx = inst.operands[index].reg;
+ bfd_boolean isvec = inst.operands[index].isvec;
+  if (is_dual == 0 && thumb_mode)
+    constraint (!((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
+		_("Register must be r0-r14 except r13, or APSR_nzcv."));
+  else
+    constraint (!(Rx <= 10 && Rx % 2 == 0),
+		_("Register must be an even register between r0-r10."));
+}
+
+static bfd_boolean
+cde_coproc_enabled (unsigned coproc)
+{
+ switch (coproc)
+ {
+ case 0: return mark_feature_used (&arm_ext_cde0);
+ case 1: return mark_feature_used (&arm_ext_cde1);
+ case 2: return mark_feature_used (&arm_ext_cde2);
+ case 3: return mark_feature_used (&arm_ext_cde3);
+ case 4: return mark_feature_used (&arm_ext_cde4);
+ case 5: return mark_feature_used (&arm_ext_cde5);
+ case 6: return mark_feature_used (&arm_ext_cde6);
+ case 7: return mark_feature_used (&arm_ext_cde7);
+ default: return FALSE;
+ }
+}
+
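+/* The coprocessor field of every CDE instruction lives in bits [10:8] of
+   the encoding.  */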
+#define cde_coproc_pos 8
+static void
+cde_handle_coproc (void)
+{
+ unsigned coproc = inst.operands[0].reg;
+  constraint (coproc > 7, _("CDE coprocessor must be in range 0-7"));
+ constraint (!(cde_coproc_enabled (coproc)), BAD_CDE_COPROC);
+ inst.instruction |= coproc << cde_coproc_pos;
+}
+#undef cde_coproc_pos
+
+static void
+cxn_handle_predication (bfd_boolean is_accum)
+{
+ if (is_accum && conditional_insn ())
+ set_pred_insn_type (INSIDE_IT_INSN);
+ else if (conditional_insn ())
+ /* conditional_insn essentially checks for a suffix, not whether the
+ instruction is inside an IT block or not.
+ The non-accumulator versions should not have suffixes. */
+ inst.error = BAD_SYNTAX;
+ else
+ set_pred_insn_type (OUTSIDE_PRED_INSN);
+}
+
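+/* Worker for the cx1* mnemonics.  IS_DUAL selects the cx1d/cx1da forms,
+   which write an even/odd register pair, and IS_ACCUM the accumulator
+   forms, which may carry a condition suffix inside an IT block.  The
+   13-bit immediate is scattered into the encoding as the three fields
+   imm[12:7], imm[6] and imm[5:0].  */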
+static void
+do_custom_instruction_1 (int is_dual, bfd_boolean is_accum)
+{
+
+ constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
+
+ unsigned imm, Rd;
+
+ Rd = inst.operands[1].reg;
+ check_cde_operand (1, is_dual);
+
+ if (is_dual == 1)
+ {
+ constraint (inst.operands[2].reg != Rd + 1,
+ _("cx1d requires consecutive destination registers."));
+ imm = inst.operands[3].imm;
+ }
+ else if (is_dual == 0)
+ imm = inst.operands[2].imm;
+ else
+ abort ();
+
+ inst.instruction |= Rd << 12;
+ inst.instruction |= (imm & 0x1F80) << 9;
+ inst.instruction |= (imm & 0x0040) << 1;
+ inst.instruction |= (imm & 0x003f);
+
+ cde_handle_coproc ();
+ cxn_handle_predication (is_accum);
+}
+
+static void
+do_custom_instruction_2 (int is_dual, bfd_boolean is_accum)
+{
+
+ constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
+
+ unsigned imm, Rd, Rn;
+
+ Rd = inst.operands[1].reg;
+
+ if (is_dual == 1)
+ {
+ constraint (inst.operands[2].reg != Rd + 1,
+ _("cx2d requires consecutive destination registers."));
+ imm = inst.operands[4].imm;
+ Rn = inst.operands[3].reg;
+ }
+ else if (is_dual == 0)
+ {
+ imm = inst.operands[3].imm;
+ Rn = inst.operands[2].reg;
+ }
+ else
+ abort ();
+
+ check_cde_operand (2 + is_dual, /* is_dual = */0);
+ check_cde_operand (1, is_dual);
+
+ inst.instruction |= Rd << 12;
+ inst.instruction |= Rn << 16;
+
+ inst.instruction |= (imm & 0x0380) << 13;
+ inst.instruction |= (imm & 0x0040) << 1;
+ inst.instruction |= (imm & 0x003f);
+
+ cde_handle_coproc ();
+ cxn_handle_predication (is_accum);
+}
+
+static void
+do_custom_instruction_3 (int is_dual, bfd_boolean is_accum)
+{
+
+ constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
+
+ unsigned imm, Rd, Rn, Rm;
+
+ Rd = inst.operands[1].reg;
+
+ if (is_dual == 1)
+ {
+ constraint (inst.operands[2].reg != Rd + 1,
+ _("cx3d requires consecutive destination registers."));
+ imm = inst.operands[5].imm;
+ Rn = inst.operands[3].reg;
+ Rm = inst.operands[4].reg;
+ }
+ else if (is_dual == 0)
+ {
+ imm = inst.operands[4].imm;
+ Rn = inst.operands[2].reg;
+ Rm = inst.operands[3].reg;
+ }
+ else
+ abort ();
+
+ check_cde_operand (1, is_dual);
+ check_cde_operand (2 + is_dual, /* is_dual = */0);
+ check_cde_operand (3 + is_dual, /* is_dual = */0);
+
+ inst.instruction |= Rd;
+ inst.instruction |= Rn << 16;
+ inst.instruction |= Rm << 12;
+
+ inst.instruction |= (imm & 0x0038) << 17;
+ inst.instruction |= (imm & 0x0004) << 5;
+ inst.instruction |= (imm & 0x0003) << 4;
+
+ cde_handle_coproc ();
+ cxn_handle_predication (is_accum);
+}
+
+static void
+do_cx1 (void)
+{
+ return do_custom_instruction_1 (0, 0);
+}
+
+static void
+do_cx1a (void)
+{
+ return do_custom_instruction_1 (0, 1);
+}
+
+static void
+do_cx1d (void)
+{
+ return do_custom_instruction_1 (1, 0);
+}
+
+static void
+do_cx1da (void)
+{
+ return do_custom_instruction_1 (1, 1);
+}
+
+static void
+do_cx2 (void)
+{
+ return do_custom_instruction_2 (0, 0);
+}
+
+static void
+do_cx2a (void)
+{
+ return do_custom_instruction_2 (0, 1);
+}
+
+static void
+do_cx2d (void)
+{
+ return do_custom_instruction_2 (1, 0);
+}
+
+static void
+do_cx2da (void)
+{
+ return do_custom_instruction_2 (1, 1);
+}
+
+static void
+do_cx3 (void)
+{
+ return do_custom_instruction_3 (0, 0);
+}
+
+static void
+do_cx3a (void)
+{
+ return do_custom_instruction_3 (0, 1);
+}
+
+static void
+do_cx3d (void)
+{
+ return do_custom_instruction_3 (1, 0);
+}
+
+static void
+do_cx3da (void)
+{
+ return do_custom_instruction_3 (1, 1);
+}
+
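+/* Encode a vector register number into the D:Vd, M:Vm or N:Vn fields of a
+   vcx* instruction.  REGNUM arrives already in Vx:x form, with its low bit
+   holding the D/M/N bit (see vcx_handle_shape below for the scaling).  */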
+static void
+vcx_assign_vec_d (unsigned regnum)
+{
+ inst.instruction |= HI4 (regnum) << 12;
+ inst.instruction |= LOW1 (regnum) << 22;
+}
+
+static void
+vcx_assign_vec_m (unsigned regnum)
+{
+ inst.instruction |= HI4 (regnum);
+ inst.instruction |= LOW1 (regnum) << 5;
+}
+
+static void
+vcx_assign_vec_n (unsigned regnum)
+{
+ inst.instruction |= HI4 (regnum) << 16;
+ inst.instruction |= LOW1 (regnum) << 7;
+}
+
+enum vcx_reg_type
+{
+ q_reg,
+ d_reg,
+ s_reg
+};
+
+static enum vcx_reg_type
+vcx_get_reg_type (enum neon_shape ns)
+{
+ gas_assert (ns == NS_PQI
+ || ns == NS_PDI
+ || ns == NS_PFI
+ || ns == NS_PQQI
+ || ns == NS_PDDI
+ || ns == NS_PFFI
+ || ns == NS_PQQQI
+ || ns == NS_PDDDI
+ || ns == NS_PFFFI);
+ if (ns == NS_PQI || ns == NS_PQQI || ns == NS_PQQQI)
+ return q_reg;
+ if (ns == NS_PDI || ns == NS_PDDI || ns == NS_PDDDI)
+ return d_reg;
+ return s_reg;
+}
+
+#define vcx_size_pos 24
+#define vcx_vec_pos 6
+static unsigned
+vcx_handle_shape (enum vcx_reg_type reg_type)
+{
+ unsigned mult = 2;
+ if (reg_type == q_reg)
+ inst.instruction |= 1 << vcx_vec_pos;
+ else if (reg_type == d_reg)
+ inst.instruction |= 1 << vcx_size_pos;
+ else
+ mult = 1;
+ /* NOTE:
+ The documentation says that the Q registers are encoded as 2*N in the D:Vd
+ bits (or equivalent for N and M registers).
+ Similarly the D registers are encoded as N in D:Vd bits.
+ While the S registers are encoded as N in the Vd:D bits.
+
+ Taking into account the maximum values of these registers we can see a
+ nicer pattern for calculation:
+ Q -> 7, D -> 15, S -> 31
+
+ If we say that everything is encoded in the Vd:D bits, then we can say
+ that Q is encoded as 4*N, and D is encoded as 2*N.
+ This way the bits will end up the same, and calculation is simpler.
+ (calculation is now:
+ 1. Multiply by a number determined by the register letter.
+ 2. Encode resulting number in Vd:D bits.)
+
+ This is made a little more complicated by automatic handling of 'Q'
+ registers elsewhere, which means the register number is already 2*N where
+ N is the number the user wrote after the register letter.
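+
+     Worked example (illustrative): 'q1' is stored by the operand parser as
+     2 (2*N); with mult 2 it encodes 4 in the Vd:D bits, the same value as
+     D:Vd = 2*N.  'd5' with mult 2 encodes 10, i.e. D:Vd = 5, and 's31'
+     with mult 1 encodes Vd = 15, D = 1 as usual.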
+ */
+ return mult;
+}
+#undef vcx_vec_pos
+#undef vcx_size_pos
+
+static void
+vcx_ensure_register_in_range (unsigned R, enum vcx_reg_type reg_type)
+{
+ if (reg_type == q_reg)
+ {
+ gas_assert (R % 2 == 0);
+ constraint (R >= 16, _("'q' register must be in range 0-7"));
+ }
+ else if (reg_type == d_reg)
+ constraint (R >= 16, _("'d' register must be in range 0-15"));
+ else
+ constraint (R >= 32, _("'s' register must be in range 0-31"));
+}
+
+static void (*vcx_assign_vec[3]) (unsigned) = {
+ vcx_assign_vec_d,
+ vcx_assign_vec_m,
+ vcx_assign_vec_n
+};
+
+static void
+vcx_handle_register_arguments (unsigned num_registers,
+ enum vcx_reg_type reg_type)
+{
+ unsigned R, i;
+ unsigned reg_mult = vcx_handle_shape (reg_type);
+ for (i = 0; i < num_registers; i++)
+ {
+ R = inst.operands[i+1].reg;
+ vcx_ensure_register_in_range (R, reg_type);
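+      /* The assembly operands of the three-register forms come in d, n, m
+	 order, while vcx_assign_vec is ordered {d, m, n}, so the last two
+	 indices are swapped below.  */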
+ if (num_registers == 3 && i > 0)
+ {
+ if (i == 2)
+ vcx_assign_vec[1] (R * reg_mult);
+ else
+ vcx_assign_vec[2] (R * reg_mult);
+ continue;
+ }
+      vcx_assign_vec[i] (R * reg_mult);
+ }
+}
+
+static void
+vcx_handle_insn_block (enum vcx_reg_type reg_type)
+{
+  if (reg_type == q_reg)
+    {
+      if (inst.cond > COND_ALWAYS)
+	inst.pred_insn_type = INSIDE_VPT_INSN;
+      else
+	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
+    }
+ else if (inst.cond == COND_ALWAYS)
+ inst.pred_insn_type = OUTSIDE_PRED_INSN;
+ else
+ inst.error = BAD_NOT_IT;
+}
+
+static void
+vcx_handle_common_checks (unsigned num_args, enum neon_shape rs)
+{
+ constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
+ cde_handle_coproc ();
+ enum vcx_reg_type reg_type = vcx_get_reg_type (rs);
+ vcx_handle_register_arguments (num_args, reg_type);
+ vcx_handle_insn_block (reg_type);
+ if (reg_type == q_reg)
+ constraint (!mark_feature_used (&mve_ext),
+ _("vcx instructions with Q registers require MVE"));
+ else
+ constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp, cpu_variant)
+ && mark_feature_used (&armv8m_fp))
+ && !mark_feature_used (&mve_ext),
+ _("vcx instructions with S or D registers require either MVE"
+ " or Armv8-M floating point extension."));
+}
+
+static void
+do_vcx1 (void)
+{
+ enum neon_shape rs = neon_select_shape (NS_PQI, NS_PDI, NS_PFI, NS_NULL);
+ vcx_handle_common_checks (1, rs);
+
+ unsigned imm = inst.operands[2].imm;
+ inst.instruction |= (imm & 0x03f);
+ inst.instruction |= (imm & 0x040) << 1;
+ inst.instruction |= (imm & 0x780) << 9;
+ if (rs != NS_PQI)
+ constraint (imm >= 2048,
+ _("vcx1 with S or D registers takes immediate within 0-2047"));
+ inst.instruction |= (imm & 0x800) << 13;
+}
+
+static void
+do_vcx2 (void)
+{
+ enum neon_shape rs = neon_select_shape (NS_PQQI, NS_PDDI, NS_PFFI, NS_NULL);
+ vcx_handle_common_checks (2, rs);
+
+ unsigned imm = inst.operands[3].imm;
+ inst.instruction |= (imm & 0x01) << 4;
+ inst.instruction |= (imm & 0x02) << 6;
+ inst.instruction |= (imm & 0x3c) << 14;
+ if (rs != NS_PQQI)
+ constraint (imm >= 64,
+ _("vcx2 with S or D registers takes immediate within 0-63"));
+ inst.instruction |= (imm & 0x40) << 18;
+}
+
+static void
+do_vcx3 (void)
+{
+ enum neon_shape rs = neon_select_shape (NS_PQQQI, NS_PDDDI, NS_PFFFI, NS_NULL);
+ vcx_handle_common_checks (3, rs);
+
+ unsigned imm = inst.operands[4].imm;
+ inst.instruction |= (imm & 0x1) << 4;
+ inst.instruction |= (imm & 0x6) << 19;
+ if (rs != NS_PQQQI)
+ constraint (imm >= 8,
+ _("vcx2 with S or D registers takes immediate within 0-7"));
+ inst.instruction |= (imm & 0x8) << 21;
+}
+
/* Crypto v1 instructions. */
static void
do_crypto_2op_1 (unsigned elttype, int op)
*str = end;
/* Look for unaffixed or special-case affixed mnemonic. */
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
end - base);
if (opcode)
{
if (warn_on_deprecated && unified_syntax)
as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
affix = base + (opcode->tag - OT_odd_infix_0);
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
gas_assert (cond);
inst.cond = cond->value;
if (end - base < 2)
return NULL;
affix = end - 1;
- cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
+ cond = (const struct asm_cond *) str_hash_find_n (arm_vcond_hsh, affix, 1);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
affix - base);
/* If this opcode can not be vector predicated then don't accept it with a
vector predication code. */
/* Look for suffixed mnemonic. */
affix = end - 2;
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
affix - base);
}
/* Look for infixed mnemonic in the usual position. */
affix = base + 3;
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
if (!cond)
return NULL;
memcpy (save, affix, 2);
memmove (affix, affix + 2, (end - affix) - 2);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
(end - base) - 2);
memmove (affix + 2, affix, (end - affix) - 2);
memcpy (affix, save, 2);
close_automatic_it_block ();
break;
+ /* Fallthrough. */
case NEUTRAL_IT_INSN:
now_pred.block_length++;
now_pred.insn_cond = TRUE;
&& warn_on_restrict_it
&& !now_pred.warn_deprecated
&& warn_on_deprecated
- && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
+ && (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
+ || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8r))
&& !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
{
if (inst.instruction >= 0x10000)
ldrexd, t_ldrexd),
TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
strexd, t_strexd),
+#undef THUMB_VARIANT
+#define THUMB_VARIANT & arm_ext_v8r
+#undef ARM_VARIANT
+#define ARM_VARIANT & arm_ext_v8r
+
+/* ARMv8-R instructions. */
+ TUF("dfb", 57ff04c, f3bf8f4c, 0, (), noargs, noargs),
/* Defined in V8 but is in undefined encoding space for earlier
architectures. However earlier architectures are required to treat
#define ARM_VARIANT & fpu_neon_ext_v1
mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
- mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
- mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
+ mnUF(vaddl, _vaddl, 3, (RNSDQMQ, oRNSDMQ, RNSDMQR), neon_dyadic_long),
+ mnUF(vsubl, _vsubl, 3, (RNSDQMQ, oRNSDMQ, RNSDMQR), neon_dyadic_long),
mnUF(vand, _vand, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
mnUF(vbic, _vbic, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
mnUF(vorr, _vorr, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
+
+#undef ARM_VARIANT
+#undef THUMB_VARIANT
+#define THUMB_VARIANT &arm_ext_cde
+ ToC ("cx1", ee000000, 3, (RCP, APSR_RR, I8191), cx1),
+ ToC ("cx1a", fe000000, 3, (RCP, APSR_RR, I8191), cx1a),
+ ToC ("cx1d", ee000040, 4, (RCP, RR, APSR_RR, I8191), cx1d),
+ ToC ("cx1da", fe000040, 4, (RCP, RR, APSR_RR, I8191), cx1da),
+
+ ToC ("cx2", ee400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2),
+ ToC ("cx2a", fe400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2a),
+ ToC ("cx2d", ee400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2d),
+ ToC ("cx2da", fe400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2da),
+
+ ToC ("cx3", ee800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3),
+ ToC ("cx3a", fe800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3a),
+ ToC ("cx3d", ee800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3d),
+ ToC ("cx3da", fe800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3da),
+
+ mToC ("vcx1", ec200000, 3, (RCP, RNSDMQ, I4095), vcx1),
+ mToC ("vcx1a", fc200000, 3, (RCP, RNSDMQ, I4095), vcx1),
+
+ mToC ("vcx2", ec300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
+ mToC ("vcx2a", fc300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
+
+ mToC ("vcx3", ec800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
+ mToC ("vcx3a", fc800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
};
+
#undef ARM_VARIANT
#undef THUMB_VARIANT
#undef TCE
for (p = nbuf; *p; p++)
*p = TOLOWER (*p);
- if (hash_find (arm_ops_hsh, nbuf) != NULL)
+ if (str_hash_find (arm_ops_hsh, nbuf) != NULL)
{
- static struct hash_control * already_warned = NULL;
+ static htab_t already_warned = NULL;
if (already_warned == NULL)
- already_warned = hash_new ();
+ already_warned = str_htab_create ();
/* Only warn about the symbol once. To keep the code
- simple we let hash_insert do the lookup for us. */
- if (hash_insert (already_warned, nbuf, NULL) == NULL)
- as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
+ simple we let str_hash_insert do the lookup for us. */
+ if (str_hash_find (already_warned, nbuf) == NULL)
+ {
+ as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
+ str_hash_insert (already_warned, nbuf, NULL);
+ }
}
else
free (nbuf);
as_bad (_("GOT already in the symbol table"));
GOT_symbol = symbol_new (name, undefined_section,
- (valueT) 0, & zero_address_frag);
+ &zero_address_frag, 0);
}
return GOT_symbol;
(((unsigned long) fixP->fx_frag->fr_address
+ (unsigned long) fixP->fx_where) & ~3)
+ (unsigned long) value);
+ else if (get_recorded_alignment (seg) < 2)
+ as_warn_where (fixP->fx_file, fixP->fx_line,
+ _("section does not have enough alignment to ensure safe PC-relative loads"));
if (value & ~0x3fc)
as_bad_where (fixP->fx_file, fixP->fx_line,
|| fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
|| fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
|| fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
+ || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH12
|| fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
|| fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
|| fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
unsigned mach;
unsigned int i;
- if ( (arm_ops_hsh = hash_new ()) == NULL
- || (arm_cond_hsh = hash_new ()) == NULL
- || (arm_vcond_hsh = hash_new ()) == NULL
- || (arm_shift_hsh = hash_new ()) == NULL
- || (arm_psr_hsh = hash_new ()) == NULL
- || (arm_v7m_psr_hsh = hash_new ()) == NULL
- || (arm_reg_hsh = hash_new ()) == NULL
- || (arm_reloc_hsh = hash_new ()) == NULL
- || (arm_barrier_opt_hsh = hash_new ()) == NULL)
+ if ( (arm_ops_hsh = str_htab_create ()) == NULL
+ || (arm_cond_hsh = str_htab_create ()) == NULL
+ || (arm_vcond_hsh = str_htab_create ()) == NULL
+ || (arm_shift_hsh = str_htab_create ()) == NULL
+ || (arm_psr_hsh = str_htab_create ()) == NULL
+ || (arm_v7m_psr_hsh = str_htab_create ()) == NULL
+ || (arm_reg_hsh = str_htab_create ()) == NULL
+ || (arm_reloc_hsh = str_htab_create ()) == NULL
+ || (arm_barrier_opt_hsh = str_htab_create ()) == NULL)
as_fatal (_("virtual memory exhausted"));
for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
- hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
+ if (str_hash_find (arm_ops_hsh, insns[i].template_name) == NULL)
+      str_hash_insert (arm_ops_hsh, insns[i].template_name,
+		       (void *) (insns + i));
for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
- hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
+ str_hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
- hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
+ str_hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
- hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
+ str_hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
- hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
+ str_hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
- hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
+ str_hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
(void *) (v7m_psrs + i));
for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
- hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
+ str_hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
for (i = 0;
i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
i++)
- hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
+ str_hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
(void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
/* This makes encode_branch() use the EABI versions of this relocation. */
entry->reloc = BFD_RELOC_UNUSED;
- hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
+ str_hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
}
#endif
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
+#define CDE_EXTENSIONS \
+ ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
+ ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
+ ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
+ ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
+ ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
+ ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
+ ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
+ ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
+
static const struct arm_ext_table armv8m_main_ext_table[] =
{
ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
+ CDE_EXTENSIONS,
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
+ CDE_EXTENSIONS,
{ NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
+#undef CDE_EXTENSIONS
+
static const struct arm_ext_table armv8r_ext_table[] =
{
ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
if (p_ver_ret == NULL)
return -1;
-found:
+ found:
/* Tag_CPU_arch_profile. */
- if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
- || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
- || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
- && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
+ if (!ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r)
+ && (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
+ || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
+ || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
+ && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only))))
*profile = 'A';
- else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
+ else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r)
+ || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r))
*profile = 'R';
else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
*profile = 'M';