/* tc-aarch64.c -- Assemble for the AArch64 ISA
- Copyright 2009, 2010, 2011, 2012, 2013
- Free Software Foundation, Inc.
+ Copyright (C) 2009-2015 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GAS.
#define streq(a, b) (strcmp (a, b) == 0)
+#define END_OF_INSN '\0'
+
static aarch64_feature_set cpu_variant;
/* Variables that we set while parsing command-line options. Once all
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
static symbolS *GOT_symbol;
+
+/* Which ABI to use. */
+enum aarch64_abi_type
+{
+ AARCH64_ABI_LP64 = 0,
+ AARCH64_ABI_ILP32 = 1
+};
+
+/* AArch64 ABI for the output file. */
+static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
+
+/* When non-zero, program to a 32-bit model, in which the C data types
+ int, long and all pointer types are 32-bit objects (ILP32); or to a
+ 64-bit model, in which the C int type is 32-bits but the C long type
+ and all pointer types are 64-bit objects (LP64). */
+#define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
#endif
enum neon_el_type
and per-sub-section basis. */
#define MAX_LITERAL_POOL_SIZE 1024
+typedef struct literal_expression
+{
+ expressionS exp;
+ /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
+ LITTLENUM_TYPE * bignum;
+} literal_expression;
+
typedef struct literal_pool
{
- expressionS literals[MAX_LITERAL_POOL_SIZE];
+ literal_expression literals[MAX_LITERAL_POOL_SIZE];
unsigned int next_free_entry;
unsigned int id;
symbolS *symbol;
/* Check if this literal value is already in the pool. */
for (entry = 0; entry < pool->next_free_entry; entry++)
{
- if ((pool->literals[entry].X_op == exp->X_op)
+ expressionS * litexp = & pool->literals[entry].exp;
+
+ if ((litexp->X_op == exp->X_op)
&& (exp->X_op == O_constant)
- && (pool->literals[entry].X_add_number == exp->X_add_number)
- && (pool->literals[entry].X_unsigned == exp->X_unsigned))
+ && (litexp->X_add_number == exp->X_add_number)
+ && (litexp->X_unsigned == exp->X_unsigned))
break;
- if ((pool->literals[entry].X_op == exp->X_op)
+ if ((litexp->X_op == exp->X_op)
&& (exp->X_op == O_symbol)
- && (pool->literals[entry].X_add_number == exp->X_add_number)
- && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
- && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
+ && (litexp->X_add_number == exp->X_add_number)
+ && (litexp->X_add_symbol == exp->X_add_symbol)
+ && (litexp->X_op_symbol == exp->X_op_symbol))
break;
}
return FALSE;
}
- pool->literals[entry] = *exp;
+ pool->literals[entry].exp = *exp;
pool->next_free_entry += 1;
+ if (exp->X_op == O_big)
+ {
+ /* PR 16688: Bignums are held in a single global array. We must
+ copy and preserve that value now, before it is overwritten. */
+ pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
+ memcpy (pool->literals[entry].bignum, generic_bignum,
+ CHARS_PER_LITTLENUM * exp->X_add_number);
+ }
+ else
+ pool->literals[entry].bignum = NULL;
}
exp->X_op = O_symbol;
valueT valu, /* Symbol value. */
fragS * frag) /* Associated fragment. */
{
- unsigned int name_length;
+ size_t name_length;
char *preserved_copy_of_name;
name_length = strlen (name) + 1; /* +1 for \0. */
symbol_table_insert (pool->symbol);
for (entry = 0; entry < pool->next_free_entry; entry++)
- /* First output the expression in the instruction to the pool. */
- emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
+ {
+ expressionS * exp = & pool->literals[entry].exp;
+
+ if (exp->X_op == O_big)
+ {
+ /* PR 16688: Restore the global bignum value. */
+ gas_assert (pool->literals[entry].bignum != NULL);
+ memcpy (generic_bignum, pool->literals[entry].bignum,
+ CHARS_PER_LITTLENUM * exp->X_add_number);
+ }
+
+ /* First output the expression in the instruction to the pool. */
+ emit_expr (exp, size); /* .word|.xword */
+
+ if (exp->X_op == O_big)
+ {
+ free (pool->literals[entry].bignum);
+ pool->literals[entry].bignum = NULL;
+ }
+ }
/* Mark the pool as empty. */
pool->next_free_entry = 0;
static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
+static void s_aarch64_arch_extension (int);
/* This table describes all the machine specific pseudo-ops the assembler
has to support. The fields are:
{"pool", s_ltorg, 0},
{"cpu", s_aarch64_cpu, 0},
{"arch", s_aarch64_arch, 0},
+ {"arch_extension", s_aarch64_arch_extension, 0},
{"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
{"tlsdesccall", s_tlsdesccall, 0},
| ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
}
-/* Return TRUE if IMM is a valid floating-point immediate; return FALSE
- otherwise. */
+/* Return TRUE if the single-precision floating-point value encoded in IMM
+ can be expressed in the AArch64 8-bit signed floating-point format with
+ 3-bit exponent and normalized 4 bits of precision; in other words, the
+ floating-point value must be expressible as
+ (+/-) n / 16 * power (2, r)
+ where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
+
static bfd_boolean
aarch64_imm_float_p (uint32_t imm)
{
- /* 3 32222222 2221111111111
+ /* If a single-precision floating-point value has the following bit
+ pattern, it can be expressed in the AArch64 8-bit floating-point
+ format:
+
+ 3 32222222 2221111111111
1 09876543 21098765432109876543210
- n Eeeeeexx xxxx0000000000000000000 */
- uint32_t e;
+ n Eeeeeexx xxxx0000000000000000000
+
+ where n, e and each x are either 0 or 1 independently, with
+ E == ~ e. */
+
+ uint32_t pattern;
+
+ /* Prepare the pattern for 'Eeeeee'. */
+ if (((imm >> 30) & 0x1) == 0)
+ pattern = 0x3e000000;
+ else
+ pattern = 0x40000000;
+
+ return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
+ && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
+}
+
+/* Like aarch64_imm_float_p but for a double-precision floating-point value.
+
+ Return TRUE if the value encoded in IMM can be expressed in the AArch64
+ 8-bit signed floating-point format with 3-bit exponent and normalized 4
+ bits of precision (i.e. can be used in an FMOV instruction); return the
+ equivalent single-precision encoding in *FPWORD.
+
+ Otherwise return FALSE. */
+
+static bfd_boolean
+aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
+{
+ /* If a double-precision floating-point value has the following bit
+ pattern, it can be expressed in the AArch64 8-bit floating-point
+ format:
+
+ 6 66655555555 554444444...21111111111
+ 3 21098765432 109876543...098765432109876543210
+ n Eeeeeeeeexx xxxx00000...000000000000000000000
+
+ where n, e and each x are either 0 or 1 independently, with
+ E == ~ e. */
- e = (imm >> 30) & 0x1;
- if (e == 0)
- e = 0x3e000000;
+ uint32_t pattern;
+ uint32_t high32 = imm >> 32;
+
+ /* Lower 32 bits need to be 0s. */
+ if ((imm & 0xffffffff) != 0)
+ return FALSE;
+
+ /* Prepare the pattern for 'Eeeeeeeee'. */
+ if (((high32 >> 30) & 0x1) == 0)
+ pattern = 0x3fc00000;
+ else
+ pattern = 0x40000000;
+
+ if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
+ && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
+ {
+ /* Convert to the single-precision encoding.
+ i.e. convert
+ n Eeeeeeeeexx xxxx00000...000000000000000000000
+ to
+ n Eeeeeexx xxxx0000000000000000000. */
+ *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
+ | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
+ return TRUE;
+ }
else
- e = 0x40000000;
- return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */
- && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */
+ return FALSE;
}
-/* Note: this accepts the floating-point 0 constant. */
+/* Parse a floating-point immediate. Return TRUE on success and return the
+ value in *IMMED in the format of IEEE754 single-precision encoding.
+ *CCP points to the start of the string; DP_P is TRUE when the immediate
+ is expected to be in double-precision (N.B. this only matters when
+ hexadecimal representation is involved).
+
+ N.B. 0.0 is accepted by this function. */
+
static bfd_boolean
-parse_aarch64_imm_float (char **ccp, int *immed)
+parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
{
char *str = *ccp;
char *fpnum;
LITTLENUM_TYPE words[MAX_LITTLENUMS];
int found_fpchar = 0;
+ int64_t val = 0;
+ unsigned fpword = 0;
+ bfd_boolean hex_p = FALSE;
skip_past_char (&str, '#');
- /* We must not accidentally parse an integer as a floating-point number. Make
- sure that the value we parse is not an integer by checking for special
- characters '.' or 'e'.
- FIXME: This is a hack that is not very efficient, but doing better is
- tricky because type information isn't in a very usable state at parse
- time. */
fpnum = str;
skip_whitespace (fpnum);
if (strncmp (fpnum, "0x", 2) == 0)
- return FALSE;
+ {
+ /* Support the hexadecimal representation of the IEEE754 encoding.
+ Double-precision is expected when DP_P is TRUE, otherwise the
+ representation should be in single-precision. */
+ if (! parse_constant_immediate (&str, &val))
+ goto invalid_fp;
+
+ if (dp_p)
+ {
+ if (! aarch64_double_precision_fmovable (val, &fpword))
+ goto invalid_fp;
+ }
+ else if ((uint64_t) val > 0xffffffff)
+ goto invalid_fp;
+ else
+ fpword = val;
+
+ hex_p = TRUE;
+ }
else
{
+ /* We must not accidentally parse an integer as a floating-point number.
+ Make sure that the value we parse is not an integer by checking for
+ special characters '.' or 'e'. */
for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
{
return FALSE;
}
- if ((str = atof_ieee (str, 's', words)) != NULL)
+ if (! hex_p)
{
- unsigned fpword = 0;
int i;
+ if ((str = atof_ieee (str, 's', words)) == NULL)
+ goto invalid_fp;
+
/* Our FP word must be 32 bits (single-precision FP). */
for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
{
fpword <<= LITTLENUM_NUMBER_OF_BITS;
fpword |= words[i];
}
+ }
- if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
- *immed = fpword;
- else
- goto invalid_fp;
-
+ if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
+ {
+ *immed = fpword;
*ccp = str;
-
return TRUE;
}
BFD_RELOC_AARCH64_MOVW_G3,
0,
0},
- /* Get to the GOT entry for a symbol. */
- {"got_prel19", 0,
- 0,
- 0,
- 0,
- BFD_RELOC_AARCH64_GOT_LD_PREL19},
+
/* Get to the page containing GOT entry for a symbol. */
{"got", 1,
BFD_RELOC_AARCH64_ADR_GOT_PAGE,
0,
0,
- 0},
+ BFD_RELOC_AARCH64_GOT_LD_PREL19},
+
/* 12 bit offset into the page containing GOT entry for that symbol. */
{"got_lo12", 0,
0,
0,
0,
- BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
+ BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
/* Get to the page containing GOT TLS entry for a symbol */
{"tlsgd", 0,
/* Get to the page containing GOT TLS entry for a symbol */
{"tlsdesc", 0,
- BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
+ BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
0,
0,
0},
0,
0,
BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
- BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
+ BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
/* Get to the page containing GOT TLS entry for a symbol */
{"gottprel", 0,
0,
0,
0,
- BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
+ BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
/* Get tp offset for a symbol. */
{"tprel", 0,
if (**str == '\0')
return TRUE;
- /* Otherwise, we have a shifted reloc modifier, so rewind to
+ /* Otherwise, we have a shifted reloc modifier, so rewind to
recover the variable name and continue parsing for the shifter. */
*str = p;
return parse_shifter_operand_imm (str, operand, mode);
Returns the encoding for the option, or PARSE_FAIL.
If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
- implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
+ implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
static int
parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
{
char *p, *q;
char buf[32];
- const struct aarch64_name_value_pair *o;
+ const aarch64_sys_reg *o;
int value;
p = buf;
return PARSE_FAIL;
else
{
- /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
- registers. */
+ /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
unsigned int op0, op1, cn, cm, op2;
- if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
+
+ if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
+ != 5)
return PARSE_FAIL;
- /* The architecture specifies the encoding space for implementation
- defined registers as:
- op0 op1 CRn CRm op2
- 11 xxx 1x11 xxxx xxx
- For convenience GAS accepts a wider encoding space, as follows:
- op0 op1 CRn CRm op2
- 11 xxx xxxx xxxx xxx */
- if (op0 != 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
+ if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
return PARSE_FAIL;
value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
}
}
else
- value = o->value;
+ {
+ if (aarch64_sys_reg_deprecated_p (o))
+ as_warn (_("system register name '%s' is deprecated and may be "
+"removed in a future release"), buf);
+ value = o->value;
+ }
*str = q;
return value;
\f
/* Diagnostics on operands errors. */
-/* By default, output one-line error message only.
- Enable the verbose error message by -merror-verbose. */
-static int verbose_error_p = 0;
+/* By default, output verbose error message.
+ Disable the verbose error message by -mno-verbose-error. */
+static int verbose_error_p = 1;
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging. */
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
- int idx = record->detail.index;
+ const aarch64_operand_error *detail = &record->detail;
+ int idx = detail->index;
const aarch64_opcode *opcode = record->opcode;
- enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
+ enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
: AARCH64_OPND_NIL);
- const aarch64_operand_error *detail = &record->detail;
switch (detail->kind)
{
case AARCH64_OPDE_RECOVERABLE:
case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
case AARCH64_OPDE_OTHER_ERROR:
- gas_assert (idx >= 0);
/* Use the prepared error message if there is, otherwise use the
operand description string to describe the error. */
if (detail->error != NULL)
{
- if (detail->index == -1)
+ if (idx < 0)
as_bad (_("%s -- `%s'"), detail->error, str);
else
as_bad (_("%s at operand %d -- `%s'"),
- detail->error, detail->index + 1, str);
+ detail->error, idx + 1, str);
}
else
- as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
+ {
+ gas_assert (idx >= 0);
+ as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
aarch64_get_operand_desc (opd_code), str);
+ }
break;
case AARCH64_OPDE_INVALID_VARIANT:
if (detail->data[0] != detail->data[1])
as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
detail->error ? detail->error : _("immediate value"),
- detail->data[0], detail->data[1], detail->index + 1, str);
+ detail->data[0], detail->data[1], idx + 1, str);
else
as_bad (_("%s expected to be %d at operand %d -- `%s'"),
detail->error ? detail->error : _("immediate value"),
- detail->data[0], detail->index + 1, str);
+ detail->data[0], idx + 1, str);
break;
case AARCH64_OPDE_REG_LIST:
if (detail->data[0] == 1)
as_bad (_("invalid number of registers in the list; "
"only 1 register is expected at operand %d -- `%s'"),
- detail->index + 1, str);
+ idx + 1, str);
else
as_bad (_("invalid number of registers in the list; "
"%d registers are expected at operand %d -- `%s'"),
- detail->data[0], detail->index + 1, str);
+ detail->data[0], idx + 1, str);
break;
case AARCH64_OPDE_UNALIGNED:
as_bad (_("immediate value should be a multiple of "
"%d at operand %d -- `%s'"),
- detail->data[0], detail->index + 1, str);
+ detail->data[0], idx + 1, str);
break;
default:
case AARCH64_OPND_Rs:
case AARCH64_OPND_Ra:
case AARCH64_OPND_Rt_SYS:
+ case AARCH64_OPND_PAIRREG:
po_int_reg_or_fail (1, 0);
break;
bfd_boolean res1 = FALSE, res2 = FALSE;
/* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
it is probably not worth the effort to support it. */
- if (!(res1 = parse_aarch64_imm_float (&str, &qfloat))
+ if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
&& !(res2 = parse_constant_immediate (&str, &val)))
goto failure;
if ((res1 && qfloat == 0) || (res2 && val == 0))
case AARCH64_OPND_IMM_MOV:
{
char *saved = str;
- if (reg_name_p (str, REG_TYPE_R_Z_SP))
+ if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
+ reg_name_p (str, REG_TYPE_VN))
goto failure;
str = saved;
po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
case AARCH64_OPND_SIMD_FPIMM:
{
int qfloat;
- if (! parse_aarch64_imm_float (&str, &qfloat))
+ bfd_boolean dp_p
+ = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
+ == 8);
+ if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
goto failure;
if (qfloat == 0)
{
break;
case AARCH64_OPND_COND:
+ case AARCH64_OPND_COND1:
info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
str += 2;
if (info->cond == NULL)
set_syntax_error (_("invalid condition"));
goto failure;
}
+ else if (operands[i] == AARCH64_OPND_COND1
+ && (info->cond->value & 0xe) == 0xe)
+ {
+ /* Do not allow AL or NV. */
+ set_default_error ();
+ goto failure;
+ }
break;
case AARCH64_OPND_ADDR_ADRP:
if (! backtrack_pos)
goto parse_operands_return;
+ {
+ /* We reach here because this operand is marked as optional, and
+ either no operand was supplied or the operand was supplied but it
+ was syntactically incorrect. In the latter case we report an
+ error. In the former case we perform a few more checks before
+ dropping through to the code to insert the default operand. */
+
+ char *tmp = backtrack_pos;
+ char endchar = END_OF_INSN;
+
+ if (i != (aarch64_num_of_operands (opcode) - 1))
+ endchar = ',';
+ skip_past_char (&tmp, ',');
+
+ if (*tmp != endchar)
+ /* The user has supplied an operand in the wrong format. */
+ goto parse_operands_return;
+
+ /* Make sure there is not a comma before the optional operand.
+ For example the fifth operand of 'sys' is optional:
+
+ sys #0,c0,c0,#0, <--- wrong
+ sys #0,c0,c0,#0 <--- correct. */
+ if (comma_skipped_p && i && endchar == END_OF_INSN)
+ {
+ set_fatal_syntax_error
+ (_("unexpected comma before the omitted optional operand"));
+ goto parse_operands_return;
+ }
+ }
+
/* Reaching here means we are dealing with an optional operand that is
omitted from the assembly line. */
gas_assert (optional_operand_p (opcode, i));
str = backtrack_pos;
backtrack_pos = 0;
- /* If this is the last operand that is optional and omitted, but without
- the presence of a comma. */
- if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
- {
- set_fatal_syntax_error
- (_("unexpected comma before the omitted optional operand"));
- goto parse_operands_return;
- }
-
/* Clear any error record after the omitted optional operand has been
successfully handled. */
clear_error ();
return TRUE;
}
+/* Check for loads and stores that will cause unpredictable behavior. */
+
+static void
+warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
+{
+ aarch64_inst *base = &instr->base;
+ const aarch64_opcode *opcode = base->opcode;
+ const aarch64_opnd_info *opnds = base->operands;
+ switch (opcode->iclass)
+ {
+ case ldst_pos:
+ case ldst_imm9:
+ case ldst_unscaled:
+ case ldst_unpriv:
+ /* Loading/storing the base register is unpredictable if writeback. */
+ if ((aarch64_get_operand_class (opnds[0].type)
+ == AARCH64_OPND_CLASS_INT_REG)
+ && opnds[0].reg.regno == opnds[1].addr.base_regno
+ && opnds[1].addr.writeback)
+ as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
+ break;
+ case ldstpair_off:
+ case ldstnapair_offs:
+ case ldstpair_indexed:
+ /* Loading/storing the base register is unpredictable if writeback. */
+ if ((aarch64_get_operand_class (opnds[0].type)
+ == AARCH64_OPND_CLASS_INT_REG)
+ && (opnds[0].reg.regno == opnds[2].addr.base_regno
+ || opnds[1].reg.regno == opnds[2].addr.base_regno)
+ && opnds[2].addr.writeback)
+ as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
+ /* Load operations must load different registers. */
+ if ((opcode->opcode & (1 << 22))
+ && opnds[0].reg.regno == opnds[1].reg.regno)
+ as_warn (_("unpredictable load of register pair -- `%s'"), str);
+ break;
+ default:
+ break;
+ }
+}
+
/* A wrapper function to interface with libopcodes on encoding and
record the error message if there is any.
dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */
- /* Check that this instruction is supported for this CPU. */
- if (!opcode->avariant
- || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
- {
- as_bad (_("selected processor does not support `%s'"), str);
- return;
- }
-
mapping_state (MAP_INSN);
inst_base = &inst.base;
&& programmer_friendly_fixup (&inst)
&& do_encode (inst_base->opcode, &inst.base, &inst_base->value))
{
+ /* Check that this instruction is supported for this CPU. */
+ if (!opcode->avariant
+ || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
+ {
+ as_bad (_("selected processor does not support `%s'"), str);
+ return;
+ }
+
+ warn_unpredictable_ldst (&inst, str);
+
if (inst.reloc.type == BFD_RELOC_UNUSED
|| !inst.reloc.need_libopcodes_p)
output_inst (NULL);
}
/* This is called from HANDLE_ALIGN in write.c. Fill in the contents
- of an rs_align_code fragment. */
+ of an rs_align_code fragment.
+
+ Here we fill the frag with the appropriate info for padding the
+ output stream. The resulting frag will consist of a fixed (fr_fix)
+ and of a repeating (fr_var) part.
+
+ The fixed content is always emitted before the repeating content and
+ these two parts are used as follows in constructing the output:
+ - the fixed part will be used to align to a valid instruction word
+ boundary, in case that we start at a misaligned address; as no
+ executable instruction can live at the misaligned location, we
+ simply fill with zeros;
+ - the variable part will be used to cover the remaining padding and
+ we fill using the AArch64 NOP instruction.
+
+ Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
+ enough storage space for up to 3 bytes for padding back to a valid
+ instruction alignment and exactly 4 bytes to store the NOP pattern. */
void
aarch64_handle_align (fragS * fragP)
int bytes, fix, noop_size;
char *p;
- const char *noop;
if (fragP->fr_type != rs_align_code)
return;
bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
p = fragP->fr_literal + fragP->fr_fix;
- fix = 0;
-
- if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
- bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
#ifdef OBJ_ELF
gas_assert (fragP->tc_frag_data.recorded);
#endif
- noop = aarch64_noop;
noop_size = sizeof (aarch64_noop);
- fragP->fr_var = noop_size;
- if (bytes & (noop_size - 1))
+ fix = bytes & (noop_size - 1);
+ if (fix)
{
- fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
memset (p, 0, fix);
p += fix;
- bytes -= fix;
+ fragP->fr_fix += fix;
}
- while (bytes >= noop_size)
- {
- memcpy (p, noop, noop_size);
- p += noop_size;
- bytes -= noop_size;
- fix += noop_size;
- }
-
- fragP->fr_fix += fix;
-}
-
-/* Called from md_do_align. Used to create an alignment
- frag in a code section. */
-
-void
-aarch64_frag_align_code (int n, int max)
-{
- char *p;
-
- /* We assume that there will never be a requirement
- to support alignments greater than x bytes. */
- if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
- as_fatal (_
- ("alignments greater than %d bytes not supported in .text sections"),
- MAX_MEM_FOR_RS_ALIGN_CODE + 1);
-
- p = frag_var (rs_align_code,
- MAX_MEM_FOR_RS_ALIGN_CODE,
- 1,
- (relax_substateT) max,
- (symbolS *) NULL, (offsetT) n, (char *) NULL);
- *p = 0;
+ if (noop_size)
+ memcpy (p, aarch64_noop, noop_size);
+ fragP->fr_var = noop_size;
}
/* Perform target specific initialisation of a frag.
case REG_TYPE_SP_64:
case REG_TYPE_R_32:
case REG_TYPE_R_64:
+ return reg->number;
+
case REG_TYPE_FP_B:
case REG_TYPE_FP_H:
case REG_TYPE_FP_S:
case REG_TYPE_FP_D:
case REG_TYPE_FP_Q:
- return reg->number;
+ return reg->number + 64;
+
default:
break;
}
return -1;
}
+/* Implement DWARF2_ADDR_SIZE. */
+
+int
+aarch64_dwarf2_addr_size (void)
+{
+#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+ if (ilp32_p)
+ return 4;
+#endif
+ return bfd_arch_bits_per_address (stdoutput) / 8;
+}
+
/* MD interface: Symbol and relocation handling. */
/* Return the address within the segment that a PC-relative fixup is
break;
case BFD_RELOC_AARCH64_LD_LO19_PCREL:
- if (value & 3)
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("pc-relative load offset not word aligned"));
- if (signed_overflow (value, 21))
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("pc-relative load offset out of range"));
if (fixP->fx_done || !seg->use_rela_p)
{
+ if (value & 3)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("pc-relative load offset not word aligned"));
+ if (signed_overflow (value, 21))
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("pc-relative load offset out of range"));
insn = get_aarch64_insn (buf);
insn |= encode_ld_lit_ofs_19 (value >> 2);
put_aarch64_insn (buf, insn);
break;
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
- if (signed_overflow (value, 21))
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("pc-relative address offset out of range"));
if (fixP->fx_done || !seg->use_rela_p)
{
+ if (signed_overflow (value, 21))
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("pc-relative address offset out of range"));
insn = get_aarch64_insn (buf);
insn |= encode_adr_imm (value);
put_aarch64_insn (buf, insn);
break;
case BFD_RELOC_AARCH64_BRANCH19:
- if (value & 3)
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("conditional branch target not word aligned"));
- if (signed_overflow (value, 21))
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("conditional branch out of range"));
if (fixP->fx_done || !seg->use_rela_p)
{
+ if (value & 3)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("conditional branch target not word aligned"));
+ if (signed_overflow (value, 21))
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("conditional branch out of range"));
insn = get_aarch64_insn (buf);
insn |= encode_cond_branch_ofs_19 (value >> 2);
put_aarch64_insn (buf, insn);
break;
case BFD_RELOC_AARCH64_TSTBR14:
- if (value & 3)
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("conditional branch target not word aligned"));
- if (signed_overflow (value, 16))
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("conditional branch out of range"));
if (fixP->fx_done || !seg->use_rela_p)
{
+ if (value & 3)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("conditional branch target not word aligned"));
+ if (signed_overflow (value, 16))
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("conditional branch out of range"));
insn = get_aarch64_insn (buf);
insn |= encode_tst_branch_ofs_14 (value >> 2);
put_aarch64_insn (buf, insn);
case BFD_RELOC_AARCH64_JUMP26:
case BFD_RELOC_AARCH64_CALL26:
- if (value & 3)
- as_bad_where (fixP->fx_file, fixP->fx_line,
- _("branch target not word aligned"));
- if (signed_overflow (value, 28))
- as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
if (fixP->fx_done || !seg->use_rela_p)
{
+ if (value & 3)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("branch target not word aligned"));
+ if (signed_overflow (value, 28))
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _("branch out of range"));
insn = get_aarch64_insn (buf);
insn |= encode_branch_ofs_26 (value >> 2);
put_aarch64_insn (buf, insn);
}
break;
- case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
+ fixP->fx_r_type = (ilp32_p
+ ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
+ : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
+ S_SET_THREAD_LOCAL (fixP->fx_addsy);
+ /* Should always be exported to object file, see
+ aarch64_force_relocation(). */
+ gas_assert (!fixP->fx_done);
+ gas_assert (seg->use_rela_p);
+ break;
+
+ case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
+ fixP->fx_r_type = (ilp32_p
+ ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
+ : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
+ S_SET_THREAD_LOCAL (fixP->fx_addsy);
+ /* Should always be exported to object file, see
+ aarch64_force_relocation(). */
+ gas_assert (!fixP->fx_done);
+ gas_assert (seg->use_rela_p);
+ break;
+
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
- case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
- case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
S_SET_THREAD_LOCAL (fixP->fx_addsy);
/* Should always be exported to object file, see
aarch64_force_relocation(). */
gas_assert (seg->use_rela_p);
break;
+ case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
+ /* Should always be exported to object file, see
+ aarch64_force_relocation(). */
+ fixP->fx_r_type = (ilp32_p
+ ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
+ : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
+ gas_assert (!fixP->fx_done);
+ gas_assert (seg->use_rela_p);
+ break;
+
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
case BFD_RELOC_AARCH64_ADD_LO12:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
/* Should always be exported to object file, see
aarch64_force_relocation(). */
gas_assert (!fixP->fx_done);
case BFD_RELOC_AARCH64_TLSDESC_CALL:
break;
+ case BFD_RELOC_UNUSED:
+ /* An error will already have been reported. */
+ break;
+
default:
as_bad_where (fixP->fx_file, fixP->fx_line,
_("unexpected %s fixup"),
even if the symbol is extern or weak. */
return 0;
- case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
+ case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
+ /* Pseudo relocs that need to be fixed up according to
+ ilp32_p. */
+ return 0;
+
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LDST128_LO12:
+ case BFD_RELOC_AARCH64_LDST16_LO12:
+ case BFD_RELOC_AARCH64_LDST32_LO12:
+ case BFD_RELOC_AARCH64_LDST64_LO12:
+ case BFD_RELOC_AARCH64_LDST8_LO12:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
- case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
- case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
- case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
- case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
- case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
- case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
- case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
- case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
- case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
- case BFD_RELOC_AARCH64_ADD_LO12:
- case BFD_RELOC_AARCH64_LDST8_LO12:
- case BFD_RELOC_AARCH64_LDST16_LO12:
- case BFD_RELOC_AARCH64_LDST32_LO12:
- case BFD_RELOC_AARCH64_LDST64_LO12:
- case BFD_RELOC_AARCH64_LDST128_LO12:
- case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
/* Always leave these relocations for the linker. */
return 1;
elf64_aarch64_target_format (void)
{
if (target_big_endian)
- return "elf64-bigaarch64";
+ return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
else
- return "elf64-littleaarch64";
+ return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
}
void
cpu_variant = *mcpu_cpu_opt;
/* Record the CPU type. */
- mach = bfd_mach_aarch64;
+ mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
#endif /* DEBUG_AARCH64 */
{"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
NULL},
+ {"mno-verbose-error", N_("do not output verbose error messages"),
+ &verbose_error_p, 0, NULL},
{NULL, NULL, NULL, 0, NULL}
};
recognized by GCC. */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
{"all", AARCH64_ANY, NULL},
- {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
- {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
+ {"cortex-a53", AARCH64_FEATURE(AARCH64_ARCH_V8,
+ AARCH64_FEATURE_CRC), "Cortex-A53"},
+ {"cortex-a57", AARCH64_FEATURE(AARCH64_ARCH_V8,
+ AARCH64_FEATURE_CRC), "Cortex-A57"},
+ {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
+  /* 'xgene-1' is an older spelling of 'xgene1', used in earlier
+     releases; it is retained for backward compatibility and is
+     superseded by 'xgene1' in all tools.  */
+ {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
+ {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
+ {"xgene2", AARCH64_FEATURE(AARCH64_ARCH_V8,
+ AARCH64_FEATURE_CRC), "APM X-Gene 2"},
{"generic", AARCH64_ARCH_V8, NULL},
- /* These two are example CPUs supported in GCC, once we have real
- CPUs they will be removed. */
- {"example-1", AARCH64_ARCH_V8, NULL},
- {"example-2", AARCH64_ARCH_V8, NULL},
-
{NULL, AARCH64_ARCH_NONE, NULL}
};
};
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
+ {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
{"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
{"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
+ {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
{"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
{NULL, AARCH64_ARCH_NONE}
};
};
static int
-aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
+aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
+ bfd_boolean ext_only)
{
/* We insist on extensions being added before being removed. We achieve
this by using the ADDING_VALUE variable to indicate whether we are
while (str != NULL && *str != 0)
{
const struct aarch64_option_cpu_value_table *opt;
- char *ext;
+ char *ext = NULL;
int optlen;
- if (*str != '+')
+ if (!ext_only)
{
- as_bad (_("invalid architectural extension"));
- return 0;
- }
+ if (*str != '+')
+ {
+ as_bad (_("invalid architectural extension"));
+ return 0;
+ }
- str++;
- ext = strchr (str, '+');
+ ext = strchr (++str, '+');
+ }
if (ext != NULL)
optlen = ext - str;
{
mcpu_cpu_opt = &opt->value;
if (ext != NULL)
- return aarch64_parse_features (ext, &mcpu_cpu_opt);
+ return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
return 1;
}
{
march_cpu_opt = &opt->value;
if (ext != NULL)
- return aarch64_parse_features (ext, &march_cpu_opt);
+ return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
return 1;
}
return 0;
}
+/* ABIs. */
+struct aarch64_option_abi_value_table
+{
+  /* Name as accepted by the -mabi= command-line option.  */
+  char *name;
+  /* The ABI selected when NAME is given.  */
+  enum aarch64_abi_type value;
+};
+
+/* Table of the ABI names recognised by -mabi=, terminated by a NULL
+   name entry.  */
+static const struct aarch64_option_abi_value_table aarch64_abis[] = {
+  {"ilp32", AARCH64_ABI_ILP32},
+  {"lp64", AARCH64_ABI_LP64},
+  {NULL, 0}
+};
+
+/* Parse STR as an ABI name from the -mabi= option and record the
+   selection in AARCH64_ABI.  Return non-zero on success; on failure
+   issue a diagnostic and return zero.  */
+
+static int
+aarch64_parse_abi (char *str)
+{
+  const struct aarch64_option_abi_value_table *opt;
+
+  if (*str == '\0')
+    {
+      as_bad (_("missing abi name `%s'"), str);
+      return 0;
+    }
+
+  /* STR is the whole remainder of the option, so an exact string
+     compare suffices (no need for strlen/strncmp).  */
+  for (opt = aarch64_abis; opt->name != NULL; opt++)
+    if (streq (opt->name, str))
+      {
+	aarch64_abi = opt->value;
+	return 1;
+      }
+
+  /* No trailing newline: as_bad terminates the message itself.  */
+  as_bad (_("unknown abi `%s'"), str);
+  return 0;
+}
+
static struct aarch64_long_option_table aarch64_long_opts[] = {
+#ifdef OBJ_ELF
+ {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
+ aarch64_parse_abi, NULL},
+#endif /* OBJ_ELF */
{"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
aarch64_parse_cpu, NULL},
{"march=", N_("<arch name>\t assemble for architecture <arch name>"),
{
mcpu_cpu_opt = &opt->value;
if (ext != NULL)
- if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
+ if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
return;
cpu_variant = *mcpu_cpu_opt;
{
mcpu_cpu_opt = &opt->value;
if (ext != NULL)
- if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
+ if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
return;
cpu_variant = *mcpu_cpu_opt;
ignore_rest_of_line ();
}
+/* Parse a .arch_extension directive: enable or disable architectural
+   extension feature bits (e.g. "crc", "nofp") on top of the currently
+   selected CPU.  */
+
+static void
+s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
+{
+  char saved_char;
+  char *ext = input_line_pointer;
+
+  /* Temporarily NUL-terminate the extension list so it can be parsed
+     as a string.  */
+  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
+    input_line_pointer++;
+  saved_char = *input_line_pointer;
+  *input_line_pointer = 0;
+
+  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
+    {
+      /* Restore the clobbered byte even on failure, otherwise the rest
+	 of the input line would be lost to later parsing.  */
+      *input_line_pointer = saved_char;
+      ignore_rest_of_line ();
+      return;
+    }
+
+  cpu_variant = *mcpu_cpu_opt;
+
+  *input_line_pointer = saved_char;
+  demand_empty_rest_of_line ();
+}
+
/* Copy symbol information. */
void