Add support for V_4B so we can properly reject it.
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index e61ac9c03fd59f1f836c84a159b7a993e483a28d..453b1771f02a9884652b44ec9c16ea5bbced1a83 100644
@@ -1,6 +1,6 @@
 /* AArch64 assembler/disassembler support.
 
-   Copyright (C) 2009-2016 Free Software Foundation, Inc.
+   Copyright (C) 2009-2017 Free Software Foundation, Inc.
    Contributed by ARM Ltd.
 
    This file is part of GNU Binutils.
@@ -37,8 +37,14 @@ extern "C" {
 typedef uint32_t aarch64_insn;
 
 /* The following bitmasks control CPU features.  */
+#define AARCH64_FEATURE_SHA2   0x200000000ULL  /* SHA2 instructions.  */
+#define AARCH64_FEATURE_AES    0x800000000ULL  /* AES instructions.  */
+#define AARCH64_FEATURE_V8_4   0x000000800ULL  /* ARMv8.4 processors.  */
+#define AARCH64_FEATURE_SM4    0x100000000ULL  /* SM3 & SM4 instructions.  */
+#define AARCH64_FEATURE_SHA3   0x400000000ULL  /* SHA3 instructions.  */
 #define AARCH64_FEATURE_V8     0x00000001      /* All processors.  */
 #define AARCH64_FEATURE_V8_2   0x00000020      /* ARMv8.2 processors.  */
+#define AARCH64_FEATURE_V8_3   0x00000040      /* ARMv8.3 processors.  */
 #define AARCH64_FEATURE_CRYPTO 0x00010000      /* Crypto instructions.  */
 #define AARCH64_FEATURE_FP     0x00020000      /* FP instructions.  */
 #define AARCH64_FEATURE_SIMD   0x00040000      /* SIMD instructions.  */
@@ -51,38 +57,40 @@ typedef uint32_t aarch64_insn;
 #define AARCH64_FEATURE_F16    0x02000000      /* v8.2 FP16 instructions.  */
 #define AARCH64_FEATURE_RAS    0x04000000      /* RAS Extensions.  */
 #define AARCH64_FEATURE_PROFILE        0x08000000      /* Statistical Profiling.  */
+#define AARCH64_FEATURE_SVE    0x10000000      /* SVE instructions.  */
+#define AARCH64_FEATURE_RCPC   0x20000000      /* RCPC instructions.  */
+#define AARCH64_FEATURE_COMPNUM        0x40000000      /* Complex # instructions.  */
+#define AARCH64_FEATURE_DOTPROD 0x080000000     /* Dot Product instructions.  */
+#define AARCH64_FEATURE_F16_FML        0x1000000000ULL /* v8.2 FP16FML ins.  */
 
 /* Architectures are the sum of the base and extensions.  */
 #define AARCH64_ARCH_V8                AARCH64_FEATURE (AARCH64_FEATURE_V8, \
                                                 AARCH64_FEATURE_FP  \
                                                 | AARCH64_FEATURE_SIMD)
-#define AARCH64_ARCH_V8_1      AARCH64_FEATURE (AARCH64_FEATURE_V8, \
-                                                AARCH64_FEATURE_FP  \
-                                                | AARCH64_FEATURE_SIMD \
-                                                | AARCH64_FEATURE_CRC  \
+#define AARCH64_ARCH_V8_1      AARCH64_FEATURE (AARCH64_ARCH_V8, \
+                                                AARCH64_FEATURE_CRC    \
                                                 | AARCH64_FEATURE_V8_1 \
                                                 | AARCH64_FEATURE_LSE  \
                                                 | AARCH64_FEATURE_PAN  \
                                                 | AARCH64_FEATURE_LOR  \
                                                 | AARCH64_FEATURE_RDMA)
-#define AARCH64_ARCH_V8_2      AARCH64_FEATURE (AARCH64_FEATURE_V8,    \
+#define AARCH64_ARCH_V8_2      AARCH64_FEATURE (AARCH64_ARCH_V8_1,     \
                                                 AARCH64_FEATURE_V8_2   \
-                                                | AARCH64_FEATURE_F16  \
-                                                | AARCH64_FEATURE_RAS  \
-                                                | AARCH64_FEATURE_FP   \
-                                                | AARCH64_FEATURE_SIMD \
-                                                | AARCH64_FEATURE_CRC  \
-                                                | AARCH64_FEATURE_V8_1 \
-                                                | AARCH64_FEATURE_LSE  \
-                                                | AARCH64_FEATURE_PAN  \
-                                                | AARCH64_FEATURE_LOR  \
-                                                | AARCH64_FEATURE_RDMA)
+                                                | AARCH64_FEATURE_RAS)
+#define AARCH64_ARCH_V8_3      AARCH64_FEATURE (AARCH64_ARCH_V8_2,     \
+                                                AARCH64_FEATURE_V8_3   \
+                                                | AARCH64_FEATURE_RCPC \
+                                                | AARCH64_FEATURE_COMPNUM)
+#define AARCH64_ARCH_V8_4      AARCH64_FEATURE (AARCH64_ARCH_V8_3,     \
+                                                AARCH64_FEATURE_V8_4   \
+                                                | AARCH64_FEATURE_DOTPROD \
+                                                | AARCH64_FEATURE_F16_FML)
 
 #define AARCH64_ARCH_NONE      AARCH64_FEATURE (0, 0)
 #define AARCH64_ANY            AARCH64_FEATURE (-1, 0) /* Any basic core.  */
 
 /* CPU-specific features.  */
-typedef unsigned long aarch64_feature_set;
+typedef unsigned long long aarch64_feature_set;
 
 #define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT) \
   ((~(CPU) & (FEAT)) == 0)
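
Note how the architecture macros are now cumulative: AARCH64_ARCH_V8_1 passes AARCH64_ARCH_V8 as the core argument rather than restating FP and SIMD, and each later level builds on the one before it, which is why the long V8_2 list collapses to just V8_2 | RAS. The widening of aarch64_feature_set to unsigned long long is what lets the new SHA2/AES/SM4/SHA3/F16_FML bits sit above bit 31. A minimal standalone sketch of the pattern, using local stand-in names and assuming (as the sums above imply) that AARCH64_FEATURE simply ORs its two arguments:

#include <stdio.h>

/* Stand-ins for illustration only; the real macros live in this header.  */
#define FEATURE(core, ext)  ((core) | (ext))   /* assumed OR semantics */
#define FEAT_V8     0x00000001ULL
#define FEAT_V8_2   0x00000020ULL
#define FEAT_FP     0x00020000ULL
#define FEAT_SIMD   0x00040000ULL
#define FEAT_RAS    0x04000000ULL

#define ARCH_V8     FEATURE (FEAT_V8, FEAT_FP | FEAT_SIMD)
/* v8.2 builds on the previous level instead of repeating its bits.  */
#define ARCH_V8_2   FEATURE (ARCH_V8, FEAT_V8_2 | FEAT_RAS)

/* Same test as AARCH64_CPU_HAS_ALL_FEATURES: no FEAT bit missing.  */
#define HAS_ALL(cpu, feat)  ((~(cpu) & (feat)) == 0)

int main (void)
{
  printf ("v8.2 inherits FP/SIMD: %d\n",
          HAS_ALL (ARCH_V8_2, FEAT_FP | FEAT_SIMD));    /* prints 1 */
  printf ("plain v8 has RAS:      %d\n",
          HAS_ALL (ARCH_V8, FEAT_RAS));                 /* prints 0 */
  return 0;
}
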
@@ -119,7 +127,6 @@ enum aarch64_operand_class
   AARCH64_OPND_CLASS_SIMD_ELEMENT,
   AARCH64_OPND_CLASS_SISD_REG,
   AARCH64_OPND_CLASS_SIMD_REGLIST,
-  AARCH64_OPND_CLASS_CP_REG,
   AARCH64_OPND_CLASS_SVE_REG,
   AARCH64_OPND_CLASS_PRED_REG,
   AARCH64_OPND_CLASS_ADDRESS,
@@ -146,6 +153,7 @@ enum aarch64_opnd
 
   AARCH64_OPND_Rd_SP,  /* Integer Rd or SP.  */
   AARCH64_OPND_Rn_SP,  /* Integer Rn or SP.  */
+  AARCH64_OPND_Rm_SP,  /* Integer Rm or SP.  */
   AARCH64_OPND_PAIRREG,        /* Paired register operand.  */
   AARCH64_OPND_Rm_EXT, /* Integer Rm extended.  */
   AARCH64_OPND_Rm_SFT, /* Integer Rm shifted.  */
@@ -161,6 +169,7 @@ enum aarch64_opnd
   AARCH64_OPND_Sn,     /* AdvSIMD Scalar Sn.  */
   AARCH64_OPND_Sm,     /* AdvSIMD Scalar Sm.  */
 
+  AARCH64_OPND_Va,     /* AdvSIMD Vector Va.  */
   AARCH64_OPND_Vd,     /* AdvSIMD Vector Vd.  */
   AARCH64_OPND_Vn,     /* AdvSIMD Vector Vn.  */
   AARCH64_OPND_Vm,     /* AdvSIMD Vector Vm.  */
@@ -175,10 +184,11 @@ enum aarch64_opnd
                           structure to all lanes.  */
   AARCH64_OPND_LEt,    /* AdvSIMD Vector Element list.  */
 
-  AARCH64_OPND_Cn,     /* Co-processor register in CRn field.  */
-  AARCH64_OPND_Cm,     /* Co-processor register in CRm field.  */
+  AARCH64_OPND_CRn,    /* Co-processor register in CRn field.  */
+  AARCH64_OPND_CRm,    /* Co-processor register in CRm field.  */
 
   AARCH64_OPND_IDX,    /* AdvSIMD EXT index operand.  */
+  AARCH64_OPND_MASK,   /* AdvSIMD EXT index operand.  */
   AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left.  */
   AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right.  */
   AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift.  */
@@ -193,6 +203,7 @@ enum aarch64_opnd
   AARCH64_OPND_IMMS,   /* Immediate #<imms> in e.g. BFM.  */
   AARCH64_OPND_WIDTH,  /* Immediate #<width> in e.g. BFI.  */
   AARCH64_OPND_IMM,    /* Immediate.  */
+  AARCH64_OPND_IMM_2,  /* Immediate.  */
   AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field.  */
   AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field.  */
   AARCH64_OPND_UIMM4,  /* Unsigned 4-bit immediate in the CRm field.  */
@@ -200,6 +211,7 @@ enum aarch64_opnd
   AARCH64_OPND_BIT_NUM,        /* Immediate.  */
   AARCH64_OPND_EXCEPTION,/* imm16 operand in exception instructions.  */
   AARCH64_OPND_CCMP_IMM,/* Immediate in conditional compare instructions.  */
+  AARCH64_OPND_SIMM5,  /* 5-bit signed immediate in the imm5 field.  */
   AARCH64_OPND_NZCV,   /* Flag bit specifier giving an alternative value for
                           each condition flag.  */
 
@@ -208,6 +220,9 @@ enum aarch64_opnd
   AARCH64_OPND_HALF,   /* #<imm16>{, LSL #<shift>} operand in move wide.  */
   AARCH64_OPND_FBITS,  /* FP #<fbits> operand in e.g. SCVTF */
   AARCH64_OPND_IMM_MOV,        /* Immediate operand for the MOV alias.  */
+  AARCH64_OPND_IMM_ROT1,       /* Immediate rotate operand for FCMLA.  */
+  AARCH64_OPND_IMM_ROT2,       /* Immediate rotate operand for indexed FCMLA.  */
+  AARCH64_OPND_IMM_ROT3,       /* Immediate rotate operand for FCADD.  */
 
   AARCH64_OPND_COND,   /* Standard condition as the last operand.  */
   AARCH64_OPND_COND1,  /* Same as the above, but excluding AL and NV.  */
@@ -229,8 +244,10 @@ enum aarch64_opnd
                                   friendly feature of using LDR/STR as the
                                   the mnemonic name for LDUR/STUR instructions
                                   wherever there is no ambiguity.  */
+  AARCH64_OPND_ADDR_SIMM10,    /* Address of signed 10-bit immediate.  */
   AARCH64_OPND_ADDR_UIMM12,    /* Address of unsigned 12-bit immediate.  */
   AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures.  */
+  AARCH64_OPND_ADDR_OFFSET,     /* Address with an optional 9-bit immediate.  */
   AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed.  */
 
   AARCH64_OPND_SYSREG,         /* System register operand.  */
@@ -244,6 +261,13 @@ enum aarch64_opnd
   AARCH64_OPND_PRFOP,          /* Prefetch operation.  */
   AARCH64_OPND_BARRIER_PSB,    /* Barrier operand for PSB.  */
 
+  AARCH64_OPND_SVE_ADDR_RI_S4x16,   /* SVE [<Xn|SP>, #<simm4>*16].  */
+  AARCH64_OPND_SVE_ADDR_RI_S4xVL,   /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
+  AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
+  AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
+  AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
+  AARCH64_OPND_SVE_ADDR_RI_S6xVL,   /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
+  AARCH64_OPND_SVE_ADDR_RI_S9xVL,   /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
   AARCH64_OPND_SVE_ADDR_RI_U6,     /* SVE [<Xn|SP>, #<uimm6>].  */
   AARCH64_OPND_SVE_ADDR_RI_U6x2,    /* SVE [<Xn|SP>, #<uimm6>*2].  */
   AARCH64_OPND_SVE_ADDR_RI_U6x4,    /* SVE [<Xn|SP>, #<uimm6>*4].  */
@@ -283,6 +307,17 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm.<T>, LSL #<msz>].  */
   AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm.<T>, SXTW #<msz>].  */
   AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm.<T>, UXTW #<msz>].  */
+  AARCH64_OPND_SVE_AIMM,       /* SVE unsigned arithmetic immediate.  */
+  AARCH64_OPND_SVE_ASIMM,      /* SVE signed arithmetic immediate.  */
+  AARCH64_OPND_SVE_FPIMM8,     /* SVE 8-bit floating-point immediate.  */
+  AARCH64_OPND_SVE_I1_HALF_ONE,        /* SVE choice between 0.5 and 1.0.  */
+  AARCH64_OPND_SVE_I1_HALF_TWO,        /* SVE choice between 0.5 and 2.0.  */
+  AARCH64_OPND_SVE_I1_ZERO_ONE,        /* SVE choice between 0.0 and 1.0.  */
+  AARCH64_OPND_SVE_IMM_ROT1,   /* SVE 1-bit rotate operand (90 or 270).  */
+  AARCH64_OPND_SVE_IMM_ROT2,   /* SVE 2-bit rotate operand (N*90).  */
+  AARCH64_OPND_SVE_INV_LIMM,   /* SVE inverted logical immediate.  */
+  AARCH64_OPND_SVE_LIMM,       /* SVE logical immediate.  */
+  AARCH64_OPND_SVE_LIMM_MOV,   /* SVE logical immediate for MOV.  */
   AARCH64_OPND_SVE_PATTERN,    /* SVE vector pattern enumeration.  */
   AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
   AARCH64_OPND_SVE_PRFOP,      /* SVE prefetch operation.  */
@@ -294,16 +329,38 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_Pm,         /* SVE p0-p15 in Pm.  */
   AARCH64_OPND_SVE_Pn,         /* SVE p0-p15 in Pn.  */
   AARCH64_OPND_SVE_Pt,         /* SVE p0-p15 in Pt.  */
+  AARCH64_OPND_SVE_Rm,         /* Integer Rm or ZR, alt. SVE position.  */
+  AARCH64_OPND_SVE_Rn_SP,      /* Integer Rn or SP, alt. SVE position.  */
+  AARCH64_OPND_SVE_SHLIMM_PRED,          /* SVE shift left amount (predicated).  */
+  AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
+  AARCH64_OPND_SVE_SHRIMM_PRED,          /* SVE shift right amount (predicated).  */
+  AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
+  AARCH64_OPND_SVE_SIMM5,      /* SVE signed 5-bit immediate.  */
+  AARCH64_OPND_SVE_SIMM5B,     /* SVE secondary signed 5-bit immediate.  */
+  AARCH64_OPND_SVE_SIMM6,      /* SVE signed 6-bit immediate.  */
+  AARCH64_OPND_SVE_SIMM8,      /* SVE signed 8-bit immediate.  */
+  AARCH64_OPND_SVE_UIMM3,      /* SVE unsigned 3-bit immediate.  */
+  AARCH64_OPND_SVE_UIMM7,      /* SVE unsigned 7-bit immediate.  */
+  AARCH64_OPND_SVE_UIMM8,      /* SVE unsigned 8-bit immediate.  */
+  AARCH64_OPND_SVE_UIMM8_53,   /* SVE split unsigned 8-bit immediate.  */
+  AARCH64_OPND_SVE_VZn,                /* Scalar SIMD&FP register in Zn field.  */
+  AARCH64_OPND_SVE_Vd,         /* Scalar SIMD&FP register in Vd.  */
+  AARCH64_OPND_SVE_Vm,         /* Scalar SIMD&FP register in Vm.  */
+  AARCH64_OPND_SVE_Vn,         /* Scalar SIMD&FP register in Vn.  */
   AARCH64_OPND_SVE_Za_5,       /* SVE vector register in Za, bits [9,5].  */
   AARCH64_OPND_SVE_Za_16,      /* SVE vector register in Za, bits [20,16].  */
   AARCH64_OPND_SVE_Zd,         /* SVE vector register in Zd.  */
   AARCH64_OPND_SVE_Zm_5,       /* SVE vector register in Zm, bits [9,5].  */
   AARCH64_OPND_SVE_Zm_16,      /* SVE vector register in Zm, bits [20,16].  */
+  AARCH64_OPND_SVE_Zm3_INDEX,  /* z0-z7[0-3] in Zm, bits [20,16].  */
+  AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
+  AARCH64_OPND_SVE_Zm4_INDEX,  /* z0-z15[0-1] in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zn,         /* SVE vector register in Zn.  */
   AARCH64_OPND_SVE_Zn_INDEX,   /* Indexed SVE vector register, for DUP.  */
   AARCH64_OPND_SVE_ZnxN,       /* SVE vector register list in Zn.  */
   AARCH64_OPND_SVE_Zt,         /* SVE vector register in Zt.  */
   AARCH64_OPND_SVE_ZtxN,       /* SVE vector register list in Zt.  */
+  AARCH64_OPND_SM3_IMM2,       /* SM3 encodes lane in bits [13, 14].  */
 };
 
 /* Qualifier constrains an operand.  It either specifies a variant of an
@@ -346,6 +403,7 @@ enum aarch64_opnd_qualifier
      a use is only for the ease of operand encoding/decoding and qualifier
      sequence matching; such a use should not be applied widely; use the value
      constraint qualifiers for immediate operands wherever possible.  */
+  AARCH64_OPND_QLF_V_4B,
   AARCH64_OPND_QLF_V_8B,
   AARCH64_OPND_QLF_V_16B,
   AARCH64_OPND_QLF_V_2H,
@@ -361,6 +419,7 @@ enum aarch64_opnd_qualifier
   AARCH64_OPND_QLF_P_M,
 
   /* Constraint on value.  */
+  AARCH64_OPND_QLF_CR,         /* CRn, CRm. */
   AARCH64_OPND_QLF_imm_0_7,
   AARCH64_OPND_QLF_imm_0_15,
   AARCH64_OPND_QLF_imm_0_31,
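
The single new qualifier entry above is the point of the subject line: once AARCH64_OPND_QLF_V_4B exists, the assembler can match a 4B arrangement explicitly and reject it for instruction forms that only accept other arrangements, instead of being unable to represent it at all. A hedged sketch of that reject-by-lookup idea, with a hypothetical allowed-list helper (the real matching is the qualifier-sequence machinery in aarch64-opc.c):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative subset of the qualifier enum above.  */
enum qlf { QLF_NIL, QLF_V_4B, QLF_V_8B, QLF_V_16B };

/* Hypothetical helper: is qualifier Q one of the arrangements this
   instruction form accepts?  */
static bool
qualifier_allowed (enum qlf q, const enum qlf *allowed, size_t n)
{
  for (size_t i = 0; i < n; i++)
    if (allowed[i] == q)
      return true;
  return false;
}

int main (void)
{
  /* An instruction form that only takes 8B/16B arrangements.  */
  static const enum qlf allowed[] = { QLF_V_8B, QLF_V_16B };

  if (!qualifier_allowed (QLF_V_4B, allowed, 2))
    printf ("4B arrangement rejected\n");   /* this path is taken */
  return 0;
}
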
@@ -436,6 +495,7 @@ enum aarch64_insn_class
   ldst_immpost,
   ldst_immpre,
   ldst_imm9,   /* immpost or immpre */
+  ldst_imm10,  /* LDRAA/LDRAB */
   ldst_pos,
   ldst_regoff,
   ldst_unpriv,
@@ -451,7 +511,22 @@ enum aarch64_insn_class
   movewide,
   pcreladdr,
   ic_system,
+  sve_cpy,
+  sve_index,
+  sve_limm,
+  sve_misc,
+  sve_movprfx,
+  sve_pred_zm,
+  sve_shift_pred,
+  sve_shift_unpred,
+  sve_size_bhs,
+  sve_size_bhsd,
+  sve_size_hsd,
+  sve_size_sd,
   testbranch,
+  cryptosm3,
+  cryptosm4,
+  dotproduct,
 };
 
 /* Opcode enumerators.  */
@@ -541,6 +616,20 @@ enum aarch64_op
   OP_UXTL,
   OP_UXTL2,
 
+  OP_MOV_P_P,
+  OP_MOV_Z_P_Z,
+  OP_MOV_Z_V,
+  OP_MOV_Z_Z,
+  OP_MOV_Z_Zi,
+  OP_MOVM_P_P_P,
+  OP_MOVS_P_P,
+  OP_MOVZS_P_P_P,
+  OP_MOVZ_P_P_P,
+  OP_NOTS_P_P_P_Z,
+  OP_NOT_P_P_P_Z,
+
+  OP_FCMLA_ELEM,       /* ARMv8.3, indexed element version.  */
+
   OP_TOTAL_NUM,                /* Pseudo.  */
 };
 
@@ -786,6 +875,7 @@ enum aarch64_modifier_kind
   AARCH64_MOD_SXTW,
   AARCH64_MOD_SXTX,
   AARCH64_MOD_MUL,
+  AARCH64_MOD_MUL_VL,
 };
 
 bfd_boolean
@@ -799,7 +889,7 @@ typedef struct
 {
   /* A list of names with the first one as the disassembly preference;
-     terminated by NULL if fewer than 3.  */
-  const char *names[3];
+     terminated by NULL if fewer than 4.  */
+  const char *names[4];
   aarch64_insn value;
 } aarch64_cond;
 
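The extra slot in names[] makes room for the SVE alias spellings of the condition codes, where one condition can now carry up to four names (for instance, 0b0011 can be written cc or lo, with SVE adding further spellings); shorter lists stay NULL-terminated. A sketch of the shape of such an entry, with illustrative values rather than the real table in aarch64-opc.c:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t aarch64_insn;

typedef struct
{
  const char *names[4];         /* NULL-terminated if fewer than 4.  */
  aarch64_insn value;
} aarch64_cond;

/* Illustrative entries only: the first name is the disassembly
   preference, and condition 0b0011 is the case needing all four slots.  */
static const aarch64_cond conds[] =
{
  { { "eq", "none" }, 0x0 },                    /* NULL-padded */
  { { "cc", "lo", "ul", "last" }, 0x3 },        /* all four used */
};

int main (void)
{
  /* Disassembly picks names[0]; the assembler accepts any of them.  */
  printf ("preferred spelling: %s\n", conds[1].names[0]);
  return 0;
}
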
@@ -1058,6 +1148,9 @@ aarch64_get_operand_name (enum aarch64_opnd);
 extern const char *
 aarch64_get_operand_desc (enum aarch64_opnd);
 
+extern bfd_boolean
+aarch64_sve_dupm_mov_immediate_p (uint64_t, int);
+
 #ifdef DEBUG_AARCH64
 extern int debug_dump;
 