typedef uint32_t aarch64_insn;
/* The following bitmasks control CPU features. */
+#define AARCH64_FEATURE_V8_4 0x000000800ULL /* ARMv8.4 processors. */
+#define AARCH64_FEATURE_SM4 0x100000000ULL /* SM3 & SM4 instructions. */
+#define AARCH64_FEATURE_SHA2 0x200000000ULL /* SHA2 instructions. */
+#define AARCH64_FEATURE_SHA3 0x400000000ULL /* SHA3 instructions. */
+#define AARCH64_FEATURE_AES 0x800000000ULL /* AES instructions. */
#define AARCH64_FEATURE_V8 0x00000001 /* All processors. */
#define AARCH64_FEATURE_V8_2 0x00000020 /* ARMv8.2 processors. */
#define AARCH64_FEATURE_V8_3 0x00000040 /* ARMv8.3 processors. */
#define AARCH64_FEATURE_PROFILE 0x08000000 /* Statistical Profiling. */
#define AARCH64_FEATURE_SVE 0x10000000 /* SVE instructions. */
#define AARCH64_FEATURE_RCPC 0x20000000 /* RCPC instructions. */
+#define AARCH64_FEATURE_COMPNUM 0x40000000 /* Complex number instructions. */
+#define AARCH64_FEATURE_DOTPROD 0x080000000ULL /* Dot Product instructions. */
+#define AARCH64_FEATURE_F16_FML 0x1000000000ULL /* v8.2 FP16FML instructions. */
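/* Illustrative sketch, not part of the header: the optional v8.4 crypto
   extensions can be ORed into a single convenience mask.  The macro name
   below is hypothetical.  The ULL suffixes above matter because several
   of these bits sit at or above bit 31, outside the range of a 32-bit
   int. */
#define EXAMPLE_V8_4_CRYPTO (AARCH64_FEATURE_AES    \
                             | AARCH64_FEATURE_SHA2 \
                             | AARCH64_FEATURE_SHA3 \
                             | AARCH64_FEATURE_SM4)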
/* Architectures are the sum of the base and extensions. */
#define AARCH64_ARCH_V8 AARCH64_FEATURE (AARCH64_FEATURE_V8, \
AARCH64_FEATURE_FP \
| AARCH64_FEATURE_SIMD)
#define AARCH64_ARCH_V8_1 AARCH64_FEATURE (AARCH64_ARCH_V8, \
AARCH64_FEATURE_CRC \
| AARCH64_FEATURE_V8_1 \
| AARCH64_FEATURE_LSE \
| AARCH64_FEATURE_PAN \
| AARCH64_FEATURE_LOR \
| AARCH64_FEATURE_RDMA)
#define AARCH64_ARCH_V8_2 AARCH64_FEATURE (AARCH64_ARCH_V8_1, \
AARCH64_FEATURE_V8_2 \
- | AARCH64_FEATURE_F16 \
| AARCH64_FEATURE_RAS)
#define AARCH64_ARCH_V8_3 AARCH64_FEATURE (AARCH64_ARCH_V8_2, \
AARCH64_FEATURE_V8_3 \
- | AARCH64_FEATURE_RCPC)
+ | AARCH64_FEATURE_RCPC \
+ | AARCH64_FEATURE_COMPNUM)
+#define AARCH64_ARCH_V8_4 AARCH64_FEATURE (AARCH64_ARCH_V8_3, \
+ AARCH64_FEATURE_V8_4 \
+ | AARCH64_FEATURE_DOTPROD \
+ | AARCH64_FEATURE_F16_FML)
#define AARCH64_ARCH_NONE AARCH64_FEATURE (0, 0)
#define AARCH64_ANY AARCH64_FEATURE (-1, 0) /* Any basic core. */
/* CPU-specific features. */
-typedef unsigned long aarch64_feature_set;
+typedef unsigned long long aarch64_feature_set;
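/* Illustrative compile-time check, not part of the header: the widened
   typedef must hold at least 64 bits, since AARCH64_FEATURE_F16_FML
   occupies bit 36.  C99 guarantees unsigned long long is >= 64 bits. */
typedef char example_feature_set_is_wide_enough
  [sizeof (aarch64_feature_set) * 8 >= 64 ? 1 : -1];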
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT) \
((~(CPU) & (FEAT)) == 0)
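/* Illustrative sketch, not part of the header: testing a CPU's feature
   set with AARCH64_CPU_HAS_ALL_FEATURES.  This assumes AARCH64_FEATURE
   (base, extensions) simply ORs its arguments, as its uses above imply. */
static inline int
example_cpu_can_assemble_v8_4_dotprod (aarch64_feature_set cpu)
{
  return AARCH64_CPU_HAS_ALL_FEATURES (cpu,
                                       AARCH64_FEATURE_V8_4
                                       | AARCH64_FEATURE_DOTPROD);
}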
AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn. */
AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm. */
+ AARCH64_OPND_Va, /* AdvSIMD Vector Va. */
AARCH64_OPND_Vd, /* AdvSIMD Vector Vd. */
AARCH64_OPND_Vn, /* AdvSIMD Vector Vn. */
AARCH64_OPND_Vm, /* AdvSIMD Vector Vm. */
AARCH64_OPND_CRm, /* Co-processor register in CRm field. */
AARCH64_OPND_IDX, /* AdvSIMD EXT index operand. */
+ AARCH64_OPND_MASK, /* NZCV flags mask operand, e.g. in RMIF. */
AARCH64_OPND_IMM_VLSL,/* Immediate for shifting vector registers left. */
AARCH64_OPND_IMM_VLSR,/* Immediate for shifting vector registers right. */
AARCH64_OPND_SIMD_IMM,/* AdvSIMD modified immediate without shift. */
AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM. */
AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI. */
AARCH64_OPND_IMM, /* Immediate. */
+ AARCH64_OPND_IMM_2, /* Second immediate operand. */
AARCH64_OPND_UIMM3_OP1,/* Unsigned 3-bit immediate in the op1 field. */
AARCH64_OPND_UIMM3_OP2,/* Unsigned 3-bit immediate in the op2 field. */
AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field. */
AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate. */
AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate. */
AARCH64_OPND_SIMD_ADDR_SIMPLE,/* Address of ld/st multiple structures. */
+ AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate. */
AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed. */
AARCH64_OPND_SYSREG, /* System register operand. */
AARCH64_OPND_PRFOP, /* Prefetch operation. */
AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
+ AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0. */
AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0. */
AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0. */
+ AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270). */
+ AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90). */
AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate. */
AARCH64_OPND_SVE_LIMM, /* SVE logical immediate. */
AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV. */
AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd. */
AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5]. */
AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16]. */
+ AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16]. */
+ AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22. */
+ AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16]. */
AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn. */
AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP. */
AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn. */
AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt. */
AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
+ AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14]. */
};
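/* Illustrative sketch, not part of the header: the operand kinds are
   plain enumerators, so tools can dispatch on them directly.  The
   function below and the assembly forms in its comments are assumptions
   for illustration, not definitions from this header. */
static const char *
example_new_operand_description (enum aarch64_opnd kind)
{
  switch (kind)
    {
    case AARCH64_OPND_Va:       return "4th vector"; /* e.g. SM3SS1 Vd.4S, Vn.4S, Vm.4S, Va.4S. */
    case AARCH64_OPND_MASK:     return "NZCV mask";  /* e.g. RMIF Xn, #shift, #mask. */
    case AARCH64_OPND_SM3_IMM2: return "SM3 lane";   /* 2-bit lane in bits [14:13]. */
    default:                    return "other";
    }
}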
/* Qualifier constrains an operand.  It either specifies a variant of an
   operand type or limits values available to an operand type.  Some
   qualifiers constrain an operand to a particular value; such a use is
   only for the ease of operand encoding/decoding and qualifier sequence
   matching; such a use should not be applied widely; use the value
   constraint qualifiers for immediate operands wherever possible. */
+ AARCH64_OPND_QLF_V_4B,
AARCH64_OPND_QLF_V_8B,
AARCH64_OPND_QLF_V_16B,
AARCH64_OPND_QLF_V_2H,
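/* Illustrative note, not part of the header: the new 4B arrangement
   qualifies the grouped-byte Vm operand of the indexed dot product
   forms, e.g. (assumed syntax)  UDOT V0.2S, V1.8B, V2.4B[1]. */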
sve_size_hsd,
sve_size_sd,
testbranch,
+ cryptosm3,
+ cryptosm4,
+ dotproduct,
};
/* Opcode enumerators. */