X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=opcodes%2Farm-dis.c;h=79a3dc656a92b7e8bfc258f64d621d34f60273f9;hb=50d036364fb2a71b3ac9a0b0cdbe58296832a1b2;hp=a779138db0c267ac759d711240426f9c138ffc6e;hpb=c507f10b0711f24e1b82b8bd096e605317cf77fe;p=deliverable%2Fbinutils-gdb.git diff --git a/opcodes/arm-dis.c b/opcodes/arm-dis.c index a779138db0..79a3dc656a 100644 --- a/opcodes/arm-dis.c +++ b/opcodes/arm-dis.c @@ -1,5 +1,5 @@ /* Instruction printing code for the ARM - Copyright (C) 1994-2019 Free Software Foundation, Inc. + Copyright (C) 1994-2020 Free Software Foundation, Inc. Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) Modification by James G. Smith (jsmith@cygnus.co.uk) @@ -144,6 +144,152 @@ enum mve_instructions MVE_VBIC_IMM, MVE_VBIC_REG, MVE_VMOVX, + MVE_VMOVL, + MVE_VMOVN, + MVE_VMULL_INT, + MVE_VMULL_POLY, + MVE_VQDMULL_T1, + MVE_VQDMULL_T2, + MVE_VQMOVN, + MVE_VQMOVUN, + MVE_VADDV, + MVE_VMLADAV_T1, + MVE_VMLADAV_T2, + MVE_VMLALDAV, + MVE_VMLAS, + MVE_VADDLV, + MVE_VMLSDAV_T1, + MVE_VMLSDAV_T2, + MVE_VMLSLDAV, + MVE_VRMLALDAVH, + MVE_VRMLSLDAVH, + MVE_VQDMLADH, + MVE_VQRDMLADH, + MVE_VQDMLAH, + MVE_VQRDMLAH, + MVE_VQDMLASH, + MVE_VQRDMLASH, + MVE_VQDMLSDH, + MVE_VQRDMLSDH, + MVE_VQDMULH_T1, + MVE_VQRDMULH_T2, + MVE_VQDMULH_T3, + MVE_VQRDMULH_T4, + MVE_VDDUP, + MVE_VDWDUP, + MVE_VIWDUP, + MVE_VIDUP, + MVE_VCADD_FP, + MVE_VCADD_VEC, + MVE_VHCADD, + MVE_VCMLA_FP, + MVE_VCMUL_FP, + MVE_VQRSHL_T1, + MVE_VQRSHL_T2, + MVE_VQRSHRN, + MVE_VQRSHRUN, + MVE_VQSHL_T1, + MVE_VQSHL_T2, + MVE_VQSHLU_T3, + MVE_VQSHL_T4, + MVE_VQSHRN, + MVE_VQSHRUN, + MVE_VRSHL_T1, + MVE_VRSHL_T2, + MVE_VRSHR, + MVE_VRSHRN, + MVE_VSHL_T1, + MVE_VSHL_T2, + MVE_VSHL_T3, + MVE_VSHLC, + MVE_VSHLL_T1, + MVE_VSHLL_T2, + MVE_VSHR, + MVE_VSHRN, + MVE_VSLI, + MVE_VSRI, + MVE_VADC, + MVE_VABAV, + MVE_VABD_FP, + MVE_VABD_VEC, + MVE_VABS_FP, + MVE_VABS_VEC, + MVE_VADD_FP_T1, + MVE_VADD_FP_T2, + MVE_VADD_VEC_T1, + MVE_VADD_VEC_T2, + MVE_VSBC, + MVE_VSUB_FP_T1, + MVE_VSUB_FP_T2, + MVE_VSUB_VEC_T1, + MVE_VSUB_VEC_T2, + MVE_VAND, + MVE_VBRSR, + MVE_VCLS, + MVE_VCLZ, + MVE_VCTP, + MVE_VMAX, + MVE_VMAXA, + MVE_VMAXNM_FP, + MVE_VMAXNMA_FP, + MVE_VMAXNMV_FP, + MVE_VMAXNMAV_FP, + MVE_VMAXV, + MVE_VMAXAV, + MVE_VMIN, + MVE_VMINA, + MVE_VMINNM_FP, + MVE_VMINNMA_FP, + MVE_VMINNMV_FP, + MVE_VMINNMAV_FP, + MVE_VMINV, + MVE_VMINAV, + MVE_VMLA, + MVE_VMUL_FP_T1, + MVE_VMUL_FP_T2, + MVE_VMUL_VEC_T1, + MVE_VMUL_VEC_T2, + MVE_VMULH, + MVE_VRMULH, + MVE_VNEG_FP, + MVE_VNEG_VEC, + MVE_VPNOT, + MVE_VPSEL, + MVE_VQABS, + MVE_VQADD_T1, + MVE_VQADD_T2, + MVE_VQSUB_T1, + MVE_VQSUB_T2, + MVE_VQNEG, + MVE_VREV16, + MVE_VREV32, + MVE_VREV64, + MVE_LSLL, + MVE_LSLLI, + MVE_LSRL, + MVE_ASRL, + MVE_ASRLI, + MVE_SQRSHRL, + MVE_SQRSHR, + MVE_UQRSHL, + MVE_UQRSHLL, + MVE_UQSHL, + MVE_UQSHLL, + MVE_URSHRL, + MVE_URSHR, + MVE_SRSHRL, + MVE_SRSHR, + MVE_SQSHLL, + MVE_SQSHL, + MVE_CINC, + MVE_CINV, + MVE_CNEG, + MVE_CSINC, + MVE_CSINV, + MVE_CSET, + MVE_CSETM, + MVE_CSNEG, + MVE_CSEL, MVE_NONE }; @@ -176,10 +322,12 @@ enum mve_unpredictable enum mve_undefined { + UNDEF_SIZE, /* undefined size. */ UNDEF_SIZE_0, /* undefined because size == 0. */ UNDEF_SIZE_2, /* undefined because size == 2. */ UNDEF_SIZE_3, /* undefined because size == 3. */ UNDEF_SIZE_LE_1, /* undefined because size <= 1. */ + UNDEF_SIZE_NOT_0, /* undefined because size != 0. */ UNDEF_SIZE_NOT_2, /* undefined because size != 2. */ UNDEF_SIZE_NOT_3, /* undefined because size != 3. 
*/ UNDEF_NOT_UNS_SIZE_0, /* undefined because U == 0 and @@ -195,6 +343,7 @@ enum mve_undefined op2 == 0 and op1 == (0 or 1). */ UNDEF_OP_0_BAD_CMODE, /* undefined because op == 0 and cmode in {0xx1, x0x1}. */ + UNDEF_XCHG_UNS, /* undefined because X == 1 and U == 1. */ UNDEF_NONE /* no undefined behavior. */ }; @@ -206,6 +355,16 @@ struct opcode32 const char * assembler; /* How to disassemble this insn. */ }; +struct cdeopcode32 +{ + arm_feature_set arch; /* Architecture defining this insn. */ + uint8_t coproc_shift; /* coproc is this far into op. */ + uint16_t coproc_mask; /* Length of coproc field in op. */ + unsigned long value; /* If arch is 0 then value is a sentinel. */ + unsigned long mask; /* Recognise insn if (op & mask) == value. */ + const char * assembler; /* How to disassemble this insn. */ +}; + /* MVE opcodes. */ struct mopcode32 @@ -247,6 +406,7 @@ struct opcode16 %% % %c print condition code (always bits 28-31 in ARM mode) + %b print condition code allowing cp_num == 9 %q print shifter argument %u print condition code (unconditional in ARM mode, UNPREDICTABLE if not AL in Thumb) @@ -310,6 +470,75 @@ enum opcode_sentinel_enum /* Common coprocessor opcodes shared between Arm and Thumb-2. */ +/* print_insn_cde recognizes the following format control codes: + + %% % + + %a print 'a' iff bit 28 is 1 + %p print bits 8-10 as coprocessor + %d print as decimal + %r print as an ARM register + %n print as an ARM register but r15 is APSR_nzcv + %T print as an ARM register + 1 + %R as %r but r13 is UNPREDICTABLE + %S as %r but rX where X > 10 is UNPREDICTABLE + %j print immediate taken from bits (16..21,7,0..5) + %k print immediate taken from bits (20..21,7,0..5). + %l print immediate taken from bits (20..22,7,4..5). */ + +/* At the moment there is only one valid position for the coprocessor number, + and hence that's encoded in the macro below. */ +#define CDE_OPCODE(ARCH, VALUE, MASK, ASM) \ + { ARCH, 8, 7, VALUE, MASK, ASM } +static const struct cdeopcode32 cde_opcodes[] = +{ + /* Custom Datapath Extension instructions. 
*/ + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee000000, 0xefc00840, + "cx1%a\t%p, %12-15n, #%0-5,7,16-21d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee000040, 0xefc00840, + "cx1d%a\t%p, %12-15S, %12-15T, #%0-5,7,16-21d"), + + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee400000, 0xefc00840, + "cx2%a\t%p, %12-15n, %16-19n, #%0-5,7,20-21d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee400040, 0xefc00840, + "cx2d%a\t%p, %12-15S, %12-15T, %16-19n, #%0-5,7,20-21d"), + + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee800000, 0xef800840, + "cx3%a\t%p, %0-3n, %16-19n, %12-15n, #%4-5,7,20-22d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xee800040, 0xef800840, + "cx3d%a\t%p, %0-3S, %0-3T, %16-19n, %12-15n, #%4-5,7,20-22d"), + + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec200000, 0xeeb00840, + "vcx1%a\t%p, %12-15,22V, #%0-5,7,16-19d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec200040, 0xeeb00840, + "vcx1%a\t%p, %12-15,22V, #%0-5,7,16-19,24d"), + + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec300000, 0xeeb00840, + "vcx2%a\t%p, %12-15,22V, %0-3,5V, #%4,7,16-19d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec300040, 0xeeb00840, + "vcx2%a\t%p, %12-15,22V, %0-3,5V, #%4,7,16-19,24d"), + + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec800000, 0xee800840, + "vcx3%a\t%p, %12-15,22V, %16-19,7V, %0-3,5V, #%4,20-21d"), + CDE_OPCODE (ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE), + 0xec800040, 0xee800840, + "vcx3%a\t%p, %12-15,22V, %16-19,7V, %0-3,5V, #%4,20-21,24d"), + + CDE_OPCODE (ARM_FEATURE_CORE_LOW (0), 0, 0, 0) + +}; + static const struct sopcode32 coprocessor_opcodes[] = { /* XScale instructions. */ @@ -660,8 +889,10 @@ static const struct sopcode32 coprocessor_opcodes[] = /* Floating point coprocessor (VFP) instructions. 
*/ {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0ee00a10, 0x0fff0fff, "vmsr%c\tfpsid, %12-15r"}, - {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), + {ANY, ARM_FEATURE (0, ARM_EXT2_V8_1M_MAIN, FPU_VFP_EXT_V1xD), 0x0ee10a10, 0x0fff0fff, "vmsr%c\tfpscr, %12-15r"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0ee20a10, 0x0fff0fff, "vmsr%c\tfpscr_nzcvqc, %12-15r"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0ee60a10, 0x0fff0fff, "vmsr%c\tmvfr1, %12-15r"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), @@ -674,12 +905,22 @@ static const struct sopcode32 coprocessor_opcodes[] = 0x0ee90a10, 0x0fff0fff, "vmsr%c\tfpinst, %12-15r\t@ Impl def"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0eea0a10, 0x0fff0fff, "vmsr%c\tfpinst2, %12-15r\t@ Impl def"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + 0x0eec0a10, 0x0fff0fff, "vmsr%c\tvpr, %12-15r"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + 0x0eed0a10, 0x0fff0fff, "vmsr%c\tp0, %12-15r"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0eee0a10, 0x0fff0fff, "vmsr%c\tfpcxt_ns, %12-15r"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0eef0a10, 0x0fff0fff, "vmsr%c\tfpcxt_s, %12-15r"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0ef00a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpsid"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0ef1fa10, 0x0fffffff, "vmrs%c\tAPSR_nzcv, fpscr"}, - {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), + {ANY, ARM_FEATURE (0, ARM_EXT2_V8_1M_MAIN, FPU_VFP_EXT_V1xD), 0x0ef10a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpscr"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0ef20a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpscr_nzcvqc"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8), 0x0ef50a10, 0x0fff0fff, "vmrs%c\t%12-15r, mvfr2"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), @@ -692,6 +933,14 @@ static const struct sopcode32 coprocessor_opcodes[] = 0x0ef90a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpinst\t@ Impl def"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD), 0x0efa0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpinst2\t@ Impl def"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + 0x0efc0a10, 0x0fff0fff, "vmrs%c\t%12-15r, vpr"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + 0x0efd0a10, 0x0fff0fff, "vmrs%c\t%12-15r, p0"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0efe0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpcxt_ns"}, + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0x0eff0a10, 0x0fff0fff, "vmrs%c\t%12-15r, fpcxt_s"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1), 0x0e000b10, 0x0fd00fff, "vmov%c.32\t%z2[%21d], %12-15r"}, {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_V1), @@ -1015,38 +1264,7 @@ static const struct sopcode32 coprocessor_opcodes[] = {ANY, ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8), 0xfeb80b40, 0xffbc0fd0, "vrint%16-17?mpna%u.f64\t%z1, %z0"}, - /* Generic coprocessor instructions. 
*/ {ANY, ARM_FEATURE_CORE_LOW (0), SENTINEL_GENERIC_START, 0, "" }, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E), - 0x0c400000, 0x0ff00000, "mcrr%c\t%8-11d, %4-7d, %12-15R, %16-19r, cr%0-3d"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E), - 0x0c500000, 0x0ff00000, - "mrrc%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0e000000, 0x0f000010, - "cdp%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0e10f010, 0x0f10f010, - "mrc%c\t%8-11d, %21-23d, APSR_nzcv, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0e100010, 0x0f100010, - "mrc%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0e000010, 0x0f100010, - "mcr%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0c000000, 0x0e100000, "stc%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), - 0x0c100000, 0x0e100000, "ldc%22'l%c\t%8-11d, cr%12-15d, %A"}, - - /* V6 coprocessor instructions. */ - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6), - 0xfc500000, 0xfff00000, - "mrrc2%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6), - 0xfc400000, 0xfff00000, - "mcrr2%c\t%8-11d, %4-7d, %12-15R, %16-19R, cr%0-3d"}, - /* ARMv8.3 AdvSIMD instructions in the space of coprocessor 8. */ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A), 0xfc800800, 0xfeb00f10, "vcadd%c.f16\t%12-15,22V, %16-19,7V, %0-3,5V, #%24?29%24'70"}, @@ -1069,11 +1287,15 @@ static const struct sopcode32 coprocessor_opcodes[] = {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A), 0xfea00800, 0xffa00f10, "vcmla%c.f32\t%12-15,22V, %16-19,7V, %0-3,5D[0], #%20?21%20?780"}, + /* BFloat16 instructions. */ + {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0x0eb30940, 0x0fbf0f50, "vcvt%7?tb%b.bf16.f32\t%y1, %y0"}, + /* Dot Product instructions in the space of coprocessor 13. */ {ANY, ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD), 0xfc200d00, 0xffb00f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3,5V"}, {ANY, ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD), - 0xfe000d00, 0xff000f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3D[%5?10]"}, + 0xfe200d00, 0xff200f00, "v%4?usdot.%4?us8\t%12-15,22V, %16-19,7V, %0-3D[%5?10]"}, /* ARMv8.2 FMAC Long instructions in the space of coprocessor 8. */ {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_V8_2A), @@ -1093,21 +1315,6 @@ static const struct sopcode32 coprocessor_opcodes[] = {ANY, ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_V8_2A), 0xfe100850, 0xffb00f50, "vfmsl.f16\t%12-15,22Q, d%16-19,7d, d%0-2d[%3,5d]"}, - /* V5 coprocessor instructions. */ - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), - 0xfc100000, 0xfe100000, "ldc2%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), - 0xfc000000, 0xfe100000, "stc2%22'l%c\t%8-11d, cr%12-15d, %A"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), - 0xfe000000, 0xff000010, - "cdp2%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), - 0xfe000010, 0xff100010, - "mcr2%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"}, - {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), - 0xfe100010, 0xff100010, - "mrc2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, - /* ARMv8.2 half-precision Floating point coprocessor 9 (VFP) instructions. cp_num: bit <11:8> == 0b1001. cond: bit <31:28> == 0b1110, otherwise, it's UNPREDICTABLE. 
*/ @@ -1189,6 +1396,60 @@ static const struct sopcode32 coprocessor_opcodes[] = {ANY, ARM_FEATURE_CORE_LOW (0), 0, 0, 0} }; +/* Generic coprocessor instructions. These are only matched if a more specific + SIMD or co-processor instruction does not match first. */ + +static const struct sopcode32 generic_coprocessor_opcodes[] = +{ + /* Generic coprocessor instructions. */ + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E), + 0x0c400000, 0x0ff00000, "mcrr%c\t%8-11d, %4-7d, %12-15R, %16-19r, cr%0-3d"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5E), + 0x0c500000, 0x0ff00000, + "mrrc%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0e000000, 0x0f000010, + "cdp%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0e10f010, 0x0f10f010, + "mrc%c\t%8-11d, %21-23d, APSR_nzcv, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0e100010, 0x0f100010, + "mrc%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0e000010, 0x0f100010, + "mcr%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0c000000, 0x0e100000, "stc%22'l%c\t%8-11d, cr%12-15d, %A"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V2), + 0x0c100000, 0x0e100000, "ldc%22'l%c\t%8-11d, cr%12-15d, %A"}, + + /* V6 coprocessor instructions. */ + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6), + 0xfc500000, 0xfff00000, + "mrrc2%c\t%8-11d, %4-7d, %12-15Ru, %16-19Ru, cr%0-3d"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V6), + 0xfc400000, 0xfff00000, + "mcrr2%c\t%8-11d, %4-7d, %12-15R, %16-19R, cr%0-3d"}, + + /* V5 coprocessor instructions. */ + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), + 0xfc100000, 0xfe100000, "ldc2%22'l%c\t%8-11d, cr%12-15d, %A"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), + 0xfc000000, 0xfe100000, "stc2%22'l%c\t%8-11d, cr%12-15d, %A"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), + 0xfe000000, 0xff000010, + "cdp2%c\t%8-11d, %20-23d, cr%12-15d, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), + 0xfe000010, 0xff100010, + "mcr2%c\t%8-11d, %21-23d, %12-15R, cr%16-19d, cr%0-3d, {%5-7d}"}, + {ANY, ARM_FEATURE_CORE_LOW (ARM_EXT_V5), + 0xfe100010, 0xff100010, + "mrc2%c\t%8-11d, %21-23d, %12-15r, cr%16-19d, cr%0-3d, {%5-7d}"}, + + {ANY, ARM_FEATURE_CORE_LOW (0), 0, 0, 0} +}; + /* Neon opcode table: This does not encode the top byte -- that is checked by the print_insn_neon routine, as it depends on whether we are doing thumb32 or arm32 disassembly. */ @@ -1233,17 +1494,17 @@ static const struct opcode32 neon_opcodes[] = /* Data transfer between ARM and NEON registers. 
*/ {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0e800b10, 0x1ff00f70, "vdup%c.32\t%16-19,7D, %12-15r"}, + 0x0e800b10, 0x0ff00f70, "vdup%c.32\t%16-19,7D, %12-15r"}, {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0e800b30, 0x1ff00f70, "vdup%c.16\t%16-19,7D, %12-15r"}, + 0x0e800b30, 0x0ff00f70, "vdup%c.16\t%16-19,7D, %12-15r"}, {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0ea00b10, 0x1ff00f70, "vdup%c.32\t%16-19,7Q, %12-15r"}, + 0x0ea00b10, 0x0ff00f70, "vdup%c.32\t%16-19,7Q, %12-15r"}, {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0ea00b30, 0x1ff00f70, "vdup%c.16\t%16-19,7Q, %12-15r"}, + 0x0ea00b30, 0x0ff00f70, "vdup%c.16\t%16-19,7Q, %12-15r"}, {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0ec00b10, 0x1ff00f70, "vdup%c.8\t%16-19,7D, %12-15r"}, + 0x0ec00b10, 0x0ff00f70, "vdup%c.8\t%16-19,7D, %12-15r"}, {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), - 0x0ee00b10, 0x1ff00f70, "vdup%c.8\t%16-19,7Q, %12-15r"}, + 0x0ee00b10, 0x0ff00f70, "vdup%c.8\t%16-19,7Q, %12-15r"}, /* Move data element to all lanes. */ {ARM_FEATURE_COPROC (FPU_NEON_EXT_V1), @@ -1275,6 +1536,34 @@ static const struct opcode32 neon_opcodes[] = {ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST), 0xf2300c10, 0xffb00f10, "vfms%c.f16\t%12-15,22R, %16-19,7R, %0-3,5R"}, + /* BFloat16 instructions. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xfc000d00, 0xffb00f10, "vdot.bf16\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xfe000d00, 0xffb00f10, "vdot.bf16\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xfc000c40, 0xffb00f50, "vmmla.bf16\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xf3b60640, 0xffbf0fd0, "vcvt%c.bf16.f32\t%12-15,22D, %0-3,5Q"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xfc300810, 0xffb00f10, "vfma%6?tb.bf16\t%12-15,22Q, %16-19,7Q, %0-3,5Q"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16), + 0xfe300810, 0xffb00f10, "vfma%6?tb.bf16\t%12-15,22Q, %16-19,7Q, %0-2D[%3,5d]"}, + + /* Matrix Multiply instructions. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfc200c40, 0xffb00f50, "vsmmla.s8\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfc200c50, 0xffb00f50, "vummla.u8\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfca00c40, 0xffb00f50, "vusmmla.s8\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfca00d00, 0xffb00f10, "vusdot.s8\t%12-15,22R, %16-19,7R, %0-3,5R"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfe800d00, 0xffb00f10, "vusdot.s8\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM), + 0xfe800d10, 0xffb00f10, "vsudot.u8\t%12-15,22R, %16-19,7R, d%0-3d[%5d]"}, + /* Two registers, miscellaneous. */ {ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8), 0xf3ba0400, 0xffbf0c10, "vrint%7-9?p?m?zaxn%u.f32\t%12-15,22R, %0-3,5R"}, @@ -1903,6 +2192,8 @@ static const struct opcode32 neon_opcodes[] = %d print addr mode of MVE vldr[bhw] and vstr[bhw] %u print 'U' (unsigned) or 'S' for various mve instructions %i print MVE predicate(s) for vpt and vpst + %j print a 5-bit immediate from hw2[14:12,7:6] + %k print 48 if the 7th position bit is set else print 64. 
%m print rounding mode for vcvt and vrint %n print vector comparison code for predicated instruction %s print size for various vcvt instructions @@ -1913,319 +2204,653 @@ static const struct opcode32 neon_opcodes[] = %B print v{st,ld}[24] any one operands %E print vmov, vmvn, vorr, vbic encoded constant %N print generic index for vmov + %T print bottom ('b') or top ('t') of source register + %X print exchange field in vmla* instructions %r print as an ARM register %d print the bitfield in decimal + %A print accumulate or not + %c print bitfield as a condition code + %C print bitfield as an inverted condition code %Q print as a MVE Q register %F print as a MVE S register %Z as %<>r but r15 is ZR instead of PC and r13 is UNPREDICTABLE + + %S as %<>r but r15 or r13 is UNPREDICTABLE %s print size for vector predicate & non VMOV instructions + %I print carry flag or not %i print immediate for vstr/vldr reg +/- imm + %h print high half of 64-bit destination reg %k print immediate for vector conversion instruction + %l print low half of 64-bit destination reg + %o print rotate value for vcmul + %u print immediate value for vddup/vdwdup %x print the bitfield in hex. - */ + */ static const struct mopcode32 mve_opcodes[] = { /* MVE. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPST, 0xfe310f4d, 0xffbf1fff, "vpst%i" }, /* Floating point VPT T1. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VPT_FP_T1, 0xee310f00, 0xefb10f50, "vpt%i.f%28s\t%n, %17-19Q, %1-3,5Q"}, /* Floating point VPT T2. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VPT_FP_T2, 0xee310f40, 0xefb10f50, "vpt%i.f%28s\t%n, %17-19Q, %0-3Z"}, /* Vector VPT T1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T1, 0xfe010f00, 0xff811f51, "vpt%i.i%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VPT T2. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T2, 0xfe010f01, 0xff811f51, "vpt%i.u%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VPT T3. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T3, 0xfe011f00, 0xff811f50, "vpt%i.s%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VPT T4. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T4, 0xfe010f40, 0xff811f70, "vpt%i.i%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VPT T5. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T5, 0xfe010f60, 0xff811f70, "vpt%i.u%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VPT T6. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VPT_VEC_T6, 0xfe011f40, 0xff811f50, "vpt%i.s%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VBIC immediate. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VBIC_IMM, 0xef800070, 0xefb81070, "vbic%v.i%8-11s\t%13-15,22Q, %E"}, /* Vector VBIC register. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VBIC_REG, 0xef100150, 0xffb11f51, "vbic%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + /* Vector VABAV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VABAV, + 0xee800f01, 0xefc10f51, + "vabav%v.%u%20-21s\t%12-15r, %17-19,7Q, %1-3,5Q"}, + + /* Vector VABD floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VABD_FP, + 0xff200d40, 0xffa11f51, + "vabd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VABD. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VABD_VEC, + 0xef000740, 0xef811f51, + "vabd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VABS floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VABS_FP, + 0xFFB10740, 0xFFB31FD1, + "vabs%v.f%18-19s\t%13-15,22Q, %1-3,5Q"}, + /* Vector VABS. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VABS_VEC, + 0xffb10340, 0xffb31fd1, + "vabs%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VADD floating point T1. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VADD_FP_T1, + 0xef000d40, 0xffa11f51, + "vadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + /* Vector VADD floating point T2. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VADD_FP_T2, + 0xee300f40, 0xefb11f70, + "vadd%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + /* Vector VADD T1. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VADD_VEC_T1, + 0xef000840, 0xff811f51, + "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + /* Vector VADD T2. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VADD_VEC_T2, + 0xee010f40, 0xff811f70, + "vadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VADDLV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VADDLV, + 0xee890f00, 0xef8f1fd1, + "vaddlv%5A%v.%u32\t%13-15l, %20-22h, %1-3Q"}, + + /* Vector VADDV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VADDV, + 0xeef10f00, 0xeff31fd1, + "vaddv%5A%v.%u%18-19s\t%13-15l, %1-3Q"}, + + /* Vector VADC. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VADC, + 0xee300f00, 0xffb10f51, + "vadc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VAND. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VAND, + 0xef000150, 0xffb11f51, + "vand%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VBRSR register. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VBRSR, + 0xfe011e60, 0xff811f70, + "vbrsr%v.%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VCADD floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VCADD_FP, + 0xfc800840, 0xfea11f51, + "vcadd%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%24o"}, + + /* Vector VCADD. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VCADD_VEC, + 0xfe000f00, 0xff810f51, + "vcadd%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"}, + + /* Vector VCLS. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VCLS, + 0xffb00440, 0xffb31fd1, + "vcls%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VCLZ. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VCLZ, + 0xffb004c0, 0xffb31fd1, + "vclz%v.i%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VCMLA. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VCMLA_FP, + 0xfc200840, 0xfe211f51, + "vcmla%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%23-24o"}, + /* Vector VCMP floating point T1. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCMP_FP_T1, 0xee310f00, 0xeff1ef50, "vcmp%v.f%28s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VCMP floating point T2. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCMP_FP_T2, 0xee310f40, 0xeff1ef50, "vcmp%v.f%28s\t%n, %17-19Q, %0-3Z"}, /* Vector VCMP T1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T1, 0xfe010f00, 0xffc1ff51, "vcmp%v.i%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VCMP T2. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T2, 0xfe010f01, 0xffc1ff51, "vcmp%v.u%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VCMP T3. 
*/ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T3, 0xfe011f00, 0xffc1ff50, "vcmp%v.s%20-21s\t%n, %17-19Q, %1-3,5Q"}, /* Vector VCMP T4. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T4, 0xfe010f40, 0xffc1ff70, "vcmp%v.i%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VCMP T5. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T5, 0xfe010f60, 0xffc1ff70, "vcmp%v.u%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VCMP T6. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VCMP_VEC_T6, 0xfe011f40, 0xffc1ff50, "vcmp%v.s%20-21s\t%n, %17-19Q, %0-3Z"}, /* Vector VDUP. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VDUP, 0xeea00b10, 0xffb10f5f, "vdup%v.%5,22s\t%17-19,7Q, %12-15r"}, /* Vector VEOR. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VEOR, 0xff000150, 0xffd11f51, "veor%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VFMA, vector * scalar. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VFMA_FP_SCALAR, 0xee310e40, 0xefb11f70, "vfma%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, /* Vector VFMA floating point. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VFMA_FP, 0xef000c50, 0xffa11f51, "vfma%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VFMS floating point. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VFMS_FP, 0xef200c50, 0xffa11f51, "vfms%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VFMAS, vector * scalar. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VFMAS_FP_SCALAR, 0xee311e40, 0xefb11f70, "vfmas%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, /* Vector VHADD T1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VHADD_T1, 0xef000040, 0xef811f51, "vhadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VHADD T2. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VHADD_T2, 0xee000f40, 0xef811f70, "vhadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, /* Vector VHSUB T1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VHSUB_T1, 0xef000240, 0xef811f51, "vhsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VHSUB T2. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VHSUB_T2, 0xee001f40, 0xef811f70, "vhsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + /* Vector VCMUL. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VCMUL_FP, + 0xee300e00, 0xefb10f50, + "vcmul%v.f%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%0,12o"}, + + /* Vector VCTP. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VCTP, + 0xf000e801, 0xffc0ffff, + "vctp%v.%20-21s\t%16-19r"}, + /* Vector VDUP. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VDUP, 0xeea00b10, 0xffb10f5f, "vdup%v.%5,22s\t%17-19,7Q, %12-15r"}, /* Vector VRHADD. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VRHADD, 0xef000140, 0xef811f51, "vrhadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VCVT. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCVT_FP_FIX_VEC, 0xef800c50, 0xef801cd1, "vcvt%v.%s\t%13-15,22Q, %1-3,5Q, #%16-21k"}, /* Vector VCVT. 
*/ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCVT_BETWEEN_FP_INT, 0xffb30640, 0xffb31e51, "vcvt%v.%s\t%13-15,22Q, %1-3,5Q"}, /* Vector VCVT between single and half-precision float, bottom half. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCVT_FP_HALF_FP, 0xee3f0e01, 0xefbf1fd1, "vcvtb%v.%s\t%13-15,22Q, %1-3,5Q"}, /* Vector VCVT between single and half-precision float, top half. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCVT_FP_HALF_FP, 0xee3f1e01, 0xefbf1fd1, "vcvtt%v.%s\t%13-15,22Q, %1-3,5Q"}, /* Vector VCVT. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VCVT_FROM_FP_TO_INT, 0xffb30040, 0xffb31c51, "vcvt%m%v.%s\t%13-15,22Q, %1-3,5Q"}, + /* Vector VDDUP. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VDDUP, + 0xee011f6e, 0xff811f7e, + "vddup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"}, + + /* Vector VDWDUP. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VDWDUP, + 0xee011f60, 0xff811f70, + "vdwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"}, + + /* Vector VHCADD. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VHCADD, + 0xee000f00, 0xff810f51, + "vhcadd%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q, #%12o"}, + + /* Vector VIWDUP. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VIWDUP, + 0xee010f60, 0xff811f70, + "viwdup%v.u%20-21s\t%13-15,22Q, %17-19l, %1-3h, #%0,7u"}, + + /* Vector VIDUP. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VIDUP, + 0xee010f6e, 0xff811f7e, + "vidup%v.u%20-21s\t%13-15,22Q, %17-19l, #%0,7u"}, + /* Vector VLD2. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLD2, 0xfc901e00, 0xff901e5f, "vld2%5d.%7-8s\t%B, [%16-19r]%w"}, /* Vector VLD4. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLD4, 0xfc901e01, 0xff901e1f, "vld4%5-6d.%7-8s\t%B, [%16-19r]%w"}, /* Vector VLDRB gather load. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRB_GATHER_T1, 0xec900e00, 0xefb01e50, "vldrb%v.%u%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q]"}, /* Vector VLDRH gather load. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRH_GATHER_T2, 0xec900e10, 0xefb01e50, "vldrh%v.%u%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VLDRW gather load. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRW_GATHER_T3, 0xfc900f40, 0xffb01fd0, "vldrw%v.u32\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VLDRD gather load. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRD_GATHER_T4, 0xec900fd0, 0xefb01fd0, "vldrd%v.u64\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VLDRW gather load. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRW_GATHER_T5, 0xfd101e00, 0xff111f00, "vldrw%v.u32\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"}, /* Vector VLDRD gather load, variant T6. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRD_GATHER_T6, 0xfd101f00, 0xff111f00, "vldrd%v.u64\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"}, /* Vector VLDRB. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRB_T1, 0xec100e00, 0xee581e00, "vldrb%v.%u%7-8s\t%13-15Q, %d"}, /* Vector VLDRH. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRH_T2, 0xec180e00, 0xee581e00, "vldrh%v.%u%7-8s\t%13-15Q, %d"}, /* Vector VLDRB unsigned, variant T5. 
*/ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRB_T5, 0xec101e00, 0xfe101f80, "vldrb%v.u8\t%13-15,22Q, %d"}, /* Vector VLDRH unsigned, variant T6. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRH_T6, 0xec101e80, 0xfe101f80, "vldrh%v.u16\t%13-15,22Q, %d"}, /* Vector VLDRW unsigned, variant T7. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VLDRW_T7, 0xec101f00, 0xfe101f80, "vldrw%v.u32\t%13-15,22Q, %d"}, + /* Vector VMAX. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMAX, + 0xef000640, 0xef811f51, + "vmax%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMAXA. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMAXA, + 0xee330e81, 0xffb31fd1, + "vmaxa%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VMAXNM floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMAXNM_FP, + 0xff000f50, 0xffa11f51, + "vmaxnm%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMAXNMA floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMAXNMA_FP, + 0xee3f0e81, 0xefbf1fd1, + "vmaxnma%v.f%28s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VMAXNMV floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMAXNMV_FP, + 0xeeee0f00, 0xefff0fd1, + "vmaxnmv%v.f%28s\t%12-15r, %1-3,5Q"}, + + /* Vector VMAXNMAV floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMAXNMAV_FP, + 0xeeec0f00, 0xefff0fd1, + "vmaxnmav%v.f%28s\t%12-15r, %1-3,5Q"}, + + /* Vector VMAXV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMAXV, + 0xeee20f00, 0xeff30fd1, + "vmaxv%v.%u%18-19s\t%12-15r, %1-3,5Q"}, + + /* Vector VMAXAV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMAXAV, + 0xeee00f00, 0xfff30fd1, + "vmaxav%v.s%18-19s\t%12-15r, %1-3,5Q"}, + + /* Vector VMIN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMIN, + 0xef000650, 0xef811f51, + "vmin%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMINA. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMINA, + 0xee331e81, 0xffb31fd1, + "vmina%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VMINNM floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMINNM_FP, + 0xff200f50, 0xffa11f51, + "vminnm%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMINNMA floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMINNMA_FP, + 0xee3f1e81, 0xefbf1fd1, + "vminnma%v.f%28s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VMINNMV floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMINNMV_FP, + 0xeeee0f80, 0xefff0fd1, + "vminnmv%v.f%28s\t%12-15r, %1-3,5Q"}, + + /* Vector VMINNMAV floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMINNMAV_FP, + 0xeeec0f80, 0xefff0fd1, + "vminnmav%v.f%28s\t%12-15r, %1-3,5Q"}, + + /* Vector VMINV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMINV, + 0xeee20f80, 0xeff30fd1, + "vminv%v.%u%18-19s\t%12-15r, %1-3,5Q"}, + + /* Vector VMINAV. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMINAV, + 0xeee00f80, 0xfff30fd1, + "vminav%v.s%18-19s\t%12-15r, %1-3,5Q"}, + + /* Vector VMLA. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLA, + 0xee010e40, 0xef811f70, + "vmla%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VMLALDAV. Note must appear before VMLADAV due to instruction + opcode aliasing. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLALDAV, + 0xee801e00, 0xef801f51, + "vmlaldav%5Ax%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLALDAV, + 0xee800e00, 0xef801f51, + "vmlalv%5A%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + /* Vector VMLAV T1 variant, same as VMLADAV but with X == 0. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLADAV_T1, + 0xeef00e00, 0xeff01f51, + "vmlav%5A%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"}, + + /* Vector VMLAV T2 variant, same as VMLADAV but with X == 0. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLADAV_T2, + 0xeef00f00, 0xeff11f51, + "vmlav%5A%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"}, + + /* Vector VMLADAV T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLADAV_T1, + 0xeef01e00, 0xeff01f51, + "vmladav%5Ax%v.%u%16s\t%13-15l, %17-19,7Q, %1-3Q"}, + + /* Vector VMLADAV T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLADAV_T2, + 0xeef01f00, 0xeff11f51, + "vmladav%5Ax%v.%u8\t%13-15l, %17-19,7Q, %1-3Q"}, + + /* Vector VMLAS. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLAS, + 0xee011e40, 0xef811f70, + "vmlas%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VRMLSLDAVH. Note must appear before VMLSDAV due to instruction + opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRMLSLDAVH, + 0xfe800e01, 0xff810f51, + "vrmlsldavh%5A%X%v.s32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + /* Vector VMLSLDAV. Note must appear before VMLSDAV due to instruction + opcdoe aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLSLDAV, + 0xee800e01, 0xff800f51, + "vmlsldav%5A%X%v.%u%16s\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + /* Vector VMLSDAV T1 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLSDAV_T1, + 0xeef00e01, 0xfff00f51, + "vmlsdav%5A%X%v.s%16s\t%13-15l, %17-19,7Q, %1-3Q"}, + + /* Vector VMLSDAV T2 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMLSDAV_T2, + 0xfef00e01, 0xfff10f51, + "vmlsdav%5A%X%v.s8\t%13-15l, %17-19,7Q, %1-3Q"}, + /* Vector VMOV between gpr and half precision register, op == 0. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VMOV_HFP_TO_GP, 0xee000910, 0xfff00f7f, "vmov.f16\t%7,16-19F, %12-15r"}, /* Vector VMOV between gpr and half precision register, op == 1. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VMOV_HFP_TO_GP, 0xee100910, 0xfff00f7f, "vmov.f16\t%12-15r, %7,16-19F"}, - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VMOV_GP_TO_VEC_LANE, 0xee000b10, 0xff900f1f, "vmov%c.%5-6,21-22s\t%17-19,7Q[%N], %12-15r"}, @@ -2233,179 +2858,760 @@ static const struct mopcode32 mve_opcodes[] = /* Vector VORR immediate to vector. NOTE: MVE_VORR_IMM must appear in the table before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VORR_IMM, 0xef800050, 0xefb810f0, "vorr%v.i%8-11s\t%13-15,22Q, %E"}, + /* Vector VQSHL T2 Variant. + NOTE: MVE_VQSHL_T2 must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHL_T2, + 0xef800750, 0xef801fd1, + "vqshl%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VQSHLU T3 Variant + NOTE: MVE_VQSHL_T2 must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. 
*/ + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHLU_T3, + 0xff800650, 0xff801fd1, + "vqshlu%v.s%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VRSHR + NOTE: MVE_VRSHR must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRSHR, + 0xef800250, 0xef801fd1, + "vrshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VSHL. + NOTE: MVE_VSHL must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHL_T1, + 0xef800550, 0xff801fd1, + "vshl%v.i%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VSHR + NOTE: MVE_VSHR must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHR, + 0xef800050, 0xef801fd1, + "vshr%v.%u%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VSLI + NOTE: MVE_VSLI must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSLI, + 0xff800550, 0xff801fd1, + "vsli%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VSRI + NOTE: MVE_VSRI must appear in the table before + before MVE_VMOV_IMM_TO_VEC due to opcode aliasing. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSRI, + 0xff800450, 0xff801fd1, + "vsri%v.%19-21s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VMOV immediate to vector, + undefinded for cmode == 1111 */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMVN_IMM, 0xef800f70, 0xefb81ff0, UNDEFINED_INSTRUCTION}, + /* Vector VMOV immediate to vector, - cmode == 11x1 -> VMVN which is UNDEFINED - for such a cmode. */ - {ARM_FEATURE_COPROC (FPU_MVE), - MVE_VMVN_IMM, 0xef800d50, 0xefb81dd0, UNDEFINED_INSTRUCTION}, + cmode == 1101 */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMOV_IMM_TO_VEC, 0xef800d50, 0xefb81fd0, + "vmov%v.%5,8-11s\t%13-15,22Q, %E"}, /* Vector VMOV immediate to vector. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMOV_IMM_TO_VEC, 0xef800050, 0xefb810d0, "vmov%v.%5,8-11s\t%13-15,22Q, %E"}, /* Vector VMOV two 32-bit lanes to two gprs, idx = 0. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMOV2_VEC_LANE_TO_GP, 0xec000f00, 0xffb01ff0, "vmov%c\t%0-3r, %16-19r, %13-15,22Q[2], %13-15,22Q[0]"}, /* Vector VMOV two 32-bit lanes to two gprs, idx = 1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMOV2_VEC_LANE_TO_GP, 0xec000f10, 0xffb01ff0, "vmov%c\t%0-3r, %16-19r, %13-15,22Q[3], %13-15,22Q[1]"}, /* Vector VMOV Two gprs to two 32-bit lanes, idx = 0. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMOV2_GP_TO_VEC_LANE, 0xec100f00, 0xffb01ff0, "vmov%c\t%13-15,22Q[2], %13-15,22Q[0], %0-3r, %16-19r"}, /* Vector VMOV Two gprs to two 32-bit lanes, idx = 1. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMOV2_GP_TO_VEC_LANE, 0xec100f10, 0xffb01ff0, "vmov%c\t%13-15,22Q[2], %13-15,22Q[0], %0-3r, %16-19r"}, /* Vector VMOV Vector lane to gpr. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VMOV_VEC_LANE_TO_GP, 0xee100b10, 0xff100f1f, "vmov%c.%u%5-6,21-22s\t%12-15r, %17-19,7Q[%N]"}, + /* Vector VSHLL T1 Variant. Note: VSHLL T1 must appear before MVE_VMOVL due + to instruction opcode aliasing. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHLL_T1, + 0xeea00f40, 0xefa00fd1, + "vshll%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VMOVL long. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMOVL, + 0xeea00f40, 0xefa70fd1, + "vmovl%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VMOV and narrow. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMOVN, + 0xfe310e81, 0xffb30fd1, + "vmovn%T%v.i%18-19s\t%13-15,22Q, %1-3,5Q"}, + /* Floating point move extract. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VMOVX, 0xfeb00a40, 0xffbf0fd0, "vmovx.f16\t%22,12-15F, %5,0-3F"}, + /* Vector VMUL floating-point T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMUL_FP_T1, + 0xff000d50, 0xffa11f51, + "vmul%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMUL floating-point T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VMUL_FP_T2, + 0xee310e60, 0xefb11f70, + "vmul%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VMUL T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMUL_VEC_T1, + 0xef000950, 0xff811f51, + "vmul%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMUL T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMUL_VEC_T2, + 0xee011e60, 0xff811f70, + "vmul%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VMULH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMULH, + 0xee010e01, 0xef811f51, + "vmulh%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VRMULH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRMULH, + 0xee011e01, 0xef811f51, + "vrmulh%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMULL integer. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMULL_INT, + 0xee010e00, 0xef810f51, + "vmull%T%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VMULL polynomial. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMULL_POLY, + 0xee310e00, 0xefb10f51, + "vmull%T%v.%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + /* Vector VMVN immediate to vector. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMVN_IMM, 0xef800070, 0xefb810f0, "vmvn%v.i%8-11s\t%13-15,22Q, %E"}, /* Vector VMVN register. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VMVN_REG, 0xffb005c0, 0xffbf1fd1, "vmvn%v\t%13-15,22Q, %1-3,5Q"}, + /* Vector VNEG floating point. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VNEG_FP, + 0xffb107c0, 0xffb31fd1, + "vneg%v.f%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VNEG. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VNEG_VEC, + 0xffb103c0, 0xffb31fd1, + "vneg%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + /* Vector VORN, vector bitwise or not. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VORN, 0xef300150, 0xffb11f51, "vorn%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, /* Vector VORR register. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VORR_REG, 0xef200150, 0xffb11f51, "vorr%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + /* Vector VMOV, vector to vector move. While decoding MVE_VORR_REG if + "Qm==Qn", VORR should replaced by its alias VMOV. For that to happen + MVE_VMOV_VEC_TO_VEC need to placed after MVE_VORR_REG in this mve_opcodes + array. */ + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VMOV_VEC_TO_VEC, + 0xef200150, 0xffb11f51, + "vmov%v\t%13-15,22Q, %17-19,7Q"}, + + /* Vector VQDMULL T1 variant. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMULL_T1, + 0xee300f01, 0xefb10f51, + "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VPNOT. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VPNOT, + 0xfe310f4d, 0xffffffff, + "vpnot%v"}, + + /* Vector VPSEL. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VPSEL, + 0xfe310f01, 0xffb11f51, + "vpsel%v\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQABS. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQABS, + 0xffb00740, 0xffb31fd1, + "vqabs%v.s%18-19s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQADD T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQADD_T1, + 0xef000050, 0xef811f51, + "vqadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQADD T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQADD_T2, + 0xee000f60, 0xef811f70, + "vqadd%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQDMULL T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMULL_T2, + 0xee300f60, 0xefb10f70, + "vqdmull%T%v.s%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQMOVN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQMOVN, + 0xee330e01, 0xefb30fd1, + "vqmovn%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VQMOVUN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQMOVUN, + 0xee310e81, 0xffb30fd1, + "vqmovun%T%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VQDMLADH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMLADH, + 0xee000e00, 0xff810f51, + "vqdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQRDMLADH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMLADH, + 0xee000e01, 0xff810f51, + "vqrdmladh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQDMLAH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMLAH, + 0xee000e60, 0xff811f70, + "vqdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQRDMLAH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMLAH, + 0xee000e40, 0xff811f70, + "vqrdmlah%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQDMLASH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMLASH, + 0xee001e60, 0xff811f70, + "vqdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQRDMLASH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMLASH, + 0xee001e40, 0xff811f70, + "vqrdmlash%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQDMLSDH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMLSDH, + 0xfe000e00, 0xff810f51, + "vqdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQRDMLSDH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMLSDH, + 0xfe000e01, 0xff810f51, + "vqrdmlsdh%X%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQDMULH T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMULH_T1, + 0xef000b40, 0xff811f51, + "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQRDMULH T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMULH_T2, + 0xff000b40, 0xff811f51, + "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQDMULH T3 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQDMULH_T3, + 0xee010e60, 0xff811f70, + "vqdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQRDMULH T4 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRDMULH_T4, + 0xfe010e60, 0xff811f70, + "vqrdmulh%v.s%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VQNEG. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQNEG, + 0xffb007c0, 0xffb31fd1, + "vqneg%v.s%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VQRSHL T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRSHL_T1, + 0xef000550, 0xef811f51, + "vqrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"}, + + /* Vector VQRSHL T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRSHL_T2, + 0xee331ee0, 0xefb31ff0, + "vqrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"}, + + /* Vector VQRSHRN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRSHRN, + 0xee800f41, 0xefa00fd1, + "vqrshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VQRSHRUN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQRSHRUN, + 0xfe800fc0, 0xffa00fd1, + "vqrshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VQSHL T1 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHL_T1, + 0xee311ee0, 0xefb31ff0, + "vqshl%v.%u%18-19s\t%13-15,22Q, %0-3r"}, + + /* Vector VQSHL T4 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHL_T4, + 0xef000450, 0xef811f51, + "vqshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"}, + + /* Vector VQSHRN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHRN, + 0xee800f40, 0xefa00fd1, + "vqshrn%T%v.%u%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VQSHRUN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSHRUN, + 0xee800fc0, 0xffa00fd1, + "vqshrun%T%v.s%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VQSUB T1 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSUB_T1, + 0xef000250, 0xef811f51, + "vqsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VQSUB T2 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VQSUB_T2, + 0xee001f60, 0xef811f70, + "vqsub%v.%u%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VREV16. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VREV16, + 0xffb00140, 0xffb31fd1, + "vrev16%v.8\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VREV32. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VREV32, + 0xffb000c0, 0xffb31fd1, + "vrev32%v.%18-19s\t%13-15,22Q, %1-3,5Q"}, + + /* Vector VREV64. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VREV64, + 0xffb00040, 0xffb31fd1, + "vrev64%v.%18-19s\t%13-15,22Q, %1-3,5Q"}, + /* Vector VRINT floating point. */ - {ARM_FEATURE_COPROC (FPU_MVE_FP), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), MVE_VRINT_FP, 0xffb20440, 0xffb31c51, "vrint%m%v.f%18-19s\t%13-15,22Q, %1-3,5Q"}, + /* Vector VRMLALDAVH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRMLALDAVH, + 0xee800f00, 0xef811f51, + "vrmlalvh%5A%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + /* Vector VRMLALDAVH. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRMLALDAVH, + 0xee801f00, 0xef811f51, + "vrmlaldavh%5Ax%v.%u32\t%13-15l, %20-22h, %17-19,7Q, %1-3Q"}, + + /* Vector VRSHL T1 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRSHL_T1, + 0xef000540, 0xef811f51, + "vrshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"}, + + /* Vector VRSHL T2 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRSHL_T2, + 0xee331e60, 0xefb31ff0, + "vrshl%v.%u%18-19s\t%13-15,22Q, %0-3r"}, + + /* Vector VRSHRN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VRSHRN, + 0xfe800fc1, 0xffa00fd1, + "vrshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + + /* Vector VSBC. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSBC, + 0xfe300f00, 0xffb10f51, + "vsbc%12I%v.i32\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VSHL T2 Variant. 
*/ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHL_T2, + 0xee311e60, 0xefb31ff0, + "vshl%v.%u%18-19s\t%13-15,22Q, %0-3r"}, + + /* Vector VSHL T3 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHL_T3, + 0xef000440, 0xef811f51, + "vshl%v.%u%20-21s\t%13-15,22Q, %1-3,5Q, %17-19,7Q"}, + + /* Vector VSHLC. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHLC, + 0xeea00fc0, 0xffa01ff0, + "vshlc%v\t%13-15,22Q, %0-3r, #%16-20d"}, + + /* Vector VSHLL T2 Variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHLL_T2, + 0xee310e01, 0xefb30fd1, + "vshll%T%v.%u%18-19s\t%13-15,22Q, %1-3,5Q, #%18-19d"}, + + /* Vector VSHRN. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSHRN, + 0xee800fc1, 0xffa00fd1, + "vshrn%T%v.i%19-20s\t%13-15,22Q, %1-3,5Q, #%16-18d"}, + /* Vector VST2 no writeback. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VST2, 0xfc801e00, 0xffb01e5f, "vst2%5d.%7-8s\t%B, [%16-19r]"}, /* Vector VST2 writeback. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VST2, 0xfca01e00, 0xffb01e5f, "vst2%5d.%7-8s\t%B, [%16-19r]!"}, /* Vector VST4 no writeback. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VST4, 0xfc801e01, 0xffb01e1f, "vst4%5-6d.%7-8s\t%B, [%16-19r]"}, /* Vector VST4 writeback. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VST4, 0xfca01e01, 0xffb01e1f, "vst4%5-6d.%7-8s\t%B, [%16-19r]!"}, /* Vector VSTRB scatter store, T1 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRB_SCATTER_T1, 0xec800e00, 0xffb01e50, "vstrb%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q]"}, /* Vector VSTRH scatter store, T2 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRH_SCATTER_T2, 0xec800e10, 0xffb01e50, "vstrh%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VSTRW scatter store, T3 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRW_SCATTER_T3, 0xec800e40, 0xffb01e50, "vstrw%v.%7-8s\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VSTRD scatter store, T4 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRD_SCATTER_T4, 0xec800fd0, 0xffb01fd0, "vstrd%v.64\t%13-15,22Q, [%16-19r, %1-3,5Q%o]"}, /* Vector VSTRW scatter store, T5 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRW_SCATTER_T5, 0xfd001e00, 0xff111f00, "vstrw%v.32\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"}, /* Vector VSTRD scatter store, T6 variant. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRD_SCATTER_T6, 0xfd001f00, 0xff111f00, "vstrd%v.64\t%13-15,22Q, [%17-19,7Q, #%a%0-6i]%w"}, /* Vector VSTRB. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRB_T1, 0xec000e00, 0xfe581e00, "vstrb%v.%7-8s\t%13-15Q, %d"}, /* Vector VSTRH. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRH_T2, 0xec080e00, 0xfe581e00, "vstrh%v.%7-8s\t%13-15Q, %d"}, /* Vector VSTRB variant T5. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRB_T5, 0xec001e00, 0xfe101f80, "vstrb%v.8\t%13-15,22Q, %d"}, /* Vector VSTRH variant T6. */ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRH_T6, 0xec001e80, 0xfe101f80, "vstrh%v.16\t%13-15,22Q, %d"}, /* Vector VSTRW variant T7. 
*/ - {ARM_FEATURE_COPROC (FPU_MVE), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), MVE_VSTRW_T7, 0xec001f00, 0xfe101f80, "vstrw%v.32\t%13-15,22Q, %d"}, + /* Vector VSUB floating point T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VSUB_FP_T1, + 0xef200d40, 0xffa11f51, + "vsub%v.f%20s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VSUB floating point T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP), + MVE_VSUB_FP_T2, + 0xee301f40, 0xefb11f70, + "vsub%v.f%28s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + /* Vector VSUB T1 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSUB_VEC_T1, + 0xff000840, 0xff811f51, + "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %1-3,5Q"}, + + /* Vector VSUB T2 variant. */ + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_VSUB_VEC_T2, + 0xee011f40, 0xff811f70, + "vsub%v.i%20-21s\t%13-15,22Q, %17-19,7Q, %0-3r"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_ASRLI, + 0xea50012f, 0xfff1813f, + "asrl%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_ASRL, + 0xea50012d, 0xfff101ff, + "asrl%c\t%17-19l, %9-11h, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_LSLLI, + 0xea50010f, 0xfff1813f, + "lsll%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_LSLL, + 0xea50010d, 0xfff101ff, + "lsll%c\t%17-19l, %9-11h, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_LSRL, + 0xea50011f, 0xfff1813f, + "lsrl%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SQRSHRL, + 0xea51012d, 0xfff1017f, + "sqrshrl%c\t%17-19l, %9-11h, %k, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SQRSHR, + 0xea500f2d, 0xfff00fff, + "sqrshr%c\t%16-19S, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SQSHLL, + 0xea51013f, 0xfff1813f, + "sqshll%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SQSHL, + 0xea500f3f, 0xfff08f3f, + "sqshl%c\t%16-19S, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SRSHRL, + 0xea51012f, 0xfff1813f, + "srshrl%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_SRSHR, + 0xea500f2f, 0xfff08f3f, + "srshr%c\t%16-19S, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_UQRSHLL, + 0xea51010d, 0xfff1017f, + "uqrshll%c\t%17-19l, %9-11h, %k, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_UQRSHL, + 0xea500f0d, 0xfff00fff, + "uqrshl%c\t%16-19S, %12-15S"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_UQSHLL, + 0xea51010f, 0xfff1813f, + "uqshll%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_UQSHL, + 0xea500f0f, 0xfff08f3f, + "uqshl%c\t%16-19S, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_URSHRL, + 0xea51011f, 0xfff1813f, + "urshrl%c\t%17-19l, %9-11h, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE), + MVE_URSHR, + 0xea500f1f, 0xfff08f3f, + "urshr%c\t%16-19S, %j"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSINC, + 0xea509000, 0xfff0f000, + "csinc\t%8-11S, %16-19Z, %0-3Z, %4-7c"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSINV, + 0xea50a000, 0xfff0f000, + "csinv\t%8-11S, %16-19Z, %0-3Z, %4-7c"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSET, + 0xea5f900f, 0xfffff00f, + "cset\t%8-11S, %4-7C"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSETM, + 0xea5fa00f, 0xfffff00f, + "csetm\t%8-11S, %4-7C"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSEL, + 0xea508000, 0xfff0f000, + "csel\t%8-11S, %16-19Z, %0-3Z, %4-7c"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CSNEG, + 
0xea50b000, 0xfff0f000, + "csneg\t%8-11S, %16-19Z, %0-3Z, %4-7c"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CINC, + 0xea509000, 0xfff0f000, + "cinc\t%8-11S, %16-19Z, %4-7C"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CINV, + 0xea50a000, 0xfff0f000, + "cinv\t%8-11S, %16-19Z, %4-7C"}, + + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + MVE_CNEG, + 0xea50b000, 0xfff0f000, + "cneg\t%8-11S, %16-19Z, %4-7C"}, + {ARM_FEATURE_CORE_LOW (0), MVE_NONE, 0x00000000, 0x00000000, 0} @@ -2514,17 +3720,17 @@ static const struct opcode32 arm_opcodes[] = {ARM_FEATURE_CORE_LOW (ARM_EXT2_ATOMICS), 0x01f00c9f, 0x0ff00fff, "ldah%c\t%12-15r, [%16-19R]"}, /* CRC32 instructions. */ - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1000040, 0xfff00ff0, "crc32b\t%12-15R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1200040, 0xfff00ff0, "crc32h\t%12-15R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1400040, 0xfff00ff0, "crc32w\t%12-15R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1000240, 0xfff00ff0, "crc32cb\t%12-15R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1200240, 0xfff00ff0, "crc32ch\t%12-15R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xe1400240, 0xfff00ff0, "crc32cw\t%12-15R, %16-19R, %0-3R"}, /* Privileged Access Never extension instructions. */ @@ -3450,13 +4656,21 @@ static const struct opcode32 thumb32_opcodes[] = /* Armv8.1-M Mainline and Armv8.1-M Mainline Security Extensions instructions. */ {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), - 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"}, - {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), - 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"}, + 0xf00fe001, 0xffffffff, "lctp%c"}, {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), 0xf02fc001, 0xfffff001, "le\t%P"}, {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), 0xf00fc001, 0xfffff001, "le\tlr, %P"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0xf01fc001, 0xfffff001, "letp\tlr, %P"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0xf040c001, 0xfff0f001, "wls\tlr, %16-19S, %Q"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0xf000c001, 0xffc0f001, "wlstp.%20-21s\tlr, %16-19S, %Q"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0xf040e001, 0xfff0ffff, "dls\tlr, %16-19S"}, + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), + 0xf000e001, 0xffc0ffff, "dlstp.%20-21s\tlr, %16-19S"}, {ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN), 0xf040e001, 0xf860f001, "bf%c\t%G, %W"}, @@ -3522,17 +4736,17 @@ static const struct opcode32 thumb32_opcodes[] = 0xe8d000ff, 0xfff000ff, "ldaexd%c\t%12-15r, %8-11r, [%16-19R]"}, /* CRC32 instructions. 
*/ - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfac0f080, 0xfff0f0f0, "crc32b\t%8-11R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfac0f090, 0xfff0f0f0, "crc32h\t%9-11R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfac0f0a0, 0xfff0f0f0, "crc32w\t%8-11R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfad0f080, 0xfff0f0f0, "crc32cb\t%8-11R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfad0f090, 0xfff0f0f0, "crc32ch\t%8-11R, %16-19R, %0-3R"}, - {ARM_FEATURE_COPROC (CRC_EXT_ARMV8), + {ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC), 0xfad0f0a0, 0xfff0f0f0, "crc32cw\t%8-11R, %16-19R, %0-3R"}, /* Speculation Barriers. */ @@ -3980,7 +5194,8 @@ static const arm_regname regnames[] = { "reg-names-atpcs", N_("Select register names used in the ATPCS"), { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "IP", "SP", "LR", "PC" }}, { "reg-names-special-atpcs", N_("Select special register names used in the ATPCS"), - { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }} + { "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }}, + { "coproc=(cde|generic)", N_("Enable CDE extensions for coprocessor N space"), { NULL } } }; static const char *const iwmmxt_wwnames[] = @@ -4060,6 +5275,7 @@ static unsigned int regname_selected = 1; #define arm_regnames regnames[regname_selected].reg_names static bfd_boolean force_thumb = FALSE; +static uint16_t cde_coprocs = 0; /* Current IT instruction state. This contains the same state as the IT bits in the CPSR. 
*/ @@ -4272,6 +5488,23 @@ is_mve_okay_in_it (enum mve_instructions matched_insn) case MVE_VMOV2_VEC_LANE_TO_GP: case MVE_VMOV2_GP_TO_VEC_LANE: case MVE_VMOV_VEC_LANE_TO_GP: + case MVE_LSLL: + case MVE_LSLLI: + case MVE_LSRL: + case MVE_ASRL: + case MVE_ASRLI: + case MVE_SQRSHRL: + case MVE_SQRSHR: + case MVE_UQRSHL: + case MVE_UQRSHLL: + case MVE_UQSHL: + case MVE_UQSHLL: + case MVE_URSHRL: + case MVE_URSHR: + case MVE_SRSHRL: + case MVE_SRSHR: + case MVE_SQSHLL: + case MVE_SQSHL: return TRUE; default: return FALSE; @@ -4420,6 +5653,38 @@ is_mve_encoding_conflict (unsigned long given, else return FALSE; + case MVE_VQADD_T2: + case MVE_VQSUB_T2: + case MVE_VMUL_VEC_T2: + case MVE_VMULH: + case MVE_VRMULH: + case MVE_VMLA: + case MVE_VMAX: + case MVE_VMIN: + case MVE_VBRSR: + case MVE_VADD_VEC_T2: + case MVE_VSUB_VEC_T2: + case MVE_VABAV: + case MVE_VQRSHL_T1: + case MVE_VQSHL_T4: + case MVE_VRSHL_T1: + case MVE_VSHL_T3: + case MVE_VCADD_VEC: + case MVE_VHCADD: + case MVE_VDDUP: + case MVE_VIDUP: + case MVE_VQRDMLADH: + case MVE_VQDMLAH: + case MVE_VQRDMLAH: + case MVE_VQDMLASH: + case MVE_VQRDMLASH: + case MVE_VQDMLSDH: + case MVE_VQRDMLSDH: + case MVE_VQDMULH_T3: + case MVE_VQRDMULH_T4: + case MVE_VQDMLADH: + case MVE_VMLAS: + case MVE_VMULL_INT: case MVE_VHADD_T2: case MVE_VHSUB_T2: case MVE_VCMP_VEC_T1: @@ -4485,11 +5750,11 @@ is_mve_encoding_conflict (unsigned long given, { unsigned long cmode = arm_decode_field (given, 8, 11); - if ((cmode & 9) == 1) + if (cmode == 0xe) return TRUE; - else if ((cmode & 5) == 1) + else if ((cmode & 0x9) == 1) return TRUE; - else if ((cmode & 0xe) == 0xe) + else if ((cmode & 0xd) == 9) return TRUE; else return FALSE; @@ -4502,7 +5767,134 @@ is_mve_encoding_conflict (unsigned long given, else return FALSE; + case MVE_VMOVL: + { + unsigned long size = arm_decode_field (given, 19, 20); + if ((size == 0) || (size == 3)) + return TRUE; + else + return FALSE; + } + + case MVE_VMAXA: + case MVE_VMINA: + case MVE_VMAXV: + case MVE_VMAXAV: + case MVE_VMINV: + case MVE_VMINAV: + case MVE_VQRSHL_T2: + case MVE_VQSHL_T1: + case MVE_VRSHL_T2: + case MVE_VSHL_T2: + case MVE_VSHLL_T2: + case MVE_VADDV: + case MVE_VMOVN: + case MVE_VQMOVUN: + case MVE_VQMOVN: + if (arm_decode_field (given, 18, 19) == 3) + return TRUE; + else + return FALSE; + + case MVE_VMLSLDAV: + case MVE_VRMLSLDAVH: + case MVE_VMLALDAV: + case MVE_VADDLV: + if (arm_decode_field (given, 20, 22) == 7) + return TRUE; + else + return FALSE; + + case MVE_VRMLALDAVH: + if ((arm_decode_field (given, 20, 22) & 6) == 6) + return TRUE; + else + return FALSE; + + case MVE_VDWDUP: + case MVE_VIWDUP: + if ((arm_decode_field (given, 20, 21) == 3) + || (arm_decode_field (given, 1, 3) == 7)) + return TRUE; + else + return FALSE; + + + case MVE_VSHLL_T1: + if (arm_decode_field (given, 16, 18) == 0) + { + unsigned long sz = arm_decode_field (given, 19, 20); + + if ((sz == 1) || (sz == 2)) + return TRUE; + else + return FALSE; + } + else + return FALSE; + + case MVE_VQSHL_T2: + case MVE_VQSHLU_T3: + case MVE_VRSHR: + case MVE_VSHL_T1: + case MVE_VSHR: + case MVE_VSLI: + case MVE_VSRI: + if (arm_decode_field (given, 19, 21) == 0) + return TRUE; + else + return FALSE; + + case MVE_VCTP: + if (arm_decode_field (given, 16, 19) == 0xf) + return TRUE; + else + return FALSE; + + case MVE_ASRLI: + case MVE_ASRL: + case MVE_LSLLI: + case MVE_LSLL: + case MVE_LSRL: + case MVE_SQRSHRL: + case MVE_SQSHLL: + case MVE_SRSHRL: + case MVE_UQRSHLL: + case MVE_UQSHLL: + case MVE_URSHRL: + if (arm_decode_field (given, 9, 11) == 0x7) + 
return TRUE; + else + return FALSE; + + case MVE_CSINC: + case MVE_CSINV: + { + unsigned long rm, rn; + rm = arm_decode_field (given, 0, 3); + rn = arm_decode_field (given, 16, 19); + /* CSET/CSETM. */ + if (rm == 0xf && rn == 0xf) + return TRUE; + /* CINC/CINV. */ + else if (rn == rm && rn != 0xf) + return TRUE; + } + /* Fall through. */ + case MVE_CSEL: + case MVE_CSNEG: + if (arm_decode_field (given, 0, 3) == 0xd) + return TRUE; + /* CNEG. */ + else if (matched_insn == MVE_CSNEG) + if (arm_decode_field (given, 0, 3) == arm_decode_field (given, 16, 19)) + return TRUE; + return FALSE; + default: + case MVE_VADD_FP_T1: + case MVE_VADD_FP_T2: + case MVE_VADD_VEC_T1: return FALSE; } @@ -4603,6 +5995,14 @@ is_mve_undefined (unsigned long given, enum mve_instructions matched_insn, else return FALSE; + case MVE_VQADD_T1: + case MVE_VQSUB_T1: + case MVE_VMUL_VEC_T1: + case MVE_VABD_VEC: + case MVE_VADD_VEC_T1: + case MVE_VSUB_VEC_T1: + case MVE_VQDMULH_T1: + case MVE_VQRDMULH_T2: case MVE_VRHADD: case MVE_VHADD_T1: case MVE_VHSUB_T1: @@ -4776,6 +6176,8 @@ is_mve_undefined (unsigned long given, enum mve_instructions matched_insn, return FALSE; } + case MVE_VNEG_FP: + case MVE_VABS_FP: case MVE_VCVT_BETWEEN_FP_INT: case MVE_VCVT_FROM_FP_TO_INT: { @@ -4839,6 +6241,12 @@ is_mve_undefined (unsigned long given, enum mve_instructions matched_insn, else return FALSE; + case MVE_VMOV_VEC_TO_VEC: + if ((arm_decode_field (given, 5, 5) == 1) + || (arm_decode_field (given, 22, 22) == 1)) + return TRUE; + return FALSE; + case MVE_VMOV_IMM_TO_VEC: if (arm_decode_field (given, 5, 5) == 0) { @@ -4855,29 +6263,154 @@ is_mve_undefined (unsigned long given, enum mve_instructions matched_insn, else return FALSE; - default: - return FALSE; - } -} - -/* Return FALSE if GIVEN is not an unpredictable encoding for MATCHED_INSN. - Otherwise, return TRUE and set UNPREDICTABLE_CODE to give a reason as to - why this encoding is unpredictable. 
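Nearly every check in these is_mve_* predicates boils down to pulling a named bitfield out of the 32-bit encoding with arm_decode_field (or arm_decode_field_multiple for split fields such as the %13-15,22Q register operands) and comparing it against a handful of reserved values. The helpers below are a from-scratch restatement of that idiom, written only to make tests like arm_decode_field (given, 18, 19) == 3 easier to follow; the names decode_field and decode_field_multiple are illustrative and the real helpers in arm-dis.c may differ in detail.

#include <assert.h>

/* Extract bits [start, end] (inclusive) from an encoded instruction.
   A sketch of the bitfield-extraction idiom used by the checks above.  */
static unsigned long
decode_field (unsigned long insn, unsigned int start, unsigned int end)
{
  return (insn >> start) & ((1UL << (end - start + 1)) - 1);
}

/* Two non-adjacent fields concatenated, low part first, as used for MVE
   Q-register numbers (e.g. bits 13-15 plus bit 22).  */
static unsigned long
decode_field_multiple (unsigned long insn,
                       unsigned int s1, unsigned int e1,
                       unsigned int s2, unsigned int e2)
{
  unsigned long lo = decode_field (insn, s1, e1);
  unsigned long hi = decode_field (insn, s2, e2);
  return lo | (hi << (e1 - s1 + 1));
}

int
main (void)
{
  /* 0xffb00140 is the VREV16 base pattern above: bits 18-19 are 0,
     and the Qd operand (bits 13-15 plus bit 22) decodes to q0.  */
  assert (decode_field (0xffb00140, 18, 19) == 0);
  assert (decode_field_multiple (0xffb00140, 13, 15, 22, 22) == 0);
  return 0;
}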
*/ - -static bfd_boolean -is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn, - enum mve_unpredictable *unpredictable_code) -{ - *unpredictable_code = UNPRED_NONE; - - switch (matched_insn) - { - case MVE_VCMP_FP_T2: - case MVE_VPT_FP_T2: - if ((arm_decode_field (given, 12, 12) == 0) - && (arm_decode_field (given, 5, 5) == 1)) + case MVE_VSHLL_T2: + case MVE_VMOVN: + if (arm_decode_field (given, 18, 19) == 2) { - *unpredictable_code = UNPRED_FCA_0_FCB_1; + *undefined_code = UNDEF_SIZE_2; + return TRUE; + } + else + return FALSE; + + case MVE_VRMLALDAVH: + case MVE_VMLADAV_T1: + case MVE_VMLADAV_T2: + case MVE_VMLALDAV: + if ((arm_decode_field (given, 28, 28) == 1) + && (arm_decode_field (given, 12, 12) == 1)) + { + *undefined_code = UNDEF_XCHG_UNS; + return TRUE; + } + else + return FALSE; + + case MVE_VQSHRN: + case MVE_VQSHRUN: + case MVE_VSHLL_T1: + case MVE_VSHRN: + { + unsigned long sz = arm_decode_field (given, 19, 20); + if (sz == 1) + return FALSE; + else if ((sz & 2) == 2) + return FALSE; + else + { + *undefined_code = UNDEF_SIZE; + return TRUE; + } + } + break; + + case MVE_VQSHL_T2: + case MVE_VQSHLU_T3: + case MVE_VRSHR: + case MVE_VSHL_T1: + case MVE_VSHR: + case MVE_VSLI: + case MVE_VSRI: + { + unsigned long sz = arm_decode_field (given, 19, 21); + if ((sz & 7) == 1) + return FALSE; + else if ((sz & 6) == 2) + return FALSE; + else if ((sz & 4) == 4) + return FALSE; + else + { + *undefined_code = UNDEF_SIZE; + return TRUE; + } + } + + case MVE_VQRSHRN: + case MVE_VQRSHRUN: + if (arm_decode_field (given, 19, 20) == 0) + { + *undefined_code = UNDEF_SIZE_0; + return TRUE; + } + else + return FALSE; + + case MVE_VABS_VEC: + if (arm_decode_field (given, 18, 19) == 3) + { + *undefined_code = UNDEF_SIZE_3; + return TRUE; + } + else + return FALSE; + + case MVE_VQNEG: + case MVE_VQABS: + case MVE_VNEG_VEC: + case MVE_VCLS: + case MVE_VCLZ: + if (arm_decode_field (given, 18, 19) == 3) + { + *undefined_code = UNDEF_SIZE_3; + return TRUE; + } + else + return FALSE; + + case MVE_VREV16: + if (arm_decode_field (given, 18, 19) == 0) + return FALSE; + else + { + *undefined_code = UNDEF_SIZE_NOT_0; + return TRUE; + } + + case MVE_VREV32: + { + unsigned long size = arm_decode_field (given, 18, 19); + if ((size & 2) == 2) + { + *undefined_code = UNDEF_SIZE_2; + return TRUE; + } + else + return FALSE; + } + + case MVE_VREV64: + if (arm_decode_field (given, 18, 19) != 3) + return FALSE; + else + { + *undefined_code = UNDEF_SIZE_3; + return TRUE; + } + + default: + return FALSE; + } +} + +/* Return FALSE if GIVEN is not an unpredictable encoding for MATCHED_INSN. + Otherwise, return TRUE and set UNPREDICTABLE_CODE to give a reason as to + why this encoding is unpredictable. 
*/ + +static bfd_boolean +is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn, + enum mve_unpredictable *unpredictable_code) +{ + *unpredictable_code = UNPRED_NONE; + + switch (matched_insn) + { + case MVE_VCMP_FP_T2: + case MVE_VPT_FP_T2: + if ((arm_decode_field (given, 12, 12) == 0) + && (arm_decode_field (given, 5, 5) == 1)) + { + *unpredictable_code = UNPRED_FCA_0_FCB_1; return TRUE; } else @@ -4914,6 +6447,28 @@ is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn, return FALSE; } + case MVE_VQADD_T2: + case MVE_VQSUB_T2: + case MVE_VMUL_FP_T2: + case MVE_VMUL_VEC_T2: + case MVE_VMLA: + case MVE_VBRSR: + case MVE_VADD_FP_T2: + case MVE_VSUB_FP_T2: + case MVE_VADD_VEC_T2: + case MVE_VSUB_VEC_T2: + case MVE_VQRSHL_T2: + case MVE_VQSHL_T1: + case MVE_VRSHL_T2: + case MVE_VSHL_T2: + case MVE_VSHLC: + case MVE_VQDMLAH: + case MVE_VQRDMLAH: + case MVE_VQDMLASH: + case MVE_VQRDMLASH: + case MVE_VQDMULH_T3: + case MVE_VQRDMULH_T4: + case MVE_VMLAS: case MVE_VFMA_FP_SCALAR: case MVE_VFMAS_FP_SCALAR: case MVE_VHADD_T2: @@ -5107,6 +6662,15 @@ is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn, return FALSE; } + case MVE_VMAXV: + case MVE_VMAXAV: + case MVE_VMAXNMV_FP: + case MVE_VMAXNMAV_FP: + case MVE_VMINNMV_FP: + case MVE_VMINNMAV_FP: + case MVE_VMINV: + case MVE_VMINAV: + case MVE_VABAV: case MVE_VMOV_HFP_TO_GP: case MVE_VMOV_GP_TO_VEC_LANE: case MVE_VMOV_VEC_LANE_TO_GP: @@ -5126,6 +6690,212 @@ is_mve_unpredictable (unsigned long given, enum mve_instructions matched_insn, return FALSE; } + case MVE_VMULL_INT: + { + unsigned long Qd; + unsigned long Qm; + unsigned long Qn; + + if (arm_decode_field (given, 20, 21) == 2) + { + Qd = arm_decode_field_multiple (given, 13, 15, 22, 22); + Qm = arm_decode_field_multiple (given, 1, 3, 5, 5); + Qn = arm_decode_field_multiple (given, 17, 19, 7, 7); + + if ((Qd == Qn) || (Qd == Qm)) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2; + return TRUE; + } + else + return FALSE; + } + else + return FALSE; + } + + case MVE_VCMUL_FP: + case MVE_VQDMULL_T1: + { + unsigned long Qd; + unsigned long Qm; + unsigned long Qn; + + if (arm_decode_field (given, 28, 28) == 1) + { + Qd = arm_decode_field_multiple (given, 13, 15, 22, 22); + Qm = arm_decode_field_multiple (given, 1, 3, 5, 5); + Qn = arm_decode_field_multiple (given, 17, 19, 7, 7); + + if ((Qd == Qn) || (Qd == Qm)) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1; + return TRUE; + } + else + return FALSE; + } + else + return FALSE; + } + + case MVE_VQDMULL_T2: + { + unsigned long gpr = arm_decode_field (given, 0, 3); + if (gpr == 0xd) + { + *unpredictable_code = UNPRED_R13; + return TRUE; + } + else if (gpr == 0xf) + { + *unpredictable_code = UNPRED_R15; + return TRUE; + } + + if (arm_decode_field (given, 28, 28) == 1) + { + unsigned long Qd + = arm_decode_field_multiple (given, 13, 15, 22, 22); + unsigned long Qn = arm_decode_field_multiple (given, 17, 19, 7, 7); + + if (Qd == Qn) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1; + return TRUE; + } + else + return FALSE; + } + + return FALSE; + } + + case MVE_VMLSLDAV: + case MVE_VRMLSLDAVH: + case MVE_VMLALDAV: + case MVE_VADDLV: + if (arm_decode_field (given, 20, 22) == 6) + { + *unpredictable_code = UNPRED_R13; + return TRUE; + } + else + return FALSE; + + case MVE_VDWDUP: + case MVE_VIWDUP: + if (arm_decode_field (given, 1, 3) == 6) + { + *unpredictable_code = UNPRED_R13; + return TRUE; + } + else + return FALSE; + + case MVE_VCADD_VEC: + case MVE_VHCADD: + 
{ + unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22); + unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5); + if ((Qd == Qm) && arm_decode_field (given, 20, 21) == 2) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_2; + return TRUE; + } + else + return FALSE; + } + + case MVE_VCADD_FP: + { + unsigned long Qd = arm_decode_field_multiple (given, 13, 15, 22, 22); + unsigned long Qm = arm_decode_field_multiple (given, 1, 3, 5, 5); + if ((Qd == Qm) && arm_decode_field (given, 20, 20) == 1) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1; + return TRUE; + } + else + return FALSE; + } + + case MVE_VCMLA_FP: + { + unsigned long Qda; + unsigned long Qm; + unsigned long Qn; + + if (arm_decode_field (given, 20, 20) == 1) + { + Qda = arm_decode_field_multiple (given, 13, 15, 22, 22); + Qm = arm_decode_field_multiple (given, 1, 3, 5, 5); + Qn = arm_decode_field_multiple (given, 17, 19, 7, 7); + + if ((Qda == Qn) || (Qda == Qm)) + { + *unpredictable_code = UNPRED_Q_REGS_EQ_AND_SIZE_1; + return TRUE; + } + else + return FALSE; + } + else + return FALSE; + + } + + case MVE_VCTP: + if (arm_decode_field (given, 16, 19) == 0xd) + { + *unpredictable_code = UNPRED_R13; + return TRUE; + } + else + return FALSE; + + case MVE_VREV64: + { + unsigned long qd = arm_decode_field_multiple (given, 13, 15, 22, 22); + unsigned long qm = arm_decode_field_multiple (given, 1, 3, 6, 6); + + if (qd == qm) + { + *unpredictable_code = UNPRED_Q_REGS_EQUAL; + return TRUE; + } + else + return FALSE; + } + + case MVE_LSLL: + case MVE_LSLLI: + case MVE_LSRL: + case MVE_ASRL: + case MVE_ASRLI: + case MVE_UQSHLL: + case MVE_UQRSHLL: + case MVE_URSHRL: + case MVE_SRSHRL: + case MVE_SQSHLL: + case MVE_SQRSHRL: + { + unsigned long gpr = arm_decode_field (given, 9, 11); + gpr = ((gpr << 1) | 1); + if (gpr == 0xd) + { + *unpredictable_code = UNPRED_R13; + return TRUE; + } + else if (gpr == 0xf) + { + *unpredictable_code = UNPRED_R15; + return TRUE; + } + + return FALSE; + } + default: return FALSE; } @@ -5137,23 +6907,23 @@ print_mve_vmov_index (struct disassemble_info *info, unsigned long given) unsigned long op1 = arm_decode_field (given, 21, 22); unsigned long op2 = arm_decode_field (given, 5, 6); unsigned long h = arm_decode_field (given, 16, 16); - unsigned long index, esize, targetBeat, idx; + unsigned long index_operand, esize, targetBeat, idx; void *stream = info->stream; fprintf_ftype func = info->fprintf_func; if ((op1 & 0x2) == 0x2) { - index = op2; + index_operand = op2; esize = 8; } else if (((op1 & 0x2) == 0x0) && ((op2 & 0x1) == 0x1)) { - index = op2 >> 1; + index_operand = op2 >> 1; esize = 16; } else if (((op1 & 0x2) == 0) && ((op2 & 0x3) == 0)) { - index = 0; + index_operand = 0; esize = 32; } else @@ -5163,7 +6933,7 @@ print_mve_vmov_index (struct disassemble_info *info, unsigned long given) } targetBeat = (op1 & 0x1) | (h << 1); - idx = index + targetBeat * (32/esize); + idx = index_operand + targetBeat * (32/esize); func (stream, "%lu", idx); } @@ -5336,6 +7106,10 @@ print_mve_undefined (struct disassemble_info *info, switch (undefined_code) { + case UNDEF_SIZE: + func (stream, "illegal size"); + break; + case UNDEF_SIZE_0: func (stream, "size equals zero"); break; @@ -5352,6 +7126,10 @@ print_mve_undefined (struct disassemble_info *info, func (stream, "size <= 1"); break; + case UNDEF_SIZE_NOT_0: + func (stream, "size not equal to 0"); + break; + case UNDEF_SIZE_NOT_2: func (stream, "size not equal to 2"); break; @@ -5392,6 +7170,10 @@ print_mve_undefined (struct 
disassemble_info *info, func (stream, "op field equal 0 and bad cmode"); break; + case UNDEF_XCHG_UNS: + func (stream, "exchange and unsigned together"); + break; + case UNDEF_NONE: break; } @@ -5729,6 +7511,49 @@ print_mve_vcvt_size (struct disassemble_info *info, } } +static void +print_mve_rotate (struct disassemble_info *info, unsigned long rot, + unsigned long rot_width) +{ + void *stream = info->stream; + fprintf_ftype func = info->fprintf_func; + + if (rot_width == 1) + { + switch (rot) + { + case 0: + func (stream, "90"); + break; + case 1: + func (stream, "270"); + break; + default: + break; + } + } + else if (rot_width == 2) + { + switch (rot) + { + case 0: + func (stream, "0"); + break; + case 1: + func (stream, "90"); + break; + case 2: + func (stream, "180"); + break; + case 3: + func (stream, "270"); + break; + default: + break; + } + } +} + static void print_instruction_predicate (struct disassemble_info *info) { @@ -5751,16 +7576,33 @@ print_mve_size (struct disassemble_info *info, switch (matched_insn) { + case MVE_VABAV: + case MVE_VABD_VEC: + case MVE_VABS_FP: + case MVE_VABS_VEC: + case MVE_VADD_VEC_T1: + case MVE_VADD_VEC_T2: + case MVE_VADDV: + case MVE_VBRSR: + case MVE_VCADD_VEC: + case MVE_VCLS: + case MVE_VCLZ: case MVE_VCMP_VEC_T1: case MVE_VCMP_VEC_T2: case MVE_VCMP_VEC_T3: case MVE_VCMP_VEC_T4: case MVE_VCMP_VEC_T5: case MVE_VCMP_VEC_T6: + case MVE_VCTP: + case MVE_VDDUP: + case MVE_VDWDUP: case MVE_VHADD_T1: case MVE_VHADD_T2: + case MVE_VHCADD: case MVE_VHSUB_T1: case MVE_VHSUB_T2: + case MVE_VIDUP: + case MVE_VIWDUP: case MVE_VLD2: case MVE_VLD4: case MVE_VLDRB_GATHER_T1: @@ -5769,14 +7611,60 @@ print_mve_size (struct disassemble_info *info, case MVE_VLDRD_GATHER_T4: case MVE_VLDRB_T1: case MVE_VLDRH_T2: + case MVE_VMAX: + case MVE_VMAXA: + case MVE_VMAXV: + case MVE_VMAXAV: + case MVE_VMIN: + case MVE_VMINA: + case MVE_VMINV: + case MVE_VMINAV: + case MVE_VMLA: + case MVE_VMLAS: + case MVE_VMUL_VEC_T1: + case MVE_VMUL_VEC_T2: + case MVE_VMULH: + case MVE_VRMULH: + case MVE_VMULL_INT: + case MVE_VNEG_FP: + case MVE_VNEG_VEC: case MVE_VPT_VEC_T1: case MVE_VPT_VEC_T2: case MVE_VPT_VEC_T3: case MVE_VPT_VEC_T4: case MVE_VPT_VEC_T5: case MVE_VPT_VEC_T6: + case MVE_VQABS: + case MVE_VQADD_T1: + case MVE_VQADD_T2: + case MVE_VQDMLADH: + case MVE_VQRDMLADH: + case MVE_VQDMLAH: + case MVE_VQRDMLAH: + case MVE_VQDMLASH: + case MVE_VQRDMLASH: + case MVE_VQDMLSDH: + case MVE_VQRDMLSDH: + case MVE_VQDMULH_T1: + case MVE_VQRDMULH_T2: + case MVE_VQDMULH_T3: + case MVE_VQRDMULH_T4: + case MVE_VQNEG: + case MVE_VQRSHL_T1: + case MVE_VQRSHL_T2: + case MVE_VQSHL_T1: + case MVE_VQSHL_T4: + case MVE_VQSUB_T1: + case MVE_VQSUB_T2: + case MVE_VREV32: + case MVE_VREV64: case MVE_VRHADD: case MVE_VRINT_FP: + case MVE_VRSHL_T1: + case MVE_VRSHL_T2: + case MVE_VSHL_T2: + case MVE_VSHL_T3: + case MVE_VSHLL_T2: case MVE_VST2: case MVE_VST4: case MVE_VSTRB_SCATTER_T1: @@ -5784,18 +7672,35 @@ print_mve_size (struct disassemble_info *info, case MVE_VSTRW_SCATTER_T3: case MVE_VSTRB_T1: case MVE_VSTRH_T2: + case MVE_VSUB_VEC_T1: + case MVE_VSUB_VEC_T2: if (size <= 3) func (stream, "%s", mve_vec_sizename[size]); else func (stream, ""); break; + case MVE_VABD_FP: + case MVE_VADD_FP_T1: + case MVE_VADD_FP_T2: + case MVE_VSUB_FP_T1: + case MVE_VSUB_FP_T2: case MVE_VCMP_FP_T1: case MVE_VCMP_FP_T2: case MVE_VFMA_FP_SCALAR: case MVE_VFMA_FP: case MVE_VFMS_FP: case MVE_VFMAS_FP_SCALAR: + case MVE_VMAXNM_FP: + case MVE_VMAXNMA_FP: + case MVE_VMAXNMV_FP: + case MVE_VMAXNMAV_FP: + case MVE_VMINNM_FP: + 
case MVE_VMINNMA_FP: + case MVE_VMINNMV_FP: + case MVE_VMINNMAV_FP: + case MVE_VMUL_FP_T1: + case MVE_VMUL_FP_T2: case MVE_VPT_FP_T1: case MVE_VPT_FP_T2: if (size == 0) @@ -5804,6 +7709,31 @@ print_mve_size (struct disassemble_info *info, func (stream, "16"); break; + case MVE_VCADD_FP: + case MVE_VCMLA_FP: + case MVE_VCMUL_FP: + case MVE_VMLADAV_T1: + case MVE_VMLALDAV: + case MVE_VMLSDAV_T1: + case MVE_VMLSLDAV: + case MVE_VMOVN: + case MVE_VQDMULL_T1: + case MVE_VQDMULL_T2: + case MVE_VQMOVN: + case MVE_VQMOVUN: + if (size == 0) + func (stream, "16"); + else if (size == 1) + func (stream, "32"); + break; + + case MVE_VMOVL: + if (size == 1) + func (stream, "8"); + else if (size == 2) + func (stream, "16"); + break; + case MVE_VDUP: switch (size) { @@ -5868,6 +7798,13 @@ print_mve_size (struct disassemble_info *info, } break; + case MVE_VMULL_POLY: + if (size == 0) + func (stream, "p8"); + else if (size == 1) + func (stream, "p16"); + break; + case MVE_VMVN_IMM: switch (size) { @@ -5903,11 +7840,95 @@ print_mve_size (struct disassemble_info *info, } break; + case MVE_VQSHRN: + case MVE_VQSHRUN: + case MVE_VQRSHRN: + case MVE_VQRSHRUN: + case MVE_VRSHRN: + case MVE_VSHRN: + { + switch (size) + { + case 1: + func (stream, "16"); + break; + + case 2: case 3: + func (stream, "32"); + break; + + default: + break; + } + } + break; + + case MVE_VQSHL_T2: + case MVE_VQSHLU_T3: + case MVE_VRSHR: + case MVE_VSHL_T1: + case MVE_VSHLL_T1: + case MVE_VSHR: + case MVE_VSLI: + case MVE_VSRI: + { + switch (size) + { + case 1: + func (stream, "8"); + break; + + case 2: case 3: + func (stream, "16"); + break; + + case 4: case 5: case 6: case 7: + func (stream, "32"); + break; + + default: + break; + } + } + break; + default: break; } } +static void +print_mve_shift_n (struct disassemble_info *info, long given, + enum mve_instructions matched_insn) +{ + void *stream = info->stream; + fprintf_ftype func = info->fprintf_func; + + int startAt0 + = matched_insn == MVE_VQSHL_T2 + || matched_insn == MVE_VQSHLU_T3 + || matched_insn == MVE_VSHL_T1 + || matched_insn == MVE_VSHLL_T1 + || matched_insn == MVE_VSLI; + + unsigned imm6 = (given & 0x3f0000) >> 16; + + if (matched_insn == MVE_VSHLL_T1) + imm6 &= 0x1f; + + unsigned shiftAmount = 0; + if ((imm6 & 0x20) != 0) + shiftAmount = startAt0 ? imm6 - 32 : 64 - imm6; + else if ((imm6 & 0x10) != 0) + shiftAmount = startAt0 ? imm6 - 16 : 32 - imm6; + else if ((imm6 & 0x08) != 0) + shiftAmount = startAt0 ? imm6 - 8 : 16 - imm6; + else + print_mve_undefined (info, UNDEF_SIZE_0); + + func (stream, "%u", shiftAmount); +} + static void print_vec_condition (struct disassemble_info *info, long given, enum mve_instructions matched_insn) @@ -5993,10 +8014,11 @@ print_vec_condition (struct disassemble_info *info, long given, recognised coprocessor instruction. 
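print_mve_shift_n above recovers the printed shift count from the raw imm6 field: the position of the highest set bit of imm6 selects the element size, the left-shift style encodings (VSHL T1, VQSHL T2, VQSHLU T3, VSHLL T1, VSLI) store the shift on top of the element size, and the right-shift style encodings (VSHR, VRSHR, VSHRN, VRSHRN, VSRI) store 2*esize minus the shift. A condensed restatement with a worked example follows; mve_shift_amount is an illustrative name, not a function from the file.

#include <stdio.h>

/* Recover the printed shift amount from a 6-bit MVE shift immediate.
   start_at_zero is non-zero for the left-shift style encodings,
   mirroring the logic of print_mve_shift_n above.  Returns 0 for the
   reserved imm6 < 8 case, which the disassembler reports as UNDEFINED.  */
static unsigned int
mve_shift_amount (unsigned int imm6, int start_at_zero)
{
  if (imm6 & 0x20)              /* 32-bit elements.  */
    return start_at_zero ? imm6 - 32 : 64 - imm6;
  if (imm6 & 0x10)              /* 16-bit elements.  */
    return start_at_zero ? imm6 - 16 : 32 - imm6;
  if (imm6 & 0x08)              /* 8-bit elements.  */
    return start_at_zero ? imm6 - 8 : 16 - imm6;
  return 0;                     /* Reserved encoding.  */
}

int
main (void)
{
  /* The same imm6 value prints differently for the two families.  */
  printf ("vshr.s32 imm6=60 -> #%u\n", mve_shift_amount (60, 0));  /* 4 */
  printf ("vshl.i32 imm6=60 -> #%u\n", mve_shift_amount (60, 1));  /* 28 */
  return 0;
}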
*/ static bfd_boolean -print_insn_coprocessor (bfd_vma pc, - struct disassemble_info *info, - long given, - bfd_boolean thumb) +print_insn_coprocessor_1 (const struct sopcode32 *opcodes, + bfd_vma pc, + struct disassemble_info *info, + long given, + bfd_boolean thumb) { const struct sopcode32 *insn; void *stream = info->stream; @@ -6012,7 +8034,7 @@ print_insn_coprocessor (bfd_vma pc, allowed_arches = private_data->features; - for (insn = coprocessor_opcodes; insn->assembler; insn++) + for (insn = opcodes; insn->assembler; insn++) { unsigned long u_reg = 16; bfd_boolean is_unpredictable = FALSE; @@ -6256,6 +8278,8 @@ print_insn_coprocessor (bfd_vma pc, if (cond != COND_UNCOND && cp_num == 9) is_unpredictable = TRUE; + /* Fall through. */ + case 'b': func (stream, "%s", arm_conditional[cond]); break; @@ -6728,6 +8752,26 @@ print_insn_coprocessor (bfd_vma pc, return FALSE; } +static bfd_boolean +print_insn_coprocessor (bfd_vma pc, + struct disassemble_info *info, + long given, + bfd_boolean thumb) +{ + return print_insn_coprocessor_1 (coprocessor_opcodes, + pc, info, given, thumb); +} + +static bfd_boolean +print_insn_generic_coprocessor (bfd_vma pc, + struct disassemble_info *info, + long given, + bfd_boolean thumb) +{ + return print_insn_coprocessor_1 (generic_coprocessor_opcodes, + pc, info, given, thumb); +} + /* Decodes and prints ARM addressing modes. Returns the offset used in the address, if any, if it is worthwhile printing the offset as a hexadecimal value in a comment at the end of the @@ -6823,6 +8867,140 @@ print_arm_address (bfd_vma pc, struct disassemble_info *info, long given) return (signed long) offset; } + +/* Print one cde instruction on INFO->STREAM. + Return TRUE if the instuction matched, FALSE if this is not a + recognised cde instruction. */ +static bfd_boolean +print_insn_cde (struct disassemble_info *info, long given, bfd_boolean thumb) +{ + const struct cdeopcode32 *insn; + void *stream = info->stream; + fprintf_ftype func = info->fprintf_func; + + if (thumb) + { + /* Manually extract the coprocessor code from a known point. + This position is the same across all CDE instructions. */ + for (insn = cde_opcodes; insn->assembler; insn++) + { + uint16_t coproc = (given >> insn->coproc_shift) & insn->coproc_mask; + uint16_t coproc_mask = 1 << coproc; + if (! (coproc_mask & cde_coprocs)) + continue; + + if ((given & insn->mask) == insn->value) + { + bfd_boolean is_unpredictable = FALSE; + const char *c; + + for (c = insn->assembler; *c; c++) + { + if (*c == '%') + { + switch (*++c) + { + case '%': + func (stream, "%%"); + break; + + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + { + int width; + unsigned long value; + + c = arm_decode_bitfield (c, given, &value, &width); + + switch (*c) + { + case 'S': + if (value > 10) + is_unpredictable = TRUE; + /* Fall through. */ + case 'R': + if (value == 13) + is_unpredictable = TRUE; + /* Fall through. */ + case 'r': + func (stream, "%s", arm_regnames[value]); + break; + + case 'n': + if (value == 15) + func (stream, "%s", "APSR_nzcv"); + else + func (stream, "%s", arm_regnames[value]); + break; + + case 'T': + func (stream, "%s", arm_regnames[value + 1]); + break; + + case 'd': + func (stream, "%ld", value); + break; + + case 'V': + if (given & (1 << 6)) + func (stream, "q%ld", value >> 1); + else if (given & (1 << 24)) + func (stream, "d%ld", value); + else + { + /* Encoding for S register is different than for D and + Q registers. 
S registers are encoded using the top + single bit in position 22 as the lowest bit of the + register number, while for Q and D it represents the + highest bit of the register number. */ + uint8_t top_bit = (value >> 4) & 1; + uint8_t tmp = (value << 1) & 0x1e; + uint8_t res = tmp | top_bit; + func (stream, "s%u", res); + } + break; + + default: + abort (); + } + } + break; + + case 'p': + { + uint8_t proc_number = (given >> 8) & 0x7; + func (stream, "p%u", proc_number); + break; + } + + case 'a': + { + uint8_t a_offset = 28; + if (given & (1 << a_offset)) + func (stream, "a"); + break; + } + default: + abort (); + } + } + else + func (stream, "%c", *c); + } + + if (is_unpredictable) + func (stream, UNPREDICTABLE_INSTRUCTION); + + return TRUE; + } + } + return FALSE; + } + else + return FALSE; +} + + /* Print one neon instruction on INFO->STREAM. Return TRUE if the instuction matched, FALSE if this is not a recognised neon instruction. */ @@ -6849,14 +9027,56 @@ print_insn_neon (struct disassemble_info *info, long given, bfd_boolean thumb) } else if ((given & 0xff000000) == 0xf9000000) given ^= 0xf9000000 ^ 0xf4000000; + /* BFloat16 neon instructions without special top byte handling. */ + else if ((given & 0xff000000) == 0xfe000000 + || (given & 0xff000000) == 0xfc000000) + ; /* vdup is also a valid neon instruction. */ - else if ((given & 0xff910f5f) != 0xee800b10) + else if ((given & 0xff900f5f) != 0xee800b10) return FALSE; } for (insn = neon_opcodes; insn->assembler; insn++) { - if ((given & insn->mask) == insn->value) + unsigned long cond_mask = insn->mask; + unsigned long cond_value = insn->value; + int cond; + + if (thumb) + { + if ((cond_mask & 0xf0000000) == 0) { + /* For the entries in neon_opcodes, an opcode mask/value with + the high 4 bits equal to 0 indicates a conditional + instruction. For thumb however, we need to include those + bits in the instruction matching. */ + cond_mask |= 0xf0000000; + /* Furthermore, the thumb encoding of a conditional instruction + will have the high 4 bits equal to 0xe. */ + cond_value |= 0xe0000000; + } + if (ifthen_state) + cond = IFTHEN_COND; + else + cond = COND_UNCOND; + } + else + { + if ((given & 0xf0000000) == 0xf0000000) + { + /* If the instruction is unconditional, update the mask to only + match against unconditional opcode values. */ + cond_mask |= 0xf0000000; + cond = COND_UNCOND; + } + else + { + cond = (given >> 28) & 0xf; + if (cond == 0xe) + cond = COND_UNCOND; + } + } + + if ((given & cond_mask) == cond_value) { signed long value_in_comment = 0; bfd_boolean is_unpredictable = FALSE; @@ -6878,8 +9098,7 @@ print_insn_neon (struct disassemble_info *info, long given, bfd_boolean thumb) /* Fall through. */ case 'c': - if (thumb && ifthen_state) - func (stream, "%s", arm_conditional[IFTHEN_COND]); + func (stream, "%s", arm_conditional[cond]); break; case 'A': @@ -7336,6 +9555,13 @@ print_insn_mve (struct disassemble_info *info, long given) if (is_mve_undefined (given, insn->mve_op, &undefined_cond)) is_undefined = TRUE; + /* In "VORR Qd, Qm, Qn", if Qm==Qn, VORR is nothing but VMOV, + i.e "VMOV Qd, Qm". 
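For the CDE '%V' operand handled just above, the S-register case flips the role of the extra encoding bit: the bit that would be the high bit of a D or Q register number (instruction bit 22, bit 4 of the decoded value) becomes the lowest bit of the S register number. A minimal restatement with an example; cde_s_regno is an illustrative name only.

#include <stdint.h>
#include <stdio.h>

/* Map the 5-bit value decoded for a CDE '%V' operand to an S register
   number: the top bit of the value becomes the lowest bit of the
   register number, as described in the comment above.  */
static uint8_t
cde_s_regno (uint8_t value)
{
  uint8_t top_bit = (value >> 4) & 1;
  return (uint8_t) (((value << 1) & 0x1e) | top_bit);
}

int
main (void)
{
  /* Value 0x11 selects s3, while the same value printed as a
     D register would simply be d17.  */
  printf ("s%u\n", cde_s_regno (0x11));
  return 0;
}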
*/ + if ((insn->mve_op == MVE_VORR_REG) + && (arm_decode_field (given, 1, 3) + == arm_decode_field (given, 17, 19))) + continue; + for (c = insn->assembler; *c; c++) { if (*c == '%') @@ -7368,6 +9594,20 @@ print_insn_mve (struct disassemble_info *info, long given) } break; + case 'j': + { + unsigned int imm5 = 0; + imm5 |= arm_decode_field (given, 6, 7); + imm5 |= (arm_decode_field (given, 12, 14) << 2); + func (stream, "#%u", (imm5 == 0) ? 32 : imm5); + } + break; + + case 'k': + func (stream, "#%u", + (arm_decode_field (given, 7, 7) == 0) ? 64 : 48); + break; + case 'n': print_vec_condition (info, given, insn->mve_op); break; @@ -7443,6 +9683,18 @@ print_insn_mve (struct disassemble_info *info, long given) print_mve_vmov_index (info, given); break; + case 'T': + if (arm_decode_field (given, 12, 12) == 0) + func (stream, "b"); + else + func (stream, "t"); + break; + + case 'X': + if (arm_decode_field (given, 12, 12) == 1) + func (stream, "x"); + break; + case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { @@ -7461,11 +9713,42 @@ print_insn_mve (struct disassemble_info *info, long given) else func (stream, "%s", arm_regnames[value]); break; + + case 'c': + func (stream, "%s", arm_conditional[value]); + break; + + case 'C': + value ^= 1; + func (stream, "%s", arm_conditional[value]); + break; + + case 'S': + if (value == 13 || value == 15) + is_unpredictable = TRUE; + else + func (stream, "%s", arm_regnames[value]); + break; + case 's': print_mve_size (info, value, insn->mve_op); break; + case 'I': + if (value == 1) + func (stream, "i"); + break; + case 'A': + if (value == 1) + func (stream, "a"); + break; + case 'h': + { + unsigned int odd_reg = (value << 1) | 1; + func (stream, "%s", arm_regnames[odd_reg]); + } + break; case 'i': { unsigned long imm @@ -7493,12 +9776,74 @@ print_insn_mve (struct disassemble_info *info, long given) case 'k': func (stream, "%lu", 64 - value); break; + case 'l': + { + unsigned int even_reg = value << 1; + func (stream, "%s", arm_regnames[even_reg]); + } + break; + case 'u': + switch (value) + { + case 0: + func (stream, "1"); + break; + case 1: + func (stream, "2"); + break; + case 2: + func (stream, "4"); + break; + case 3: + func (stream, "8"); + break; + default: + break; + } + break; + case 'o': + print_mve_rotate (info, value, width); + break; case 'r': func (stream, "%s", arm_regnames[value]); break; case 'd': - func (stream, "%ld", value); - value_in_comment = value; + if (insn->mve_op == MVE_VQSHL_T2 + || insn->mve_op == MVE_VQSHLU_T3 + || insn->mve_op == MVE_VRSHR + || insn->mve_op == MVE_VRSHRN + || insn->mve_op == MVE_VSHL_T1 + || insn->mve_op == MVE_VSHLL_T1 + || insn->mve_op == MVE_VSHR + || insn->mve_op == MVE_VSHRN + || insn->mve_op == MVE_VSLI + || insn->mve_op == MVE_VSRI) + print_mve_shift_n (info, given, insn->mve_op); + else if (insn->mve_op == MVE_VSHLL_T2) + { + switch (value) + { + case 0x00: + func (stream, "8"); + break; + case 0x01: + func (stream, "16"); + break; + case 0x10: + print_mve_undefined (info, UNDEF_SIZE_0); + break; + default: + assert (0); + break; + } + } + else + { + if (insn->mve_op == MVE_VSHLC && value == 0) + value = 32; + func (stream, "%ld", value); + value_in_comment = value; + } break; case 'F': func (stream, "s%ld", value); @@ -7632,6 +9977,9 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info, long given) if (print_insn_neon (info, given, FALSE)) return; + if (print_insn_generic_coprocessor (pc, info, given, FALSE)) + return; + for (insn = 
arm_opcodes; insn->assembler; insn++) { if ((given & insn->mask) != insn->value) @@ -7790,7 +10138,13 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info, long given) case 'b': { bfd_vma disp = (((given & 0xffffff) ^ 0x800000) - 0x800000); - info->print_address_func (disp * 4 + pc + 8, info); + bfd_vma target = disp * 4 + pc + 8; + info->print_address_func (target, info); + + /* Fill in instruction information. */ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = target; } break; @@ -7831,12 +10185,12 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info, long given) unsigned int immed = (given & 0xff); unsigned int a, i; - a = (((immed << (32 - rotate)) - | (immed >> rotate)) & 0xffffffff); + a = (immed << ((32 - rotate) & 31) + | immed >> rotate) & 0xffffffff; /* If there is another encoding with smaller rotate, the rotate should be specified directly. */ for (i = 0; i < 32; i += 2) - if ((a << i | a >> (32 - i)) <= 0xff) + if ((a << i | a >> ((32 - i) & 31)) <= 0xff) break; if (i != rotate) @@ -7928,6 +10282,11 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info, long given) address += 2; info->print_address_func (address, info); + + /* Fill in instruction information. */ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = address; } break; @@ -7999,7 +10358,7 @@ print_insn_arm (bfd_vma pc, struct disassemble_info *info, long given) case 'T': /* We want register + 1 when decoding T. */ if (*c == 'T') - ++value; + value = (value + 1) & 0xf; if (c[1] == 'u') { @@ -8292,6 +10651,11 @@ print_insn_thumb16 (bfd_vma pc, struct disassemble_info *info, long given) + ((given & 0x00f8) >> 2) + ((given & 0x0200) >> 3)); info->print_address_func (address, info); + + /* Fill in instruction information. */ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = address; } break; @@ -8365,8 +10729,14 @@ print_insn_thumb16 (bfd_vma pc, struct disassemble_info *info, long given) case 'B': reg = ((reg ^ (1 << bitend)) - (1 << bitend)); - info->print_address_func (reg * 2 + pc + 4, info); + bfd_vma target = reg * 2 + pc + 4; + info->print_address_func (target, info); value_in_comment = 0; + + /* Fill in instruction information. */ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = target; break; case 'c': @@ -8469,6 +10839,12 @@ print_insn_thumb32 (bfd_vma pc, struct disassemble_info *info, long given) if (is_mve && print_insn_mve (info, given)) return; + if (print_insn_cde (info, given, TRUE)) + return; + + if (print_insn_generic_coprocessor (pc, info, given, TRUE)) + return; + for (insn = thumb32_opcodes; insn->assembler; insn++) if ((given & insn->mask) == insn->value) { @@ -8920,7 +11296,13 @@ print_insn_thumb32 (bfd_vma pc, struct disassemble_info *info, long given) offset |= (given & 0x000007ff) << 1; offset -= (1 << 20); - info->print_address_func (pc + 4 + offset, info); + bfd_vma target = pc + 4 + offset; + info->print_address_func (target, info); + + /* Fill in instruction information. */ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = target; } break; @@ -8944,6 +11326,11 @@ print_insn_thumb32 (bfd_vma pc, struct disassemble_info *info, long given) offset &= ~2u; info->print_address_func (offset, info); + + /* Fill in instruction information. 
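The immediate-printing hunk above does two related things for ARM data-processing immediates: the '& 31' masks keep a rotate of 0 from turning into a shift by 32 (undefined behaviour in C), and the loop searches for a smaller even rotation that could encode the same constant, printing the rotation explicitly only when the encoding is non-canonical. A from-scratch sketch of that test with an example; ror32 and smallest_rotate are illustrative names.

#include <stdio.h>

static unsigned int
ror32 (unsigned int x, unsigned int n)
{
  n &= 31;
  return n ? (x >> n) | (x << (32 - n)) : x;
}

/* Return the smallest even rotation that can encode the 32-bit
   constant A as an 8-bit immediate, or -1 if none exists.  */
static int
smallest_rotate (unsigned int a)
{
  unsigned int i;
  for (i = 0; i < 32; i += 2)
    if (((a << i) | (a >> ((32 - i) & 31))) <= 0xff)
      return (int) i;
  return -1;
}

int
main (void)
{
  /* immed = 0x0c with rotate = 2 encodes the constant 3, but 3 is also
     encodable with rotate 0, so this encoding is non-canonical and the
     disassembler prints the rotation explicitly.  */
  unsigned int a = ror32 (0x0c, 2);
  printf ("constant 0x%x, smallest rotate %d\n", a, smallest_rotate (a));
  return 0;
}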
*/ + info->insn_info_valid = 1; + info->insn_type = dis_branch; + info->target = offset; } break; @@ -9054,6 +11441,13 @@ print_insn_thumb32 (bfd_vma pc, struct disassemble_info *info, long given) switch (*c) { + case 's': + if (val <= 3) + func (stream, "%s", mve_vec_sizename[val]); + else + func (stream, ""); + break; + case 'd': func (stream, "%lu", val); value_in_comment = val; @@ -9217,6 +11611,36 @@ parse_arm_disassembler_options (const char *options) force_thumb = 1; else if (CONST_STRNEQ (opt, "no-force-thumb")) force_thumb = 0; + else if (CONST_STRNEQ (opt, "coproc")) + { + const char *procptr = opt + sizeof ("coproc") - 1; + char *endptr; + uint8_t coproc_number = strtol (procptr, &endptr, 10); + if (endptr != procptr + 1 || coproc_number > 7) + { + opcodes_error_handler (_("cde coprocessor not between 0-7: %s"), + opt); + continue; + } + if (*endptr != '=') + { + opcodes_error_handler (_("coproc must have an argument: %s"), + opt); + continue; + } + endptr += 1; + if (CONST_STRNEQ (endptr, "generic")) + cde_coprocs &= ~(1 << coproc_number); + else if (CONST_STRNEQ (endptr, "cde") + || CONST_STRNEQ (endptr, "CDE")) + cde_coprocs |= (1 << coproc_number); + else + { + opcodes_error_handler ( + _("coprocN argument takes options \"generic\"," + " \"cde\", or \"CDE\": %s"), opt); + } + } else /* xgettext: c-format */ opcodes_error_handler (_("unrecognised disassembler option: %s"), opt); @@ -9563,11 +11987,11 @@ select_arm_features (unsigned long mach, case bfd_mach_arm_7EM: ARM_SET_FEATURES (ARM_ARCH_V7EM); break; case bfd_mach_arm_8: { - /* Add bits for extensions that Armv8.5-A recognizes. */ - arm_feature_set armv8_5_ext_fset + /* Add bits for extensions that Armv8.6-A recognizes. */ + arm_feature_set armv8_6_ext_fset = ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST); - ARM_SET_FEATURES (ARM_ARCH_V8_5A); - ARM_MERGE_FEATURE_SETS (arch_fset, arch_fset, armv8_5_ext_fset); + ARM_SET_FEATURES (ARM_ARCH_V8_6A); + ARM_MERGE_FEATURE_SETS (arch_fset, arch_fset, armv8_6_ext_fset); break; } case bfd_mach_arm_8R: ARM_SET_FEATURES (ARM_ARCH_V8R); break; @@ -9575,11 +11999,18 @@ select_arm_features (unsigned long mach, case bfd_mach_arm_8M_MAIN: ARM_SET_FEATURES (ARM_ARCH_V8M_MAIN); break; case bfd_mach_arm_8_1M_MAIN: ARM_SET_FEATURES (ARM_ARCH_V8_1M_MAIN); + arm_feature_set mve_all + = ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP); + ARM_MERGE_FEATURE_SETS (arch_fset, arch_fset, mve_all); force_thumb = 1; break; /* If the machine type is unknown allow all architecture types and all - extensions. */ - case bfd_mach_arm_unknown: ARM_SET_FEATURES (ARM_FEATURE_ALL); break; + extensions, with the exception of MVE as that clashes with NEON. */ + case bfd_mach_arm_unknown: + ARM_SET_FEATURES (ARM_FEATURE (-1, + -1 & ~(ARM_EXT2_MVE | ARM_EXT2_MVE_FP), + -1)); + break; default: abort (); } @@ -9599,7 +12030,7 @@ static int print_insn (bfd_vma pc, struct disassemble_info *info, bfd_boolean little) { unsigned char b[4]; - long given; + unsigned long given; int status; int is_thumb = FALSE; int is_data = FALSE; @@ -9609,6 +12040,14 @@ print_insn (bfd_vma pc, struct disassemble_info *info, bfd_boolean little) bfd_boolean found = FALSE; struct arm_private_data *private_data; + /* Clear instruction information field. 
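The new 'coproc' disassembler option keeps one bit per coprocessor number in the static cde_coprocs mask, and print_insn_cde skips any CDE pattern whose coprocessor bit is clear. Judging from the parser above (a single digit immediately after "coproc", then "=cde", "=CDE" or "=generic"), the option is spelled like coproc0=cde, so something along the lines of objdump -d -M coproc0=cde would enable CDE decoding for coprocessor 0; that usage is inferred from the parsing code here, not verified elsewhere. The sketch below restates only the bookkeeping; set_coproc_cde and coproc_is_cde are illustrative names.

#include <stdint.h>

/* One bit per coprocessor 0-7: set means "decode this coprocessor's
   space as CDE", clear means "fall back to the generic coprocessor
   patterns".  Mirrors what parse_arm_disassembler_options does with
   cde_coprocs above.  */
static uint16_t coprocs;

static void
set_coproc_cde (unsigned int n, int is_cde)
{
  if (n > 7)
    return;                     /* The parser rejects numbers above 7.  */
  if (is_cde)
    coprocs |= (uint16_t) (1u << n);
  else
    coprocs &= (uint16_t) ~(1u << n);
}

/* print_insn_cde-style gate: a candidate CDE opcode is only matched if
   its coprocessor number was enabled on the command line.  */
static int
coproc_is_cde (unsigned int coproc)
{
  return (coprocs >> coproc) & 1;
}

int
main (void)
{
  set_coproc_cde (0, 1);        /* e.g. a "coproc0=cde" option.  */
  return coproc_is_cde (0) ? 0 : 1;
}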
*/ + info->insn_info_valid = 0; + info->branch_delay_insns = 0; + info->data_size = 0; + info->insn_type = dis_noninsn; + info->target = 0; + info->target2 = 0; + if (info->disassembler_options) { parse_arm_disassembler_options (info->disassembler_options); @@ -9779,9 +12218,9 @@ print_insn (bfd_vma pc, struct disassemble_info *info, bfd_boolean little) status = info->read_memory_func (pc, (bfd_byte *) b, 4, info); if (little_code) - given = (b[0]) | (b[1] << 8) | (b[2] << 16) | (b[3] << 24); + given = (b[0]) | (b[1] << 8) | (b[2] << 16) | ((unsigned) b[3] << 24); else - given = (b[3]) | (b[2] << 8) | (b[1] << 16) | (b[0] << 24); + given = (b[3]) | (b[2] << 8) | (b[1] << 16) | ((unsigned) b[0] << 24); } else {
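Switching 'given' to unsigned long and casting the top byte before the 24-bit shift is the usual guard against signed overflow: b[3] promotes to int, and shifting a byte with bit 7 set left by 24 overflows a 32-bit int. A standalone illustration of the pattern, not taken from the file:

#include <stdio.h>

int
main (void)
{
  unsigned char b[4] = { 0x40, 0x01, 0xb0, 0xff };   /* e.g. 0xffb00140.  */

  /* Without the cast, 0xff << 24 is evaluated in (signed) int and
     overflows; with it the arithmetic stays unsigned throughout.  */
  unsigned long given = b[0] | (b[1] << 8) | (b[2] << 16)
                        | ((unsigned) b[3] << 24);

  printf ("0x%08lx\n", given);   /* Prints 0xffb00140.  */
  return 0;
}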