X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=opcodes%2Faarch64-opc.c;h=d22e419e421e7f2342ebeb62c9506a024e38248f;hb=c0890d26289c4dad0e2ddedb7822a32a0645d150;hp=56a01693dc9d094e0f384735efd1a99e424eff75;hpb=f11ad6bc0fc44b94c6970115bb6984b497b967e7;p=deliverable%2Fbinutils-gdb.git diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c index 56a01693dc..d22e419e42 100644 --- a/opcodes/aarch64-opc.c +++ b/opcodes/aarch64-opc.c @@ -27,6 +27,7 @@ #include #include "opintl.h" +#include "libiberty.h" #include "aarch64-opc.h" @@ -34,6 +35,70 @@ int debug_dump = FALSE; #endif /* DEBUG_AARCH64 */ +/* The enumeration strings associated with each value of a 5-bit SVE + pattern operand. A null entry indicates a reserved meaning. */ +const char *const aarch64_sve_pattern_array[32] = { + /* 0-7. */ + "pow2", + "vl1", + "vl2", + "vl3", + "vl4", + "vl5", + "vl6", + "vl7", + /* 8-15. */ + "vl8", + "vl16", + "vl32", + "vl64", + "vl128", + "vl256", + 0, + 0, + /* 16-23. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + /* 24-31. */ + 0, + 0, + 0, + 0, + 0, + "mul4", + "mul3", + "all" +}; + +/* The enumeration strings associated with each value of a 4-bit SVE + prefetch operand. A null entry indicates a reserved meaning. */ +const char *const aarch64_sve_prfop_array[16] = { + /* 0-7. */ + "pldl1keep", + "pldl1strm", + "pldl2keep", + "pldl2strm", + "pldl3keep", + "pldl3strm", + 0, + 0, + /* 8-15. */ + "pstl1keep", + "pstl1strm", + "pstl2keep", + "pstl2strm", + "pstl3keep", + "pstl3strm", + 0, + 0 +}; + /* Helper functions to determine which operand to be used to encode/decode the size:Q fields for AdvSIMD instructions. */ @@ -199,6 +264,10 @@ const aarch64_field fields[] = { 31, 1 }, /* b5: in the test bit and branch instructions. */ { 19, 5 }, /* b40: in the test bit and branch instructions. */ { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */ + { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */ + { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */ + { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */ + { 17, 1 }, /* SVE_N: SVE equivalent of N. */ { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */ { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */ { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */ @@ -207,6 +276,11 @@ const aarch64_field fields[] = { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */ { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */ { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */ + { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */ + { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */ + { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */ + { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */ + { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */ { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */ { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */ { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */ @@ -214,7 +288,27 @@ const aarch64_field fields[] = { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */ { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */ { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */ + { 5, 1 }, /* SVE_i1: single-bit immediate. */ + { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */ + { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */ + { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */ + { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */ + { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */ + { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */ + { 5, 8 }, /* SVE_imm8: 8-bit immediate field. 
*/ + { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */ + { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */ + { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */ + { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */ + { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */ + { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */ + { 22, 1 }, /* SVE_sz: 1-bit element size select. */ + { 16, 4 }, /* SVE_tsz: triangular size select. */ { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */ + { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */ + { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */ + { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */ + { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */ }; enum aarch64_operand_class @@ -292,6 +386,8 @@ const struct aarch64_name_value_pair aarch64_operand_modifiers [] = {"sxth", 0x5}, {"sxtw", 0x6}, {"sxtx", 0x7}, + {"mul", 0x0}, + {"mul vl", 0x0}, {NULL, 0}, }; @@ -413,10 +509,11 @@ value_in_range_p (int64_t value, int low, int high) return (value >= low && value <= high) ? 1 : 0; } +/* Return true if VALUE is a multiple of ALIGN. */ static inline int value_aligned_p (int64_t value, int align) { - return ((value & (align - 1)) == 0) ? 1 : 0; + return (value % align) == 0; } /* A signed value fits in a field. */ @@ -603,6 +700,9 @@ struct operand_qualifier_data aarch64_opnd_qualifiers[] = {8, 2, 0x7, "2d", OQK_OPD_VARIANT}, {16, 1, 0x8, "1q", OQK_OPD_VARIANT}, + {0, 0, 0, "z", OQK_OPD_VARIANT}, + {0, 0, 0, "m", OQK_OPD_VARIANT}, + /* Qualifiers constraining the value range. First 3 fields: Lower bound, higher bound, unused. */ @@ -1233,6 +1333,18 @@ set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail, _("shift amount")); } +/* Report that the MUL modifier in operand IDX should be in the range + [LOWER_BOUND, UPPER_BOUND]. 
*/ +static inline void +set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail, + int idx, int lower_bound, int upper_bound) +{ + if (mismatch_detail == NULL) + return; + set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, + _("multiplier")); +} + static inline void set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx, int alignment) @@ -1284,9 +1396,10 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, const aarch64_opcode *opcode, aarch64_operand_error *mismatch_detail) { - unsigned num; + unsigned num, modifiers, shift; unsigned char size; - int64_t imm; + int64_t imm, min_value, max_value; + uint64_t uvalue, mask; const aarch64_opnd_info *opnd = opnds + idx; aarch64_opnd_qualifier_t qualifier = opnd->qualifier; @@ -1578,6 +1691,156 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, } break; + case AARCH64_OPND_SVE_ADDR_RI_S4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: + min_value = -8; + max_value = 7; + sve_imm_offset_vl: + assert (!opnd->addr.offset.is_reg); + assert (opnd->addr.preind); + num = 1 + get_operand_specific_data (&aarch64_operands[type]); + min_value *= num; + max_value *= num; + if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present) + || (opnd->shifter.operator_present + && opnd->shifter.kind != AARCH64_MOD_MUL_VL)) + { + set_other_error (mismatch_detail, idx, + _("invalid addressing mode")); + return 0; + } + if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value)) + { + set_offset_out_of_range_error (mismatch_detail, idx, + min_value, max_value); + return 0; + } + if (!value_aligned_p (opnd->addr.offset.imm, num)) + { + set_unaligned_error (mismatch_detail, idx, num); + return 0; + } + break; + + case AARCH64_OPND_SVE_ADDR_RI_S6xVL: + min_value = -32; + max_value = 31; + goto sve_imm_offset_vl; + + case AARCH64_OPND_SVE_ADDR_RI_S9xVL: + min_value = -256; + max_value = 255; + goto sve_imm_offset_vl; + + case AARCH64_OPND_SVE_ADDR_RI_U6: + case AARCH64_OPND_SVE_ADDR_RI_U6x2: + case AARCH64_OPND_SVE_ADDR_RI_U6x4: + case AARCH64_OPND_SVE_ADDR_RI_U6x8: + min_value = 0; + max_value = 63; + sve_imm_offset: + assert (!opnd->addr.offset.is_reg); + assert (opnd->addr.preind); + num = 1 << get_operand_specific_data (&aarch64_operands[type]); + min_value *= num; + max_value *= num; + if (opnd->shifter.operator_present + || opnd->shifter.amount_present) + { + set_other_error (mismatch_detail, idx, + _("invalid addressing mode")); + return 0; + } + if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value)) + { + set_offset_out_of_range_error (mismatch_detail, idx, + min_value, max_value); + return 0; + } + if (!value_aligned_p (opnd->addr.offset.imm, num)) + { + set_unaligned_error (mismatch_detail, idx, num); + return 0; + } + break; + + case AARCH64_OPND_SVE_ADDR_RR: + case AARCH64_OPND_SVE_ADDR_RR_LSL1: + case AARCH64_OPND_SVE_ADDR_RR_LSL2: + case AARCH64_OPND_SVE_ADDR_RR_LSL3: + case AARCH64_OPND_SVE_ADDR_RX: + case AARCH64_OPND_SVE_ADDR_RX_LSL1: + case AARCH64_OPND_SVE_ADDR_RX_LSL2: + case AARCH64_OPND_SVE_ADDR_RX_LSL3: + case AARCH64_OPND_SVE_ADDR_RZ: + case AARCH64_OPND_SVE_ADDR_RZ_LSL1: + case AARCH64_OPND_SVE_ADDR_RZ_LSL2: + case AARCH64_OPND_SVE_ADDR_RZ_LSL3: + modifiers = 1 << AARCH64_MOD_LSL; + sve_rr_operand: + assert (opnd->addr.offset.is_reg); + assert (opnd->addr.preind); + if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0 + && 
opnd->addr.offset.regno == 31) + { + set_other_error (mismatch_detail, idx, + _("index register xzr is not allowed")); + return 0; + } + if (((1 << opnd->shifter.kind) & modifiers) == 0 + || (opnd->shifter.amount + != get_operand_specific_data (&aarch64_operands[type]))) + { + set_other_error (mismatch_detail, idx, + _("invalid addressing mode")); + return 0; + } + break; + + case AARCH64_OPND_SVE_ADDR_RZ_XTW_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22: + modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW); + goto sve_rr_operand; + + case AARCH64_OPND_SVE_ADDR_ZI_U5: + case AARCH64_OPND_SVE_ADDR_ZI_U5x2: + case AARCH64_OPND_SVE_ADDR_ZI_U5x4: + case AARCH64_OPND_SVE_ADDR_ZI_U5x8: + min_value = 0; + max_value = 31; + goto sve_imm_offset; + + case AARCH64_OPND_SVE_ADDR_ZZ_LSL: + modifiers = 1 << AARCH64_MOD_LSL; + sve_zz_operand: + assert (opnd->addr.offset.is_reg); + assert (opnd->addr.preind); + if (((1 << opnd->shifter.kind) & modifiers) == 0 + || opnd->shifter.amount < 0 + || opnd->shifter.amount > 3) + { + set_other_error (mismatch_detail, idx, + _("invalid addressing mode")); + return 0; + } + break; + + case AARCH64_OPND_SVE_ADDR_ZZ_SXTW: + modifiers = (1 << AARCH64_MOD_SXTW); + goto sve_zz_operand; + + case AARCH64_OPND_SVE_ADDR_ZZ_UXTW: + modifiers = 1 << AARCH64_MOD_UXTW; + goto sve_zz_operand; + default: break; } @@ -1737,6 +2000,10 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, case AARCH64_OPND_UIMM7: case AARCH64_OPND_UIMM3_OP1: case AARCH64_OPND_UIMM3_OP2: + case AARCH64_OPND_SVE_UIMM3: + case AARCH64_OPND_SVE_UIMM7: + case AARCH64_OPND_SVE_UIMM8: + case AARCH64_OPND_SVE_UIMM8_53: size = get_operand_fields_width (get_operand_from_code (type)); assert (size < 32); if (!value_fit_unsigned_field_p (opnd->imm.value, size)) @@ -1747,6 +2014,22 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, } break; + case AARCH64_OPND_SIMM5: + case AARCH64_OPND_SVE_SIMM5: + case AARCH64_OPND_SVE_SIMM5B: + case AARCH64_OPND_SVE_SIMM6: + case AARCH64_OPND_SVE_SIMM8: + size = get_operand_fields_width (get_operand_from_code (type)); + assert (size < 32); + if (!value_fit_signed_field_p (opnd->imm.value, size)) + { + set_imm_out_of_range_error (mismatch_detail, idx, + -(1 << (size - 1)), + (1 << (size - 1)) - 1); + return 0; + } + break; + case AARCH64_OPND_WIDTH: assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM && opnds[0].type == AARCH64_OPND_Rd); @@ -1761,6 +2044,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, break; case AARCH64_OPND_LIMM: + case AARCH64_OPND_SVE_LIMM: { int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); uint64_t uimm = opnd->imm.value; @@ -1907,6 +2191,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, case AARCH64_OPND_FPIMM: case AARCH64_OPND_SIMD_FPIMM: + case AARCH64_OPND_SVE_FPIMM8: if (opnd->imm.is_fp == 0) { set_other_error (mismatch_detail, idx, @@ -1931,6 +2216,150 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, } break; + case AARCH64_OPND_SVE_AIMM: + min_value = 0; + sve_aimm: + assert (opnd->shifter.kind == AARCH64_MOD_LSL); + size = aarch64_get_qualifier_esize (opnds[0].qualifier); + mask = ~((uint64_t) -1 << (size * 4) << (size * 4)); + uvalue = 
opnd->imm.value; + shift = opnd->shifter.amount; + if (size == 1) + { + if (shift != 0) + { + set_other_error (mismatch_detail, idx, + _("no shift amount allowed for" + " 8-bit constants")); + return 0; + } + } + else + { + if (shift != 0 && shift != 8) + { + set_other_error (mismatch_detail, idx, + _("shift amount must be 0 or 8")); + return 0; + } + if (shift == 0 && (uvalue & 0xff) == 0) + { + shift = 8; + uvalue = (int64_t) uvalue / 256; + } + } + mask >>= shift; + if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue) + { + set_other_error (mismatch_detail, idx, + _("immediate too big for element size")); + return 0; + } + uvalue = (uvalue - min_value) & mask; + if (uvalue > 0xff) + { + set_other_error (mismatch_detail, idx, + _("invalid arithmetic immediate")); + return 0; + } + break; + + case AARCH64_OPND_SVE_ASIMM: + min_value = -128; + goto sve_aimm; + + case AARCH64_OPND_SVE_I1_HALF_ONE: + assert (opnd->imm.is_fp); + if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000) + { + set_other_error (mismatch_detail, idx, + _("floating-point value must be 0.5 or 1.0")); + return 0; + } + break; + + case AARCH64_OPND_SVE_I1_HALF_TWO: + assert (opnd->imm.is_fp); + if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000) + { + set_other_error (mismatch_detail, idx, + _("floating-point value must be 0.5 or 2.0")); + return 0; + } + break; + + case AARCH64_OPND_SVE_I1_ZERO_ONE: + assert (opnd->imm.is_fp); + if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000) + { + set_other_error (mismatch_detail, idx, + _("floating-point value must be 0.0 or 1.0")); + return 0; + } + break; + + case AARCH64_OPND_SVE_INV_LIMM: + { + int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); + uint64_t uimm = ~opnd->imm.value; + if (!aarch64_logical_immediate_p (uimm, esize, NULL)) + { + set_other_error (mismatch_detail, idx, + _("immediate out of range")); + return 0; + } + } + break; + + case AARCH64_OPND_SVE_LIMM_MOV: + { + int esize = aarch64_get_qualifier_esize (opnds[0].qualifier); + uint64_t uimm = opnd->imm.value; + if (!aarch64_logical_immediate_p (uimm, esize, NULL)) + { + set_other_error (mismatch_detail, idx, + _("immediate out of range")); + return 0; + } + if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize)) + { + set_other_error (mismatch_detail, idx, + _("invalid replicated MOV immediate")); + return 0; + } + } + break; + + case AARCH64_OPND_SVE_PATTERN_SCALED: + assert (opnd->shifter.kind == AARCH64_MOD_MUL); + if (!value_in_range_p (opnd->shifter.amount, 1, 16)) + { + set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16); + return 0; + } + break; + + case AARCH64_OPND_SVE_SHLIMM_PRED: + case AARCH64_OPND_SVE_SHLIMM_UNPRED: + size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier); + if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1)) + { + set_imm_out_of_range_error (mismatch_detail, idx, + 0, 8 * size - 1); + return 0; + } + break; + + case AARCH64_OPND_SVE_SHRIMM_PRED: + case AARCH64_OPND_SVE_SHRIMM_UNPRED: + size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier); + if (!value_in_range_p (opnd->imm.value, 1, 8 * size)) + { + set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size); + return 0; + } + break; + default: break; } @@ -2237,6 +2666,17 @@ static const char *int_reg[2][2][32] = { #undef R64 #undef R32 }; + +/* Names of the SVE vector registers, first with .S suffixes, + then with .D suffixes. 
*/ + +static const char *sve_reg[2][32] = { +#define ZS(X) "z" #X ".s" +#define ZD(X) "z" #X ".d" + BANK (ZS, ZS (31)), BANK (ZD, ZD (31)) +#undef ZD +#undef ZS +}; #undef BANK /* Return the integer register name. @@ -2280,6 +2720,17 @@ get_offset_int_reg_name (const aarch64_opnd_info *opnd) } } +/* Get the name of the SVE vector offset register in OPND, using the operand + qualifier to decide whether the suffix should be .S or .D. */ + +static inline const char * +get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier) +{ + assert (qualifier == AARCH64_OPND_QLF_S_S + || qualifier == AARCH64_OPND_QLF_S_D); + return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno]; +} + /* Types for expanding an encoded 8-bit value to a floating-point value. */ typedef union @@ -2419,7 +2870,13 @@ print_immediate_offset_address (char *buf, size_t size, } else { - if (opnd->addr.offset.imm) + if (opnd->shifter.operator_present) + { + assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL); + snprintf (buf, size, "[%s,#%d,mul vl]", + base, opnd->addr.offset.imm); + } + else if (opnd->addr.offset.imm) snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm); else snprintf (buf, size, "[%s]", base); @@ -2455,7 +2912,8 @@ print_register_offset_address (char *buf, size_t size, if (print_extend_p) { if (print_amount_p) - snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount); + snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name, + opnd->shifter.amount); else snprintf (tb, sizeof (tb), ",%s", shift_name); } @@ -2486,7 +2944,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, const char *name = NULL; const aarch64_opnd_info *opnd = opnds + idx; enum aarch64_modifier_kind kind; - uint64_t addr; + uint64_t addr, enum_value; buf[0] = '\0'; if (pcrel_p) @@ -2503,6 +2961,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_Ra: case AARCH64_OPND_Rt_SYS: case AARCH64_OPND_PAIRREG: + case AARCH64_OPND_SVE_Rm: /* The optional-ness of in e.g. IC {, } is determined by the , therefore we we use opnd->present to override the generic optional-ness information. 
*/ @@ -2520,6 +2979,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_Rd_SP: case AARCH64_OPND_Rn_SP: + case AARCH64_OPND_SVE_Rn_SP: assert (opnd->qualifier == AARCH64_OPND_QLF_W || opnd->qualifier == AARCH64_OPND_QLF_WSP || opnd->qualifier == AARCH64_OPND_QLF_X @@ -2550,7 +3010,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, } } if (opnd->shifter.amount) - snprintf (buf, size, "%s, %s #%d", + snprintf (buf, size, "%s, %s #%" PRIi64, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0), aarch64_operand_modifiers[kind].name, opnd->shifter.amount); @@ -2567,7 +3027,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, snprintf (buf, size, "%s", get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)); else - snprintf (buf, size, "%s, %s #%d", + snprintf (buf, size, "%s, %s #%" PRIi64, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0), aarch64_operand_modifiers[opnd->shifter.kind].name, opnd->shifter.amount); @@ -2582,6 +3042,10 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_Sd: case AARCH64_OPND_Sn: case AARCH64_OPND_Sm: + case AARCH64_OPND_SVE_VZn: + case AARCH64_OPND_SVE_Vd: + case AARCH64_OPND_SVE_Vm: + case AARCH64_OPND_SVE_Vn: snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier), opnd->reg.regno); break; @@ -2623,6 +3087,10 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_SVE_Pt: if (opnd->qualifier == AARCH64_OPND_QLF_NIL) snprintf (buf, size, "p%d", opnd->reg.regno); + else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z + || opnd->qualifier == AARCH64_OPND_QLF_P_M) + snprintf (buf, size, "p%d/%s", opnd->reg.regno, + aarch64_get_qualifier_name (opnd->qualifier)); else snprintf (buf, size, "p%d.%s", opnd->reg.regno, aarch64_get_qualifier_name (opnd->qualifier)); @@ -2671,9 +3139,73 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_IMMR: case AARCH64_OPND_IMMS: case AARCH64_OPND_FBITS: + case AARCH64_OPND_SIMM5: + case AARCH64_OPND_SVE_SHLIMM_PRED: + case AARCH64_OPND_SVE_SHLIMM_UNPRED: + case AARCH64_OPND_SVE_SHRIMM_PRED: + case AARCH64_OPND_SVE_SHRIMM_UNPRED: + case AARCH64_OPND_SVE_SIMM5: + case AARCH64_OPND_SVE_SIMM5B: + case AARCH64_OPND_SVE_SIMM6: + case AARCH64_OPND_SVE_SIMM8: + case AARCH64_OPND_SVE_UIMM3: + case AARCH64_OPND_SVE_UIMM7: + case AARCH64_OPND_SVE_UIMM8: + case AARCH64_OPND_SVE_UIMM8_53: snprintf (buf, size, "#%" PRIi64, opnd->imm.value); break; + case AARCH64_OPND_SVE_I1_HALF_ONE: + case AARCH64_OPND_SVE_I1_HALF_TWO: + case AARCH64_OPND_SVE_I1_ZERO_ONE: + { + single_conv_t c; + c.i = opnd->imm.value; + snprintf (buf, size, "#%.1f", c.f); + break; + } + + case AARCH64_OPND_SVE_PATTERN: + if (optional_operand_p (opcode, idx) + && opnd->imm.value == get_optional_operand_default_value (opcode)) + break; + enum_value = opnd->imm.value; + assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array)); + if (aarch64_sve_pattern_array[enum_value]) + snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]); + else + snprintf (buf, size, "#%" PRIi64, opnd->imm.value); + break; + + case AARCH64_OPND_SVE_PATTERN_SCALED: + if (optional_operand_p (opcode, idx) + && !opnd->shifter.operator_present + && opnd->imm.value == get_optional_operand_default_value (opcode)) + break; + enum_value = opnd->imm.value; + assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array)); + if (aarch64_sve_pattern_array[opnd->imm.value]) + snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]); + else + 
snprintf (buf, size, "#%" PRIi64, opnd->imm.value); + if (opnd->shifter.operator_present) + { + size_t len = strlen (buf); + snprintf (buf + len, size - len, ", %s #%" PRIi64, + aarch64_operand_modifiers[opnd->shifter.kind].name, + opnd->shifter.amount); + } + break; + + case AARCH64_OPND_SVE_PRFOP: + enum_value = opnd->imm.value; + assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array)); + if (aarch64_sve_prfop_array[enum_value]) + snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]); + else + snprintf (buf, size, "#%" PRIi64, opnd->imm.value); + break; + case AARCH64_OPND_IMM_MOV: switch (aarch64_get_qualifier_esize (opnds[0].qualifier)) { @@ -2698,8 +3230,11 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_LIMM: case AARCH64_OPND_AIMM: case AARCH64_OPND_HALF: + case AARCH64_OPND_SVE_INV_LIMM: + case AARCH64_OPND_SVE_LIMM: + case AARCH64_OPND_SVE_LIMM_MOV: if (opnd->shifter.amount) - snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value, + snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value, opnd->shifter.amount); else snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value); @@ -2711,13 +3246,23 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, || opnd->shifter.kind == AARCH64_MOD_NONE) snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value); else - snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value, + snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value, aarch64_operand_modifiers[opnd->shifter.kind].name, opnd->shifter.amount); break; + case AARCH64_OPND_SVE_AIMM: + case AARCH64_OPND_SVE_ASIMM: + if (opnd->shifter.amount) + snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value, + opnd->shifter.amount); + else + snprintf (buf, size, "#%" PRIi64, opnd->imm.value); + break; + case AARCH64_OPND_FPIMM: case AARCH64_OPND_SIMD_FPIMM: + case AARCH64_OPND_SVE_FPIMM8: switch (aarch64_get_qualifier_esize (opnds[0].qualifier)) { case 2: /* e.g. FMOV , #. 
*/ @@ -2809,18 +3354,71 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, break; case AARCH64_OPND_ADDR_REGOFF: + case AARCH64_OPND_SVE_ADDR_RR: + case AARCH64_OPND_SVE_ADDR_RR_LSL1: + case AARCH64_OPND_SVE_ADDR_RR_LSL2: + case AARCH64_OPND_SVE_ADDR_RR_LSL3: + case AARCH64_OPND_SVE_ADDR_RX: + case AARCH64_OPND_SVE_ADDR_RX_LSL1: + case AARCH64_OPND_SVE_ADDR_RX_LSL2: + case AARCH64_OPND_SVE_ADDR_RX_LSL3: print_register_offset_address (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), get_offset_int_reg_name (opnd)); break; + case AARCH64_OPND_SVE_ADDR_RZ: + case AARCH64_OPND_SVE_ADDR_RZ_LSL1: + case AARCH64_OPND_SVE_ADDR_RZ_LSL2: + case AARCH64_OPND_SVE_ADDR_RZ_LSL3: + case AARCH64_OPND_SVE_ADDR_RZ_XTW_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22: + case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14: + case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22: + print_register_offset_address + (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1), + get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier)); + break; + case AARCH64_OPND_ADDR_SIMM7: case AARCH64_OPND_ADDR_SIMM9: case AARCH64_OPND_ADDR_SIMM9_2: + case AARCH64_OPND_SVE_ADDR_RI_S4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S6xVL: + case AARCH64_OPND_SVE_ADDR_RI_S9xVL: + case AARCH64_OPND_SVE_ADDR_RI_U6: + case AARCH64_OPND_SVE_ADDR_RI_U6x2: + case AARCH64_OPND_SVE_ADDR_RI_U6x4: + case AARCH64_OPND_SVE_ADDR_RI_U6x8: print_immediate_offset_address (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1)); break; + case AARCH64_OPND_SVE_ADDR_ZI_U5: + case AARCH64_OPND_SVE_ADDR_ZI_U5x2: + case AARCH64_OPND_SVE_ADDR_ZI_U5x4: + case AARCH64_OPND_SVE_ADDR_ZI_U5x8: + print_immediate_offset_address + (buf, size, opnd, + get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier)); + break; + + case AARCH64_OPND_SVE_ADDR_ZZ_LSL: + case AARCH64_OPND_SVE_ADDR_ZZ_SXTW: + case AARCH64_OPND_SVE_ADDR_ZZ_UXTW: + print_register_offset_address + (buf, size, opnd, + get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier), + get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier)); + break; + case AARCH64_OPND_ADDR_UIMM12: name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); if (opnd->addr.offset.imm) @@ -3591,6 +4189,33 @@ verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED, return TRUE; } +/* Return true if VALUE cannot be moved into an SVE register using DUP + (with any element size, not just ESIZE) and if using DUPM would + therefore be OK. ESIZE is the number of bytes in the immediate. */ + +bfd_boolean +aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize) +{ + int64_t svalue = uvalue; + uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4); + + if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue) + return FALSE; + if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32)) + { + svalue = (int32_t) uvalue; + if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16)) + { + svalue = (int16_t) uvalue; + if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8)) + return FALSE; + } + } + if ((svalue & 0xff) == 0) + svalue /= 256; + return svalue < -128 || svalue >= 128; +} + /* Include the opcode description table as well as the operand description table. 
*/ #define VERIFIER(x) verify_##x
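
The new aarch64_sve_dupm_mov_immediate_p predicate above decides whether a logical immediate accepted for MOV has to be printed via DUPM rather than DUP: SVE DUP (immediate) only takes an 8-bit signed immediate, optionally shifted left by 8, replicated across the element size. The sketch below is not part of the patch; it is a minimal standalone illustration that copies the function body verbatim under a local name, substitutes plain int for bfd_boolean so it builds without the binutils headers, and drives it from a hypothetical main() with two sample .H constants.

/* Standalone sketch (assumed harness, not binutils code): exercise the
   DUP-vs-DUPM immediate test from the patch above.  */
#include <stdint.h>
#include <stdio.h>

static int
sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* Values that are neither zero- nor sign-extended from ESIZE bytes are
     not candidates at all.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return 0;
  /* Narrow SVALUE to the smallest element size whose replication still
     reproduces UVALUE; if it replicates down to bytes, DUP .B covers it.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return 0;
	}
    }
  /* DUP also accepts an 8-bit immediate shifted left by 8, so strip one
     trailing zero byte before checking the signed 8-bit range.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* Nonzero means DUP cannot encode the value and DUPM is needed.  */
  return svalue < -128 || svalue >= 128;
}

int
main (void)
{
  printf ("0xff00 -> %d\n", sve_dupm_mov_immediate_p (0xff00, 2));
  printf ("0x0ff0 -> %d\n", sve_dupm_mov_immediate_p (0x0ff0, 2));
  return 0;
}

With these inputs the sketch should print 0 for 0xff00 (DUP Z.H, #-1, LSL #8 can produce it) and 1 for 0x0ff0 (no DUP encoding exists), which is the distinction the printer uses when choosing between the DUP-based and DUPM-based forms of MOV.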