X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=opcodes%2Faarch64-opc.c;h=61547b403df47a3d23361019799b00e285290969;hb=77ad80922bf3042536fc41b19cc0bf230ea485b4;hp=b11bc33a811c97b221af8fcf7d62b6ea863293d1;hpb=70f3d23af74dd6a1f90aec8748424cf0b5a953a5;p=deliverable%2Fbinutils-gdb.git

diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index b11bc33a81..61547b403d 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -1,5 +1,5 @@
 /* aarch64-opc.c -- AArch64 opcode support.
-   Copyright (C) 2009-2018 Free Software Foundation, Inc.
+   Copyright (C) 2009-2019 Free Software Foundation, Inc.
    Contributed by ARM Ltd.
 
    This file is part of the GNU opcodes library.
@@ -22,7 +22,7 @@
 #include <assert.h>
 #include <stdlib.h>
 #include <stdio.h>
-#include <stdint.h>
+#include "bfd_stdint.h"
 #include <stdarg.h>
 #include <inttypes.h>
 
@@ -294,6 +294,9 @@ const aarch64_field fields[] =
     { 0, 5 },   /* SVE_Zt: SVE vector register, bits [4,0].  */
     { 5, 1 },   /* SVE_i1: single-bit immediate.  */
     { 22, 1 },  /* SVE_i3h: high bit of 3-bit immediate.  */
+    { 11, 1 },  /* SVE_i3l: low bit of 3-bit immediate.  */
+    { 19, 2 },  /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
+    { 20, 1 },  /* SVE_i2h: high bit of 2bit immediate, bits.  */
     { 16, 3 },  /* SVE_imm3: 3-bit immediate field.  */
     { 16, 4 },  /* SVE_imm4: 4-bit immediate field.  */
     { 5, 5 },   /* SVE_imm5: 5-bit immediate field.  */
@@ -309,7 +312,10 @@ const aarch64_field fields[] =
     { 0, 4 },   /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
     { 16, 1 },  /* SVE_rot1: 1-bit rotation amount.  */
     { 10, 2 },  /* SVE_rot2: 2-bit rotation amount.  */
+    { 10, 1 },  /* SVE_rot3: 1-bit rotation amount at bit 10.  */
     { 22, 1 },  /* SVE_sz: 1-bit element size select.  */
+    { 17, 2 },  /* SVE_size: 2-bit element size, bits [18,17].  */
+    { 30, 1 },  /* SVE_sz2: 1-bit element size select.  */
     { 16, 4 },  /* SVE_tsz: triangular size select.  */
     { 22, 2 },  /* SVE_tszh: triangular size select high, bits [23,22].  */
     { 8, 2 },   /* SVE_tszl_8: triangular size select low, bits [9,8].  */
@@ -320,6 +326,7 @@ const aarch64_field fields[] =
     { 13, 2 },  /* rotate2: Indexed element FCMLA immediate rotate.  */
     { 12, 1 },  /* rotate3: FCADD immediate rotate.  */
     { 12, 2 },  /* SM3: Indexed element SM3 2 bits index immediate.  */
+    { 22, 1 },  /* sz: 1-bit element size select.  */
 };
 
 enum aarch64_operand_class
@@ -705,6 +712,7 @@ struct operand_qualifier_data aarch64_opnd_qualifiers[] =
   {8, 1, 0x3, "d", OQK_OPD_VARIANT},
   {16, 1, 0x4, "q", OQK_OPD_VARIANT},
   {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
+  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
 
   {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
   {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
@@ -1512,6 +1520,8 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
         {
         case AARCH64_OPND_SVE_Zm3_INDEX:
         case AARCH64_OPND_SVE_Zm3_22_INDEX:
+        case AARCH64_OPND_SVE_Zm3_11_INDEX:
+        case AARCH64_OPND_SVE_Zm4_11_INDEX:
         case AARCH64_OPND_SVE_Zm4_INDEX:
           size = get_operand_fields_width (get_operand_from_code (type));
           shift = get_operand_specific_data (&aarch64_operands[type]);
@@ -1602,7 +1612,6 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
           break;
         case ldst_imm9:
         case ldstpair_indexed:
-        case ldstgv_indexed:
        case asisdlsep:
         case asisdlsop:
           if (opnd->addr.writeback == 0)
@@ -1890,10 +1899,22 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
           break;
 
         case AARCH64_OPND_SVE_ADDR_RI_S4x16:
+        case AARCH64_OPND_SVE_ADDR_RI_S4x32:
           min_value = -8;
           max_value = 7;
           goto sve_imm_offset;
 
+        case AARCH64_OPND_SVE_ADDR_ZX:
+          /* Everything is already ensured by parse_operands or
+             aarch64_ext_sve_addr_rr_lsl (because this is a very specific
+             argument type).  */
+          assert (opnd->addr.offset.is_reg);
+          assert (opnd->addr.preind);
+          assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
+          assert (opnd->shifter.kind == AARCH64_MOD_LSL);
+          assert (opnd->shifter.operator_present == 0);
+          break;
+
         case AARCH64_OPND_SVE_ADDR_R:
         case AARCH64_OPND_SVE_ADDR_RR:
         case AARCH64_OPND_SVE_ADDR_RR_LSL1:
@@ -2125,6 +2146,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
         case AARCH64_OPND_NZCV:
         case AARCH64_OPND_CCMP_IMM:
         case AARCH64_OPND_EXCEPTION:
+        case AARCH64_OPND_TME_UIMM16:
         case AARCH64_OPND_UIMM4:
         case AARCH64_OPND_UIMM4_ADDG:
         case AARCH64_OPND_UIMM7:
@@ -2230,6 +2252,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
         case AARCH64_OPND_IMM_ROT3:
         case AARCH64_OPND_SVE_IMM_ROT1:
+        case AARCH64_OPND_SVE_IMM_ROT3:
           if (opnd->imm.value != 90 && opnd->imm.value != 270)
             {
               set_other_error (mismatch_detail, idx,
@@ -2510,6 +2533,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
         case AARCH64_OPND_SVE_SHLIMM_PRED:
         case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+        case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
           size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
           if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
             {
@@ -2521,10 +2545,12 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
         case AARCH64_OPND_SVE_SHRIMM_PRED:
         case AARCH64_OPND_SVE_SHRIMM_UNPRED:
-          size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
+        case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
+          num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
+          size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
           if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
             {
-              set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
+              set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
               return 0;
             }
           break;
@@ -3036,7 +3062,12 @@ print_immediate_offset_address (char *buf, size_t size,
   if (opnd->addr.writeback)
     {
       if (opnd->addr.preind)
-        snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
+        {
+          if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
+            snprintf (buf, size, "[%s]!", base);
+          else
+            snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
+        }
       else
         snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
     }
@@ -3156,6 +3187,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 
     case AARCH64_OPND_Rd_SP:
     case AARCH64_OPND_Rn_SP:
+    case AARCH64_OPND_Rt_SP:
     case AARCH64_OPND_SVE_Rn_SP:
     case AARCH64_OPND_Rm_SP:
       assert (opnd->qualifier == AARCH64_OPND_QLF_W
@@ -3298,6 +3330,8 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 
     case AARCH64_OPND_SVE_Zm3_INDEX:
     case AARCH64_OPND_SVE_Zm3_22_INDEX:
+    case AARCH64_OPND_SVE_Zm3_11_INDEX:
+    case AARCH64_OPND_SVE_Zm4_11_INDEX:
     case AARCH64_OPND_SVE_Zm4_INDEX:
     case AARCH64_OPND_SVE_Zn_INDEX:
       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
@@ -3325,11 +3359,14 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_IMMR:
     case AARCH64_OPND_IMMS:
     case AARCH64_OPND_FBITS:
+    case AARCH64_OPND_TME_UIMM16:
     case AARCH64_OPND_SIMM5:
     case AARCH64_OPND_SVE_SHLIMM_PRED:
     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
     case AARCH64_OPND_SVE_SHRIMM_PRED:
     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
     case AARCH64_OPND_SVE_SIMM5:
     case AARCH64_OPND_SVE_SIMM5B:
     case AARCH64_OPND_SVE_SIMM6:
@@ -3343,6 +3380,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_IMM_ROT3:
     case AARCH64_OPND_SVE_IMM_ROT1:
     case AARCH64_OPND_SVE_IMM_ROT2:
+    case AARCH64_OPND_SVE_IMM_ROT3:
       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
       break;
 
@@ -3557,11 +3595,6 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
       snprintf (buf, size, "[%s]", name);
       break;
 
-    case AARCH64_OPND_ADDR_SIMPLE_2:
-      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
-      snprintf (buf, size, "[%s]!", name);
-      break;
-
     case AARCH64_OPND_ADDR_REGOFF:
     case AARCH64_OPND_SVE_ADDR_R:
     case AARCH64_OPND_SVE_ADDR_RR:
@@ -3577,6 +3610,13 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
          get_offset_int_reg_name (opnd));
       break;
 
+    case AARCH64_OPND_SVE_ADDR_ZX:
+      print_register_offset_address
+        (buf, size, opnd,
+         get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
+         get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
+      break;
+
     case AARCH64_OPND_SVE_ADDR_RZ:
     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
@@ -3602,6 +3642,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_ADDR_SIMM13:
     case AARCH64_OPND_ADDR_OFFSET:
     case AARCH64_OPND_SVE_ADDR_RI_S4x16:
+    case AARCH64_OPND_SVE_ADDR_RI_S4x32:
     case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
     case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
     case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
@@ -3930,13 +3971,14 @@ const aarch64_sys_reg aarch64_sys_regs [] =
   { "rndr",        CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
   { "rndrrs",      CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
   { "tco",         CPENC(3,3,C4,C2,7), F_ARCHEXT },
-  { "tfsre0_el1",  CPENC(3,0,C6,C6,1), F_ARCHEXT },
-  { "tfsr_el1",    CPENC(3,0,C6,C5,0), F_ARCHEXT },
-  { "tfsr_el2",    CPENC(3,4,C6,C5,0), F_ARCHEXT },
-  { "tfsr_el3",    CPENC(3,6,C6,C6,0), F_ARCHEXT },
-  { "tfsr_el12",   CPENC(3,5,C6,C6,0), F_ARCHEXT },
+  { "tfsre0_el1",  CPENC(3,0,C5,C6,1), F_ARCHEXT },
+  { "tfsr_el1",    CPENC(3,0,C5,C6,0), F_ARCHEXT },
+  { "tfsr_el2",    CPENC(3,4,C5,C6,0), F_ARCHEXT },
+  { "tfsr_el3",    CPENC(3,6,C5,C6,0), F_ARCHEXT },
+  { "tfsr_el12",   CPENC(3,5,C5,C6,0), F_ARCHEXT },
   { "rgsr_el1",    CPENC(3,0,C1,C0,5), F_ARCHEXT },
   { "gcr_el1",     CPENC(3,0,C1,C0,6), F_ARCHEXT },
+  { "gmid_el1",    CPENC(3,1,C0,C0,4), F_ARCHEXT | F_REG_READ }, /* RO */
   { "tpidr_el0",   CPENC(3,3,C13,C0,2), 0 },
   { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
   { "tpidr_el1",   CPENC(3,0,C13,C0,4), 0 },
@@ -4402,13 +4444,14 @@ aarch64_sys_reg_supported_p (const aarch64_feature_set features,
 
   /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
   if ((reg->value == CPENC (3, 3, C4, C2, 7)
-       || reg->value == CPENC (3, 0, C6, C6, 1)
-       || reg->value == CPENC (3, 0, C6, C5, 0)
-       || reg->value == CPENC (3, 4, C6, C5, 0)
-       || reg->value == CPENC (3, 6, C6, C6, 0)
-       || reg->value == CPENC (3, 5, C6, C6, 0)
+       || reg->value == CPENC (3, 0, C5, C6, 1)
+       || reg->value == CPENC (3, 0, C5, C6, 0)
+       || reg->value == CPENC (3, 4, C5, C6, 0)
+       || reg->value == CPENC (3, 6, C5, C6, 0)
+       || reg->value == CPENC (3, 5, C5, C6, 0)
        || reg->value == CPENC (3, 0, C1, C0, 5)
-       || reg->value == CPENC (3, 0, C1, C0, 6))
+       || reg->value == CPENC (3, 0, C1, C0, 6)
+       || reg->value == CPENC (3, 1, C0, C0, 4))
       && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
     return FALSE;
 
@@ -4481,15 +4524,33 @@ const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
 {
     { "zva",      CPENS (3, C7, C4, 1),  F_HASXT },
+    { "gva",      CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
+    { "gzva",     CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
     { "ivac",     CPENS (0, C7, C6, 1),  F_HASXT },
+    { "igvac",    CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
+    { "igsw",     CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
     { "isw",      CPENS (0, C7, C6, 2),  F_HASXT },
+    { "igdvac",   CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
+    { "igdsw",    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
     { "cvac",     CPENS (3, C7, C10, 1), F_HASXT },
+    { "cgvac",    CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
+    { "cgdvac",   CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
     { "csw",      CPENS (0, C7, C10, 2), F_HASXT },
+    { "cgsw",     CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
+    { "cgdsw",    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
     { "cvau",     CPENS (3, C7, C11, 1), F_HASXT },
     { "cvap",     CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
+    { "cgvap",    CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
+    { "cgdvap",   CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
     { "cvadp",    CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
+    { "cgvadp",   CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
+    { "cgdvadp",  CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
     { "civac",    CPENS (3, C7, C14, 1), F_HASXT },
+    { "cigvac",   CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
+    { "cigdvac",  CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
     { "cisw",     CPENS (0, C7, C14, 2), F_HASXT },
+    { "cigsw",    CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
+    { "cigdsw",   CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
     { 0,          CPENS(0,0,0,0),        0 }
 };
 
@@ -4632,6 +4693,28 @@ aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
       && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
     return FALSE;
 
+  /* DC for ARMv8.5-A Memory Tagging Extension.  */
+  if ((reg->value == CPENS (0, C7, C6, 3)
+       || reg->value == CPENS (0, C7, C6, 4)
+       || reg->value == CPENS (0, C7, C10, 4)
+       || reg->value == CPENS (0, C7, C14, 4)
+       || reg->value == CPENS (3, C7, C10, 3)
+       || reg->value == CPENS (3, C7, C12, 3)
+       || reg->value == CPENS (3, C7, C13, 3)
+       || reg->value == CPENS (3, C7, C14, 3)
+       || reg->value == CPENS (3, C7, C4, 3)
+       || reg->value == CPENS (0, C7, C6, 5)
+       || reg->value == CPENS (0, C7, C6, 6)
+       || reg->value == CPENS (0, C7, C10, 6)
+       || reg->value == CPENS (0, C7, C14, 6)
+       || reg->value == CPENS (3, C7, C10, 5)
+       || reg->value == CPENS (3, C7, C12, 5)
+       || reg->value == CPENS (3, C7, C13, 5)
+       || reg->value == CPENS (3, C7, C14, 5)
+       || reg->value == CPENS (3, C7, C4, 4))
+      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
+    return FALSE;
+
   /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
   if ((reg->value == CPENS (0, C7, C9, 0)
        || reg->value == CPENS (0, C7, C9, 1))
@@ -4694,6 +4777,29 @@ verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
   return ERR_OK;
 }
 
+/* Verifier for vector by element 3 operands functions where the
+   conditions `if sz:L == 11 then UNDEFINED` holds.  */
+
+static enum err_type
+verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
+                bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
+                aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
+                aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
+{
+  const aarch64_insn undef_pattern = 0x3;
+  aarch64_insn value;
+
+  assert (inst->opcode);
+  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
+  value = encoding ? inst->value : insn;
+  assert (value);
+
+  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
+    return ERR_UND;
+
+  return ERR_OK;
+}
+
 /* Initialize an instruction sequence insn_sequence with the instruction INST.
    If INST is NULL the given insn_sequence is cleared and the sequence is left
    uninitialized.  */
@@ -4806,7 +4912,9 @@ verify_constraints (const struct aarch64_inst *inst,
         {
           /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
              instruction for better error messages.  */
-          if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
+          if (!opcode->avariant
+              || !(*opcode->avariant &
+                   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
             {
               mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
               mismatch_detail->error = _("SVE instruction expected after "
@@ -4867,10 +4975,6 @@ verify_constraints (const struct aarch64_inst *inst,
               case AARCH64_OPND_Vm:
               case AARCH64_OPND_Sn:
               case AARCH64_OPND_Sm:
-              case AARCH64_OPND_Rn:
-              case AARCH64_OPND_Rm:
-              case AARCH64_OPND_Rn_SP:
-              case AARCH64_OPND_Rm_SP:
                 if (inst_op.reg.regno == blk_dest.reg.regno)
                   {
                     num_op_used++;
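
The verify_elem_sd hunk above rejects vector-by-element encodings whose sz:L bit pattern is 0b11. As a rough standalone sketch only (not part of the patch), the check reduces to the C below, assuming sz is instruction bit 22 (matching the sz entry added to fields[] above) and L is bit 21, which is an assumption here rather than something stated in this diff; the real code reads both bits through extract_fields and the FLD_sz/FLD_L descriptors instead.

/* Illustrative sketch only, not taken from the patch.  Bit 22 for sz follows
   the fields[] hunk above; bit 21 for L is an assumption.  */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
elem_sd_undefined_p (uint32_t insn)
{
  unsigned int sz = (insn >> 22) & 1;  /* FLD_sz position, per fields[].  */
  unsigned int l = (insn >> 21) & 1;   /* FLD_L position (assumed).  */
  return ((sz << 1) | l) == 0x3;       /* sz:L == 0b11 => UNDEFINED.  */
}

int
main (void)
{
  assert (elem_sd_undefined_p (0x00600000u));   /* bits 22 and 21 both set.  */
  assert (!elem_sd_undefined_p (0x00400000u));  /* only bit 22 set.  */
  puts ("sz:L screening behaves as described");
  return 0;
}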