is encoded in H:L:M in some cases, the fields H:L:M should be passed in
the order of H, L, M. */
-static inline aarch64_insn
+aarch64_insn
extract_fields (aarch64_insn code, aarch64_insn mask, ...)
{
uint32_t num;
return 1;
}
-/* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
-
-int
-aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
- aarch64_opnd_info *info, const aarch64_insn code,
- const aarch64_inst *inst ATTRIBUTE_UNUSED)
+/* Return true if VALUE is a valid logical immediate encoding, storing the
+ decoded value in *RESULT if so. ESIZE is the number of bytes in the
+ decoded immediate. */
+static int
+decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
{
uint64_t imm, mask;
- uint32_t sf;
uint32_t N, R, S;
unsigned simd_size;
- aarch64_insn value;
-
- value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
- assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
- || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
- sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
/* value is N:immr:imms. */
S = value & 0x3f;
R = (value >> 6) & 0x3f;
N = (value >> 12) & 0x1;
- if (sf == 0 && N == 1)
- return 0;
-
/* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
(in other words, right rotated by R), then replicated. */
if (N != 0)
/* Top bits are IGNORED. */
R &= simd_size - 1;
}
+
+ if (simd_size > esize * 8)
+ return 0;
+
/* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
if (S == simd_size - 1)
return 0;
switch (simd_size)
{
case 2: imm = (imm << 2) | imm;
+ /* Fall through. */
case 4: imm = (imm << 4) | imm;
+ /* Fall through. */
case 8: imm = (imm << 8) | imm;
+ /* Fall through. */
case 16: imm = (imm << 16) | imm;
+ /* Fall through. */
case 32: imm = (imm << 32) | imm;
+ /* Fall through. */
case 64: break;
default: assert (0); return 0;
}
- info->imm.value = sf ? imm : imm & 0xffffffff;
+ *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
+
+ return 1;
+}
+
+/* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
+int
+aarch64_ext_limm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ uint32_t esize;
+ aarch64_insn value;
+
+ /* The raw immediate is the concatenation of the three fields listed in
+ SELF (N:immr:imms in the pre-SVE code that this generalizes). */
+ value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
+ self->fields[2]);
+ /* The element size of operand 0 (in bytes) bounds the replication size
+ accepted by decode_limm, replacing the old sf/N check. */
+ esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
+ return decode_limm (esize, value, &info->imm.value);
+}
+/* Decode a logical immediate for the BIC alias of AND (etc.). */
+int
+aarch64_ext_inv_limm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ if (!aarch64_ext_limm (self, info, code, inst))
+ return 0;
+ /* The alias encodes the bitwise inverse of the printed immediate. */
+ info->imm.value = ~info->imm.value;
return 1;
}
return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
}
+/* Finish decoding an SVE arithmetic immediate, given that INFO already
+ has the raw field value and that the low 8 bits decode to VALUE. */
+static int
+decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
+{
+ info->shifter.kind = AARCH64_MOD_LSL;
+ info->shifter.amount = 0;
+ /* Bit 8 of the raw field selects the "shifted by 8" form. */
+ if (info->imm.value & 0x100)
+ {
+ if (value == 0)
+ /* Decode 0x100 as #0, LSL #8. */
+ info->shifter.amount = 8;
+ else
+ /* Otherwise fold the shift into the immediate itself. */
+ value *= 256;
+ }
+ /* Only print the shifter when a nonzero shift survived. */
+ info->shifter.operator_present = (info->shifter.amount != 0);
+ info->shifter.amount_present = (info->shifter.amount != 0);
+ info->imm.value = value;
+ return 1;
+}
+
+/* Decode an SVE ADD/SUB immediate.  The low 8 bits are treated as
+ unsigned, unlike aarch64_ext_sve_asimm below. */
+int
+aarch64_ext_sve_aimm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ return (aarch64_ext_imm (self, info, code, inst)
+ && decode_sve_aimm (info, (uint8_t) info->imm.value));
+}
+
+/* Decode an SVE CPY/DUP immediate.  The low 8 bits are sign-extended,
+ unlike aarch64_ext_sve_aimm above. */
+int
+aarch64_ext_sve_asimm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ return (aarch64_ext_imm (self, info, code, inst)
+ && decode_sve_asimm (info, (int8_t) info->imm.value));
+}
+
+/* Decode a single-bit immediate that selects between #0.5 and #1.0.
+ The fields array specifies which field to use.  The values stored are
+ the IEEE single-precision bit patterns for 1.0 and 0.5. */
+int
+aarch64_ext_sve_float_half_one (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ if (extract_field (self->fields[0], code, 0))
+ info->imm.value = 0x3f800000;
+ else
+ info->imm.value = 0x3f000000;
+ info->imm.is_fp = TRUE;
+ return 1;
+}
+
+/* Decode a single-bit immediate that selects between #0.5 and #2.0.
+ The fields array specifies which field to use.  The values stored are
+ the IEEE single-precision bit patterns for 2.0 and 0.5. */
+int
+aarch64_ext_sve_float_half_two (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ if (extract_field (self->fields[0], code, 0))
+ info->imm.value = 0x40000000;
+ else
+ info->imm.value = 0x3f000000;
+ info->imm.is_fp = TRUE;
+ return 1;
+}
+
+/* Decode a single-bit immediate that selects between #0.0 and #1.0.
+ The fields array specifies which field to use.  The nonzero value is
+ the IEEE single-precision bit pattern for 1.0. */
+int
+aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ if (extract_field (self->fields[0], code, 0))
+ info->imm.value = 0x3f800000;
+ else
+ info->imm.value = 0x0;
+ info->imm.is_fp = TRUE;
+ return 1;
+}
+
/* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
array specifies which field to use for Zn. MM is encoded in the
concatenation of imm5 and SVE_tszh, with imm5 being the less
return 1;
}
+/* Decode a logical immediate for the MOV alias of SVE DUPM.  Fail if the
+ immediate is one that DUPM itself would print (so that the MOV alias is
+ only used where it is valid). */
+int
+aarch64_ext_sve_limm_mov (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
+ return (aarch64_ext_limm (self, info, code, inst)
+ && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
+}
+
/* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
to use for Zn. The opcode-dependent value specifies the number
of registers in the list. */
info->shifter.amount_present = (val != 0);
return 1;
}
+
+/* Return the top set bit in VALUE, which is expected to be relatively
+ small.  Works by repeatedly clearing the lowest set bit (VALUE & -VALUE)
+ until only one bit remains; returns 0 when VALUE is 0. */
+static uint64_t
+get_top_bit (uint64_t value)
+{
+ while ((value & -value) != value)
+ value -= value & -value;
+ return value;
+}
+
+/* Decode an SVE shift-left immediate.  The raw field is the element-size
+ marker bit ORed with the shift amount, so removing the top set bit
+ leaves the shift amount; a raw value of 0 is invalid. */
+int
+aarch64_ext_sve_shlimm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ if (!aarch64_ext_imm (self, info, code, inst)
+ || info->imm.value == 0)
+ return 0;
+
+ /* Strip the element-size marker bit, leaving the shift amount. */
+ info->imm.value -= get_top_bit (info->imm.value);
+ return 1;
+}
+
+/* Decode an SVE shift-right immediate.  The shift amount is encoded
+ "downwards" from twice the element-size marker bit, so the decoded
+ amount is 2 * top-bit - raw value; a raw value of 0 is invalid. */
+int
+aarch64_ext_sve_shrimm (const aarch64_operand *self,
+ aarch64_opnd_info *info, const aarch64_insn code,
+ const aarch64_inst *inst)
+{
+ if (!aarch64_ext_imm (self, info, code, inst)
+ || info->imm.value == 0)
+ return 0;
+
+ info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
+ return 1;
+}
\f
/* Bitfields that are commonly used to encode certain operands' information
may be partially used as part of the base opcode in some instructions.
static int
do_misc_decoding (aarch64_inst *inst)
{
+ unsigned int value;
switch (inst->opcode->op)
{
case OP_FCVT:
return decode_fcvt (inst);
+
case OP_FCVTN:
case OP_FCVTN2:
case OP_FCVTL:
case OP_FCVTL2:
return decode_asimd_fcvt (inst);
+
case OP_FCVTXN_S:
return decode_asisd_fcvtxn (inst);
+
+ /* MOV <Pd>.B, <Pn>.B is only a valid alias when governing and both
+ source predicates coincide.  */
+ case OP_MOV_P_P:
+ case OP_MOVS_P_P:
+ value = extract_field (FLD_SVE_Pn, inst->value, 0);
+ return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
+ && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
+
+ /* Predicated MOV requires destination and source to be the same.  */
+ case OP_MOV_Z_P_Z:
+ return (extract_field (FLD_SVE_Zd, inst->value, 0)
+ == extract_field (FLD_SVE_Zm_16, inst->value, 0));
+
+ case OP_MOV_Z_V:
+ /* Index must be zero.  (tszh:imm5 being a lone power of two means
+ all index bits above the size marker are clear.)  */
+ value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
+ return value == 1 || value == 2 || value == 4 || value == 8;
+
+ case OP_MOV_Z_Z:
+ return (extract_field (FLD_SVE_Zn, inst->value, 0)
+ == extract_field (FLD_SVE_Zm_16, inst->value, 0));
+
+ case OP_MOV_Z_Zi:
+ /* Index must be nonzero.  */
+ value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
+ return value != 1 && value != 2 && value != 4 && value != 8;
+
+ case OP_MOVM_P_P_P:
+ return (extract_field (FLD_SVE_Pd, inst->value, 0)
+ == extract_field (FLD_SVE_Pm, inst->value, 0));
+
+ case OP_MOVZS_P_P_P:
+ case OP_MOVZ_P_P_P:
+ return (extract_field (FLD_SVE_Pn, inst->value, 0)
+ == extract_field (FLD_SVE_Pm, inst->value, 0));
+
+ /* NOT is the AND-with-inverted-self form: Pm must equal Pg.  */
+ case OP_NOTS_P_P_P_Z:
+ case OP_NOT_P_P_P_Z:
+ return (extract_field (FLD_SVE_Pm, inst->value, 0)
+ == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
+
default:
return 0;
}
}
}
+/* Some instructions (including all SVE ones) use the instruction class
+ to describe how a qualifiers_list index is represented in the instruction
+ encoding.  If INST is such an instruction, decode the appropriate fields
+ and fill in the operand qualifiers accordingly.  Return true if no
+ problems are found. */
+
+static bfd_boolean
+aarch64_decode_variant_using_iclass (aarch64_inst *inst)
+{
+ int i, variant;
+
+ variant = 0;
+ switch (inst->opcode->iclass)
+ {
+ case sve_cpy:
+ variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
+ break;
+
+ case sve_index:
+ /* The variant is the bit position of the single set bit in tsz;
+ tsz == 0 is not a valid encoding. */
+ i = extract_field (FLD_SVE_tsz, inst->value, 0);
+ if (i == 0)
+ return FALSE;
+ while ((i & 1) == 0)
+ {
+ i >>= 1;
+ variant += 1;
+ }
+ break;
+
+ case sve_limm:
+ /* Pick the smallest applicable element size.  The masks test the
+ N bit and the top bits of imms -- NOTE(review): derived from the
+ encoding of logical immediates; confirm against the opcode table. */
+ if ((inst->value & 0x20600) == 0x600)
+ variant = 0;
+ else if ((inst->value & 0x20400) == 0x400)
+ variant = 1;
+ else if ((inst->value & 0x20000) == 0)
+ variant = 2;
+ else
+ variant = 3;
+ break;
+
+ case sve_misc:
+ /* sve_misc instructions have only a single variant. */
+ break;
+
+ case sve_movprfx:
+ variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
+ break;
+
+ case sve_pred_zm:
+ variant = extract_field (FLD_SVE_M_4, inst->value, 0);
+ break;
+
+ case sve_shift_pred:
+ i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
+ sve_shift:
+ /* The variant is the bit position of the top set bit of tsz;
+ tsz == 0 is not a valid encoding. */
+ if (i == 0)
+ return FALSE;
+ while (i != 1)
+ {
+ i >>= 1;
+ variant += 1;
+ }
+ break;
+
+ case sve_shift_unpred:
+ i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
+ goto sve_shift;
+
+ case sve_size_bhs:
+ variant = extract_field (FLD_size, inst->value, 0);
+ if (variant >= 3)
+ return FALSE;
+ break;
+
+ case sve_size_bhsd:
+ variant = extract_field (FLD_size, inst->value, 0);
+ break;
+
+ case sve_size_hsd:
+ /* size == 0 (byte) is not valid for this class. */
+ i = extract_field (FLD_size, inst->value, 0);
+ if (i < 1)
+ return FALSE;
+ variant = i - 1;
+ break;
+
+ case sve_size_sd:
+ variant = extract_field (FLD_SVE_sz, inst->value, 0);
+ break;
+
+ default:
+ /* No mapping between instruction class and qualifiers. */
+ return TRUE;
+ }
+
+ /* Copy the selected row of the qualifiers list into the operands. */
+ for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
+ inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
+ return TRUE;
+}
/* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
fails, which meanes that CODE is not an instruction of OPCODE; otherwise
return 1.
goto decode_fail;
}
+ /* Possibly use the instruction class to determine the correct
+ qualifier. */
+ if (!aarch64_decode_variant_using_iclass (inst))
+ {
+ DEBUG_TRACE ("iclass-based decoder FAIL");
+ goto decode_fail;
+ }
+
/* Call operand decoders. */
for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
{
}
}
+/* Set NAME to a copy of INST's mnemonic with the "." suffix removed.
+ NAME must have room for at least 8 bytes (enforced by the assert below);
+ the mnemonic must contain a '.' and INST must have a condition. */
+
+static void
+remove_dot_suffix (char *name, const aarch64_inst *inst)
+{
+ char *ptr;
+ size_t len;
+
+ ptr = strchr (inst->opcode->name, '.');
+ assert (ptr && inst->cond);
+ len = ptr - inst->opcode->name;
+ assert (len < 8);
+ strncpy (name, inst->opcode->name, len);
+ name[len] = '\0';
+}
+
+
/* Print the instruction mnemonic name. */
static void
/* For instructions that are truly conditionally executed, e.g. b.cond,
prepare the full mnemonic name with the corresponding condition
suffix. */
- char name[8], *ptr;
- size_t len;
-
- ptr = strchr (inst->opcode->name, '.');
- assert (ptr && inst->cond);
- len = ptr - inst->opcode->name;
- assert (len < 8);
- strncpy (name, inst->opcode->name, len);
- name [len] = '\0';
+ char name[8];
+
+ remove_dot_suffix (name, inst);
(*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
}
else
(*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
}
+/* Decide whether we need to print a comment after the operands of
+ instruction INST, and print it if so.  For conditional instructions
+ this lists the alternative condition names as " // mnem.cc, ...". */
+
+static void
+print_comment (const aarch64_inst *inst, struct disassemble_info *info)
+{
+ if (inst->opcode->flags & F_COND)
+ {
+ char name[8];
+ unsigned int i, num_conds;
+
+ remove_dot_suffix (name, inst);
+ /* names[0] was already printed as the mnemonic suffix; print the
+ remaining aliases, stopping at the first null entry. */
+ num_conds = ARRAY_SIZE (inst->cond->names);
+ for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
+ (*info->fprintf_func) (info->stream, "%s %s.%s",
+ i == 1 ? " //" : ",",
+ name, inst->cond->names[i]);
+ }
+}
+
+
/* Print the instruction according to *INST. */
static void
{
print_mnemonic_name (inst, info);
print_operands (pc, inst->opcode, inst->operands, info);
+ print_comment (inst, info);
}
/* Entry-point of the instruction disassembler and printer. */