return NULL;
}
-/* Insert logical/bitmask immediate for e.g. the last operand in
- ORR <Wd|WSP>, <Wn>, #<imm>. */
-const char *
-aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
- aarch64_insn *code, const aarch64_inst *inst)
+/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
+ the operand should be inverted before encoding. */
+static const char *
+aarch64_ins_limm_1 (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst, bfd_boolean invert_p)
{
aarch64_insn value;
uint64_t imm = info->imm.value;
int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
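+ /* Aliases such as BIC (immediate) assemble as the base instruction
+    (AND etc.) with the bitwise-inverted immediate, so callers can ask
+    for the inversion to happen here, before the bitmask encoding is
+    computed.  */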
- if (inst->opcode->op == OP_BIC)
+ if (invert_p)
imm = ~imm;
if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
/* The constraint check should have guaranteed this wouldn't happen. */
assert (0);
insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
self->fields[0]);
return NULL;
}
+/* Insert logical/bitmask immediate for e.g. the last operand in
+ ORR <Wd|WSP>, <Wn>, #<imm>. */
+const char *
+aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
+ aarch64_insn *code, const aarch64_inst *inst)
+{
+ return aarch64_ins_limm_1 (self, info, code, inst,
+ inst->opcode->op == OP_BIC);
+}
+
+/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
+const char *
+aarch64_ins_inv_limm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
+}
+
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
const char *
return aarch64_ext_sve_addr_zz (self, info, code);
}
+/* Encode an SVE ADD/SUB immediate. */
+const char *
+aarch64_ins_sve_aimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
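+ /* The inserted value is the 8-bit immediate with bit 8 acting as the
+    LSL #8 flag.  An explicit LSL #8 keeps the given low byte; a plain
+    immediate that is a nonzero multiple of 256 is converted to the
+    shifted form; anything else must already fit in eight bits.  */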
+ if (info->shifter.amount == 8)
+ insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
+ else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
+ insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
+ else
+ insert_all_fields (self, code, info->imm.value & 0xff);
+ return NULL;
+}
+
+/* Encode an SVE CPY/DUP immediate. */
+const char *
+aarch64_ins_sve_asimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
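+ /* The CPY/DUP immediate uses the same shift-flag-plus-8-bit layout,
+    and the & 0xff masking above makes the handling independent of the
+    sign, so the ADD/SUB routine can be reused unchanged.  */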
+ return aarch64_ins_sve_aimm (self, info, code, inst);
+}
+
/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
array specifies which field to use for Zn. MM is encoded in the
concatenation of imm5 and SVE_tszh, with imm5 being the less
return NULL;
}
+/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
+const char *
+aarch64_ins_sve_limm_mov (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ return aarch64_ins_limm (self, info, code, inst);
+}
+
/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
to use for Zn. */
const char *
return NULL;
}
+/* Encode an SVE shift left immediate. */
+const char *
+aarch64_ins_sve_shlimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ const aarch64_opnd_info *prev_operand;
+ unsigned int esize;
+
+ assert (info->idx > 0);
+ prev_operand = &inst->operands[info->idx - 1];
+ esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
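+ /* The variant and the shift amount share one immediate encoding:
+    e.g. LSL Zd.S, Zn.S, #3 has esize 4 and encodes 8 * 4 + 3 == 35,
+    so the topmost set bit doubles as the element size indicator.  */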
+ insert_all_fields (self, code, 8 * esize + info->imm.value);
+ return NULL;
+}
+
+/* Encode an SVE shift right immediate. */
+const char *
+aarch64_ins_sve_shrimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ const aarch64_opnd_info *prev_operand;
+ unsigned int esize;
+
+ assert (info->idx > 0);
+ prev_operand = &inst->operands[info->idx - 1];
+ esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
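+ /* Right shifts count down instead: e.g. LSR Zd.S, Zn.S, #3 has
+    esize 4 and encodes 16 * 4 - 3 == 61.  */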
+ insert_all_fields (self, code, 16 * esize - info->imm.value);
+ return NULL;
+}
+
+/* Encode a single-bit immediate that selects between #0.5 and #1.0.
+ The fields array specifies which field to use. */
+const char *
+aarch64_ins_sve_float_half_one (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
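+ /* 0x3f000000 is the single-precision bit pattern of 0.5; the only
+    other value the constraint checks allow here is 1.0
+    (0x3f800000).  */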
+ if (info->imm.value == 0x3f000000)
+ insert_field (self->fields[0], code, 0, 0);
+ else
+ insert_field (self->fields[0], code, 1, 0);
+ return NULL;
+}
+
+/* Encode a single-bit immediate that selects between #0.5 and #2.0.
+ The fields array specifies which field to use. */
+const char *
+aarch64_ins_sve_float_half_two (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
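+ /* As above, 0.5 encodes as 0; the only other valid value, 2.0
+    (0x40000000), encodes as 1.  */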
+ if (info->imm.value == 0x3f000000)
+ insert_field (self->fields[0], code, 0, 0);
+ else
+ insert_field (self->fields[0], code, 1, 0);
+ return NULL;
+}
+
+/* Encode a single-bit immediate that selects between #0.0 and #1.0.
+ The fields array specifies which field to use. */
+const char *
+aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ if (info->imm.value == 0)
+ insert_field (self->fields[0], code, 0, 0);
+ else
+ insert_field (self->fields[0], code, 1, 0);
+ return NULL;
+}
+
/* Miscellaneous encoding functions. */
/* Encode size[0], i.e. bit 22, for
return;
}
+/* Return the index in qualifiers_list that INST is using. Should only
+ be called once the qualifiers are known to be valid. */
+
+static int
+aarch64_get_variant (struct aarch64_inst *inst)
+{
+ int i, nops, variant;
+
+ nops = aarch64_num_of_operands (inst->opcode);
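+ /* E.g. for an opcode whose qualifier rows describe the .B, .H, .S
+    and .D forms in that order, assembling the .S form returns 2.  */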
+ for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
+ {
+ for (i = 0; i < nops; ++i)
+ if (inst->opcode->qualifiers_list[variant][i]
+ != inst->operands[i].qualifier)
+ break;
+ if (i == nops)
+ return variant;
+ }
+ abort ();
+}
+
/* Do miscellaneous encodings that are not common enough to be driven by
flags. */
static void
do_misc_encoding (aarch64_inst *inst)
{
+ unsigned int value;
+
switch (inst->opcode->op)
{
case OP_FCVT:
case OP_FCVTXN_S:
encode_asisd_fcvtxn (inst);
break;
+ case OP_MOV_P_P:
+ case OP_MOVS_P_P:
+ /* Copy Pn to Pm and Pg. */
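+ /* MOV Pd.B, Pn.B is an alias of ORR Pd.B, Pn/Z, Pn.B, Pn.B (MOVS
+    of ORRS), hence the replication of the single source.  */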
+ value = extract_field (FLD_SVE_Pn, inst->value, 0);
+ insert_field (FLD_SVE_Pm, &inst->value, value, 0);
+ insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
+ break;
+ case OP_MOV_Z_P_Z:
+ /* Copy Zd to Zm. */
+ value = extract_field (FLD_SVE_Zd, inst->value, 0);
+ insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
+ break;
+ case OP_MOV_Z_V:
+ /* Fill in the zero immediate. */
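+ /* MOV Zd.<T>, <V><n> is an alias of DUP Zd.<T>, Zn.<T>[0]; index 0
+    for element size <T> is represented as 1 << variant in the
+    tszh:imm5 immediate, matching aarch64_ins_sve_index above.  */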
+ insert_fields (&inst->value, 1 << aarch64_get_variant (inst),
+ 0, 2, FLD_imm5, FLD_SVE_tszh);
+ break;
+ case OP_MOV_Z_Z:
+ /* Copy Zn to Zm. */
+ value = extract_field (FLD_SVE_Zn, inst->value, 0);
+ insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
+ break;
+ case OP_MOV_Z_Zi:
+ break;
+ case OP_MOVM_P_P_P:
+ /* Copy Pd to Pm. */
+ value = extract_field (FLD_SVE_Pd, inst->value, 0);
+ insert_field (FLD_SVE_Pm, &inst->value, value, 0);
+ break;
+ case OP_MOVZS_P_P_P:
+ case OP_MOVZ_P_P_P:
+ /* Copy Pn to Pm. */
+ value = extract_field (FLD_SVE_Pn, inst->value, 0);
+ insert_field (FLD_SVE_Pm, &inst->value, value, 0);
+ break;
+ case OP_NOTS_P_P_P_Z:
+ case OP_NOT_P_P_P_Z:
+ /* Copy Pg to Pm. */
+ value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
+ insert_field (FLD_SVE_Pm, &inst->value, value, 0);
+ break;
default: break;
}
}
DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
+/* Some instructions (including all SVE ones) use the instruction class
+ to describe how a qualifiers_list index is represented in the instruction
+ encoding. If INST is such an instruction, encode the chosen qualifier
+ variant. */
+
+static void
+aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
+{
+ switch (inst->opcode->iclass)
+ {
+ case sve_cpy:
+ insert_fields (&inst->value, aarch64_get_variant (inst),
+ 0, 2, FLD_SVE_M_14, FLD_size);
+ break;
+
+ case sve_index:
+ case sve_shift_pred:
+ case sve_shift_unpred:
+ /* For indices and shift amounts, the variant is encoded as
+ part of the immediate. */
+ break;
+
+ case sve_limm:
+ /* For sve_limm, the .B, .H, and .S forms are just a convenience
+ and depend on the immediate. They don't have a separate
+ encoding. */
+ break;
+
+ case sve_misc:
+ /* sve_misc instructions have only a single variant. */
+ break;
+
+ case sve_movprfx:
+ insert_fields (&inst->value, aarch64_get_variant (inst),
+ 0, 2, FLD_SVE_M_16, FLD_size);
+ break;
+
+ case sve_pred_zm:
+ insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
+ break;
+
+ case sve_size_bhs:
+ case sve_size_bhsd:
+ insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
+ break;
+
+ case sve_size_hsd:
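+ /* The first variant is .H, which FLD_size encodes as 1 rather
+    than 0.  */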
+ insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
+ break;
+
+ case sve_size_sd:
+ insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
/* Converters converting an alias opcode instruction to its real form. */
/* ROR <Wd>, <Ws>, #<shift>
/* Insert XZR. */
copy_operand_info (inst, 3, 2);
copy_operand_info (inst, 2, 1);
- copy_operand_info (inst, 2, 0);
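+ /* Operand 1 must take over Rd's type and qualifier before the
+    register number is overridden with XZR below.  */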
+ copy_operand_info (inst, 1, 0);
inst->operands[1].reg.regno = 0x1f;
/* Convert the immediate operand. */
if (opcode_has_special_coder (opcode))
do_special_encoding (inst);
+ /* Possibly use the instruction class to encode the chosen qualifier
+ variant. */
+ aarch64_encode_variant_using_iclass (inst);
+
encoding_exit:
DEBUG_TRACE ("exit with %s", opcode->name);