/* Index of broadcasted operand. */
int operand;
+
+ /* Number of bytes to broadcast. */
+ int bytes;
};
static struct Broadcast_Operation broadcast_op;
invalid_vector_register_set,
unsupported_vector_index_register,
unsupported_broadcast,
- broadcast_not_on_src_operand,
broadcast_needed,
unsupported_masking,
mask_not_on_destination,
/* Flags for operands. */
unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
+#define Operand_Mem 2
/* Relocation type for operand */
enum bfd_reloc_code_real reloc[MAX_OPERANDS];
return 0;
}
-/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit on
- operand J for instruction template T. */
+/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
+ between operand GIVEN and operand WANTED for instruction template T. */
static INLINE int
-match_reg_size (const insn_template *t, unsigned int wanted, unsigned int given)
+match_operand_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
return !((i.types[given].bitfield.byte
&& !t->operand_types[wanted].bitfield.byte)
&& !t->operand_types[wanted].bitfield.tbyte));
}
-/* Return 1 if there is no conflict in SIMD register on
- operand J for instruction template T. */
+/* Return 1 if there is no conflict in SIMD register between operand
+ GIVEN and operand WANTED for instruction template T. */
static INLINE int
-match_simd_size (const insn_template *t, unsigned int wanted, unsigned int given)
+match_simd_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
return !((i.types[given].bitfield.xmmword
&& !t->operand_types[wanted].bitfield.xmmword)
&& !t->operand_types[wanted].bitfield.zmmword));
}
-/* Return 1 if there is no conflict in any size on operand J for
- instruction template T. */
+/* Return 1 if there is no conflict in any size between operand GIVEN
+ and operand WANTED for instruction template T. */
static INLINE int
-match_mem_size (const insn_template *t, unsigned int wanted, unsigned int given)
+match_mem_size (const insn_template *t, unsigned int wanted,
+ unsigned int given)
{
- return (match_reg_size (t, wanted, given)
+ return (match_operand_size (t, wanted, given)
&& !((i.types[given].bitfield.unspecified
&& !i.broadcast
&& !t->operand_types[wanted].bitfield.unspecified)
continue;
if (t->operand_types[j].bitfield.reg
- && !match_reg_size (t, j, j))
+ && !match_operand_size (t, j, j))
{
match = 0;
break;
}
if (t->operand_types[j].bitfield.acc
- && (!match_reg_size (t, j, j) || !match_simd_size (t, j, j)))
+ && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
{
match = 0;
break;
}
- if (i.types[j].bitfield.mem && !match_mem_size (t, j, j))
+ if ((i.flags[j] & Operand_Mem) && !match_mem_size (t, j, j))
{
match = 0;
break;
{
if ((t->operand_types[j].bitfield.reg
|| t->operand_types[j].bitfield.acc)
- && !match_reg_size (t, j, !j))
+ && !match_operand_size (t, j, !j))
goto mismatch;
- if (i.types[!j].bitfield.mem
- && !match_mem_size (t, j, !j))
+ if ((i.flags[!j] & Operand_Mem) && !match_mem_size (t, j, !j))
goto mismatch;
}
{
unsigned int op;
+ /* Determine vector length from the last multi-length vector
+ operand. */
vector_length = 0;
- for (op = 0; op < t->operands; ++op)
+ for (op = t->operands; op--;)
if (t->operand_types[op].bitfield.xmmword
&& t->operand_types[op].bitfield.ymmword
&& i.types[op].bitfield.ymmword)
static INLINE bfd_boolean
is_evex_encoding (const insn_template *t)
{
- return t->opcode_modifier.evex
+ return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
|| t->opcode_modifier.broadcast || t->opcode_modifier.masking
|| t->opcode_modifier.staticrounding || t->opcode_modifier.sae;
}
+static INLINE bfd_boolean
+is_any_vex_encoding (const insn_template *t)
+{
+ return t->opcode_modifier.vex || t->opcode_modifier.vexopcode
+ || is_evex_encoding (t);
+}
+
/* Build the EVEX prefix. */
static void
{
unsigned int op;
+ /* Determine vector length from the last multi-length vector
+ operand. */
vec_length = 0;
- for (op = 0; op < i.tm.operands; ++op)
+ for (op = i.operands; op--;)
if (i.tm.operand_types[op].bitfield.xmmword
+ i.tm.operand_types[op].bitfield.ymmword
+ i.tm.operand_types[op].bitfield.zmmword > 1)
{
if (i.types[op].bitfield.zmmword)
- i.tm.opcode_modifier.evex = EVEX512;
+ {
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ }
else if (i.types[op].bitfield.ymmword)
- i.tm.opcode_modifier.evex = EVEX256;
+ {
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ }
else if (i.types[op].bitfield.xmmword)
- i.tm.opcode_modifier.evex = EVEX128;
- else
- continue;
- break;
+ {
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ }
+ else if (i.broadcast && (int) op == i.broadcast->operand)
+ {
+ switch (i.broadcast->bytes)
+ {
+ case 64:
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ case 32:
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ case 16:
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ default:
+ abort ();
+ }
+ break;
+ }
}
+
+ if (op >= MAX_OPERANDS)
+ abort ();
}
switch (i.tm.opcode_modifier.evex)
gas_assert (i.imm_operands <= 1
&& (i.operands <= 2
- || ((i.tm.opcode_modifier.vex
- || i.tm.opcode_modifier.vexopcode
- || is_evex_encoding (&i.tm))
+ || (is_any_vex_encoding (&i.tm)
&& i.operands <= 4)));
exp = &im_expressions[i.imm_operands++];
&& is_evex_encoding (&i.tm)
&& (i.vec_encoding != vex_encoding_evex
|| i.tm.cpu_flags.bitfield.cpuavx512vl
+ || (i.tm.operand_types[2].bitfield.zmmword
+ && i.types[2].bitfield.ymmword)
|| cpu_arch_isa_flags.bitfield.cpuavx512vl)))
&& ((i.tm.base_opcode == 0x55
|| i.tm.base_opcode == 0x6655
|| i.tm.base_opcode == 0x66f8
|| i.tm.base_opcode == 0x66f9
|| i.tm.base_opcode == 0x66fa
- || i.tm.base_opcode == 0x66fb)
+ || i.tm.base_opcode == 0x66fb
+ || i.tm.base_opcode == 0x42
+ || i.tm.base_opcode == 0x6642
+ || i.tm.base_opcode == 0x47
+ || i.tm.base_opcode == 0x6647)
&& i.tm.extension_opcode == None))
{
/* Optimize: -O2:
EVEX VOP %ymmM, %ymmM, %ymmN
-> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
+ VOP, one of kxord and kxorq:
+ VEX VOP %kM, %kM, %kN
+ -> VEX kxorw %kM, %kM, %kN
+ VOP, one of kandnd and kandnq:
+ VEX VOP %kM, %kM, %kN
+ -> VEX kandnw %kM, %kM, %kN
*/
if (is_evex_encoding (&i.tm))
{
i.tm.opcode_modifier.evex = 0;
}
}
+ else if (i.tm.operand_types[0].bitfield.regmask)
+ {
+ i.tm.base_opcode &= 0xff;
+ i.tm.opcode_modifier.vexw = VEXW0;
+ }
else
i.tm.opcode_modifier.vex = VEX128;
return;
}
+ /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
+ if (i.prefix[DATA_PREFIX] && is_any_vex_encoding (&i.tm))
+ {
+ as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
+ return;
+ }
+
/* Check if HLE prefix is OK. */
if (i.hle_prefix && !check_hle ())
return;
as_warn (_("translating to `%sp'"), i.tm.name);
}
- if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.vexopcode
- || is_evex_encoding (&i.tm))
+ if (is_any_vex_encoding (&i.tm))
{
if (flag_code == CODE_16BIT)
{
/* Now parse operand adding info to 'i' as we go along. */
END_STRING_AND_SAVE (l);
+ if (i.mem_operands > 1)
+ {
+ as_bad (_("too many memory references for `%s'"),
+ mnemonic);
+ return 0;
+ }
+
if (intel_syntax)
operand_ok =
i386_intel_operand (token_start,
{
union i386_op temp_op;
i386_operand_type temp_type;
+ unsigned int temp_flags;
enum bfd_reloc_code_real temp_reloc;
temp_type = i.types[xchg2];
i.types[xchg2] = i.types[xchg1];
i.types[xchg1] = temp_type;
+
+ temp_flags = i.flags[xchg2];
+ i.flags[xchg2] = i.flags[xchg1];
+ i.flags[xchg1] = temp_flags;
+
temp_op = i.op[xchg2];
i.op[xchg2] = i.op[xchg1];
i.op[xchg1] = temp_op;
+
temp_reloc = i.reloc[xchg2];
i.reloc[xchg2] = i.reloc[xchg1];
i.reloc[xchg1] = temp_reloc;
}
}
+/* Return 1 if there is a match in broadcast bytes between operand
+ GIVEN and instruction template T. */
+
+static INLINE int
+match_broadcast_size (const insn_template *t, unsigned int given)
+{
+ return ((t->opcode_modifier.broadcast == BYTE_BROADCAST
+ && i.types[given].bitfield.byte)
+ || (t->opcode_modifier.broadcast == WORD_BROADCAST
+ && i.types[given].bitfield.word)
+ || (t->opcode_modifier.broadcast == DWORD_BROADCAST
+ && i.types[given].bitfield.dword)
+ || (t->opcode_modifier.broadcast == QWORD_BROADCAST
+ && i.types[given].bitfield.qword));
+}
+
/* Check if operands are valid for the instruction. */
static int
i386_operand_type type, overlap;
/* Check if specified broadcast is supported in this instruction,
- and it's applied to memory operand of DWORD or QWORD type. */
+ and its broadcast bytes match the memory operand. */
op = i.broadcast->operand;
if (!t->opcode_modifier.broadcast
- || !i.types[op].bitfield.mem
+ || !(i.flags[op] & Operand_Mem)
|| (!i.types[op].bitfield.unspecified
- && (t->operand_types[op].bitfield.dword
- ? !i.types[op].bitfield.dword
- : !i.types[op].bitfield.qword)))
+ && !match_broadcast_size (t, op)))
{
bad_broadcast:
i.error = unsupported_broadcast;
return 1;
}
+ i.broadcast->bytes = ((1 << (t->opcode_modifier.broadcast - 1))
+ * i.broadcast->type);
operand_type_set (&type, 0);
- switch ((t->operand_types[op].bitfield.dword ? 4 : 8) * i.broadcast->type)
+ switch (i.broadcast->bytes)
{
+ case 2:
+ type.bitfield.word = 1;
+ break;
+ case 4:
+ type.bitfield.dword = 1;
+ break;
case 8:
type.bitfield.qword = 1;
break;
break;
gas_assert (op < i.operands);
/* Check size of the memory operand. */
- if (t->operand_types[op].bitfield.dword
- ? i.types[op].bitfield.dword
- : i.types[op].bitfield.qword)
+ if (match_broadcast_size (t, op))
{
i.error = broadcast_needed;
return 1;
op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
/* Check if requested masking is supported. */
- if (i.mask
- && (!t->opcode_modifier.masking
- || (i.mask->zeroing
- && t->opcode_modifier.masking == MERGING_MASKING)))
+ if (i.mask)
{
- i.error = unsupported_masking;
- return 1;
+ switch (t->opcode_modifier.masking)
+ {
+ case BOTH_MASKING:
+ break;
+ case MERGING_MASKING:
+ if (i.mask->zeroing)
+ {
+ case 0:
+ i.error = unsupported_masking;
+ return 1;
+ }
+ break;
+ case DYNAMIC_MASKING:
+ /* Memory destinations allow only merging masking. */
+ if (i.mask->zeroing && i.mem_operands)
+ {
+ /* Find memory operand. */
+ for (op = 0; op < i.operands; op++)
+ if (i.flags[op] & Operand_Mem)
+ break;
+ gas_assert (op < i.operands);
+ if (op == i.operands - 1)
+ {
+ i.error = unsupported_masking;
+ return 1;
+ }
+ }
+ break;
+ default:
+ abort ();
+ }
}
/* Check if masking is applied to dest operand. */
&& i.disp_encoding != disp_encoding_32bit)
{
if (i.broadcast)
- i.memshift = t->operand_types[op].bitfield.dword ? 2 : 3;
- else
+ i.memshift = t->opcode_modifier.broadcast - 1;
+ else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
i.memshift = t->opcode_modifier.disp8memshift;
+ else
+ {
+ const i386_operand_type *type = NULL;
+
+ i.memshift = 0;
+ for (op = 0; op < i.operands; op++)
+ if (operand_type_check (i.types[op], anymem))
+ {
+ if (t->opcode_modifier.evex == EVEXLIG)
+ i.memshift = 2 + (i.suffix == QWORD_MNEM_SUFFIX);
+ else if (t->operand_types[op].bitfield.xmmword
+ + t->operand_types[op].bitfield.ymmword
+ + t->operand_types[op].bitfield.zmmword <= 1)
+ type = &t->operand_types[op];
+ else if (!i.types[op].bitfield.unspecified)
+ type = &i.types[op];
+ }
+ else if (i.types[op].bitfield.regsimd
+ && t->opcode_modifier.evex != EVEXLIG)
+ {
+ if (i.types[op].bitfield.zmmword)
+ i.memshift = 6;
+ else if (i.types[op].bitfield.ymmword && i.memshift < 5)
+ i.memshift = 5;
+ else if (i.types[op].bitfield.xmmword && i.memshift < 4)
+ i.memshift = 4;
+ }
+
+ if (type)
+ {
+ if (type->bitfield.zmmword)
+ i.memshift = 6;
+ else if (type->bitfield.ymmword)
+ i.memshift = 5;
+ else if (type->bitfield.xmmword)
+ i.memshift = 4;
+ }
+
+ /* For the check in fits_in_disp8(). */
+ if (i.memshift == 0)
+ i.memshift = -1;
+ }
for (op = 0; op < i.operands; op++)
if (operand_type_check (i.types[op], disp)
&& flag_code != CODE_64BIT
&& (intel_syntax
? (!t->opcode_modifier.ignoresize
+ && !t->opcode_modifier.broadcast
&& !intel_float_operand (t->name))
: intel_float_operand (t->name) != 2)
&& ((!operand_types[0].bitfield.regmmx
case unsupported_broadcast:
err_msg = _("unsupported broadcast");
break;
- case broadcast_not_on_src_operand:
- err_msg = _("broadcast not on source memory operand");
- break;
case broadcast_needed:
err_msg = _("broadcast is needed for operand of such type");
break;
else if (i.suffix != QWORD_MNEM_SUFFIX
&& !i.tm.opcode_modifier.ignoresize
&& !i.tm.opcode_modifier.floatmf
+ && !i.tm.opcode_modifier.vex
+ && !i.tm.opcode_modifier.vexopcode
+ && !is_evex_encoding (&i.tm)
&& ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
|| (flag_code == CODE_64BIT
&& i.tm.opcode_modifier.jumpbyte)))
int size = disp_size (n);
offsetT val = i.op[n].disps->X_add_number;
- val = offset_in_range (val >> i.memshift, size);
+ val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
+ size);
p = frag_more (size);
md_number_to_chars (p, val, size);
}
broadcast_op.type = bcst_type;
broadcast_op.operand = this_operand;
+ broadcast_op.bytes = 0;
i.broadcast = &broadcast_op;
}
/* Check masking operation. */
if (i386_index_check (operand_string) == 0)
return 0;
- i.types[this_operand].bitfield.mem = 1;
+ i.flags[this_operand] |= Operand_Mem;
if (i.mem_operands == 0)
i.memop1_string = xstrdup (operand_string);
i.mem_operands++;
mode, and require EVEX encoding. */
if (r->reg_flags & RegVRex)
{
- if (!cpu_arch_flags.bitfield.cpuvrex
+ if (!cpu_arch_flags.bitfield.cpuavx512f
|| flag_code != CODE_64BIT)
return (const reg_entry *) NULL;