static const char *default_arch = DEFAULT_ARCH;
+/* Sentinel returned by parse_register() / i386_parse_name() when a name
+   is recognized as a register but cannot be used in the current context
+   (wrong arch / mode); callers compare the result against &bad_reg.  */
+static const reg_entry bad_reg = { "<bad>", OPERAND_TYPE_NONE, 0, 0,
+ { Dw2Inval, Dw2Inval } };
+
/* This struct describes rounding control and SAE in the instruction. */
struct RC_Operation
{
vex_encoding_default = 0,
vex_encoding_vex,
vex_encoding_vex3,
- vex_encoding_evex
+ vex_encoding_evex,
+ vex_encoding_error
} vec_encoding;
/* REP prefix. */
|| ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
&& !defined (TE_GNU) \
&& !defined (TE_LINUX) \
- && !defined (TE_NACL) \
&& !defined (TE_FreeBSD) \
&& !defined (TE_DragonFly) \
&& !defined (TE_NetBSD)))
"lock addl $0, (%{re}sp)". */
static int avoid_fence = 0;
-/* Type of the previous instruction. */
+/* 1 if lfence should be inserted after every load. */
+static int lfence_after_load = 0;
+
+/* Non-zero if lfence should be inserted before indirect branch. */
+static enum lfence_before_indirect_branch_kind
+ {
+ lfence_branch_none = 0,
+ lfence_branch_register,
+ lfence_branch_memory,
+ lfence_branch_all
+ }
+lfence_before_indirect_branch;
+
+/* Non-zero if lfence should be inserted before ret. */
+static enum lfence_before_ret_kind
+ {
+ lfence_before_ret_none = 0,
+ lfence_before_ret_not,
+ lfence_before_ret_or,
+ lfence_before_ret_shl
+ }
+lfence_before_ret;
+
+/* Type of the previous instruction: whether it was a .byte directive or a prefix. */
static struct
{
segT seg;
CPU_AVX512_VP2INTERSECT_FLAGS, 0 },
{ STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN,
CPU_ENQCMD_FLAGS, 0 },
+ { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN,
+ CPU_SERIALIZE_FLAGS, 0 },
{ STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN,
CPU_RDPRU_FLAGS, 0 },
{ STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN,
CPU_MCOMMIT_FLAGS, 0 },
{ STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN,
CPU_SEV_ES_FLAGS, 0 },
+ { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN,
+ CPU_TSXLDTRK_FLAGS, 0 },
};
static const noarch_entry cpu_noarch[] =
{ STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
{ STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
{ STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
- { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS },
+ { STRING_COMMA_LEN ("noavx512_vp2intersect"),
+ CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
{ STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
+ { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS },
+ { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS },
};
#ifdef I386COFF
{
/* We need to check a few extra flags with AVX. */
if (cpu.bitfield.cpuavx
- && (!t->opcode_modifier.sse2avx || sse2avx)
+ && (!t->opcode_modifier.sse2avx
+ || (sse2avx && !i.prefix[DATA_PREFIX]))
&& (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
&& (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
&& (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
down-conversion vpmov*. */
|| ((t->operand_types[wanted].bitfield.class == RegSIMD
- && !t->opcode_modifier.broadcast
- && (t->operand_types[wanted].bitfield.byte
- || t->operand_types[wanted].bitfield.word
- || t->operand_types[wanted].bitfield.dword
- || t->operand_types[wanted].bitfield.qword))
+ && t->operand_types[wanted].bitfield.byte
+ + t->operand_types[wanted].bitfield.word
+ + t->operand_types[wanted].bitfield.dword
+ + t->operand_types[wanted].bitfield.qword
+ > !!t->opcode_modifier.broadcast)
? (i.types[given].bitfield.xmmword
|| i.types[given].bitfield.ymmword
|| i.types[given].bitfield.zmmword)
if (x->types[j].bitfield.class == Reg
|| x->types[j].bitfield.class == RegMMX
|| x->types[j].bitfield.class == RegSIMD
+ || x->types[j].bitfield.class == RegMask
|| x->types[j].bitfield.class == SReg
|| x->types[j].bitfield.class == RegCR
|| x->types[j].bitfield.class == RegDR
- || x->types[j].bitfield.class == RegTR)
+ || x->types[j].bitfield.class == RegTR
+ || x->types[j].bitfield.class == RegBND)
fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
if (operand_type_check (x->types[j], imm))
pe (x->op[j].imms);
}
}
- switch ((i.tm.base_opcode >> 8) & 0xff)
+ switch ((i.tm.base_opcode >> (i.tm.opcode_length << 3)) & 0xff)
{
case 0:
implied_prefix = 0;
/* Determine vector length from the last multi-length vector
operand. */
- vec_length = 0;
for (op = i.operands; op--;)
if (i.tm.operand_types[op].bitfield.xmmword
+ i.tm.operand_types[op].bitfield.ymmword
}
}
+/* Return non-zero for load instruction, i.e. when the instruction in
+   i.tm reads from memory (explicitly or implicitly).  Used by
+   -mlfence-after-load to decide whether an LFENCE must follow.  */
+
+static int
+load_insn_p (void)
+{
+ unsigned int dest;
+ int any_vex_p = is_any_vex_encoding (&i.tm);
+ /* NB: OR-ing in bit 0 folds the byte form into the word/dword form,
+ so a single compare covers both operand-size variants.  */
+ unsigned int base_opcode = i.tm.base_opcode | 1;
+
+ if (!any_vex_p)
+ {
+ /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
+ prefetcht1, prefetcht2, prefetchw, bndmk, bndcl, bndcu, bndcn,
+ bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
+ if (i.tm.opcode_modifier.anysize)
+ return 0;
+
+ /* pop, popf, popa. */
+ if (strcmp (i.tm.name, "pop") == 0
+ || i.tm.base_opcode == 0x9d
+ || i.tm.base_opcode == 0x61)
+ return 1;
+
+ /* movs, cmps, lods, scas. */
+ if ((i.tm.base_opcode | 0xb) == 0xaf)
+ return 1;
+
+ /* outs, xlatb. */
+ if (base_opcode == 0x6f
+ || i.tm.base_opcode == 0xd7)
+ return 1;
+ /* NB: For AMD-specific insns with implicit memory operands,
+ they're intentionally not covered. */
+ }
+
+ /* No memory operand. */
+ if (!i.mem_operands)
+ return 0;
+
+ if (any_vex_p)
+ {
+ /* vldmxcsr. */
+ if (i.tm.base_opcode == 0xae
+ && i.tm.opcode_modifier.vex
+ && i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.extension_opcode == 2)
+ return 1;
+ }
+ else
+ {
+ /* test, not, neg, mul, imul, div, idiv. */
+ if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7)
+ && i.tm.extension_opcode != 1)
+ return 1;
+
+ /* inc, dec. */
+ if (base_opcode == 0xff && i.tm.extension_opcode <= 1)
+ return 1;
+
+ /* add, or, adc, sbb, and, sub, xor, cmp. */
+ if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83)
+ return 1;
+
+ /* bt, bts, btr, btc. */
+ if (i.tm.base_opcode == 0xfba
+ && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))
+ return 1;
+
+ /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
+ if ((base_opcode == 0xc1
+ || (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3))
+ && i.tm.extension_opcode != 6)
+ return 1;
+
+ /* cmpxchg8b, cmpxchg16b, xrstors. */
+ if (i.tm.base_opcode == 0xfc7
+ && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3))
+ return 1;
+
+ /* fxrstor, ldmxcsr, xrstor. */
+ if (i.tm.base_opcode == 0xfae
+ && (i.tm.extension_opcode == 1
+ || i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 5))
+ return 1;
+
+ /* lgdt, lidt, lmsw. */
+ if (i.tm.base_opcode == 0xf01
+ && (i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6))
+ return 1;
+
+ /* vmptrld */
+ if (i.tm.base_opcode == 0xfc7
+ && i.tm.extension_opcode == 6)
+ return 1;
+
+ /* Check for x87 instructions.  Those with a memory operand load
+ unless they belong to one of the store-only groups below.  */
+ if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf)
+ {
+ /* Skip fst, fstp, fstenv, fstcw. */
+ if (i.tm.base_opcode == 0xd9
+ && (i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6
+ || i.tm.extension_opcode == 7))
+ return 0;
+
+ /* Skip fisttp, fist, fistp, fstp. */
+ if (i.tm.base_opcode == 0xdb
+ && (i.tm.extension_opcode == 1
+ || i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 7))
+ return 0;
+
+ /* Skip fisttp, fst, fstp, fsave, fstsw. */
+ if (i.tm.base_opcode == 0xdd
+ && (i.tm.extension_opcode == 1
+ || i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6
+ || i.tm.extension_opcode == 7))
+ return 0;
+
+ /* Skip fisttp, fist, fistp, fbstp, fistp. */
+ if (i.tm.base_opcode == 0xdf
+ && (i.tm.extension_opcode == 1
+ || i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6
+ || i.tm.extension_opcode == 7))
+ return 0;
+
+ return 1;
+ }
+ }
+
+ dest = i.operands - 1;
+
+ /* Check fake imm8 operand and 3 source operands: the trailing imm8
+ is not the real destination, so step back one operand.  */
+ if ((i.tm.opcode_modifier.immext
+ || i.tm.opcode_modifier.vexsources == VEX3SOURCES)
+ && i.types[dest].bitfield.imm8)
+ dest--;
+
+ /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd: these
+ read their memory destination (read-modify-write), so they load.  */
+ if (!any_vex_p
+ && (base_opcode == 0x1
+ || base_opcode == 0x9
+ || base_opcode == 0x11
+ || base_opcode == 0x19
+ || base_opcode == 0x21
+ || base_opcode == 0x29
+ || base_opcode == 0x31
+ || base_opcode == 0x39
+ || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87)
+ || base_opcode == 0xfc1))
+ return 1;
+
+ /* Check for load instruction: if the destination operand is a
+ register, the memory operand must be a source, i.e. a load.  */
+ return (i.types[dest].bitfield.class != ClassNone
+ || i.types[dest].bitfield.instance == Accum);
+}
+
+/* Output lfence, 0xfaee8 (i.e. the bytes 0F AE E8), after the current
+   instruction when -mlfence-after-load=yes and the instruction loads
+   from memory.  */
+
+static void
+insert_lfence_after (void)
+{
+ if (lfence_after_load && load_insn_p ())
+ {
+ /* There are also two REP string instructions that require
+ special treatment. Specifically, the compare string (CMPS)
+ and scan string (SCAS) instructions set EFLAGS in a manner
+ that depends on the data being compared/scanned. When used
+ with a REP prefix, the number of iterations may therefore
+ vary depending on this data. If the data is a program secret
+ chosen by the adversary using an LVI method,
+ then this data-dependent behavior may leak some aspect
+ of the secret.  An LFENCE after the insn cannot close that
+ channel, so only warn.  */
+ if (((i.tm.base_opcode | 0x1) == 0xa7
+ || (i.tm.base_opcode | 0x1) == 0xaf)
+ && i.prefix[REP_PREFIX])
+ {
+ as_warn (_("`%s` changes flags which would affect control flow behavior"),
+ i.tm.name);
+ }
+ /* Emit the 3-byte LFENCE.  */
+ char *p = frag_more (3);
+ *p++ = 0xf;
+ *p++ = 0xae;
+ *p = 0xe8;
+ }
+}
+
+/* Output lfence, 0xfaee8 (0F AE E8), before the current instruction:
+   before an indirect near branch (per -mlfence-before-indirect-branch),
+   or, preceded by a flags/value-neutral dummy operation on the return
+   address, before a near ret (per -mlfence-before-ret).  */
+
+static void
+insert_lfence_before (void)
+{
+ char *p;
+
+ if (is_any_vex_encoding (&i.tm))
+ return;
+
+ /* Opcode 0xff with /2 is an indirect near call, /4 an indirect
+ near jmp.  */
+ if (i.tm.base_opcode == 0xff
+ && (i.tm.extension_opcode == 2 || i.tm.extension_opcode == 4))
+ {
+ /* Insert lfence before indirect branch if needed. */
+
+ if (lfence_before_indirect_branch == lfence_branch_none)
+ return;
+
+ if (i.operands != 1)
+ abort ();
+
+ if (i.reg_operands == 1)
+ {
+ /* Indirect branch via register. Don't insert lfence with
+ -mlfence-after-load=yes. */
+ if (lfence_after_load
+ || lfence_before_indirect_branch == lfence_branch_memory)
+ return;
+ }
+ else if (i.mem_operands == 1
+ && lfence_before_indirect_branch != lfence_branch_register)
+ {
+ /* A memory-operand branch reloads the target after any fence,
+ so it cannot be fully protected; warn instead.  */
+ as_warn (_("indirect `%s` with memory operand should be avoided"),
+ i.tm.name)
+ return;
+ }
+ else
+ return;
+
+ /* A preceding .byte/prefix directive would end up between the
+ fence and the branch; warn and skip.  */
+ if (last_insn.kind != last_insn_other
+ && last_insn.seg == now_seg)
+ {
+ as_warn_where (last_insn.file, last_insn.line,
+ _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
+ last_insn.name, i.tm.name);
+ return;
+ }
+
+ p = frag_more (3);
+ *p++ = 0xf;
+ *p++ = 0xae;
+ *p = 0xe8;
+ return;
+ }
+
+ /* Output or/not/shl and lfence before near ret (0xc2 = ret imm16,
+ 0xc3 = ret).  */
+ if (lfence_before_ret != lfence_before_ret_none
+ && (i.tm.base_opcode == 0xc2
+ || i.tm.base_opcode == 0xc3))
+ {
+ if (last_insn.kind != last_insn_other
+ && last_insn.seg == now_seg)
+ {
+ as_warn_where (last_insn.file, last_insn.line,
+ _("`%s` skips -mlfence-before-ret on `%s`"),
+ last_insn.name, i.tm.name);
+ return;
+ }
+
+ /* Near ret ignores operand size override under CPU64, so emit
+ REX.W (0x48) there instead of 0x66.  */
+ char prefix = flag_code == CODE_64BIT
+ ? 0x48
+ : i.prefix[DATA_PREFIX] ? 0x66 : 0x0;
+
+ if (lfence_before_ret == lfence_before_ret_not)
+ {
+ /* not: 0xf71424, may add prefix
+ for operand size override or 64-bit code.  NOT is emitted
+ twice so (%sp) ends up unchanged and no flags are
+ modified.  */
+ p = frag_more ((prefix ? 2 : 0) + 6 + 3);
+ if (prefix)
+ *p++ = prefix;
+ *p++ = 0xf7;
+ *p++ = 0x14;
+ *p++ = 0x24;
+ if (prefix)
+ *p++ = prefix;
+ *p++ = 0xf7;
+ *p++ = 0x14;
+ *p++ = 0x24;
+ }
+ else
+ {
+ p = frag_more ((prefix ? 1 : 0) + 4 + 3);
+ if (prefix)
+ *p++ = prefix;
+ if (lfence_before_ret == lfence_before_ret_or)
+ {
+ /* or: 0x830c2400, i.e. `or $0, (%sp)' - value-preserving;
+ may add prefix for operand size override or 64-bit
+ code. */
+ *p++ = 0x83;
+ *p++ = 0x0c;
+ }
+ else
+ {
+ /* shl: 0xc1242400, i.e. `shl $0, (%sp)' - value-preserving;
+ may add prefix for operand size override or 64-bit
+ code. */
+ *p++ = 0xc1;
+ *p++ = 0x24;
+ }
+
+ *p++ = 0x24;
+ *p++ = 0x0;
+ }
+
+ /* The LFENCE itself.  */
+ *p++ = 0xf;
+ *p++ = 0xae;
+ *p = 0xe8;
+ }
+}
+
/* This is the guts of the machine-dependent assembler. LINE points to a
machine dependent instruction. This function is supposed to emit
the frags/bytes it assembles to. */
|| i.tm.cpu_flags.bitfield.cpussse3
|| i.tm.cpu_flags.bitfield.cpusse4_1
|| i.tm.cpu_flags.bitfield.cpusse4_2
- || i.tm.cpu_flags.bitfield.cpusse4a
|| i.tm.cpu_flags.bitfield.cpupclmul
|| i.tm.cpu_flags.bitfield.cpuaes
|| i.tm.cpu_flags.bitfield.cpusha
return;
}
- /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
- if (i.prefix[DATA_PREFIX] && is_any_vex_encoding (&i.tm))
+ /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
+ if (i.prefix[DATA_PREFIX]
+ && (is_any_vex_encoding (&i.tm)
+ || i.tm.operand_types[i.imm_operands].bitfield.class >= RegMMX
+ || i.tm.operand_types[i.imm_operands + 1].bitfield.class >= RegMMX))
{
as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
return;
&& !i.types[j].bitfield.xmmword)
i.reg_operands--;
- /* ImmExt should be processed after SSE2AVX. */
- if (!i.tm.opcode_modifier.sse2avx
- && i.tm.opcode_modifier.immext)
- process_immext ();
-
/* For insns with operands there are more diddles to do to the opcode. */
if (i.operands)
{
return;
}
+ /* Check for explicit REX prefix. */
+ if (i.prefix[REX_PREFIX] || i.rex_encoding)
+ {
+ as_bad (_("REX prefix invalid with `%s'"), i.tm.name);
+ return;
+ }
+
if (i.tm.opcode_modifier.vex)
build_vex_prefix (t);
else
build_evex_prefix ();
+
+ /* The individual REX.RXBW bits got consumed. */
+ i.rex &= REX_OPCODE;
}
/* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
i.op[0].disps->X_op = O_symbol;
}
- if (i.tm.opcode_modifier.rex64)
- i.rex |= REX_W;
-
/* For 8 bit registers we need an empty rex prefix. Also if the
instruction already has a prefix, we need to convert old
registers to new ones. */
if (i.rex != 0)
add_prefix (REX_OPCODE | i.rex);
+ insert_lfence_before ();
+
/* We are ready to output the insn. */
output_insn ();
+ insert_lfence_after ();
+
last_insn.seg = now_seg;
if (i.tm.opcode_modifier.isprefix)
}
/* Without VSIB byte, we can't have a vector register for index. */
- if (!t->opcode_modifier.vecsib
+ if (!t->opcode_modifier.sib
&& i.index_reg
&& (i.index_reg->reg_type.bitfield.xmmword
|| i.index_reg->reg_type.bitfield.ymmword
/* For VSIB byte, we need a vector register for index, and all vector
registers must be distinct. */
- if (t->opcode_modifier.vecsib)
+ if (t->opcode_modifier.sib)
{
if (!i.index_reg
- || !((t->opcode_modifier.vecsib == VecSIB128
+ || !((t->opcode_modifier.sib == VECSIB128
&& i.index_reg->reg_type.bitfield.xmmword)
- || (t->opcode_modifier.vecsib == VecSIB256
+ || (t->opcode_modifier.sib == VECSIB256
&& i.index_reg->reg_type.bitfield.ymmword)
- || (t->opcode_modifier.vecsib == VecSIB512
+ || (t->opcode_modifier.sib == VECSIB512
&& i.index_reg->reg_type.bitfield.zmmword)))
{
i.error = invalid_vsib_address;
}
overlap = operand_type_and (type, t->operand_types[op]);
+ if (t->operand_types[op].bitfield.class == RegSIMD
+ && t->operand_types[op].bitfield.byte
+ + t->operand_types[op].bitfield.word
+ + t->operand_types[op].bitfield.dword
+ + t->operand_types[op].bitfield.qword > 1)
+ {
+ overlap.bitfield.xmmword = 0;
+ overlap.bitfield.ymmword = 0;
+ overlap.bitfield.zmmword = 0;
+ }
if (operand_type_all_zero (&overlap))
goto bad_broadcast;
}
}
+ /* Check the special Imm4 cases; must be the first operand. */
+ if (t->cpu_flags.bitfield.cpuxop && t->operands == 5)
+ {
+ if (i.op[0].imms->X_op != O_constant
+ || !fits_in_imm4 (i.op[0].imms->X_add_number))
+ {
+ i.error = bad_imm4;
+ return 1;
+ }
+
+ /* Turn off Imm<N> so that update_imm won't complain. */
+ operand_type_set (&i.types[0], 0);
+ }
+
/* Check vector Disp8 operand. */
if (t->opcode_modifier.disp8memshift
&& i.disp_encoding != disp_encoding_32bit)
return 0;
}
-/* Check if operands are valid for the instruction. Update VEX
- operand types. */
+/* Check if encoding requirements are met by the instruction. */
static int
-VEX_check_operands (const insn_template *t)
+VEX_check_encoding (const insn_template *t)
{
+ if (i.vec_encoding == vex_encoding_error)
+ {
+ i.error = unsupported;
+ return 1;
+ }
+
if (i.vec_encoding == vex_encoding_evex)
{
/* This instruction must be encoded with EVEX prefix. */
return 0;
}
- /* Check the special Imm4 cases; must be the first operand. */
- if (t->cpu_flags.bitfield.cpuxop && t->operands == 5)
- {
- if (i.op[0].imms->X_op != O_constant
- || !fits_in_imm4 (i.op[0].imms->X_add_number))
- {
- i.error = bad_imm4;
- return 1;
- }
-
- /* Turn off Imm<N> so that update_imm won't complain. */
- operand_type_set (&i.types[0], 0);
- }
-
return 0;
}
for (j = 0; j < MAX_OPERANDS; j++)
operand_types[j] = t->operand_types[j];
- /* In general, don't allow 64-bit operands in 32-bit mode. */
- if (i.suffix == QWORD_MNEM_SUFFIX
- && flag_code != CODE_64BIT
+ /* In general, don't allow
+ - 64-bit operands outside of 64-bit mode,
+ - 32-bit operands on pre-386. */
+ j = i.imm_operands + (t->operands > i.imm_operands + 1);
+ if (((i.suffix == QWORD_MNEM_SUFFIX
+ && flag_code != CODE_64BIT
+ && (t->base_opcode != 0x0fc7
+ || t->extension_opcode != 1 /* cmpxchg8b */))
+ || (i.suffix == LONG_MNEM_SUFFIX
+ && !cpu_arch_flags.bitfield.cpui386))
&& (intel_syntax
? (t->opcode_modifier.mnemonicsize != IGNORESIZE
- && !t->opcode_modifier.broadcast
&& !intel_float_operand (t->name))
: intel_float_operand (t->name) != 2)
- && ((operand_types[0].bitfield.class != RegMMX
- && operand_types[0].bitfield.class != RegSIMD)
- || (operand_types[t->operands > 1].bitfield.class != RegMMX
- && operand_types[t->operands > 1].bitfield.class != RegSIMD))
- && (t->base_opcode != 0x0fc7
- || t->extension_opcode != 1 /* cmpxchg8b */))
- continue;
-
- /* In general, don't allow 32-bit operands on pre-386. */
- else if (i.suffix == LONG_MNEM_SUFFIX
- && !cpu_arch_flags.bitfield.cpui386
- && (intel_syntax
- ? (t->opcode_modifier.mnemonicsize != IGNORESIZE
- && !intel_float_operand (t->name))
- : intel_float_operand (t->name) != 2)
- && ((operand_types[0].bitfield.class != RegMMX
- && operand_types[0].bitfield.class != RegSIMD)
- || (operand_types[t->operands > 1].bitfield.class != RegMMX
- && operand_types[t->operands > 1].bitfield.class
- != RegSIMD)))
+ && (t->operands == i.imm_operands
+ || (operand_types[i.imm_operands].bitfield.class != RegMMX
+ && operand_types[i.imm_operands].bitfield.class != RegSIMD
+ && operand_types[i.imm_operands].bitfield.class != RegMask)
+ || (operand_types[j].bitfield.class != RegMMX
+ && operand_types[j].bitfield.class != RegSIMD
+ && operand_types[j].bitfield.class != RegMask))
+ && !t->opcode_modifier.sib)
continue;
/* Do not verify operands when there are none. */
- else
+ if (!t->operands)
{
- if (!t->operands)
- /* We've found a match; break out of loop. */
- break;
+ if (VEX_check_encoding (t))
+ {
+ specific_error = i.error;
+ continue;
+ }
+
+ /* We've found a match; break out of loop. */
+ break;
}
if (!t->opcode_modifier.jump
slip through to break. */
}
- /* Check if vector and VEX operands are valid. */
- if (check_VecOperands (t) || VEX_check_operands (t))
+ /* Check if vector operands are valid. */
+ if (check_VecOperands (t))
+ {
+ specific_error = i.error;
+ continue;
+ }
+
+ /* Check if VEX/EVEX encoding requirements can be satisfied. */
+ if (VEX_check_encoding (t))
{
specific_error = i.error;
continue;
|| (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
--i.operands;
+ /* crc32 needs REX.W set regardless of suffix / source operand size. */
+ if (i.tm.base_opcode == 0xf20f38f0
+ && i.tm.operand_types[1].bitfield.qword)
+ i.rex |= REX_W;
+
/* If there's no instruction mnemonic suffix we try to invent one
based on GPR operands. */
if (!i.suffix)
case CODE_64BIT:
if (!i.tm.opcode_modifier.no_qsuf)
{
- i.suffix = QWORD_MNEM_SUFFIX;
+ if (i.tm.opcode_modifier.jump == JUMP_BYTE
+ || i.tm.opcode_modifier.no_lsuf)
+ i.suffix = QWORD_MNEM_SUFFIX;
break;
}
/* Fall through. */
&& !i.tm.opcode_modifier.no_lsuf
&& !i.tm.opcode_modifier.no_qsuf))
&& i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
+ /* Explicit sizing prefixes are assumed to disambiguate insns. */
+ && !i.prefix[DATA_PREFIX] && !(i.prefix[REX_PREFIX] & REX_W)
/* Accept FLDENV et al without suffix. */
&& (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
{
if (i.suffix == QWORD_MNEM_SUFFIX
&& flag_code == CODE_64BIT
&& !i.tm.opcode_modifier.norex64
+ && !i.tm.opcode_modifier.vexw
/* Special case for xchg %rax,%rax. It is NOP and doesn't
need rex64. */
&& ! (i.operands == 2
i.rex |= REX_W;
break;
+
+ case 0:
+ /* Select word/dword/qword operation with explict data sizing prefix
+ when there are no suitable register operands. */
+ if (i.tm.opcode_modifier.w
+ && (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
+ && (!i.reg_operands
+ || (i.reg_operands == 1
+ /* ShiftCount */
+ && (i.tm.operand_types[0].bitfield.instance == RegC
+ /* InOutPortReg */
+ || i.tm.operand_types[0].bitfield.instance == RegD
+ || i.tm.operand_types[1].bitfield.instance == RegD
+ /* CRC32 */
+ || i.tm.base_opcode == 0xf20f38f0))))
+ i.tm.base_opcode |= 1;
+ break;
}
if (i.tm.opcode_modifier.addrprefixopreg)
continue;
/* Any other register is bad. */
- if (i.types[op].bitfield.class == Reg
- || i.types[op].bitfield.class == RegMMX
- || i.types[op].bitfield.class == RegSIMD
- || i.types[op].bitfield.class == SReg
- || i.types[op].bitfield.class == RegCR
- || i.types[op].bitfield.class == RegDR
- || i.types[op].bitfield.class == RegTR)
- {
- as_bad (_("`%s%s' not allowed with `%s%c'"),
- register_prefix,
- i.op[op].regs->reg_name,
- i.tm.name,
- i.suffix);
- return 0;
- }
+ as_bad (_("`%s%s' not allowed with `%s%c'"),
+ register_prefix, i.op[op].regs->reg_name,
+ i.tm.name, i.suffix);
+ return 0;
}
return 1;
}
else
overlap = imm32s;
}
+ else if (i.prefix[REX_PREFIX] & REX_W)
+ overlap = operand_type_and (overlap, imm32s);
+ else if (i.prefix[DATA_PREFIX])
+ overlap = operand_type_and (overlap,
+ flag_code != CODE_16BIT ? imm16 : imm32);
if (!operand_type_equal (&overlap, &imm8)
&& !operand_type_equal (&overlap, &imm8s)
&& !operand_type_equal (&overlap, &imm16)
unnecessary segment overrides. */
const seg_entry *default_seg = 0;
+ if (i.tm.opcode_modifier.sse2avx)
+ {
+ /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
+ need converting. */
+ i.rex |= i.prefix[REX_PREFIX] & (REX_W | REX_R | REX_X | REX_B);
+ i.prefix[REX_PREFIX] = 0;
+ i.rex_encoding = 0;
+ }
+ /* ImmExt should be processed after SSE2AVX. */
+ else if (i.tm.opcode_modifier.immext)
+ process_immext ();
+
if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
{
unsigned int dupl = i.operands;
return 1;
}
+/* Record REX_BIT in i.rex (and i.vrex) as required by register R.
+   Diagnose a duplicate when the bit was already set by an explicit
+   REX prefix.  With DO_SSE2AVX, an explicit REX bit instead upgrades
+   the VEX register specifier to the REX-extended counterpart of R
+   (the entry 8 slots further on in i386_regtab - TODO confirm table
+   layout assumption holds for all register classes passed here).  */
+static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
+ bfd_boolean do_sse2avx)
+{
+ if (r->reg_flags & RegRex)
+ {
+ if (i.rex & rex_bit)
+ as_bad (_("same type of prefix used twice"));
+ i.rex |= rex_bit;
+ }
+ else if (do_sse2avx && (i.rex & rex_bit) && i.vex.register_specifier)
+ {
+ gas_assert (i.vex.register_specifier == r);
+ i.vex.register_specifier += 8;
+ }
+
+ if (r->reg_flags & RegVRex)
+ i.vrex |= rex_bit;
+}
+
static const seg_entry *
build_modrm_byte (void)
{
i386_operand_type op;
unsigned int vvvv;
- /* Check register-only source operand when two source
- operands are swapped. */
- if (!i.tm.operand_types[source].bitfield.baseindex
- && i.tm.operand_types[dest].bitfield.baseindex)
+ /* Swap two source operands if needed. */
+ if (i.tm.opcode_modifier.swapsources)
{
vvvv = source;
source = dest;
else
i.has_regxmm = TRUE;
}
- if ((i.op[dest].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
- if ((i.op[source].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[source].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
+ set_rex_vrex (i.op[dest].regs, REX_R, i.tm.opcode_modifier.sse2avx);
+ set_rex_vrex (i.op[source].regs, REX_B, FALSE);
}
else
{
i.rm.reg = i.op[source].regs->reg_num;
i.rm.regmem = i.op[dest].regs->reg_num;
- if ((i.op[dest].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
- if ((i.op[source].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[source].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
+ set_rex_vrex (i.op[dest].regs, REX_B, i.tm.opcode_modifier.sse2avx);
+ set_rex_vrex (i.op[source].regs, REX_R, FALSE);
}
if (flag_code != CODE_64BIT && (i.rex & REX_R))
{
break;
gas_assert (op < i.operands);
- if (i.tm.opcode_modifier.vecsib)
+ if (i.tm.opcode_modifier.sib)
{
if (i.index_reg->reg_num == RegIZ)
abort ();
}
}
i.sib.index = i.index_reg->reg_num;
- if ((i.index_reg->reg_flags & RegRex) != 0)
- i.rex |= REX_X;
- if ((i.index_reg->reg_flags & RegVRex) != 0)
- i.vrex |= REX_X;
+ set_rex_vrex (i.index_reg, REX_X, FALSE);
}
default_seg = &ds;
{
i386_operand_type newdisp;
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.types[op] = operand_type_and_not (i.types[op], anydisp);
i.types[op] = operand_type_or (i.types[op], newdisp);
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
/* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegIZ)
/* RIP addressing for 64bit mode. */
else if (i.base_reg->reg_num == RegIP)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (i.base_reg->reg_type.bitfield.word)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
}
}
- if (!i.tm.opcode_modifier.vecsib)
+ if (!i.tm.opcode_modifier.sib)
i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
/* <disp>(%esp) becomes two byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
if (i.index_reg->reg_num == RegIZ)
i.sib.index = NO_INDEX_REGISTER;
if (i.tm.extension_opcode != None)
{
i.rm.regmem = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[op].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
+ set_rex_vrex (i.op[op].regs, REX_B,
+ i.tm.opcode_modifier.sse2avx);
}
else
{
i.rm.reg = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[op].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
+ set_rex_vrex (i.op[op].regs, REX_R,
+ i.tm.opcode_modifier.sse2avx);
}
}
if (*q)
switch (j)
{
- case REX_PREFIX:
- /* REX byte is encoded in VEX prefix. */
- break;
case SEG_PREFIX:
case ADDR_PREFIX:
FRAG_APPEND_1_CHAR (*q);
/* Check masking operation. */
else if ((mask = parse_register (op_string, &end_op)) != NULL)
{
+ if (mask == &bad_reg)
+ return NULL;
+
/* k0 can't be used for write mask. */
if (mask->reg_type.bitfield.class != RegMask || !mask->reg_num)
{
{
i386_operand_type temp;
+ if (r == &bad_reg)
+ return 0;
+
/* Check for a segment override by searching for ':' after a
segment register. */
op_string = end_op;
if (i.base_reg)
{
+ if (i.base_reg == &bad_reg)
+ return 0;
base_string = end_op;
if (is_space_char (*base_string))
++base_string;
if ((i.index_reg = parse_register (base_string, &end_op))
!= NULL)
{
+ if (i.index_reg == &bad_reg)
+ return 0;
base_string = end_op;
if (is_space_char (*base_string))
++base_string;
return output_invalid_buf;
}
+/* Verify that @r can be used in the current context (selected CPU
+   features, code size / flag_code, and syntax).  Returns FALSE when it
+   cannot.  Side effect: a VREX register forces i.vec_encoding to EVEX,
+   or to vex_encoding_error if a conflicting encoding was requested.  */
+
+static bfd_boolean check_register (const reg_entry *r)
+{
+ if (allow_pseudo_reg)
+ return TRUE;
+
+ if (operand_type_all_zero (&r->reg_type))
+ return FALSE;
+
+ /* 32-bit GPRs, %fs/%gs, and control/debug registers need i386+.  */
+ if ((r->reg_type.bitfield.dword
+ || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
+ || r->reg_type.bitfield.class == RegCR
+ || r->reg_type.bitfield.class == RegDR)
+ && !cpu_arch_flags.bitfield.cpui386)
+ return FALSE;
+
+ /* Test registers exist only on i386/i486: reject in 64-bit mode,
+ pre-i386, and on i586/i686 or newer ISAs.  */
+ if (r->reg_type.bitfield.class == RegTR
+ && (flag_code == CODE_64BIT
+ || !cpu_arch_flags.bitfield.cpui386
+ || cpu_arch_isa_flags.bitfield.cpui586
+ || cpu_arch_isa_flags.bitfield.cpui686))
+ return FALSE;
+
+ if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
+ return FALSE;
+
+ /* Vector / mask registers are gated on AVX512F, AVX, and SSE in
+ decreasing width order.  */
+ if (!cpu_arch_flags.bitfield.cpuavx512f)
+ {
+ if (r->reg_type.bitfield.zmmword
+ || r->reg_type.bitfield.class == RegMask)
+ return FALSE;
+
+ if (!cpu_arch_flags.bitfield.cpuavx)
+ {
+ if (r->reg_type.bitfield.ymmword)
+ return FALSE;
+
+ if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
+ return FALSE;
+ }
+ }
+
+ if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
+ return FALSE;
+
+ /* Don't allow fake index register unless allow_index_reg isn't 0. */
+ if (!allow_index_reg && r->reg_num == RegIZ)
+ return FALSE;
+
+ /* Upper 16 vector registers are only available with VREX in 64bit
+ mode, and require EVEX encoding. */
+ if (r->reg_flags & RegVRex)
+ {
+ if (!cpu_arch_flags.bitfield.cpuavx512f
+ || flag_code != CODE_64BIT)
+ return FALSE;
+
+ /* Force EVEX; a prior conflicting {vex}/{vex3} request becomes
+ an error reported later via vex_encoding_error.  */
+ if (i.vec_encoding == vex_encoding_default)
+ i.vec_encoding = vex_encoding_evex;
+ else if (i.vec_encoding != vex_encoding_evex)
+ i.vec_encoding = vex_encoding_error;
+ }
+
+ /* 64-bit-only registers are rejected outside 64-bit mode, except
+ control registers when the LM feature is enabled.  */
+ if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
+ && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
+ && flag_code != CODE_64BIT)
+ return FALSE;
+
+ /* The flat pseudo-segment is Intel-syntax only.  */
+ if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
+ && !intel_syntax)
+ return FALSE;
+
+ return TRUE;
+}
+
/* REG_STRING starts *before* REGISTER_PREFIX. */
static const reg_entry *
{
if (!cpu_arch_flags.bitfield.cpu8087
&& !cpu_arch_flags.bitfield.cpu287
- && !cpu_arch_flags.bitfield.cpu387)
+ && !cpu_arch_flags.bitfield.cpu387
+ && !allow_pseudo_reg)
return (const reg_entry *) NULL;
if (is_space_char (*s))
}
}
- if (r == NULL || allow_pseudo_reg)
- return r;
-
- if (operand_type_all_zero (&r->reg_type))
- return (const reg_entry *) NULL;
-
- if ((r->reg_type.bitfield.dword
- || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
- || r->reg_type.bitfield.class == RegCR
- || r->reg_type.bitfield.class == RegDR
- || r->reg_type.bitfield.class == RegTR)
- && !cpu_arch_flags.bitfield.cpui386)
- return (const reg_entry *) NULL;
-
- if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
- return (const reg_entry *) NULL;
-
- if (!cpu_arch_flags.bitfield.cpuavx512f)
- {
- if (r->reg_type.bitfield.zmmword
- || r->reg_type.bitfield.class == RegMask)
- return (const reg_entry *) NULL;
-
- if (!cpu_arch_flags.bitfield.cpuavx)
- {
- if (r->reg_type.bitfield.ymmword)
- return (const reg_entry *) NULL;
-
- if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
- return (const reg_entry *) NULL;
- }
- }
-
- if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
- return (const reg_entry *) NULL;
-
- /* Don't allow fake index register unless allow_index_reg isn't 0. */
- if (!allow_index_reg && r->reg_num == RegIZ)
- return (const reg_entry *) NULL;
-
- /* Upper 16 vector registers are only available with VREX in 64bit
- mode, and require EVEX encoding. */
- if (r->reg_flags & RegVRex)
- {
- if (!cpu_arch_flags.bitfield.cpuavx512f
- || flag_code != CODE_64BIT)
- return (const reg_entry *) NULL;
-
- i.vec_encoding = vex_encoding_evex;
- }
-
- if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
- && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
- && flag_code != CODE_64BIT)
- return (const reg_entry *) NULL;
-
- if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
- && !intel_syntax)
- return (const reg_entry *) NULL;
-
- return r;
+ return r && check_register (r) ? r : NULL;
}
/* REG_STRING starts *before* REGISTER_PREFIX. */
know (e->X_add_number >= 0
&& (valueT) e->X_add_number < i386_regtab_size);
r = i386_regtab + e->X_add_number;
- if ((r->reg_flags & RegVRex))
- i.vec_encoding = vex_encoding_evex;
+ if (!check_register (r))
+ {
+ as_bad (_("register '%s%s' cannot be used here"),
+ register_prefix, r->reg_name);
+ r = &bad_reg;
+ }
*end_op = input_line_pointer;
}
*input_line_pointer = c;
{
*nextcharP = *input_line_pointer;
*input_line_pointer = 0;
- e->X_op = O_register;
- e->X_add_number = r - i386_regtab;
+ if (r != &bad_reg)
+ {
+ e->X_op = O_register;
+ e->X_add_number = r - i386_regtab;
+ }
+ else
+ e->X_op = O_illegal;
return 1;
}
input_line_pointer = end;
#define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
#define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
#define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
+#define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
+#define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
+#define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
struct option md_longopts[] =
{
{"malign-branch-prefix-size", required_argument, NULL, OPTION_MALIGN_BRANCH_PREFIX_SIZE},
{"malign-branch", required_argument, NULL, OPTION_MALIGN_BRANCH},
{"mbranches-within-32B-boundaries", no_argument, NULL, OPTION_MBRANCHES_WITH_32B_BOUNDARIES},
+ {"mlfence-after-load", required_argument, NULL, OPTION_MLFENCE_AFTER_LOAD},
+ {"mlfence-before-indirect-branch", required_argument, NULL,
+ OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH},
+ {"mlfence-before-ret", required_argument, NULL, OPTION_MLFENCE_BEFORE_RET},
{"mamd64", no_argument, NULL, OPTION_MAMD64},
{"mintel64", no_argument, NULL, OPTION_MINTEL64},
{NULL, no_argument, NULL, 0}
as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
break;
+ case OPTION_MLFENCE_AFTER_LOAD:
+ if (strcasecmp (arg, "yes") == 0)
+ lfence_after_load = 1;
+ else if (strcasecmp (arg, "no") == 0)
+ lfence_after_load = 0;
+ else
+ as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg);
+ break;
+
+ case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH:
+ if (strcasecmp (arg, "all") == 0)
+ {
+ lfence_before_indirect_branch = lfence_branch_all;
+ if (lfence_before_ret == lfence_before_ret_none)
+ lfence_before_ret = lfence_before_ret_shl;
+ }
+ else if (strcasecmp (arg, "memory") == 0)
+ lfence_before_indirect_branch = lfence_branch_memory;
+ else if (strcasecmp (arg, "register") == 0)
+ lfence_before_indirect_branch = lfence_branch_register;
+ else if (strcasecmp (arg, "none") == 0)
+ lfence_before_indirect_branch = lfence_branch_none;
+ else
+ as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
+ arg);
+ break;
+
+ case OPTION_MLFENCE_BEFORE_RET:
+ if (strcasecmp (arg, "or") == 0)
+ lfence_before_ret = lfence_before_ret_or;
+ else if (strcasecmp (arg, "not") == 0)
+ lfence_before_ret = lfence_before_ret_not;
+ else if (strcasecmp (arg, "shl") == 0 || strcasecmp (arg, "yes") == 0)
+ lfence_before_ret = lfence_before_ret_shl;
+ else if (strcasecmp (arg, "none") == 0)
+ lfence_before_ret = lfence_before_ret_none;
+ else
+ as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
+ arg);
+ break;
+
case OPTION_MRELAX_RELOCATIONS:
if (strcasecmp (arg, "yes") == 0)
generate_relax_relocations = 1;
-mbranches-within-32B-boundaries\n\
align branches within 32 byte boundary\n"));
fprintf (stream, _("\
+ -mlfence-after-load=[no|yes] (default: no)\n\
+ generate lfence after load\n"));
+ fprintf (stream, _("\
+ -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
+ generate lfence before indirect near branch\n"));
+ fprintf (stream, _("\
+ -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
+ generate lfence before ret\n"));
+ fprintf (stream, _("\
-mamd64 accept only AMD64 ISA [default]\n"));
fprintf (stream, _("\
-mintel64 accept only Intel64 ISA\n"));
if (flag_code == CODE_64BIT)
return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
else
- return "pe-i386";
+ return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
case bfd_target_coff_flavour:
return "coff-go32";
last_insn.kind = last_insn_directive;
last_insn.name = "constant directive";
last_insn.file = as_where (&last_insn.line);
+ if (lfence_before_ret != lfence_before_ret_none)
+ {
+ if (lfence_before_indirect_branch != lfence_branch_none)
+ as_warn (_("constant directive skips -mlfence-before-ret "
+ "and -mlfence-before-indirect-branch"));
+ else
+ as_warn (_("constant directive skips -mlfence-before-ret"));
+ }
+ else if (lfence_before_indirect_branch != lfence_branch_none)
+ as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
}
}