#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);
+
+/* Bitmask of GNU_PROPERTY_X86_ISA_1_* bits for the ISAs used by the
+   assembled code (GNU_PROPERTY_X86_ISA_1_USED).  */
+static unsigned int x86_isa_1_used;
+/* Bitmask of GNU_PROPERTY_X86_FEATURE_2_* bits for the features used
+   (GNU_PROPERTY_X86_FEATURE_2_USED).  */
+static unsigned int x86_feature_2_used;
+/* Non-zero to generate the x86 used ISA and feature properties in a
+   .note.gnu.property section; set via -mx86-used-note=[no|yes].  */
+static unsigned int x86_used_note = DEFAULT_X86_USED_NOTE;
#endif
static const char *default_arch = DEFAULT_ARCH;
/* Flags for operands. */
unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
+#define Operand_Mem 2
/* Relocation type for operand */
enum bfd_reloc_code_real reloc[MAX_OPERANDS];
unsigned int prefixes;
unsigned char prefix[MAX_PREFIXES];
+ /* Has MMX register operands. */
+ bfd_boolean has_regmmx;
+
+ /* Has XMM register operands. */
+ bfd_boolean has_regxmm;
+
+ /* Has YMM register operands. */
+ bfd_boolean has_regymm;
+
+ /* Has ZMM register operands. */
+ bfd_boolean has_regzmm;
+
/* RM and SIB are the modrm byte and the sib byte where the
addressing modes of this insn are encoded. */
modrm_byte rm;
CPU_387_FLAGS, 0 },
{ STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
CPU_687_FLAGS, 0 },
+ { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN,
+ CPU_CMOV_FLAGS, 0 },
+ { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN,
+ CPU_FXSR_FLAGS, 0 },
{ STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
CPU_MMX_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
{ STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS },
{ STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS },
{ STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS },
+ { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS },
+ { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS },
{ STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS },
{ STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
{ STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
break;
}
- if (i.types[j].bitfield.mem && !match_mem_size (t, j, j))
+ if ((i.flags[j] & Operand_Mem) && !match_mem_size (t, j, j))
{
match = 0;
break;
&& !match_operand_size (t, j, !j))
goto mismatch;
- if (i.types[!j].bitfield.mem
- && !match_mem_size (t, j, !j))
+ if ((i.flags[!j] & Operand_Mem) && !match_mem_size (t, j, !j))
goto mismatch;
}
|| i.tm.base_opcode == 0x66f8
|| i.tm.base_opcode == 0x66f9
|| i.tm.base_opcode == 0x66fa
- || i.tm.base_opcode == 0x66fb)
+ || i.tm.base_opcode == 0x66fb
+ || i.tm.base_opcode == 0x42
+ || i.tm.base_opcode == 0x6642
+ || i.tm.base_opcode == 0x47
+ || i.tm.base_opcode == 0x6647)
&& i.tm.extension_opcode == None))
{
/* Optimize: -O2:
EVEX VOP %ymmM, %ymmM, %ymmN
-> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
+ VOP, one of kxord and kxorq:
+ VEX VOP %kM, %kM, %kN
+ -> VEX kxorw %kM, %kM, %kN
+ VOP, one of kandnd and kandnq:
+ VEX VOP %kM, %kM, %kN
+ -> VEX kandnw %kM, %kM, %kN
*/
if (is_evex_encoding (&i.tm))
{
i.tm.opcode_modifier.evex = 0;
}
}
+ else if (i.tm.operand_types[0].bitfield.regmask)
+ {
+ i.tm.base_opcode &= 0xff;
+ i.tm.opcode_modifier.vexw = VEXW0;
+ }
else
i.tm.opcode_modifier.vex = VEX128;
{
union i386_op temp_op;
i386_operand_type temp_type;
+ unsigned int temp_flags;
enum bfd_reloc_code_real temp_reloc;
temp_type = i.types[xchg2];
i.types[xchg2] = i.types[xchg1];
i.types[xchg1] = temp_type;
+
+ temp_flags = i.flags[xchg2];
+ i.flags[xchg2] = i.flags[xchg1];
+ i.flags[xchg1] = temp_flags;
+
temp_op = i.op[xchg2];
i.op[xchg2] = i.op[xchg1];
i.op[xchg1] = temp_op;
+
temp_reloc = i.reloc[xchg2];
i.reloc[xchg2] = i.reloc[xchg1];
i.reloc[xchg1] = temp_reloc;
and its broadcast bytes match the memory operand. */
op = i.broadcast->operand;
if (!t->opcode_modifier.broadcast
- || !i.types[op].bitfield.mem
+ || !(i.flags[op] & Operand_Mem)
|| (!i.types[op].bitfield.unspecified
&& !match_broadcast_size (t, op)))
{
op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
/* Check if requested masking is supported. */
- if (i.mask
- && (!t->opcode_modifier.masking
- || (i.mask->zeroing
- && t->opcode_modifier.masking == MERGING_MASKING)))
+ if (i.mask)
{
- i.error = unsupported_masking;
- return 1;
+ switch (t->opcode_modifier.masking)
+ {
+ case BOTH_MASKING:
+ break;
+ case MERGING_MASKING:
+ if (i.mask->zeroing)
+ {
+ case 0:
+ i.error = unsupported_masking;
+ return 1;
+ }
+ break;
+ case DYNAMIC_MASKING:
+ /* Memory destinations allow only merging masking. */
+ if (i.mask->zeroing && i.mem_operands)
+ {
+ /* Find memory operand. */
+ for (op = 0; op < i.operands; op++)
+ if (i.flags[op] & Operand_Mem)
+ break;
+ gas_assert (op < i.operands);
+ if (op == i.operands - 1)
+ {
+ i.error = unsupported_masking;
+ return 1;
+ }
+ }
+ break;
+ default:
+ abort ();
+ }
}
/* Check if masking is applied to dest operand. */
{
i.rm.reg = i.op[dest].regs->reg_num;
i.rm.regmem = i.op[source].regs->reg_num;
+ if (i.op[dest].regs->reg_type.bitfield.regmmx
+ || i.op[source].regs->reg_type.bitfield.regmmx)
+ i.has_regmmx = TRUE;
+ else if (i.op[dest].regs->reg_type.bitfield.regsimd
+ || i.op[source].regs->reg_type.bitfield.regsimd)
+ {
+ if (i.types[dest].bitfield.zmmword
+ || i.types[source].bitfield.zmmword)
+ i.has_regzmm = TRUE;
+ else if (i.types[dest].bitfield.ymmword
+ || i.types[source].bitfield.ymmword)
+ i.has_regymm = TRUE;
+ else
+ i.has_regxmm = TRUE;
+ }
if ((i.op[dest].regs->reg_flags & RegRex) != 0)
i.rex |= REX_R;
if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
if (i.tm.opcode_modifier.vecsib)
{
- if (i.index_reg->reg_num == RegEiz
- || i.index_reg->reg_num == RegRiz)
+ if (i.index_reg->reg_num == RegIZ)
abort ();
i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
else if (!i.tm.opcode_modifier.vecsib)
{
/* !i.base_reg && i.index_reg */
- if (i.index_reg->reg_num == RegEiz
- || i.index_reg->reg_num == RegRiz)
+ if (i.index_reg->reg_num == RegIZ)
i.sib.index = NO_INDEX_REGISTER;
else
i.sib.index = i.index_reg->reg_num;
}
}
/* RIP addressing for 64bit mode. */
- else if (i.base_reg->reg_num == RegRip ||
- i.base_reg->reg_num == RegEip)
+ else if (i.base_reg->reg_num == RegIP)
{
gas_assert (!i.tm.opcode_modifier.vecsib);
i.rm.regmem = NO_BASE_REGISTER;
}
else if (!i.tm.opcode_modifier.vecsib)
{
- if (i.index_reg->reg_num == RegEiz
- || i.index_reg->reg_num == RegRiz)
+ if (i.index_reg->reg_num == RegIZ)
i.sib.index = NO_INDEX_REGISTER;
else
i.sib.index = i.index_reg->reg_num;
unsigned int vex_reg = ~0;
for (op = 0; op < i.operands; op++)
- if (i.types[op].bitfield.reg
- || i.types[op].bitfield.regmmx
- || i.types[op].bitfield.regsimd
- || i.types[op].bitfield.regbnd
- || i.types[op].bitfield.regmask
- || i.types[op].bitfield.sreg2
- || i.types[op].bitfield.sreg3
- || i.types[op].bitfield.control
- || i.types[op].bitfield.debug
- || i.types[op].bitfield.test)
- break;
+ {
+ if (i.types[op].bitfield.reg
+ || i.types[op].bitfield.regbnd
+ || i.types[op].bitfield.regmask
+ || i.types[op].bitfield.sreg2
+ || i.types[op].bitfield.sreg3
+ || i.types[op].bitfield.control
+ || i.types[op].bitfield.debug
+ || i.types[op].bitfield.test)
+ break;
+ if (i.types[op].bitfield.regsimd)
+ {
+ if (i.types[op].bitfield.zmmword)
+ i.has_regzmm = TRUE;
+ else if (i.types[op].bitfield.ymmword)
+ i.has_regymm = TRUE;
+ else
+ i.has_regxmm = TRUE;
+ break;
+ }
+ if (i.types[op].bitfield.regmmx)
+ {
+ i.has_regmmx = TRUE;
+ break;
+ }
+ }
if (vex_3_sources)
op = dest;
md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+/* Emit a .note.gnu.property section containing the
+   GNU_PROPERTY_X86_ISA_1_USED and GNU_PROPERTY_X86_FEATURE_2_USED
+   properties accumulated in x86_isa_1_used and x86_feature_2_used.
+   NOTE(review): presumably invoked once at the end of assembly --
+   confirm how x86_cleanup is hooked up in the target headers.  */
+void
+x86_cleanup (void)
+{
+  char *p;
+  asection *seg = now_seg;
+  subsegT subseg = now_subseg;
+  asection *sec;
+  unsigned int alignment, align_size_1;
+  unsigned int isa_1_descsz, feature_2_descsz, descsz;
+  unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
+  unsigned int padding;
+
+  /* Nothing to emit unless producing ELF with -mx86-used-note=yes.  */
+  if (!IS_ELF || !x86_used_note)
+    return;
+
+  x86_isa_1_used |= GNU_PROPERTY_X86_UINT32_VALID;
+  x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;
+
+  /* The .note.gnu.property section layout:
+
+     Field	Length		Contents
+     ----	----		----
+     n_namsz	4		4
+     n_descsz	4		The note descriptor size
+     n_type	4		NT_GNU_PROPERTY_TYPE_0
+     n_name	4		"GNU"
+     n_desc	n_descsz	The program property array
+     ....	....		....
+  */
+
+  /* Create the .note.gnu.property section.  */
+  sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
+  bfd_set_section_flags (stdoutput, sec,
+			 (SEC_ALLOC
+			  | SEC_LOAD
+			  | SEC_DATA
+			  | SEC_HAS_CONTENTS
+			  | SEC_READONLY));
+
+  /* 8-byte alignment (ELFCLASS64) vs 4-byte (ELFCLASS32) for the
+     note descriptor fields.  */
+  if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
+    {
+      align_size_1 = 7;
+      alignment = 3;
+    }
+  else
+    {
+      align_size_1 = 3;
+      alignment = 2;
+    }
+
+  bfd_set_section_alignment (stdoutput, sec, alignment);
+  elf_section_type (sec) = SHT_NOTE;
+
+  /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
+     + 4-byte data  */
+  isa_1_descsz_raw = 4 + 4 + 4;
+  /* Align GNU_PROPERTY_X86_ISA_1_USED.  */
+  isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;
+
+  feature_2_descsz_raw = isa_1_descsz;
+  /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
+     + 4-byte data  */
+  feature_2_descsz_raw += 4 + 4 + 4;
+  /* Align GNU_PROPERTY_X86_FEATURE_2_USED.  */
+  feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
+		      & ~align_size_1);
+
+  descsz = feature_2_descsz;
+  /* Section size: n_namsz + n_descsz + n_type + n_name + descsz.  */
+  p = frag_more (4 + 4 + 4 + 4 + descsz);
+
+  /* Write n_namsz.  */
+  md_number_to_chars (p, (valueT) 4, 4);
+
+  /* Write n_descsz.  */
+  md_number_to_chars (p + 4, (valueT) descsz, 4);
+
+  /* Write n_type.  */
+  md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);
+
+  /* Write n_name.  */
+  memcpy (p + 4 * 3, "GNU", 4);
+
+  /* Write 4-byte type.  */
+  md_number_to_chars (p + 4 * 4,
+		      (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);
+
+  /* Write 4-byte data size.  */
+  md_number_to_chars (p + 4 * 5, (valueT) 4, 4);
+
+  /* Write 4-byte data.  */
+  md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);
+
+  /* Zero out paddings.  */
+  padding = isa_1_descsz - isa_1_descsz_raw;
+  if (padding)
+    memset (p + 4 * 7, 0, padding);
+
+  /* Write 4-byte type.  */
+  md_number_to_chars (p + isa_1_descsz + 4 * 4,
+		      (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);
+
+  /* Write 4-byte data size.  */
+  md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);
+
+  /* Write 4-byte data.  */
+  md_number_to_chars (p + isa_1_descsz + 4 * 6,
+		      (valueT) x86_feature_2_used, 4);
+
+  /* Zero out paddings.  */
+  padding = feature_2_descsz - feature_2_descsz_raw;
+  if (padding)
+    memset (p + isa_1_descsz + 4 * 7, 0, padding);
+
+  /* We probably can't restore the current segment, for there likely
+     isn't one yet...  */
+  if (seg && subseg)
+    subseg_set (seg, subseg);
+}
+#endif
+
static void
output_insn (void)
{
fragS *insn_start_frag;
offsetT insn_start_off;
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ if (IS_ELF && x86_used_note)
+ {
+ if (i.tm.cpu_flags.bitfield.cpucmov)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
+ if (i.tm.cpu_flags.bitfield.cpusse)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE;
+ if (i.tm.cpu_flags.bitfield.cpusse2)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE2;
+ if (i.tm.cpu_flags.bitfield.cpusse3)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE3;
+ if (i.tm.cpu_flags.bitfield.cpussse3)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSSE3;
+ if (i.tm.cpu_flags.bitfield.cpusse4_1)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_1;
+ if (i.tm.cpu_flags.bitfield.cpusse4_2)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_2;
+ if (i.tm.cpu_flags.bitfield.cpuavx)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX;
+ if (i.tm.cpu_flags.bitfield.cpuavx2)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX2;
+ if (i.tm.cpu_flags.bitfield.cpufma)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_FMA;
+ if (i.tm.cpu_flags.bitfield.cpuavx512f)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512F;
+ if (i.tm.cpu_flags.bitfield.cpuavx512cd)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512CD;
+ if (i.tm.cpu_flags.bitfield.cpuavx512er)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512ER;
+ if (i.tm.cpu_flags.bitfield.cpuavx512pf)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512PF;
+ if (i.tm.cpu_flags.bitfield.cpuavx512vl)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512VL;
+ if (i.tm.cpu_flags.bitfield.cpuavx512dq)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512DQ;
+ if (i.tm.cpu_flags.bitfield.cpuavx512bw)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512BW;
+ if (i.tm.cpu_flags.bitfield.cpuavx512_4fmaps)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS;
+ if (i.tm.cpu_flags.bitfield.cpuavx512_4vnniw)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW;
+ if (i.tm.cpu_flags.bitfield.cpuavx512_bitalg)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG;
+ if (i.tm.cpu_flags.bitfield.cpuavx512ifma)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA;
+ if (i.tm.cpu_flags.bitfield.cpuavx512vbmi)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI;
+ if (i.tm.cpu_flags.bitfield.cpuavx512_vbmi2)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2;
+ if (i.tm.cpu_flags.bitfield.cpuavx512_vnni)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI;
+
+ if (i.tm.cpu_flags.bitfield.cpu8087
+ || i.tm.cpu_flags.bitfield.cpu287
+ || i.tm.cpu_flags.bitfield.cpu387
+ || i.tm.cpu_flags.bitfield.cpu687
+ || i.tm.cpu_flags.bitfield.cpufisttp)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
+ /* Don't set GNU_PROPERTY_X86_FEATURE_2_MMX for prefetchtXXX nor
+ Xfence instructions. */
+ if (i.tm.base_opcode != 0xf18
+ && i.tm.base_opcode != 0xf0d
+ && i.tm.base_opcode != 0xfae
+ && (i.has_regmmx
+ || i.tm.cpu_flags.bitfield.cpummx
+ || i.tm.cpu_flags.bitfield.cpua3dnow
+ || i.tm.cpu_flags.bitfield.cpua3dnowa))
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
+ if (i.has_regxmm)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
+ if (i.has_regymm)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
+ if (i.has_regzmm)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
+ if (i.tm.cpu_flags.bitfield.cpufxsr)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
+ if (i.tm.cpu_flags.bitfield.cpuxsave)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVE;
+ if (i.tm.cpu_flags.bitfield.cpuxsaveopt)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
+ if (i.tm.cpu_flags.bitfield.cpuxsavec)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
+ }
+#endif
+
/* Tie dwarf2 debug info to the address at the start of the insn.
We can't do this after the insn has been output as the current
frag may have been closed off. eg. by frag_var. */
{
fixP->fx_tcbit = i.rex != 0;
if (i.base_reg
- && (i.base_reg->reg_num == RegRip
- || i.base_reg->reg_num == RegEip))
+ && (i.base_reg->reg_num == RegIP))
fixP->fx_tcbit2 = 1;
}
else
if (addr_reg)
{
- if (addr_reg->reg_num == RegEip
- || addr_reg->reg_num == RegEiz
- || addr_reg->reg_type.bitfield.dword)
+ if (addr_reg->reg_type.bitfield.dword)
addr_mode = CODE_32BIT;
else if (flag_code != CODE_64BIT
&& addr_reg->reg_type.bitfield.word)
{
/* 32-bit/64-bit checks. */
if ((i.base_reg
- && (addr_mode == CODE_64BIT
- ? !i.base_reg->reg_type.bitfield.qword
- : !i.base_reg->reg_type.bitfield.dword)
- && (i.index_reg
- || (i.base_reg->reg_num
- != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
+ && ((addr_mode == CODE_64BIT
+ ? !i.base_reg->reg_type.bitfield.qword
+ : !i.base_reg->reg_type.bitfield.dword)
+ || (i.index_reg && i.base_reg->reg_num == RegIP)
+ || i.base_reg->reg_num == RegIZ))
|| (i.index_reg
&& !i.index_reg->reg_type.bitfield.xmmword
&& !i.index_reg->reg_type.bitfield.ymmword
&& !i.index_reg->reg_type.bitfield.zmmword
&& ((addr_mode == CODE_64BIT
- ? !(i.index_reg->reg_type.bitfield.qword
- || i.index_reg->reg_num == RegRiz)
- : !(i.index_reg->reg_type.bitfield.dword
- || i.index_reg->reg_num == RegEiz))
+ ? !i.index_reg->reg_type.bitfield.qword
+ : !i.index_reg->reg_type.bitfield.dword)
|| !i.index_reg->reg_type.bitfield.baseindex)))
goto bad_address;
|| (current_templates->start->base_opcode & ~1) == 0x0f1a)
{
/* They cannot use RIP-relative addressing. */
- if (i.base_reg && i.base_reg->reg_num == RegRip)
+ if (i.base_reg && i.base_reg->reg_num == RegIP)
{
as_bad (_("`%s' cannot be used here"), operand_string);
return 0;
if (i386_index_check (operand_string) == 0)
return 0;
- i.types[this_operand].bitfield.mem = 1;
+ i.flags[this_operand] |= Operand_Mem;
if (i.mem_operands == 0)
i.memop1_string = xstrdup (operand_string);
i.mem_operands++;
return (const reg_entry *) NULL;
/* Don't allow fake index register unless allow_index_reg isn't 0. */
- if (!allow_index_reg
- && (r->reg_num == RegEiz || r->reg_num == RegRiz))
+ if (!allow_index_reg && r->reg_num == RegIZ)
return (const reg_entry *) NULL;
/* Upper 16 vector registers are only available with VREX in 64bit
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
+#define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
struct option md_longopts[] =
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{"x32", no_argument, NULL, OPTION_X32},
{"mshared", no_argument, NULL, OPTION_MSHARED},
+ {"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
#endif
{"divide", no_argument, NULL, OPTION_DIVIDE},
{"march", required_argument, NULL, OPTION_MARCH},
case OPTION_MSHARED:
shared = 1;
break;
+
+ case OPTION_X86_USED_NOTE:
+ if (strcasecmp (arg, "yes") == 0)
+ x86_used_note = 1;
+ else if (strcasecmp (arg, "no") == 0)
+ x86_used_note = 0;
+ else
+ as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
+ break;
+
+
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
fprintf (stream, _("\
-s ignored\n"));
#endif
-#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP))
+#if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
+ || defined (TE_PE) || defined (TE_PEP))
fprintf (stream, _("\
--32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#endif
fprintf (stream, _("\
-msse2avx encode SSE instructions with VEX prefix\n"));
fprintf (stream, _("\
- -msse-check=[none|error|warning]\n\
+ -msse-check=[none|error|warning] (default: warning)\n\
check SSE instructions\n"));
fprintf (stream, _("\
- -moperand-check=[none|error|warning]\n\
+ -moperand-check=[none|error|warning] (default: warning)\n\
check operand combinations for validity\n"));
fprintf (stream, _("\
- -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
+ -mavxscalar=[128|256] (default: 128)\n\
+ encode scalar AVX instructions with specific vector\n\
length\n"));
fprintf (stream, _("\
- -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
+ -mevexlig=[128|256|512] (default: 128)\n\
+ encode scalar EVEX instructions with specific vector\n\
length\n"));
fprintf (stream, _("\
- -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
+ -mevexwig=[0|1] (default: 0)\n\
+ encode EVEX instructions with specific EVEX.W value\n\
for EVEX.W bit ignored instructions\n"));
fprintf (stream, _("\
- -mevexrcig=[rne|rd|ru|rz]\n\
+ -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
encode EVEX instructions with specific EVEX.RC value\n\
for SAE-only ignored instructions\n"));
fprintf (stream, _("\
- -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
+ -mmnemonic=[att|intel] "));
+ if (SYSV386_COMPAT)
+ fprintf (stream, _("(default: att)\n"));
+ else
+ fprintf (stream, _("(default: intel)\n"));
+ fprintf (stream, _("\
+ use AT&T/Intel mnemonic\n"));
fprintf (stream, _("\
- -msyntax=[att|intel] use AT&T/Intel syntax\n"));
+ -msyntax=[att|intel] (default: att)\n\
+ use AT&T/Intel syntax\n"));
fprintf (stream, _("\
-mindex-reg support pseudo index registers\n"));
fprintf (stream, _("\
-mnaked-reg don't require `%%' prefix for registers\n"));
fprintf (stream, _("\
-madd-bnd-prefix add BND prefix for all valid branches\n"));
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
fprintf (stream, _("\
-mshared disable branch optimization for shared code\n"));
-# if defined (TE_PE) || defined (TE_PEP)
+ fprintf (stream, _("\
+ -mx86-used-note=[no|yes] "));
+ if (DEFAULT_X86_USED_NOTE)
+ fprintf (stream, _("(default: yes)\n"));
+ else
+ fprintf (stream, _("(default: no)\n"));
+ fprintf (stream, _("\
+ generate x86 used ISA and feature properties\n"));
+#endif
+#if defined (TE_PE) || defined (TE_PEP)
fprintf (stream, _("\
-mbig-obj generate big object files\n"));
#endif
fprintf (stream, _("\
- -momit-lock-prefix=[no|yes]\n\
+ -momit-lock-prefix=[no|yes] (default: no)\n\
strip all lock prefixes\n"));
fprintf (stream, _("\
- -mfence-as-lock-add=[no|yes]\n\
+ -mfence-as-lock-add=[no|yes] (default: no)\n\
encode lfence, mfence and sfence as\n\
lock addl $0x0, (%%{re}sp)\n"));
fprintf (stream, _("\
- -mrelax-relocations=[no|yes]\n\
+ -mrelax-relocations=[no|yes] "));
+ if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS)
+ fprintf (stream, _("(default: yes)\n"));
+ else
+ fprintf (stream, _("(default: no)\n"));
+ fprintf (stream, _("\
generate relax relocations\n"));
fprintf (stream, _("\
- -mamd64 accept only AMD64 ISA\n"));
+ -mamd64 accept only AMD64 ISA [default]\n"));
fprintf (stream, _("\
-mintel64 accept only Intel64 ISA\n"));
}