X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gas%2Fconfig%2Ftc-i386.c;h=457f557c9ad94ee054c6274c8b697b9a6bbd6fc9;hb=d05584d3eeab4cb1c1d85d8dfdfef56827e03b3d;hp=973ebe4474fc10d9dfae05ecf7fde3d589ac5120;hpb=e4630f71b252fa13df07f93d427e7c9f4505c1fe;p=deliverable%2Fbinutils-gdb.git diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c index 973ebe4474..457f557c9a 100644 --- a/gas/config/tc-i386.c +++ b/gas/config/tc-i386.c @@ -1,8 +1,5 @@ /* tc-i386.c -- Assemble code for the Intel 80386 - Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, - 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, - 2012 - Free Software Foundation, Inc. + Copyright (C) 1989-2016 Free Software Foundation, Inc. This file is part of GAS, the GNU Assembler. @@ -136,10 +133,18 @@ typedef struct enum processor_type type; /* arch type */ i386_cpu_flags flags; /* cpu feature flags */ unsigned int skip; /* show_arch should skip this. */ - unsigned int negated; /* turn off indicated flags. */ } arch_entry; +/* Used to turn off indicated flags. */ +typedef struct +{ + const char *name; /* arch name */ + unsigned int len; /* arch string length */ + i386_cpu_flags flags; /* cpu feature flags */ +} +noarch_entry; + static void update_code_flag (int, int); static void set_code_flag (int); static void set_16bit_gcc_code_flag (int); @@ -516,10 +521,24 @@ enum x86_elf_abi static enum x86_elf_abi x86_elf_abi = I386_ABI; #endif +#if defined (TE_PE) || defined (TE_PEP) +/* Use big object file format. */ +static int use_big_obj = 0; +#endif + +#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) +/* 1 if generating code for a shared library. */ +static int shared = 0; +#endif + /* 1 for intel syntax, 0 if att syntax. */ static int intel_syntax = 0; +/* 1 for Intel64 ISA, + 0 if AMD64 ISA. */ +static int intel64; + /* 1 for intel mnemonic, 0 if att mnemonic. */ static int intel_mnemonic = !SYSV386_COMPAT; @@ -541,6 +560,19 @@ static int add_bnd_prefix = 0; /* 1 if pseudo index register, eiz/riz, is allowed . */ static int allow_index_reg = 0; +/* 1 if the assembler should ignore LOCK prefix, even if it was + specified explicitly. */ +static int omit_lock_prefix = 0; + +/* 1 if the assembler should encode lfence, mfence, and sfence as + "lock addl $0, (%{re}sp)". */ +static int avoid_fence = 0; + +/* 1 if the assembler should generate relax relocations. */ + +static int generate_relax_relocations + = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS; + static enum check_kind { check_none = 0, @@ -614,6 +646,9 @@ static enum evexw1 } evexwig; +/* Value to encode in EVEX RC bits, for SAE-only instructions. */ +static enum rc_type evexrcig = rne; + /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */ static symbolS *GOT_symbol; @@ -707,203 +742,260 @@ static const arch_entry cpu_arch[] = /* Do not replace the first two entries - i386_target_format() relies on them being there in this order. 
*/ { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32, - CPU_GENERIC32_FLAGS, 0, 0 }, + CPU_GENERIC32_FLAGS, 0 }, { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64, - CPU_GENERIC64_FLAGS, 0, 0 }, + CPU_GENERIC64_FLAGS, 0 }, { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN, - CPU_NONE_FLAGS, 0, 0 }, + CPU_NONE_FLAGS, 0 }, { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN, - CPU_I186_FLAGS, 0, 0 }, + CPU_I186_FLAGS, 0 }, { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN, - CPU_I286_FLAGS, 0, 0 }, + CPU_I286_FLAGS, 0 }, { STRING_COMMA_LEN ("i386"), PROCESSOR_I386, - CPU_I386_FLAGS, 0, 0 }, + CPU_I386_FLAGS, 0 }, { STRING_COMMA_LEN ("i486"), PROCESSOR_I486, - CPU_I486_FLAGS, 0, 0 }, + CPU_I486_FLAGS, 0 }, { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM, - CPU_I586_FLAGS, 0, 0 }, + CPU_I586_FLAGS, 0 }, { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO, - CPU_I686_FLAGS, 0, 0 }, + CPU_I686_FLAGS, 0 }, { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM, - CPU_I586_FLAGS, 0, 0 }, + CPU_I586_FLAGS, 0 }, { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO, - CPU_PENTIUMPRO_FLAGS, 0, 0 }, + CPU_PENTIUMPRO_FLAGS, 0 }, { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO, - CPU_P2_FLAGS, 0, 0 }, + CPU_P2_FLAGS, 0 }, { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO, - CPU_P3_FLAGS, 0, 0 }, + CPU_P3_FLAGS, 0 }, { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4, - CPU_P4_FLAGS, 0, 0 }, + CPU_P4_FLAGS, 0 }, { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA, - CPU_CORE_FLAGS, 0, 0 }, + CPU_CORE_FLAGS, 0 }, { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA, - CPU_NOCONA_FLAGS, 0, 0 }, + CPU_NOCONA_FLAGS, 0 }, { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE, - CPU_CORE_FLAGS, 1, 0 }, + CPU_CORE_FLAGS, 1 }, { STRING_COMMA_LEN ("core"), PROCESSOR_CORE, - CPU_CORE_FLAGS, 0, 0 }, + CPU_CORE_FLAGS, 0 }, { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2, - CPU_CORE2_FLAGS, 1, 0 }, + CPU_CORE2_FLAGS, 1 }, { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2, - CPU_CORE2_FLAGS, 0, 0 }, + CPU_CORE2_FLAGS, 0 }, { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7, - CPU_COREI7_FLAGS, 0, 0 }, + CPU_COREI7_FLAGS, 0 }, { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM, - CPU_L1OM_FLAGS, 0, 0 }, + CPU_L1OM_FLAGS, 0 }, { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM, - CPU_K1OM_FLAGS, 0, 0 }, + CPU_K1OM_FLAGS, 0 }, + { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU, + CPU_IAMCU_FLAGS, 0 }, { STRING_COMMA_LEN ("k6"), PROCESSOR_K6, - CPU_K6_FLAGS, 0, 0 }, + CPU_K6_FLAGS, 0 }, { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6, - CPU_K6_2_FLAGS, 0, 0 }, + CPU_K6_2_FLAGS, 0 }, { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON, - CPU_ATHLON_FLAGS, 0, 0 }, + CPU_ATHLON_FLAGS, 0 }, { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8, - CPU_K8_FLAGS, 1, 0 }, + CPU_K8_FLAGS, 1 }, { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8, - CPU_K8_FLAGS, 0, 0 }, + CPU_K8_FLAGS, 0 }, { STRING_COMMA_LEN ("k8"), PROCESSOR_K8, - CPU_K8_FLAGS, 0, 0 }, + CPU_K8_FLAGS, 0 }, { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10, - CPU_AMDFAM10_FLAGS, 0, 0 }, + CPU_AMDFAM10_FLAGS, 0 }, { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD, - CPU_BDVER1_FLAGS, 0, 0 }, + CPU_BDVER1_FLAGS, 0 }, { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD, - CPU_BDVER2_FLAGS, 0, 0 }, + CPU_BDVER2_FLAGS, 0 }, { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD, - CPU_BDVER3_FLAGS, 0, 0 }, + CPU_BDVER3_FLAGS, 0 }, { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD, - CPU_BDVER4_FLAGS, 0, 0 }, + CPU_BDVER4_FLAGS, 0 }, + { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER, + CPU_ZNVER1_FLAGS, 0 }, { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT, - 
CPU_BTVER1_FLAGS, 0, 0 }, + CPU_BTVER1_FLAGS, 0 }, { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT, - CPU_BTVER2_FLAGS, 0, 0 }, + CPU_BTVER2_FLAGS, 0 }, { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN, - CPU_8087_FLAGS, 0, 0 }, + CPU_8087_FLAGS, 0 }, { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN, - CPU_287_FLAGS, 0, 0 }, + CPU_287_FLAGS, 0 }, { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN, - CPU_387_FLAGS, 0, 0 }, - { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN, - CPU_ANY87_FLAGS, 0, 1 }, + CPU_387_FLAGS, 0 }, + { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN, + CPU_687_FLAGS, 0 }, { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN, - CPU_MMX_FLAGS, 0, 0 }, - { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN, - CPU_3DNOWA_FLAGS, 0, 1 }, + CPU_MMX_FLAGS, 0 }, { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN, - CPU_SSE_FLAGS, 0, 0 }, + CPU_SSE_FLAGS, 0 }, { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN, - CPU_SSE2_FLAGS, 0, 0 }, + CPU_SSE2_FLAGS, 0 }, { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN, - CPU_SSE3_FLAGS, 0, 0 }, + CPU_SSE3_FLAGS, 0 }, { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN, - CPU_SSSE3_FLAGS, 0, 0 }, + CPU_SSSE3_FLAGS, 0 }, { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN, - CPU_SSE4_1_FLAGS, 0, 0 }, + CPU_SSE4_1_FLAGS, 0 }, { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN, - CPU_SSE4_2_FLAGS, 0, 0 }, + CPU_SSE4_2_FLAGS, 0 }, { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN, - CPU_SSE4_2_FLAGS, 0, 0 }, - { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN, - CPU_ANY_SSE_FLAGS, 0, 1 }, + CPU_SSE4_2_FLAGS, 0 }, { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN, - CPU_AVX_FLAGS, 0, 0 }, + CPU_AVX_FLAGS, 0 }, { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN, - CPU_AVX2_FLAGS, 0, 0 }, + CPU_AVX2_FLAGS, 0 }, { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN, - CPU_AVX512F_FLAGS, 0, 0 }, + CPU_AVX512F_FLAGS, 0 }, { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN, - CPU_AVX512CD_FLAGS, 0, 0 }, + CPU_AVX512CD_FLAGS, 0 }, { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN, - CPU_AVX512ER_FLAGS, 0, 0 }, + CPU_AVX512ER_FLAGS, 0 }, { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN, - CPU_AVX512PF_FLAGS, 0, 0 }, - { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN, - CPU_ANY_AVX_FLAGS, 0, 1 }, + CPU_AVX512PF_FLAGS, 0 }, + { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN, + CPU_AVX512DQ_FLAGS, 0 }, + { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN, + CPU_AVX512BW_FLAGS, 0 }, + { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN, + CPU_AVX512VL_FLAGS, 0 }, { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN, - CPU_VMX_FLAGS, 0, 0 }, + CPU_VMX_FLAGS, 0 }, { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN, - CPU_VMFUNC_FLAGS, 0, 0 }, + CPU_VMFUNC_FLAGS, 0 }, { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN, - CPU_SMX_FLAGS, 0, 0 }, + CPU_SMX_FLAGS, 0 }, { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN, - CPU_XSAVE_FLAGS, 0, 0 }, + CPU_XSAVE_FLAGS, 0 }, { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN, - CPU_XSAVEOPT_FLAGS, 0, 0 }, + CPU_XSAVEOPT_FLAGS, 0 }, + { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN, + CPU_XSAVEC_FLAGS, 0 }, + { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN, + CPU_XSAVES_FLAGS, 0 }, { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN, - CPU_AES_FLAGS, 0, 0 }, + CPU_AES_FLAGS, 0 }, { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN, - CPU_PCLMUL_FLAGS, 0, 0 }, + CPU_PCLMUL_FLAGS, 0 }, { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN, - CPU_PCLMUL_FLAGS, 1, 0 }, + CPU_PCLMUL_FLAGS, 1 }, { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN, - CPU_FSGSBASE_FLAGS, 0, 0 }, + 
CPU_FSGSBASE_FLAGS, 0 }, { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN, - CPU_RDRND_FLAGS, 0, 0 }, + CPU_RDRND_FLAGS, 0 }, { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN, - CPU_F16C_FLAGS, 0, 0 }, + CPU_F16C_FLAGS, 0 }, { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN, - CPU_BMI2_FLAGS, 0, 0 }, + CPU_BMI2_FLAGS, 0 }, { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN, - CPU_FMA_FLAGS, 0, 0 }, + CPU_FMA_FLAGS, 0 }, { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN, - CPU_FMA4_FLAGS, 0, 0 }, + CPU_FMA4_FLAGS, 0 }, { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN, - CPU_XOP_FLAGS, 0, 0 }, + CPU_XOP_FLAGS, 0 }, { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN, - CPU_LWP_FLAGS, 0, 0 }, + CPU_LWP_FLAGS, 0 }, { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN, - CPU_MOVBE_FLAGS, 0, 0 }, + CPU_MOVBE_FLAGS, 0 }, { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN, - CPU_CX16_FLAGS, 0, 0 }, + CPU_CX16_FLAGS, 0 }, { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN, - CPU_EPT_FLAGS, 0, 0 }, + CPU_EPT_FLAGS, 0 }, { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN, - CPU_LZCNT_FLAGS, 0, 0 }, + CPU_LZCNT_FLAGS, 0 }, { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN, - CPU_HLE_FLAGS, 0, 0 }, + CPU_HLE_FLAGS, 0 }, { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN, - CPU_RTM_FLAGS, 0, 0 }, + CPU_RTM_FLAGS, 0 }, { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN, - CPU_INVPCID_FLAGS, 0, 0 }, + CPU_INVPCID_FLAGS, 0 }, { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN, - CPU_CLFLUSH_FLAGS, 0, 0 }, + CPU_CLFLUSH_FLAGS, 0 }, { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN, - CPU_NOP_FLAGS, 0, 0 }, + CPU_NOP_FLAGS, 0 }, { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN, - CPU_SYSCALL_FLAGS, 0, 0 }, + CPU_SYSCALL_FLAGS, 0 }, { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN, - CPU_RDTSCP_FLAGS, 0, 0 }, + CPU_RDTSCP_FLAGS, 0 }, { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN, - CPU_3DNOW_FLAGS, 0, 0 }, + CPU_3DNOW_FLAGS, 0 }, { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN, - CPU_3DNOWA_FLAGS, 0, 0 }, + CPU_3DNOWA_FLAGS, 0 }, { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN, - CPU_PADLOCK_FLAGS, 0, 0 }, + CPU_PADLOCK_FLAGS, 0 }, { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN, - CPU_SVME_FLAGS, 1, 0 }, + CPU_SVME_FLAGS, 1 }, { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN, - CPU_SVME_FLAGS, 0, 0 }, + CPU_SVME_FLAGS, 0 }, { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN, - CPU_SSE4A_FLAGS, 0, 0 }, + CPU_SSE4A_FLAGS, 0 }, { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN, - CPU_ABM_FLAGS, 0, 0 }, + CPU_ABM_FLAGS, 0 }, { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN, - CPU_BMI_FLAGS, 0, 0 }, + CPU_BMI_FLAGS, 0 }, { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN, - CPU_TBM_FLAGS, 0, 0 }, + CPU_TBM_FLAGS, 0 }, { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN, - CPU_ADX_FLAGS, 0, 0 }, + CPU_ADX_FLAGS, 0 }, { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN, - CPU_RDSEED_FLAGS, 0, 0 }, + CPU_RDSEED_FLAGS, 0 }, { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN, - CPU_PRFCHW_FLAGS, 0, 0 }, + CPU_PRFCHW_FLAGS, 0 }, { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN, - CPU_SMAP_FLAGS, 0, 0 }, + CPU_SMAP_FLAGS, 0 }, { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN, - CPU_MPX_FLAGS, 0, 0 }, + CPU_MPX_FLAGS, 0 }, { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN, - CPU_SHA_FLAGS, 0, 0 }, + CPU_SHA_FLAGS, 0 }, + { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN, + CPU_CLFLUSHOPT_FLAGS, 0 }, + { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN, + CPU_PREFETCHWT1_FLAGS, 0 }, + { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN, + CPU_SE1_FLAGS, 0 }, + { 
STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN, + CPU_CLWB_FLAGS, 0 }, + { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN, + CPU_PCOMMIT_FLAGS, 0 }, + { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN, + CPU_AVX512IFMA_FLAGS, 0 }, + { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN, + CPU_AVX512VBMI_FLAGS, 0 }, + { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN, + CPU_CLZERO_FLAGS, 0 }, + { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN, + CPU_MWAITX_FLAGS, 0 }, + { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN, + CPU_OSPKE_FLAGS, 0 }, + { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN, + CPU_RDPID_FLAGS, 0 }, +}; + +static const noarch_entry cpu_noarch[] = +{ + { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS }, + { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS }, + { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS }, + { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS }, + { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS }, + { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS }, + { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS }, + { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS }, + { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS }, + { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS }, + { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS }, + { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS }, + { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS }, + { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS }, + { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS }, + { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS }, + { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS }, + { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS }, + { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS }, + { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS }, + { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS }, + { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS }, + { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS }, }; #ifdef I386COFF @@ -1008,173 +1100,97 @@ i386_align_code (fragS *fragP, int count) /* Various efficient no-op patterns for aligning code labels. Note: Don't try to assemble the instructions in the comments. 0L and 0w are not legal. 
*/ - static const char f32_1[] = + static const unsigned char f32_1[] = {0x90}; /* nop */ - static const char f32_2[] = + static const unsigned char f32_2[] = {0x66,0x90}; /* xchg %ax,%ax */ - static const char f32_3[] = + static const unsigned char f32_3[] = {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */ - static const char f32_4[] = + static const unsigned char f32_4[] = {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */ - static const char f32_5[] = + static const unsigned char f32_5[] = {0x90, /* nop */ 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */ - static const char f32_6[] = + static const unsigned char f32_6[] = {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */ - static const char f32_7[] = + static const unsigned char f32_7[] = {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */ - static const char f32_8[] = + static const unsigned char f32_8[] = {0x90, /* nop */ 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */ - static const char f32_9[] = + static const unsigned char f32_9[] = {0x89,0xf6, /* movl %esi,%esi */ 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ - static const char f32_10[] = + static const unsigned char f32_10[] = {0x8d,0x76,0x00, /* leal 0(%esi),%esi */ 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ - static const char f32_11[] = + static const unsigned char f32_11[] = {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */ 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ - static const char f32_12[] = + static const unsigned char f32_12[] = {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */ 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */ - static const char f32_13[] = + static const unsigned char f32_13[] = {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */ 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ - static const char f32_14[] = + static const unsigned char f32_14[] = {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */ 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */ - static const char f16_3[] = + static const unsigned char f16_3[] = {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */ - static const char f16_4[] = + static const unsigned char f16_4[] = {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */ - static const char f16_5[] = + static const unsigned char f16_5[] = {0x90, /* nop */ 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */ - static const char f16_6[] = + static const unsigned char f16_6[] = {0x89,0xf6, /* mov %si,%si */ 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ - static const char f16_7[] = + static const unsigned char f16_7[] = {0x8d,0x74,0x00, /* lea 0(%si),%si */ 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ - static const char f16_8[] = + static const unsigned char f16_8[] = {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */ 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */ - static const char jump_31[] = + static const unsigned char jump_31[] = {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */ 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90, 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90, 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90}; - static const char *const f32_patt[] = { + static const unsigned char *const f32_patt[] = { f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8, f32_9, f32_10, f32_11, f32_12, f32_13, f32_14 }; - static const char *const f16_patt[] = { + static const unsigned char *const f16_patt[] = { f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8 }; /* nopl (%[re]ax) */ - static const char alt_3[] = + static const 
unsigned char alt_3[] = {0x0f,0x1f,0x00}; /* nopl 0(%[re]ax) */ - static const char alt_4[] = + static const unsigned char alt_4[] = {0x0f,0x1f,0x40,0x00}; /* nopl 0(%[re]ax,%[re]ax,1) */ - static const char alt_5[] = + static const unsigned char alt_5[] = {0x0f,0x1f,0x44,0x00,0x00}; /* nopw 0(%[re]ax,%[re]ax,1) */ - static const char alt_6[] = + static const unsigned char alt_6[] = {0x66,0x0f,0x1f,0x44,0x00,0x00}; /* nopl 0L(%[re]ax) */ - static const char alt_7[] = + static const unsigned char alt_7[] = {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00}; /* nopl 0L(%[re]ax,%[re]ax,1) */ - static const char alt_8[] = + static const unsigned char alt_8[] = {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; /* nopw 0L(%[re]ax,%[re]ax,1) */ - static const char alt_9[] = + static const unsigned char alt_9[] = {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; /* nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_10[] = + static const unsigned char alt_10[] = {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* data16 - nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_long_11[] = - {0x66, - 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* data16 - data16 - nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_long_12[] = - {0x66, - 0x66, - 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* data16 - data16 - data16 - nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_long_13[] = - {0x66, - 0x66, - 0x66, - 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* data16 - data16 - data16 - data16 - nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_long_14[] = - {0x66, - 0x66, - 0x66, - 0x66, - 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* data16 - data16 - data16 - data16 - data16 - nopw %cs:0L(%[re]ax,%[re]ax,1) */ - static const char alt_long_15[] = - {0x66, - 0x66, - 0x66, - 0x66, - 0x66, - 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - /* nopl 0(%[re]ax,%[re]ax,1) - nopw 0(%[re]ax,%[re]ax,1) */ - static const char alt_short_11[] = - {0x0f,0x1f,0x44,0x00,0x00, - 0x66,0x0f,0x1f,0x44,0x00,0x00}; - /* nopw 0(%[re]ax,%[re]ax,1) - nopw 0(%[re]ax,%[re]ax,1) */ - static const char alt_short_12[] = - {0x66,0x0f,0x1f,0x44,0x00,0x00, - 0x66,0x0f,0x1f,0x44,0x00,0x00}; - /* nopw 0(%[re]ax,%[re]ax,1) - nopl 0L(%[re]ax) */ - static const char alt_short_13[] = - {0x66,0x0f,0x1f,0x44,0x00,0x00, - 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00}; - /* nopl 0L(%[re]ax) - nopl 0L(%[re]ax) */ - static const char alt_short_14[] = - {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00, - 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00}; - /* nopl 0L(%[re]ax) - nopl 0L(%[re]ax,%[re]ax,1) */ - static const char alt_short_15[] = - {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00, - 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00}; - static const char *const alt_short_patt[] = { + static const unsigned char *const alt_patt[] = { f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8, - alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13, - alt_short_14, alt_short_15 - }; - static const char *const alt_long_patt[] = { - f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8, - alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13, - alt_long_14, alt_long_15 + alt_9, alt_10 }; /* Only align for at least a positive non-zero boundary. */ @@ -1186,14 +1202,9 @@ i386_align_code (fragS *fragP, int count) 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and PROCESSOR_GENERIC32, f32_patt will be used. - 2. 
For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA, - PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and - PROCESSOR_GENERIC64, alt_long_patt will be used. - 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and - PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt - will be used. - - When -mtune= isn't used, alt_long_patt will be used if + 2. For the rest, alt_patt will be used. + + When -mtune= isn't used, alt_patt will be used if cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will be used. @@ -1215,7 +1226,7 @@ i386_align_code (fragS *fragP, int count) } else { - const char *const *patt = NULL; + const unsigned char *const *patt = NULL; if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN) { @@ -1226,7 +1237,7 @@ i386_align_code (fragS *fragP, int count) /* We use cpu_arch_isa_flags to check if we SHOULD optimize with nops. */ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) - patt = alt_long_patt; + patt = alt_patt; else patt = f32_patt; break; @@ -1238,20 +1249,20 @@ i386_align_code (fragS *fragP, int count) case PROCESSOR_L1OM: case PROCESSOR_K1OM: case PROCESSOR_GENERIC64: - patt = alt_long_patt; - break; case PROCESSOR_K6: case PROCESSOR_ATHLON: case PROCESSOR_K8: case PROCESSOR_AMDFAM10: case PROCESSOR_BD: + case PROCESSOR_ZNVER: case PROCESSOR_BT: - patt = alt_short_patt; + patt = alt_patt; break; case PROCESSOR_I386: case PROCESSOR_I486: case PROCESSOR_PENTIUM: case PROCESSOR_PENTIUMPRO: + case PROCESSOR_IAMCU: case PROCESSOR_GENERIC32: patt = f32_patt; break; @@ -1270,17 +1281,19 @@ i386_align_code (fragS *fragP, int count) case PROCESSOR_I386: case PROCESSOR_I486: case PROCESSOR_PENTIUM: + case PROCESSOR_IAMCU: case PROCESSOR_K6: case PROCESSOR_ATHLON: case PROCESSOR_K8: case PROCESSOR_AMDFAM10: case PROCESSOR_BD: + case PROCESSOR_ZNVER: case PROCESSOR_BT: case PROCESSOR_GENERIC32: /* We use cpu_arch_isa_flags to check if we CAN optimize with nops. */ if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) - patt = alt_short_patt; + patt = alt_patt; else patt = f32_patt; break; @@ -1293,12 +1306,12 @@ i386_align_code (fragS *fragP, int count) case PROCESSOR_L1OM: case PROCESSOR_K1OM: if (fragP->tc_frag_data.isa_flags.bitfield.cpunop) - patt = alt_long_patt; + patt = alt_patt; else patt = f32_patt; break; case PROCESSOR_GENERIC64: - patt = alt_long_patt; + patt = alt_patt; break; } } @@ -1329,15 +1342,15 @@ i386_align_code (fragS *fragP, int count) } else { - /* Maximum length of an instruction is 15 byte. If the - padding is greater than 15 bytes and we don't use jump, + /* Maximum length of an instruction is 10 byte. If the + padding is greater than 10 bytes and we don't use jump, we have to break it into smaller pieces. 
*/ int padding = count; - while (padding > 15) + while (padding > 10) { - padding -= 15; + padding -= 10; memcpy (fragP->fr_literal + fragP->fr_fix + padding, - patt [14], 15); + patt [9], 10); } if (padding) @@ -1421,23 +1434,6 @@ cpu_flags_all_zero (const union i386_cpu_flags *x) } } -static INLINE void -cpu_flags_set (union i386_cpu_flags *x, unsigned int v) -{ - switch (ARRAY_SIZE(x->array)) - { - case 3: - x->array[2] = v; - case 2: - x->array[1] = v; - case 1: - x->array[0] = v; - break; - default: - abort (); - } -} - static INLINE int cpu_flags_equal (const union i386_cpu_flags *x, const union i386_cpu_flags *y) @@ -1519,6 +1515,20 @@ cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y) return x; } +static int +valid_iamcu_cpu_flags (const i386_cpu_flags *flags) +{ + if (cpu_arch_isa == PROCESSOR_IAMCU) + { + static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS; + i386_cpu_flags compat_flags; + compat_flags = cpu_flags_and_not (*flags, iamcu_flags); + return cpu_flags_all_zero (&compat_flags); + } + else + return 1; +} + #define CPU_FLAGS_ARCH_MATCH 0x1 #define CPU_FLAGS_64BIT_MATCH 0x2 #define CPU_FLAGS_AES_MATCH 0x4 @@ -1552,8 +1562,6 @@ cpu_flags_match (const insn_template *t) /* This instruction is available only on some archs. */ i386_cpu_flags cpu = cpu_arch_flags; - cpu.bitfield.cpu64 = 0; - cpu.bitfield.cpuno64 = 0; cpu = cpu_flags_and (x, cpu); if (!cpu_flags_all_zero (&cpu)) { @@ -1579,6 +1587,21 @@ cpu_flags_match (const insn_template *t) else match |= CPU_FLAGS_ARCH_MATCH; } + else if (x.bitfield.cpuavx512vl) + { + /* Match AVX512VL. */ + if (cpu.bitfield.cpuavx512vl) + { + /* Need another match. */ + cpu.bitfield.cpuavx512vl = 0; + if (!cpu_flags_all_zero (&cpu)) + match |= CPU_FLAGS_32BIT_MATCH; + else + match |= CPU_FLAGS_ARCH_MATCH; + } + else + match |= CPU_FLAGS_ARCH_MATCH; + } else match |= CPU_FLAGS_32BIT_MATCH; } @@ -1667,8 +1690,6 @@ static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32; static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S; static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S; static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4; -static const i386_operand_type regbnd = OPERAND_TYPE_REGBND; -static const i386_operand_type vec_disp8 = OPERAND_TYPE_VEC_DISP8; enum operand_type { @@ -1743,6 +1764,7 @@ match_mem_size (const insn_template *t, unsigned int j) { return (match_reg_size (t, j) && !((i.types[j].bitfield.unspecified + && !i.broadcast && !t->operand_types[j].bitfield.unspecified) || (i.types[j].bitfield.fword && !t->operand_types[j].bitfield.fword) @@ -1906,6 +1928,9 @@ register_number (const reg_entry *r) if (r->reg_flags & RegRex) nr += 8; + if (r->reg_flags & RegVRex) + nr += 16; + return nr; } @@ -1923,47 +1948,46 @@ mode_from_disp_size (i386_operand_type t) } static INLINE int -fits_in_signed_byte (offsetT num) +fits_in_signed_byte (addressT num) { - return (num >= -128) && (num <= 127); + return num + 0x80 <= 0xff; } static INLINE int -fits_in_unsigned_byte (offsetT num) +fits_in_unsigned_byte (addressT num) { - return (num & 0xff) == num; + return num <= 0xff; } static INLINE int -fits_in_unsigned_word (offsetT num) +fits_in_unsigned_word (addressT num) { - return (num & 0xffff) == num; + return num <= 0xffff; } static INLINE int -fits_in_signed_word (offsetT num) +fits_in_signed_word (addressT num) { - return (-32768 <= num) && (num <= 32767); + return num + 0x8000 <= 0xffff; } static INLINE int -fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED) 
+fits_in_signed_long (addressT num ATTRIBUTE_UNUSED) { #ifndef BFD64 return 1; #else - return (!(((offsetT) -1 << 31) & num) - || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31)); + return num + 0x80000000 <= 0xffffffff; #endif } /* fits_in_signed_long() */ static INLINE int -fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED) +fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED) { #ifndef BFD64 return 1; #else - return (num & (((offsetT) 2 << 31) - 1)) == num; + return num <= 0xffffffff; #endif } /* fits_in_unsigned_long() */ @@ -2230,8 +2254,8 @@ set_intel_syntax (int syntax_flag) SKIP_WHITESPACE (); if (!is_end_of_line[(unsigned char) *input_line_pointer]) { - char *string = input_line_pointer; - int e = get_symbol_end (); + char *string; + int e = get_symbol_name (&string); if (strcmp (string, "prefix") == 0) ask_naked_reg = 1; @@ -2239,7 +2263,7 @@ set_intel_syntax (int syntax_flag) ask_naked_reg = -1; else as_bad (_("bad argument to syntax directive.")); - *input_line_pointer = e; + (void) restore_line_pointer (e); } demand_empty_rest_of_line (); @@ -2291,8 +2315,8 @@ set_check (int what) if (!is_end_of_line[(unsigned char) *input_line_pointer]) { - char *string = input_line_pointer; - int e = get_symbol_end (); + char *string; + int e = get_symbol_name (&string); if (strcmp (string, "none") == 0) *kind = check_none; @@ -2302,7 +2326,7 @@ set_check (int what) *kind = check_error; else as_bad (_("bad argument to %s_check directive."), str); - *input_line_pointer = e; + (void) restore_line_pointer (e); } else as_bad (_("missing argument for %s_check directive"), str); @@ -2330,6 +2354,11 @@ check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED, arch = default_arch; } + /* If we are targeting Intel MCU, we must enable it. */ + if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU + || new_flag.bitfield.cpuiamcu) + return; + /* If we are targeting Intel L1OM, we must enable it. */ if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM || new_flag.bitfield.cpul1om) @@ -2351,8 +2380,8 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED) if (!is_end_of_line[(unsigned char) *input_line_pointer]) { - char *string = input_line_pointer; - int e = get_symbol_end (); + char *string; + int e = get_symbol_name (&string); unsigned int j; i386_cpu_flags flags; @@ -2387,13 +2416,13 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED) break; } - if (!cpu_arch[j].negated) - flags = cpu_flags_or (cpu_arch_flags, - cpu_arch[j].flags); - else - flags = cpu_flags_and_not (cpu_arch_flags, - cpu_arch[j].flags); - if (!cpu_flags_equal (&flags, &cpu_arch_flags)) + flags = cpu_flags_or (cpu_arch_flags, + cpu_arch[j].flags); + + if (!valid_iamcu_cpu_flags (&flags)) + as_fatal (_("`%s' isn't valid for Intel MCU"), + cpu_arch[j].name); + else if (!cpu_flags_equal (&flags, &cpu_arch_flags)) { if (cpu_sub_arch_name) { @@ -2408,11 +2437,42 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED) cpu_arch_flags = flags; cpu_arch_isa_flags = flags; } - *input_line_pointer = e; + (void) restore_line_pointer (e); demand_empty_rest_of_line (); return; } } + + if (*string == '.' && j >= ARRAY_SIZE (cpu_arch)) + { + /* Disable an ISA entension. 
*/ + for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++) + if (strcmp (string + 1, cpu_noarch [j].name) == 0) + { + flags = cpu_flags_and_not (cpu_arch_flags, + cpu_noarch[j].flags); + if (!cpu_flags_equal (&flags, &cpu_arch_flags)) + { + if (cpu_sub_arch_name) + { + char *name = cpu_sub_arch_name; + cpu_sub_arch_name = concat (name, string, + (const char *) NULL); + free (name); + } + else + cpu_sub_arch_name = xstrdup (string); + cpu_arch_flags = flags; + cpu_arch_isa_flags = flags; + } + (void) restore_line_pointer (e); + demand_empty_rest_of_line (); + return; + } + + j = ARRAY_SIZE (cpu_arch); + } + if (j >= ARRAY_SIZE (cpu_arch)) as_bad (_("no such architecture: `%s'"), string); @@ -2425,8 +2485,11 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED) if (*input_line_pointer == ',' && !is_end_of_line[(unsigned char) input_line_pointer[1]]) { - char *string = ++input_line_pointer; - int e = get_symbol_end (); + char *string; + char e; + + ++input_line_pointer; + e = get_symbol_name (&string); if (strcmp (string, "nojumps") == 0) no_cond_jump_promotion = 1; @@ -2435,7 +2498,7 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED) else as_bad (_("no such architecture modifier: `%s'"), string); - *input_line_pointer = e; + (void) restore_line_pointer (e); } demand_empty_rest_of_line (); @@ -2458,6 +2521,13 @@ i386_arch (void) as_fatal (_("Intel K1OM is 64bit ELF only")); return bfd_arch_k1om; } + else if (cpu_arch_isa == PROCESSOR_IAMCU) + { + if (OUTPUT_FLAVOR != bfd_target_elf_flavour + || flag_code == CODE_64BIT) + as_fatal (_("Intel MCU is 32bit ELF only")); + return bfd_arch_iamcu; + } else return bfd_arch_i386; } @@ -2486,8 +2556,18 @@ i386_mach (void) else return bfd_mach_x64_32; } - else if (!strcmp (default_arch, "i386")) - return bfd_mach_i386_i386; + else if (!strcmp (default_arch, "i386") + || !strcmp (default_arch, "iamcu")) + { + if (cpu_arch_isa == PROCESSOR_IAMCU) + { + if (OUTPUT_FLAVOR != bfd_target_elf_flavour) + as_fatal (_("Intel MCU is 32bit ELF only")); + return bfd_mach_i386_iamcu; + } + else + return bfd_mach_i386_i386; + } else as_fatal (_("unknown architecture")); } @@ -2506,7 +2586,7 @@ md_begin (void) /* Setup for loop. 
*/ optab = i386_optab; - core_optab = (templates *) xmalloc (sizeof (templates)); + core_optab = XNEW (templates); core_optab->start = optab; while (1) @@ -2529,7 +2609,7 @@ md_begin (void) } if (optab->name == NULL) break; - core_optab = (templates *) xmalloc (sizeof (templates)); + core_optab = XNEW (templates); core_optab->start = optab; } } @@ -2812,6 +2892,9 @@ reloc (unsigned int size, case BFD_RELOC_X86_64_GOT32: return BFD_RELOC_X86_64_GOT64; break; + case BFD_RELOC_X86_64_GOTPLT64: + return BFD_RELOC_X86_64_GOTPLT64; + break; case BFD_RELOC_X86_64_PLTOFF64: return BFD_RELOC_X86_64_PLTOFF64; break; @@ -2835,9 +2918,12 @@ reloc (unsigned int size, if (other == BFD_RELOC_SIZE32) { if (size == 8) - return BFD_RELOC_SIZE64; + other = BFD_RELOC_SIZE64; if (pcrel) - as_bad (_("there are no pc-relative size relocations")); + { + as_bad (_("there are no pc-relative size relocations")); + return NO_RELOC; + } } #endif @@ -2931,6 +3017,7 @@ tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED) || fixP->fx_r_type == BFD_RELOC_386_GOTOFF || fixP->fx_r_type == BFD_RELOC_386_PLT32 || fixP->fx_r_type == BFD_RELOC_386_GOT32 + || fixP->fx_r_type == BFD_RELOC_386_GOT32X || fixP->fx_r_type == BFD_RELOC_386_TLS_GD || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32 @@ -2944,6 +3031,8 @@ tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED) || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL + || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX + || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32 @@ -3147,14 +3236,8 @@ build_vex_prefix (const insn_template *t) /* Check the REX.W bit. */ w = (i.rex & REX_W) ? 1 : 0; - if (i.tm.opcode_modifier.vexw) - { - if (w) - abort (); - - if (i.tm.opcode_modifier.vexw == VEXW1) - w = 1; - } + if (i.tm.opcode_modifier.vexw == VEXW1) + w = 1; i.vex.bytes[2] = (w << 7 | register_specifier << 3 @@ -3325,7 +3408,7 @@ build_evex_prefix (void) if (i.rounding->type != saeonly) i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5); else - i.vex.bytes[3] |= 0x10; + i.vex.bytes[3] |= 0x10 | (evexrcig << 5); } if (i.mask && i.mask->mask) @@ -3355,6 +3438,34 @@ process_immext (void) i.operands = 0; } + if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0) + { + /* MONITORX/MWAITX instructions have fixed operands with an opcode + suffix which is coded in the same place as an 8-bit immediate + field would be. + Here we check those operands and remove them afterwards. */ + unsigned int x; + + if (i.operands != 3) + abort(); + + for (x = 0; x < 2; x++) + if (register_number (i.op[x].regs) != x) + goto bad_register_operand; + + /* Check for third operand for mwaitx/monitorx insn. */ + if (register_number (i.op[x].regs) + != (x + (i.tm.extension_opcode == 0xfb))) + { +bad_register_operand: + as_bad (_("can't use register '%s%s' as operand %d in '%s'."), + register_prefix, i.op[x].regs->reg_name, x+1, + i.tm.name); + } + + i.operands = 0; + } + /* These AMD 3DNow! and SSE2 instructions have an opcode suffix which is coded in the same place as an 8-bit immediate field would be. 
Here we fake an 8-bit immediate operand from the @@ -3610,11 +3721,20 @@ md_assemble (char *line) as_warn (_("translating to `%sp'"), i.tm.name); } - if (i.tm.opcode_modifier.vex) - build_vex_prefix (t); + if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex) + { + if (flag_code == CODE_16BIT) + { + as_bad (_("instruction `%s' isn't supported in 16-bit mode."), + i.tm.name); + return; + } - if (i.tm.opcode_modifier.evex) - build_evex_prefix (); + if (i.tm.opcode_modifier.vex) + build_vex_prefix (t); + else + build_evex_prefix (); + } /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4 instructions may define INT_OPCODE as well, so avoid this corner @@ -3936,14 +4056,14 @@ parse_operands (char *l, const char *mnemonic) /* Skip optional white space before operand. */ if (is_space_char (*l)) ++l; - if (!is_operand_char (*l) && *l != END_OF_INSN) + if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"') { as_bad (_("invalid character %s before operand %d"), output_invalid (*l), i.operands + 1); return NULL; } - token_start = l; /* after white space */ + token_start = l; /* After white space. */ paren_not_balanced = 0; while (paren_not_balanced || *l != ',') { @@ -3962,7 +4082,7 @@ parse_operands (char *l, const char *mnemonic) else break; /* we are done */ } - else if (!is_operand_char (*l) && !is_space_char (*l)) + else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"') { as_bad (_("invalid character %s in operand %d"), output_invalid (*l), @@ -4185,6 +4305,8 @@ optimize_imm (void) i.op[op].imms->X_add_number = (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000); } +#ifdef BFD64 + /* Store 32-bit immediate in 64-bit for 64-bit BFD. */ if ((i.types[op].bitfield.imm32) && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1)) == 0)) @@ -4193,6 +4315,7 @@ optimize_imm (void) ^ ((offsetT) 1 << 31)) - ((offsetT) 1 << 31)); } +#endif i.types[op] = operand_type_or (i.types[op], smallest_imm_type (i.op[op].imms->X_add_number)); @@ -4273,6 +4396,8 @@ optimize_disp (void) op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000); i.types[op].bitfield.disp64 = 0; } +#ifdef BFD64 + /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */ if (i.types[op].bitfield.disp32 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0) { @@ -4283,6 +4408,7 @@ optimize_disp (void) op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31); i.types[op].bitfield.disp64 = 0; } +#endif if (!op_disp && i.types[op].bitfield.baseindex) { i.types[op].bitfield.disp8 = 0; @@ -4345,6 +4471,14 @@ check_VecOperands (const insn_template *t) return 1; } + /* Check if default mask is allowed. */ + if (t->opcode_modifier.nodefmask + && (!i.mask || i.mask->mask->reg_num == 0)) + { + i.error = no_default_mask; + return 1; + } + /* For VSIB byte, we need a vector register for index, and all vector registers must be distinct. 
*/ if (t->opcode_modifier.vecsib) @@ -4365,11 +4499,9 @@ check_VecOperands (const insn_template *t) if (i.reg_operands == 2 && !i.mask) { gas_assert (i.types[0].bitfield.regxmm - || i.types[0].bitfield.regymm - || i.types[0].bitfield.regzmm); + || i.types[0].bitfield.regymm); gas_assert (i.types[2].bitfield.regxmm - || i.types[2].bitfield.regymm - || i.types[2].bitfield.regzmm); + || i.types[2].bitfield.regymm); if (operand_check == check_none) return 0; if (register_number (i.op[0].regs) @@ -4386,6 +4518,22 @@ check_VecOperands (const insn_template *t) } as_warn (_("mask, index, and destination registers should be distinct")); } + else if (i.reg_operands == 1 && i.mask) + { + if ((i.types[1].bitfield.regymm + || i.types[1].bitfield.regzmm) + && (register_number (i.op[1].regs) + == register_number (i.index_reg))) + { + if (operand_check == check_error) + { + i.error = invalid_vector_register_set; + return 1; + } + if (operand_check != check_none) + as_warn (_("index and destination registers should be distinct")); + } + } } /* Check if broadcast is supported by the instruction and is applied @@ -4412,6 +4560,10 @@ check_VecOperands (const insn_template *t) broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */ else if (i.broadcast->type == BROADCAST_1TO8) broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */ + else if (i.broadcast->type == BROADCAST_1TO4) + broadcasted_opnd_size <<= 2; /* Broadcast 1to4. */ + else if (i.broadcast->type == BROADCAST_1TO2) + broadcasted_opnd_size <<= 1; /* Broadcast 1to2. */ else goto bad_broadcast; @@ -4462,14 +4614,6 @@ check_VecOperands (const insn_template *t) return 1; } - /* Check if default mask is allowed. */ - if (t->opcode_modifier.nodefmask - && (!i.mask || i.mask->mask->reg_num == 0)) - { - i.error = no_default_mask; - return 1; - } - /* Check RC/SAE. */ if (i.rounding) { @@ -4506,7 +4650,9 @@ check_VecOperands (const insn_template *t) && i.op[op].disps->X_op == O_constant) { offsetT value = i.op[op].disps->X_add_number; - int vec_disp8_ok = fits_in_vec_disp8 (value); + int vec_disp8_ok + = (i.disp_encoding != disp_encoding_32bit + && fits_in_vec_disp8 (value)); if (t->operand_types [op].bitfield.vec_disp8) { if (vec_disp8_ok) @@ -4636,10 +4782,12 @@ match_template (void) if (intel_mnemonic && t->opcode_modifier.attmnemonic) continue; - /* Check AT&T/Intel syntax. */ + /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */ i.error = unsupported_syntax; if ((intel_syntax && t->opcode_modifier.attsyntax) - || (!intel_syntax && t->opcode_modifier.intelsyntax)) + || (!intel_syntax && t->opcode_modifier.intelsyntax) + || (intel64 && t->opcode_modifier.amd64) + || (!intel64 && t->opcode_modifier.intel64)) continue; /* Check the suffix, except for some instructions in intel mode. 
*/ @@ -4671,9 +4819,9 @@ match_template (void) && !operand_types[0].bitfield.regymm && !operand_types[0].bitfield.regzmm) || (!operand_types[t->operands > 1].bitfield.regmmx - && !!operand_types[t->operands > 1].bitfield.regxmm - && !!operand_types[t->operands > 1].bitfield.regymm - && !!operand_types[t->operands > 1].bitfield.regzmm)) + && operand_types[t->operands > 1].bitfield.regxmm + && operand_types[t->operands > 1].bitfield.regymm + && operand_types[t->operands > 1].bitfield.regzmm)) && (t->base_opcode != 0x0fc7 || t->extension_opcode != 1 /* cmpxchg8b */)) continue; @@ -4688,7 +4836,7 @@ match_template (void) && ((!operand_types[0].bitfield.regmmx && !operand_types[0].bitfield.regxmm) || (!operand_types[t->operands > 1].bitfield.regmmx - && !!operand_types[t->operands > 1].bitfield.regxmm))) + && operand_types[t->operands > 1].bitfield.regxmm))) continue; /* Do not verify operands when there are none. */ @@ -4745,6 +4893,10 @@ match_template (void) } } + /* Force 0x8b encoding for "mov foo@GOT, %eax". */ + if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0) + continue; + /* We check register size if needed. */ check_register = t->opcode_modifier.checkregsize; overlap0 = operand_type_and (i.types[0], operand_types[0]); @@ -6133,8 +6285,8 @@ build_modrm_byte (void) op = i.tm.operand_types[vvvv]; op.bitfield.regmem = 0; if ((dest + 1) >= i.operands - || (op.bitfield.reg32 != 1 - && !op.bitfield.reg64 != 1 + || (!op.bitfield.reg32 + && op.bitfield.reg64 && !operand_type_equal (&op, ®xmm) && !operand_type_equal (&op, ®ymm) && !operand_type_equal (&op, ®zmm) @@ -6895,6 +7047,31 @@ output_insn (void) unsigned int j; unsigned int prefix; + if (avoid_fence + && i.tm.base_opcode == 0xfae + && i.operands == 1 + && i.imm_operands == 1 + && (i.op[0].imms->X_add_number == 0xe8 + || i.op[0].imms->X_add_number == 0xf0 + || i.op[0].imms->X_add_number == 0xf8)) + { + /* Encode lfence, mfence, and sfence as + f0 83 04 24 00 lock addl $0x0, (%{re}sp). */ + offsetT val = 0x240483f0ULL; + p = frag_more (5); + md_number_to_chars (p, val, 5); + return; + } + + /* Some processors fail on LOCK prefix. This options makes + assembler ignore LOCK prefix and serves as a workaround. */ + if (omit_lock_prefix) + { + if (i.tm.base_opcode == LOCK_PREFIX_OPCODE) + return; + i.prefix[LOCK_PREFIX] = 0; + } + /* Since the VEX/EVEX prefix contains the implicit prefix, we don't need the explicit prefix. */ if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex) @@ -6930,6 +7107,17 @@ check_prefix: abort (); } +#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF) + /* For x32, add a dummy REX_OPCODE prefix for mov/add with + R_X86_64_GOTTPOFF relocation so that linker can safely + perform IE->LE optimization. */ + if (x86_elf_abi == X86_64_X32_ABI + && i.operands == 2 + && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF + && i.prefix[REX_PREFIX] == 0) + add_prefix (REX_OPCODE); +#endif + /* The prefix bytes. */ for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++) if (*q) @@ -7091,6 +7279,7 @@ output_disp (fragS *insn_start_frag, offsetT insn_start_off) int size = disp_size (n); int sign = i.types[n].bitfield.disp32s; int pcrel = (i.flags[n] & Operand_PCrel) != 0; + fixS *fixP; /* We can't have 8 bit displacement here. */ gas_assert (!i.types[n].bitfield.disp8); @@ -7159,8 +7348,39 @@ output_disp (fragS *insn_start_frag, offsetT insn_start_off) insn, and that is taken care of in other code. 
*/ reloc_type = BFD_RELOC_X86_64_GOTPC32; } - fix_new_exp (frag_now, p - frag_now->fr_literal, size, - i.op[n].disps, pcrel, reloc_type); + fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, + size, i.op[n].disps, pcrel, + reloc_type); + /* Check for "call/jmp *mem", "mov mem, %reg", + "test %reg, mem" and "binop mem, %reg" where binop + is one of adc, add, and, cmp, or, sbb, sub, xor + instructions. Always generate R_386_GOT32X for + "sym*GOT" operand in 32-bit mode. */ + if ((generate_relax_relocations + || (!object_64bit + && i.rm.mode == 0 + && i.rm.regmem == 5)) + && (i.rm.mode == 2 + || (i.rm.mode == 0 && i.rm.regmem == 5)) + && ((i.operands == 1 + && i.tm.base_opcode == 0xff + && (i.rm.reg == 2 || i.rm.reg == 4)) + || (i.operands == 2 + && (i.tm.base_opcode == 0x8b + || i.tm.base_opcode == 0x85 + || (i.tm.base_opcode & 0xc7) == 0x03)))) + { + if (object_64bit) + { + fixP->fx_tcbit = i.rex != 0; + if (i.base_reg + && (i.base_reg->reg_num == RegRip + || i.base_reg->reg_num == RegEip)) + fixP->fx_tcbit2 = 1; + } + else + fixP->fx_tcbit2 = 1; + } } } } @@ -7295,16 +7515,13 @@ output_imm (fragS *insn_start_frag, offsetT insn_start_off) /* x86_cons_fix_new is called via the expression parsing code when a reloc is needed. We use this hook to get the correct .got reloc. */ -static enum bfd_reloc_code_real got_reloc = NO_RELOC; static int cons_sign = -1; void x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len, - expressionS *exp) + expressionS *exp, bfd_reloc_code_real_type r) { - enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc); - - got_reloc = NO_RELOC; + r = reloc (len, 0, cons_sign, r); #ifdef TE_PE if (exp->X_op == O_secrel) @@ -7465,7 +7682,7 @@ lex_got (enum bfd_reloc_code_real *rel, /* Allocate and copy string. The trailing NUL shouldn't be necessary, but be safe. */ - tmpbuf = (char *) xmalloc (first + second + 2); + tmpbuf = XNEWVEC (char, first + second + 2); memcpy (tmpbuf, input_line_pointer, first); if (second != 0 && *past_reloc != ' ') /* Replace the relocation token with ' ', so that @@ -7511,7 +7728,7 @@ lex_got (enum bfd_reloc_code_real *rel, static char * lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED, int *adjust ATTRIBUTE_UNUSED, - i386_operand_type *types ATTRIBUTE_UNUSED) + i386_operand_type *types) { static const struct { @@ -7573,7 +7790,7 @@ lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED, /* Allocate and copy string. The trailing NUL shouldn't be necessary, but be safe. 
*/ - tmpbuf = (char *) xmalloc (first + second + 2); + tmpbuf = XNEWVEC (char, first + second + 2); memcpy (tmpbuf, input_line_pointer, first); if (second != 0 && *past_reloc != ' ') /* Replace the relocation token with ' ', so that @@ -7596,9 +7813,11 @@ lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED, #endif /* TE_PE */ -void +bfd_reloc_code_real_type x86_cons (expressionS *exp, int size) { + bfd_reloc_code_real_type got_reloc = NO_RELOC; + intel_syntax = -intel_syntax; exp->X_md = 0; @@ -7645,6 +7864,8 @@ x86_cons (expressionS *exp, int size) if (intel_syntax) i386_intel_simplify (exp); + + return got_reloc; } static void @@ -7705,6 +7926,10 @@ check_VecOperations (char *op_string, char *op_end) op_string += 3; if (*op_string == '8') bcst_type = BROADCAST_1TO8; + else if (*op_string == '4') + bcst_type = BROADCAST_1TO4; + else if (*op_string == '2') + bcst_type = BROADCAST_1TO2; else if (*op_string == '1' && *(op_string+1) == '6') { @@ -7904,7 +8129,7 @@ i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp, return 0; } #endif - else if (!intel_syntax && exp->X_op == O_register) + else if (!intel_syntax && exp_seg == reg_section) { if (imm_start) as_bad (_("illegal immediate register operand %s"), imm_start); @@ -8563,6 +8788,7 @@ i386_att_operand (char *operand_string) } else if (is_digit_char (*op_string) || is_identifier_char (*op_string) + || *op_string == '"' || *op_string == '(') { /* This is a memory reference of some sort. */ @@ -8777,6 +9003,40 @@ i386_frag_max_var (fragS *frag) return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5; } +#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) +static int +elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var) +{ + /* STT_GNU_IFUNC symbol must go through PLT. */ + if ((symbol_get_bfdsym (fr_symbol)->flags + & BSF_GNU_INDIRECT_FUNCTION) != 0) + return 0; + + if (!S_IS_EXTERNAL (fr_symbol)) + /* Symbol may be weak or local. */ + return !S_IS_WEAK (fr_symbol); + + /* Global symbols with non-default visibility can't be preempted. */ + if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT) + return 1; + + if (fr_var != NO_RELOC) + switch ((enum bfd_reloc_code_real) fr_var) + { + case BFD_RELOC_386_PLT32: + case BFD_RELOC_X86_64_PLT32: + /* Symbol with PLT relocatin may be preempted. */ + return 0; + default: + abort (); + } + + /* Global symbols with default visibility in a shared library may be + preempted by another definition. */ + return !shared; +} +#endif + /* md_estimate_size_before_relax() Called just before relax() for rs_machine_dependent frags. The x86 @@ -8800,10 +9060,8 @@ md_estimate_size_before_relax (fragS *fragP, segT segment) if (S_GET_SEGMENT (fragP->fr_symbol) != segment #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || (IS_ELF - && (S_IS_EXTERNAL (fragP->fr_symbol) - || S_IS_WEAK (fragP->fr_symbol) - || ((symbol_get_bfdsym (fragP->fr_symbol)->flags - & BSF_GNU_INDIRECT_FUNCTION)))) + && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol, + fragP->fr_var)) #endif #if defined (OBJ_COFF) && defined (TE_PE) || (OUTPUT_FLAVOR == bfd_target_coff_flavour @@ -9097,8 +9355,21 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED) #endif } #if defined (OBJ_COFF) && defined (TE_PE) - if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy)) - { + if (fixP->fx_addsy != NULL + && S_IS_WEAK (fixP->fx_addsy) + /* PR 16858: Do not modify weak function references. */ + && ! 
fixP->fx_pcrel) + { +#if !defined (TE_PEP) + /* For x86 PE weak function symbols are neither PC-relative + nor do they set S_IS_FUNCTION. So the only reliable way + to detect them is to check the flags of their containing + section. */ + if (S_GET_SEGMENT (fixP->fx_addsy) != NULL + && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE) + ; + else +#endif value -= S_GET_VALUE (fixP->fx_addsy); } #endif @@ -9145,11 +9416,6 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED) fixP->fx_done = 0; return; - case BFD_RELOC_386_GOT32: - case BFD_RELOC_X86_64_GOT32: - value = 0; /* Fully resolved at runtime. No addend. */ - break; - case BFD_RELOC_VTABLE_INHERIT: case BFD_RELOC_VTABLE_ENTRY: fixP->fx_done = 0; @@ -9186,7 +9452,7 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED) md_number_to_chars (p, value, fixP->fx_size); } -char * +const char * md_atof (int type, char *litP, int *sizeP) { /* This outputs the LITTLENUMs in REVERSE order; @@ -9292,17 +9558,20 @@ parse_real_register (char *reg_string, char **end_op) && !cpu_arch_flags.bitfield.cpu387) return (const reg_entry *) NULL; - if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx) + if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpuregmmx) + return (const reg_entry *) NULL; + + if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpuregxmm) return (const reg_entry *) NULL; - if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse) + if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuregymm) return (const reg_entry *) NULL; - if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx) + if (r->reg_type.bitfield.regzmm && !cpu_arch_flags.bitfield.cpuregzmm) return (const reg_entry *) NULL; - if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask) - && !cpu_arch_flags.bitfield.cpuavx512f) + if (r->reg_type.bitfield.regmask + && !cpu_arch_flags.bitfield.cpuregmask) return (const reg_entry *) NULL; /* Don't allow fake index register unless allow_index_reg isn't 0. 
*/ @@ -9352,7 +9621,7 @@ parse_register (char *reg_string, char **end_op) symbolS *symbolP; input_line_pointer = reg_string; - c = get_symbol_end (); + c = get_symbol_name (®_string); symbolP = symbol_find (reg_string); if (symbolP && S_GET_SEGMENT (symbolP) == reg_section) { @@ -9362,6 +9631,8 @@ parse_register (char *reg_string, char **end_op) know (e->X_add_number >= 0 && (valueT) e->X_add_number < i386_regtab_size); r = i386_regtab + e->X_add_number; + if ((r->reg_flags & RegVRex)) + i.need_vrex = 1; *end_op = input_line_pointer; } *input_line_pointer = c; @@ -9455,6 +9726,14 @@ const char *md_shortopts = "qn"; #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15) #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16) #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17) +#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18) +#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19) +#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20) +#define OPTION_MSHARED (OPTION_MD_BASE + 21) +#define OPTION_MAMD64 (OPTION_MD_BASE + 22) +#define OPTION_MINTEL64 (OPTION_MD_BASE + 23) +#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24) +#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25) struct option md_longopts[] = { @@ -9465,6 +9744,7 @@ struct option md_longopts[] = #endif #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) {"x32", no_argument, NULL, OPTION_X32}, + {"mshared", no_argument, NULL, OPTION_MSHARED}, #endif {"divide", no_argument, NULL, OPTION_DIVIDE}, {"march", required_argument, NULL, OPTION_MARCH}, @@ -9481,15 +9761,24 @@ struct option md_longopts[] = {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX}, {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG}, {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG}, +# if defined (TE_PE) || defined (TE_PEP) + {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ}, +#endif + {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX}, + {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD}, + {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS}, + {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG}, + {"mamd64", no_argument, NULL, OPTION_MAMD64}, + {"mintel64", no_argument, NULL, OPTION_MINTEL64}, {NULL, no_argument, NULL, 0} }; size_t md_longopts_size = sizeof (md_longopts); int -md_parse_option (int c, char *arg) +md_parse_option (int c, const char *arg) { unsigned int j; - char *arch, *next; + char *arch, *next, *saved; switch (c) { @@ -9520,6 +9809,10 @@ md_parse_option (int c, char *arg) /* -s: On i386 Solaris, this tells the native assembler to use .stab instead of .stab.excl. We always use .stab anyhow. */ break; + + case OPTION_MSHARED: + shared = 1; + break; #endif #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \ || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)) @@ -9577,7 +9870,7 @@ md_parse_option (int c, char *arg) char *n, *t; const char *s; - n = (char *) xmalloc (strlen (i386_comment_chars) + 1); + n = XNEWVEC (char, strlen (i386_comment_chars) + 1); t = n; for (s = i386_comment_chars; *s != '\0'; s++) if (*s != '/') @@ -9589,7 +9882,11 @@ md_parse_option (int c, char *arg) break; case OPTION_MARCH: - arch = xstrdup (arg); + saved = xstrdup (arg); + arch = saved; + /* Allow -march=+nosse. */ + if (*arch == '+') + arch++; do { if (*arch == '.') @@ -9623,13 +9920,12 @@ md_parse_option (int c, char *arg) /* ISA entension. 
*/ i386_cpu_flags flags; - if (!cpu_arch[j].negated) - flags = cpu_flags_or (cpu_arch_flags, - cpu_arch[j].flags); - else - flags = cpu_flags_and_not (cpu_arch_flags, - cpu_arch[j].flags); - if (!cpu_flags_equal (&flags, &cpu_arch_flags)) + flags = cpu_flags_or (cpu_arch_flags, + cpu_arch[j].flags); + + if (!valid_iamcu_cpu_flags (&flags)) + as_fatal (_("`%s' isn't valid for Intel MCU"), arch); + else if (!cpu_flags_equal (&flags, &cpu_arch_flags)) { if (cpu_sub_arch_name) { @@ -9648,12 +9944,44 @@ md_parse_option (int c, char *arg) } } + if (j >= ARRAY_SIZE (cpu_arch)) + { + /* Disable an ISA extension. */ + for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++) + if (strcmp (arch, cpu_noarch [j].name) == 0) + { + i386_cpu_flags flags; + + flags = cpu_flags_and_not (cpu_arch_flags, + cpu_noarch[j].flags); + if (!cpu_flags_equal (&flags, &cpu_arch_flags)) + { + if (cpu_sub_arch_name) + { + char *name = cpu_sub_arch_name; + cpu_sub_arch_name = concat (arch, + (const char *) NULL); + free (name); + } + else + cpu_sub_arch_name = xstrdup (arch); + cpu_arch_flags = flags; + cpu_arch_isa_flags = flags; + } + break; + } + + if (j >= ARRAY_SIZE (cpu_noarch)) + j = ARRAY_SIZE (cpu_arch); + } + if (j >= ARRAY_SIZE (cpu_arch)) as_fatal (_("invalid -march= option: `%s'"), arg); arch = next; } - while (next != NULL ); + while (next != NULL); + free (saved); break; case OPTION_MTUNE: @@ -9753,6 +10081,19 @@ md_parse_option (int c, char *arg) as_fatal (_("invalid -mevexlig= option: `%s'"), arg); break; + case OPTION_MEVEXRCIG: + if (strcmp (arg, "rne") == 0) + evexrcig = rne; + else if (strcmp (arg, "rd") == 0) + evexrcig = rd; + else if (strcmp (arg, "ru") == 0) + evexrcig = ru; + else if (strcmp (arg, "rz") == 0) + evexrcig = rz; + else + as_fatal (_("invalid -mevexrcig= option: `%s'"), arg); + break; + case OPTION_MEVEXWIG: if (strcmp (arg, "0") == 0) evexwig = evexw0; @@ -9762,6 +10103,47 @@ md_parse_option (int c, char *arg) as_fatal (_("invalid -mevexwig= option: `%s'"), arg); break; +# if defined (TE_PE) || defined (TE_PEP) + case OPTION_MBIG_OBJ: + use_big_obj = 1; + break; +#endif + + case OPTION_MOMIT_LOCK_PREFIX: + if (strcasecmp (arg, "yes") == 0) + omit_lock_prefix = 1; + else if (strcasecmp (arg, "no") == 0) + omit_lock_prefix = 0; + else + as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg); + break; + + case OPTION_MFENCE_AS_LOCK_ADD: + if (strcasecmp (arg, "yes") == 0) + avoid_fence = 1; + else if (strcasecmp (arg, "no") == 0) + avoid_fence = 0; + else + as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg); + break; + + case OPTION_MRELAX_RELOCATIONS: + if (strcasecmp (arg, "yes") == 0) + generate_relax_relocations = 1; + else if (strcasecmp (arg, "no") == 0) + generate_relax_relocations = 0; + else + as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg); + break; + + case OPTION_MAMD64: + intel64 = 0; + break; + + case OPTION_MINTEL64: + intel64 = 1; + break; + default: return 0; } @@ -9771,6 +10153,44 @@ md_parse_option (int c, char *arg) #define MESSAGE_TEMPLATE \ " " +static char * +output_message (FILE *stream, char *p, char *message, char *start, + int *left_p, const char *name, int len) +{ + int size = sizeof (MESSAGE_TEMPLATE); + int left = *left_p; + + /* Reserve 2 spaces for ", " or ",\0" */ + left -= len + 2; + + /* Check if there is any room. */ + if (left >= 0) + { + if (p != start) + { + *p++ = ','; + *p++ = ' '; + } + p = mempcpy (p, name, len); + } + else + { + /* Output the current message now and start a new one.
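   (Aside: an illustrative sketch, not part of this patch.  The three yes/no
   options handled above -- -momit-lock-prefix=, -mfence-as-lock-add= and
   -mrelax-relocations= -- all parse their argument with the same strcasecmp
   test; a hypothetical shared helper, with assumed names parse_yes_no and
   option, would look like:

     static int
     parse_yes_no (const char *option, const char *arg)
     {
       // Hypothetical helper (sketch only); not part of tc-i386.c.
       // Accept "yes"/"no" case-insensitively; anything else is fatal.
       if (strcasecmp (arg, "yes") == 0)
         return 1;
       else if (strcasecmp (arg, "no") == 0)
         return 0;
       as_fatal (_("invalid %s option: `%s'"), option, arg);
       return 0;  // not reached; as_fatal does not return
     }

   The patch itself keeps the three switch cases separate.)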
*/ + *p++ = ','; + *p = '\0'; + fprintf (stream, "%s\n", message); + p = start; + left = size - (start - message) - len - 2; + + gas_assert (left >= 0); + + p = mempcpy (p, name, len); + } + + *left_p = left; + return p; +} + static void show_arch (FILE *stream, int ext, int check) { @@ -9815,34 +10235,19 @@ show_arch (FILE *stream, int ext, int check) continue; } - /* Reserve 2 spaces for ", " or ",\0" */ - left -= len + 2; - - /* Check if there is any room. */ - if (left >= 0) - { - if (p != start) - { - *p++ = ','; - *p++ = ' '; - } - p = mempcpy (p, name, len); - } - else - { - /* Output the current message now and start a new one. */ - *p++ = ','; - *p = '\0'; - fprintf (stream, "%s\n", message); - p = start; - left = size - (start - message) - len - 2; - - gas_assert (left >= 0); - - p = mempcpy (p, name, len); - } + p = output_message (stream, p, message, start, &left, name, len); } + /* Display disabled extensions. */ + if (ext) + for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++) + { + name = cpu_noarch [j].name; + len = cpu_noarch [j].len; + p = output_message (stream, p, message, start, &left, name, + len); + } + *p = '\0'; fprintf (stream, "%s\n", message); } @@ -9903,6 +10308,10 @@ md_show_usage (FILE *stream) -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\ for EVEX.W bit ignored instructions\n")); fprintf (stream, _("\ + -mevexrcig=[rne|rd|ru|rz]\n\ + encode EVEX instructions with specific EVEX.RC value\n\ + for SAE-only ignored instructions\n")); + fprintf (stream, _("\ -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n")); fprintf (stream, _("\ -msyntax=[att|intel] use AT&T/Intel syntax\n")); @@ -9914,6 +10323,26 @@ md_show_usage (FILE *stream) -mold-gcc support old (<= 2.8.1) versions of gcc\n")); fprintf (stream, _("\ -madd-bnd-prefix add BND prefix for all valid branches\n")); + fprintf (stream, _("\ + -mshared disable branch optimization for shared code\n")); +# if defined (TE_PE) || defined (TE_PEP) + fprintf (stream, _("\ + -mbig-obj generate big object files\n")); +#endif + fprintf (stream, _("\ + -momit-lock-prefix=[no|yes]\n\ + strip all lock prefixes\n")); + fprintf (stream, _("\ + -mfence-as-lock-add=[no|yes]\n\ + encode lfence, mfence and sfence as\n\ + lock addl $0x0, (%%{re}sp)\n")); + fprintf (stream, _("\ + -mrelax-relocations=[no|yes]\n\ + generate relax relocations\n")); + fprintf (stream, _("\ + -mamd64 accept only AMD64 ISA\n")); + fprintf (stream, _("\ + -mintel64 accept only Intel64 ISA\n")); } #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \ @@ -9935,6 +10364,27 @@ i386_target_format (void) } else if (!strcmp (default_arch, "i386")) update_code_flag (CODE_32BIT, 1); + else if (!strcmp (default_arch, "iamcu")) + { + update_code_flag (CODE_32BIT, 1); + if (cpu_arch_isa == PROCESSOR_UNKNOWN) + { + static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS; + cpu_arch_name = "iamcu"; + cpu_sub_arch_name = NULL; + cpu_arch_flags = iamcu_flags; + cpu_arch_isa = PROCESSOR_IAMCU; + cpu_arch_isa_flags = iamcu_flags; + if (!cpu_arch_tune_set) + { + cpu_arch_tune = cpu_arch_isa; + cpu_arch_tune_flags = cpu_arch_isa_flags; + } + } + else + as_fatal (_("Intel MCU doesn't support `%s' architecture"), + cpu_arch_name); + } else as_fatal (_("unknown architecture")); @@ -9952,7 +10402,10 @@ i386_target_format (void) #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF) # if defined (TE_PE) || defined (TE_PEP) case bfd_target_coff_flavour: - return flag_code == CODE_64BIT ? 
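/* Illustrative usage of the options documented in md_show_usage above
   (assumed typical invocations; the bare `as' driver name and the file name
   foo.s are examples, not taken from the patch):

       as -march=+nosse foo.s             strip SSE from the default ISA
       as -momit-lock-prefix=yes foo.s    strip explicit LOCK prefixes
       as -mfence-as-lock-add=yes foo.s   encode l/m/sfence as lock addl $0x0,(%esp) or (%rsp)
       as -mevexrcig=rz foo.s             EVEX.RC value for SAE-only instructions
       as -mrelax-relocations=no foo.s    do not generate relax relocations
       as -mbig-obj foo.s                 PE/PEP targets only: big object format

   Each line corresponds to a case added to md_parse_option above.  */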
"pe-x86-64" : "pe-i386"; + if (flag_code == CODE_64BIT) + return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64"; + else + return "pe-i386"; # elif defined (TE_GO32) case bfd_target_coff_flavour: return "coff-go32"; @@ -9989,12 +10442,18 @@ i386_target_format (void) as_fatal (_("Intel L1OM is 64bit only")); return ELF_TARGET_L1OM_FORMAT; } - if (cpu_arch_isa == PROCESSOR_K1OM) + else if (cpu_arch_isa == PROCESSOR_K1OM) { if (x86_elf_abi != X86_64_ABI) as_fatal (_("Intel K1OM is 64bit only")); return ELF_TARGET_K1OM_FORMAT; } + else if (cpu_arch_isa == PROCESSOR_IAMCU) + { + if (x86_elf_abi != I386_ABI) + as_fatal (_("Intel MCU is 32bit only")); + return ELF_TARGET_IAMCU_FORMAT; + } else return format; } @@ -10017,48 +10476,6 @@ i386_target_format (void) } #endif /* OBJ_MAYBE_ more than one */ - -#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) -void -i386_elf_emit_arch_note (void) -{ - if (IS_ELF && cpu_arch_name != NULL) - { - char *p; - asection *seg = now_seg; - subsegT subseg = now_subseg; - Elf_Internal_Note i_note; - Elf_External_Note e_note; - asection *note_secp; - int len; - - /* Create the .note section. */ - note_secp = subseg_new (".note", 0); - bfd_set_section_flags (stdoutput, - note_secp, - SEC_HAS_CONTENTS | SEC_READONLY); - - /* Process the arch string. */ - len = strlen (cpu_arch_name); - - i_note.namesz = len + 1; - i_note.descsz = 0; - i_note.type = NT_ARCH; - p = frag_more (sizeof (e_note.namesz)); - md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz)); - p = frag_more (sizeof (e_note.descsz)); - md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz)); - p = frag_more (sizeof (e_note.type)); - md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type)); - p = frag_more (len + 1); - strcpy (p, cpu_arch_name); - - frag_align (2, 0, 0); - - subseg_set (seg, subseg); - } -} -#endif symbolS * md_undefined_symbol (char *name) @@ -10096,7 +10513,7 @@ md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size) int align; align = bfd_get_section_alignment (stdoutput, segment); - size = ((size + (1 << align) - 1) & ((valueT) -1 << align)); + size = ((size + (1 << align) - 1) & (-((valueT) 1 << align))); } #endif @@ -10134,23 +10551,41 @@ s_bss (int ignore ATTRIBUTE_UNUSED) void i386_validate_fix (fixS *fixp) { - if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol) + if (fixp->fx_subsy) { - if (fixp->fx_r_type == BFD_RELOC_32_PCREL) - { - if (!object_64bit) - abort (); - fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL; - } - else + if (fixp->fx_subsy == GOT_symbol) { - if (!object_64bit) - fixp->fx_r_type = BFD_RELOC_386_GOTOFF; + if (fixp->fx_r_type == BFD_RELOC_32_PCREL) + { + if (!object_64bit) + abort (); +#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) + if (fixp->fx_tcbit2) + fixp->fx_r_type = (fixp->fx_tcbit + ? 
BFD_RELOC_X86_64_REX_GOTPCRELX + : BFD_RELOC_X86_64_GOTPCRELX); + else +#endif + fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL; + } else - fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64; + { + if (!object_64bit) + fixp->fx_r_type = BFD_RELOC_386_GOTOFF; + else + fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64; + } + fixp->fx_subsy = 0; } - fixp->fx_subsy = 0; } +#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) + else if (!object_64bit) + { + if (fixp->fx_r_type == BFD_RELOC_386_GOT32 + && fixp->fx_tcbit2) + fixp->fx_r_type = BFD_RELOC_386_GOT32X; + } +#endif } arelent * @@ -10184,8 +10619,11 @@ tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp) case BFD_RELOC_X86_64_PLT32: case BFD_RELOC_X86_64_GOT32: case BFD_RELOC_X86_64_GOTPCREL: + case BFD_RELOC_X86_64_GOTPCRELX: + case BFD_RELOC_X86_64_REX_GOTPCRELX: case BFD_RELOC_386_PLT32: case BFD_RELOC_386_GOT32: + case BFD_RELOC_386_GOT32X: case BFD_RELOC_386_GOTOFF: case BFD_RELOC_386_GOTPC: case BFD_RELOC_386_TLS_GD: @@ -10287,8 +10725,8 @@ tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp) code = BFD_RELOC_X86_64_GOTPC64; } - rel = (arelent *) xmalloc (sizeof (arelent)); - rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *)); + rel = XNEW (arelent); + rel->sym_ptr_ptr = XNEW (asymbol *); *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); rel->address = fixp->fx_frag->fr_address + fixp->fx_where; @@ -10337,6 +10775,8 @@ tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp) case BFD_RELOC_X86_64_PLT32: case BFD_RELOC_X86_64_GOT32: case BFD_RELOC_X86_64_GOTPCREL: + case BFD_RELOC_X86_64_GOTPCRELX: + case BFD_RELOC_X86_64_REX_GOTPCRELX: case BFD_RELOC_X86_64_TLSGD: case BFD_RELOC_X86_64_TLSLD: case BFD_RELOC_X86_64_GOTTPOFF: @@ -10467,7 +10907,7 @@ tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size) /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */ bfd_vma -x86_64_section_letter (int letter, char **ptr_msg) +x86_64_section_letter (int letter, const char **ptr_msg) { if (flag_code == CODE_64BIT) {
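/* Note on the relax relocations wired up above (an explanatory sketch; the
   operands below are illustrative, not taken from the patch):
   BFD_RELOC_386_GOT32X, BFD_RELOC_X86_64_GOTPCRELX and
   BFD_RELOC_X86_64_REX_GOTPCRELX mark GOT references that the linker may
   relax.  For example, a 64-bit load through the GOT such as

       movq foo@GOTPCREL(%rip), %rax      emitted with R_X86_64_REX_GOTPCRELX

   can be rewritten by a relaxing linker into

       leaq foo(%rip), %rax

   when `foo' turns out to be locally defined, removing the GOT indirection.
   Assembling with -mrelax-relocations=no falls back to the plain
   GOTPCREL/GOT32 relocations for linkers that predate this extension.  */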