1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2019 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef REGISTER_WARNINGS
48 #define REGISTER_WARNINGS 1
51 #ifndef INFER_ADDR_PREFIX
52 #define INFER_ADDR_PREFIX 1
56 #define DEFAULT_ARCH "i386"
61 #define INLINE __inline__
67 /* Prefixes will be emitted in the order defined below.
68 WAIT_PREFIX must be the first prefix since FWAIT really is an
69 instruction, and so must come before any prefixes.
70 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
71 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
77 #define HLE_PREFIX REP_PREFIX
78 #define BND_PREFIX REP_PREFIX
80 #define REX_PREFIX 6 /* must come last. */
81 #define MAX_PREFIXES 7 /* max prefixes per opcode */
83 /* we define the syntax here (modulo base,index,scale syntax) */
84 #define REGISTER_PREFIX '%'
85 #define IMMEDIATE_PREFIX '$'
86 #define ABSOLUTE_PREFIX '*'
88 /* these are the instruction mnemonic suffixes in AT&T syntax or
89 memory operand size in Intel syntax. */
90 #define WORD_MNEM_SUFFIX 'w'
91 #define BYTE_MNEM_SUFFIX 'b'
92 #define SHORT_MNEM_SUFFIX 's'
93 #define LONG_MNEM_SUFFIX 'l'
94 #define QWORD_MNEM_SUFFIX 'q'
95 /* Intel Syntax.  Use a non-ascii letter since it never appears
97 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
99 #define END_OF_INSN '\0'
101 /* This matches the C -> StaticRounding alias in the opcode table. */
102 #define commutative staticrounding
105 'templates' is for grouping together 'template' structures for opcodes
106 of the same name. This is only used for storing the insns in the grand
107 ole hash table of insns.
108 The templates themselves start at START and range up to (but not including)
113 const insn_template
*start
;
114 const insn_template
*end
;
118 /* 386 operand encoding bytes: see 386 book for details of this. */
121 unsigned int regmem
; /* codes register or memory operand */
122 unsigned int reg
; /* codes register operand (or extended opcode) */
123 unsigned int mode
; /* how to interpret regmem & reg */
127 /* x86-64 extension prefix. */
128 typedef int rex_byte
;
130 /* 386 opcode byte to code indirect addressing. */
139 /* x86 arch names, types and features */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 enum processor_type type
; /* arch type */
145 i386_cpu_flags flags
; /* cpu feature flags */
146 unsigned int skip
; /* show_arch should skip this. */
150 /* Used to turn off indicated flags. */
153 const char *name
; /* arch name */
154 unsigned int len
; /* arch string length */
155 i386_cpu_flags flags
; /* cpu feature flags */
/* Forward declarations for the handlers behind i386-specific assembler
   directives and command-line-driven mode switches (.code16/.code32/.code64,
   .intel_syntax, .att_syntax, .arch, etc.).  Each takes the usual gas
   pseudo-op integer argument.  */
159 static void update_code_flag (int, int);
160 static void set_code_flag (int);
161 static void set_16bit_gcc_code_flag (int);
162 static void set_intel_syntax (int);
163 static void set_intel_mnemonic (int);
164 static void set_allow_index_reg (int);
165 static void set_check (int);
166 static void set_cpu_arch (int);
168 static void pe_directive_secrel (int);
170 static void signed_cons (int);
171 static char *output_invalid (int c
);
172 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
174 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
176 static int i386_att_operand (char *);
177 static int i386_intel_operand (char *, int);
178 static int i386_intel_simplify (expressionS
*);
179 static int i386_intel_parse_name (const char *, expressionS
*);
180 static const reg_entry
*parse_register (char *, char **);
181 static char *parse_insn (char *, char *);
182 static char *parse_operands (char *, const char *);
183 static void swap_operands (void);
184 static void swap_2_operands (int, int);
185 static void optimize_imm (void);
186 static void optimize_disp (void);
187 static const insn_template
*match_template (char);
188 static int check_string (void);
189 static int process_suffix (void);
190 static int check_byte_reg (void);
191 static int check_long_reg (void);
192 static int check_qword_reg (void);
193 static int check_word_reg (void);
194 static int finalize_imm (void);
195 static int process_operands (void);
196 static const seg_entry
*build_modrm_byte (void);
197 static void output_insn (void);
198 static void output_imm (fragS
*, offsetT
);
199 static void output_disp (fragS
*, offsetT
);
201 static void s_bss (int);
203 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
204 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
206 /* GNU_PROPERTY_X86_ISA_1_USED. */
207 static unsigned int x86_isa_1_used
;
208 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
209 static unsigned int x86_feature_2_used
;
210 /* Generate x86 used ISA and feature properties. */
211 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
214 static const char *default_arch
= DEFAULT_ARCH
;
216 /* This struct describes rounding control and SAE in the instruction. */
230 static struct RC_Operation rc_op
;
232 /* The struct describes masking, applied to OPERAND in the instruction.
233 MASK is a pointer to the corresponding mask register. ZEROING tells
234 whether merging or zeroing mask is used. */
235 struct Mask_Operation
237 const reg_entry
*mask
;
238 unsigned int zeroing
;
239 /* The operand where this operation is associated. */
243 static struct Mask_Operation mask_op
;
245 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
247 struct Broadcast_Operation
249 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
252 /* Index of broadcasted operand. */
255 /* Number of bytes to broadcast. */
259 static struct Broadcast_Operation broadcast_op
;
264 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
265 unsigned char bytes
[4];
267 /* Destination or source register specifier. */
268 const reg_entry
*register_specifier
;
271 /* 'md_assemble ()' gathers together information and puts it into a
278 const reg_entry
*regs
;
283 operand_size_mismatch
,
284 operand_type_mismatch
,
285 register_type_mismatch
,
286 number_of_operands_mismatch
,
287 invalid_instruction_suffix
,
289 unsupported_with_intel_mnemonic
,
292 invalid_vsib_address
,
293 invalid_vector_register_set
,
294 unsupported_vector_index_register
,
295 unsupported_broadcast
,
298 mask_not_on_destination
,
301 rc_sae_operand_not_last_imm
,
302 invalid_register_operand
,
307 /* TM holds the template for the insn were currently assembling. */
310 /* SUFFIX holds the instruction size suffix for byte, word, dword
311 or qword, if given. */
314 /* OPERANDS gives the number of given operands. */
315 unsigned int operands
;
317 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
318 of given register, displacement, memory operands and immediate
320 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
322 /* TYPES [i] is the type (see above #defines) which tells us how to
323 use OP[i] for the corresponding operand. */
324 i386_operand_type types
[MAX_OPERANDS
];
326 /* Displacement expression, immediate expression, or register for each
328 union i386_op op
[MAX_OPERANDS
];
330 /* Flags for operands. */
331 unsigned int flags
[MAX_OPERANDS
];
332 #define Operand_PCrel 1
333 #define Operand_Mem 2
335 /* Relocation type for operand */
336 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
338 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
339 the base index byte below. */
340 const reg_entry
*base_reg
;
341 const reg_entry
*index_reg
;
342 unsigned int log2_scale_factor
;
344 /* SEG gives the seg_entries of this insn. They are zero unless
345 explicit segment overrides are given. */
346 const seg_entry
*seg
[2];
348 /* Copied first memory operand string, for re-checking. */
351 /* PREFIX holds all the given prefix opcodes (usually null).
352 PREFIXES is the number of prefix opcodes. */
353 unsigned int prefixes
;
354 unsigned char prefix
[MAX_PREFIXES
];
356 /* Has MMX register operands. */
357 bfd_boolean has_regmmx
;
359 /* Has XMM register operands. */
360 bfd_boolean has_regxmm
;
362 /* Has YMM register operands. */
363 bfd_boolean has_regymm
;
365 /* Has ZMM register operands. */
366 bfd_boolean has_regzmm
;
368 /* RM and SIB are the modrm byte and the sib byte where the
369 addressing modes of this insn are encoded. */
376 /* Masking attributes. */
377 struct Mask_Operation
*mask
;
379 /* Rounding control and SAE attributes. */
380 struct RC_Operation
*rounding
;
382 /* Broadcasting attributes. */
383 struct Broadcast_Operation
*broadcast
;
385 /* Compressed disp8*N attribute. */
386 unsigned int memshift
;
388 /* Prefer load or store in encoding. */
391 dir_encoding_default
= 0,
397 /* Prefer 8bit or 32bit displacement in encoding. */
400 disp_encoding_default
= 0,
405 /* Prefer the REX byte in encoding. */
406 bfd_boolean rex_encoding
;
408 /* Disable instruction size optimization. */
409 bfd_boolean no_optimize
;
411 /* How to encode vector instructions. */
414 vex_encoding_default
= 0,
421 const char *rep_prefix
;
424 const char *hle_prefix
;
426 /* Have BND prefix. */
427 const char *bnd_prefix
;
429 /* Have NOTRACK prefix. */
430 const char *notrack_prefix
;
433 enum i386_error error
;
436 typedef struct _i386_insn i386_insn
;
438 /* Link RC type with corresponding string, that'll be looked for in
447 static const struct RC_name RC_NamesTable
[] =
449 { rne
, STRING_COMMA_LEN ("rn-sae") },
450 { rd
, STRING_COMMA_LEN ("rd-sae") },
451 { ru
, STRING_COMMA_LEN ("ru-sae") },
452 { rz
, STRING_COMMA_LEN ("rz-sae") },
453 { saeonly
, STRING_COMMA_LEN ("sae") },
456 /* List of chars besides those in app.c:symbol_chars that can start an
457 operand. Used to prevent the scrubber eating vital white-space. */
458 const char extra_symbol_chars
[] = "*%-([{}"
467 #if (defined (TE_I386AIX) \
468 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
469 && !defined (TE_GNU) \
470 && !defined (TE_LINUX) \
471 && !defined (TE_NACL) \
472 && !defined (TE_FreeBSD) \
473 && !defined (TE_DragonFly) \
474 && !defined (TE_NetBSD)))
475 /* This array holds the chars that always start a comment. If the
476 pre-processor is disabled, these aren't very useful. The option
477 --divide will remove '/' from this list. */
478 const char *i386_comment_chars
= "#/";
479 #define SVR4_COMMENT_CHARS 1
480 #define PREFIX_SEPARATOR '\\'
483 const char *i386_comment_chars
= "#";
484 #define PREFIX_SEPARATOR '/'
487 /* This array holds the chars that only start a comment at the beginning of
488 a line. If the line seems to have the form '# 123 filename'
489 .line and .file directives will appear in the pre-processed output.
490 Note that input_file.c hand checks for '#' at the beginning of the
491 first line of the input file. This is because the compiler outputs
492 #NO_APP at the beginning of its output.
493 Also note that comments started like this one will always work if
494 '/' isn't otherwise defined. */
495 const char line_comment_chars
[] = "#/";
497 const char line_separator_chars
[] = ";";
499 /* Chars that can be used to separate mant from exp in floating point
501 const char EXP_CHARS
[] = "eE";
503 /* Chars that mean this number is a floating point constant
506 const char FLT_CHARS
[] = "fFdDxX";
508 /* Tables for lexical analysis. */
509 static char mnemonic_chars
[256];
510 static char register_chars
[256];
511 static char operand_chars
[256];
512 static char identifier_chars
[256];
513 static char digit_chars
[256];
515 /* Lexical macros. */
516 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
517 #define is_operand_char(x) (operand_chars[(unsigned char) x])
518 #define is_register_char(x) (register_chars[(unsigned char) x])
519 #define is_space_char(x) ((x) == ' ')
520 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
521 #define is_digit_char(x) (digit_chars[(unsigned char) x])
523 /* All non-digit non-letter characters that may occur in an operand. */
524 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
526 /* md_assemble() always leaves the strings it's passed unaltered. To
527 effect this we maintain a stack of saved characters that we've smashed
528 with '\0's (indicating end of strings for various sub-fields of the
529 assembler instruction). */
530 static char save_stack
[32];
531 static char *save_stack_p
;
532 #define END_STRING_AND_SAVE(s) \
533 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
534 #define RESTORE_END_STRING(s) \
535 do { *(s) = *--save_stack_p; } while (0)
537 /* The instruction we're assembling. */
540 /* Possible templates for current insn. */
541 static const templates
*current_templates
;
543 /* Per instruction expressionS buffers: max displacements & immediates. */
544 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
545 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
547 /* Current operand we are working on. */
548 static int this_operand
= -1;
550 /* We support four different modes. FLAG_CODE variable is used to distinguish
558 static enum flag_code flag_code
;
559 static unsigned int object_64bit
;
560 static unsigned int disallow_64bit_reloc
;
561 static int use_rela_relocations
= 0;
563 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
564 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
565 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
567 /* The ELF ABI to use. */
575 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
578 #if defined (TE_PE) || defined (TE_PEP)
579 /* Use big object file format. */
580 static int use_big_obj
= 0;
583 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
584 /* 1 if generating code for a shared library. */
585 static int shared
= 0;
588 /* 1 for intel syntax,
590 static int intel_syntax
= 0;
592 /* 1 for Intel64 ISA,
596 /* 1 for intel mnemonic,
597 0 if att mnemonic. */
598 static int intel_mnemonic
= !SYSV386_COMPAT
;
600 /* 1 if pseudo registers are permitted. */
601 static int allow_pseudo_reg
= 0;
603 /* 1 if register prefix % not required. */
604 static int allow_naked_reg
= 0;
606 /* 1 if the assembler should add BND prefix for all control-transferring
607 instructions supporting it, even if this prefix wasn't specified
609 static int add_bnd_prefix
= 0;
611 /* 1 if pseudo index register, eiz/riz, is allowed.  */
612 static int allow_index_reg
= 0;
614 /* 1 if the assembler should ignore LOCK prefix, even if it was
615 specified explicitly. */
616 static int omit_lock_prefix
= 0;
618 /* 1 if the assembler should encode lfence, mfence, and sfence as
619 "lock addl $0, (%{re}sp)". */
620 static int avoid_fence
= 0;
622 /* 1 if the assembler should generate relax relocations. */
624 static int generate_relax_relocations
625 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
627 static enum check_kind
633 sse_check
, operand_check
= check_warning
;
636 1. Clear the REX_W bit with register operand if possible.
637 2. Above plus use 128bit vector instruction to clear the full vector
640 static int optimize
= 0;
643 1. Clear the REX_W bit with register operand if possible.
644 2. Above plus use 128bit vector instruction to clear the full vector
646 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
649 static int optimize_for_space
= 0;
651 /* Register prefix used for error message. */
652 static const char *register_prefix
= "%";
654 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
655 leave, push, and pop instructions so that gcc has the same stack
656 frame as in 32 bit mode. */
657 static char stackop_size
= '\0';
659 /* Non-zero to optimize code alignment. */
660 int optimize_align_code
= 1;
662 /* Non-zero to quieten some warnings. */
663 static int quiet_warnings
= 0;
666 static const char *cpu_arch_name
= NULL
;
667 static char *cpu_sub_arch_name
= NULL
;
669 /* CPU feature flags. */
670 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
672 /* If we have selected a cpu we are generating instructions for. */
673 static int cpu_arch_tune_set
= 0;
675 /* Cpu we are generating instructions for. */
676 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
678 /* CPU feature flags of cpu we are generating instructions for. */
679 static i386_cpu_flags cpu_arch_tune_flags
;
681 /* CPU instruction set architecture used. */
682 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
684 /* CPU feature flags of instruction set architecture used. */
685 i386_cpu_flags cpu_arch_isa_flags
;
687 /* If set, conditional jumps are not automatically promoted to handle
688 larger than a byte offset. */
689 static unsigned int no_cond_jump_promotion
= 0;
691 /* Encode SSE instructions with VEX prefix. */
692 static unsigned int sse2avx
;
694 /* Encode scalar AVX instructions with specific vector length. */
701 /* Encode VEX WIG instructions with specific vex.w. */
708 /* Encode scalar EVEX LIG instructions with specific vector length. */
716 /* Encode EVEX WIG instructions with specific evex.w. */
723 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
724 static enum rc_type evexrcig
= rne
;
726 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
727 static symbolS
*GOT_symbol
;
729 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
730 unsigned int x86_dwarf2_return_column
;
732 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
733 int x86_cie_data_alignment
;
735 /* Interface to relax_segment.
736 There are 3 major relax states for 386 jump insns because the
737 different types of jumps add different sizes to frags when we're
738 figuring out what sort of jump to choose to reach a given label. */
741 #define UNCOND_JUMP 0
743 #define COND_JUMP86 2
748 #define SMALL16 (SMALL | CODE16)
750 #define BIG16 (BIG | CODE16)
754 #define INLINE __inline__
760 #define ENCODE_RELAX_STATE(type, size) \
761 ((relax_substateT) (((type) << 2) | (size)))
762 #define TYPE_FROM_RELAX_STATE(s) \
764 #define DISP_SIZE_FROM_RELAX_STATE(s) \
765 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
767 /* This table is used by relax_frag to promote short jumps to long
768 ones where necessary. SMALL (short) jumps may be promoted to BIG
769 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
770 don't allow a short jump in a 32 bit code segment to be promoted to
771 a 16 bit offset jump because it's slower (requires data size
772 prefix), and doesn't work, unless the destination is in the bottom
773 64k of the code segment (The top 16 bits of eip are zeroed). */
775 const relax_typeS md_relax_table
[] =
778 1) most positive reach of this state,
779 2) most negative reach of this state,
780 3) how many bytes this mode will have in the variable part of the frag
781 4) which index into the table to try if we can't fit into this one. */
783 /* UNCOND_JUMP states. */
784 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
785 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
786 /* dword jmp adds 4 bytes to frag:
787 0 extra opcode bytes, 4 displacement bytes. */
789 /* word jmp adds 2 bytes to frag:
790 0 extra opcode bytes, 2 displacement bytes. */
793 /* COND_JUMP states. */
794 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
795 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
796 /* dword conditionals adds 5 bytes to frag:
797 1 extra opcode byte, 4 displacement bytes. */
799 /* word conditionals add 3 bytes to frag:
800 1 extra opcode byte, 2 displacement bytes. */
803 /* COND_JUMP86 states. */
804 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
805 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
806 /* dword conditionals adds 5 bytes to frag:
807 1 extra opcode byte, 4 displacement bytes. */
809 /* word conditionals add 4 bytes to frag:
810 1 displacement byte and a 3 byte long branch insn. */
814 static const arch_entry cpu_arch
[] =
816 /* Do not replace the first two entries - i386_target_format()
817 relies on them being there in this order. */
818 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
819 CPU_GENERIC32_FLAGS
, 0 },
820 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
821 CPU_GENERIC64_FLAGS
, 0 },
822 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
824 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
826 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
828 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
830 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
832 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
834 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
836 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
838 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
839 CPU_PENTIUMPRO_FLAGS
, 0 },
840 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
842 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
844 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
846 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
848 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
849 CPU_NOCONA_FLAGS
, 0 },
850 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
852 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
854 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
855 CPU_CORE2_FLAGS
, 1 },
856 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
857 CPU_CORE2_FLAGS
, 0 },
858 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
859 CPU_COREI7_FLAGS
, 0 },
860 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
862 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
864 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
865 CPU_IAMCU_FLAGS
, 0 },
866 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
868 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
870 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
871 CPU_ATHLON_FLAGS
, 0 },
872 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
874 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
876 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
878 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
879 CPU_AMDFAM10_FLAGS
, 0 },
880 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
881 CPU_BDVER1_FLAGS
, 0 },
882 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
883 CPU_BDVER2_FLAGS
, 0 },
884 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
885 CPU_BDVER3_FLAGS
, 0 },
886 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
887 CPU_BDVER4_FLAGS
, 0 },
888 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
889 CPU_ZNVER1_FLAGS
, 0 },
890 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
891 CPU_ZNVER2_FLAGS
, 0 },
892 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
893 CPU_BTVER1_FLAGS
, 0 },
894 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
895 CPU_BTVER2_FLAGS
, 0 },
896 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
898 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
900 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
904 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
906 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
908 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
910 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
912 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
914 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
916 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
917 CPU_SSSE3_FLAGS
, 0 },
918 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
919 CPU_SSE4_1_FLAGS
, 0 },
920 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
921 CPU_SSE4_2_FLAGS
, 0 },
922 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
923 CPU_SSE4_2_FLAGS
, 0 },
924 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
926 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
928 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
929 CPU_AVX512F_FLAGS
, 0 },
930 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
931 CPU_AVX512CD_FLAGS
, 0 },
932 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
933 CPU_AVX512ER_FLAGS
, 0 },
934 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
935 CPU_AVX512PF_FLAGS
, 0 },
936 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
937 CPU_AVX512DQ_FLAGS
, 0 },
938 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
939 CPU_AVX512BW_FLAGS
, 0 },
940 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
941 CPU_AVX512VL_FLAGS
, 0 },
942 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
945 CPU_VMFUNC_FLAGS
, 0 },
946 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
949 CPU_XSAVE_FLAGS
, 0 },
950 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
951 CPU_XSAVEOPT_FLAGS
, 0 },
952 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
953 CPU_XSAVEC_FLAGS
, 0 },
954 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
955 CPU_XSAVES_FLAGS
, 0 },
956 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
958 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
959 CPU_PCLMUL_FLAGS
, 0 },
960 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
961 CPU_PCLMUL_FLAGS
, 1 },
962 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
963 CPU_FSGSBASE_FLAGS
, 0 },
964 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
965 CPU_RDRND_FLAGS
, 0 },
966 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
968 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
970 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
972 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
974 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
976 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
978 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
979 CPU_MOVBE_FLAGS
, 0 },
980 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
982 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
984 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
985 CPU_LZCNT_FLAGS
, 0 },
986 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
988 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
990 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
991 CPU_INVPCID_FLAGS
, 0 },
992 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
993 CPU_CLFLUSH_FLAGS
, 0 },
994 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
996 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
997 CPU_SYSCALL_FLAGS
, 0 },
998 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
999 CPU_RDTSCP_FLAGS
, 0 },
1000 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1001 CPU_3DNOW_FLAGS
, 0 },
1002 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1003 CPU_3DNOWA_FLAGS
, 0 },
1004 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1005 CPU_PADLOCK_FLAGS
, 0 },
1006 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1007 CPU_SVME_FLAGS
, 1 },
1008 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1009 CPU_SVME_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1011 CPU_SSE4A_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1014 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1016 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1020 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1021 CPU_RDSEED_FLAGS
, 0 },
1022 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1023 CPU_PRFCHW_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1025 CPU_SMAP_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1031 CPU_CLFLUSHOPT_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1033 CPU_PREFETCHWT1_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1036 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1037 CPU_CLWB_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1039 CPU_AVX512IFMA_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1041 CPU_AVX512VBMI_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1043 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1045 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1046 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512_VBMI2_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512_VNNI_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512_BITALG_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1055 CPU_CLZERO_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1057 CPU_MWAITX_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1059 CPU_OSPKE_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1061 CPU_RDPID_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1063 CPU_PTWRITE_FLAGS
, 0 },
1064 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1066 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1067 CPU_SHSTK_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1069 CPU_GFNI_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1071 CPU_VAES_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1073 CPU_VPCLMULQDQ_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1075 CPU_WBNOINVD_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1077 CPU_PCONFIG_FLAGS
, 0 },
1078 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1079 CPU_WAITPKG_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1081 CPU_CLDEMOTE_FLAGS
, 0 },
1082 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1083 CPU_MOVDIRI_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1085 CPU_MOVDIR64B_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1087 CPU_AVX512_BF16_FLAGS
, 0 },
1088 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1089 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1091 CPU_ENQCMD_FLAGS
, 0 },
1092 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1093 CPU_RDPRU_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1095 CPU_MCOMMIT_FLAGS
, 0 },
1098 static const noarch_entry cpu_noarch
[] =
1100 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1101 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1102 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1103 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1104 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1105 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1106 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1107 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1108 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1109 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1110 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1111 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1112 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1113 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1114 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1115 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1116 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1117 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1118 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1119 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1120 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1121 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1122 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1123 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1124 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1125 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1126 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1127 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1128 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1129 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1130 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1131 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1132 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1133 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1134 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1135 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1136 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1137 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1141 /* Like s_lcomm_internal in gas/read.c but the alignment string
1142 is allowed to be optional. */
1145 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1152 && *input_line_pointer
== ',')
1154 align
= parse_align (needs_align
- 1);
1156 if (align
== (addressT
) -1)
1171 bss_alloc (symbolP
, size
, align
);
1176 pe_lcomm (int needs_align
)
1178 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1182 const pseudo_typeS md_pseudo_table
[] =
1184 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1185 {"align", s_align_bytes
, 0},
1187 {"align", s_align_ptwo
, 0},
1189 {"arch", set_cpu_arch
, 0},
1193 {"lcomm", pe_lcomm
, 1},
1195 {"ffloat", float_cons
, 'f'},
1196 {"dfloat", float_cons
, 'd'},
1197 {"tfloat", float_cons
, 'x'},
1199 {"slong", signed_cons
, 4},
1200 {"noopt", s_ignore
, 0},
1201 {"optim", s_ignore
, 0},
1202 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1203 {"code16", set_code_flag
, CODE_16BIT
},
1204 {"code32", set_code_flag
, CODE_32BIT
},
1206 {"code64", set_code_flag
, CODE_64BIT
},
1208 {"intel_syntax", set_intel_syntax
, 1},
1209 {"att_syntax", set_intel_syntax
, 0},
1210 {"intel_mnemonic", set_intel_mnemonic
, 1},
1211 {"att_mnemonic", set_intel_mnemonic
, 0},
1212 {"allow_index_reg", set_allow_index_reg
, 1},
1213 {"disallow_index_reg", set_allow_index_reg
, 0},
1214 {"sse_check", set_check
, 0},
1215 {"operand_check", set_check
, 1},
1216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1217 {"largecomm", handle_large_common
, 0},
1219 {"file", dwarf2_directive_file
, 0},
1220 {"loc", dwarf2_directive_loc
, 0},
1221 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1224 {"secrel32", pe_directive_secrel
, 0},
/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
1238 /* Various efficient no-op patterns for aligning code labels.
1239 Note: Don't try to assemble the instructions in the comments.
1240 0L and 0w are not legal. */
1241 static const unsigned char f32_1
[] =
1243 static const unsigned char f32_2
[] =
1244 {0x66,0x90}; /* xchg %ax,%ax */
1245 static const unsigned char f32_3
[] =
1246 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1247 static const unsigned char f32_4
[] =
1248 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1249 static const unsigned char f32_6
[] =
1250 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1251 static const unsigned char f32_7
[] =
1252 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1253 static const unsigned char f16_3
[] =
1254 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1255 static const unsigned char f16_4
[] =
1256 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1257 static const unsigned char jump_disp8
[] =
1258 {0xeb}; /* jmp disp8 */
1259 static const unsigned char jump32_disp32
[] =
1260 {0xe9}; /* jmp disp32 */
1261 static const unsigned char jump16_disp32
[] =
1262 {0x66,0xe9}; /* jmp disp32 */
1263 /* 32-bit NOPs patterns. */
1264 static const unsigned char *const f32_patt
[] = {
1265 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1267 /* 16-bit NOPs patterns. */
1268 static const unsigned char *const f16_patt
[] = {
1269 f32_1
, f32_2
, f16_3
, f16_4
1271 /* nopl (%[re]ax) */
1272 static const unsigned char alt_3
[] =
1274 /* nopl 0(%[re]ax) */
1275 static const unsigned char alt_4
[] =
1276 {0x0f,0x1f,0x40,0x00};
1277 /* nopl 0(%[re]ax,%[re]ax,1) */
1278 static const unsigned char alt_5
[] =
1279 {0x0f,0x1f,0x44,0x00,0x00};
1280 /* nopw 0(%[re]ax,%[re]ax,1) */
1281 static const unsigned char alt_6
[] =
1282 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1283 /* nopl 0L(%[re]ax) */
1284 static const unsigned char alt_7
[] =
1285 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1286 /* nopl 0L(%[re]ax,%[re]ax,1) */
1287 static const unsigned char alt_8
[] =
1288 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1289 /* nopw 0L(%[re]ax,%[re]ax,1) */
1290 static const unsigned char alt_9
[] =
1291 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1292 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1293 static const unsigned char alt_10
[] =
1294 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1295 /* data16 nopw %cs:0L(%eax,%eax,1) */
1296 static const unsigned char alt_11
[] =
1297 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1298 /* 32-bit and 64-bit NOPs patterns. */
1299 static const unsigned char *const alt_patt
[] = {
1300 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1301 alt_9
, alt_10
, alt_11
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  PATT is an
   array indexed by (size - 1); a NULL slot means no pattern of that
   exact size exists and a smaller one must be combined with a
   one-byte NOP.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
                  int count, int max_single_nop_size)

{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
                max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
        {
          /* Use the smaller one plus one-byte NOP if the needed one
             isn't available.  */
          last--;
          nops = patt[last - 1];
          memcpy (where + offset, nops, last);
          where[offset + last] = *patt[0];
        }
      else
        memcpy (where + offset, nops, last);
    }
}
1357 fits_in_imm7 (offsetT num
)
1359 return (num
& 0x7f) == num
;
1363 fits_in_imm31 (offsetT num
)
1365 return (num
& 0x7fffffff) == num
;
1368 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1369 single NOP instruction LIMIT. */
1372 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1374 const unsigned char *const *patt
= NULL
;
1375 int max_single_nop_size
;
1376 /* Maximum number of NOPs before switching to jump over NOPs. */
1377 int max_number_of_nops
;
1379 switch (fragP
->fr_type
)
1388 /* We need to decide which NOP sequence to use for 32bit and
1389 64bit. When -mtune= is used:
1391 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1392 PROCESSOR_GENERIC32, f32_patt will be used.
1393 2. For the rest, alt_patt will be used.
1395 When -mtune= isn't used, alt_patt will be used if
1396 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1399 When -march= or .arch is used, we can't use anything beyond
1400 cpu_arch_isa_flags. */
1402 if (flag_code
== CODE_16BIT
)
1405 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1406 /* Limit number of NOPs to 2 in 16-bit mode. */
1407 max_number_of_nops
= 2;
1411 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1413 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1414 switch (cpu_arch_tune
)
1416 case PROCESSOR_UNKNOWN
:
1417 /* We use cpu_arch_isa_flags to check if we SHOULD
1418 optimize with nops. */
1419 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1424 case PROCESSOR_PENTIUM4
:
1425 case PROCESSOR_NOCONA
:
1426 case PROCESSOR_CORE
:
1427 case PROCESSOR_CORE2
:
1428 case PROCESSOR_COREI7
:
1429 case PROCESSOR_L1OM
:
1430 case PROCESSOR_K1OM
:
1431 case PROCESSOR_GENERIC64
:
1433 case PROCESSOR_ATHLON
:
1435 case PROCESSOR_AMDFAM10
:
1437 case PROCESSOR_ZNVER
:
1441 case PROCESSOR_I386
:
1442 case PROCESSOR_I486
:
1443 case PROCESSOR_PENTIUM
:
1444 case PROCESSOR_PENTIUMPRO
:
1445 case PROCESSOR_IAMCU
:
1446 case PROCESSOR_GENERIC32
:
1453 switch (fragP
->tc_frag_data
.tune
)
1455 case PROCESSOR_UNKNOWN
:
1456 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1457 PROCESSOR_UNKNOWN. */
1461 case PROCESSOR_I386
:
1462 case PROCESSOR_I486
:
1463 case PROCESSOR_PENTIUM
:
1464 case PROCESSOR_IAMCU
:
1466 case PROCESSOR_ATHLON
:
1468 case PROCESSOR_AMDFAM10
:
1470 case PROCESSOR_ZNVER
:
1472 case PROCESSOR_GENERIC32
:
1473 /* We use cpu_arch_isa_flags to check if we CAN optimize
1475 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1480 case PROCESSOR_PENTIUMPRO
:
1481 case PROCESSOR_PENTIUM4
:
1482 case PROCESSOR_NOCONA
:
1483 case PROCESSOR_CORE
:
1484 case PROCESSOR_CORE2
:
1485 case PROCESSOR_COREI7
:
1486 case PROCESSOR_L1OM
:
1487 case PROCESSOR_K1OM
:
1488 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1493 case PROCESSOR_GENERIC64
:
1499 if (patt
== f32_patt
)
1501 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1502 /* Limit number of NOPs to 2 for older processors. */
1503 max_number_of_nops
= 2;
1507 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1508 /* Limit number of NOPs to 7 for newer processors. */
1509 max_number_of_nops
= 7;
1514 limit
= max_single_nop_size
;
1516 if (fragP
->fr_type
== rs_fill_nop
)
1518 /* Output NOPs for .nop directive. */
1519 if (limit
> max_single_nop_size
)
1521 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1522 _("invalid single nop size: %d "
1523 "(expect within [0, %d])"),
1524 limit
, max_single_nop_size
);
1529 fragP
->fr_var
= count
;
1531 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1533 /* Generate jump over NOPs. */
1534 offsetT disp
= count
- 2;
1535 if (fits_in_imm7 (disp
))
1537 /* Use "jmp disp8" if possible. */
1539 where
[0] = jump_disp8
[0];
1545 unsigned int size_of_jump
;
1547 if (flag_code
== CODE_16BIT
)
1549 where
[0] = jump16_disp32
[0];
1550 where
[1] = jump16_disp32
[1];
1555 where
[0] = jump32_disp32
[0];
1559 count
-= size_of_jump
+ 4;
1560 if (!fits_in_imm31 (count
))
1562 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1563 _("jump over nop padding out of range"));
1567 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1568 where
+= size_of_jump
+ 4;
1572 /* Generate multiple NOPs. */
1573 i386_output_nops (where
, patt
, count
, limit
);
1577 operand_type_all_zero (const union i386_operand_type
*x
)
1579 switch (ARRAY_SIZE(x
->array
))
1590 return !x
->array
[0];
1597 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1599 switch (ARRAY_SIZE(x
->array
))
1615 x
->bitfield
.class = ClassNone
;
1619 operand_type_equal (const union i386_operand_type
*x
,
1620 const union i386_operand_type
*y
)
1622 switch (ARRAY_SIZE(x
->array
))
1625 if (x
->array
[2] != y
->array
[2])
1629 if (x
->array
[1] != y
->array
[1])
1633 return x
->array
[0] == y
->array
[0];
1641 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1643 switch (ARRAY_SIZE(x
->array
))
1658 return !x
->array
[0];
1665 cpu_flags_equal (const union i386_cpu_flags
*x
,
1666 const union i386_cpu_flags
*y
)
1668 switch (ARRAY_SIZE(x
->array
))
1671 if (x
->array
[3] != y
->array
[3])
1675 if (x
->array
[2] != y
->array
[2])
1679 if (x
->array
[1] != y
->array
[1])
1683 return x
->array
[0] == y
->array
[0];
1691 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1693 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1694 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1697 static INLINE i386_cpu_flags
1698 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1700 switch (ARRAY_SIZE (x
.array
))
1703 x
.array
[3] &= y
.array
[3];
1706 x
.array
[2] &= y
.array
[2];
1709 x
.array
[1] &= y
.array
[1];
1712 x
.array
[0] &= y
.array
[0];
1720 static INLINE i386_cpu_flags
1721 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1723 switch (ARRAY_SIZE (x
.array
))
1726 x
.array
[3] |= y
.array
[3];
1729 x
.array
[2] |= y
.array
[2];
1732 x
.array
[1] |= y
.array
[1];
1735 x
.array
[0] |= y
.array
[0];
1743 static INLINE i386_cpu_flags
1744 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1746 switch (ARRAY_SIZE (x
.array
))
1749 x
.array
[3] &= ~y
.array
[3];
1752 x
.array
[2] &= ~y
.array
[2];
1755 x
.array
[1] &= ~y
.array
[1];
1758 x
.array
[0] &= ~y
.array
[0];
1766 #define CPU_FLAGS_ARCH_MATCH 0x1
1767 #define CPU_FLAGS_64BIT_MATCH 0x2
1769 #define CPU_FLAGS_PERFECT_MATCH \
1770 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1772 /* Return CPU flags match bits. */
1775 cpu_flags_match (const insn_template
*t
)
1777 i386_cpu_flags x
= t
->cpu_flags
;
1778 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1780 x
.bitfield
.cpu64
= 0;
1781 x
.bitfield
.cpuno64
= 0;
1783 if (cpu_flags_all_zero (&x
))
1785 /* This instruction is available on all archs. */
1786 match
|= CPU_FLAGS_ARCH_MATCH
;
1790 /* This instruction is available only on some archs. */
1791 i386_cpu_flags cpu
= cpu_arch_flags
;
1793 /* AVX512VL is no standalone feature - match it and then strip it. */
1794 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1796 x
.bitfield
.cpuavx512vl
= 0;
1798 cpu
= cpu_flags_and (x
, cpu
);
1799 if (!cpu_flags_all_zero (&cpu
))
1801 if (x
.bitfield
.cpuavx
)
1803 /* We need to check a few extra flags with AVX. */
1804 if (cpu
.bitfield
.cpuavx
1805 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1806 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1807 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1808 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1809 match
|= CPU_FLAGS_ARCH_MATCH
;
1811 else if (x
.bitfield
.cpuavx512f
)
1813 /* We need to check a few extra flags with AVX512F. */
1814 if (cpu
.bitfield
.cpuavx512f
1815 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1816 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1817 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1818 match
|= CPU_FLAGS_ARCH_MATCH
;
1821 match
|= CPU_FLAGS_ARCH_MATCH
;
1827 static INLINE i386_operand_type
1828 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1830 if (x
.bitfield
.class != y
.bitfield
.class)
1831 x
.bitfield
.class = ClassNone
;
1833 switch (ARRAY_SIZE (x
.array
))
1836 x
.array
[2] &= y
.array
[2];
1839 x
.array
[1] &= y
.array
[1];
1842 x
.array
[0] &= y
.array
[0];
1850 static INLINE i386_operand_type
1851 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1853 gas_assert (y
.bitfield
.class == ClassNone
);
1855 switch (ARRAY_SIZE (x
.array
))
1858 x
.array
[2] &= ~y
.array
[2];
1861 x
.array
[1] &= ~y
.array
[1];
1864 x
.array
[0] &= ~y
.array
[0];
1872 static INLINE i386_operand_type
1873 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1875 gas_assert (x
.bitfield
.class == ClassNone
||
1876 y
.bitfield
.class == ClassNone
||
1877 x
.bitfield
.class == y
.bitfield
.class);
1879 switch (ARRAY_SIZE (x
.array
))
1882 x
.array
[2] |= y
.array
[2];
1885 x
.array
[1] |= y
.array
[1];
1888 x
.array
[0] |= y
.array
[0];
1896 static INLINE i386_operand_type
1897 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1899 gas_assert (y
.bitfield
.class == ClassNone
);
1901 switch (ARRAY_SIZE (x
.array
))
1904 x
.array
[2] ^= y
.array
[2];
1907 x
.array
[1] ^= y
.array
[1];
1910 x
.array
[0] ^= y
.array
[0];
1918 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1919 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1920 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1921 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1922 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
1923 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
1924 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1925 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1926 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1927 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1928 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1929 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1930 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1931 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1932 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1933 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1934 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1945 operand_type_check (i386_operand_type t
, enum operand_type c
)
1950 return t
.bitfield
.class == Reg
;
1953 return (t
.bitfield
.imm8
1957 || t
.bitfield
.imm32s
1958 || t
.bitfield
.imm64
);
1961 return (t
.bitfield
.disp8
1962 || t
.bitfield
.disp16
1963 || t
.bitfield
.disp32
1964 || t
.bitfield
.disp32s
1965 || t
.bitfield
.disp64
);
1968 return (t
.bitfield
.disp8
1969 || t
.bitfield
.disp16
1970 || t
.bitfield
.disp32
1971 || t
.bitfield
.disp32s
1972 || t
.bitfield
.disp64
1973 || t
.bitfield
.baseindex
);
1982 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
1983 between operand GIVEN and opeand WANTED for instruction template T. */
1986 match_operand_size (const insn_template
*t
, unsigned int wanted
,
1989 return !((i
.types
[given
].bitfield
.byte
1990 && !t
->operand_types
[wanted
].bitfield
.byte
)
1991 || (i
.types
[given
].bitfield
.word
1992 && !t
->operand_types
[wanted
].bitfield
.word
)
1993 || (i
.types
[given
].bitfield
.dword
1994 && !t
->operand_types
[wanted
].bitfield
.dword
)
1995 || (i
.types
[given
].bitfield
.qword
1996 && !t
->operand_types
[wanted
].bitfield
.qword
)
1997 || (i
.types
[given
].bitfield
.tbyte
1998 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2001 /* Return 1 if there is no conflict in SIMD register between operand
2002 GIVEN and opeand WANTED for instruction template T. */
2005 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2008 return !((i
.types
[given
].bitfield
.xmmword
2009 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2010 || (i
.types
[given
].bitfield
.ymmword
2011 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2012 || (i
.types
[given
].bitfield
.zmmword
2013 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2016 /* Return 1 if there is no conflict in any size between operand GIVEN
2017 and opeand WANTED for instruction template T. */
2020 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2023 return (match_operand_size (t
, wanted
, given
)
2024 && !((i
.types
[given
].bitfield
.unspecified
2026 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2027 || (i
.types
[given
].bitfield
.fword
2028 && !t
->operand_types
[wanted
].bitfield
.fword
)
2029 /* For scalar opcode templates to allow register and memory
2030 operands at the same time, some special casing is needed
2031 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2032 down-conversion vpmov*. */
2033 || ((t
->operand_types
[wanted
].bitfield
.regsimd
2034 && !t
->opcode_modifier
.broadcast
2035 && (t
->operand_types
[wanted
].bitfield
.byte
2036 || t
->operand_types
[wanted
].bitfield
.word
2037 || t
->operand_types
[wanted
].bitfield
.dword
2038 || t
->operand_types
[wanted
].bitfield
.qword
))
2039 ? (i
.types
[given
].bitfield
.xmmword
2040 || i
.types
[given
].bitfield
.ymmword
2041 || i
.types
[given
].bitfield
.zmmword
)
2042 : !match_simd_size(t
, wanted
, given
))));
2045 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2046 operands for instruction template T, and it has MATCH_REVERSE set if there
2047 is no size conflict on any operands for the template with operands reversed
2048 (and the template allows for reversing in the first place). */
2050 #define MATCH_STRAIGHT 1
2051 #define MATCH_REVERSE 2
2053 static INLINE
unsigned int
2054 operand_size_match (const insn_template
*t
)
2056 unsigned int j
, match
= MATCH_STRAIGHT
;
2058 /* Don't check jump instructions. */
2059 if (t
->opcode_modifier
.jump
2060 || t
->opcode_modifier
.jumpbyte
2061 || t
->opcode_modifier
.jumpdword
2062 || t
->opcode_modifier
.jumpintersegment
)
2065 /* Check memory and accumulator operand size. */
2066 for (j
= 0; j
< i
.operands
; j
++)
2068 if (i
.types
[j
].bitfield
.class != Reg
&& !i
.types
[j
].bitfield
.regsimd
2069 && t
->operand_types
[j
].bitfield
.anysize
)
2072 if (t
->operand_types
[j
].bitfield
.class == Reg
2073 && !match_operand_size (t
, j
, j
))
2079 if (t
->operand_types
[j
].bitfield
.regsimd
2080 && !match_simd_size (t
, j
, j
))
2086 if (t
->operand_types
[j
].bitfield
.acc
2087 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2093 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2100 if (!t
->opcode_modifier
.d
)
2104 i
.error
= operand_size_mismatch
;
2108 /* Check reverse. */
2109 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2111 for (j
= 0; j
< i
.operands
; j
++)
2113 unsigned int given
= i
.operands
- j
- 1;
2115 if (t
->operand_types
[j
].bitfield
.class == Reg
2116 && !match_operand_size (t
, j
, given
))
2119 if (t
->operand_types
[j
].bitfield
.regsimd
2120 && !match_simd_size (t
, j
, given
))
2123 if (t
->operand_types
[j
].bitfield
.acc
2124 && (!match_operand_size (t
, j
, given
)
2125 || !match_simd_size (t
, j
, given
)))
2128 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2132 return match
| MATCH_REVERSE
;
2136 operand_type_match (i386_operand_type overlap
,
2137 i386_operand_type given
)
2139 i386_operand_type temp
= overlap
;
2141 temp
.bitfield
.jumpabsolute
= 0;
2142 temp
.bitfield
.unspecified
= 0;
2143 temp
.bitfield
.byte
= 0;
2144 temp
.bitfield
.word
= 0;
2145 temp
.bitfield
.dword
= 0;
2146 temp
.bitfield
.fword
= 0;
2147 temp
.bitfield
.qword
= 0;
2148 temp
.bitfield
.tbyte
= 0;
2149 temp
.bitfield
.xmmword
= 0;
2150 temp
.bitfield
.ymmword
= 0;
2151 temp
.bitfield
.zmmword
= 0;
2152 if (operand_type_all_zero (&temp
))
2155 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
2156 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
2160 i
.error
= operand_type_mismatch
;
2164 /* If given types g0 and g1 are registers they must be of the same type
2165 unless the expected operand type register overlap is null.
2166 Memory operand size of certain SIMD instructions is also being checked
2170 operand_type_register_match (i386_operand_type g0
,
2171 i386_operand_type t0
,
2172 i386_operand_type g1
,
2173 i386_operand_type t1
)
2175 if (g0
.bitfield
.class != Reg
2176 && !g0
.bitfield
.regsimd
2177 && (!operand_type_check (g0
, anymem
)
2178 || g0
.bitfield
.unspecified
2179 || !t0
.bitfield
.regsimd
))
2182 if (g1
.bitfield
.class != Reg
2183 && !g1
.bitfield
.regsimd
2184 && (!operand_type_check (g1
, anymem
)
2185 || g1
.bitfield
.unspecified
2186 || !t1
.bitfield
.regsimd
))
2189 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2190 && g0
.bitfield
.word
== g1
.bitfield
.word
2191 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2192 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2193 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2194 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2195 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2198 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2199 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2200 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2201 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2202 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2203 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2204 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2207 i
.error
= register_type_mismatch
;
2212 static INLINE
unsigned int
2213 register_number (const reg_entry
*r
)
2215 unsigned int nr
= r
->reg_num
;
2217 if (r
->reg_flags
& RegRex
)
2220 if (r
->reg_flags
& RegVRex
)
2226 static INLINE
unsigned int
2227 mode_from_disp_size (i386_operand_type t
)
2229 if (t
.bitfield
.disp8
)
2231 else if (t
.bitfield
.disp16
2232 || t
.bitfield
.disp32
2233 || t
.bitfield
.disp32s
)
2240 fits_in_signed_byte (addressT num
)
2242 return num
+ 0x80 <= 0xff;
2246 fits_in_unsigned_byte (addressT num
)
2252 fits_in_unsigned_word (addressT num
)
2254 return num
<= 0xffff;
2258 fits_in_signed_word (addressT num
)
2260 return num
+ 0x8000 <= 0xffff;
2264 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2269 return num
+ 0x80000000 <= 0xffffffff;
2271 } /* fits_in_signed_long() */
2274 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2279 return num
<= 0xffffffff;
2281 } /* fits_in_unsigned_long() */
2284 fits_in_disp8 (offsetT num
)
2286 int shift
= i
.memshift
;
2292 mask
= (1 << shift
) - 1;
2294 /* Return 0 if NUM isn't properly aligned. */
2298 /* Check if NUM will fit in 8bit after shift. */
2299 return fits_in_signed_byte (num
>> shift
);
2303 fits_in_imm4 (offsetT num
)
2305 return (num
& 0xf) == num
;
2308 static i386_operand_type
2309 smallest_imm_type (offsetT num
)
2311 i386_operand_type t
;
2313 operand_type_set (&t
, 0);
2314 t
.bitfield
.imm64
= 1;
2316 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2318 /* This code is disabled on the 486 because all the Imm1 forms
2319 in the opcode table are slower on the i486. They're the
2320 versions with the implicitly specified single-position
2321 displacement, which has another syntax if you really want to
2323 t
.bitfield
.imm1
= 1;
2324 t
.bitfield
.imm8
= 1;
2325 t
.bitfield
.imm8s
= 1;
2326 t
.bitfield
.imm16
= 1;
2327 t
.bitfield
.imm32
= 1;
2328 t
.bitfield
.imm32s
= 1;
2330 else if (fits_in_signed_byte (num
))
2332 t
.bitfield
.imm8
= 1;
2333 t
.bitfield
.imm8s
= 1;
2334 t
.bitfield
.imm16
= 1;
2335 t
.bitfield
.imm32
= 1;
2336 t
.bitfield
.imm32s
= 1;
2338 else if (fits_in_unsigned_byte (num
))
2340 t
.bitfield
.imm8
= 1;
2341 t
.bitfield
.imm16
= 1;
2342 t
.bitfield
.imm32
= 1;
2343 t
.bitfield
.imm32s
= 1;
2345 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2347 t
.bitfield
.imm16
= 1;
2348 t
.bitfield
.imm32
= 1;
2349 t
.bitfield
.imm32s
= 1;
2351 else if (fits_in_signed_long (num
))
2353 t
.bitfield
.imm32
= 1;
2354 t
.bitfield
.imm32s
= 1;
2356 else if (fits_in_unsigned_long (num
))
2357 t
.bitfield
.imm32
= 1;
2363 offset_in_range (offsetT val
, int size
)
2369 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2370 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2371 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2373 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2379 /* If BFD64, sign extend val for 32bit address mode. */
2380 if (flag_code
!= CODE_64BIT
2381 || i
.prefix
[ADDR_PREFIX
])
2382 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2383 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2386 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2388 char buf1
[40], buf2
[40];
2390 sprint_value (buf1
, val
);
2391 sprint_value (buf2
, val
& mask
);
2392 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2407 a. PREFIX_EXIST if attempting to add a prefix where one from the
2408 same class already exists.
2409 b. PREFIX_LOCK if lock prefix is added.
2410 c. PREFIX_REP if rep/repne prefix is added.
2411 d. PREFIX_DS if ds prefix is added.
2412 e. PREFIX_OTHER if other prefix is added.
2415 static enum PREFIX_GROUP
2416 add_prefix (unsigned int prefix
)
2418 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2421 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2422 && flag_code
== CODE_64BIT
)
2424 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2425 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2426 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2427 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2438 case DS_PREFIX_OPCODE
:
2441 case CS_PREFIX_OPCODE
:
2442 case ES_PREFIX_OPCODE
:
2443 case FS_PREFIX_OPCODE
:
2444 case GS_PREFIX_OPCODE
:
2445 case SS_PREFIX_OPCODE
:
2449 case REPNE_PREFIX_OPCODE
:
2450 case REPE_PREFIX_OPCODE
:
2455 case LOCK_PREFIX_OPCODE
:
2464 case ADDR_PREFIX_OPCODE
:
2468 case DATA_PREFIX_OPCODE
:
2472 if (i
.prefix
[q
] != 0)
2480 i
.prefix
[q
] |= prefix
;
2483 as_bad (_("same type of prefix used twice"));
2489 update_code_flag (int value
, int check
)
2491 PRINTF_LIKE ((*as_error
));
2493 flag_code
= (enum flag_code
) value
;
2494 if (flag_code
== CODE_64BIT
)
2496 cpu_arch_flags
.bitfield
.cpu64
= 1;
2497 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2501 cpu_arch_flags
.bitfield
.cpu64
= 0;
2502 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2504 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2507 as_error
= as_fatal
;
2510 (*as_error
) (_("64bit mode not supported on `%s'."),
2511 cpu_arch_name
? cpu_arch_name
: default_arch
);
2513 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2516 as_error
= as_fatal
;
2519 (*as_error
) (_("32bit mode not supported on `%s'."),
2520 cpu_arch_name
? cpu_arch_name
: default_arch
);
2522 stackop_size
= '\0';
/* Handler for .code16/.code32/.code64 — non-fatal mode switch.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2532 set_16bit_gcc_code_flag (int new_code_flag
)
2534 flag_code
= (enum flag_code
) new_code_flag
;
2535 if (flag_code
!= CODE_16BIT
)
2537 cpu_arch_flags
.bitfield
.cpu64
= 0;
2538 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2539 stackop_size
= LONG_MNEM_SUFFIX
;
2543 set_intel_syntax (int syntax_flag
)
2545 /* Find out if register prefixing is specified. */
2546 int ask_naked_reg
= 0;
2549 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2552 int e
= get_symbol_name (&string
);
2554 if (strcmp (string
, "prefix") == 0)
2556 else if (strcmp (string
, "noprefix") == 0)
2559 as_bad (_("bad argument to syntax directive."));
2560 (void) restore_line_pointer (e
);
2562 demand_empty_rest_of_line ();
2564 intel_syntax
= syntax_flag
;
2566 if (ask_naked_reg
== 0)
2567 allow_naked_reg
= (intel_syntax
2568 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2570 allow_naked_reg
= (ask_naked_reg
< 0);
2572 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2574 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2575 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2576 register_prefix
= allow_naked_reg
? "" : "%";
2580 set_intel_mnemonic (int mnemonic_flag
)
2582 intel_mnemonic
= mnemonic_flag
;
2586 set_allow_index_reg (int flag
)
2588 allow_index_reg
= flag
;
2592 set_check (int what
)
2594 enum check_kind
*kind
;
2599 kind
= &operand_check
;
2610 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2613 int e
= get_symbol_name (&string
);
2615 if (strcmp (string
, "none") == 0)
2617 else if (strcmp (string
, "warning") == 0)
2618 *kind
= check_warning
;
2619 else if (strcmp (string
, "error") == 0)
2620 *kind
= check_error
;
2622 as_bad (_("bad argument to %s_check directive."), str
);
2623 (void) restore_line_pointer (e
);
2626 as_bad (_("missing argument for %s_check directive"), str
);
2628 demand_empty_rest_of_line ();
/* Diagnose selection of an ISA extension NAME whose flags NEW_FLAG are
   incompatible with the target machine (ELF-only Intel MCU / L1OM /
   K1OM checks).  Reaching the final as_bad reports incompatibility.
   NOTE(review): this span is extraction-damaged -- the statement
   bodies of the three `if' checks and part of the `arch' selection are
   missing; the code is kept byte-for-byte as found.  */
2632 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2633 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2635 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Cached name used in the diagnostic; chosen once per assembly.  */
2636 static const char *arch
;
2638 /* Intel LIOM is only supported on ELF. */
2644 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2645 use default_arch. */
2646 arch
= cpu_arch_name
;
2648 arch
= default_arch
;
2651 /* If we are targeting Intel MCU, we must enable it. */
2652 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2653 || new_flag
.bitfield
.cpuiamcu
)
2656 /* If we are targeting Intel L1OM, we must enable it. */
2657 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2658 || new_flag
.bitfield
.cpul1om
)
2661 /* If we are targeting Intel K1OM, we must enable it. */
2662 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2663 || new_flag
.bitfield
.cpuk1om
)
/* None of the target-specific early-outs above applied: the requested
   extension is unsupported on this target.  */
2666 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2671 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2675 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2678 int e
= get_symbol_name (&string
);
2680 i386_cpu_flags flags
;
2682 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2684 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2686 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2690 cpu_arch_name
= cpu_arch
[j
].name
;
2691 cpu_sub_arch_name
= NULL
;
2692 cpu_arch_flags
= cpu_arch
[j
].flags
;
2693 if (flag_code
== CODE_64BIT
)
2695 cpu_arch_flags
.bitfield
.cpu64
= 1;
2696 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2700 cpu_arch_flags
.bitfield
.cpu64
= 0;
2701 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2703 cpu_arch_isa
= cpu_arch
[j
].type
;
2704 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2705 if (!cpu_arch_tune_set
)
2707 cpu_arch_tune
= cpu_arch_isa
;
2708 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2713 flags
= cpu_flags_or (cpu_arch_flags
,
2716 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2718 if (cpu_sub_arch_name
)
2720 char *name
= cpu_sub_arch_name
;
2721 cpu_sub_arch_name
= concat (name
,
2723 (const char *) NULL
);
2727 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2728 cpu_arch_flags
= flags
;
2729 cpu_arch_isa_flags
= flags
;
2733 = cpu_flags_or (cpu_arch_isa_flags
,
2735 (void) restore_line_pointer (e
);
2736 demand_empty_rest_of_line ();
2741 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2743 /* Disable an ISA extension. */
2744 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2745 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2747 flags
= cpu_flags_and_not (cpu_arch_flags
,
2748 cpu_noarch
[j
].flags
);
2749 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2751 if (cpu_sub_arch_name
)
2753 char *name
= cpu_sub_arch_name
;
2754 cpu_sub_arch_name
= concat (name
, string
,
2755 (const char *) NULL
);
2759 cpu_sub_arch_name
= xstrdup (string
);
2760 cpu_arch_flags
= flags
;
2761 cpu_arch_isa_flags
= flags
;
2763 (void) restore_line_pointer (e
);
2764 demand_empty_rest_of_line ();
2768 j
= ARRAY_SIZE (cpu_arch
);
2771 if (j
>= ARRAY_SIZE (cpu_arch
))
2772 as_bad (_("no such architecture: `%s'"), string
);
2774 *input_line_pointer
= e
;
2777 as_bad (_("missing cpu architecture"));
2779 no_cond_jump_promotion
= 0;
2780 if (*input_line_pointer
== ','
2781 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2786 ++input_line_pointer
;
2787 e
= get_symbol_name (&string
);
2789 if (strcmp (string
, "nojumps") == 0)
2790 no_cond_jump_promotion
= 1;
2791 else if (strcmp (string
, "jumps") == 0)
2794 as_bad (_("no such architecture modifier: `%s'"), string
);
2796 (void) restore_line_pointer (e
);
2799 demand_empty_rest_of_line ();
2802 enum bfd_architecture
2805 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2807 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2808 || flag_code
!= CODE_64BIT
)
2809 as_fatal (_("Intel L1OM is 64bit ELF only"));
2810 return bfd_arch_l1om
;
2812 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2814 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2815 || flag_code
!= CODE_64BIT
)
2816 as_fatal (_("Intel K1OM is 64bit ELF only"));
2817 return bfd_arch_k1om
;
2819 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2821 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2822 || flag_code
== CODE_64BIT
)
2823 as_fatal (_("Intel MCU is 32bit ELF only"));
2824 return bfd_arch_iamcu
;
2827 return bfd_arch_i386
;
2833 if (!strncmp (default_arch
, "x86_64", 6))
2835 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2837 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2838 || default_arch
[6] != '\0')
2839 as_fatal (_("Intel L1OM is 64bit ELF only"));
2840 return bfd_mach_l1om
;
2842 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2844 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2845 || default_arch
[6] != '\0')
2846 as_fatal (_("Intel K1OM is 64bit ELF only"));
2847 return bfd_mach_k1om
;
2849 else if (default_arch
[6] == '\0')
2850 return bfd_mach_x86_64
;
2852 return bfd_mach_x64_32
;
2854 else if (!strcmp (default_arch
, "i386")
2855 || !strcmp (default_arch
, "iamcu"))
2857 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2859 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2860 as_fatal (_("Intel MCU is 32bit ELF only"));
2861 return bfd_mach_i386_iamcu
;
2864 return bfd_mach_i386_i386
;
2867 as_fatal (_("unknown architecture"));
2873 const char *hash_err
;
2875 /* Support pseudo prefixes like {disp32}. */
2876 lex_type
['{'] = LEX_BEGIN_NAME
;
2878 /* Initialize op_hash hash table. */
2879 op_hash
= hash_new ();
2882 const insn_template
*optab
;
2883 templates
*core_optab
;
2885 /* Setup for loop. */
2887 core_optab
= XNEW (templates
);
2888 core_optab
->start
= optab
;
2893 if (optab
->name
== NULL
2894 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2896 /* different name --> ship out current template list;
2897 add to hash table; & begin anew. */
2898 core_optab
->end
= optab
;
2899 hash_err
= hash_insert (op_hash
,
2901 (void *) core_optab
);
2904 as_fatal (_("can't hash %s: %s"),
2908 if (optab
->name
== NULL
)
2910 core_optab
= XNEW (templates
);
2911 core_optab
->start
= optab
;
2916 /* Initialize reg_hash hash table. */
2917 reg_hash
= hash_new ();
2919 const reg_entry
*regtab
;
2920 unsigned int regtab_size
= i386_regtab_size
;
2922 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2924 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2926 as_fatal (_("can't hash %s: %s"),
2932 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2937 for (c
= 0; c
< 256; c
++)
2942 mnemonic_chars
[c
] = c
;
2943 register_chars
[c
] = c
;
2944 operand_chars
[c
] = c
;
2946 else if (ISLOWER (c
))
2948 mnemonic_chars
[c
] = c
;
2949 register_chars
[c
] = c
;
2950 operand_chars
[c
] = c
;
2952 else if (ISUPPER (c
))
2954 mnemonic_chars
[c
] = TOLOWER (c
);
2955 register_chars
[c
] = mnemonic_chars
[c
];
2956 operand_chars
[c
] = c
;
2958 else if (c
== '{' || c
== '}')
2960 mnemonic_chars
[c
] = c
;
2961 operand_chars
[c
] = c
;
2964 if (ISALPHA (c
) || ISDIGIT (c
))
2965 identifier_chars
[c
] = c
;
2968 identifier_chars
[c
] = c
;
2969 operand_chars
[c
] = c
;
2974 identifier_chars
['@'] = '@';
2977 identifier_chars
['?'] = '?';
2978 operand_chars
['?'] = '?';
2980 digit_chars
['-'] = '-';
2981 mnemonic_chars
['_'] = '_';
2982 mnemonic_chars
['-'] = '-';
2983 mnemonic_chars
['.'] = '.';
2984 identifier_chars
['_'] = '_';
2985 identifier_chars
['.'] = '.';
2987 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2988 operand_chars
[(unsigned char) *p
] = *p
;
2991 if (flag_code
== CODE_64BIT
)
2993 #if defined (OBJ_COFF) && defined (TE_PE)
2994 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2997 x86_dwarf2_return_column
= 16;
2999 x86_cie_data_alignment
= -8;
3003 x86_dwarf2_return_column
= 8;
3004 x86_cie_data_alignment
= -4;
3009 i386_print_statistics (FILE *file
)
3011 hash_print_statistics (file
, "i386 opcode", op_hash
);
3012 hash_print_statistics (file
, "i386 register", reg_hash
);
3017 /* Debugging routines for md_assemble. */
3018 static void pte (insn_template
*);
3019 static void pt (i386_operand_type
);
3020 static void pe (expressionS
*);
3021 static void ps (symbolS
*);
3024 pi (const char *line
, i386_insn
*x
)
3028 fprintf (stdout
, "%s: template ", line
);
3030 fprintf (stdout
, " address: base %s index %s scale %x\n",
3031 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3032 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3033 x
->log2_scale_factor
);
3034 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3035 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3036 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3037 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3038 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3039 (x
->rex
& REX_W
) != 0,
3040 (x
->rex
& REX_R
) != 0,
3041 (x
->rex
& REX_X
) != 0,
3042 (x
->rex
& REX_B
) != 0);
3043 for (j
= 0; j
< x
->operands
; j
++)
3045 fprintf (stdout
, " #%d: ", j
+ 1);
3047 fprintf (stdout
, "\n");
3048 if (x
->types
[j
].bitfield
.class == Reg
3049 || x
->types
[j
].bitfield
.regmmx
3050 || x
->types
[j
].bitfield
.regsimd
3051 || x
->types
[j
].bitfield
.class == SReg
3052 || x
->types
[j
].bitfield
.class == RegCR
3053 || x
->types
[j
].bitfield
.class == RegDR
3054 || x
->types
[j
].bitfield
.class == RegTR
)
3055 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3056 if (operand_type_check (x
->types
[j
], imm
))
3058 if (operand_type_check (x
->types
[j
], disp
))
3059 pe (x
->op
[j
].disps
);
3064 pte (insn_template
*t
)
3067 fprintf (stdout
, " %d operands ", t
->operands
);
3068 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3069 if (t
->extension_opcode
!= None
)
3070 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3071 if (t
->opcode_modifier
.d
)
3072 fprintf (stdout
, "D");
3073 if (t
->opcode_modifier
.w
)
3074 fprintf (stdout
, "W");
3075 fprintf (stdout
, "\n");
3076 for (j
= 0; j
< t
->operands
; j
++)
3078 fprintf (stdout
, " #%d type ", j
+ 1);
3079 pt (t
->operand_types
[j
]);
3080 fprintf (stdout
, "\n");
3087 fprintf (stdout
, " operation %d\n", e
->X_op
);
3088 fprintf (stdout
, " add_number %ld (%lx)\n",
3089 (long) e
->X_add_number
, (long) e
->X_add_number
);
3090 if (e
->X_add_symbol
)
3092 fprintf (stdout
, " add_symbol ");
3093 ps (e
->X_add_symbol
);
3094 fprintf (stdout
, "\n");
3098 fprintf (stdout
, " op_symbol ");
3099 ps (e
->X_op_symbol
);
3100 fprintf (stdout
, "\n");
3107 fprintf (stdout
, "%s type %s%s",
3109 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3110 segment_name (S_GET_SEGMENT (s
)));
3113 static struct type_name
3115 i386_operand_type mask
;
3118 const type_names
[] =
3120 { OPERAND_TYPE_REG8
, "r8" },
3121 { OPERAND_TYPE_REG16
, "r16" },
3122 { OPERAND_TYPE_REG32
, "r32" },
3123 { OPERAND_TYPE_REG64
, "r64" },
3124 { OPERAND_TYPE_ACC8
, "acc8" },
3125 { OPERAND_TYPE_ACC16
, "acc16" },
3126 { OPERAND_TYPE_ACC32
, "acc32" },
3127 { OPERAND_TYPE_ACC64
, "acc64" },
3128 { OPERAND_TYPE_IMM8
, "i8" },
3129 { OPERAND_TYPE_IMM8
, "i8s" },
3130 { OPERAND_TYPE_IMM16
, "i16" },
3131 { OPERAND_TYPE_IMM32
, "i32" },
3132 { OPERAND_TYPE_IMM32S
, "i32s" },
3133 { OPERAND_TYPE_IMM64
, "i64" },
3134 { OPERAND_TYPE_IMM1
, "i1" },
3135 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3136 { OPERAND_TYPE_DISP8
, "d8" },
3137 { OPERAND_TYPE_DISP16
, "d16" },
3138 { OPERAND_TYPE_DISP32
, "d32" },
3139 { OPERAND_TYPE_DISP32S
, "d32s" },
3140 { OPERAND_TYPE_DISP64
, "d64" },
3141 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3142 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3143 { OPERAND_TYPE_CONTROL
, "control reg" },
3144 { OPERAND_TYPE_TEST
, "test reg" },
3145 { OPERAND_TYPE_DEBUG
, "debug reg" },
3146 { OPERAND_TYPE_FLOATREG
, "FReg" },
3147 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3148 { OPERAND_TYPE_SREG
, "SReg" },
3149 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
3150 { OPERAND_TYPE_REGMMX
, "rMMX" },
3151 { OPERAND_TYPE_REGXMM
, "rXMM" },
3152 { OPERAND_TYPE_REGYMM
, "rYMM" },
3153 { OPERAND_TYPE_REGZMM
, "rZMM" },
3154 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3155 { OPERAND_TYPE_ESSEG
, "es" },
3159 pt (i386_operand_type t
)
3162 i386_operand_type a
;
3164 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3166 a
= operand_type_and (t
, type_names
[j
].mask
);
3167 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3168 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3173 #endif /* DEBUG386 */
3175 static bfd_reloc_code_real_type
3176 reloc (unsigned int size
,
3179 bfd_reloc_code_real_type other
)
3181 if (other
!= NO_RELOC
)
3183 reloc_howto_type
*rel
;
3188 case BFD_RELOC_X86_64_GOT32
:
3189 return BFD_RELOC_X86_64_GOT64
;
3191 case BFD_RELOC_X86_64_GOTPLT64
:
3192 return BFD_RELOC_X86_64_GOTPLT64
;
3194 case BFD_RELOC_X86_64_PLTOFF64
:
3195 return BFD_RELOC_X86_64_PLTOFF64
;
3197 case BFD_RELOC_X86_64_GOTPC32
:
3198 other
= BFD_RELOC_X86_64_GOTPC64
;
3200 case BFD_RELOC_X86_64_GOTPCREL
:
3201 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3203 case BFD_RELOC_X86_64_TPOFF32
:
3204 other
= BFD_RELOC_X86_64_TPOFF64
;
3206 case BFD_RELOC_X86_64_DTPOFF32
:
3207 other
= BFD_RELOC_X86_64_DTPOFF64
;
3213 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3214 if (other
== BFD_RELOC_SIZE32
)
3217 other
= BFD_RELOC_SIZE64
;
3220 as_bad (_("there are no pc-relative size relocations"));
3226 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3227 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3230 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3232 as_bad (_("unknown relocation (%u)"), other
);
3233 else if (size
!= bfd_get_reloc_size (rel
))
3234 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3235 bfd_get_reloc_size (rel
),
3237 else if (pcrel
&& !rel
->pc_relative
)
3238 as_bad (_("non-pc-relative relocation for pc-relative field"));
3239 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3241 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3243 as_bad (_("relocated field and relocation type differ in signedness"));
3252 as_bad (_("there are no unsigned pc-relative relocations"));
3255 case 1: return BFD_RELOC_8_PCREL
;
3256 case 2: return BFD_RELOC_16_PCREL
;
3257 case 4: return BFD_RELOC_32_PCREL
;
3258 case 8: return BFD_RELOC_64_PCREL
;
3260 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3267 case 4: return BFD_RELOC_X86_64_32S
;
3272 case 1: return BFD_RELOC_8
;
3273 case 2: return BFD_RELOC_16
;
3274 case 4: return BFD_RELOC_32
;
3275 case 8: return BFD_RELOC_64
;
3277 as_bad (_("cannot do %s %u byte relocation"),
3278 sign
> 0 ? "signed" : "unsigned", size
);
3284 /* Here we decide which fixups can be adjusted to make them relative to
3285 the beginning of the section instead of the symbol. Basically we need
3286 to make sure that the dynamic relocations are done correctly, so in
3287 some cases we force the original symbol to be used. */
3290 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3292 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3296 /* Don't adjust pc-relative references to merge sections in 64-bit
3298 if (use_rela_relocations
3299 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3303 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3304 and changed later by validate_fix. */
3305 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3306 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3309 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3310 for size relocations. */
3311 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3312 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3313 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3314 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3315 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3316 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3317 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3318 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3319 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3320 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3321 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3322 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3323 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3324 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3325 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3326 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3327 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3328 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3329 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3330 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3331 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3332 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3333 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3334 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3335 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3336 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3337 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3338 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3339 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3340 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3341 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3342 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3343 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify x87 mnemonic MNEMONIC for Intel-syntax operand handling.
   Returns 0 for non-math ops (including fxsave/fxrstor), 1 for plain
   FP math, 2 for integer ops (fi*), 3 for control/state ops whose
   memory operand is not an FP value (fldcw, fldenv, fnsave, frstor,
   fstcw, fstenv, ...).  Case labels lost in extraction were restored
   from the surviving per-case comments.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3402 /* Build the VEX prefix. */
3405 build_vex_prefix (const insn_template
*t
)
3407 unsigned int register_specifier
;
3408 unsigned int implied_prefix
;
3409 unsigned int vector_length
;
3412 /* Check register specifier. */
3413 if (i
.vex
.register_specifier
)
3415 register_specifier
=
3416 ~register_number (i
.vex
.register_specifier
) & 0xf;
3417 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3420 register_specifier
= 0xf;
3422 /* Use 2-byte VEX prefix by swapping destination and source operand
3423 if there are more than 1 register operand. */
3424 if (i
.reg_operands
> 1
3425 && i
.vec_encoding
!= vex_encoding_vex3
3426 && i
.dir_encoding
== dir_encoding_default
3427 && i
.operands
== i
.reg_operands
3428 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3429 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3430 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3433 unsigned int xchg
= i
.operands
- 1;
3434 union i386_op temp_op
;
3435 i386_operand_type temp_type
;
3437 temp_type
= i
.types
[xchg
];
3438 i
.types
[xchg
] = i
.types
[0];
3439 i
.types
[0] = temp_type
;
3440 temp_op
= i
.op
[xchg
];
3441 i
.op
[xchg
] = i
.op
[0];
3444 gas_assert (i
.rm
.mode
== 3);
3448 i
.rm
.regmem
= i
.rm
.reg
;
3451 if (i
.tm
.opcode_modifier
.d
)
3452 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3453 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3454 else /* Use the next insn. */
3458 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3459 are no memory operands and at least 3 register ones. */
3460 if (i
.reg_operands
>= 3
3461 && i
.vec_encoding
!= vex_encoding_vex3
3462 && i
.reg_operands
== i
.operands
- i
.imm_operands
3463 && i
.tm
.opcode_modifier
.vex
3464 && i
.tm
.opcode_modifier
.commutative
3465 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3467 && i
.vex
.register_specifier
3468 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3470 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3471 union i386_op temp_op
;
3472 i386_operand_type temp_type
;
3474 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3475 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3476 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3477 &i
.types
[i
.operands
- 3]));
3478 gas_assert (i
.rm
.mode
== 3);
3480 temp_type
= i
.types
[xchg
];
3481 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3482 i
.types
[xchg
+ 1] = temp_type
;
3483 temp_op
= i
.op
[xchg
];
3484 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3485 i
.op
[xchg
+ 1] = temp_op
;
3488 xchg
= i
.rm
.regmem
| 8;
3489 i
.rm
.regmem
= ~register_specifier
& 0xf;
3490 gas_assert (!(i
.rm
.regmem
& 8));
3491 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3492 register_specifier
= ~xchg
& 0xf;
3495 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3496 vector_length
= avxscalar
;
3497 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3503 /* Determine vector length from the last multi-length vector
3506 for (op
= t
->operands
; op
--;)
3507 if (t
->operand_types
[op
].bitfield
.xmmword
3508 && t
->operand_types
[op
].bitfield
.ymmword
3509 && i
.types
[op
].bitfield
.ymmword
)
3516 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3521 case DATA_PREFIX_OPCODE
:
3524 case REPE_PREFIX_OPCODE
:
3527 case REPNE_PREFIX_OPCODE
:
3534 /* Check the REX.W bit and VEXW. */
3535 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3536 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3537 else if (i
.tm
.opcode_modifier
.vexw
)
3538 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3540 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3542 /* Use 2-byte VEX prefix if possible. */
3544 && i
.vec_encoding
!= vex_encoding_vex3
3545 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3546 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3548 /* 2-byte VEX prefix. */
3552 i
.vex
.bytes
[0] = 0xc5;
3554 /* Check the REX.R bit. */
3555 r
= (i
.rex
& REX_R
) ? 0 : 1;
3556 i
.vex
.bytes
[1] = (r
<< 7
3557 | register_specifier
<< 3
3558 | vector_length
<< 2
3563 /* 3-byte VEX prefix. */
3568 switch (i
.tm
.opcode_modifier
.vexopcode
)
3572 i
.vex
.bytes
[0] = 0xc4;
3576 i
.vex
.bytes
[0] = 0xc4;
3580 i
.vex
.bytes
[0] = 0xc4;
3584 i
.vex
.bytes
[0] = 0x8f;
3588 i
.vex
.bytes
[0] = 0x8f;
3592 i
.vex
.bytes
[0] = 0x8f;
3598 /* The high 3 bits of the second VEX byte are 1's compliment
3599 of RXB bits from REX. */
3600 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3602 i
.vex
.bytes
[2] = (w
<< 7
3603 | register_specifier
<< 3
3604 | vector_length
<< 2
3609 static INLINE bfd_boolean
3610 is_evex_encoding (const insn_template
*t
)
3612 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3613 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3614 || t
->opcode_modifier
.sae
;
3617 static INLINE bfd_boolean
3618 is_any_vex_encoding (const insn_template
*t
)
3620 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3621 || is_evex_encoding (t
);
3624 /* Build the EVEX prefix. */
3627 build_evex_prefix (void)
3629 unsigned int register_specifier
;
3630 unsigned int implied_prefix
;
3632 rex_byte vrex_used
= 0;
3634 /* Check register specifier. */
3635 if (i
.vex
.register_specifier
)
3637 gas_assert ((i
.vrex
& REX_X
) == 0);
3639 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3640 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3641 register_specifier
+= 8;
3642 /* The upper 16 registers are encoded in the fourth byte of the
3644 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3645 i
.vex
.bytes
[3] = 0x8;
3646 register_specifier
= ~register_specifier
& 0xf;
3650 register_specifier
= 0xf;
3652 /* Encode upper 16 vector index register in the fourth byte of
3654 if (!(i
.vrex
& REX_X
))
3655 i
.vex
.bytes
[3] = 0x8;
3660 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3665 case DATA_PREFIX_OPCODE
:
3668 case REPE_PREFIX_OPCODE
:
3671 case REPNE_PREFIX_OPCODE
:
3678 /* 4 byte EVEX prefix. */
3680 i
.vex
.bytes
[0] = 0x62;
3683 switch (i
.tm
.opcode_modifier
.vexopcode
)
3699 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3701 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3703 /* The fifth bit of the second EVEX byte is 1's compliment of the
3704 REX_R bit in VREX. */
3705 if (!(i
.vrex
& REX_R
))
3706 i
.vex
.bytes
[1] |= 0x10;
3710 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3712 /* When all operands are registers, the REX_X bit in REX is not
3713 used. We reuse it to encode the upper 16 registers, which is
3714 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3715 as 1's compliment. */
3716 if ((i
.vrex
& REX_B
))
3719 i
.vex
.bytes
[1] &= ~0x40;
3723 /* EVEX instructions shouldn't need the REX prefix. */
3724 i
.vrex
&= ~vrex_used
;
3725 gas_assert (i
.vrex
== 0);
3727 /* Check the REX.W bit and VEXW. */
3728 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3729 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3730 else if (i
.tm
.opcode_modifier
.vexw
)
3731 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3733 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3735 /* Encode the U bit. */
3736 implied_prefix
|= 0x4;
3738 /* The third byte of the EVEX prefix. */
3739 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3741 /* The fourth byte of the EVEX prefix. */
3742 /* The zeroing-masking bit. */
3743 if (i
.mask
&& i
.mask
->zeroing
)
3744 i
.vex
.bytes
[3] |= 0x80;
3746 /* Don't always set the broadcast bit if there is no RC. */
3749 /* Encode the vector length. */
3750 unsigned int vec_length
;
3752 if (!i
.tm
.opcode_modifier
.evex
3753 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3757 /* Determine vector length from the last multi-length vector
3760 for (op
= i
.operands
; op
--;)
3761 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3762 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3763 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3765 if (i
.types
[op
].bitfield
.zmmword
)
3767 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3770 else if (i
.types
[op
].bitfield
.ymmword
)
3772 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3775 else if (i
.types
[op
].bitfield
.xmmword
)
3777 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3780 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3782 switch (i
.broadcast
->bytes
)
3785 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3788 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3791 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3800 if (op
>= MAX_OPERANDS
)
3804 switch (i
.tm
.opcode_modifier
.evex
)
3806 case EVEXLIG
: /* LL' is ignored */
3807 vec_length
= evexlig
<< 5;
3810 vec_length
= 0 << 5;
3813 vec_length
= 1 << 5;
3816 vec_length
= 2 << 5;
3822 i
.vex
.bytes
[3] |= vec_length
;
3823 /* Encode the broadcast bit. */
3825 i
.vex
.bytes
[3] |= 0x10;
3829 if (i
.rounding
->type
!= saeonly
)
3830 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3832 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3835 if (i
.mask
&& i
.mask
->mask
)
3836 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3840 process_immext (void)
3844 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3847 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3848 with an opcode suffix which is coded in the same place as an
3849 8-bit immediate field would be.
3850 Here we check those operands and remove them afterwards. */
3853 for (x
= 0; x
< i
.operands
; x
++)
3854 if (register_number (i
.op
[x
].regs
) != x
)
3855 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3856 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3862 if (i
.tm
.cpu_flags
.bitfield
.cpumwaitx
&& i
.operands
> 0)
3864 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3865 suffix which is coded in the same place as an 8-bit immediate
3867 Here we check those operands and remove them afterwards. */
3870 if (i
.operands
!= 3)
3873 for (x
= 0; x
< 2; x
++)
3874 if (register_number (i
.op
[x
].regs
) != x
)
3875 goto bad_register_operand
;
3877 /* Check for third operand for mwaitx/monitorx insn. */
3878 if (register_number (i
.op
[x
].regs
)
3879 != (x
+ (i
.tm
.extension_opcode
== 0xfb)))
3881 bad_register_operand
:
3882 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3883 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+1,
3890 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3891 which is coded in the same place as an 8-bit immediate field
3892 would be. Here we fake an 8-bit immediate operand from the
3893 opcode suffix stored in tm.extension_opcode.
3895 AVX instructions also use this encoding, for some of
3896 3 argument instructions. */
3898 gas_assert (i
.imm_operands
<= 1
3900 || (is_any_vex_encoding (&i
.tm
)
3901 && i
.operands
<= 4)));
3903 exp
= &im_expressions
[i
.imm_operands
++];
3904 i
.op
[i
.operands
].imms
= exp
;
3905 i
.types
[i
.operands
] = imm8
;
3907 exp
->X_op
= O_constant
;
3908 exp
->X_add_number
= i
.tm
.extension_opcode
;
3909 i
.tm
.extension_opcode
= None
;
3916 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3921 as_bad (_("invalid instruction `%s' after `%s'"),
3922 i
.tm
.name
, i
.hle_prefix
);
3925 if (i
.prefix
[LOCK_PREFIX
])
3927 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3931 case HLEPrefixRelease
:
3932 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3934 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3938 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
3940 as_bad (_("memory destination needed for instruction `%s'"
3941 " after `xrelease'"), i
.tm
.name
);
3948 /* Try the shortest encoding by shortening operand size. */
3951 optimize_encoding (void)
3955 if (optimize_for_space
3956 && i
.reg_operands
== 1
3957 && i
.imm_operands
== 1
3958 && !i
.types
[1].bitfield
.byte
3959 && i
.op
[0].imms
->X_op
== O_constant
3960 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
3961 && ((i
.tm
.base_opcode
== 0xa8
3962 && i
.tm
.extension_opcode
== None
)
3963 || (i
.tm
.base_opcode
== 0xf6
3964 && i
.tm
.extension_opcode
== 0x0)))
3967 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
3969 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
3970 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
3972 i
.types
[1].bitfield
.byte
= 1;
3973 /* Ignore the suffix. */
3975 if (base_regnum
>= 4
3976 && !(i
.op
[1].regs
->reg_flags
& RegRex
))
3978 /* Handle SP, BP, SI and DI registers. */
3979 if (i
.types
[1].bitfield
.word
)
3981 else if (i
.types
[1].bitfield
.dword
)
3989 else if (flag_code
== CODE_64BIT
3990 && ((i
.types
[1].bitfield
.qword
3991 && i
.reg_operands
== 1
3992 && i
.imm_operands
== 1
3993 && i
.op
[0].imms
->X_op
== O_constant
3994 && ((i
.tm
.base_opcode
== 0xb8
3995 && i
.tm
.extension_opcode
== None
3996 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
3997 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
3998 && (((i
.tm
.base_opcode
== 0x24
3999 || i
.tm
.base_opcode
== 0xa8)
4000 && i
.tm
.extension_opcode
== None
)
4001 || (i
.tm
.base_opcode
== 0x80
4002 && i
.tm
.extension_opcode
== 0x4)
4003 || ((i
.tm
.base_opcode
== 0xf6
4004 || (i
.tm
.base_opcode
| 1) == 0xc7)
4005 && i
.tm
.extension_opcode
== 0x0)))
4006 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4007 && i
.tm
.base_opcode
== 0x83
4008 && i
.tm
.extension_opcode
== 0x4)))
4009 || (i
.types
[0].bitfield
.qword
4010 && ((i
.reg_operands
== 2
4011 && i
.op
[0].regs
== i
.op
[1].regs
4012 && ((i
.tm
.base_opcode
== 0x30
4013 || i
.tm
.base_opcode
== 0x28)
4014 && i
.tm
.extension_opcode
== None
))
4015 || (i
.reg_operands
== 1
4017 && i
.tm
.base_opcode
== 0x30
4018 && i
.tm
.extension_opcode
== None
)))))
4021 andq $imm31, %r64 -> andl $imm31, %r32
4022 andq $imm7, %r64 -> andl $imm7, %r32
4023 testq $imm31, %r64 -> testl $imm31, %r32
4024 xorq %r64, %r64 -> xorl %r32, %r32
4025 subq %r64, %r64 -> subl %r32, %r32
4026 movq $imm31, %r64 -> movl $imm31, %r32
4027 movq $imm32, %r64 -> movl $imm32, %r32
4029 i
.tm
.opcode_modifier
.norex64
= 1;
4030 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4033 movq $imm31, %r64 -> movl $imm31, %r32
4034 movq $imm32, %r64 -> movl $imm32, %r32
4036 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4037 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4038 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4039 i
.types
[0].bitfield
.imm32
= 1;
4040 i
.types
[0].bitfield
.imm32s
= 0;
4041 i
.types
[0].bitfield
.imm64
= 0;
4042 i
.types
[1].bitfield
.dword
= 1;
4043 i
.types
[1].bitfield
.qword
= 0;
4044 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4047 movq $imm31, %r64 -> movl $imm31, %r32
4049 i
.tm
.base_opcode
= 0xb8;
4050 i
.tm
.extension_opcode
= None
;
4051 i
.tm
.opcode_modifier
.w
= 0;
4052 i
.tm
.opcode_modifier
.shortform
= 1;
4053 i
.tm
.opcode_modifier
.modrm
= 0;
4057 else if (optimize
> 1
4058 && !optimize_for_space
4059 && i
.reg_operands
== 2
4060 && i
.op
[0].regs
== i
.op
[1].regs
4061 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4062 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4063 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4066 andb %rN, %rN -> testb %rN, %rN
4067 andw %rN, %rN -> testw %rN, %rN
4068 andq %rN, %rN -> testq %rN, %rN
4069 orb %rN, %rN -> testb %rN, %rN
4070 orw %rN, %rN -> testw %rN, %rN
4071 orq %rN, %rN -> testq %rN, %rN
4073 and outside of 64-bit mode
4075 andl %rN, %rN -> testl %rN, %rN
4076 orl %rN, %rN -> testl %rN, %rN
4078 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4080 else if (i
.reg_operands
== 3
4081 && i
.op
[0].regs
== i
.op
[1].regs
4082 && !i
.types
[2].bitfield
.xmmword
4083 && (i
.tm
.opcode_modifier
.vex
4084 || ((!i
.mask
|| i
.mask
->zeroing
)
4086 && is_evex_encoding (&i
.tm
)
4087 && (i
.vec_encoding
!= vex_encoding_evex
4088 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4089 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4090 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4091 && i
.types
[2].bitfield
.ymmword
))))
4092 && ((i
.tm
.base_opcode
== 0x55
4093 || i
.tm
.base_opcode
== 0x6655
4094 || i
.tm
.base_opcode
== 0x66df
4095 || i
.tm
.base_opcode
== 0x57
4096 || i
.tm
.base_opcode
== 0x6657
4097 || i
.tm
.base_opcode
== 0x66ef
4098 || i
.tm
.base_opcode
== 0x66f8
4099 || i
.tm
.base_opcode
== 0x66f9
4100 || i
.tm
.base_opcode
== 0x66fa
4101 || i
.tm
.base_opcode
== 0x66fb
4102 || i
.tm
.base_opcode
== 0x42
4103 || i
.tm
.base_opcode
== 0x6642
4104 || i
.tm
.base_opcode
== 0x47
4105 || i
.tm
.base_opcode
== 0x6647)
4106 && i
.tm
.extension_opcode
== None
))
4109 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4111 EVEX VOP %zmmM, %zmmM, %zmmN
4112 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4113 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4114 EVEX VOP %ymmM, %ymmM, %ymmN
4115 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4116 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4117 VEX VOP %ymmM, %ymmM, %ymmN
4118 -> VEX VOP %xmmM, %xmmM, %xmmN
4119 VOP, one of vpandn and vpxor:
4120 VEX VOP %ymmM, %ymmM, %ymmN
4121 -> VEX VOP %xmmM, %xmmM, %xmmN
4122 VOP, one of vpandnd and vpandnq:
4123 EVEX VOP %zmmM, %zmmM, %zmmN
4124 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4125 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4126 EVEX VOP %ymmM, %ymmM, %ymmN
4127 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4128 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4129 VOP, one of vpxord and vpxorq:
4130 EVEX VOP %zmmM, %zmmM, %zmmN
4131 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4132 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4133 EVEX VOP %ymmM, %ymmM, %ymmN
4134 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4135 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4136 VOP, one of kxord and kxorq:
4137 VEX VOP %kM, %kM, %kN
4138 -> VEX kxorw %kM, %kM, %kN
4139 VOP, one of kandnd and kandnq:
4140 VEX VOP %kM, %kM, %kN
4141 -> VEX kandnw %kM, %kM, %kN
4143 if (is_evex_encoding (&i
.tm
))
4145 if (i
.vec_encoding
!= vex_encoding_evex
)
4147 i
.tm
.opcode_modifier
.vex
= VEX128
;
4148 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4149 i
.tm
.opcode_modifier
.evex
= 0;
4151 else if (optimize
> 1)
4152 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4156 else if (i
.tm
.operand_types
[0].bitfield
.regmask
)
4158 i
.tm
.base_opcode
&= 0xff;
4159 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4162 i
.tm
.opcode_modifier
.vex
= VEX128
;
4164 if (i
.tm
.opcode_modifier
.vex
)
4165 for (j
= 0; j
< 3; j
++)
4167 i
.types
[j
].bitfield
.xmmword
= 1;
4168 i
.types
[j
].bitfield
.ymmword
= 0;
4171 else if (i
.vec_encoding
!= vex_encoding_evex
4172 && !i
.types
[0].bitfield
.zmmword
4173 && !i
.types
[1].bitfield
.zmmword
4176 && is_evex_encoding (&i
.tm
)
4177 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4178 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4179 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4180 || (i
.tm
.base_opcode
& ~4) == 0x66db
4181 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4182 && i
.tm
.extension_opcode
== None
)
4185 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4186 vmovdqu32 and vmovdqu64:
4187 EVEX VOP %xmmM, %xmmN
4188 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4189 EVEX VOP %ymmM, %ymmN
4190 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4192 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4194 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4196 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4198 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4199 VOP, one of vpand, vpandn, vpor, vpxor:
4200 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4201 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4202 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4203 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4204 EVEX VOP{d,q} mem, %xmmM, %xmmN
4205 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4206 EVEX VOP{d,q} mem, %ymmM, %ymmN
4207 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4209 for (j
= 0; j
< i
.operands
; j
++)
4210 if (operand_type_check (i
.types
[j
], disp
)
4211 && i
.op
[j
].disps
->X_op
== O_constant
)
4213 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4214 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4215 bytes, we choose EVEX Disp8 over VEX Disp32. */
4216 int evex_disp8
, vex_disp8
;
4217 unsigned int memshift
= i
.memshift
;
4218 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4220 evex_disp8
= fits_in_disp8 (n
);
4222 vex_disp8
= fits_in_disp8 (n
);
4223 if (evex_disp8
!= vex_disp8
)
4225 i
.memshift
= memshift
;
4229 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4232 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4233 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4234 i
.tm
.opcode_modifier
.vex
4235 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4236 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4237 /* VPAND, VPOR, and VPXOR are commutative. */
4238 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4239 i
.tm
.opcode_modifier
.commutative
= 1;
4240 i
.tm
.opcode_modifier
.evex
= 0;
4241 i
.tm
.opcode_modifier
.masking
= 0;
4242 i
.tm
.opcode_modifier
.broadcast
= 0;
4243 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4246 i
.types
[j
].bitfield
.disp8
4247 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4251 /* This is the guts of the machine-dependent assembler. LINE points to a
4252 machine dependent instruction. This function is supposed to emit
4253 the frags/bytes it assembles to. */
4256 md_assemble (char *line
)
4259 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4260 const insn_template
*t
;
4262 /* Initialize globals. */
4263 memset (&i
, '\0', sizeof (i
));
4264 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4265 i
.reloc
[j
] = NO_RELOC
;
4266 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4267 memset (im_expressions
, '\0', sizeof (im_expressions
));
4268 save_stack_p
= save_stack
;
4270 /* First parse an instruction mnemonic & call i386_operand for the operands.
4271 We assume that the scrubber has arranged it so that line[0] is the valid
4272 start of a (possibly prefixed) mnemonic. */
4274 line
= parse_insn (line
, mnemonic
);
4277 mnem_suffix
= i
.suffix
;
4279 line
= parse_operands (line
, mnemonic
);
4281 xfree (i
.memop1_string
);
4282 i
.memop1_string
= NULL
;
4286 /* Now we've parsed the mnemonic into a set of templates, and have the
4287 operands at hand. */
4289 /* All intel opcodes have reversed operands except for "bound" and
4290 "enter". We also don't reverse intersegment "jmp" and "call"
4291 instructions with 2 immediate operands so that the immediate segment
4292 precedes the offset, as it does when in AT&T mode. */
4295 && (strcmp (mnemonic
, "bound") != 0)
4296 && (strcmp (mnemonic
, "invlpga") != 0)
4297 && !(operand_type_check (i
.types
[0], imm
)
4298 && operand_type_check (i
.types
[1], imm
)))
4301 /* The order of the immediates should be reversed
4302 for 2 immediates extrq and insertq instructions */
4303 if (i
.imm_operands
== 2
4304 && (strcmp (mnemonic
, "extrq") == 0
4305 || strcmp (mnemonic
, "insertq") == 0))
4306 swap_2_operands (0, 1);
4311 /* Don't optimize displacement for movabs since it only takes 64bit
4314 && i
.disp_encoding
!= disp_encoding_32bit
4315 && (flag_code
!= CODE_64BIT
4316 || strcmp (mnemonic
, "movabs") != 0))
4319 /* Next, we find a template that matches the given insn,
4320 making sure the overlap of the given operands types is consistent
4321 with the template operand types. */
4323 if (!(t
= match_template (mnem_suffix
)))
4326 if (sse_check
!= check_none
4327 && !i
.tm
.opcode_modifier
.noavx
4328 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4329 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4330 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4331 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4332 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4333 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4334 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4335 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4336 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4337 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4339 (sse_check
== check_warning
4341 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4344 /* Zap movzx and movsx suffix. The suffix has been set from
4345 "word ptr" or "byte ptr" on the source operand in Intel syntax
4346 or extracted from mnemonic in AT&T syntax. But we'll use
4347 the destination register to choose the suffix for encoding. */
4348 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
4350 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
4351 there is no suffix, the default will be byte extension. */
4352 if (i
.reg_operands
!= 2
4355 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4360 if (i
.tm
.opcode_modifier
.fwait
)
4361 if (!add_prefix (FWAIT_OPCODE
))
4364 /* Check if REP prefix is OK. */
4365 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4367 as_bad (_("invalid instruction `%s' after `%s'"),
4368 i
.tm
.name
, i
.rep_prefix
);
4372 /* Check for lock without a lockable instruction. Destination operand
4373 must be memory unless it is xchg (0x86). */
4374 if (i
.prefix
[LOCK_PREFIX
]
4375 && (!i
.tm
.opcode_modifier
.islockable
4376 || i
.mem_operands
== 0
4377 || (i
.tm
.base_opcode
!= 0x86
4378 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4380 as_bad (_("expecting lockable instruction after `lock'"));
4384 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4385 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4387 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4391 /* Check if HLE prefix is OK. */
4392 if (i
.hle_prefix
&& !check_hle ())
4395 /* Check BND prefix. */
4396 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4397 as_bad (_("expecting valid branch instruction after `bnd'"));
4399 /* Check NOTRACK prefix. */
4400 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4401 as_bad (_("expecting indirect branch instruction after `notrack'"));
4403 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4405 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4406 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4407 else if (flag_code
!= CODE_16BIT
4408 ? i
.prefix
[ADDR_PREFIX
]
4409 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4410 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4413 /* Insert BND prefix. */
4414 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4416 if (!i
.prefix
[BND_PREFIX
])
4417 add_prefix (BND_PREFIX_OPCODE
);
4418 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4420 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4421 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4425 /* Check string instruction segment overrides. */
4426 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
4428 if (!check_string ())
4430 i
.disp_operands
= 0;
4433 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4434 optimize_encoding ();
4436 if (!process_suffix ())
4439 /* Update operand types. */
4440 for (j
= 0; j
< i
.operands
; j
++)
4441 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4443 /* Make still unresolved immediate matches conform to size of immediate
4444 given in i.suffix. */
4445 if (!finalize_imm ())
4448 if (i
.types
[0].bitfield
.imm1
)
4449 i
.imm_operands
= 0; /* kludge for shift insns. */
4451 /* We only need to check those implicit registers for instructions
4452 with 3 operands or less. */
4453 if (i
.operands
<= 3)
4454 for (j
= 0; j
< i
.operands
; j
++)
4455 if (i
.types
[j
].bitfield
.inoutportreg
4456 || i
.types
[j
].bitfield
.shiftcount
4457 || (i
.types
[j
].bitfield
.acc
&& !i
.types
[j
].bitfield
.xmmword
))
4460 /* ImmExt should be processed after SSE2AVX. */
4461 if (!i
.tm
.opcode_modifier
.sse2avx
4462 && i
.tm
.opcode_modifier
.immext
)
4465 /* For insns with operands there are more diddles to do to the opcode. */
4468 if (!process_operands ())
4471 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4473 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4474 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4477 if (is_any_vex_encoding (&i
.tm
))
4479 if (!cpu_arch_flags
.bitfield
.cpui286
)
4481 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4486 if (i
.tm
.opcode_modifier
.vex
)
4487 build_vex_prefix (t
);
4489 build_evex_prefix ();
4492 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4493 instructions may define INT_OPCODE as well, so avoid this corner
4494 case for those instructions that use MODRM. */
4495 if (i
.tm
.base_opcode
== INT_OPCODE
4496 && !i
.tm
.opcode_modifier
.modrm
4497 && i
.op
[0].imms
->X_add_number
== 3)
4499 i
.tm
.base_opcode
= INT3_OPCODE
;
4503 if ((i
.tm
.opcode_modifier
.jump
4504 || i
.tm
.opcode_modifier
.jumpbyte
4505 || i
.tm
.opcode_modifier
.jumpdword
)
4506 && i
.op
[0].disps
->X_op
== O_constant
)
4508 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4509 the absolute address given by the constant. Since ix86 jumps and
4510 calls are pc relative, we need to generate a reloc. */
4511 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4512 i
.op
[0].disps
->X_op
= O_symbol
;
4515 if (i
.tm
.opcode_modifier
.rex64
)
4518 /* For 8 bit registers we need an empty rex prefix. Also if the
4519 instruction already has a prefix, we need to convert old
4520 registers to new ones. */
4522 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4523 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4524 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4525 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4526 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4527 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4532 i
.rex
|= REX_OPCODE
;
4533 for (x
= 0; x
< 2; x
++)
4535 /* Look for 8 bit operand that uses old registers. */
4536 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4537 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4539 /* In case it is "hi" register, give up. */
4540 if (i
.op
[x
].regs
->reg_num
> 3)
4541 as_bad (_("can't encode register '%s%s' in an "
4542 "instruction requiring REX prefix."),
4543 register_prefix
, i
.op
[x
].regs
->reg_name
);
4545 /* Otherwise it is equivalent to the extended register.
4546 Since the encoding doesn't change this is merely
4547 cosmetic cleanup for debug output. */
4549 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4554 if (i
.rex
== 0 && i
.rex_encoding
)
4556 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4557 that uses legacy register. If it is "hi" register, don't add
4558 the REX_OPCODE byte. */
4560 for (x
= 0; x
< 2; x
++)
4561 if (i
.types
[x
].bitfield
.class == Reg
4562 && i
.types
[x
].bitfield
.byte
4563 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4564 && i
.op
[x
].regs
->reg_num
> 3)
4566 i
.rex_encoding
= FALSE
;
4575 add_prefix (REX_OPCODE
| i
.rex
);
4577 /* We are ready to output the insn. */
4582 parse_insn (char *line
, char *mnemonic
)
4585 char *token_start
= l
;
4588 const insn_template
*t
;
4594 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4599 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4601 as_bad (_("no such instruction: `%s'"), token_start
);
4606 if (!is_space_char (*l
)
4607 && *l
!= END_OF_INSN
4609 || (*l
!= PREFIX_SEPARATOR
4612 as_bad (_("invalid character %s in mnemonic"),
4613 output_invalid (*l
));
4616 if (token_start
== l
)
4618 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4619 as_bad (_("expecting prefix; got nothing"));
4621 as_bad (_("expecting mnemonic; got nothing"));
4625 /* Look up instruction (or prefix) via hash table. */
4626 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4628 if (*l
!= END_OF_INSN
4629 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
4630 && current_templates
4631 && current_templates
->start
->opcode_modifier
.isprefix
)
4633 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
4635 as_bad ((flag_code
!= CODE_64BIT
4636 ? _("`%s' is only supported in 64-bit mode")
4637 : _("`%s' is not supported in 64-bit mode")),
4638 current_templates
->start
->name
);
4641 /* If we are in 16-bit mode, do not allow addr16 or data16.
4642 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4643 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
4644 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4645 && flag_code
!= CODE_64BIT
4646 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4647 ^ (flag_code
== CODE_16BIT
)))
4649 as_bad (_("redundant %s prefix"),
4650 current_templates
->start
->name
);
4653 if (current_templates
->start
->opcode_length
== 0)
4655 /* Handle pseudo prefixes. */
4656 switch (current_templates
->start
->base_opcode
)
4660 i
.disp_encoding
= disp_encoding_8bit
;
4664 i
.disp_encoding
= disp_encoding_32bit
;
4668 i
.dir_encoding
= dir_encoding_load
;
4672 i
.dir_encoding
= dir_encoding_store
;
4676 i
.vec_encoding
= vex_encoding_vex2
;
4680 i
.vec_encoding
= vex_encoding_vex3
;
4684 i
.vec_encoding
= vex_encoding_evex
;
4688 i
.rex_encoding
= TRUE
;
4692 i
.no_optimize
= TRUE
;
4700 /* Add prefix, checking for repeated prefixes. */
4701 switch (add_prefix (current_templates
->start
->base_opcode
))
4706 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
4707 i
.notrack_prefix
= current_templates
->start
->name
;
4710 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4711 i
.hle_prefix
= current_templates
->start
->name
;
4712 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4713 i
.bnd_prefix
= current_templates
->start
->name
;
4715 i
.rep_prefix
= current_templates
->start
->name
;
4721 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4728 if (!current_templates
)
4730 /* Deprecated functionality (new code should use pseudo-prefixes instead):
4731 Check if we should swap operand or force 32bit displacement in
4733 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4734 i
.dir_encoding
= dir_encoding_swap
;
4735 else if (mnem_p
- 3 == dot_p
4738 i
.disp_encoding
= disp_encoding_8bit
;
4739 else if (mnem_p
- 4 == dot_p
4743 i
.disp_encoding
= disp_encoding_32bit
;
4748 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4751 if (!current_templates
)
4754 if (mnem_p
> mnemonic
)
4756 /* See if we can get a match by trimming off a suffix. */
4759 case WORD_MNEM_SUFFIX
:
4760 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4761 i
.suffix
= SHORT_MNEM_SUFFIX
;
4764 case BYTE_MNEM_SUFFIX
:
4765 case QWORD_MNEM_SUFFIX
:
4766 i
.suffix
= mnem_p
[-1];
4768 current_templates
= (const templates
*) hash_find (op_hash
,
4771 case SHORT_MNEM_SUFFIX
:
4772 case LONG_MNEM_SUFFIX
:
4775 i
.suffix
= mnem_p
[-1];
4777 current_templates
= (const templates
*) hash_find (op_hash
,
4786 if (intel_float_operand (mnemonic
) == 1)
4787 i
.suffix
= SHORT_MNEM_SUFFIX
;
4789 i
.suffix
= LONG_MNEM_SUFFIX
;
4791 current_templates
= (const templates
*) hash_find (op_hash
,
4798 if (!current_templates
)
4800 as_bad (_("no such instruction: `%s'"), token_start
);
4805 if (current_templates
->start
->opcode_modifier
.jump
4806 || current_templates
->start
->opcode_modifier
.jumpbyte
)
4808 /* Check for a branch hint. We allow ",pt" and ",pn" for
4809 predict taken and predict not taken respectively.
4810 I'm not sure that branch hints actually do anything on loop
4811 and jcxz insns (JumpByte) for current Pentium4 chips. They
4812 may work in the future and it doesn't hurt to accept them
4814 if (l
[0] == ',' && l
[1] == 'p')
4818 if (!add_prefix (DS_PREFIX_OPCODE
))
4822 else if (l
[2] == 'n')
4824 if (!add_prefix (CS_PREFIX_OPCODE
))
4830 /* Any other comma loses. */
4833 as_bad (_("invalid character %s in mnemonic"),
4834 output_invalid (*l
));
4838 /* Check if instruction is supported on specified architecture. */
4840 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4842 supported
|= cpu_flags_match (t
);
4843 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4845 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
4846 as_warn (_("use .code16 to ensure correct addressing mode"));
4852 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4853 as_bad (flag_code
== CODE_64BIT
4854 ? _("`%s' is not supported in 64-bit mode")
4855 : _("`%s' is only supported in 64-bit mode"),
4856 current_templates
->start
->name
);
4858 as_bad (_("`%s' is not supported on `%s%s'"),
4859 current_templates
->start
->name
,
4860 cpu_arch_name
? cpu_arch_name
: default_arch
,
4861 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4867 parse_operands (char *l
, const char *mnemonic
)
4871 /* 1 if operand is pending after ','. */
4872 unsigned int expecting_operand
= 0;
4874 /* Non-zero if operand parens not balanced. */
4875 unsigned int paren_not_balanced
;
4877 while (*l
!= END_OF_INSN
)
4879 /* Skip optional white space before operand. */
4880 if (is_space_char (*l
))
4882 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4884 as_bad (_("invalid character %s before operand %d"),
4885 output_invalid (*l
),
4889 token_start
= l
; /* After white space. */
4890 paren_not_balanced
= 0;
4891 while (paren_not_balanced
|| *l
!= ',')
4893 if (*l
== END_OF_INSN
)
4895 if (paren_not_balanced
)
4898 as_bad (_("unbalanced parenthesis in operand %d."),
4901 as_bad (_("unbalanced brackets in operand %d."),
4906 break; /* we are done */
4908 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4910 as_bad (_("invalid character %s in operand %d"),
4911 output_invalid (*l
),
4918 ++paren_not_balanced
;
4920 --paren_not_balanced
;
4925 ++paren_not_balanced
;
4927 --paren_not_balanced
;
4931 if (l
!= token_start
)
4932 { /* Yes, we've read in another operand. */
4933 unsigned int operand_ok
;
4934 this_operand
= i
.operands
++;
4935 if (i
.operands
> MAX_OPERANDS
)
4937 as_bad (_("spurious operands; (%d operands/instruction max)"),
4941 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4942 /* Now parse operand adding info to 'i' as we go along. */
4943 END_STRING_AND_SAVE (l
);
4945 if (i
.mem_operands
> 1)
4947 as_bad (_("too many memory references for `%s'"),
4954 i386_intel_operand (token_start
,
4955 intel_float_operand (mnemonic
));
4957 operand_ok
= i386_att_operand (token_start
);
4959 RESTORE_END_STRING (l
);
4965 if (expecting_operand
)
4967 expecting_operand_after_comma
:
4968 as_bad (_("expecting operand after ','; got nothing"));
4973 as_bad (_("expecting operand before ','; got nothing"));
4978 /* Now *l must be either ',' or END_OF_INSN. */
4981 if (*++l
== END_OF_INSN
)
4983 /* Just skip it, if it's \n complain. */
4984 goto expecting_operand_after_comma
;
4986 expecting_operand
= 1;
4993 swap_2_operands (int xchg1
, int xchg2
)
4995 union i386_op temp_op
;
4996 i386_operand_type temp_type
;
4997 unsigned int temp_flags
;
4998 enum bfd_reloc_code_real temp_reloc
;
5000 temp_type
= i
.types
[xchg2
];
5001 i
.types
[xchg2
] = i
.types
[xchg1
];
5002 i
.types
[xchg1
] = temp_type
;
5004 temp_flags
= i
.flags
[xchg2
];
5005 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5006 i
.flags
[xchg1
] = temp_flags
;
5008 temp_op
= i
.op
[xchg2
];
5009 i
.op
[xchg2
] = i
.op
[xchg1
];
5010 i
.op
[xchg1
] = temp_op
;
5012 temp_reloc
= i
.reloc
[xchg2
];
5013 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5014 i
.reloc
[xchg1
] = temp_reloc
;
5018 if (i
.mask
->operand
== xchg1
)
5019 i
.mask
->operand
= xchg2
;
5020 else if (i
.mask
->operand
== xchg2
)
5021 i
.mask
->operand
= xchg1
;
5025 if (i
.broadcast
->operand
== xchg1
)
5026 i
.broadcast
->operand
= xchg2
;
5027 else if (i
.broadcast
->operand
== xchg2
)
5028 i
.broadcast
->operand
= xchg1
;
5032 if (i
.rounding
->operand
== xchg1
)
5033 i
.rounding
->operand
= xchg2
;
5034 else if (i
.rounding
->operand
== xchg2
)
5035 i
.rounding
->operand
= xchg1
;
5040 swap_operands (void)
5046 swap_2_operands (1, i
.operands
- 2);
5050 swap_2_operands (0, i
.operands
- 1);
5056 if (i
.mem_operands
== 2)
5058 const seg_entry
*temp_seg
;
5059 temp_seg
= i
.seg
[0];
5060 i
.seg
[0] = i
.seg
[1];
5061 i
.seg
[1] = temp_seg
;
5065 /* Try to ensure constant immediates are represented in the smallest
5070 char guess_suffix
= 0;
5074 guess_suffix
= i
.suffix
;
5075 else if (i
.reg_operands
)
5077 /* Figure out a suffix from the last register operand specified.
5078 We can't do this properly yet, ie. excluding InOutPortReg,
5079 but the following works for instructions with immediates.
5080 In any case, we can't set i.suffix yet. */
5081 for (op
= i
.operands
; --op
>= 0;)
5082 if (i
.types
[op
].bitfield
.class != Reg
)
5084 else if (i
.types
[op
].bitfield
.byte
)
5086 guess_suffix
= BYTE_MNEM_SUFFIX
;
5089 else if (i
.types
[op
].bitfield
.word
)
5091 guess_suffix
= WORD_MNEM_SUFFIX
;
5094 else if (i
.types
[op
].bitfield
.dword
)
5096 guess_suffix
= LONG_MNEM_SUFFIX
;
5099 else if (i
.types
[op
].bitfield
.qword
)
5101 guess_suffix
= QWORD_MNEM_SUFFIX
;
5105 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5106 guess_suffix
= WORD_MNEM_SUFFIX
;
5108 for (op
= i
.operands
; --op
>= 0;)
5109 if (operand_type_check (i
.types
[op
], imm
))
5111 switch (i
.op
[op
].imms
->X_op
)
5114 /* If a suffix is given, this operand may be shortened. */
5115 switch (guess_suffix
)
5117 case LONG_MNEM_SUFFIX
:
5118 i
.types
[op
].bitfield
.imm32
= 1;
5119 i
.types
[op
].bitfield
.imm64
= 1;
5121 case WORD_MNEM_SUFFIX
:
5122 i
.types
[op
].bitfield
.imm16
= 1;
5123 i
.types
[op
].bitfield
.imm32
= 1;
5124 i
.types
[op
].bitfield
.imm32s
= 1;
5125 i
.types
[op
].bitfield
.imm64
= 1;
5127 case BYTE_MNEM_SUFFIX
:
5128 i
.types
[op
].bitfield
.imm8
= 1;
5129 i
.types
[op
].bitfield
.imm8s
= 1;
5130 i
.types
[op
].bitfield
.imm16
= 1;
5131 i
.types
[op
].bitfield
.imm32
= 1;
5132 i
.types
[op
].bitfield
.imm32s
= 1;
5133 i
.types
[op
].bitfield
.imm64
= 1;
5137 /* If this operand is at most 16 bits, convert it
5138 to a signed 16 bit number before trying to see
5139 whether it will fit in an even smaller size.
5140 This allows a 16-bit operand such as $0xffe0 to
5141 be recognised as within Imm8S range. */
5142 if ((i
.types
[op
].bitfield
.imm16
)
5143 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5145 i
.op
[op
].imms
->X_add_number
=
5146 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5149 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5150 if ((i
.types
[op
].bitfield
.imm32
)
5151 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5154 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5155 ^ ((offsetT
) 1 << 31))
5156 - ((offsetT
) 1 << 31));
5160 = operand_type_or (i
.types
[op
],
5161 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5163 /* We must avoid matching of Imm32 templates when 64bit
5164 only immediate is available. */
5165 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5166 i
.types
[op
].bitfield
.imm32
= 0;
5173 /* Symbols and expressions. */
5175 /* Convert symbolic operand to proper sizes for matching, but don't
5176 prevent matching a set of insns that only supports sizes other
5177 than those matching the insn suffix. */
5179 i386_operand_type mask
, allowed
;
5180 const insn_template
*t
;
5182 operand_type_set (&mask
, 0);
5183 operand_type_set (&allowed
, 0);
5185 for (t
= current_templates
->start
;
5186 t
< current_templates
->end
;
5189 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5190 allowed
= operand_type_and (allowed
, anyimm
);
5192 switch (guess_suffix
)
5194 case QWORD_MNEM_SUFFIX
:
5195 mask
.bitfield
.imm64
= 1;
5196 mask
.bitfield
.imm32s
= 1;
5198 case LONG_MNEM_SUFFIX
:
5199 mask
.bitfield
.imm32
= 1;
5201 case WORD_MNEM_SUFFIX
:
5202 mask
.bitfield
.imm16
= 1;
5204 case BYTE_MNEM_SUFFIX
:
5205 mask
.bitfield
.imm8
= 1;
5210 allowed
= operand_type_and (mask
, allowed
);
5211 if (!operand_type_all_zero (&allowed
))
5212 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5219 /* Try to use the smallest displacement type too. */
5221 optimize_disp (void)
5225 for (op
= i
.operands
; --op
>= 0;)
5226 if (operand_type_check (i
.types
[op
], disp
))
5228 if (i
.op
[op
].disps
->X_op
== O_constant
)
5230 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5232 if (i
.types
[op
].bitfield
.disp16
5233 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5235 /* If this operand is at most 16 bits, convert
5236 to a signed 16 bit number and don't use 64bit
5238 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5239 i
.types
[op
].bitfield
.disp64
= 0;
5242 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5243 if (i
.types
[op
].bitfield
.disp32
5244 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5246 /* If this operand is at most 32 bits, convert
5247 to a signed 32 bit number and don't use 64bit
5249 op_disp
&= (((offsetT
) 2 << 31) - 1);
5250 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5251 i
.types
[op
].bitfield
.disp64
= 0;
5254 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5256 i
.types
[op
].bitfield
.disp8
= 0;
5257 i
.types
[op
].bitfield
.disp16
= 0;
5258 i
.types
[op
].bitfield
.disp32
= 0;
5259 i
.types
[op
].bitfield
.disp32s
= 0;
5260 i
.types
[op
].bitfield
.disp64
= 0;
5264 else if (flag_code
== CODE_64BIT
)
5266 if (fits_in_signed_long (op_disp
))
5268 i
.types
[op
].bitfield
.disp64
= 0;
5269 i
.types
[op
].bitfield
.disp32s
= 1;
5271 if (i
.prefix
[ADDR_PREFIX
]
5272 && fits_in_unsigned_long (op_disp
))
5273 i
.types
[op
].bitfield
.disp32
= 1;
5275 if ((i
.types
[op
].bitfield
.disp32
5276 || i
.types
[op
].bitfield
.disp32s
5277 || i
.types
[op
].bitfield
.disp16
)
5278 && fits_in_disp8 (op_disp
))
5279 i
.types
[op
].bitfield
.disp8
= 1;
5281 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5282 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5284 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5285 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5286 i
.types
[op
].bitfield
.disp8
= 0;
5287 i
.types
[op
].bitfield
.disp16
= 0;
5288 i
.types
[op
].bitfield
.disp32
= 0;
5289 i
.types
[op
].bitfield
.disp32s
= 0;
5290 i
.types
[op
].bitfield
.disp64
= 0;
5293 /* We only support 64bit displacement on constants. */
5294 i
.types
[op
].bitfield
.disp64
= 0;
5298 /* Return 1 if there is a match in broadcast bytes between operand
5299 GIVEN and instruction template T. */
5302 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5304 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5305 && i
.types
[given
].bitfield
.byte
)
5306 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5307 && i
.types
[given
].bitfield
.word
)
5308 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5309 && i
.types
[given
].bitfield
.dword
)
5310 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5311 && i
.types
[given
].bitfield
.qword
));
5314 /* Check if operands are valid for the instruction. */
5317 check_VecOperands (const insn_template
*t
)
5321 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
5323 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5324 any one operand are implicity requiring AVX512VL support if the actual
5325 operand size is YMMword or XMMword. Since this function runs after
5326 template matching, there's no need to check for YMMword/XMMword in
5328 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5329 if (!cpu_flags_all_zero (&cpu
)
5330 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5331 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5333 for (op
= 0; op
< t
->operands
; ++op
)
5335 if (t
->operand_types
[op
].bitfield
.zmmword
5336 && (i
.types
[op
].bitfield
.ymmword
5337 || i
.types
[op
].bitfield
.xmmword
))
5339 i
.error
= unsupported
;
5345 /* Without VSIB byte, we can't have a vector register for index. */
5346 if (!t
->opcode_modifier
.vecsib
5348 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5349 || i
.index_reg
->reg_type
.bitfield
.ymmword
5350 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5352 i
.error
= unsupported_vector_index_register
;
5356 /* Check if default mask is allowed. */
5357 if (t
->opcode_modifier
.nodefmask
5358 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5360 i
.error
= no_default_mask
;
5364 /* For VSIB byte, we need a vector register for index, and all vector
5365 registers must be distinct. */
5366 if (t
->opcode_modifier
.vecsib
)
5369 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5370 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5371 || (t
->opcode_modifier
.vecsib
== VecSIB256
5372 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5373 || (t
->opcode_modifier
.vecsib
== VecSIB512
5374 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5376 i
.error
= invalid_vsib_address
;
5380 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5381 if (i
.reg_operands
== 2 && !i
.mask
)
5383 gas_assert (i
.types
[0].bitfield
.regsimd
);
5384 gas_assert (i
.types
[0].bitfield
.xmmword
5385 || i
.types
[0].bitfield
.ymmword
);
5386 gas_assert (i
.types
[2].bitfield
.regsimd
);
5387 gas_assert (i
.types
[2].bitfield
.xmmword
5388 || i
.types
[2].bitfield
.ymmword
);
5389 if (operand_check
== check_none
)
5391 if (register_number (i
.op
[0].regs
)
5392 != register_number (i
.index_reg
)
5393 && register_number (i
.op
[2].regs
)
5394 != register_number (i
.index_reg
)
5395 && register_number (i
.op
[0].regs
)
5396 != register_number (i
.op
[2].regs
))
5398 if (operand_check
== check_error
)
5400 i
.error
= invalid_vector_register_set
;
5403 as_warn (_("mask, index, and destination registers should be distinct"));
5405 else if (i
.reg_operands
== 1 && i
.mask
)
5407 if (i
.types
[1].bitfield
.regsimd
5408 && (i
.types
[1].bitfield
.xmmword
5409 || i
.types
[1].bitfield
.ymmword
5410 || i
.types
[1].bitfield
.zmmword
)
5411 && (register_number (i
.op
[1].regs
)
5412 == register_number (i
.index_reg
)))
5414 if (operand_check
== check_error
)
5416 i
.error
= invalid_vector_register_set
;
5419 if (operand_check
!= check_none
)
5420 as_warn (_("index and destination registers should be distinct"));
5425 /* Check if broadcast is supported by the instruction and is applied
5426 to the memory operand. */
5429 i386_operand_type type
, overlap
;
5431 /* Check if specified broadcast is supported in this instruction,
5432 and its broadcast bytes match the memory operand. */
5433 op
= i
.broadcast
->operand
;
5434 if (!t
->opcode_modifier
.broadcast
5435 || !(i
.flags
[op
] & Operand_Mem
)
5436 || (!i
.types
[op
].bitfield
.unspecified
5437 && !match_broadcast_size (t
, op
)))
5440 i
.error
= unsupported_broadcast
;
5444 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5445 * i
.broadcast
->type
);
5446 operand_type_set (&type
, 0);
5447 switch (i
.broadcast
->bytes
)
5450 type
.bitfield
.word
= 1;
5453 type
.bitfield
.dword
= 1;
5456 type
.bitfield
.qword
= 1;
5459 type
.bitfield
.xmmword
= 1;
5462 type
.bitfield
.ymmword
= 1;
5465 type
.bitfield
.zmmword
= 1;
5471 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5472 if (operand_type_all_zero (&overlap
))
5475 if (t
->opcode_modifier
.checkregsize
)
5479 type
.bitfield
.baseindex
= 1;
5480 for (j
= 0; j
< i
.operands
; ++j
)
5483 && !operand_type_register_match(i
.types
[j
],
5484 t
->operand_types
[j
],
5486 t
->operand_types
[op
]))
5491 /* If broadcast is supported in this instruction, we need to check if
5492 operand of one-element size isn't specified without broadcast. */
5493 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5495 /* Find memory operand. */
5496 for (op
= 0; op
< i
.operands
; op
++)
5497 if (i
.flags
[op
] & Operand_Mem
)
5499 gas_assert (op
< i
.operands
);
5500 /* Check size of the memory operand. */
5501 if (match_broadcast_size (t
, op
))
5503 i
.error
= broadcast_needed
;
5508 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5510 /* Check if requested masking is supported. */
5513 switch (t
->opcode_modifier
.masking
)
5517 case MERGING_MASKING
:
5518 if (i
.mask
->zeroing
)
5521 i
.error
= unsupported_masking
;
5525 case DYNAMIC_MASKING
:
5526 /* Memory destinations allow only merging masking. */
5527 if (i
.mask
->zeroing
&& i
.mem_operands
)
5529 /* Find memory operand. */
5530 for (op
= 0; op
< i
.operands
; op
++)
5531 if (i
.flags
[op
] & Operand_Mem
)
5533 gas_assert (op
< i
.operands
);
5534 if (op
== i
.operands
- 1)
5536 i
.error
= unsupported_masking
;
5546 /* Check if masking is applied to dest operand. */
5547 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5549 i
.error
= mask_not_on_destination
;
5556 if (!t
->opcode_modifier
.sae
5557 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5559 i
.error
= unsupported_rc_sae
;
5562 /* If the instruction has several immediate operands and one of
5563 them is rounding, the rounding operand should be the last
5564 immediate operand. */
5565 if (i
.imm_operands
> 1
5566 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5568 i
.error
= rc_sae_operand_not_last_imm
;
5573 /* Check vector Disp8 operand. */
5574 if (t
->opcode_modifier
.disp8memshift
5575 && i
.disp_encoding
!= disp_encoding_32bit
)
5578 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
5579 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
5580 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5583 const i386_operand_type
*type
= NULL
;
5586 for (op
= 0; op
< i
.operands
; op
++)
5587 if (i
.flags
[op
] & Operand_Mem
)
5589 if (t
->opcode_modifier
.evex
== EVEXLIG
)
5590 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
5591 else if (t
->operand_types
[op
].bitfield
.xmmword
5592 + t
->operand_types
[op
].bitfield
.ymmword
5593 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
5594 type
= &t
->operand_types
[op
];
5595 else if (!i
.types
[op
].bitfield
.unspecified
)
5596 type
= &i
.types
[op
];
5598 else if (i
.types
[op
].bitfield
.regsimd
5599 && t
->opcode_modifier
.evex
!= EVEXLIG
)
5601 if (i
.types
[op
].bitfield
.zmmword
)
5603 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
5605 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
5611 if (type
->bitfield
.zmmword
)
5613 else if (type
->bitfield
.ymmword
)
5615 else if (type
->bitfield
.xmmword
)
5619 /* For the check in fits_in_disp8(). */
5620 if (i
.memshift
== 0)
5624 for (op
= 0; op
< i
.operands
; op
++)
5625 if (operand_type_check (i
.types
[op
], disp
)
5626 && i
.op
[op
].disps
->X_op
== O_constant
)
5628 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
5630 i
.types
[op
].bitfield
.disp8
= 1;
5633 i
.types
[op
].bitfield
.disp8
= 0;
5642 /* Check if operands are valid for the instruction. Update VEX
5646 VEX_check_operands (const insn_template
*t
)
5648 if (i
.vec_encoding
== vex_encoding_evex
)
5650 /* This instruction must be encoded with EVEX prefix. */
5651 if (!is_evex_encoding (t
))
5653 i
.error
= unsupported
;
5659 if (!t
->opcode_modifier
.vex
)
5661 /* This instruction template doesn't have VEX prefix. */
5662 if (i
.vec_encoding
!= vex_encoding_default
)
5664 i
.error
= unsupported
;
5670 /* Check the special Imm4 cases; must be the first operand. */
5671 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
5673 if (i
.op
[0].imms
->X_op
!= O_constant
5674 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
5680 /* Turn off Imm<N> so that update_imm won't complain. */
5681 operand_type_set (&i
.types
[0], 0);
5687 static const insn_template
*
5688 match_template (char mnem_suffix
)
5690 /* Points to template once we've found it. */
5691 const insn_template
*t
;
5692 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
5693 i386_operand_type overlap4
;
5694 unsigned int found_reverse_match
;
5695 i386_opcode_modifier suffix_check
, mnemsuf_check
;
5696 i386_operand_type operand_types
[MAX_OPERANDS
];
5697 int addr_prefix_disp
;
5699 unsigned int found_cpu_match
, size_match
;
5700 unsigned int check_register
;
5701 enum i386_error specific_error
= 0;
5703 #if MAX_OPERANDS != 5
5704 # error "MAX_OPERANDS must be 5."
5707 found_reverse_match
= 0;
5708 addr_prefix_disp
= -1;
5710 memset (&suffix_check
, 0, sizeof (suffix_check
));
5711 if (intel_syntax
&& i
.broadcast
)
5713 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5714 suffix_check
.no_bsuf
= 1;
5715 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5716 suffix_check
.no_wsuf
= 1;
5717 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
5718 suffix_check
.no_ssuf
= 1;
5719 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5720 suffix_check
.no_lsuf
= 1;
5721 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5722 suffix_check
.no_qsuf
= 1;
5723 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
5724 suffix_check
.no_ldsuf
= 1;
5726 memset (&mnemsuf_check
, 0, sizeof (mnemsuf_check
));
5729 switch (mnem_suffix
)
5731 case BYTE_MNEM_SUFFIX
: mnemsuf_check
.no_bsuf
= 1; break;
5732 case WORD_MNEM_SUFFIX
: mnemsuf_check
.no_wsuf
= 1; break;
5733 case SHORT_MNEM_SUFFIX
: mnemsuf_check
.no_ssuf
= 1; break;
5734 case LONG_MNEM_SUFFIX
: mnemsuf_check
.no_lsuf
= 1; break;
5735 case QWORD_MNEM_SUFFIX
: mnemsuf_check
.no_qsuf
= 1; break;
5739 /* Must have right number of operands. */
5740 i
.error
= number_of_operands_mismatch
;
5742 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
5744 addr_prefix_disp
= -1;
5745 found_reverse_match
= 0;
5747 if (i
.operands
!= t
->operands
)
5750 /* Check processor support. */
5751 i
.error
= unsupported
;
5752 found_cpu_match
= (cpu_flags_match (t
)
5753 == CPU_FLAGS_PERFECT_MATCH
);
5754 if (!found_cpu_match
)
5757 /* Check AT&T mnemonic. */
5758 i
.error
= unsupported_with_intel_mnemonic
;
5759 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
5762 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
5763 i
.error
= unsupported_syntax
;
5764 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
5765 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
5766 || (intel64
&& t
->opcode_modifier
.amd64
)
5767 || (!intel64
&& t
->opcode_modifier
.intel64
))
5770 /* Check the suffix, except for some instructions in intel mode. */
5771 i
.error
= invalid_instruction_suffix
;
5772 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
5773 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
5774 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
5775 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
5776 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
5777 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
5778 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
5780 /* In Intel mode all mnemonic suffixes must be explicitly allowed. */
5781 if ((t
->opcode_modifier
.no_bsuf
&& mnemsuf_check
.no_bsuf
)
5782 || (t
->opcode_modifier
.no_wsuf
&& mnemsuf_check
.no_wsuf
)
5783 || (t
->opcode_modifier
.no_lsuf
&& mnemsuf_check
.no_lsuf
)
5784 || (t
->opcode_modifier
.no_ssuf
&& mnemsuf_check
.no_ssuf
)
5785 || (t
->opcode_modifier
.no_qsuf
&& mnemsuf_check
.no_qsuf
)
5786 || (t
->opcode_modifier
.no_ldsuf
&& mnemsuf_check
.no_ldsuf
))
5789 size_match
= operand_size_match (t
);
5793 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5794 operand_types
[j
] = t
->operand_types
[j
];
5796 /* In general, don't allow 64-bit operands in 32-bit mode. */
5797 if (i
.suffix
== QWORD_MNEM_SUFFIX
5798 && flag_code
!= CODE_64BIT
5800 ? (!t
->opcode_modifier
.ignoresize
5801 && !t
->opcode_modifier
.broadcast
5802 && !intel_float_operand (t
->name
))
5803 : intel_float_operand (t
->name
) != 2)
5804 && ((!operand_types
[0].bitfield
.regmmx
5805 && !operand_types
[0].bitfield
.regsimd
)
5806 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
5807 && !operand_types
[t
->operands
> 1].bitfield
.regsimd
))
5808 && (t
->base_opcode
!= 0x0fc7
5809 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
5812 /* In general, don't allow 32-bit operands on pre-386. */
5813 else if (i
.suffix
== LONG_MNEM_SUFFIX
5814 && !cpu_arch_flags
.bitfield
.cpui386
5816 ? (!t
->opcode_modifier
.ignoresize
5817 && !intel_float_operand (t
->name
))
5818 : intel_float_operand (t
->name
) != 2)
5819 && ((!operand_types
[0].bitfield
.regmmx
5820 && !operand_types
[0].bitfield
.regsimd
)
5821 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
5822 && !operand_types
[t
->operands
> 1].bitfield
.regsimd
)))
5825 /* Do not verify operands when there are none. */
5829 /* We've found a match; break out of loop. */
5833 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5834 into Disp32/Disp16/Disp32 operand. */
5835 if (i
.prefix
[ADDR_PREFIX
] != 0)
5837 /* There should be only one Disp operand. */
5841 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5843 if (operand_types
[j
].bitfield
.disp16
)
5845 addr_prefix_disp
= j
;
5846 operand_types
[j
].bitfield
.disp32
= 1;
5847 operand_types
[j
].bitfield
.disp16
= 0;
5853 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5855 if (operand_types
[j
].bitfield
.disp32
)
5857 addr_prefix_disp
= j
;
5858 operand_types
[j
].bitfield
.disp32
= 0;
5859 operand_types
[j
].bitfield
.disp16
= 1;
5865 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5867 if (operand_types
[j
].bitfield
.disp64
)
5869 addr_prefix_disp
= j
;
5870 operand_types
[j
].bitfield
.disp64
= 0;
5871 operand_types
[j
].bitfield
.disp32
= 1;
5879 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5880 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5883 /* We check register size if needed. */
5884 if (t
->opcode_modifier
.checkregsize
)
5886 check_register
= (1 << t
->operands
) - 1;
5888 check_register
&= ~(1 << i
.broadcast
->operand
);
5893 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5894 switch (t
->operands
)
5897 if (!operand_type_match (overlap0
, i
.types
[0]))
5901 /* xchg %eax, %eax is a special case. It is an alias for nop
5902 only in 32bit mode and we can use opcode 0x90. In 64bit
5903 mode, we can't use 0x90 for xchg %eax, %eax since it should
5904 zero-extend %eax to %rax. */
5905 if (flag_code
== CODE_64BIT
5906 && t
->base_opcode
== 0x90
5907 && i
.types
[0].bitfield
.acc
&& i
.types
[0].bitfield
.dword
5908 && i
.types
[1].bitfield
.acc
&& i
.types
[1].bitfield
.dword
)
5910 /* xrelease mov %eax, <disp> is another special case. It must not
5911 match the accumulator-only encoding of mov. */
5912 if (flag_code
!= CODE_64BIT
5914 && t
->base_opcode
== 0xa0
5915 && i
.types
[0].bitfield
.acc
5916 && (i
.flags
[1] & Operand_Mem
))
5921 if (!(size_match
& MATCH_STRAIGHT
))
5923 /* Reverse direction of operands if swapping is possible in the first
5924 place (operands need to be symmetric) and
5925 - the load form is requested, and the template is a store form,
5926 - the store form is requested, and the template is a load form,
5927 - the non-default (swapped) form is requested. */
5928 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
5929 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
5930 && !operand_type_all_zero (&overlap1
))
5931 switch (i
.dir_encoding
)
5933 case dir_encoding_load
:
5934 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5935 || t
->opcode_modifier
.regmem
)
5939 case dir_encoding_store
:
5940 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5941 && !t
->opcode_modifier
.regmem
)
5945 case dir_encoding_swap
:
5948 case dir_encoding_default
:
5951 /* If we want store form, we skip the current load. */
5952 if ((i
.dir_encoding
== dir_encoding_store
5953 || i
.dir_encoding
== dir_encoding_swap
)
5954 && i
.mem_operands
== 0
5955 && t
->opcode_modifier
.load
)
5960 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
5961 if (!operand_type_match (overlap0
, i
.types
[0])
5962 || !operand_type_match (overlap1
, i
.types
[1])
5963 || ((check_register
& 3) == 3
5964 && !operand_type_register_match (i
.types
[0],
5969 /* Check if other direction is valid ... */
5970 if (!t
->opcode_modifier
.d
)
5974 if (!(size_match
& MATCH_REVERSE
))
5976 /* Try reversing direction of operands. */
5977 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
5978 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
5979 if (!operand_type_match (overlap0
, i
.types
[0])
5980 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
5982 && !operand_type_register_match (i
.types
[0],
5983 operand_types
[i
.operands
- 1],
5984 i
.types
[i
.operands
- 1],
5987 /* Does not match either direction. */
5990 /* found_reverse_match holds which of D or FloatR
5992 if (!t
->opcode_modifier
.d
)
5993 found_reverse_match
= 0;
5994 else if (operand_types
[0].bitfield
.tbyte
)
5995 found_reverse_match
= Opcode_FloatD
;
5996 else if (operand_types
[0].bitfield
.xmmword
5997 || operand_types
[i
.operands
- 1].bitfield
.xmmword
5998 || operand_types
[0].bitfield
.regmmx
5999 || operand_types
[i
.operands
- 1].bitfield
.regmmx
6000 || is_any_vex_encoding(t
))
6001 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6002 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6004 found_reverse_match
= Opcode_D
;
6005 if (t
->opcode_modifier
.floatr
)
6006 found_reverse_match
|= Opcode_FloatR
;
6010 /* Found a forward 2 operand match here. */
6011 switch (t
->operands
)
6014 overlap4
= operand_type_and (i
.types
[4],
6018 overlap3
= operand_type_and (i
.types
[3],
6022 overlap2
= operand_type_and (i
.types
[2],
6027 switch (t
->operands
)
6030 if (!operand_type_match (overlap4
, i
.types
[4])
6031 || !operand_type_register_match (i
.types
[3],
6038 if (!operand_type_match (overlap3
, i
.types
[3])
6039 || ((check_register
& 0xa) == 0xa
6040 && !operand_type_register_match (i
.types
[1],
6044 || ((check_register
& 0xc) == 0xc
6045 && !operand_type_register_match (i
.types
[2],
6052 /* Here we make use of the fact that there are no
6053 reverse match 3 operand instructions. */
6054 if (!operand_type_match (overlap2
, i
.types
[2])
6055 || ((check_register
& 5) == 5
6056 && !operand_type_register_match (i
.types
[0],
6060 || ((check_register
& 6) == 6
6061 && !operand_type_register_match (i
.types
[1],
6069 /* Found either forward/reverse 2, 3 or 4 operand match here:
6070 slip through to break. */
6072 if (!found_cpu_match
)
6075 /* Check if vector and VEX operands are valid. */
6076 if (check_VecOperands (t
) || VEX_check_operands (t
))
6078 specific_error
= i
.error
;
6082 /* We've found a match; break out of loop. */
6086 if (t
== current_templates
->end
)
6088 /* We found no match. */
6089 const char *err_msg
;
6090 switch (specific_error
? specific_error
: i
.error
)
6094 case operand_size_mismatch
:
6095 err_msg
= _("operand size mismatch");
6097 case operand_type_mismatch
:
6098 err_msg
= _("operand type mismatch");
6100 case register_type_mismatch
:
6101 err_msg
= _("register type mismatch");
6103 case number_of_operands_mismatch
:
6104 err_msg
= _("number of operands mismatch");
6106 case invalid_instruction_suffix
:
6107 err_msg
= _("invalid instruction suffix");
6110 err_msg
= _("constant doesn't fit in 4 bits");
6112 case unsupported_with_intel_mnemonic
:
6113 err_msg
= _("unsupported with Intel mnemonic");
6115 case unsupported_syntax
:
6116 err_msg
= _("unsupported syntax");
6119 as_bad (_("unsupported instruction `%s'"),
6120 current_templates
->start
->name
);
6122 case invalid_vsib_address
:
6123 err_msg
= _("invalid VSIB address");
6125 case invalid_vector_register_set
:
6126 err_msg
= _("mask, index, and destination registers must be distinct");
6128 case unsupported_vector_index_register
:
6129 err_msg
= _("unsupported vector index register");
6131 case unsupported_broadcast
:
6132 err_msg
= _("unsupported broadcast");
6134 case broadcast_needed
:
6135 err_msg
= _("broadcast is needed for operand of such type");
6137 case unsupported_masking
:
6138 err_msg
= _("unsupported masking");
6140 case mask_not_on_destination
:
6141 err_msg
= _("mask not on destination operand");
6143 case no_default_mask
:
6144 err_msg
= _("default mask isn't allowed");
6146 case unsupported_rc_sae
:
6147 err_msg
= _("unsupported static rounding/sae");
6149 case rc_sae_operand_not_last_imm
:
6151 err_msg
= _("RC/SAE operand must precede immediate operands");
6153 err_msg
= _("RC/SAE operand must follow immediate operands");
6155 case invalid_register_operand
:
6156 err_msg
= _("invalid register operand");
6159 as_bad (_("%s for `%s'"), err_msg
,
6160 current_templates
->start
->name
);
6164 if (!quiet_warnings
)
6167 && (i
.types
[0].bitfield
.jumpabsolute
6168 != operand_types
[0].bitfield
.jumpabsolute
))
6170 as_warn (_("indirect %s without `*'"), t
->name
);
6173 if (t
->opcode_modifier
.isprefix
6174 && t
->opcode_modifier
.ignoresize
)
6176 /* Warn them that a data or address size prefix doesn't
6177 affect assembly of the next line of code. */
6178 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6182 /* Copy the template we found. */
6185 if (addr_prefix_disp
!= -1)
6186 i
.tm
.operand_types
[addr_prefix_disp
]
6187 = operand_types
[addr_prefix_disp
];
6189 if (found_reverse_match
)
6191 /* If we found a reverse match we must alter the opcode direction
6192 bit and clear/flip the regmem modifier one. found_reverse_match
6193 holds bits to change (different for int & float insns). */
6195 i
.tm
.base_opcode
^= found_reverse_match
;
6197 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6198 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6200 /* Certain SIMD insns have their load forms specified in the opcode
6201 table, and hence we need to _set_ RegMem instead of clearing it.
6202 We need to avoid setting the bit though on insns like KMOVW. */
6203 i
.tm
.opcode_modifier
.regmem
6204 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6205 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6206 && !i
.tm
.opcode_modifier
.regmem
;
6215 unsigned int mem_op
= i
.flags
[0] & Operand_Mem
? 0 : 1;
6217 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
6219 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
6221 as_bad (_("`%s' operand %d must use `%ses' segment"),
6223 intel_syntax
? i
.tm
.operands
- mem_op
: mem_op
+ 1,
6227 /* There's only ever one segment override allowed per instruction.
6228 This instruction possibly has a legal segment override on the
6229 second operand, so copy the segment to where non-string
6230 instructions store it, allowing common code. */
6231 i
.seg
[0] = i
.seg
[1];
6233 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
6235 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
6237 as_bad (_("`%s' operand %d must use `%ses' segment"),
6239 intel_syntax
? i
.tm
.operands
- mem_op
- 1 : mem_op
+ 2,
6248 process_suffix (void)
6250 /* If matched instruction specifies an explicit instruction mnemonic
6252 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6253 i
.suffix
= WORD_MNEM_SUFFIX
;
6254 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6255 i
.suffix
= LONG_MNEM_SUFFIX
;
6256 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6257 i
.suffix
= QWORD_MNEM_SUFFIX
;
6258 else if (i
.reg_operands
)
6260 /* If there's no instruction mnemonic suffix we try to invent one
6261 based on register operands. */
6264 /* We take i.suffix from the last register operand specified,
6265 Destination register type is more significant than source
6266 register type. crc32 in SSE4.2 prefers source register
6268 if (i
.tm
.base_opcode
== 0xf20f38f0
6269 && i
.types
[0].bitfield
.class == Reg
)
6271 if (i
.types
[0].bitfield
.byte
)
6272 i
.suffix
= BYTE_MNEM_SUFFIX
;
6273 else if (i
.types
[0].bitfield
.word
)
6274 i
.suffix
= WORD_MNEM_SUFFIX
;
6275 else if (i
.types
[0].bitfield
.dword
)
6276 i
.suffix
= LONG_MNEM_SUFFIX
;
6277 else if (i
.types
[0].bitfield
.qword
)
6278 i
.suffix
= QWORD_MNEM_SUFFIX
;
6285 if (i
.tm
.base_opcode
== 0xf20f38f0)
6287 /* We have to know the operand size for crc32. */
6288 as_bad (_("ambiguous memory operand size for `%s`"),
6293 for (op
= i
.operands
; --op
>= 0;)
6294 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
6295 && !i
.tm
.operand_types
[op
].bitfield
.shiftcount
)
6297 if (i
.types
[op
].bitfield
.class != Reg
)
6299 if (i
.types
[op
].bitfield
.byte
)
6300 i
.suffix
= BYTE_MNEM_SUFFIX
;
6301 else if (i
.types
[op
].bitfield
.word
)
6302 i
.suffix
= WORD_MNEM_SUFFIX
;
6303 else if (i
.types
[op
].bitfield
.dword
)
6304 i
.suffix
= LONG_MNEM_SUFFIX
;
6305 else if (i
.types
[op
].bitfield
.qword
)
6306 i
.suffix
= QWORD_MNEM_SUFFIX
;
6313 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6316 && i
.tm
.opcode_modifier
.ignoresize
6317 && i
.tm
.opcode_modifier
.no_bsuf
)
6319 else if (!check_byte_reg ())
6322 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6325 && i
.tm
.opcode_modifier
.ignoresize
6326 && i
.tm
.opcode_modifier
.no_lsuf
6327 && !i
.tm
.opcode_modifier
.todword
6328 && !i
.tm
.opcode_modifier
.toqword
)
6330 else if (!check_long_reg ())
6333 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6336 && i
.tm
.opcode_modifier
.ignoresize
6337 && i
.tm
.opcode_modifier
.no_qsuf
6338 && !i
.tm
.opcode_modifier
.todword
6339 && !i
.tm
.opcode_modifier
.toqword
)
6341 else if (!check_qword_reg ())
6344 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6347 && i
.tm
.opcode_modifier
.ignoresize
6348 && i
.tm
.opcode_modifier
.no_wsuf
)
6350 else if (!check_word_reg ())
6353 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
6354 /* Do nothing if the instruction is going to ignore the prefix. */
6359 else if (i
.tm
.opcode_modifier
.defaultsize
6361 /* exclude fldenv/frstor/fsave/fstenv */
6362 && i
.tm
.opcode_modifier
.no_ssuf
)
6364 if (stackop_size
== LONG_MNEM_SUFFIX
6365 && i
.tm
.base_opcode
== 0xcf)
6367 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6368 .code16gcc directive to support 16-bit mode with
6369 32-bit address. For IRET without a suffix, generate
6370 16-bit IRET (opcode 0xcf) to return from an interrupt
6372 i
.suffix
= WORD_MNEM_SUFFIX
;
6373 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6376 i
.suffix
= stackop_size
;
6378 else if (intel_syntax
6380 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
6381 || i
.tm
.opcode_modifier
.jumpbyte
6382 || i
.tm
.opcode_modifier
.jumpintersegment
6383 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6384 && i
.tm
.extension_opcode
<= 3)))
6389 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6391 i
.suffix
= QWORD_MNEM_SUFFIX
;
6396 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6397 i
.suffix
= LONG_MNEM_SUFFIX
;
6400 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6401 i
.suffix
= WORD_MNEM_SUFFIX
;
6410 if (i
.tm
.opcode_modifier
.w
)
6412 as_bad (_("no instruction mnemonic suffix given and "
6413 "no register operands; can't size instruction"));
6419 unsigned int suffixes
;
6421 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6422 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6424 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6426 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6428 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6430 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6433 /* There are more than suffix matches. */
6434 if (i
.tm
.opcode_modifier
.w
6435 || ((suffixes
& (suffixes
- 1))
6436 && !i
.tm
.opcode_modifier
.defaultsize
6437 && !i
.tm
.opcode_modifier
.ignoresize
))
6439 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6445 /* Change the opcode based on the operand size given by i.suffix. */
6448 /* Size floating point instruction. */
6449 case LONG_MNEM_SUFFIX
:
6450 if (i
.tm
.opcode_modifier
.floatmf
)
6452 i
.tm
.base_opcode
^= 4;
6456 case WORD_MNEM_SUFFIX
:
6457 case QWORD_MNEM_SUFFIX
:
6458 /* It's not a byte, select word/dword operation. */
6459 if (i
.tm
.opcode_modifier
.w
)
6461 if (i
.tm
.opcode_modifier
.shortform
)
6462 i
.tm
.base_opcode
|= 8;
6464 i
.tm
.base_opcode
|= 1;
6467 case SHORT_MNEM_SUFFIX
:
6468 /* Now select between word & dword operations via the operand
6469 size prefix, except for instructions that will ignore this
6471 if (i
.reg_operands
> 0
6472 && i
.types
[0].bitfield
.class == Reg
6473 && i
.tm
.opcode_modifier
.addrprefixopreg
6474 && (i
.tm
.opcode_modifier
.immext
6475 || i
.operands
== 1))
6477 /* The address size override prefix changes the size of the
6479 if ((flag_code
== CODE_32BIT
6480 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
6481 || (flag_code
!= CODE_32BIT
6482 && i
.op
[0].regs
->reg_type
.bitfield
.dword
))
6483 if (!add_prefix (ADDR_PREFIX_OPCODE
))
6486 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
6487 && !i
.tm
.opcode_modifier
.ignoresize
6488 && !i
.tm
.opcode_modifier
.floatmf
6489 && !is_any_vex_encoding (&i
.tm
)
6490 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
6491 || (flag_code
== CODE_64BIT
6492 && i
.tm
.opcode_modifier
.jumpbyte
)))
6494 unsigned int prefix
= DATA_PREFIX_OPCODE
;
6496 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
6497 prefix
= ADDR_PREFIX_OPCODE
;
6499 if (!add_prefix (prefix
))
6503 /* Set mode64 for an operand. */
6504 if (i
.suffix
== QWORD_MNEM_SUFFIX
6505 && flag_code
== CODE_64BIT
6506 && !i
.tm
.opcode_modifier
.norex64
6507 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6509 && ! (i
.operands
== 2
6510 && i
.tm
.base_opcode
== 0x90
6511 && i
.tm
.extension_opcode
== None
6512 && i
.types
[0].bitfield
.acc
&& i
.types
[0].bitfield
.qword
6513 && i
.types
[1].bitfield
.acc
&& i
.types
[1].bitfield
.qword
))
6519 if (i
.reg_operands
!= 0
6521 && i
.tm
.opcode_modifier
.addrprefixopreg
6522 && !i
.tm
.opcode_modifier
.immext
)
6524 /* Check invalid register operand when the address size override
6525 prefix changes the size of register operands. */
6527 enum { need_word
, need_dword
, need_qword
} need
;
6529 if (flag_code
== CODE_32BIT
)
6530 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
6533 if (i
.prefix
[ADDR_PREFIX
])
6536 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
6539 for (op
= 0; op
< i
.operands
; op
++)
6540 if (i
.types
[op
].bitfield
.class == Reg
6541 && ((need
== need_word
6542 && !i
.op
[op
].regs
->reg_type
.bitfield
.word
)
6543 || (need
== need_dword
6544 && !i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
6545 || (need
== need_qword
6546 && !i
.op
[op
].regs
->reg_type
.bitfield
.qword
)))
6548 as_bad (_("invalid register operand size for `%s'"),
6558 check_byte_reg (void)
6562 for (op
= i
.operands
; --op
>= 0;)
6564 /* Skip non-register operands. */
6565 if (i
.types
[op
].bitfield
.class != Reg
)
6568 /* If this is an eight bit register, it's OK. If it's the 16 or
6569 32 bit version of an eight bit register, we will just use the
6570 low portion, and that's OK too. */
6571 if (i
.types
[op
].bitfield
.byte
)
6574 /* I/O port address operands are OK too. */
6575 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
6578 /* crc32 doesn't generate this warning. */
6579 if (i
.tm
.base_opcode
== 0xf20f38f0)
6582 if ((i
.types
[op
].bitfield
.word
6583 || i
.types
[op
].bitfield
.dword
6584 || i
.types
[op
].bitfield
.qword
)
6585 && i
.op
[op
].regs
->reg_num
< 4
6586 /* Prohibit these changes in 64bit mode, since the lowering
6587 would be more complicated. */
6588 && flag_code
!= CODE_64BIT
)
6590 #if REGISTER_WARNINGS
6591 if (!quiet_warnings
)
6592 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6594 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.word
6595 ? REGNAM_AL
- REGNAM_AX
6596 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
6598 i
.op
[op
].regs
->reg_name
,
6603 /* Any other register is bad. */
6604 if (i
.types
[op
].bitfield
.class == Reg
6605 || i
.types
[op
].bitfield
.regmmx
6606 || i
.types
[op
].bitfield
.regsimd
6607 || i
.types
[op
].bitfield
.class == SReg
6608 || i
.types
[op
].bitfield
.class == RegCR
6609 || i
.types
[op
].bitfield
.class == RegDR
6610 || i
.types
[op
].bitfield
.class == RegTR
)
6612 as_bad (_("`%s%s' not allowed with `%s%c'"),
6614 i
.op
[op
].regs
->reg_name
,
6624 check_long_reg (void)
6628 for (op
= i
.operands
; --op
>= 0;)
6629 /* Skip non-register operands. */
6630 if (i
.types
[op
].bitfield
.class != Reg
)
6632 /* Reject eight bit registers, except where the template requires
6633 them. (eg. movzb) */
6634 else if (i
.types
[op
].bitfield
.byte
6635 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6636 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6637 && (i
.tm
.operand_types
[op
].bitfield
.word
6638 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6640 as_bad (_("`%s%s' not allowed with `%s%c'"),
6642 i
.op
[op
].regs
->reg_name
,
6647 /* Warn if the e prefix on a general reg is missing. */
6648 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6649 && i
.types
[op
].bitfield
.word
6650 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6651 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6652 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6654 /* Prohibit these changes in the 64bit mode, since the
6655 lowering is more complicated. */
6656 if (flag_code
== CODE_64BIT
)
6658 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6659 register_prefix
, i
.op
[op
].regs
->reg_name
,
6663 #if REGISTER_WARNINGS
6664 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6666 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
6667 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6670 /* Warn if the r prefix on a general reg is present. */
6671 else if (i
.types
[op
].bitfield
.qword
6672 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6673 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6674 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6677 && i
.tm
.opcode_modifier
.toqword
6678 && !i
.types
[0].bitfield
.regsimd
)
6680 /* Convert to QWORD. We want REX byte. */
6681 i
.suffix
= QWORD_MNEM_SUFFIX
;
6685 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6686 register_prefix
, i
.op
[op
].regs
->reg_name
,
6695 check_qword_reg (void)
6699 for (op
= i
.operands
; --op
>= 0; )
6700 /* Skip non-register operands. */
6701 if (i
.types
[op
].bitfield
.class != Reg
)
6703 /* Reject eight bit registers, except where the template requires
6704 them. (eg. movzb) */
6705 else if (i
.types
[op
].bitfield
.byte
6706 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6707 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6708 && (i
.tm
.operand_types
[op
].bitfield
.word
6709 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6711 as_bad (_("`%s%s' not allowed with `%s%c'"),
6713 i
.op
[op
].regs
->reg_name
,
6718 /* Warn if the r prefix on a general reg is missing. */
6719 else if ((i
.types
[op
].bitfield
.word
6720 || i
.types
[op
].bitfield
.dword
)
6721 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6722 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6723 && i
.tm
.operand_types
[op
].bitfield
.qword
)
6725 /* Prohibit these changes in the 64bit mode, since the
6726 lowering is more complicated. */
6728 && i
.tm
.opcode_modifier
.todword
6729 && !i
.types
[0].bitfield
.regsimd
)
6731 /* Convert to DWORD. We don't want REX byte. */
6732 i
.suffix
= LONG_MNEM_SUFFIX
;
6736 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6737 register_prefix
, i
.op
[op
].regs
->reg_name
,
6746 check_word_reg (void)
6749 for (op
= i
.operands
; --op
>= 0;)
6750 /* Skip non-register operands. */
6751 if (i
.types
[op
].bitfield
.class != Reg
)
6753 /* Reject eight bit registers, except where the template requires
6754 them. (eg. movzb) */
6755 else if (i
.types
[op
].bitfield
.byte
6756 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6757 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6758 && (i
.tm
.operand_types
[op
].bitfield
.word
6759 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6761 as_bad (_("`%s%s' not allowed with `%s%c'"),
6763 i
.op
[op
].regs
->reg_name
,
6768 /* Warn if the e or r prefix on a general reg is present. */
6769 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6770 && (i
.types
[op
].bitfield
.dword
6771 || i
.types
[op
].bitfield
.qword
)
6772 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6773 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6774 && i
.tm
.operand_types
[op
].bitfield
.word
)
6776 /* Prohibit these changes in the 64bit mode, since the
6777 lowering is more complicated. */
6778 if (flag_code
== CODE_64BIT
)
6780 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6781 register_prefix
, i
.op
[op
].regs
->reg_name
,
6785 #if REGISTER_WARNINGS
6786 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6788 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
6789 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6796 update_imm (unsigned int j
)
6798 i386_operand_type overlap
= i
.types
[j
];
6799 if ((overlap
.bitfield
.imm8
6800 || overlap
.bitfield
.imm8s
6801 || overlap
.bitfield
.imm16
6802 || overlap
.bitfield
.imm32
6803 || overlap
.bitfield
.imm32s
6804 || overlap
.bitfield
.imm64
)
6805 && !operand_type_equal (&overlap
, &imm8
)
6806 && !operand_type_equal (&overlap
, &imm8s
)
6807 && !operand_type_equal (&overlap
, &imm16
)
6808 && !operand_type_equal (&overlap
, &imm32
)
6809 && !operand_type_equal (&overlap
, &imm32s
)
6810 && !operand_type_equal (&overlap
, &imm64
))
6814 i386_operand_type temp
;
6816 operand_type_set (&temp
, 0);
6817 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6819 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
6820 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
6822 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6823 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
6824 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6826 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
6827 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
6830 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
6833 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
6834 || operand_type_equal (&overlap
, &imm16_32
)
6835 || operand_type_equal (&overlap
, &imm16_32s
))
6837 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6842 if (!operand_type_equal (&overlap
, &imm8
)
6843 && !operand_type_equal (&overlap
, &imm8s
)
6844 && !operand_type_equal (&overlap
, &imm16
)
6845 && !operand_type_equal (&overlap
, &imm32
)
6846 && !operand_type_equal (&overlap
, &imm32s
)
6847 && !operand_type_equal (&overlap
, &imm64
))
6849 as_bad (_("no instruction mnemonic suffix given; "
6850 "can't determine immediate size"));
6854 i
.types
[j
] = overlap
;
6864 /* Update the first 2 immediate operands. */
6865 n
= i
.operands
> 2 ? 2 : i
.operands
;
6868 for (j
= 0; j
< n
; j
++)
6869 if (update_imm (j
) == 0)
6872 /* The 3rd operand can't be immediate operand. */
6873 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
6880 process_operands (void)
6882 /* Default segment register this instruction will use for memory
6883 accesses. 0 means unknown. This is only for optimizing out
6884 unnecessary segment overrides. */
6885 const seg_entry
*default_seg
= 0;
6887 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
6889 unsigned int dupl
= i
.operands
;
6890 unsigned int dest
= dupl
- 1;
6893 /* The destination must be an xmm register. */
6894 gas_assert (i
.reg_operands
6895 && MAX_OPERANDS
> dupl
6896 && operand_type_equal (&i
.types
[dest
], ®xmm
));
6898 if (i
.tm
.operand_types
[0].bitfield
.acc
6899 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6901 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
6903 /* Keep xmm0 for instructions with VEX prefix and 3
6905 i
.tm
.operand_types
[0].bitfield
.acc
= 0;
6906 i
.tm
.operand_types
[0].bitfield
.regsimd
= 1;
6911 /* We remove the first xmm0 and keep the number of
6912 operands unchanged, which in fact duplicates the
6914 for (j
= 1; j
< i
.operands
; j
++)
6916 i
.op
[j
- 1] = i
.op
[j
];
6917 i
.types
[j
- 1] = i
.types
[j
];
6918 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6919 i
.flags
[j
- 1] = i
.flags
[j
];
6923 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6925 gas_assert ((MAX_OPERANDS
- 1) > dupl
6926 && (i
.tm
.opcode_modifier
.vexsources
6929 /* Add the implicit xmm0 for instructions with VEX prefix
6931 for (j
= i
.operands
; j
> 0; j
--)
6933 i
.op
[j
] = i
.op
[j
- 1];
6934 i
.types
[j
] = i
.types
[j
- 1];
6935 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6936 i
.flags
[j
] = i
.flags
[j
- 1];
6939 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6940 i
.types
[0] = regxmm
;
6941 i
.tm
.operand_types
[0] = regxmm
;
6944 i
.reg_operands
+= 2;
6949 i
.op
[dupl
] = i
.op
[dest
];
6950 i
.types
[dupl
] = i
.types
[dest
];
6951 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6952 i
.flags
[dupl
] = i
.flags
[dest
];
6961 i
.op
[dupl
] = i
.op
[dest
];
6962 i
.types
[dupl
] = i
.types
[dest
];
6963 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6964 i
.flags
[dupl
] = i
.flags
[dest
];
6967 if (i
.tm
.opcode_modifier
.immext
)
6970 else if (i
.tm
.operand_types
[0].bitfield
.acc
6971 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6975 for (j
= 1; j
< i
.operands
; j
++)
6977 i
.op
[j
- 1] = i
.op
[j
];
6978 i
.types
[j
- 1] = i
.types
[j
];
6980 /* We need to adjust fields in i.tm since they are used by
6981 build_modrm_byte. */
6982 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6984 i
.flags
[j
- 1] = i
.flags
[j
];
6991 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
6993 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
6995 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
6996 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.regsimd
);
6997 regnum
= register_number (i
.op
[1].regs
);
6998 first_reg_in_group
= regnum
& ~3;
6999 last_reg_in_group
= first_reg_in_group
+ 3;
7000 if (regnum
!= first_reg_in_group
)
7001 as_warn (_("source register `%s%s' implicitly denotes"
7002 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7003 register_prefix
, i
.op
[1].regs
->reg_name
,
7004 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7005 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7008 else if (i
.tm
.opcode_modifier
.regkludge
)
7010 /* The imul $imm, %reg instruction is converted into
7011 imul $imm, %reg, %reg, and the clr %reg instruction
7012 is converted into xor %reg, %reg. */
7014 unsigned int first_reg_op
;
7016 if (operand_type_check (i
.types
[0], reg
))
7020 /* Pretend we saw the extra register operand. */
7021 gas_assert (i
.reg_operands
== 1
7022 && i
.op
[first_reg_op
+ 1].regs
== 0);
7023 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7024 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7029 if (i
.tm
.opcode_modifier
.modrm
)
7031 /* The opcode is completed (modulo i.tm.extension_opcode which
7032 must be put into the modrm byte). Now, we make the modrm and
7033 index base bytes based on all the info we've collected. */
7035 default_seg
= build_modrm_byte ();
7037 else if (i
.types
[0].bitfield
.class == SReg
)
7039 if (flag_code
!= CODE_64BIT
7040 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7041 && i
.op
[0].regs
->reg_num
== 1
7042 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7043 && i
.op
[0].regs
->reg_num
< 4)
7045 as_bad (_("you can't `%s %s%s'"),
7046 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7049 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7051 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7052 i
.tm
.opcode_length
= 2;
7054 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7056 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7060 else if (i
.tm
.opcode_modifier
.isstring
)
7062 /* For the string instructions that allow a segment override
7063 on one of their operands, the default segment is ds. */
7066 else if (i
.tm
.opcode_modifier
.shortform
)
7068 /* The register or float register operand is in operand
7070 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7072 /* Register goes in low 3 bits of opcode. */
7073 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7074 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7076 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7078 /* Warn about some common errors, but press on regardless.
7079 The first case can be generated by gcc (<= 2.8.1). */
7080 if (i
.operands
== 2)
7082 /* Reversed arguments on faddp, fsubp, etc. */
7083 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7084 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7085 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7089 /* Extraneous `l' suffix on fp insn. */
7090 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7091 register_prefix
, i
.op
[0].regs
->reg_name
);
7096 if (i
.tm
.base_opcode
== 0x8d /* lea */
7099 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7101 /* If a segment was explicitly specified, and the specified segment
7102 is not the default, use an opcode prefix to select it. If we
7103 never figured out what the default segment is, then default_seg
7104 will be zero at this point, and the specified segment prefix will
7106 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
7108 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7114 static const seg_entry
*
7115 build_modrm_byte (void)
7117 const seg_entry
*default_seg
= 0;
7118 unsigned int source
, dest
;
7121 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7124 unsigned int nds
, reg_slot
;
7127 dest
= i
.operands
- 1;
7130 /* There are 2 kinds of instructions:
7131 1. 5 operands: 4 register operands or 3 register operands
7132 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7133 VexW0 or VexW1. The destination must be either XMM, YMM or
7135 2. 4 operands: 4 register operands or 3 register operands
7136 plus 1 memory operand, with VexXDS. */
7137 gas_assert ((i
.reg_operands
== 4
7138 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7139 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7140 && i
.tm
.opcode_modifier
.vexw
7141 && i
.tm
.operand_types
[dest
].bitfield
.regsimd
);
7143 /* If VexW1 is set, the first non-immediate operand is the source and
7144 the second non-immediate one is encoded in the immediate operand. */
7145 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7147 source
= i
.imm_operands
;
7148 reg_slot
= i
.imm_operands
+ 1;
7152 source
= i
.imm_operands
+ 1;
7153 reg_slot
= i
.imm_operands
;
7156 if (i
.imm_operands
== 0)
7158 /* When there is no immediate operand, generate an 8bit
7159 immediate operand to encode the first operand. */
7160 exp
= &im_expressions
[i
.imm_operands
++];
7161 i
.op
[i
.operands
].imms
= exp
;
7162 i
.types
[i
.operands
] = imm8
;
7165 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.regsimd
);
7166 exp
->X_op
= O_constant
;
7167 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7168 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7172 gas_assert (i
.imm_operands
== 1);
7173 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7174 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7176 /* Turn on Imm8 again so that output_imm will generate it. */
7177 i
.types
[0].bitfield
.imm8
= 1;
7179 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.regsimd
);
7180 i
.op
[0].imms
->X_add_number
7181 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7182 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7185 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.regsimd
);
7186 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7191 /* i.reg_operands MUST be the number of real register operands;
7192 implicit registers do not count. If there are 3 register
7193 operands, it must be a instruction with VexNDS. For a
7194 instruction with VexNDD, the destination register is encoded
7195 in VEX prefix. If there are 4 register operands, it must be
7196 a instruction with VEX prefix and 3 sources. */
7197 if (i
.mem_operands
== 0
7198 && ((i
.reg_operands
== 2
7199 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7200 || (i
.reg_operands
== 3
7201 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7202 || (i
.reg_operands
== 4 && vex_3_sources
)))
7210 /* When there are 3 operands, one of them may be immediate,
7211 which may be the first or the last operand. Otherwise,
7212 the first operand must be shift count register (cl) or it
7213 is an instruction with VexNDS. */
7214 gas_assert (i
.imm_operands
== 1
7215 || (i
.imm_operands
== 0
7216 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7217 || i
.types
[0].bitfield
.shiftcount
)));
7218 if (operand_type_check (i
.types
[0], imm
)
7219 || i
.types
[0].bitfield
.shiftcount
)
7225 /* When there are 4 operands, the first two must be 8bit
7226 immediate operands. The source operand will be the 3rd
7229 For instructions with VexNDS, if the first operand
7230 an imm8, the source operand is the 2nd one. If the last
7231 operand is imm8, the source operand is the first one. */
7232 gas_assert ((i
.imm_operands
== 2
7233 && i
.types
[0].bitfield
.imm8
7234 && i
.types
[1].bitfield
.imm8
)
7235 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7236 && i
.imm_operands
== 1
7237 && (i
.types
[0].bitfield
.imm8
7238 || i
.types
[i
.operands
- 1].bitfield
.imm8
7240 if (i
.imm_operands
== 2)
7244 if (i
.types
[0].bitfield
.imm8
)
7251 if (is_evex_encoding (&i
.tm
))
7253 /* For EVEX instructions, when there are 5 operands, the
7254 first one must be immediate operand. If the second one
7255 is immediate operand, the source operand is the 3th
7256 one. If the last one is immediate operand, the source
7257 operand is the 2nd one. */
7258 gas_assert (i
.imm_operands
== 2
7259 && i
.tm
.opcode_modifier
.sae
7260 && operand_type_check (i
.types
[0], imm
));
7261 if (operand_type_check (i
.types
[1], imm
))
7263 else if (operand_type_check (i
.types
[4], imm
))
7277 /* RC/SAE operand could be between DEST and SRC. That happens
7278 when one operand is GPR and the other one is XMM/YMM/ZMM
7280 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7283 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7285 /* For instructions with VexNDS, the register-only source
7286 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7287 register. It is encoded in VEX prefix. */
7289 i386_operand_type op
;
7292 /* Check register-only source operand when two source
7293 operands are swapped. */
7294 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7295 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7303 op
= i
.tm
.operand_types
[vvvv
];
7304 if ((dest
+ 1) >= i
.operands
7305 || ((op
.bitfield
.class != Reg
7306 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7307 && !op
.bitfield
.regsimd
7308 && !operand_type_equal (&op
, ®mask
)))
7310 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7316 /* One of the register operands will be encoded in the i.rm.reg
7317 field, the other in the combined i.rm.mode and i.rm.regmem
7318 fields. If no form of this instruction supports a memory
7319 destination operand, then we assume the source operand may
7320 sometimes be a memory operand and so we need to store the
7321 destination in the i.rm.reg field. */
7322 if (!i
.tm
.opcode_modifier
.regmem
7323 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7325 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7326 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7327 if (i
.op
[dest
].regs
->reg_type
.bitfield
.regmmx
7328 || i
.op
[source
].regs
->reg_type
.bitfield
.regmmx
)
7329 i
.has_regmmx
= TRUE
;
7330 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.regsimd
7331 || i
.op
[source
].regs
->reg_type
.bitfield
.regsimd
)
7333 if (i
.types
[dest
].bitfield
.zmmword
7334 || i
.types
[source
].bitfield
.zmmword
)
7335 i
.has_regzmm
= TRUE
;
7336 else if (i
.types
[dest
].bitfield
.ymmword
7337 || i
.types
[source
].bitfield
.ymmword
)
7338 i
.has_regymm
= TRUE
;
7340 i
.has_regxmm
= TRUE
;
7342 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7344 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7346 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7348 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7353 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7354 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7355 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7357 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7359 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7361 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7364 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7366 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7369 add_prefix (LOCK_PREFIX_OPCODE
);
7373 { /* If it's not 2 reg operands... */
7378 unsigned int fake_zero_displacement
= 0;
7381 for (op
= 0; op
< i
.operands
; op
++)
7382 if (i
.flags
[op
] & Operand_Mem
)
7384 gas_assert (op
< i
.operands
);
7386 if (i
.tm
.opcode_modifier
.vecsib
)
7388 if (i
.index_reg
->reg_num
== RegIZ
)
7391 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7394 i
.sib
.base
= NO_BASE_REGISTER
;
7395 i
.sib
.scale
= i
.log2_scale_factor
;
7396 i
.types
[op
].bitfield
.disp8
= 0;
7397 i
.types
[op
].bitfield
.disp16
= 0;
7398 i
.types
[op
].bitfield
.disp64
= 0;
7399 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7401 /* Must be 32 bit */
7402 i
.types
[op
].bitfield
.disp32
= 1;
7403 i
.types
[op
].bitfield
.disp32s
= 0;
7407 i
.types
[op
].bitfield
.disp32
= 0;
7408 i
.types
[op
].bitfield
.disp32s
= 1;
7411 i
.sib
.index
= i
.index_reg
->reg_num
;
7412 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7414 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7420 if (i
.base_reg
== 0)
7423 if (!i
.disp_operands
)
7424 fake_zero_displacement
= 1;
7425 if (i
.index_reg
== 0)
7427 i386_operand_type newdisp
;
7429 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7430 /* Operand is just <disp> */
7431 if (flag_code
== CODE_64BIT
)
7433 /* 64bit mode overwrites the 32bit absolute
7434 addressing by RIP relative addressing and
7435 absolute addressing is encoded by one of the
7436 redundant SIB forms. */
7437 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7438 i
.sib
.base
= NO_BASE_REGISTER
;
7439 i
.sib
.index
= NO_INDEX_REGISTER
;
7440 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7442 else if ((flag_code
== CODE_16BIT
)
7443 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7445 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7450 i
.rm
.regmem
= NO_BASE_REGISTER
;
7453 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7454 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7456 else if (!i
.tm
.opcode_modifier
.vecsib
)
7458 /* !i.base_reg && i.index_reg */
7459 if (i
.index_reg
->reg_num
== RegIZ
)
7460 i
.sib
.index
= NO_INDEX_REGISTER
;
7462 i
.sib
.index
= i
.index_reg
->reg_num
;
7463 i
.sib
.base
= NO_BASE_REGISTER
;
7464 i
.sib
.scale
= i
.log2_scale_factor
;
7465 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7466 i
.types
[op
].bitfield
.disp8
= 0;
7467 i
.types
[op
].bitfield
.disp16
= 0;
7468 i
.types
[op
].bitfield
.disp64
= 0;
7469 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7471 /* Must be 32 bit */
7472 i
.types
[op
].bitfield
.disp32
= 1;
7473 i
.types
[op
].bitfield
.disp32s
= 0;
7477 i
.types
[op
].bitfield
.disp32
= 0;
7478 i
.types
[op
].bitfield
.disp32s
= 1;
7480 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7484 /* RIP addressing for 64bit mode. */
7485 else if (i
.base_reg
->reg_num
== RegIP
)
7487 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7488 i
.rm
.regmem
= NO_BASE_REGISTER
;
7489 i
.types
[op
].bitfield
.disp8
= 0;
7490 i
.types
[op
].bitfield
.disp16
= 0;
7491 i
.types
[op
].bitfield
.disp32
= 0;
7492 i
.types
[op
].bitfield
.disp32s
= 1;
7493 i
.types
[op
].bitfield
.disp64
= 0;
7494 i
.flags
[op
] |= Operand_PCrel
;
7495 if (! i
.disp_operands
)
7496 fake_zero_displacement
= 1;
7498 else if (i
.base_reg
->reg_type
.bitfield
.word
)
7500 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7501 switch (i
.base_reg
->reg_num
)
7504 if (i
.index_reg
== 0)
7506 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7507 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
7511 if (i
.index_reg
== 0)
7514 if (operand_type_check (i
.types
[op
], disp
) == 0)
7516 /* fake (%bp) into 0(%bp) */
7517 i
.types
[op
].bitfield
.disp8
= 1;
7518 fake_zero_displacement
= 1;
7521 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7522 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
7524 default: /* (%si) -> 4 or (%di) -> 5 */
7525 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
7527 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7529 else /* i.base_reg and 32/64 bit mode */
7531 if (flag_code
== CODE_64BIT
7532 && operand_type_check (i
.types
[op
], disp
))
7534 i
.types
[op
].bitfield
.disp16
= 0;
7535 i
.types
[op
].bitfield
.disp64
= 0;
7536 if (i
.prefix
[ADDR_PREFIX
] == 0)
7538 i
.types
[op
].bitfield
.disp32
= 0;
7539 i
.types
[op
].bitfield
.disp32s
= 1;
7543 i
.types
[op
].bitfield
.disp32
= 1;
7544 i
.types
[op
].bitfield
.disp32s
= 0;
7548 if (!i
.tm
.opcode_modifier
.vecsib
)
7549 i
.rm
.regmem
= i
.base_reg
->reg_num
;
7550 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
7552 i
.sib
.base
= i
.base_reg
->reg_num
;
7553 /* x86-64 ignores REX prefix bit here to avoid decoder
7555 if (!(i
.base_reg
->reg_flags
& RegRex
)
7556 && (i
.base_reg
->reg_num
== EBP_REG_NUM
7557 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
7559 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
7561 fake_zero_displacement
= 1;
7562 i
.types
[op
].bitfield
.disp8
= 1;
7564 i
.sib
.scale
= i
.log2_scale_factor
;
7565 if (i
.index_reg
== 0)
7567 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7568 /* <disp>(%esp) becomes two byte modrm with no index
7569 register. We've already stored the code for esp
7570 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7571 Any base register besides %esp will not use the
7572 extra modrm byte. */
7573 i
.sib
.index
= NO_INDEX_REGISTER
;
7575 else if (!i
.tm
.opcode_modifier
.vecsib
)
7577 if (i
.index_reg
->reg_num
== RegIZ
)
7578 i
.sib
.index
= NO_INDEX_REGISTER
;
7580 i
.sib
.index
= i
.index_reg
->reg_num
;
7581 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7582 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7587 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
7588 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
7592 if (!fake_zero_displacement
7596 fake_zero_displacement
= 1;
7597 if (i
.disp_encoding
== disp_encoding_8bit
)
7598 i
.types
[op
].bitfield
.disp8
= 1;
7600 i
.types
[op
].bitfield
.disp32
= 1;
7602 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7606 if (fake_zero_displacement
)
7608 /* Fakes a zero displacement assuming that i.types[op]
7609 holds the correct displacement size. */
7612 gas_assert (i
.op
[op
].disps
== 0);
7613 exp
= &disp_expressions
[i
.disp_operands
++];
7614 i
.op
[op
].disps
= exp
;
7615 exp
->X_op
= O_constant
;
7616 exp
->X_add_number
= 0;
7617 exp
->X_add_symbol
= (symbolS
*) 0;
7618 exp
->X_op_symbol
= (symbolS
*) 0;
7626 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
7628 if (operand_type_check (i
.types
[0], imm
))
7629 i
.vex
.register_specifier
= NULL
;
7632 /* VEX.vvvv encodes one of the sources when the first
7633 operand is not an immediate. */
7634 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7635 i
.vex
.register_specifier
= i
.op
[0].regs
;
7637 i
.vex
.register_specifier
= i
.op
[1].regs
;
7640 /* Destination is a XMM register encoded in the ModRM.reg
7642 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
7643 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
7646 /* ModRM.rm and VEX.B encodes the other source. */
7647 if (!i
.mem_operands
)
7651 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7652 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7654 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
7656 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7660 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
7662 i
.vex
.register_specifier
= i
.op
[2].regs
;
7663 if (!i
.mem_operands
)
7666 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7667 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7671 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7672 (if any) based on i.tm.extension_opcode. Again, we must be
7673 careful to make sure that segment/control/debug/test/MMX
7674 registers are coded into the i.rm.reg field. */
7675 else if (i
.reg_operands
)
7678 unsigned int vex_reg
= ~0;
7680 for (op
= 0; op
< i
.operands
; op
++)
7682 if (i
.types
[op
].bitfield
.class == Reg
7683 || i
.types
[op
].bitfield
.regbnd
7684 || i
.types
[op
].bitfield
.regmask
7685 || i
.types
[op
].bitfield
.class == SReg
7686 || i
.types
[op
].bitfield
.class == RegCR
7687 || i
.types
[op
].bitfield
.class == RegDR
7688 || i
.types
[op
].bitfield
.class == RegTR
)
7690 if (i
.types
[op
].bitfield
.regsimd
)
7692 if (i
.types
[op
].bitfield
.zmmword
)
7693 i
.has_regzmm
= TRUE
;
7694 else if (i
.types
[op
].bitfield
.ymmword
)
7695 i
.has_regymm
= TRUE
;
7697 i
.has_regxmm
= TRUE
;
7700 if (i
.types
[op
].bitfield
.regmmx
)
7702 i
.has_regmmx
= TRUE
;
7709 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7711 /* For instructions with VexNDS, the register-only
7712 source operand is encoded in VEX prefix. */
7713 gas_assert (mem
!= (unsigned int) ~0);
7718 gas_assert (op
< i
.operands
);
7722 /* Check register-only source operand when two source
7723 operands are swapped. */
7724 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
7725 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
7729 gas_assert (mem
== (vex_reg
+ 1)
7730 && op
< i
.operands
);
7735 gas_assert (vex_reg
< i
.operands
);
7739 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
7741 /* For instructions with VexNDD, the register destination
7742 is encoded in VEX prefix. */
7743 if (i
.mem_operands
== 0)
7745 /* There is no memory operand. */
7746 gas_assert ((op
+ 2) == i
.operands
);
7751 /* There are only 2 non-immediate operands. */
7752 gas_assert (op
< i
.imm_operands
+ 2
7753 && i
.operands
== i
.imm_operands
+ 2);
7754 vex_reg
= i
.imm_operands
+ 1;
7758 gas_assert (op
< i
.operands
);
7760 if (vex_reg
!= (unsigned int) ~0)
7762 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
7764 if ((type
->bitfield
.class != Reg
7765 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
7766 && !type
->bitfield
.regsimd
7767 && !operand_type_equal (type
, ®mask
))
7770 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
7773 /* Don't set OP operand twice. */
7776 /* If there is an extension opcode to put here, the
7777 register number must be put into the regmem field. */
7778 if (i
.tm
.extension_opcode
!= None
)
7780 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
7781 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7783 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7788 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
7789 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7791 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7796 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7797 must set it to 3 to indicate this is a register operand
7798 in the regmem field. */
7799 if (!i
.mem_operands
)
7803 /* Fill in i.rm.reg field with extension opcode (if any). */
7804 if (i
.tm
.extension_opcode
!= None
)
7805 i
.rm
.reg
= i
.tm
.extension_opcode
;
7811 output_branch (void)
7817 relax_substateT subtype
;
7821 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
7822 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
7825 if (i
.prefix
[DATA_PREFIX
] != 0)
7831 /* Pentium4 branch hints. */
7832 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7833 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7838 if (i
.prefix
[REX_PREFIX
] != 0)
7844 /* BND prefixed jump. */
7845 if (i
.prefix
[BND_PREFIX
] != 0)
7847 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7851 if (i
.prefixes
!= 0 && !intel_syntax
)
7852 as_warn (_("skipping prefixes on this instruction"));
7854 /* It's always a symbol; End frag & setup for relax.
7855 Make sure there is enough room in this frag for the largest
7856 instruction we may generate in md_convert_frag. This is 2
7857 bytes for the opcode and room for the prefix and largest
7859 frag_grow (prefix
+ 2 + 4);
7860 /* Prefix and 1 opcode byte go in fr_fix. */
7861 p
= frag_more (prefix
+ 1);
7862 if (i
.prefix
[DATA_PREFIX
] != 0)
7863 *p
++ = DATA_PREFIX_OPCODE
;
7864 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7865 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7866 *p
++ = i
.prefix
[SEG_PREFIX
];
7867 if (i
.prefix
[REX_PREFIX
] != 0)
7868 *p
++ = i
.prefix
[REX_PREFIX
];
7869 *p
= i
.tm
.base_opcode
;
7871 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7872 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7873 else if (cpu_arch_flags
.bitfield
.cpui386
)
7874 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7876 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7879 sym
= i
.op
[0].disps
->X_add_symbol
;
7880 off
= i
.op
[0].disps
->X_add_number
;
7882 if (i
.op
[0].disps
->X_op
!= O_constant
7883 && i
.op
[0].disps
->X_op
!= O_symbol
)
7885 /* Handle complex expressions. */
7886 sym
= make_expr_symbol (i
.op
[0].disps
);
7890 /* 1 possible extra opcode + 4 byte displacement go in var part.
7891 Pass reloc in fr_var. */
7892 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return TRUE iff PLT32 relocation should be used for branching to
   symbol S.  */

static int
need_plt32_p (symbolS *s)
{
  /* PLT32 relocation is ELF only.  */
  if (!IS_ELF)
    return 0;

#ifdef TE_SOLARIS
  /* Don't emit PLT32 relocation on Solaris: neither native linker nor
     krtld support it.  */
  (void) s;
  return 0;
#else
  /* Since there is no need to prepare for PLT branch on x86-64, we
     can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
     be used as a marker for 32-bit PC-relative branches.  */
  if (!object_64bit)
    return 0;

  /* Weak or undefined symbol need PLT32 relocation.  */
  if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
    return 1;

  /* Non-global symbol doesn't need PLT32 relocation.  */
  if (! S_IS_EXTERNAL (s))
    return 0;

  /* Other global symbols need PLT32 relocation.  NB: Symbol with
     non-default visibilities are treated as normal global symbol
     so that PLT32 relocation can be used as a marker for 32-bit
     PC-relative branches.  It is useful for linker relaxation.  */
  return 1;
#endif
}
#endif
7940 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
7942 if (i
.tm
.opcode_modifier
.jumpbyte
)
7944 /* This is a loop or jecxz type instruction. */
7946 if (i
.prefix
[ADDR_PREFIX
] != 0)
7948 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
7951 /* Pentium4 branch hints. */
7952 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7953 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7955 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
7964 if (flag_code
== CODE_16BIT
)
7967 if (i
.prefix
[DATA_PREFIX
] != 0)
7969 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
7979 if (i
.prefix
[REX_PREFIX
] != 0)
7981 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
7985 /* BND prefixed jump. */
7986 if (i
.prefix
[BND_PREFIX
] != 0)
7988 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7992 if (i
.prefixes
!= 0 && !intel_syntax
)
7993 as_warn (_("skipping prefixes on this instruction"));
7995 p
= frag_more (i
.tm
.opcode_length
+ size
);
7996 switch (i
.tm
.opcode_length
)
7999 *p
++ = i
.tm
.base_opcode
>> 8;
8002 *p
++ = i
.tm
.base_opcode
;
8008 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8010 && jump_reloc
== NO_RELOC
8011 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8012 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8015 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8017 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8018 i
.op
[0].disps
, 1, jump_reloc
);
8020 /* All jumps handled here are signed, but don't use a signed limit
8021 check for 32 and 16 bit jumps as we want to allow wrap around at
8022 4G and 64k respectively. */
8024 fixP
->fx_signed
= 1;
8028 output_interseg_jump (void)
8036 if (flag_code
== CODE_16BIT
)
8040 if (i
.prefix
[DATA_PREFIX
] != 0)
8046 if (i
.prefix
[REX_PREFIX
] != 0)
8056 if (i
.prefixes
!= 0 && !intel_syntax
)
8057 as_warn (_("skipping prefixes on this instruction"));
8059 /* 1 opcode; 2 segment; offset */
8060 p
= frag_more (prefix
+ 1 + 2 + size
);
8062 if (i
.prefix
[DATA_PREFIX
] != 0)
8063 *p
++ = DATA_PREFIX_OPCODE
;
8065 if (i
.prefix
[REX_PREFIX
] != 0)
8066 *p
++ = i
.prefix
[REX_PREFIX
];
8068 *p
++ = i
.tm
.base_opcode
;
8069 if (i
.op
[1].imms
->X_op
== O_constant
)
8071 offsetT n
= i
.op
[1].imms
->X_add_number
;
8074 && !fits_in_unsigned_word (n
)
8075 && !fits_in_signed_word (n
))
8077 as_bad (_("16-bit jump out of range"));
8080 md_number_to_chars (p
, n
, size
);
8083 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8084 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8085 if (i
.op
[0].imms
->X_op
!= O_constant
)
8086 as_bad (_("can't handle non absolute segment in `%s'"),
8088 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Called at the end of assembly on ELF targets: emit the
   .note.gnu.property section recording the GNU_PROPERTY_X86_ISA_1_USED
   and GNU_PROPERTY_X86_FEATURE_2_USED bitmasks accumulated while
   assembling (x86_isa_1_used / x86_feature_2_used).  */

void
x86_cleanup (void)
{
  char *p;
  asection *sec;
  asection *seg = now_seg;
  subsegT subseg = now_subseg;
  unsigned int alignment, align_size_1;
  unsigned int isa_1_descsz, feature_2_descsz, descsz;
  unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
  unsigned int padding;

  if (!IS_ELF || !x86_used_note)
    return;

  x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;

  /* The .note.gnu.property section layout:

     Field	Length		Contents
     ----	----		----
     n_namsz	4		4
     n_descsz	4		The note descriptor size
     n_type	4		NT_GNU_PROPERTY_TYPE_0
     n_name	4		"GNU"
     n_desc	n_descsz	The program property array
     ....	....		....
   */

  /* Create the .note.gnu.property section.  */
  sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
  bfd_set_section_flags (sec,
			 (SEC_ALLOC
			  | SEC_LOAD
			  | SEC_DATA
			  | SEC_HAS_CONTENTS
			  | SEC_READONLY));

  if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
    {
      align_size_1 = 7;
      alignment = 3;
    }
  else
    {
      align_size_1 = 3;
      alignment = 2;
    }

  bfd_set_section_alignment (sec, alignment);
  elf_section_type (sec) = SHT_NOTE;

  /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
				  + 4-byte data  */
  isa_1_descsz_raw = 4 + 4 + 4;
  /* Align GNU_PROPERTY_X86_ISA_1_USED.  */
  isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;

  feature_2_descsz_raw = isa_1_descsz;
  /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
				      + 4-byte data  */
  feature_2_descsz_raw += 4 + 4 + 4;
  /* Align GNU_PROPERTY_X86_FEATURE_2_USED.  */
  feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
		      & ~align_size_1);

  descsz = feature_2_descsz;
  /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz.  */
  p = frag_more (4 + 4 + 4 + 4 + descsz);

  /* Write n_namsz.  */
  md_number_to_chars (p, (valueT) 4, 4);

  /* Write n_descsz.  */
  md_number_to_chars (p + 4, (valueT) descsz, 4);

  /* Write n_type.  */
  md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);

  /* Write n_name.  */
  memcpy (p + 4 * 3, "GNU", 4);

  /* Write 4-byte type.  */
  md_number_to_chars (p + 4 * 4,
		      (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);

  /* Write 4-byte data size.  */
  md_number_to_chars (p + 4 * 5, (valueT) 4, 4);

  /* Write 4-byte data.  */
  md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);

  /* Zero out paddings.  */
  padding = isa_1_descsz - isa_1_descsz_raw;
  if (padding)
    memset (p + 4 * 7, 0, padding);

  /* Write 4-byte type.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 4,
		      (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);

  /* Write 4-byte data size.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);

  /* Write 4-byte data.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 6,
		      (valueT) x86_feature_2_used, 4);

  /* Zero out paddings.  */
  padding = feature_2_descsz - feature_2_descsz_raw;
  if (padding)
    memset (p + isa_1_descsz + 4 * 7, 0, padding);

  /* We probably can't restore the current segment, for there likely
     isn't one yet...  */
  if (seg && subseg)
    subseg_set (seg, subseg);
}
#endif
8213 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8214 const char *frag_now_ptr
)
8216 unsigned int len
= 0;
8218 if (start_frag
!= frag_now
)
8220 const fragS
*fr
= start_frag
;
8225 } while (fr
&& fr
!= frag_now
);
8228 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8234 fragS
*insn_start_frag
;
8235 offsetT insn_start_off
;
8237 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8238 if (IS_ELF
&& x86_used_note
)
8240 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
8241 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
8242 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
8243 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
8244 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
8245 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
8246 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
8247 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
8248 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
8249 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
8250 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
8251 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
8252 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
8253 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
8254 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
8255 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
8256 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
8257 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
8258 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
8259 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
8260 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
8261 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
8262 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
8263 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
8264 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
8265 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
8266 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
8267 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
8268 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
8269 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
8270 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
8271 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
8272 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
8273 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
8274 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
8275 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
8276 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
8277 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
8278 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
8279 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
8280 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
8281 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
8282 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
8283 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
8284 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
8285 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
8286 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
8287 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
8288 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
8289 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
8291 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
8292 || i
.tm
.cpu_flags
.bitfield
.cpu287
8293 || i
.tm
.cpu_flags
.bitfield
.cpu387
8294 || i
.tm
.cpu_flags
.bitfield
.cpu687
8295 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
8296 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
8297 /* Don't set GNU_PROPERTY_X86_FEATURE_2_MMX for prefetchtXXX nor
8298 Xfence instructions. */
8299 if (i
.tm
.base_opcode
!= 0xf18
8300 && i
.tm
.base_opcode
!= 0xf0d
8301 && i
.tm
.base_opcode
!= 0xfaef8
8303 || i
.tm
.cpu_flags
.bitfield
.cpummx
8304 || i
.tm
.cpu_flags
.bitfield
.cpua3dnow
8305 || i
.tm
.cpu_flags
.bitfield
.cpua3dnowa
))
8306 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
8308 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
8310 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
8312 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
8313 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
8314 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
8315 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
8316 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
8317 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
8318 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
8319 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
8320 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
8324 /* Tie dwarf2 debug info to the address at the start of the insn.
8325 We can't do this after the insn has been output as the current
8326 frag may have been closed off. eg. by frag_var. */
8327 dwarf2_emit_insn (0);
8329 insn_start_frag
= frag_now
;
8330 insn_start_off
= frag_now_fix ();
8333 if (i
.tm
.opcode_modifier
.jump
)
8335 else if (i
.tm
.opcode_modifier
.jumpbyte
8336 || i
.tm
.opcode_modifier
.jumpdword
)
8338 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
8339 output_interseg_jump ();
8342 /* Output normal instructions here. */
8346 unsigned int prefix
;
8349 && (i
.tm
.base_opcode
== 0xfaee8
8350 || i
.tm
.base_opcode
== 0xfaef0
8351 || i
.tm
.base_opcode
== 0xfaef8))
8353 /* Encode lfence, mfence, and sfence as
8354 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
8355 offsetT val
= 0x240483f0ULL
;
8357 md_number_to_chars (p
, val
, 5);
8361 /* Some processors fail on LOCK prefix. This options makes
8362 assembler ignore LOCK prefix and serves as a workaround. */
8363 if (omit_lock_prefix
)
8365 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
8367 i
.prefix
[LOCK_PREFIX
] = 0;
8370 /* Since the VEX/EVEX prefix contains the implicit prefix, we
8371 don't need the explicit prefix. */
8372 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
8374 switch (i
.tm
.opcode_length
)
8377 if (i
.tm
.base_opcode
& 0xff000000)
8379 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
8380 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
8381 || prefix
!= REPE_PREFIX_OPCODE
8382 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
8383 add_prefix (prefix
);
8387 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
8389 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
8390 add_prefix (prefix
);
8396 /* Check for pseudo prefixes. */
8397 as_bad_where (insn_start_frag
->fr_file
,
8398 insn_start_frag
->fr_line
,
8399 _("pseudo prefix without instruction"));
8405 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8406 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
8407 R_X86_64_GOTTPOFF relocation so that linker can safely
8408 perform IE->LE optimization. */
8409 if (x86_elf_abi
== X86_64_X32_ABI
8411 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
8412 && i
.prefix
[REX_PREFIX
] == 0)
8413 add_prefix (REX_OPCODE
);
8416 /* The prefix bytes. */
8417 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
8419 FRAG_APPEND_1_CHAR (*q
);
8423 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
8428 /* REX byte is encoded in VEX prefix. */
8432 FRAG_APPEND_1_CHAR (*q
);
8435 /* There should be no other prefixes for instructions
8440 /* For EVEX instructions i.vrex should become 0 after
8441 build_evex_prefix. For VEX instructions upper 16 registers
8442 aren't available, so VREX should be 0. */
8445 /* Now the VEX prefix. */
8446 p
= frag_more (i
.vex
.length
);
8447 for (j
= 0; j
< i
.vex
.length
; j
++)
8448 p
[j
] = i
.vex
.bytes
[j
];
8451 /* Now the opcode; be careful about word order here! */
8452 if (i
.tm
.opcode_length
== 1)
8454 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
8458 switch (i
.tm
.opcode_length
)
8462 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
8463 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8467 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8477 /* Put out high byte first: can't use md_number_to_chars! */
8478 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
8479 *p
= i
.tm
.base_opcode
& 0xff;
8482 /* Now the modrm byte and sib byte (if present). */
8483 if (i
.tm
.opcode_modifier
.modrm
)
8485 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
8488 /* If i.rm.regmem == ESP (4)
8489 && i.rm.mode != (Register mode)
8491 ==> need second modrm byte. */
8492 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
8494 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
8495 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
8497 | i
.sib
.scale
<< 6));
8500 if (i
.disp_operands
)
8501 output_disp (insn_start_frag
, insn_start_off
);
8504 output_imm (insn_start_frag
, insn_start_off
);
8507 * frag_now_fix () returning plain abs_section_offset when we're in the
8508 * absolute section, and abs_section_offset not getting updated as data
8509 * gets added to the frag breaks the logic below.
8511 if (now_seg
!= absolute_section
)
8513 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
8515 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
8523 pi ("" /*line*/, &i
);
8525 #endif /* DEBUG386 */
8528 /* Return the size of the displacement operand N. */
8531 disp_size (unsigned int n
)
8535 if (i
.types
[n
].bitfield
.disp64
)
8537 else if (i
.types
[n
].bitfield
.disp8
)
8539 else if (i
.types
[n
].bitfield
.disp16
)
8544 /* Return the size of the immediate operand N. */
8547 imm_size (unsigned int n
)
8550 if (i
.types
[n
].bitfield
.imm64
)
8552 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
8554 else if (i
.types
[n
].bitfield
.imm16
)
8560 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
8565 for (n
= 0; n
< i
.operands
; n
++)
8567 if (operand_type_check (i
.types
[n
], disp
))
8569 if (i
.op
[n
].disps
->X_op
== O_constant
)
8571 int size
= disp_size (n
);
8572 offsetT val
= i
.op
[n
].disps
->X_add_number
;
8574 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
8576 p
= frag_more (size
);
8577 md_number_to_chars (p
, val
, size
);
8581 enum bfd_reloc_code_real reloc_type
;
8582 int size
= disp_size (n
);
8583 int sign
= i
.types
[n
].bitfield
.disp32s
;
8584 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
8587 /* We can't have 8 bit displacement here. */
8588 gas_assert (!i
.types
[n
].bitfield
.disp8
);
8590 /* The PC relative address is computed relative
8591 to the instruction boundary, so in case immediate
8592 fields follows, we need to adjust the value. */
8593 if (pcrel
&& i
.imm_operands
)
8598 for (n1
= 0; n1
< i
.operands
; n1
++)
8599 if (operand_type_check (i
.types
[n1
], imm
))
8601 /* Only one immediate is allowed for PC
8602 relative address. */
8603 gas_assert (sz
== 0);
8605 i
.op
[n
].disps
->X_add_number
-= sz
;
8607 /* We should find the immediate. */
8608 gas_assert (sz
!= 0);
8611 p
= frag_more (size
);
8612 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
8614 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
8615 && (((reloc_type
== BFD_RELOC_32
8616 || reloc_type
== BFD_RELOC_X86_64_32S
8617 || (reloc_type
== BFD_RELOC_64
8619 && (i
.op
[n
].disps
->X_op
== O_symbol
8620 || (i
.op
[n
].disps
->X_op
== O_add
8621 && ((symbol_get_value_expression
8622 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
8624 || reloc_type
== BFD_RELOC_32_PCREL
))
8628 reloc_type
= BFD_RELOC_386_GOTPC
;
8629 i
.op
[n
].imms
->X_add_number
+=
8630 encoding_length (insn_start_frag
, insn_start_off
, p
);
8632 else if (reloc_type
== BFD_RELOC_64
)
8633 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8635 /* Don't do the adjustment for x86-64, as there
8636 the pcrel addressing is relative to the _next_
8637 insn, and that is taken care of in other code. */
8638 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8640 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
8641 size
, i
.op
[n
].disps
, pcrel
,
8643 /* Check for "call/jmp *mem", "mov mem, %reg",
8644 "test %reg, mem" and "binop mem, %reg" where binop
8645 is one of adc, add, and, cmp, or, sbb, sub, xor
8646 instructions without data prefix. Always generate
8647 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
8648 if (i
.prefix
[DATA_PREFIX
] == 0
8649 && (generate_relax_relocations
8652 && i
.rm
.regmem
== 5))
8654 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
8655 && ((i
.operands
== 1
8656 && i
.tm
.base_opcode
== 0xff
8657 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
8659 && (i
.tm
.base_opcode
== 0x8b
8660 || i
.tm
.base_opcode
== 0x85
8661 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
8665 fixP
->fx_tcbit
= i
.rex
!= 0;
8667 && (i
.base_reg
->reg_num
== RegIP
))
8668 fixP
->fx_tcbit2
= 1;
8671 fixP
->fx_tcbit2
= 1;
8679 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
8684 for (n
= 0; n
< i
.operands
; n
++)
8686 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
8687 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
8690 if (operand_type_check (i
.types
[n
], imm
))
8692 if (i
.op
[n
].imms
->X_op
== O_constant
)
8694 int size
= imm_size (n
);
8697 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
8699 p
= frag_more (size
);
8700 md_number_to_chars (p
, val
, size
);
8704 /* Not absolute_section.
8705 Need a 32-bit fixup (don't support 8bit
8706 non-absolute imms). Try to support other
8708 enum bfd_reloc_code_real reloc_type
;
8709 int size
= imm_size (n
);
8712 if (i
.types
[n
].bitfield
.imm32s
8713 && (i
.suffix
== QWORD_MNEM_SUFFIX
8714 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
8719 p
= frag_more (size
);
8720 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
8722 /* This is tough to explain. We end up with this one if we
8723 * have operands that look like
8724 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
8725 * obtain the absolute address of the GOT, and it is strongly
8726 * preferable from a performance point of view to avoid using
8727 * a runtime relocation for this. The actual sequence of
8728 * instructions often look something like:
8733 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
8735 * The call and pop essentially return the absolute address
8736 * of the label .L66 and store it in %ebx. The linker itself
8737 * will ultimately change the first operand of the addl so
8738 * that %ebx points to the GOT, but to keep things simple, the
8739 * .o file must have this operand set so that it generates not
8740 * the absolute address of .L66, but the absolute address of
8741 * itself. This allows the linker itself simply treat a GOTPC
8742 * relocation as asking for a pcrel offset to the GOT to be
8743 * added in, and the addend of the relocation is stored in the
8744 * operand field for the instruction itself.
8746 * Our job here is to fix the operand so that it would add
8747 * the correct offset so that %ebx would point to itself. The
8748 * thing that is tricky is that .-.L66 will point to the
8749 * beginning of the instruction, so we need to further modify
8750 * the operand so that it will point to itself. There are
8751 * other cases where you have something like:
8753 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
8755 * and here no correction would be required. Internally in
8756 * the assembler we treat operands of this form as not being
8757 * pcrel since the '.' is explicitly mentioned, and I wonder
8758 * whether it would simplify matters to do it this way. Who
8759 * knows. In earlier versions of the PIC patches, the
8760 * pcrel_adjust field was used to store the correction, but
8761 * since the expression is not pcrel, I felt it would be
8762 * confusing to do it this way. */
8764 if ((reloc_type
== BFD_RELOC_32
8765 || reloc_type
== BFD_RELOC_X86_64_32S
8766 || reloc_type
== BFD_RELOC_64
)
8768 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
8769 && (i
.op
[n
].imms
->X_op
== O_symbol
8770 || (i
.op
[n
].imms
->X_op
== O_add
8771 && ((symbol_get_value_expression
8772 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
8776 reloc_type
= BFD_RELOC_386_GOTPC
;
8778 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8780 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8781 i
.op
[n
].imms
->X_add_number
+=
8782 encoding_length (insn_start_frag
, insn_start_off
, p
);
8784 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8785 i
.op
[n
].imms
, 0, reloc_type
);
8791 /* x86_cons_fix_new is called via the expression parsing code when a
8792 reloc is needed. We use this hook to get the correct .got reloc. */
8793 static int cons_sign
= -1;
8796 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
8797 expressionS
*exp
, bfd_reloc_code_real_type r
)
8799 r
= reloc (len
, 0, cons_sign
, r
);
8802 if (exp
->X_op
== O_secrel
)
8804 exp
->X_op
= O_symbol
;
8805 r
= BFD_RELOC_32_SECREL
;
8809 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
8812 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
8813 purpose of the `.dc.a' internal pseudo-op. */
8816 x86_address_bytes (void)
8818 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
8820 return stdoutput
->arch_info
->bits_per_address
/ 8;
#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
    && !defined (TE_PE)
/* NOTE(review): the guard's continuation line was lost in extraction;
   reconstructed as excluding TE_PE, which defines its own lex_got below
   — confirm against pristine sources.  */
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Any @-suffix except the first table entry implies the
		 GOT exists; make sure its symbol does too.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
#endif
8992 /* Parse operands of the form
8993 <symbol>@SECREL32+<nnn>
8995 If we find one, set up the correct relocation in RELOC and copy the
8996 input string, minus the `@SECREL32' into a malloc'd buffer for
8997 parsing by the calling routine. Return this buffer, and if ADJUST
8998 is non-null set it to the length of the string we removed from the
8999 input line. Otherwise return NULL.
9001 This function is copied from the ELF version above adjusted for PE targets. */
9004 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9005 int *adjust ATTRIBUTE_UNUSED
,
9006 i386_operand_type
*types
)
9012 const enum bfd_reloc_code_real rel
[2];
9013 const i386_operand_type types64
;
9017 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9018 BFD_RELOC_32_SECREL
},
9019 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9025 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9026 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9029 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9031 int len
= gotrel
[j
].len
;
9033 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9035 if (gotrel
[j
].rel
[object_64bit
] != 0)
9038 char *tmpbuf
, *past_reloc
;
9040 *rel
= gotrel
[j
].rel
[object_64bit
];
9046 if (flag_code
!= CODE_64BIT
)
9048 types
->bitfield
.imm32
= 1;
9049 types
->bitfield
.disp32
= 1;
9052 *types
= gotrel
[j
].types64
;
9055 /* The length of the first part of our input line. */
9056 first
= cp
- input_line_pointer
;
9058 /* The second part goes from after the reloc token until
9059 (and including) an end_of_line char or comma. */
9060 past_reloc
= cp
+ 1 + len
;
9062 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9064 second
= cp
+ 1 - past_reloc
;
9066 /* Allocate and copy string. The trailing NUL shouldn't
9067 be necessary, but be safe. */
9068 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9069 memcpy (tmpbuf
, input_line_pointer
, first
);
9070 if (second
!= 0 && *past_reloc
!= ' ')
9071 /* Replace the relocation token with ' ', so that
9072 errors like foo@SECLREL321 will be detected. */
9073 tmpbuf
[first
++] = ' ';
9074 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9075 tmpbuf
[first
+ second
] = '\0';
9079 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9080 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9085 /* Might be a symbol version string. Don't as_bad here. */
9091 bfd_reloc_code_real_type
9092 x86_cons (expressionS
*exp
, int size
)
9094 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
9096 intel_syntax
= -intel_syntax
;
9099 if (size
== 4 || (object_64bit
&& size
== 8))
9101 /* Handle @GOTOFF and the like in an expression. */
9103 char *gotfree_input_line
;
9106 save
= input_line_pointer
;
9107 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
9108 if (gotfree_input_line
)
9109 input_line_pointer
= gotfree_input_line
;
9113 if (gotfree_input_line
)
9115 /* expression () has merrily parsed up to the end of line,
9116 or a comma - in the wrong buffer. Transfer how far
9117 input_line_pointer has moved to the right buffer. */
9118 input_line_pointer
= (save
9119 + (input_line_pointer
- gotfree_input_line
)
9121 free (gotfree_input_line
);
9122 if (exp
->X_op
== O_constant
9123 || exp
->X_op
== O_absent
9124 || exp
->X_op
== O_illegal
9125 || exp
->X_op
== O_register
9126 || exp
->X_op
== O_big
)
9128 char c
= *input_line_pointer
;
9129 *input_line_pointer
= 0;
9130 as_bad (_("missing or invalid expression `%s'"), save
);
9131 *input_line_pointer
= c
;
9133 else if ((got_reloc
== BFD_RELOC_386_PLT32
9134 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
9135 && exp
->X_op
!= O_symbol
)
9137 char c
= *input_line_pointer
;
9138 *input_line_pointer
= 0;
9139 as_bad (_("invalid PLT expression `%s'"), save
);
9140 *input_line_pointer
= c
;
9147 intel_syntax
= -intel_syntax
;
9150 i386_intel_simplify (exp
);
9156 signed_cons (int size
)
9158 if (flag_code
== CODE_64BIT
)
9166 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
9173 if (exp
.X_op
== O_symbol
)
9174 exp
.X_op
= O_secrel
;
9176 emit_expr (&exp
, 4);
9178 while (*input_line_pointer
++ == ',');
9180 input_line_pointer
--;
9181 demand_empty_rest_of_line ();
9185 /* Handle Vector operations. */
9188 check_VecOperations (char *op_string
, char *op_end
)
9190 const reg_entry
*mask
;
9195 && (op_end
== NULL
|| op_string
< op_end
))
9198 if (*op_string
== '{')
9202 /* Check broadcasts. */
9203 if (strncmp (op_string
, "1to", 3) == 0)
9208 goto duplicated_vec_op
;
9211 if (*op_string
== '8')
9213 else if (*op_string
== '4')
9215 else if (*op_string
== '2')
9217 else if (*op_string
== '1'
9218 && *(op_string
+1) == '6')
9225 as_bad (_("Unsupported broadcast: `%s'"), saved
);
9230 broadcast_op
.type
= bcst_type
;
9231 broadcast_op
.operand
= this_operand
;
9232 broadcast_op
.bytes
= 0;
9233 i
.broadcast
= &broadcast_op
;
9235 /* Check masking operation. */
9236 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
9238 /* k0 can't be used for write mask. */
9239 if (!mask
->reg_type
.bitfield
.regmask
|| mask
->reg_num
== 0)
9241 as_bad (_("`%s%s' can't be used for write mask"),
9242 register_prefix
, mask
->reg_name
);
9248 mask_op
.mask
= mask
;
9249 mask_op
.zeroing
= 0;
9250 mask_op
.operand
= this_operand
;
9256 goto duplicated_vec_op
;
9258 i
.mask
->mask
= mask
;
9260 /* Only "{z}" is allowed here. No need to check
9261 zeroing mask explicitly. */
9262 if (i
.mask
->operand
!= this_operand
)
9264 as_bad (_("invalid write mask `%s'"), saved
);
9271 /* Check zeroing-flag for masking operation. */
9272 else if (*op_string
== 'z')
9276 mask_op
.mask
= NULL
;
9277 mask_op
.zeroing
= 1;
9278 mask_op
.operand
= this_operand
;
9283 if (i
.mask
->zeroing
)
9286 as_bad (_("duplicated `%s'"), saved
);
9290 i
.mask
->zeroing
= 1;
9292 /* Only "{%k}" is allowed here. No need to check mask
9293 register explicitly. */
9294 if (i
.mask
->operand
!= this_operand
)
9296 as_bad (_("invalid zeroing-masking `%s'"),
9305 goto unknown_vec_op
;
9307 if (*op_string
!= '}')
9309 as_bad (_("missing `}' in `%s'"), saved
);
9314 /* Strip whitespace since the addition of pseudo prefixes
9315 changed how the scrubber treats '{'. */
9316 if (is_space_char (*op_string
))
9322 /* We don't know this one. */
9323 as_bad (_("unknown vector operation: `%s'"), saved
);
9327 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
9329 as_bad (_("zeroing-masking only allowed with write mask"));
9337 i386_immediate (char *imm_start
)
9339 char *save_input_line_pointer
;
9340 char *gotfree_input_line
;
9343 i386_operand_type types
;
9345 operand_type_set (&types
, ~0);
9347 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
9349 as_bad (_("at most %d immediate operands are allowed"),
9350 MAX_IMMEDIATE_OPERANDS
);
9354 exp
= &im_expressions
[i
.imm_operands
++];
9355 i
.op
[this_operand
].imms
= exp
;
9357 if (is_space_char (*imm_start
))
9360 save_input_line_pointer
= input_line_pointer
;
9361 input_line_pointer
= imm_start
;
9363 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
9364 if (gotfree_input_line
)
9365 input_line_pointer
= gotfree_input_line
;
9367 exp_seg
= expression (exp
);
9371 /* Handle vector operations. */
9372 if (*input_line_pointer
== '{')
9374 input_line_pointer
= check_VecOperations (input_line_pointer
,
9376 if (input_line_pointer
== NULL
)
9380 if (*input_line_pointer
)
9381 as_bad (_("junk `%s' after expression"), input_line_pointer
);
9383 input_line_pointer
= save_input_line_pointer
;
9384 if (gotfree_input_line
)
9386 free (gotfree_input_line
);
9388 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
9389 exp
->X_op
= O_illegal
;
9392 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
9396 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
9397 i386_operand_type types
, const char *imm_start
)
9399 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
9402 as_bad (_("missing or invalid immediate expression `%s'"),
9406 else if (exp
->X_op
== O_constant
)
9408 /* Size it properly later. */
9409 i
.types
[this_operand
].bitfield
.imm64
= 1;
9410 /* If not 64bit, sign extend val. */
9411 if (flag_code
!= CODE_64BIT
9412 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
9414 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
9416 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9417 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
9418 && exp_seg
!= absolute_section
9419 && exp_seg
!= text_section
9420 && exp_seg
!= data_section
9421 && exp_seg
!= bss_section
9422 && exp_seg
!= undefined_section
9423 && !bfd_is_com_section (exp_seg
))
9425 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
9429 else if (!intel_syntax
&& exp_seg
== reg_section
)
9432 as_bad (_("illegal immediate register operand %s"), imm_start
);
9437 /* This is an address. The size of the address will be
9438 determined later, depending on destination register,
9439 suffix, or the default for the section. */
9440 i
.types
[this_operand
].bitfield
.imm8
= 1;
9441 i
.types
[this_operand
].bitfield
.imm16
= 1;
9442 i
.types
[this_operand
].bitfield
.imm32
= 1;
9443 i
.types
[this_operand
].bitfield
.imm32s
= 1;
9444 i
.types
[this_operand
].bitfield
.imm64
= 1;
9445 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
9453 i386_scale (char *scale
)
9456 char *save
= input_line_pointer
;
9458 input_line_pointer
= scale
;
9459 val
= get_absolute_expression ();
9464 i
.log2_scale_factor
= 0;
9467 i
.log2_scale_factor
= 1;
9470 i
.log2_scale_factor
= 2;
9473 i
.log2_scale_factor
= 3;
9477 char sep
= *input_line_pointer
;
9479 *input_line_pointer
= '\0';
9480 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
9482 *input_line_pointer
= sep
;
9483 input_line_pointer
= save
;
9487 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
9489 as_warn (_("scale factor of %d without an index register"),
9490 1 << i
.log2_scale_factor
);
9491 i
.log2_scale_factor
= 0;
9493 scale
= input_line_pointer
;
9494 input_line_pointer
= save
;
9499 i386_displacement (char *disp_start
, char *disp_end
)
9503 char *save_input_line_pointer
;
9504 char *gotfree_input_line
;
9506 i386_operand_type bigdisp
, types
= anydisp
;
9509 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
9511 as_bad (_("at most %d displacement operands are allowed"),
9512 MAX_MEMORY_OPERANDS
);
9516 operand_type_set (&bigdisp
, 0);
9517 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
9518 || (!current_templates
->start
->opcode_modifier
.jump
9519 && !current_templates
->start
->opcode_modifier
.jumpdword
))
9521 bigdisp
.bitfield
.disp32
= 1;
9522 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
9523 if (flag_code
== CODE_64BIT
)
9527 bigdisp
.bitfield
.disp32s
= 1;
9528 bigdisp
.bitfield
.disp64
= 1;
9531 else if ((flag_code
== CODE_16BIT
) ^ override
)
9533 bigdisp
.bitfield
.disp32
= 0;
9534 bigdisp
.bitfield
.disp16
= 1;
9539 /* For PC-relative branches, the width of the displacement
9540 is dependent upon data size, not address size. */
9541 override
= (i
.prefix
[DATA_PREFIX
] != 0);
9542 if (flag_code
== CODE_64BIT
)
9544 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
9545 bigdisp
.bitfield
.disp16
= 1;
9548 bigdisp
.bitfield
.disp32
= 1;
9549 bigdisp
.bitfield
.disp32s
= 1;
9555 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
9557 : LONG_MNEM_SUFFIX
));
9558 bigdisp
.bitfield
.disp32
= 1;
9559 if ((flag_code
== CODE_16BIT
) ^ override
)
9561 bigdisp
.bitfield
.disp32
= 0;
9562 bigdisp
.bitfield
.disp16
= 1;
9566 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
9569 exp
= &disp_expressions
[i
.disp_operands
];
9570 i
.op
[this_operand
].disps
= exp
;
9572 save_input_line_pointer
= input_line_pointer
;
9573 input_line_pointer
= disp_start
;
9574 END_STRING_AND_SAVE (disp_end
);
9576 #ifndef GCC_ASM_O_HACK
9577 #define GCC_ASM_O_HACK 0
9580 END_STRING_AND_SAVE (disp_end
+ 1);
9581 if (i
.types
[this_operand
].bitfield
.baseIndex
9582 && displacement_string_end
[-1] == '+')
9584 /* This hack is to avoid a warning when using the "o"
9585 constraint within gcc asm statements.
9588 #define _set_tssldt_desc(n,addr,limit,type) \
9589 __asm__ __volatile__ ( \
9591 "movw %w1,2+%0\n\t" \
9593 "movb %b1,4+%0\n\t" \
9594 "movb %4,5+%0\n\t" \
9595 "movb $0,6+%0\n\t" \
9596 "movb %h1,7+%0\n\t" \
9598 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
9600 This works great except that the output assembler ends
9601 up looking a bit weird if it turns out that there is
9602 no offset. You end up producing code that looks like:
9615 So here we provide the missing zero. */
9617 *displacement_string_end
= '0';
9620 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
9621 if (gotfree_input_line
)
9622 input_line_pointer
= gotfree_input_line
;
9624 exp_seg
= expression (exp
);
9627 if (*input_line_pointer
)
9628 as_bad (_("junk `%s' after expression"), input_line_pointer
);
9630 RESTORE_END_STRING (disp_end
+ 1);
9632 input_line_pointer
= save_input_line_pointer
;
9633 if (gotfree_input_line
)
9635 free (gotfree_input_line
);
9637 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
9638 exp
->X_op
= O_illegal
;
9641 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
9643 RESTORE_END_STRING (disp_end
);
9649 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
9650 i386_operand_type types
, const char *disp_start
)
9652 i386_operand_type bigdisp
;
9655 /* We do this to make sure that the section symbol is in
9656 the symbol table. We will ultimately change the relocation
9657 to be relative to the beginning of the section. */
9658 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
9659 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
9660 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
9662 if (exp
->X_op
!= O_symbol
)
9665 if (S_IS_LOCAL (exp
->X_add_symbol
)
9666 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
9667 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
9668 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
9669 exp
->X_op
= O_subtract
;
9670 exp
->X_op_symbol
= GOT_symbol
;
9671 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
9672 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
9673 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
9674 i
.reloc
[this_operand
] = BFD_RELOC_64
;
9676 i
.reloc
[this_operand
] = BFD_RELOC_32
;
9679 else if (exp
->X_op
== O_absent
9680 || exp
->X_op
== O_illegal
9681 || exp
->X_op
== O_big
)
9684 as_bad (_("missing or invalid displacement expression `%s'"),
9689 else if (flag_code
== CODE_64BIT
9690 && !i
.prefix
[ADDR_PREFIX
]
9691 && exp
->X_op
== O_constant
)
9693 /* Since displacement is signed extended to 64bit, don't allow
9694 disp32 and turn off disp32s if they are out of range. */
9695 i
.types
[this_operand
].bitfield
.disp32
= 0;
9696 if (!fits_in_signed_long (exp
->X_add_number
))
9698 i
.types
[this_operand
].bitfield
.disp32s
= 0;
9699 if (i
.types
[this_operand
].bitfield
.baseindex
)
9701 as_bad (_("0x%lx out range of signed 32bit displacement"),
9702 (long) exp
->X_add_number
);
9708 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9709 else if (exp
->X_op
!= O_constant
9710 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
9711 && exp_seg
!= absolute_section
9712 && exp_seg
!= text_section
9713 && exp_seg
!= data_section
9714 && exp_seg
!= bss_section
9715 && exp_seg
!= undefined_section
9716 && !bfd_is_com_section (exp_seg
))
9718 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
9723 /* Check if this is a displacement only operand. */
9724 bigdisp
= i
.types
[this_operand
];
9725 bigdisp
.bitfield
.disp8
= 0;
9726 bigdisp
.bitfield
.disp16
= 0;
9727 bigdisp
.bitfield
.disp32
= 0;
9728 bigdisp
.bitfield
.disp32s
= 0;
9729 bigdisp
.bitfield
.disp64
= 0;
9730 if (operand_type_all_zero (&bigdisp
))
9731 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
9737 /* Return the active addressing mode, taking address override and
9738 registers forming the address into consideration. Update the
9739 address override prefix if necessary. */
9741 static enum flag_code
9742 i386_addressing_mode (void)
9744 enum flag_code addr_mode
;
9746 if (i
.prefix
[ADDR_PREFIX
])
9747 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
9750 addr_mode
= flag_code
;
9752 #if INFER_ADDR_PREFIX
9753 if (i
.mem_operands
== 0)
9755 /* Infer address prefix from the first memory operand. */
9756 const reg_entry
*addr_reg
= i
.base_reg
;
9758 if (addr_reg
== NULL
)
9759 addr_reg
= i
.index_reg
;
9763 if (addr_reg
->reg_type
.bitfield
.dword
)
9764 addr_mode
= CODE_32BIT
;
9765 else if (flag_code
!= CODE_64BIT
9766 && addr_reg
->reg_type
.bitfield
.word
)
9767 addr_mode
= CODE_16BIT
;
9769 if (addr_mode
!= flag_code
)
9771 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
9773 /* Change the size of any displacement too. At most one
9774 of Disp16 or Disp32 is set.
9775 FIXME. There doesn't seem to be any real need for
9776 separate Disp16 and Disp32 flags. The same goes for
9777 Imm16 and Imm32. Removing them would probably clean
9778 up the code quite a lot. */
9779 if (flag_code
!= CODE_64BIT
9780 && (i
.types
[this_operand
].bitfield
.disp16
9781 || i
.types
[this_operand
].bitfield
.disp32
))
9782 i
.types
[this_operand
]
9783 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
9793 /* Make sure the memory operand we've been dealt is valid.
9794 Return 1 on success, 0 on a failure. */
9797 i386_index_check (const char *operand_string
)
9799 const char *kind
= "base/index";
9800 enum flag_code addr_mode
= i386_addressing_mode ();
9802 if (current_templates
->start
->opcode_modifier
.isstring
9803 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
9804 && (current_templates
->end
[-1].opcode_modifier
.isstring
9807 /* Memory operands of string insns are special in that they only allow
9808 a single register (rDI, rSI, or rBX) as their memory address. */
9809 const reg_entry
*expected_reg
;
9810 static const char *di_si
[][2] =
9816 static const char *bx
[] = { "ebx", "bx", "rbx" };
9818 kind
= "string address";
9820 if (current_templates
->start
->opcode_modifier
.repprefixok
)
9822 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
9824 if (!type
.bitfield
.baseindex
9825 || ((!i
.mem_operands
!= !intel_syntax
)
9826 && current_templates
->end
[-1].operand_types
[1]
9827 .bitfield
.baseindex
))
9828 type
= current_templates
->end
[-1].operand_types
[1];
9829 expected_reg
= hash_find (reg_hash
,
9830 di_si
[addr_mode
][type
.bitfield
.esseg
]);
9834 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
9836 if (i
.base_reg
!= expected_reg
9838 || operand_type_check (i
.types
[this_operand
], disp
))
9840 /* The second memory operand must have the same size as
9844 && !((addr_mode
== CODE_64BIT
9845 && i
.base_reg
->reg_type
.bitfield
.qword
)
9846 || (addr_mode
== CODE_32BIT
9847 ? i
.base_reg
->reg_type
.bitfield
.dword
9848 : i
.base_reg
->reg_type
.bitfield
.word
)))
9851 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
9853 intel_syntax
? '[' : '(',
9855 expected_reg
->reg_name
,
9856 intel_syntax
? ']' : ')');
9863 as_bad (_("`%s' is not a valid %s expression"),
9864 operand_string
, kind
);
9869 if (addr_mode
!= CODE_16BIT
)
9871 /* 32-bit/64-bit checks. */
9873 && ((addr_mode
== CODE_64BIT
9874 ? !i
.base_reg
->reg_type
.bitfield
.qword
9875 : !i
.base_reg
->reg_type
.bitfield
.dword
)
9876 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
9877 || i
.base_reg
->reg_num
== RegIZ
))
9879 && !i
.index_reg
->reg_type
.bitfield
.xmmword
9880 && !i
.index_reg
->reg_type
.bitfield
.ymmword
9881 && !i
.index_reg
->reg_type
.bitfield
.zmmword
9882 && ((addr_mode
== CODE_64BIT
9883 ? !i
.index_reg
->reg_type
.bitfield
.qword
9884 : !i
.index_reg
->reg_type
.bitfield
.dword
)
9885 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
9888 /* bndmk, bndldx, and bndstx have special restrictions. */
9889 if (current_templates
->start
->base_opcode
== 0xf30f1b
9890 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
9892 /* They cannot use RIP-relative addressing. */
9893 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9895 as_bad (_("`%s' cannot be used here"), operand_string
);
9899 /* bndldx and bndstx ignore their scale factor. */
9900 if (current_templates
->start
->base_opcode
!= 0xf30f1b
9901 && i
.log2_scale_factor
)
9902 as_warn (_("register scaling is being ignored here"));
9907 /* 16-bit checks. */
9909 && (!i
.base_reg
->reg_type
.bitfield
.word
9910 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
9912 && (!i
.index_reg
->reg_type
.bitfield
.word
9913 || !i
.index_reg
->reg_type
.bitfield
.baseindex
9915 && i
.base_reg
->reg_num
< 6
9916 && i
.index_reg
->reg_num
>= 6
9917 && i
.log2_scale_factor
== 0))))
9924 /* Handle vector immediates. */
9927 RC_SAE_immediate (const char *imm_start
)
9929 unsigned int match_found
, j
;
9930 const char *pstr
= imm_start
;
9938 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
9940 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
9944 rc_op
.type
= RC_NamesTable
[j
].type
;
9945 rc_op
.operand
= this_operand
;
9946 i
.rounding
= &rc_op
;
9950 as_bad (_("duplicated `%s'"), imm_start
);
9953 pstr
+= RC_NamesTable
[j
].len
;
9963 as_bad (_("Missing '}': '%s'"), imm_start
);
9966 /* RC/SAE immediate string should contain nothing more. */;
9969 as_bad (_("Junk after '}': '%s'"), imm_start
);
9973 exp
= &im_expressions
[i
.imm_operands
++];
9974 i
.op
[this_operand
].imms
= exp
;
9976 exp
->X_op
= O_constant
;
9977 exp
->X_add_number
= 0;
9978 exp
->X_add_symbol
= (symbolS
*) 0;
9979 exp
->X_op_symbol
= (symbolS
*) 0;
9981 i
.types
[this_operand
].bitfield
.imm8
= 1;
9985 /* Only string instructions can have a second memory operand, so
9986 reduce current_templates to just those if it contains any. */
9988 maybe_adjust_templates (void)
9990 const insn_template
*t
;
9992 gas_assert (i
.mem_operands
== 1);
9994 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
9995 if (t
->opcode_modifier
.isstring
)
9998 if (t
< current_templates
->end
)
10000 static templates aux_templates
;
10001 bfd_boolean recheck
;
10003 aux_templates
.start
= t
;
10004 for (; t
< current_templates
->end
; ++t
)
10005 if (!t
->opcode_modifier
.isstring
)
10007 aux_templates
.end
= t
;
10009 /* Determine whether to re-check the first memory operand. */
10010 recheck
= (aux_templates
.start
!= current_templates
->start
10011 || t
!= current_templates
->end
);
10013 current_templates
= &aux_templates
;
10017 i
.mem_operands
= 0;
10018 if (i
.memop1_string
!= NULL
10019 && i386_index_check (i
.memop1_string
) == 0)
10021 i
.mem_operands
= 1;
10028 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
10032 i386_att_operand (char *operand_string
)
10034 const reg_entry
*r
;
10036 char *op_string
= operand_string
;
10038 if (is_space_char (*op_string
))
10041 /* We check for an absolute prefix (differentiating,
10042 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
10043 if (*op_string
== ABSOLUTE_PREFIX
)
10046 if (is_space_char (*op_string
))
10048 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
10051 /* Check if operand is a register. */
10052 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
10054 i386_operand_type temp
;
10056 /* Check for a segment override by searching for ':' after a
10057 segment register. */
10058 op_string
= end_op
;
10059 if (is_space_char (*op_string
))
10061 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
10063 switch (r
->reg_num
)
10066 i
.seg
[i
.mem_operands
] = &es
;
10069 i
.seg
[i
.mem_operands
] = &cs
;
10072 i
.seg
[i
.mem_operands
] = &ss
;
10075 i
.seg
[i
.mem_operands
] = &ds
;
10078 i
.seg
[i
.mem_operands
] = &fs
;
10081 i
.seg
[i
.mem_operands
] = &gs
;
10085 /* Skip the ':' and whitespace. */
10087 if (is_space_char (*op_string
))
10090 if (!is_digit_char (*op_string
)
10091 && !is_identifier_char (*op_string
)
10092 && *op_string
!= '('
10093 && *op_string
!= ABSOLUTE_PREFIX
)
10095 as_bad (_("bad memory operand `%s'"), op_string
);
10098 /* Handle case of %es:*foo. */
10099 if (*op_string
== ABSOLUTE_PREFIX
)
10102 if (is_space_char (*op_string
))
10104 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
10106 goto do_memory_reference
;
10109 /* Handle vector operations. */
10110 if (*op_string
== '{')
10112 op_string
= check_VecOperations (op_string
, NULL
);
10113 if (op_string
== NULL
)
10119 as_bad (_("junk `%s' after register"), op_string
);
10122 temp
= r
->reg_type
;
10123 temp
.bitfield
.baseindex
= 0;
10124 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10126 i
.types
[this_operand
].bitfield
.unspecified
= 0;
10127 i
.op
[this_operand
].regs
= r
;
10130 else if (*op_string
== REGISTER_PREFIX
)
10132 as_bad (_("bad register name `%s'"), op_string
);
10135 else if (*op_string
== IMMEDIATE_PREFIX
)
10138 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
10140 as_bad (_("immediate operand illegal with absolute jump"));
10143 if (!i386_immediate (op_string
))
10146 else if (RC_SAE_immediate (operand_string
))
10148 /* If it is a RC or SAE immediate, do nothing. */
10151 else if (is_digit_char (*op_string
)
10152 || is_identifier_char (*op_string
)
10153 || *op_string
== '"'
10154 || *op_string
== '(')
10156 /* This is a memory reference of some sort. */
10159 /* Start and end of displacement string expression (if found). */
10160 char *displacement_string_start
;
10161 char *displacement_string_end
;
10164 do_memory_reference
:
10165 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
10167 if ((i
.mem_operands
== 1
10168 && !current_templates
->start
->opcode_modifier
.isstring
)
10169 || i
.mem_operands
== 2)
10171 as_bad (_("too many memory references for `%s'"),
10172 current_templates
->start
->name
);
10176 /* Check for base index form. We detect the base index form by
10177 looking for an ')' at the end of the operand, searching
10178 for the '(' matching it, and finding a REGISTER_PREFIX or ','
10180 base_string
= op_string
+ strlen (op_string
);
10182 /* Handle vector operations. */
10183 vop_start
= strchr (op_string
, '{');
10184 if (vop_start
&& vop_start
< base_string
)
10186 if (check_VecOperations (vop_start
, base_string
) == NULL
)
10188 base_string
= vop_start
;
10192 if (is_space_char (*base_string
))
10195 /* If we only have a displacement, set-up for it to be parsed later. */
10196 displacement_string_start
= op_string
;
10197 displacement_string_end
= base_string
+ 1;
10199 if (*base_string
== ')')
10202 unsigned int parens_balanced
= 1;
10203 /* We've already checked that the number of left & right ()'s are
10204 equal, so this loop will not be infinite. */
10208 if (*base_string
== ')')
10210 if (*base_string
== '(')
10213 while (parens_balanced
);
10215 temp_string
= base_string
;
10217 /* Skip past '(' and whitespace. */
10219 if (is_space_char (*base_string
))
10222 if (*base_string
== ','
10223 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
10226 displacement_string_end
= temp_string
;
10228 i
.types
[this_operand
].bitfield
.baseindex
= 1;
10232 base_string
= end_op
;
10233 if (is_space_char (*base_string
))
10237 /* There may be an index reg or scale factor here. */
10238 if (*base_string
== ',')
10241 if (is_space_char (*base_string
))
10244 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
10247 base_string
= end_op
;
10248 if (is_space_char (*base_string
))
10250 if (*base_string
== ',')
10253 if (is_space_char (*base_string
))
10256 else if (*base_string
!= ')')
10258 as_bad (_("expecting `,' or `)' "
10259 "after index register in `%s'"),
10264 else if (*base_string
== REGISTER_PREFIX
)
10266 end_op
= strchr (base_string
, ',');
10269 as_bad (_("bad register name `%s'"), base_string
);
10273 /* Check for scale factor. */
10274 if (*base_string
!= ')')
10276 char *end_scale
= i386_scale (base_string
);
10281 base_string
= end_scale
;
10282 if (is_space_char (*base_string
))
10284 if (*base_string
!= ')')
10286 as_bad (_("expecting `)' "
10287 "after scale factor in `%s'"),
10292 else if (!i
.index_reg
)
10294 as_bad (_("expecting index register or scale factor "
10295 "after `,'; got '%c'"),
10300 else if (*base_string
!= ')')
10302 as_bad (_("expecting `,' or `)' "
10303 "after base register in `%s'"),
10308 else if (*base_string
== REGISTER_PREFIX
)
10310 end_op
= strchr (base_string
, ',');
10313 as_bad (_("bad register name `%s'"), base_string
);
10318 /* If there's an expression beginning the operand, parse it,
10319 assuming displacement_string_start and
10320 displacement_string_end are meaningful. */
10321 if (displacement_string_start
!= displacement_string_end
)
10323 if (!i386_displacement (displacement_string_start
,
10324 displacement_string_end
))
10328 /* Special case for (%dx) while doing input/output op. */
10330 && i
.base_reg
->reg_type
.bitfield
.inoutportreg
10331 && i
.index_reg
== 0
10332 && i
.log2_scale_factor
== 0
10333 && i
.seg
[i
.mem_operands
] == 0
10334 && !operand_type_check (i
.types
[this_operand
], disp
))
10336 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
10340 if (i386_index_check (operand_string
) == 0)
10342 i
.flags
[this_operand
] |= Operand_Mem
;
10343 if (i
.mem_operands
== 0)
10344 i
.memop1_string
= xstrdup (operand_string
);
10349 /* It's not a memory operand; argh! */
10350 as_bad (_("invalid char %s beginning operand %d `%s'"),
10351 output_invalid (*op_string
),
10356 return 1; /* Normal return. */
10359 /* Calculate the maximum variable size (i.e., excluding fr_fix)
10360 that an rs_machine_dependent frag may reach. */
10363 i386_frag_max_var (fragS
*frag
)
10365 /* The only relaxable frags are for jumps.
10366 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
10367 gas_assert (frag
->fr_type
== rs_machine_dependent
);
10368 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
10371 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10373 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
10375 /* STT_GNU_IFUNC symbol must go through PLT. */
10376 if ((symbol_get_bfdsym (fr_symbol
)->flags
10377 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
10380 if (!S_IS_EXTERNAL (fr_symbol
))
10381 /* Symbol may be weak or local. */
10382 return !S_IS_WEAK (fr_symbol
);
10384 /* Global symbols with non-default visibility can't be preempted. */
10385 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
10388 if (fr_var
!= NO_RELOC
)
10389 switch ((enum bfd_reloc_code_real
) fr_var
)
10391 case BFD_RELOC_386_PLT32
:
10392 case BFD_RELOC_X86_64_PLT32
:
10393 /* Symbol with PLT relocation may be preempted. */
10399 /* Global symbols with default visibility in a shared library may be
10400 preempted by another definition. */
10405 /* md_estimate_size_before_relax()
10407 Called just before relax() for rs_machine_dependent frags. The x86
10408 assembler uses these frags to handle variable size jump
10411 Any symbol that is now undefined will not become defined.
10412 Return the correct fr_subtype in the frag.
10413 Return the initial "guess for variable size of frag" to caller.
10414 The guess is actually the growth beyond the fixed part. Whatever
10415 we do to grow the fixed or variable part contributes to our
10419 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
10421 /* We've already got fragP->fr_subtype right; all we have to do is
10422 check for un-relaxable symbols. On an ELF system, we can't relax
10423 an externally visible symbol, because it may be overridden by a
10425 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
10426 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10428 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
10431 #if defined (OBJ_COFF) && defined (TE_PE)
10432 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
10433 && S_IS_WEAK (fragP
->fr_symbol
))
10437 /* Symbol is undefined in this segment, or we need to keep a
10438 reloc so that weak symbols can be overridden. */
10439 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
10440 enum bfd_reloc_code_real reloc_type
;
10441 unsigned char *opcode
;
10444 if (fragP
->fr_var
!= NO_RELOC
)
10445 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
10446 else if (size
== 2)
10447 reloc_type
= BFD_RELOC_16_PCREL
;
10448 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10449 else if (need_plt32_p (fragP
->fr_symbol
))
10450 reloc_type
= BFD_RELOC_X86_64_PLT32
;
10453 reloc_type
= BFD_RELOC_32_PCREL
;
10455 old_fr_fix
= fragP
->fr_fix
;
10456 opcode
= (unsigned char *) fragP
->fr_opcode
;
10458 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
10461 /* Make jmp (0xeb) a (d)word displacement jump. */
10463 fragP
->fr_fix
+= size
;
10464 fix_new (fragP
, old_fr_fix
, size
,
10466 fragP
->fr_offset
, 1,
10472 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
10474 /* Negate the condition, and branch past an
10475 unconditional jump. */
10478 /* Insert an unconditional jump. */
10480 /* We added two extra opcode bytes, and have a two byte
10482 fragP
->fr_fix
+= 2 + 2;
10483 fix_new (fragP
, old_fr_fix
+ 2, 2,
10485 fragP
->fr_offset
, 1,
10489 /* Fall through. */
10492 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
10496 fragP
->fr_fix
+= 1;
10497 fixP
= fix_new (fragP
, old_fr_fix
, 1,
10499 fragP
->fr_offset
, 1,
10500 BFD_RELOC_8_PCREL
);
10501 fixP
->fx_signed
= 1;
10505 /* This changes the byte-displacement jump 0x7N
10506 to the (d)word-displacement jump 0x0f,0x8N. */
10507 opcode
[1] = opcode
[0] + 0x10;
10508 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
10509 /* We've added an opcode byte. */
10510 fragP
->fr_fix
+= 1 + size
;
10511 fix_new (fragP
, old_fr_fix
+ 1, size
,
10513 fragP
->fr_offset
, 1,
10518 BAD_CASE (fragP
->fr_subtype
);
10522 return fragP
->fr_fix
- old_fr_fix
;
10525 /* Guess size depending on current relax state. Initially the relax
10526 state will correspond to a short jump and we return 1, because
10527 the variable part of the frag (the branch offset) is one byte
10528 long. However, we can relax a section more than once and in that
10529 case we must either set fr_subtype back to the unrelaxed state,
10530 or return the value for the appropriate branch. */
10531 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
10534 /* Called after relax() is finished.
10536 In: Address of frag.
10537 fr_type == rs_machine_dependent.
10538 fr_subtype is what the address relaxed to.
10540 Out: Any fixSs and constants are set up.
10541 Caller will turn frag into a ".space 0". */
10544 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
10547 unsigned char *opcode
;
10548 unsigned char *where_to_put_displacement
= NULL
;
10549 offsetT target_address
;
10550 offsetT opcode_address
;
10551 unsigned int extension
= 0;
10552 offsetT displacement_from_opcode_start
;
10554 opcode
= (unsigned char *) fragP
->fr_opcode
;
10556 /* Address we want to reach in file space. */
10557 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
10559 /* Address opcode resides at in file space. */
10560 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
10562 /* Displacement from opcode start to fill into instruction. */
10563 displacement_from_opcode_start
= target_address
- opcode_address
;
10565 if ((fragP
->fr_subtype
& BIG
) == 0)
10567 /* Don't have to change opcode. */
10568 extension
= 1; /* 1 opcode + 1 displacement */
10569 where_to_put_displacement
= &opcode
[1];
10573 if (no_cond_jump_promotion
10574 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
10575 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
10576 _("long jump required"));
10578 switch (fragP
->fr_subtype
)
10580 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
10581 extension
= 4; /* 1 opcode + 4 displacement */
10583 where_to_put_displacement
= &opcode
[1];
10586 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
10587 extension
= 2; /* 1 opcode + 2 displacement */
10589 where_to_put_displacement
= &opcode
[1];
10592 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
10593 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
10594 extension
= 5; /* 2 opcode + 4 displacement */
10595 opcode
[1] = opcode
[0] + 0x10;
10596 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
10597 where_to_put_displacement
= &opcode
[2];
10600 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
10601 extension
= 3; /* 2 opcode + 2 displacement */
10602 opcode
[1] = opcode
[0] + 0x10;
10603 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
10604 where_to_put_displacement
= &opcode
[2];
10607 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
10612 where_to_put_displacement
= &opcode
[3];
10616 BAD_CASE (fragP
->fr_subtype
);
10621 /* If size if less then four we are sure that the operand fits,
10622 but if it's 4, then it could be that the displacement is larger
10624 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
10626 && ((addressT
) (displacement_from_opcode_start
- extension
10627 + ((addressT
) 1 << 31))
10628 > (((addressT
) 2 << 31) - 1)))
10630 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
10631 _("jump target out of range"));
10632 /* Make us emit 0. */
10633 displacement_from_opcode_start
= extension
;
10635 /* Now put displacement after opcode. */
10636 md_number_to_chars ((char *) where_to_put_displacement
,
10637 (valueT
) (displacement_from_opcode_start
- extension
),
10638 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
10639 fragP
->fr_fix
+= extension
;
10642 /* Apply a fixup (fixP) to segment data, once it has been determined
10643 by our caller that we have all the info we need to fix it up.
10645 Parameter valP is the pointer to the value of the bits.
10647 On the 386, immediates, displacements, and data pointers are all in
10648 the same (little-endian) format, so we don't need to care about which
10649 we are handling. */
10652 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
10654 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
10655 valueT value
= *valP
;
10657 #if !defined (TE_Mach)
10658 if (fixP
->fx_pcrel
)
10660 switch (fixP
->fx_r_type
)
10666 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
10669 case BFD_RELOC_X86_64_32S
:
10670 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
10673 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
10676 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
10681 if (fixP
->fx_addsy
!= NULL
10682 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
10683 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
10684 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
10685 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
10686 && !use_rela_relocations
)
10688 /* This is a hack. There should be a better way to handle this.
10689 This covers for the fact that bfd_install_relocation will
10690 subtract the current location (for partial_inplace, PC relative
10691 relocations); see more below. */
10695 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
10698 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10700 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10703 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
10705 if ((sym_seg
== seg
10706 || (symbol_section_p (fixP
->fx_addsy
)
10707 && sym_seg
!= absolute_section
))
10708 && !generic_force_reloc (fixP
))
10710 /* Yes, we add the values in twice. This is because
10711 bfd_install_relocation subtracts them out again. I think
10712 bfd_install_relocation is broken, but I don't dare change
10714 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10718 #if defined (OBJ_COFF) && defined (TE_PE)
10719 /* For some reason, the PE format does not store a
10720 section address offset for a PC relative symbol. */
10721 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
10722 || S_IS_WEAK (fixP
->fx_addsy
))
10723 value
+= md_pcrel_from (fixP
);
10726 #if defined (OBJ_COFF) && defined (TE_PE)
10727 if (fixP
->fx_addsy
!= NULL
10728 && S_IS_WEAK (fixP
->fx_addsy
)
10729 /* PR 16858: Do not modify weak function references. */
10730 && ! fixP
->fx_pcrel
)
10732 #if !defined (TE_PEP)
10733 /* For x86 PE weak function symbols are neither PC-relative
10734 nor do they set S_IS_FUNCTION. So the only reliable way
10735 to detect them is to check the flags of their containing
10737 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
10738 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
10742 value
-= S_GET_VALUE (fixP
->fx_addsy
);
10746 /* Fix a few things - the dynamic linker expects certain values here,
10747 and we must not disappoint it. */
10748 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10749 if (IS_ELF
&& fixP
->fx_addsy
)
10750 switch (fixP
->fx_r_type
)
10752 case BFD_RELOC_386_PLT32
:
10753 case BFD_RELOC_X86_64_PLT32
:
10754 /* Make the jump instruction point to the address of the operand.
10755 At runtime we merely add the offset to the actual PLT entry.
10756 NB: Subtract the offset size only for jump instructions. */
10757 if (fixP
->fx_pcrel
)
10761 case BFD_RELOC_386_TLS_GD
:
10762 case BFD_RELOC_386_TLS_LDM
:
10763 case BFD_RELOC_386_TLS_IE_32
:
10764 case BFD_RELOC_386_TLS_IE
:
10765 case BFD_RELOC_386_TLS_GOTIE
:
10766 case BFD_RELOC_386_TLS_GOTDESC
:
10767 case BFD_RELOC_X86_64_TLSGD
:
10768 case BFD_RELOC_X86_64_TLSLD
:
10769 case BFD_RELOC_X86_64_GOTTPOFF
:
10770 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10771 value
= 0; /* Fully resolved at runtime. No addend. */
10773 case BFD_RELOC_386_TLS_LE
:
10774 case BFD_RELOC_386_TLS_LDO_32
:
10775 case BFD_RELOC_386_TLS_LE_32
:
10776 case BFD_RELOC_X86_64_DTPOFF32
:
10777 case BFD_RELOC_X86_64_DTPOFF64
:
10778 case BFD_RELOC_X86_64_TPOFF32
:
10779 case BFD_RELOC_X86_64_TPOFF64
:
10780 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10783 case BFD_RELOC_386_TLS_DESC_CALL
:
10784 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10785 value
= 0; /* Fully resolved at runtime. No addend. */
10786 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10790 case BFD_RELOC_VTABLE_INHERIT
:
10791 case BFD_RELOC_VTABLE_ENTRY
:
10798 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
10800 #endif /* !defined (TE_Mach) */
10802 /* Are we finished with this relocation now? */
10803 if (fixP
->fx_addsy
== NULL
)
10805 #if defined (OBJ_COFF) && defined (TE_PE)
10806 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
10809 /* Remember value for tc_gen_reloc. */
10810 fixP
->fx_addnumber
= value
;
10811 /* Clear out the frag for now. */
10815 else if (use_rela_relocations
)
10817 fixP
->fx_no_overflow
= 1;
10818 /* Remember value for tc_gen_reloc. */
10819 fixP
->fx_addnumber
= value
;
10823 md_number_to_chars (p
, value
, fixP
->fx_size
);
10827 md_atof (int type
, char *litP
, int *sizeP
)
10829 /* This outputs the LITTLENUMs in REVERSE order;
10830 in accord with the bigendian 386. */
10831 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: large enough for "(0x..)" plus
   the NUL terminator.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the invalid character C for a diagnostic: quoted when
   printable, as a hex escape otherwise.  Returns a pointer to a
   static buffer, overwritten by the next call.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
10848 /* REG_STRING starts *before* REGISTER_PREFIX. */
10850 static const reg_entry
*
10851 parse_real_register (char *reg_string
, char **end_op
)
10853 char *s
= reg_string
;
10855 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
10856 const reg_entry
*r
;
10858 /* Skip possible REGISTER_PREFIX and possible whitespace. */
10859 if (*s
== REGISTER_PREFIX
)
10862 if (is_space_char (*s
))
10865 p
= reg_name_given
;
10866 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
10868 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
10869 return (const reg_entry
*) NULL
;
10873 /* For naked regs, make sure that we are not dealing with an identifier.
10874 This prevents confusing an identifier like `eax_var' with register
10876 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
10877 return (const reg_entry
*) NULL
;
10881 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
10883 /* Handle floating point regs, allowing spaces in the (i) part. */
10884 if (r
== i386_regtab
/* %st is first entry of table */)
10886 if (!cpu_arch_flags
.bitfield
.cpu8087
10887 && !cpu_arch_flags
.bitfield
.cpu287
10888 && !cpu_arch_flags
.bitfield
.cpu387
)
10889 return (const reg_entry
*) NULL
;
10891 if (is_space_char (*s
))
10896 if (is_space_char (*s
))
10898 if (*s
>= '0' && *s
<= '7')
10900 int fpr
= *s
- '0';
10902 if (is_space_char (*s
))
10907 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
10912 /* We have "%st(" then garbage. */
10913 return (const reg_entry
*) NULL
;
10917 if (r
== NULL
|| allow_pseudo_reg
)
10920 if (operand_type_all_zero (&r
->reg_type
))
10921 return (const reg_entry
*) NULL
;
10923 if ((r
->reg_type
.bitfield
.dword
10924 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
10925 || r
->reg_type
.bitfield
.class == RegCR
10926 || r
->reg_type
.bitfield
.class == RegDR
10927 || r
->reg_type
.bitfield
.class == RegTR
)
10928 && !cpu_arch_flags
.bitfield
.cpui386
)
10929 return (const reg_entry
*) NULL
;
10931 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
10932 return (const reg_entry
*) NULL
;
10934 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
10936 if (r
->reg_type
.bitfield
.zmmword
|| r
->reg_type
.bitfield
.regmask
)
10937 return (const reg_entry
*) NULL
;
10939 if (!cpu_arch_flags
.bitfield
.cpuavx
)
10941 if (r
->reg_type
.bitfield
.ymmword
)
10942 return (const reg_entry
*) NULL
;
10944 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
10945 return (const reg_entry
*) NULL
;
10949 if (r
->reg_type
.bitfield
.regbnd
&& !cpu_arch_flags
.bitfield
.cpumpx
)
10950 return (const reg_entry
*) NULL
;
10952 /* Don't allow fake index register unless allow_index_reg isn't 0. */
10953 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
10954 return (const reg_entry
*) NULL
;
10956 /* Upper 16 vector registers are only available with VREX in 64bit
10957 mode, and require EVEX encoding. */
10958 if (r
->reg_flags
& RegVRex
)
10960 if (!cpu_arch_flags
.bitfield
.cpuavx512f
10961 || flag_code
!= CODE_64BIT
)
10962 return (const reg_entry
*) NULL
;
10964 i
.vec_encoding
= vex_encoding_evex
;
10967 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
10968 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
10969 && flag_code
!= CODE_64BIT
)
10970 return (const reg_entry
*) NULL
;
10972 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
10974 return (const reg_entry
*) NULL
;
10979 /* REG_STRING starts *before* REGISTER_PREFIX. */
10981 static const reg_entry
*
10982 parse_register (char *reg_string
, char **end_op
)
10984 const reg_entry
*r
;
10986 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
10987 r
= parse_real_register (reg_string
, end_op
);
10992 char *save
= input_line_pointer
;
10996 input_line_pointer
= reg_string
;
10997 c
= get_symbol_name (®_string
);
10998 symbolP
= symbol_find (reg_string
);
10999 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
11001 const expressionS
*e
= symbol_get_value_expression (symbolP
);
11003 know (e
->X_op
== O_register
);
11004 know (e
->X_add_number
>= 0
11005 && (valueT
) e
->X_add_number
< i386_regtab_size
);
11006 r
= i386_regtab
+ e
->X_add_number
;
11007 if ((r
->reg_flags
& RegVRex
))
11008 i
.vec_encoding
= vex_encoding_evex
;
11009 *end_op
= input_line_pointer
;
11011 *input_line_pointer
= c
;
11012 input_line_pointer
= save
;
11018 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
11020 const reg_entry
*r
;
11021 char *end
= input_line_pointer
;
11024 r
= parse_register (name
, &input_line_pointer
);
11025 if (r
&& end
<= input_line_pointer
)
11027 *nextcharP
= *input_line_pointer
;
11028 *input_line_pointer
= 0;
11029 e
->X_op
= O_register
;
11030 e
->X_add_number
= r
- i386_regtab
;
11033 input_line_pointer
= end
;
11035 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
11039 md_operand (expressionS
*e
)
11042 const reg_entry
*r
;
11044 switch (*input_line_pointer
)
11046 case REGISTER_PREFIX
:
11047 r
= parse_real_register (input_line_pointer
, &end
);
11050 e
->X_op
= O_register
;
11051 e
->X_add_number
= r
- i386_regtab
;
11052 input_line_pointer
= end
;
11057 gas_assert (intel_syntax
);
11058 end
= input_line_pointer
++;
11060 if (*input_line_pointer
== ']')
11062 ++input_line_pointer
;
11063 e
->X_op_symbol
= make_expr_symbol (e
);
11064 e
->X_add_symbol
= NULL
;
11065 e
->X_add_number
= 0;
11070 e
->X_op
= O_absent
;
11071 input_line_pointer
= end
;
11078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11079 const char *md_shortopts
= "kVQ:sqnO::";
11081 const char *md_shortopts
= "qnO::";
11084 #define OPTION_32 (OPTION_MD_BASE + 0)
11085 #define OPTION_64 (OPTION_MD_BASE + 1)
11086 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
11087 #define OPTION_MARCH (OPTION_MD_BASE + 3)
11088 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
11089 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
11090 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
11091 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
11092 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
11093 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
11094 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
11095 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
11096 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
11097 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
11098 #define OPTION_X32 (OPTION_MD_BASE + 14)
11099 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
11100 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
11101 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
11102 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
11103 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
11104 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
11105 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
11106 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
11107 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
11108 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
11109 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
11110 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
11112 struct option md_longopts
[] =
11114 {"32", no_argument
, NULL
, OPTION_32
},
11115 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11116 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
11117 {"64", no_argument
, NULL
, OPTION_64
},
11119 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11120 {"x32", no_argument
, NULL
, OPTION_X32
},
11121 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
11122 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
11124 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
11125 {"march", required_argument
, NULL
, OPTION_MARCH
},
11126 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
11127 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
11128 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
11129 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
11130 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
11131 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
11132 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
11133 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
11134 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
11135 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
11136 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
11137 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
11138 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
11139 # if defined (TE_PE) || defined (TE_PEP)
11140 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
11142 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
11143 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
11144 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
11145 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
11146 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
11147 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
11148 {NULL
, no_argument
, NULL
, 0}
11150 size_t md_longopts_size
= sizeof (md_longopts
);
11153 md_parse_option (int c
, const char *arg
)
11156 char *arch
, *next
, *saved
;
11161 optimize_align_code
= 0;
11165 quiet_warnings
= 1;
11168 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11169 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
11170 should be emitted or not. FIXME: Not implemented. */
11172 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
11176 /* -V: SVR4 argument to print version ID. */
11178 print_version_id ();
11181 /* -k: Ignore for FreeBSD compatibility. */
11186 /* -s: On i386 Solaris, this tells the native assembler to use
11187 .stab instead of .stab.excl. We always use .stab anyhow. */
11190 case OPTION_MSHARED
:
11194 case OPTION_X86_USED_NOTE
:
11195 if (strcasecmp (arg
, "yes") == 0)
11197 else if (strcasecmp (arg
, "no") == 0)
11200 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
11205 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11206 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
11209 const char **list
, **l
;
11211 list
= bfd_target_list ();
11212 for (l
= list
; *l
!= NULL
; l
++)
11213 if (CONST_STRNEQ (*l
, "elf64-x86-64")
11214 || strcmp (*l
, "coff-x86-64") == 0
11215 || strcmp (*l
, "pe-x86-64") == 0
11216 || strcmp (*l
, "pei-x86-64") == 0
11217 || strcmp (*l
, "mach-o-x86-64") == 0)
11219 default_arch
= "x86_64";
11223 as_fatal (_("no compiled in support for x86_64"));
11229 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11233 const char **list
, **l
;
11235 list
= bfd_target_list ();
11236 for (l
= list
; *l
!= NULL
; l
++)
11237 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
11239 default_arch
= "x86_64:32";
11243 as_fatal (_("no compiled in support for 32bit x86_64"));
11247 as_fatal (_("32bit x86_64 is only supported for ELF"));
11252 default_arch
= "i386";
11255 case OPTION_DIVIDE
:
11256 #ifdef SVR4_COMMENT_CHARS
11261 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
11263 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
11267 i386_comment_chars
= n
;
11273 saved
= xstrdup (arg
);
11275 /* Allow -march=+nosse. */
11281 as_fatal (_("invalid -march= option: `%s'"), arg
);
11282 next
= strchr (arch
, '+');
11285 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11287 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
11290 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
11293 cpu_arch_name
= cpu_arch
[j
].name
;
11294 cpu_sub_arch_name
= NULL
;
11295 cpu_arch_flags
= cpu_arch
[j
].flags
;
11296 cpu_arch_isa
= cpu_arch
[j
].type
;
11297 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
11298 if (!cpu_arch_tune_set
)
11300 cpu_arch_tune
= cpu_arch_isa
;
11301 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
11305 else if (*cpu_arch
[j
].name
== '.'
11306 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
11308 /* ISA extension. */
11309 i386_cpu_flags flags
;
11311 flags
= cpu_flags_or (cpu_arch_flags
,
11312 cpu_arch
[j
].flags
);
11314 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
11316 if (cpu_sub_arch_name
)
11318 char *name
= cpu_sub_arch_name
;
11319 cpu_sub_arch_name
= concat (name
,
11321 (const char *) NULL
);
11325 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
11326 cpu_arch_flags
= flags
;
11327 cpu_arch_isa_flags
= flags
;
11331 = cpu_flags_or (cpu_arch_isa_flags
,
11332 cpu_arch
[j
].flags
);
11337 if (j
>= ARRAY_SIZE (cpu_arch
))
11339 /* Disable an ISA extension. */
11340 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
11341 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
11343 i386_cpu_flags flags
;
11345 flags
= cpu_flags_and_not (cpu_arch_flags
,
11346 cpu_noarch
[j
].flags
);
11347 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
11349 if (cpu_sub_arch_name
)
11351 char *name
= cpu_sub_arch_name
;
11352 cpu_sub_arch_name
= concat (arch
,
11353 (const char *) NULL
);
11357 cpu_sub_arch_name
= xstrdup (arch
);
11358 cpu_arch_flags
= flags
;
11359 cpu_arch_isa_flags
= flags
;
11364 if (j
>= ARRAY_SIZE (cpu_noarch
))
11365 j
= ARRAY_SIZE (cpu_arch
);
11368 if (j
>= ARRAY_SIZE (cpu_arch
))
11369 as_fatal (_("invalid -march= option: `%s'"), arg
);
11373 while (next
!= NULL
);
11379 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
11380 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11382 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
11384 cpu_arch_tune_set
= 1;
11385 cpu_arch_tune
= cpu_arch
[j
].type
;
11386 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
11390 if (j
>= ARRAY_SIZE (cpu_arch
))
11391 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
11394 case OPTION_MMNEMONIC
:
11395 if (strcasecmp (arg
, "att") == 0)
11396 intel_mnemonic
= 0;
11397 else if (strcasecmp (arg
, "intel") == 0)
11398 intel_mnemonic
= 1;
11400 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
11403 case OPTION_MSYNTAX
:
11404 if (strcasecmp (arg
, "att") == 0)
11406 else if (strcasecmp (arg
, "intel") == 0)
11409 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
11412 case OPTION_MINDEX_REG
:
11413 allow_index_reg
= 1;
11416 case OPTION_MNAKED_REG
:
11417 allow_naked_reg
= 1;
11420 case OPTION_MSSE2AVX
:
11424 case OPTION_MSSE_CHECK
:
11425 if (strcasecmp (arg
, "error") == 0)
11426 sse_check
= check_error
;
11427 else if (strcasecmp (arg
, "warning") == 0)
11428 sse_check
= check_warning
;
11429 else if (strcasecmp (arg
, "none") == 0)
11430 sse_check
= check_none
;
11432 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
11435 case OPTION_MOPERAND_CHECK
:
11436 if (strcasecmp (arg
, "error") == 0)
11437 operand_check
= check_error
;
11438 else if (strcasecmp (arg
, "warning") == 0)
11439 operand_check
= check_warning
;
11440 else if (strcasecmp (arg
, "none") == 0)
11441 operand_check
= check_none
;
11443 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
11446 case OPTION_MAVXSCALAR
:
11447 if (strcasecmp (arg
, "128") == 0)
11448 avxscalar
= vex128
;
11449 else if (strcasecmp (arg
, "256") == 0)
11450 avxscalar
= vex256
;
11452 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
11455 case OPTION_MVEXWIG
:
11456 if (strcmp (arg
, "0") == 0)
11458 else if (strcmp (arg
, "1") == 0)
11461 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
11464 case OPTION_MADD_BND_PREFIX
:
11465 add_bnd_prefix
= 1;
11468 case OPTION_MEVEXLIG
:
11469 if (strcmp (arg
, "128") == 0)
11470 evexlig
= evexl128
;
11471 else if (strcmp (arg
, "256") == 0)
11472 evexlig
= evexl256
;
11473 else if (strcmp (arg
, "512") == 0)
11474 evexlig
= evexl512
;
11476 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
11479 case OPTION_MEVEXRCIG
:
11480 if (strcmp (arg
, "rne") == 0)
11482 else if (strcmp (arg
, "rd") == 0)
11484 else if (strcmp (arg
, "ru") == 0)
11486 else if (strcmp (arg
, "rz") == 0)
11489 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
11492 case OPTION_MEVEXWIG
:
11493 if (strcmp (arg
, "0") == 0)
11495 else if (strcmp (arg
, "1") == 0)
11498 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
11501 # if defined (TE_PE) || defined (TE_PEP)
11502 case OPTION_MBIG_OBJ
:
11507 case OPTION_MOMIT_LOCK_PREFIX
:
11508 if (strcasecmp (arg
, "yes") == 0)
11509 omit_lock_prefix
= 1;
11510 else if (strcasecmp (arg
, "no") == 0)
11511 omit_lock_prefix
= 0;
11513 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
11516 case OPTION_MFENCE_AS_LOCK_ADD
:
11517 if (strcasecmp (arg
, "yes") == 0)
11519 else if (strcasecmp (arg
, "no") == 0)
11522 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
11525 case OPTION_MRELAX_RELOCATIONS
:
11526 if (strcasecmp (arg
, "yes") == 0)
11527 generate_relax_relocations
= 1;
11528 else if (strcasecmp (arg
, "no") == 0)
11529 generate_relax_relocations
= 0;
11531 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
11534 case OPTION_MAMD64
:
11538 case OPTION_MINTEL64
:
11546 /* Turn off -Os. */
11547 optimize_for_space
= 0;
11549 else if (*arg
== 's')
11551 optimize_for_space
= 1;
11552 /* Turn on all encoding optimizations. */
11553 optimize
= INT_MAX
;
11557 optimize
= atoi (arg
);
11558 /* Turn off -Os. */
11559 optimize_for_space
= 0;
/* Fixed-width line template used when wrapping the --help CPU lists.
   NOTE(review): the template is a run of spaces whose length sets the
   wrap column; the mangled source dropped the literal — 80 columns
   restored here, confirm against upstream.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN bytes) to the message line being built at P inside
   MESSAGE (writing starts at START); flush the line to STREAM and
   begin a new one when it would overflow.  *LEFT_P tracks remaining
   room.  Returns the updated write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
11611 show_arch (FILE *stream
, int ext
, int check
)
11613 static char message
[] = MESSAGE_TEMPLATE
;
11614 char *start
= message
+ 27;
11616 int size
= sizeof (MESSAGE_TEMPLATE
);
11623 left
= size
- (start
- message
);
11624 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11626 /* Should it be skipped? */
11627 if (cpu_arch
[j
].skip
)
11630 name
= cpu_arch
[j
].name
;
11631 len
= cpu_arch
[j
].len
;
11634 /* It is an extension. Skip if we aren't asked to show it. */
11645 /* It is an processor. Skip if we show only extension. */
11648 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
11650 /* It is an impossible processor - skip. */
11654 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
11657 /* Display disabled extensions. */
11659 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
11661 name
= cpu_noarch
[j
].name
;
11662 len
= cpu_noarch
[j
].len
;
11663 p
= output_message (stream
, p
, message
, start
, &left
, name
,
11668 fprintf (stream
, "%s\n", message
);
11672 md_show_usage (FILE *stream
)
11674 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11675 fprintf (stream
, _("\
11676 -Qy, -Qn ignored\n\
11677 -V print assembler version number\n\
11680 fprintf (stream
, _("\
11681 -n Do not optimize code alignment\n\
11682 -q quieten some warnings\n"));
11683 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11684 fprintf (stream
, _("\
11687 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11688 || defined (TE_PE) || defined (TE_PEP))
11689 fprintf (stream
, _("\
11690 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
11692 #ifdef SVR4_COMMENT_CHARS
11693 fprintf (stream
, _("\
11694 --divide do not treat `/' as a comment character\n"));
11696 fprintf (stream
, _("\
11697 --divide ignored\n"));
11699 fprintf (stream
, _("\
11700 -march=CPU[,+EXTENSION...]\n\
11701 generate code for CPU and EXTENSION, CPU is one of:\n"));
11702 show_arch (stream
, 0, 1);
11703 fprintf (stream
, _("\
11704 EXTENSION is combination of:\n"));
11705 show_arch (stream
, 1, 0);
11706 fprintf (stream
, _("\
11707 -mtune=CPU optimize for CPU, CPU is one of:\n"));
11708 show_arch (stream
, 0, 0);
11709 fprintf (stream
, _("\
11710 -msse2avx encode SSE instructions with VEX prefix\n"));
11711 fprintf (stream
, _("\
11712 -msse-check=[none|error|warning] (default: warning)\n\
11713 check SSE instructions\n"));
11714 fprintf (stream
, _("\
11715 -moperand-check=[none|error|warning] (default: warning)\n\
11716 check operand combinations for validity\n"));
11717 fprintf (stream
, _("\
11718 -mavxscalar=[128|256] (default: 128)\n\
11719 encode scalar AVX instructions with specific vector\n\
11721 fprintf (stream
, _("\
11722 -mvexwig=[0|1] (default: 0)\n\
11723 encode VEX instructions with specific VEX.W value\n\
11724 for VEX.W bit ignored instructions\n"));
11725 fprintf (stream
, _("\
11726 -mevexlig=[128|256|512] (default: 128)\n\
11727 encode scalar EVEX instructions with specific vector\n\
11729 fprintf (stream
, _("\
11730 -mevexwig=[0|1] (default: 0)\n\
11731 encode EVEX instructions with specific EVEX.W value\n\
11732 for EVEX.W bit ignored instructions\n"));
11733 fprintf (stream
, _("\
11734 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
11735 encode EVEX instructions with specific EVEX.RC value\n\
11736 for SAE-only ignored instructions\n"));
11737 fprintf (stream
, _("\
11738 -mmnemonic=[att|intel] "));
11739 if (SYSV386_COMPAT
)
11740 fprintf (stream
, _("(default: att)\n"));
11742 fprintf (stream
, _("(default: intel)\n"));
11743 fprintf (stream
, _("\
11744 use AT&T/Intel mnemonic\n"));
11745 fprintf (stream
, _("\
11746 -msyntax=[att|intel] (default: att)\n\
11747 use AT&T/Intel syntax\n"));
11748 fprintf (stream
, _("\
11749 -mindex-reg support pseudo index registers\n"));
11750 fprintf (stream
, _("\
11751 -mnaked-reg don't require `%%' prefix for registers\n"));
11752 fprintf (stream
, _("\
11753 -madd-bnd-prefix add BND prefix for all valid branches\n"));
11754 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11755 fprintf (stream
, _("\
11756 -mshared disable branch optimization for shared code\n"));
11757 fprintf (stream
, _("\
11758 -mx86-used-note=[no|yes] "));
11759 if (DEFAULT_X86_USED_NOTE
)
11760 fprintf (stream
, _("(default: yes)\n"));
11762 fprintf (stream
, _("(default: no)\n"));
11763 fprintf (stream
, _("\
11764 generate x86 used ISA and feature properties\n"));
11766 #if defined (TE_PE) || defined (TE_PEP)
11767 fprintf (stream
, _("\
11768 -mbig-obj generate big object files\n"));
11770 fprintf (stream
, _("\
11771 -momit-lock-prefix=[no|yes] (default: no)\n\
11772 strip all lock prefixes\n"));
11773 fprintf (stream
, _("\
11774 -mfence-as-lock-add=[no|yes] (default: no)\n\
11775 encode lfence, mfence and sfence as\n\
11776 lock addl $0x0, (%%{re}sp)\n"));
11777 fprintf (stream
, _("\
11778 -mrelax-relocations=[no|yes] "));
11779 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
11780 fprintf (stream
, _("(default: yes)\n"));
11782 fprintf (stream
, _("(default: no)\n"));
11783 fprintf (stream
, _("\
11784 generate relax relocations\n"));
11785 fprintf (stream
, _("\
11786 -mamd64 accept only AMD64 ISA [default]\n"));
11787 fprintf (stream
, _("\
11788 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Sets the code flag (16/32/64 bit)
   from the configured default architecture, fills in default ISA/tune
   flags when the user gave none, and returns the BFD target name that
   matches the selected ABI and output flavour.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" selects the LP64 ABI, "x86_64:32" style suffix (any
	 non-NUL after the prefix) selects x32.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No -march= was given: force the Intel MCU defaults.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* Fall back to the generic 32- or 64-bit flag sets when nothing was
     selected explicitly.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
11924 md_undefined_symbol (char *name
)
11926 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
11927 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
11928 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
11929 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
11933 if (symbol_find (name
))
11934 as_bad (_("GOT already in symbol table"));
11935 GOT_symbol
= symbol_new (name
, undefined_section
,
11936 (valueT
) 0, &zero_address_frag
);
11943 /* Round up a section size to the appropriate boundary. */
11946 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
11948 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11949 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
11951 /* For a.out, force the section size to be aligned. If we don't do
11952 this, BFD will align it for us, but it will not write out the
11953 final bytes of the section. This may be a bug in BFD, but it is
11954 easier to fix it here since that is how the other a.out targets
11958 align
= bfd_section_alignment (segment
);
11959 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
11966 /* On the i386, PC-relative offsets are relative to the start of the
11967 next instruction. That is, the address of the offset, plus its
11968 size, since the offset is always the last part of the insn. */
11971 md_pcrel_from (fixS
*fixP
)
11973 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11979 s_bss (int ignore ATTRIBUTE_UNUSED
)
11983 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11985 obj_elf_section_change_hook ();
11987 temp
= get_absolute_expression ();
11988 subseg_set (bss_section
, (subsegT
) temp
);
11989 demand_empty_rest_of_line ();
11995 i386_validate_fix (fixS
*fixp
)
11997 if (fixp
->fx_subsy
)
11999 if (fixp
->fx_subsy
== GOT_symbol
)
12001 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
12005 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12006 if (fixp
->fx_tcbit2
)
12007 fixp
->fx_r_type
= (fixp
->fx_tcbit
12008 ? BFD_RELOC_X86_64_REX_GOTPCRELX
12009 : BFD_RELOC_X86_64_GOTPCRELX
);
12012 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
12017 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
12019 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
12021 fixp
->fx_subsy
= 0;
12024 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12025 else if (!object_64bit
)
12027 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
12028 && fixp
->fx_tcbit2
)
12029 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
12035 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
12038 bfd_reloc_code_real_type code
;
12040 switch (fixp
->fx_r_type
)
12042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12043 case BFD_RELOC_SIZE32
:
12044 case BFD_RELOC_SIZE64
:
12045 if (S_IS_DEFINED (fixp
->fx_addsy
)
12046 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
12048 /* Resolve size relocation against local symbol to size of
12049 the symbol plus addend. */
12050 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
12051 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
12052 && !fits_in_unsigned_long (value
))
12053 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12054 _("symbol size computation overflow"));
12055 fixp
->fx_addsy
= NULL
;
12056 fixp
->fx_subsy
= NULL
;
12057 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
12061 /* Fall through. */
12063 case BFD_RELOC_X86_64_PLT32
:
12064 case BFD_RELOC_X86_64_GOT32
:
12065 case BFD_RELOC_X86_64_GOTPCREL
:
12066 case BFD_RELOC_X86_64_GOTPCRELX
:
12067 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12068 case BFD_RELOC_386_PLT32
:
12069 case BFD_RELOC_386_GOT32
:
12070 case BFD_RELOC_386_GOT32X
:
12071 case BFD_RELOC_386_GOTOFF
:
12072 case BFD_RELOC_386_GOTPC
:
12073 case BFD_RELOC_386_TLS_GD
:
12074 case BFD_RELOC_386_TLS_LDM
:
12075 case BFD_RELOC_386_TLS_LDO_32
:
12076 case BFD_RELOC_386_TLS_IE_32
:
12077 case BFD_RELOC_386_TLS_IE
:
12078 case BFD_RELOC_386_TLS_GOTIE
:
12079 case BFD_RELOC_386_TLS_LE_32
:
12080 case BFD_RELOC_386_TLS_LE
:
12081 case BFD_RELOC_386_TLS_GOTDESC
:
12082 case BFD_RELOC_386_TLS_DESC_CALL
:
12083 case BFD_RELOC_X86_64_TLSGD
:
12084 case BFD_RELOC_X86_64_TLSLD
:
12085 case BFD_RELOC_X86_64_DTPOFF32
:
12086 case BFD_RELOC_X86_64_DTPOFF64
:
12087 case BFD_RELOC_X86_64_GOTTPOFF
:
12088 case BFD_RELOC_X86_64_TPOFF32
:
12089 case BFD_RELOC_X86_64_TPOFF64
:
12090 case BFD_RELOC_X86_64_GOTOFF64
:
12091 case BFD_RELOC_X86_64_GOTPC32
:
12092 case BFD_RELOC_X86_64_GOT64
:
12093 case BFD_RELOC_X86_64_GOTPCREL64
:
12094 case BFD_RELOC_X86_64_GOTPC64
:
12095 case BFD_RELOC_X86_64_GOTPLT64
:
12096 case BFD_RELOC_X86_64_PLTOFF64
:
12097 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12098 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12099 case BFD_RELOC_RVA
:
12100 case BFD_RELOC_VTABLE_ENTRY
:
12101 case BFD_RELOC_VTABLE_INHERIT
:
12103 case BFD_RELOC_32_SECREL
:
12105 code
= fixp
->fx_r_type
;
12107 case BFD_RELOC_X86_64_32S
:
12108 if (!fixp
->fx_pcrel
)
12110 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
12111 code
= fixp
->fx_r_type
;
12114 /* Fall through. */
12116 if (fixp
->fx_pcrel
)
12118 switch (fixp
->fx_size
)
12121 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12122 _("can not do %d byte pc-relative relocation"),
12124 code
= BFD_RELOC_32_PCREL
;
12126 case 1: code
= BFD_RELOC_8_PCREL
; break;
12127 case 2: code
= BFD_RELOC_16_PCREL
; break;
12128 case 4: code
= BFD_RELOC_32_PCREL
; break;
12130 case 8: code
= BFD_RELOC_64_PCREL
; break;
12136 switch (fixp
->fx_size
)
12139 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12140 _("can not do %d byte relocation"),
12142 code
= BFD_RELOC_32
;
12144 case 1: code
= BFD_RELOC_8
; break;
12145 case 2: code
= BFD_RELOC_16
; break;
12146 case 4: code
= BFD_RELOC_32
; break;
12148 case 8: code
= BFD_RELOC_64
; break;
12155 if ((code
== BFD_RELOC_32
12156 || code
== BFD_RELOC_32_PCREL
12157 || code
== BFD_RELOC_X86_64_32S
)
12159 && fixp
->fx_addsy
== GOT_symbol
)
12162 code
= BFD_RELOC_386_GOTPC
;
12164 code
= BFD_RELOC_X86_64_GOTPC32
;
12166 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
12168 && fixp
->fx_addsy
== GOT_symbol
)
12170 code
= BFD_RELOC_X86_64_GOTPC64
;
12173 rel
= XNEW (arelent
);
12174 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
12175 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
12177 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
12179 if (!use_rela_relocations
)
12181 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
12182 vtable entry to be used in the relocation's section offset. */
12183 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
12184 rel
->address
= fixp
->fx_offset
;
12185 #if defined (OBJ_COFF) && defined (TE_PE)
12186 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
12187 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
12192 /* Use the rela in 64bit mode. */
12195 if (disallow_64bit_reloc
)
12198 case BFD_RELOC_X86_64_DTPOFF64
:
12199 case BFD_RELOC_X86_64_TPOFF64
:
12200 case BFD_RELOC_64_PCREL
:
12201 case BFD_RELOC_X86_64_GOTOFF64
:
12202 case BFD_RELOC_X86_64_GOT64
:
12203 case BFD_RELOC_X86_64_GOTPCREL64
:
12204 case BFD_RELOC_X86_64_GOTPC64
:
12205 case BFD_RELOC_X86_64_GOTPLT64
:
12206 case BFD_RELOC_X86_64_PLTOFF64
:
12207 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12208 _("cannot represent relocation type %s in x32 mode"),
12209 bfd_get_reloc_code_name (code
));
12215 if (!fixp
->fx_pcrel
)
12216 rel
->addend
= fixp
->fx_offset
;
12220 case BFD_RELOC_X86_64_PLT32
:
12221 case BFD_RELOC_X86_64_GOT32
:
12222 case BFD_RELOC_X86_64_GOTPCREL
:
12223 case BFD_RELOC_X86_64_GOTPCRELX
:
12224 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12225 case BFD_RELOC_X86_64_TLSGD
:
12226 case BFD_RELOC_X86_64_TLSLD
:
12227 case BFD_RELOC_X86_64_GOTTPOFF
:
12228 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12229 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12230 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
12233 rel
->addend
= (section
->vma
12235 + fixp
->fx_addnumber
12236 + md_pcrel_from (fixp
));
12241 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
12242 if (rel
->howto
== NULL
)
12244 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12245 _("cannot represent relocation type %s"),
12246 bfd_get_reloc_code_name (code
));
12247 /* Set howto to a garbage value so that we can keep going. */
12248 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
12249 gas_assert (rel
->howto
!= NULL
);
12255 #include "tc-i386-intel.c"
12258 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
12260 int saved_naked_reg
;
12261 char saved_register_dot
;
12263 saved_naked_reg
= allow_naked_reg
;
12264 allow_naked_reg
= 1;
12265 saved_register_dot
= register_chars
['.'];
12266 register_chars
['.'] = '.';
12267 allow_pseudo_reg
= 1;
12268 expression_and_evaluate (exp
);
12269 allow_pseudo_reg
= 0;
12270 register_chars
['.'] = saved_register_dot
;
12271 allow_naked_reg
= saved_naked_reg
;
12273 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
12275 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
12277 exp
->X_op
= O_constant
;
12278 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
12279 .dw2_regnum
[flag_code
>> 1];
12282 exp
->X_op
= O_illegal
;
12287 tc_x86_frame_initial_instructions (void)
12289 static unsigned int sp_regno
[2];
12291 if (!sp_regno
[flag_code
>> 1])
12293 char *saved_input
= input_line_pointer
;
12294 char sp
[][4] = {"esp", "rsp"};
12297 input_line_pointer
= sp
[flag_code
>> 1];
12298 tc_x86_parse_to_dw2regnum (&exp
);
12299 gas_assert (exp
.X_op
== O_constant
);
12300 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
12301 input_line_pointer
= saved_input
;
12304 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
12305 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
12309 x86_dwarf2_addr_size (void)
12311 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
12312 if (x86_elf_abi
== X86_64_X32_ABI
)
12315 return bfd_arch_bits_per_address (stdoutput
) / 8;
12319 i386_elf_section_type (const char *str
, size_t len
)
12321 if (flag_code
== CODE_64BIT
12322 && len
== sizeof ("unwind") - 1
12323 && strncmp (str
, "unwind", 6) == 0)
12324 return SHT_X86_64_UNWIND
;
12331 i386_solaris_fix_up_eh_frame (segT sec
)
12333 if (flag_code
== CODE_64BIT
)
12334 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
12340 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
12344 exp
.X_op
= O_secrel
;
12345 exp
.X_add_symbol
= symbol
;
12346 exp
.X_add_number
= 0;
12347 emit_expr (&exp
, size
);
12351 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12352 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
12355 x86_64_section_letter (int letter
, const char **ptr_msg
)
12357 if (flag_code
== CODE_64BIT
)
12360 return SHF_X86_64_LARGE
;
12362 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
12365 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
12370 x86_64_section_word (char *str
, size_t len
)
12372 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
12373 return SHF_X86_64_LARGE
;
12379 handle_large_common (int small ATTRIBUTE_UNUSED
)
12381 if (flag_code
!= CODE_64BIT
)
12383 s_comm_internal (0, elf_common_parse
);
12384 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
12388 static segT lbss_section
;
12389 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
12390 asection
*saved_bss_section
= bss_section
;
12392 if (lbss_section
== NULL
)
12394 flagword applicable
;
12395 segT seg
= now_seg
;
12396 subsegT subseg
= now_subseg
;
12398 /* The .lbss section is for local .largecomm symbols. */
12399 lbss_section
= subseg_new (".lbss", 0);
12400 applicable
= bfd_applicable_section_flags (stdoutput
);
12401 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
12402 seg_info (lbss_section
)->bss
= 1;
12404 subseg_set (seg
, subseg
);
12407 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
12408 bss_section
= lbss_section
;
12410 s_comm_internal (0, elf_common_parse
);
12412 elf_com_section_ptr
= saved_com_section_ptr
;
12413 bss_section
= saved_bss_section
;
12416 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */