1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2019 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.  */
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef REGISTER_WARNINGS
48 #define REGISTER_WARNINGS 1
51 #ifndef INFER_ADDR_PREFIX
52 #define INFER_ADDR_PREFIX 1
56 #define DEFAULT_ARCH "i386"
61 #define INLINE __inline__
67 /* Prefixes will be emitted in the order defined below.
WAIT_PREFIX must be the first prefix since FWAIT is really an
69 instruction, and so must come before any prefixes.
70 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
71 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
77 #define HLE_PREFIX REP_PREFIX
78 #define BND_PREFIX REP_PREFIX
80 #define REX_PREFIX 6 /* must come last. */
81 #define MAX_PREFIXES 7 /* max prefixes per opcode */
83 /* we define the syntax here (modulo base,index,scale syntax) */
84 #define REGISTER_PREFIX '%'
85 #define IMMEDIATE_PREFIX '$'
86 #define ABSOLUTE_PREFIX '*'
88 /* these are the instruction mnemonic suffixes in AT&T syntax or
89 memory operand size in Intel syntax. */
90 #define WORD_MNEM_SUFFIX 'w'
91 #define BYTE_MNEM_SUFFIX 'b'
92 #define SHORT_MNEM_SUFFIX 's'
93 #define LONG_MNEM_SUFFIX 'l'
94 #define QWORD_MNEM_SUFFIX 'q'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
97 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
99 #define END_OF_INSN '\0'
101 /* This matches the C -> StaticRounding alias in the opcode table. */
102 #define commutative staticrounding
/* 'templates' is for grouping together 'template' structures for opcodes
   of the same name.  This is only used for storing the insns in the grand
   ole hash table of insns.
   The templates themselves start at START and range up to (but not
   including) END.  */
113 const insn_template
*start
;
114 const insn_template
*end
;
118 /* 386 operand encoding bytes: see 386 book for details of this. */
121 unsigned int regmem
; /* codes register or memory operand */
122 unsigned int reg
; /* codes register operand (or extended opcode) */
123 unsigned int mode
; /* how to interpret regmem & reg */
127 /* x86-64 extension prefix. */
128 typedef int rex_byte
;
130 /* 386 opcode byte to code indirect addressing. */
139 /* x86 arch names, types and features */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 enum processor_type type
; /* arch type */
145 i386_cpu_flags flags
; /* cpu feature flags */
146 unsigned int skip
; /* show_arch should skip this. */
150 /* Used to turn off indicated flags. */
153 const char *name
; /* arch name */
154 unsigned int len
; /* arch string length */
155 i386_cpu_flags flags
; /* cpu feature flags */
159 static void update_code_flag (int, int);
160 static void set_code_flag (int);
161 static void set_16bit_gcc_code_flag (int);
162 static void set_intel_syntax (int);
163 static void set_intel_mnemonic (int);
164 static void set_allow_index_reg (int);
165 static void set_check (int);
166 static void set_cpu_arch (int);
168 static void pe_directive_secrel (int);
170 static void signed_cons (int);
171 static char *output_invalid (int c
);
172 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
174 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
176 static int i386_att_operand (char *);
177 static int i386_intel_operand (char *, int);
178 static int i386_intel_simplify (expressionS
*);
179 static int i386_intel_parse_name (const char *, expressionS
*);
180 static const reg_entry
*parse_register (char *, char **);
181 static char *parse_insn (char *, char *);
182 static char *parse_operands (char *, const char *);
183 static void swap_operands (void);
184 static void swap_2_operands (int, int);
185 static void optimize_imm (void);
186 static void optimize_disp (void);
187 static const insn_template
*match_template (char);
188 static int check_string (void);
189 static int process_suffix (void);
190 static int check_byte_reg (void);
191 static int check_long_reg (void);
192 static int check_qword_reg (void);
193 static int check_word_reg (void);
194 static int finalize_imm (void);
195 static int process_operands (void);
196 static const seg_entry
*build_modrm_byte (void);
197 static void output_insn (void);
198 static void output_imm (fragS
*, offsetT
);
199 static void output_disp (fragS
*, offsetT
);
201 static void s_bss (int);
203 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
204 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
206 /* GNU_PROPERTY_X86_ISA_1_USED. */
207 static unsigned int x86_isa_1_used
;
208 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
209 static unsigned int x86_feature_2_used
;
210 /* Generate x86 used ISA and feature properties. */
211 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
214 static const char *default_arch
= DEFAULT_ARCH
;
216 /* This struct describes rounding control and SAE in the instruction. */
230 static struct RC_Operation rc_op
;
232 /* The struct describes masking, applied to OPERAND in the instruction.
233 MASK is a pointer to the corresponding mask register. ZEROING tells
234 whether merging or zeroing mask is used. */
235 struct Mask_Operation
237 const reg_entry
*mask
;
238 unsigned int zeroing
;
239 /* The operand where this operation is associated. */
243 static struct Mask_Operation mask_op
;
245 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
247 struct Broadcast_Operation
249 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
252 /* Index of broadcasted operand. */
255 /* Number of bytes to broadcast. */
259 static struct Broadcast_Operation broadcast_op
;
264 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
265 unsigned char bytes
[4];
267 /* Destination or source register specifier. */
268 const reg_entry
*register_specifier
;
271 /* 'md_assemble ()' gathers together information and puts it into a
278 const reg_entry
*regs
;
283 operand_size_mismatch
,
284 operand_type_mismatch
,
285 register_type_mismatch
,
286 number_of_operands_mismatch
,
287 invalid_instruction_suffix
,
289 unsupported_with_intel_mnemonic
,
292 invalid_vsib_address
,
293 invalid_vector_register_set
,
294 unsupported_vector_index_register
,
295 unsupported_broadcast
,
298 mask_not_on_destination
,
301 rc_sae_operand_not_last_imm
,
302 invalid_register_operand
,
307 /* TM holds the template for the insn were currently assembling. */
310 /* SUFFIX holds the instruction size suffix for byte, word, dword
311 or qword, if given. */
314 /* OPERANDS gives the number of given operands. */
315 unsigned int operands
;
317 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
318 of given register, displacement, memory operands and immediate
320 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
322 /* TYPES [i] is the type (see above #defines) which tells us how to
323 use OP[i] for the corresponding operand. */
324 i386_operand_type types
[MAX_OPERANDS
];
326 /* Displacement expression, immediate expression, or register for each
328 union i386_op op
[MAX_OPERANDS
];
330 /* Flags for operands. */
331 unsigned int flags
[MAX_OPERANDS
];
332 #define Operand_PCrel 1
333 #define Operand_Mem 2
335 /* Relocation type for operand */
336 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
338 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
339 the base index byte below. */
340 const reg_entry
*base_reg
;
341 const reg_entry
*index_reg
;
342 unsigned int log2_scale_factor
;
344 /* SEG gives the seg_entries of this insn. They are zero unless
345 explicit segment overrides are given. */
346 const seg_entry
*seg
[2];
348 /* Copied first memory operand string, for re-checking. */
351 /* PREFIX holds all the given prefix opcodes (usually null).
352 PREFIXES is the number of prefix opcodes. */
353 unsigned int prefixes
;
354 unsigned char prefix
[MAX_PREFIXES
];
356 /* Has MMX register operands. */
357 bfd_boolean has_regmmx
;
359 /* Has XMM register operands. */
360 bfd_boolean has_regxmm
;
362 /* Has YMM register operands. */
363 bfd_boolean has_regymm
;
365 /* Has ZMM register operands. */
366 bfd_boolean has_regzmm
;
368 /* RM and SIB are the modrm byte and the sib byte where the
369 addressing modes of this insn are encoded. */
376 /* Masking attributes. */
377 struct Mask_Operation
*mask
;
379 /* Rounding control and SAE attributes. */
380 struct RC_Operation
*rounding
;
382 /* Broadcasting attributes. */
383 struct Broadcast_Operation
*broadcast
;
385 /* Compressed disp8*N attribute. */
386 unsigned int memshift
;
388 /* Prefer load or store in encoding. */
391 dir_encoding_default
= 0,
397 /* Prefer 8bit or 32bit displacement in encoding. */
400 disp_encoding_default
= 0,
405 /* Prefer the REX byte in encoding. */
406 bfd_boolean rex_encoding
;
408 /* Disable instruction size optimization. */
409 bfd_boolean no_optimize
;
411 /* How to encode vector instructions. */
414 vex_encoding_default
= 0,
421 const char *rep_prefix
;
424 const char *hle_prefix
;
426 /* Have BND prefix. */
427 const char *bnd_prefix
;
429 /* Have NOTRACK prefix. */
430 const char *notrack_prefix
;
433 enum i386_error error
;
436 typedef struct _i386_insn i386_insn
;
438 /* Link RC type with corresponding string, that'll be looked for in
447 static const struct RC_name RC_NamesTable
[] =
449 { rne
, STRING_COMMA_LEN ("rn-sae") },
450 { rd
, STRING_COMMA_LEN ("rd-sae") },
451 { ru
, STRING_COMMA_LEN ("ru-sae") },
452 { rz
, STRING_COMMA_LEN ("rz-sae") },
453 { saeonly
, STRING_COMMA_LEN ("sae") },
456 /* List of chars besides those in app.c:symbol_chars that can start an
457 operand. Used to prevent the scrubber eating vital white-space. */
458 const char extra_symbol_chars
[] = "*%-([{}"
467 #if (defined (TE_I386AIX) \
468 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
469 && !defined (TE_GNU) \
470 && !defined (TE_LINUX) \
471 && !defined (TE_NACL) \
472 && !defined (TE_FreeBSD) \
473 && !defined (TE_DragonFly) \
474 && !defined (TE_NetBSD)))
475 /* This array holds the chars that always start a comment. If the
476 pre-processor is disabled, these aren't very useful. The option
477 --divide will remove '/' from this list. */
478 const char *i386_comment_chars
= "#/";
479 #define SVR4_COMMENT_CHARS 1
480 #define PREFIX_SEPARATOR '\\'
483 const char *i386_comment_chars
= "#";
484 #define PREFIX_SEPARATOR '/'
487 /* This array holds the chars that only start a comment at the beginning of
488 a line. If the line seems to have the form '# 123 filename'
489 .line and .file directives will appear in the pre-processed output.
490 Note that input_file.c hand checks for '#' at the beginning of the
491 first line of the input file. This is because the compiler outputs
492 #NO_APP at the beginning of its output.
493 Also note that comments started like this one will always work if
494 '/' isn't otherwise defined. */
495 const char line_comment_chars
[] = "#/";
497 const char line_separator_chars
[] = ";";
/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant,
   as in 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis.  Each table is a 256-entry membership
   map indexed by (unsigned char): non-zero means the character belongs
   to the named class.  They are filled in at startup (initializer not
   visible in this chunk).  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  The argument is parenthesized before the cast so
   that expression arguments (e.g. is_digit_char (*p + 1)) expand
   correctly; a bare `(unsigned char) x' would bind the cast to only
   the first operand of such an expression.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
/* Terminate the string at S in place, pushing the displaced character
   onto save_stack so RESTORE_END_STRING can undo it.  NOTE(review):
   depth is capped at 32 with no overflow check -- callers must keep
   SAVE/RESTORE strictly balanced and shallow; verify save_stack_p is
   initialized to save_stack before first use (init not visible here).  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE, restoring the saved byte.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
537 /* The instruction we're assembling. */
540 /* Possible templates for current insn. */
541 static const templates
*current_templates
;
543 /* Per instruction expressionS buffers: max displacements & immediates. */
544 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
545 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
547 /* Current operand we are working on. */
548 static int this_operand
= -1;
550 /* We support four different modes. FLAG_CODE variable is used to distinguish
558 static enum flag_code flag_code
;
559 static unsigned int object_64bit
;
560 static unsigned int disallow_64bit_reloc
;
561 static int use_rela_relocations
= 0;
563 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
564 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
565 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
567 /* The ELF ABI to use. */
575 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
578 #if defined (TE_PE) || defined (TE_PEP)
579 /* Use big object file format. */
580 static int use_big_obj
= 0;
583 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
584 /* 1 if generating code for a shared library. */
585 static int shared
= 0;
588 /* 1 for intel syntax,
590 static int intel_syntax
= 0;
592 /* 1 for Intel64 ISA,
596 /* 1 for intel mnemonic,
597 0 if att mnemonic. */
598 static int intel_mnemonic
= !SYSV386_COMPAT
;
600 /* 1 if pseudo registers are permitted. */
601 static int allow_pseudo_reg
= 0;
603 /* 1 if register prefix % not required. */
604 static int allow_naked_reg
= 0;
606 /* 1 if the assembler should add BND prefix for all control-transferring
607 instructions supporting it, even if this prefix wasn't specified
609 static int add_bnd_prefix
= 0;
611 /* 1 if pseudo index register, eiz/riz, is allowed . */
612 static int allow_index_reg
= 0;
614 /* 1 if the assembler should ignore LOCK prefix, even if it was
615 specified explicitly. */
616 static int omit_lock_prefix
= 0;
618 /* 1 if the assembler should encode lfence, mfence, and sfence as
619 "lock addl $0, (%{re}sp)". */
620 static int avoid_fence
= 0;
622 /* 1 if the assembler should generate relax relocations. */
624 static int generate_relax_relocations
625 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
627 static enum check_kind
633 sse_check
, operand_check
= check_warning
;
636 1. Clear the REX_W bit with register operand if possible.
637 2. Above plus use 128bit vector instruction to clear the full vector
640 static int optimize
= 0;
643 1. Clear the REX_W bit with register operand if possible.
644 2. Above plus use 128bit vector instruction to clear the full vector
646 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
649 static int optimize_for_space
= 0;
651 /* Register prefix used for error message. */
652 static const char *register_prefix
= "%";
654 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
655 leave, push, and pop instructions so that gcc has the same stack
656 frame as in 32 bit mode. */
657 static char stackop_size
= '\0';
659 /* Non-zero to optimize code alignment. */
660 int optimize_align_code
= 1;
662 /* Non-zero to quieten some warnings. */
663 static int quiet_warnings
= 0;
666 static const char *cpu_arch_name
= NULL
;
667 static char *cpu_sub_arch_name
= NULL
;
669 /* CPU feature flags. */
670 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
672 /* If we have selected a cpu we are generating instructions for. */
673 static int cpu_arch_tune_set
= 0;
675 /* Cpu we are generating instructions for. */
676 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
678 /* CPU feature flags of cpu we are generating instructions for. */
679 static i386_cpu_flags cpu_arch_tune_flags
;
681 /* CPU instruction set architecture used. */
682 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
684 /* CPU feature flags of instruction set architecture used. */
685 i386_cpu_flags cpu_arch_isa_flags
;
687 /* If set, conditional jumps are not automatically promoted to handle
688 larger than a byte offset. */
689 static unsigned int no_cond_jump_promotion
= 0;
691 /* Encode SSE instructions with VEX prefix. */
692 static unsigned int sse2avx
;
694 /* Encode scalar AVX instructions with specific vector length. */
701 /* Encode VEX WIG instructions with specific vex.w. */
708 /* Encode scalar EVEX LIG instructions with specific vector length. */
716 /* Encode EVEX WIG instructions with specific evex.w. */
723 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
724 static enum rc_type evexrcig
= rne
;
726 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
727 static symbolS
*GOT_symbol
;
729 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
730 unsigned int x86_dwarf2_return_column
;
732 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
733 int x86_cie_data_alignment
;
735 /* Interface to relax_segment.
736 There are 3 major relax states for 386 jump insns because the
737 different types of jumps add different sizes to frags when we're
738 figuring out what sort of jump to choose to reach a given label. */
741 #define UNCOND_JUMP 0
743 #define COND_JUMP86 2
748 #define SMALL16 (SMALL | CODE16)
750 #define BIG16 (BIG | CODE16)
754 #define INLINE __inline__
760 #define ENCODE_RELAX_STATE(type, size) \
761 ((relax_substateT) (((type) << 2) | (size)))
762 #define TYPE_FROM_RELAX_STATE(s) \
764 #define DISP_SIZE_FROM_RELAX_STATE(s) \
765 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
767 /* This table is used by relax_frag to promote short jumps to long
768 ones where necessary. SMALL (short) jumps may be promoted to BIG
769 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
770 don't allow a short jump in a 32 bit code segment to be promoted to
771 a 16 bit offset jump because it's slower (requires data size
772 prefix), and doesn't work, unless the destination is in the bottom
773 64k of the code segment (The top 16 bits of eip are zeroed). */
775 const relax_typeS md_relax_table
[] =
778 1) most positive reach of this state,
779 2) most negative reach of this state,
780 3) how many bytes this mode will have in the variable part of the frag
781 4) which index into the table to try if we can't fit into this one. */
783 /* UNCOND_JUMP states. */
784 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
785 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
786 /* dword jmp adds 4 bytes to frag:
787 0 extra opcode bytes, 4 displacement bytes. */
/* word jmp adds 2 bytes to frag:
790 0 extra opcode bytes, 2 displacement bytes. */
793 /* COND_JUMP states. */
794 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
795 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
796 /* dword conditionals adds 5 bytes to frag:
797 1 extra opcode byte, 4 displacement bytes. */
799 /* word conditionals add 3 bytes to frag:
800 1 extra opcode byte, 2 displacement bytes. */
803 /* COND_JUMP86 states. */
804 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
805 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
806 /* dword conditionals adds 5 bytes to frag:
807 1 extra opcode byte, 4 displacement bytes. */
809 /* word conditionals add 4 bytes to frag:
810 1 displacement byte and a 3 byte long branch insn. */
814 static const arch_entry cpu_arch
[] =
816 /* Do not replace the first two entries - i386_target_format()
817 relies on them being there in this order. */
818 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
819 CPU_GENERIC32_FLAGS
, 0 },
820 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
821 CPU_GENERIC64_FLAGS
, 0 },
822 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
824 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
826 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
828 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
830 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
832 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
834 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
836 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
838 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
839 CPU_PENTIUMPRO_FLAGS
, 0 },
840 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
842 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
844 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
846 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
848 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
849 CPU_NOCONA_FLAGS
, 0 },
850 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
852 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
854 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
855 CPU_CORE2_FLAGS
, 1 },
856 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
857 CPU_CORE2_FLAGS
, 0 },
858 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
859 CPU_COREI7_FLAGS
, 0 },
860 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
862 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
864 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
865 CPU_IAMCU_FLAGS
, 0 },
866 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
868 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
870 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
871 CPU_ATHLON_FLAGS
, 0 },
872 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
874 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
876 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
878 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
879 CPU_AMDFAM10_FLAGS
, 0 },
880 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
881 CPU_BDVER1_FLAGS
, 0 },
882 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
883 CPU_BDVER2_FLAGS
, 0 },
884 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
885 CPU_BDVER3_FLAGS
, 0 },
886 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
887 CPU_BDVER4_FLAGS
, 0 },
888 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
889 CPU_ZNVER1_FLAGS
, 0 },
890 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
891 CPU_ZNVER2_FLAGS
, 0 },
892 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
893 CPU_BTVER1_FLAGS
, 0 },
894 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
895 CPU_BTVER2_FLAGS
, 0 },
896 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
898 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
900 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
904 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
906 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
908 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
910 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
912 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
914 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
916 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
917 CPU_SSSE3_FLAGS
, 0 },
918 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
919 CPU_SSE4_1_FLAGS
, 0 },
920 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
921 CPU_SSE4_2_FLAGS
, 0 },
922 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
923 CPU_SSE4_2_FLAGS
, 0 },
924 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
926 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
928 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
929 CPU_AVX512F_FLAGS
, 0 },
930 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
931 CPU_AVX512CD_FLAGS
, 0 },
932 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
933 CPU_AVX512ER_FLAGS
, 0 },
934 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
935 CPU_AVX512PF_FLAGS
, 0 },
936 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
937 CPU_AVX512DQ_FLAGS
, 0 },
938 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
939 CPU_AVX512BW_FLAGS
, 0 },
940 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
941 CPU_AVX512VL_FLAGS
, 0 },
942 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
945 CPU_VMFUNC_FLAGS
, 0 },
946 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
949 CPU_XSAVE_FLAGS
, 0 },
950 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
951 CPU_XSAVEOPT_FLAGS
, 0 },
952 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
953 CPU_XSAVEC_FLAGS
, 0 },
954 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
955 CPU_XSAVES_FLAGS
, 0 },
956 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
958 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
959 CPU_PCLMUL_FLAGS
, 0 },
960 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
961 CPU_PCLMUL_FLAGS
, 1 },
962 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
963 CPU_FSGSBASE_FLAGS
, 0 },
964 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
965 CPU_RDRND_FLAGS
, 0 },
966 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
968 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
970 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
972 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
974 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
976 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
978 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
979 CPU_MOVBE_FLAGS
, 0 },
980 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
982 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
984 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
985 CPU_LZCNT_FLAGS
, 0 },
986 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
988 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
990 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
991 CPU_INVPCID_FLAGS
, 0 },
992 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
993 CPU_CLFLUSH_FLAGS
, 0 },
994 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
996 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
997 CPU_SYSCALL_FLAGS
, 0 },
998 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
999 CPU_RDTSCP_FLAGS
, 0 },
1000 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1001 CPU_3DNOW_FLAGS
, 0 },
1002 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1003 CPU_3DNOWA_FLAGS
, 0 },
1004 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1005 CPU_PADLOCK_FLAGS
, 0 },
1006 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1007 CPU_SVME_FLAGS
, 1 },
1008 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1009 CPU_SVME_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1011 CPU_SSE4A_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1014 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1016 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1020 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1021 CPU_RDSEED_FLAGS
, 0 },
1022 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1023 CPU_PRFCHW_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1025 CPU_SMAP_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1031 CPU_CLFLUSHOPT_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1033 CPU_PREFETCHWT1_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1036 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1037 CPU_CLWB_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1039 CPU_AVX512IFMA_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1041 CPU_AVX512VBMI_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1043 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1045 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1046 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512_VBMI2_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512_VNNI_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512_BITALG_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1055 CPU_CLZERO_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1057 CPU_MWAITX_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1059 CPU_OSPKE_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1061 CPU_RDPID_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1063 CPU_PTWRITE_FLAGS
, 0 },
1064 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1066 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1067 CPU_SHSTK_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1069 CPU_GFNI_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1071 CPU_VAES_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1073 CPU_VPCLMULQDQ_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1075 CPU_WBNOINVD_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1077 CPU_PCONFIG_FLAGS
, 0 },
1078 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1079 CPU_WAITPKG_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1081 CPU_CLDEMOTE_FLAGS
, 0 },
1082 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1083 CPU_MOVDIRI_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1085 CPU_MOVDIR64B_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1087 CPU_AVX512_BF16_FLAGS
, 0 },
1088 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1089 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1091 CPU_ENQCMD_FLAGS
, 0 },
1092 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1093 CPU_RDPRU_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1095 CPU_MCOMMIT_FLAGS
, 0 },
1098 static const noarch_entry cpu_noarch
[] =
1100 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1101 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1102 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1103 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1104 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1105 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1106 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1107 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1108 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1109 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1110 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1111 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1112 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1113 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1114 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1115 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1116 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1117 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1118 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1119 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1120 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1121 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1122 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1123 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1124 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1125 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1126 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1127 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1128 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1129 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1130 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1131 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1132 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1133 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1134 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1135 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1136 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1137 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1141 /* Like s_lcomm_internal in gas/read.c but the alignment string
1142 is allowed to be optional. */
1145 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1152 && *input_line_pointer
== ',')
1154 align
= parse_align (needs_align
- 1);
1156 if (align
== (addressT
) -1)
1171 bss_alloc (symbolP
, size
, align
);
1176 pe_lcomm (int needs_align
)
1178 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1182 const pseudo_typeS md_pseudo_table
[] =
1184 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1185 {"align", s_align_bytes
, 0},
1187 {"align", s_align_ptwo
, 0},
1189 {"arch", set_cpu_arch
, 0},
1193 {"lcomm", pe_lcomm
, 1},
1195 {"ffloat", float_cons
, 'f'},
1196 {"dfloat", float_cons
, 'd'},
1197 {"tfloat", float_cons
, 'x'},
1199 {"slong", signed_cons
, 4},
1200 {"noopt", s_ignore
, 0},
1201 {"optim", s_ignore
, 0},
1202 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1203 {"code16", set_code_flag
, CODE_16BIT
},
1204 {"code32", set_code_flag
, CODE_32BIT
},
1206 {"code64", set_code_flag
, CODE_64BIT
},
1208 {"intel_syntax", set_intel_syntax
, 1},
1209 {"att_syntax", set_intel_syntax
, 0},
1210 {"intel_mnemonic", set_intel_mnemonic
, 1},
1211 {"att_mnemonic", set_intel_mnemonic
, 0},
1212 {"allow_index_reg", set_allow_index_reg
, 1},
1213 {"disallow_index_reg", set_allow_index_reg
, 0},
1214 {"sse_check", set_check
, 0},
1215 {"operand_check", set_check
, 1},
1216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1217 {"largecomm", handle_large_common
, 0},
1219 {"file", dwarf2_directive_file
, 0},
1220 {"loc", dwarf2_directive_loc
, 0},
1221 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1224 {"secrel32", pe_directive_secrel
, 0},
1229 /* For interface with expression (). */
1230 extern char *input_line_pointer
;
1232 /* Hash table for instruction mnemonic lookup. */
1233 static struct hash_control
*op_hash
;
1235 /* Hash table for register lookup. */
1236 static struct hash_control
*reg_hash
;
1238 /* Various efficient no-op patterns for aligning code labels.
1239 Note: Don't try to assemble the instructions in the comments.
1240 0L and 0w are not legal. */
1241 static const unsigned char f32_1
[] =
1243 static const unsigned char f32_2
[] =
1244 {0x66,0x90}; /* xchg %ax,%ax */
1245 static const unsigned char f32_3
[] =
1246 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1247 static const unsigned char f32_4
[] =
1248 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1249 static const unsigned char f32_6
[] =
1250 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1251 static const unsigned char f32_7
[] =
1252 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1253 static const unsigned char f16_3
[] =
1254 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1255 static const unsigned char f16_4
[] =
1256 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1257 static const unsigned char jump_disp8
[] =
1258 {0xeb}; /* jmp disp8 */
1259 static const unsigned char jump32_disp32
[] =
1260 {0xe9}; /* jmp disp32 */
1261 static const unsigned char jump16_disp32
[] =
1262 {0x66,0xe9}; /* jmp disp32 */
1263 /* 32-bit NOPs patterns. */
1264 static const unsigned char *const f32_patt
[] = {
1265 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1267 /* 16-bit NOPs patterns. */
1268 static const unsigned char *const f16_patt
[] = {
1269 f32_1
, f32_2
, f16_3
, f16_4
1271 /* nopl (%[re]ax) */
1272 static const unsigned char alt_3
[] =
1274 /* nopl 0(%[re]ax) */
1275 static const unsigned char alt_4
[] =
1276 {0x0f,0x1f,0x40,0x00};
1277 /* nopl 0(%[re]ax,%[re]ax,1) */
1278 static const unsigned char alt_5
[] =
1279 {0x0f,0x1f,0x44,0x00,0x00};
1280 /* nopw 0(%[re]ax,%[re]ax,1) */
1281 static const unsigned char alt_6
[] =
1282 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1283 /* nopl 0L(%[re]ax) */
1284 static const unsigned char alt_7
[] =
1285 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1286 /* nopl 0L(%[re]ax,%[re]ax,1) */
1287 static const unsigned char alt_8
[] =
1288 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1289 /* nopw 0L(%[re]ax,%[re]ax,1) */
1290 static const unsigned char alt_9
[] =
1291 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1292 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1293 static const unsigned char alt_10
[] =
1294 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1295 /* data16 nopw %cs:0L(%eax,%eax,1) */
1296 static const unsigned char alt_11
[] =
1297 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1298 /* 32-bit and 64-bit NOPs patterns. */
1299 static const unsigned char *const alt_patt
[] = {
1300 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1301 alt_9
, alt_10
, alt_11
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  (Comment typos
   "Genenerate"/"requsted" fixed.)

   PATT is a NULL-terminated-per-slot table: PATT[n - 1] is either an
   n-byte NOP pattern or NULL when no n-byte pattern exists.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)

{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  /* Emit whole max-size NOPs first; LAST is the remainder.  */
  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
1357 fits_in_imm7 (offsetT num
)
1359 return (num
& 0x7f) == num
;
1363 fits_in_imm31 (offsetT num
)
1365 return (num
& 0x7fffffff) == num
;
1368 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1369 single NOP instruction LIMIT. */
1372 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1374 const unsigned char *const *patt
= NULL
;
1375 int max_single_nop_size
;
1376 /* Maximum number of NOPs before switching to jump over NOPs. */
1377 int max_number_of_nops
;
1379 switch (fragP
->fr_type
)
1388 /* We need to decide which NOP sequence to use for 32bit and
1389 64bit. When -mtune= is used:
1391 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1392 PROCESSOR_GENERIC32, f32_patt will be used.
1393 2. For the rest, alt_patt will be used.
1395 When -mtune= isn't used, alt_patt will be used if
1396 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1399 When -march= or .arch is used, we can't use anything beyond
1400 cpu_arch_isa_flags. */
1402 if (flag_code
== CODE_16BIT
)
1405 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1406 /* Limit number of NOPs to 2 in 16-bit mode. */
1407 max_number_of_nops
= 2;
1411 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1413 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1414 switch (cpu_arch_tune
)
1416 case PROCESSOR_UNKNOWN
:
1417 /* We use cpu_arch_isa_flags to check if we SHOULD
1418 optimize with nops. */
1419 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1424 case PROCESSOR_PENTIUM4
:
1425 case PROCESSOR_NOCONA
:
1426 case PROCESSOR_CORE
:
1427 case PROCESSOR_CORE2
:
1428 case PROCESSOR_COREI7
:
1429 case PROCESSOR_L1OM
:
1430 case PROCESSOR_K1OM
:
1431 case PROCESSOR_GENERIC64
:
1433 case PROCESSOR_ATHLON
:
1435 case PROCESSOR_AMDFAM10
:
1437 case PROCESSOR_ZNVER
:
1441 case PROCESSOR_I386
:
1442 case PROCESSOR_I486
:
1443 case PROCESSOR_PENTIUM
:
1444 case PROCESSOR_PENTIUMPRO
:
1445 case PROCESSOR_IAMCU
:
1446 case PROCESSOR_GENERIC32
:
1453 switch (fragP
->tc_frag_data
.tune
)
1455 case PROCESSOR_UNKNOWN
:
1456 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1457 PROCESSOR_UNKNOWN. */
1461 case PROCESSOR_I386
:
1462 case PROCESSOR_I486
:
1463 case PROCESSOR_PENTIUM
:
1464 case PROCESSOR_IAMCU
:
1466 case PROCESSOR_ATHLON
:
1468 case PROCESSOR_AMDFAM10
:
1470 case PROCESSOR_ZNVER
:
1472 case PROCESSOR_GENERIC32
:
1473 /* We use cpu_arch_isa_flags to check if we CAN optimize
1475 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1480 case PROCESSOR_PENTIUMPRO
:
1481 case PROCESSOR_PENTIUM4
:
1482 case PROCESSOR_NOCONA
:
1483 case PROCESSOR_CORE
:
1484 case PROCESSOR_CORE2
:
1485 case PROCESSOR_COREI7
:
1486 case PROCESSOR_L1OM
:
1487 case PROCESSOR_K1OM
:
1488 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1493 case PROCESSOR_GENERIC64
:
1499 if (patt
== f32_patt
)
1501 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1502 /* Limit number of NOPs to 2 for older processors. */
1503 max_number_of_nops
= 2;
1507 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1508 /* Limit number of NOPs to 7 for newer processors. */
1509 max_number_of_nops
= 7;
1514 limit
= max_single_nop_size
;
1516 if (fragP
->fr_type
== rs_fill_nop
)
1518 /* Output NOPs for .nop directive. */
1519 if (limit
> max_single_nop_size
)
1521 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1522 _("invalid single nop size: %d "
1523 "(expect within [0, %d])"),
1524 limit
, max_single_nop_size
);
1529 fragP
->fr_var
= count
;
1531 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1533 /* Generate jump over NOPs. */
1534 offsetT disp
= count
- 2;
1535 if (fits_in_imm7 (disp
))
1537 /* Use "jmp disp8" if possible. */
1539 where
[0] = jump_disp8
[0];
1545 unsigned int size_of_jump
;
1547 if (flag_code
== CODE_16BIT
)
1549 where
[0] = jump16_disp32
[0];
1550 where
[1] = jump16_disp32
[1];
1555 where
[0] = jump32_disp32
[0];
1559 count
-= size_of_jump
+ 4;
1560 if (!fits_in_imm31 (count
))
1562 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1563 _("jump over nop padding out of range"));
1567 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1568 where
+= size_of_jump
+ 4;
1572 /* Generate multiple NOPs. */
1573 i386_output_nops (where
, patt
, count
, limit
);
1577 operand_type_all_zero (const union i386_operand_type
*x
)
1579 switch (ARRAY_SIZE(x
->array
))
1590 return !x
->array
[0];
1597 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1599 switch (ARRAY_SIZE(x
->array
))
1615 x
->bitfield
.class = ClassNone
;
1616 x
->bitfield
.instance
= InstanceNone
;
1620 operand_type_equal (const union i386_operand_type
*x
,
1621 const union i386_operand_type
*y
)
1623 switch (ARRAY_SIZE(x
->array
))
1626 if (x
->array
[2] != y
->array
[2])
1630 if (x
->array
[1] != y
->array
[1])
1634 return x
->array
[0] == y
->array
[0];
1642 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1644 switch (ARRAY_SIZE(x
->array
))
1659 return !x
->array
[0];
1666 cpu_flags_equal (const union i386_cpu_flags
*x
,
1667 const union i386_cpu_flags
*y
)
1669 switch (ARRAY_SIZE(x
->array
))
1672 if (x
->array
[3] != y
->array
[3])
1676 if (x
->array
[2] != y
->array
[2])
1680 if (x
->array
[1] != y
->array
[1])
1684 return x
->array
[0] == y
->array
[0];
1692 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1694 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1695 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1698 static INLINE i386_cpu_flags
1699 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1701 switch (ARRAY_SIZE (x
.array
))
1704 x
.array
[3] &= y
.array
[3];
1707 x
.array
[2] &= y
.array
[2];
1710 x
.array
[1] &= y
.array
[1];
1713 x
.array
[0] &= y
.array
[0];
1721 static INLINE i386_cpu_flags
1722 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1724 switch (ARRAY_SIZE (x
.array
))
1727 x
.array
[3] |= y
.array
[3];
1730 x
.array
[2] |= y
.array
[2];
1733 x
.array
[1] |= y
.array
[1];
1736 x
.array
[0] |= y
.array
[0];
1744 static INLINE i386_cpu_flags
1745 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1747 switch (ARRAY_SIZE (x
.array
))
1750 x
.array
[3] &= ~y
.array
[3];
1753 x
.array
[2] &= ~y
.array
[2];
1756 x
.array
[1] &= ~y
.array
[1];
1759 x
.array
[0] &= ~y
.array
[0];
1767 #define CPU_FLAGS_ARCH_MATCH 0x1
1768 #define CPU_FLAGS_64BIT_MATCH 0x2
1770 #define CPU_FLAGS_PERFECT_MATCH \
1771 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1773 /* Return CPU flags match bits. */
1776 cpu_flags_match (const insn_template
*t
)
1778 i386_cpu_flags x
= t
->cpu_flags
;
1779 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1781 x
.bitfield
.cpu64
= 0;
1782 x
.bitfield
.cpuno64
= 0;
1784 if (cpu_flags_all_zero (&x
))
1786 /* This instruction is available on all archs. */
1787 match
|= CPU_FLAGS_ARCH_MATCH
;
1791 /* This instruction is available only on some archs. */
1792 i386_cpu_flags cpu
= cpu_arch_flags
;
1794 /* AVX512VL is no standalone feature - match it and then strip it. */
1795 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1797 x
.bitfield
.cpuavx512vl
= 0;
1799 cpu
= cpu_flags_and (x
, cpu
);
1800 if (!cpu_flags_all_zero (&cpu
))
1802 if (x
.bitfield
.cpuavx
)
1804 /* We need to check a few extra flags with AVX. */
1805 if (cpu
.bitfield
.cpuavx
1806 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1807 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1808 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1809 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1810 match
|= CPU_FLAGS_ARCH_MATCH
;
1812 else if (x
.bitfield
.cpuavx512f
)
1814 /* We need to check a few extra flags with AVX512F. */
1815 if (cpu
.bitfield
.cpuavx512f
1816 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1817 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1818 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1819 match
|= CPU_FLAGS_ARCH_MATCH
;
1822 match
|= CPU_FLAGS_ARCH_MATCH
;
1828 static INLINE i386_operand_type
1829 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1831 if (x
.bitfield
.class != y
.bitfield
.class)
1832 x
.bitfield
.class = ClassNone
;
1833 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1834 x
.bitfield
.instance
= InstanceNone
;
1836 switch (ARRAY_SIZE (x
.array
))
1839 x
.array
[2] &= y
.array
[2];
1842 x
.array
[1] &= y
.array
[1];
1845 x
.array
[0] &= y
.array
[0];
1853 static INLINE i386_operand_type
1854 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1856 gas_assert (y
.bitfield
.class == ClassNone
);
1857 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1859 switch (ARRAY_SIZE (x
.array
))
1862 x
.array
[2] &= ~y
.array
[2];
1865 x
.array
[1] &= ~y
.array
[1];
1868 x
.array
[0] &= ~y
.array
[0];
1876 static INLINE i386_operand_type
1877 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1879 gas_assert (x
.bitfield
.class == ClassNone
||
1880 y
.bitfield
.class == ClassNone
||
1881 x
.bitfield
.class == y
.bitfield
.class);
1882 gas_assert (x
.bitfield
.instance
== InstanceNone
||
1883 y
.bitfield
.instance
== InstanceNone
||
1884 x
.bitfield
.instance
== y
.bitfield
.instance
);
1886 switch (ARRAY_SIZE (x
.array
))
1889 x
.array
[2] |= y
.array
[2];
1892 x
.array
[1] |= y
.array
[1];
1895 x
.array
[0] |= y
.array
[0];
1903 static INLINE i386_operand_type
1904 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1906 gas_assert (y
.bitfield
.class == ClassNone
);
1907 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1909 switch (ARRAY_SIZE (x
.array
))
1912 x
.array
[2] ^= y
.array
[2];
1915 x
.array
[1] ^= y
.array
[1];
1918 x
.array
[0] ^= y
.array
[0];
1926 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1927 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1928 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1929 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1930 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
1931 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
1932 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1933 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1934 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1935 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1936 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1937 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1938 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1939 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1940 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1941 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1942 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1953 operand_type_check (i386_operand_type t
, enum operand_type c
)
1958 return t
.bitfield
.class == Reg
;
1961 return (t
.bitfield
.imm8
1965 || t
.bitfield
.imm32s
1966 || t
.bitfield
.imm64
);
1969 return (t
.bitfield
.disp8
1970 || t
.bitfield
.disp16
1971 || t
.bitfield
.disp32
1972 || t
.bitfield
.disp32s
1973 || t
.bitfield
.disp64
);
1976 return (t
.bitfield
.disp8
1977 || t
.bitfield
.disp16
1978 || t
.bitfield
.disp32
1979 || t
.bitfield
.disp32s
1980 || t
.bitfield
.disp64
1981 || t
.bitfield
.baseindex
);
1990 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
1991 between operand GIVEN and operand WANTED for instruction template T. */
1994 match_operand_size (const insn_template
*t
, unsigned int wanted
,
1997 return !((i
.types
[given
].bitfield
.byte
1998 && !t
->operand_types
[wanted
].bitfield
.byte
)
1999 || (i
.types
[given
].bitfield
.word
2000 && !t
->operand_types
[wanted
].bitfield
.word
)
2001 || (i
.types
[given
].bitfield
.dword
2002 && !t
->operand_types
[wanted
].bitfield
.dword
)
2003 || (i
.types
[given
].bitfield
.qword
2004 && !t
->operand_types
[wanted
].bitfield
.qword
)
2005 || (i
.types
[given
].bitfield
.tbyte
2006 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2009 /* Return 1 if there is no conflict in SIMD register between operand
2010 GIVEN and operand WANTED for instruction template T. */
2013 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2016 return !((i
.types
[given
].bitfield
.xmmword
2017 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2018 || (i
.types
[given
].bitfield
.ymmword
2019 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2020 || (i
.types
[given
].bitfield
.zmmword
2021 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2024 /* Return 1 if there is no conflict in any size between operand GIVEN
2025 and operand WANTED for instruction template T. */
2028 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2031 return (match_operand_size (t
, wanted
, given
)
2032 && !((i
.types
[given
].bitfield
.unspecified
2034 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2035 || (i
.types
[given
].bitfield
.fword
2036 && !t
->operand_types
[wanted
].bitfield
.fword
)
2037 /* For scalar opcode templates to allow register and memory
2038 operands at the same time, some special casing is needed
2039 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2040 down-conversion vpmov*. */
2041 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2042 && !t
->opcode_modifier
.broadcast
2043 && (t
->operand_types
[wanted
].bitfield
.byte
2044 || t
->operand_types
[wanted
].bitfield
.word
2045 || t
->operand_types
[wanted
].bitfield
.dword
2046 || t
->operand_types
[wanted
].bitfield
.qword
))
2047 ? (i
.types
[given
].bitfield
.xmmword
2048 || i
.types
[given
].bitfield
.ymmword
2049 || i
.types
[given
].bitfield
.zmmword
)
2050 : !match_simd_size(t
, wanted
, given
))));
2053 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2054 operands for instruction template T, and it has MATCH_REVERSE set if there
2055 is no size conflict on any operands for the template with operands reversed
2056 (and the template allows for reversing in the first place). */
2058 #define MATCH_STRAIGHT 1
2059 #define MATCH_REVERSE 2
2061 static INLINE
unsigned int
2062 operand_size_match (const insn_template
*t
)
2064 unsigned int j
, match
= MATCH_STRAIGHT
;
2066 /* Don't check jump instructions. */
2067 if (t
->opcode_modifier
.jump
2068 || t
->opcode_modifier
.jumpbyte
2069 || t
->opcode_modifier
.jumpdword
2070 || t
->opcode_modifier
.jumpintersegment
)
2073 /* Check memory and accumulator operand size. */
2074 for (j
= 0; j
< i
.operands
; j
++)
2076 if (i
.types
[j
].bitfield
.class != Reg
2077 && i
.types
[j
].bitfield
.class != RegSIMD
2078 && t
->operand_types
[j
].bitfield
.anysize
)
2081 if (t
->operand_types
[j
].bitfield
.class == Reg
2082 && !match_operand_size (t
, j
, j
))
2088 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2089 && !match_simd_size (t
, j
, j
))
2095 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2096 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2102 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2109 if (!t
->opcode_modifier
.d
)
2113 i
.error
= operand_size_mismatch
;
2117 /* Check reverse. */
2118 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2120 for (j
= 0; j
< i
.operands
; j
++)
2122 unsigned int given
= i
.operands
- j
- 1;
2124 if (t
->operand_types
[j
].bitfield
.class == Reg
2125 && !match_operand_size (t
, j
, given
))
2128 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2129 && !match_simd_size (t
, j
, given
))
2132 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2133 && (!match_operand_size (t
, j
, given
)
2134 || !match_simd_size (t
, j
, given
)))
2137 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2141 return match
| MATCH_REVERSE
;
2145 operand_type_match (i386_operand_type overlap
,
2146 i386_operand_type given
)
2148 i386_operand_type temp
= overlap
;
2150 temp
.bitfield
.jumpabsolute
= 0;
2151 temp
.bitfield
.unspecified
= 0;
2152 temp
.bitfield
.byte
= 0;
2153 temp
.bitfield
.word
= 0;
2154 temp
.bitfield
.dword
= 0;
2155 temp
.bitfield
.fword
= 0;
2156 temp
.bitfield
.qword
= 0;
2157 temp
.bitfield
.tbyte
= 0;
2158 temp
.bitfield
.xmmword
= 0;
2159 temp
.bitfield
.ymmword
= 0;
2160 temp
.bitfield
.zmmword
= 0;
2161 if (operand_type_all_zero (&temp
))
2164 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
2165 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
2169 i
.error
= operand_type_mismatch
;
2173 /* If given types g0 and g1 are registers they must be of the same type
2174 unless the expected operand type register overlap is null.
2175 Memory operand size of certain SIMD instructions is also being checked
2179 operand_type_register_match (i386_operand_type g0
,
2180 i386_operand_type t0
,
2181 i386_operand_type g1
,
2182 i386_operand_type t1
)
2184 if (g0
.bitfield
.class != Reg
2185 && g0
.bitfield
.class != RegSIMD
2186 && (!operand_type_check (g0
, anymem
)
2187 || g0
.bitfield
.unspecified
2188 || t0
.bitfield
.class != RegSIMD
))
2191 if (g1
.bitfield
.class != Reg
2192 && g1
.bitfield
.class != RegSIMD
2193 && (!operand_type_check (g1
, anymem
)
2194 || g1
.bitfield
.unspecified
2195 || t1
.bitfield
.class != RegSIMD
))
2198 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2199 && g0
.bitfield
.word
== g1
.bitfield
.word
2200 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2201 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2202 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2203 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2204 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2207 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2208 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2209 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2210 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2211 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2212 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2213 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2216 i
.error
= register_type_mismatch
;
2221 static INLINE
unsigned int
2222 register_number (const reg_entry
*r
)
2224 unsigned int nr
= r
->reg_num
;
2226 if (r
->reg_flags
& RegRex
)
2229 if (r
->reg_flags
& RegVRex
)
2235 static INLINE
unsigned int
2236 mode_from_disp_size (i386_operand_type t
)
2238 if (t
.bitfield
.disp8
)
2240 else if (t
.bitfield
.disp16
2241 || t
.bitfield
.disp32
2242 || t
.bitfield
.disp32s
)
2249 fits_in_signed_byte (addressT num
)
2251 return num
+ 0x80 <= 0xff;
2255 fits_in_unsigned_byte (addressT num
)
2261 fits_in_unsigned_word (addressT num
)
2263 return num
<= 0xffff;
2267 fits_in_signed_word (addressT num
)
2269 return num
+ 0x8000 <= 0xffff;
2273 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2278 return num
+ 0x80000000 <= 0xffffffff;
2280 } /* fits_in_signed_long() */
2283 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2288 return num
<= 0xffffffff;
2290 } /* fits_in_unsigned_long() */
2293 fits_in_disp8 (offsetT num
)
2295 int shift
= i
.memshift
;
2301 mask
= (1 << shift
) - 1;
2303 /* Return 0 if NUM isn't properly aligned. */
2307 /* Check if NUM will fit in 8bit after shift. */
2308 return fits_in_signed_byte (num
>> shift
);
2312 fits_in_imm4 (offsetT num
)
2314 return (num
& 0xf) == num
;
2317 static i386_operand_type
2318 smallest_imm_type (offsetT num
)
2320 i386_operand_type t
;
2322 operand_type_set (&t
, 0);
2323 t
.bitfield
.imm64
= 1;
2325 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2327 /* This code is disabled on the 486 because all the Imm1 forms
2328 in the opcode table are slower on the i486. They're the
2329 versions with the implicitly specified single-position
2330 displacement, which has another syntax if you really want to
2332 t
.bitfield
.imm1
= 1;
2333 t
.bitfield
.imm8
= 1;
2334 t
.bitfield
.imm8s
= 1;
2335 t
.bitfield
.imm16
= 1;
2336 t
.bitfield
.imm32
= 1;
2337 t
.bitfield
.imm32s
= 1;
2339 else if (fits_in_signed_byte (num
))
2341 t
.bitfield
.imm8
= 1;
2342 t
.bitfield
.imm8s
= 1;
2343 t
.bitfield
.imm16
= 1;
2344 t
.bitfield
.imm32
= 1;
2345 t
.bitfield
.imm32s
= 1;
2347 else if (fits_in_unsigned_byte (num
))
2349 t
.bitfield
.imm8
= 1;
2350 t
.bitfield
.imm16
= 1;
2351 t
.bitfield
.imm32
= 1;
2352 t
.bitfield
.imm32s
= 1;
2354 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2356 t
.bitfield
.imm16
= 1;
2357 t
.bitfield
.imm32
= 1;
2358 t
.bitfield
.imm32s
= 1;
2360 else if (fits_in_signed_long (num
))
2362 t
.bitfield
.imm32
= 1;
2363 t
.bitfield
.imm32s
= 1;
2365 else if (fits_in_unsigned_long (num
))
2366 t
.bitfield
.imm32
= 1;
2372 offset_in_range (offsetT val
, int size
)
2378 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2379 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2380 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2382 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2388 /* If BFD64, sign extend val for 32bit address mode. */
2389 if (flag_code
!= CODE_64BIT
2390 || i
.prefix
[ADDR_PREFIX
])
2391 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2392 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2395 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2397 char buf1
[40], buf2
[40];
2399 sprint_value (buf1
, val
);
2400 sprint_value (buf2
, val
& mask
);
2401 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2416 a. PREFIX_EXIST if attempting to add a prefix where one from the
2417 same class already exists.
2418 b. PREFIX_LOCK if lock prefix is added.
2419 c. PREFIX_REP if rep/repne prefix is added.
2420 d. PREFIX_DS if ds prefix is added.
2421 e. PREFIX_OTHER if other prefix is added.
2424 static enum PREFIX_GROUP
2425 add_prefix (unsigned int prefix
)
2427 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2430 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2431 && flag_code
== CODE_64BIT
)
2433 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2434 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2435 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2436 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2447 case DS_PREFIX_OPCODE
:
2450 case CS_PREFIX_OPCODE
:
2451 case ES_PREFIX_OPCODE
:
2452 case FS_PREFIX_OPCODE
:
2453 case GS_PREFIX_OPCODE
:
2454 case SS_PREFIX_OPCODE
:
2458 case REPNE_PREFIX_OPCODE
:
2459 case REPE_PREFIX_OPCODE
:
2464 case LOCK_PREFIX_OPCODE
:
2473 case ADDR_PREFIX_OPCODE
:
2477 case DATA_PREFIX_OPCODE
:
2481 if (i
.prefix
[q
] != 0)
2489 i
.prefix
[q
] |= prefix
;
2492 as_bad (_("same type of prefix used twice"));
2498 update_code_flag (int value
, int check
)
2500 PRINTF_LIKE ((*as_error
));
2502 flag_code
= (enum flag_code
) value
;
2503 if (flag_code
== CODE_64BIT
)
2505 cpu_arch_flags
.bitfield
.cpu64
= 1;
2506 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2510 cpu_arch_flags
.bitfield
.cpu64
= 0;
2511 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2513 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2516 as_error
= as_fatal
;
2519 (*as_error
) (_("64bit mode not supported on `%s'."),
2520 cpu_arch_name
? cpu_arch_name
: default_arch
);
2522 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2525 as_error
= as_fatal
;
2528 (*as_error
) (_("32bit mode not supported on `%s'."),
2529 cpu_arch_name
? cpu_arch_name
: default_arch
);
2531 stackop_size
= '\0';
2535 set_code_flag (int value
)
2537 update_code_flag (value
, 0);
2541 set_16bit_gcc_code_flag (int new_code_flag
)
2543 flag_code
= (enum flag_code
) new_code_flag
;
2544 if (flag_code
!= CODE_16BIT
)
2546 cpu_arch_flags
.bitfield
.cpu64
= 0;
2547 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2548 stackop_size
= LONG_MNEM_SUFFIX
;
2552 set_intel_syntax (int syntax_flag
)
2554 /* Find out if register prefixing is specified. */
2555 int ask_naked_reg
= 0;
2558 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2561 int e
= get_symbol_name (&string
);
2563 if (strcmp (string
, "prefix") == 0)
2565 else if (strcmp (string
, "noprefix") == 0)
2568 as_bad (_("bad argument to syntax directive."));
2569 (void) restore_line_pointer (e
);
2571 demand_empty_rest_of_line ();
2573 intel_syntax
= syntax_flag
;
2575 if (ask_naked_reg
== 0)
2576 allow_naked_reg
= (intel_syntax
2577 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2579 allow_naked_reg
= (ask_naked_reg
< 0);
2581 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2583 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2584 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2585 register_prefix
= allow_naked_reg
? "" : "%";
2589 set_intel_mnemonic (int mnemonic_flag
)
2591 intel_mnemonic
= mnemonic_flag
;
2595 set_allow_index_reg (int flag
)
2597 allow_index_reg
= flag
;
/* NOTE(review): handler for the .operand_check/.sse_check directives.
   WHAT selects which check (the switch choosing between operand_check
   and sse_check is missing from this garbled fragment); the argument
   "none"/"warning"/"error" sets the corresponding check_kind.  Code
   left byte-identical; confirm against upstream tc-i386.c.  */
2601 set_check (int what
)
2603 enum check_kind
*kind
;
2608 kind
= &operand_check
;
2619 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2622 int e
= get_symbol_name (&string
);
2624 if (strcmp (string
, "none") == 0)
2626 else if (strcmp (string
, "warning") == 0)
2627 *kind
= check_warning
;
2628 else if (strcmp (string
, "error") == 0)
2629 *kind
= check_error
;
2631 as_bad (_("bad argument to %s_check directive."), str
);
2632 (void) restore_line_pointer (e
);
2635 as_bad (_("missing argument for %s_check directive"), str
);
2637 demand_empty_rest_of_line ();
/* NOTE(review): verify that the cpu-arch requested by a .arch directive
   is compatible with the object format / ELF machine being targeted
   (IAMCU, L1OM, K1OM are ELF-only).  Emits as_bad when incompatible.
   Extraction-garbled fragment; code left byte-identical — confirm
   against upstream tc-i386.c.  */
2641 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2642 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2644 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2645 static const char *arch
;
2647 /* Intel LIOM is only supported on ELF. */
2653 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2654 use default_arch. */
2655 arch
= cpu_arch_name
;
2657 arch
= default_arch
;
2660 /* If we are targeting Intel MCU, we must enable it. */
2661 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2662 || new_flag
.bitfield
.cpuiamcu
)
2665 /* If we are targeting Intel L1OM, we must enable it. */
2666 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2667 || new_flag
.bitfield
.cpul1om
)
2670 /* If we are targeting Intel K1OM, we must enable it. */
2671 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2672 || new_flag
.bitfield
.cpuk1om
)
2675 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
/* NOTE(review): handler for the .arch directive.  Looks the argument up
   in cpu_arch[] (full architectures) and cpu_noarch[] (".no<ext>"
   extension-disable forms), updates cpu_arch_flags / cpu_arch_isa /
   tuning, and parses an optional ",jumps"/",nojumps" modifier.
   Extraction-garbled fragment; code left byte-identical — confirm
   against upstream tc-i386.c.  */
2680 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2684 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2687 int e
= get_symbol_name (&string
);
2689 i386_cpu_flags flags
;
2691 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2693 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2695 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2699 cpu_arch_name
= cpu_arch
[j
].name
;
2700 cpu_sub_arch_name
= NULL
;
2701 cpu_arch_flags
= cpu_arch
[j
].flags
;
2702 if (flag_code
== CODE_64BIT
)
2704 cpu_arch_flags
.bitfield
.cpu64
= 1;
2705 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2709 cpu_arch_flags
.bitfield
.cpu64
= 0;
2710 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2712 cpu_arch_isa
= cpu_arch
[j
].type
;
2713 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2714 if (!cpu_arch_tune_set
)
2716 cpu_arch_tune
= cpu_arch_isa
;
2717 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2722 flags
= cpu_flags_or (cpu_arch_flags
,
2725 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2727 if (cpu_sub_arch_name
)
2729 char *name
= cpu_sub_arch_name
;
2730 cpu_sub_arch_name
= concat (name
,
2732 (const char *) NULL
);
2736 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2737 cpu_arch_flags
= flags
;
2738 cpu_arch_isa_flags
= flags
;
2742 = cpu_flags_or (cpu_arch_isa_flags
,
2744 (void) restore_line_pointer (e
);
2745 demand_empty_rest_of_line ();
2750 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2752 /* Disable an ISA extension. */
2753 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2754 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2756 flags
= cpu_flags_and_not (cpu_arch_flags
,
2757 cpu_noarch
[j
].flags
);
2758 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2760 if (cpu_sub_arch_name
)
2762 char *name
= cpu_sub_arch_name
;
2763 cpu_sub_arch_name
= concat (name
, string
,
2764 (const char *) NULL
);
2768 cpu_sub_arch_name
= xstrdup (string
);
2769 cpu_arch_flags
= flags
;
2770 cpu_arch_isa_flags
= flags
;
2772 (void) restore_line_pointer (e
);
2773 demand_empty_rest_of_line ();
2777 j
= ARRAY_SIZE (cpu_arch
);
2780 if (j
>= ARRAY_SIZE (cpu_arch
))
2781 as_bad (_("no such architecture: `%s'"), string
);
2783 *input_line_pointer
= e
;
2786 as_bad (_("missing cpu architecture"));
2788 no_cond_jump_promotion
= 0;
2789 if (*input_line_pointer
== ','
2790 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2795 ++input_line_pointer
;
2796 e
= get_symbol_name (&string
);
2798 if (strcmp (string
, "nojumps") == 0)
2799 no_cond_jump_promotion
= 1;
2800 else if (strcmp (string
, "jumps") == 0)
2803 as_bad (_("no such architecture modifier: `%s'"), string
);
2805 (void) restore_line_pointer (e
);
2808 demand_empty_rest_of_line ();
/* NOTE(review): i386_arch — map the selected cpu_arch_isa to the BFD
   architecture enum (l1om/k1om/iamcu/i386), with fatal errors for
   format/mode combinations those targets do not support.
   Extraction-garbled fragment; code left byte-identical — confirm
   against upstream tc-i386.c.  */
2811 enum bfd_architecture
2814 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2816 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2817 || flag_code
!= CODE_64BIT
)
2818 as_fatal (_("Intel L1OM is 64bit ELF only"));
2819 return bfd_arch_l1om
;
2821 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2823 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2824 || flag_code
!= CODE_64BIT
)
2825 as_fatal (_("Intel K1OM is 64bit ELF only"));
2826 return bfd_arch_k1om
;
2828 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2830 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2831 || flag_code
== CODE_64BIT
)
2832 as_fatal (_("Intel MCU is 32bit ELF only"));
2833 return bfd_arch_iamcu
;
2836 return bfd_arch_i386
;
/* NOTE(review): i386_mach — map default_arch ("x86_64", "x86_64"+x32
   suffix, "i386", "iamcu") plus cpu_arch_isa to the BFD machine number,
   with the same ELF-only checks as i386_arch.  Extraction-garbled
   fragment; code left byte-identical — confirm against upstream
   tc-i386.c.  */
2842 if (!strncmp (default_arch
, "x86_64", 6))
2844 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2846 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2847 || default_arch
[6] != '\0')
2848 as_fatal (_("Intel L1OM is 64bit ELF only"));
2849 return bfd_mach_l1om
;
2851 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2853 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2854 || default_arch
[6] != '\0')
2855 as_fatal (_("Intel K1OM is 64bit ELF only"));
2856 return bfd_mach_k1om
;
2858 else if (default_arch
[6] == '\0')
2859 return bfd_mach_x86_64
;
2861 return bfd_mach_x64_32
;
2863 else if (!strcmp (default_arch
, "i386")
2864 || !strcmp (default_arch
, "iamcu"))
2866 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2868 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2869 as_fatal (_("Intel MCU is 32bit ELF only"));
2870 return bfd_mach_i386_iamcu
;
2873 return bfd_mach_i386_i386
;
2876 as_fatal (_("unknown architecture"));
/* NOTE(review): body of md_begin (the signature sits in a gap above).
   One-time gas initialisation: builds the op_hash table from the opcode
   templates, the reg_hash table from i386_regtab, fills the lexical
   character-class tables (mnemonic_chars, register_chars, operand_chars,
   identifier_chars), and sets the DWARF2/CFI return column and CIE data
   alignment per code size.  Extraction-garbled fragment; code left
   byte-identical — confirm against upstream tc-i386.c.  */
2882 const char *hash_err
;
2884 /* Support pseudo prefixes like {disp32}. */
2885 lex_type
['{'] = LEX_BEGIN_NAME
;
2887 /* Initialize op_hash hash table. */
2888 op_hash
= hash_new ();
2891 const insn_template
*optab
;
2892 templates
*core_optab
;
2894 /* Setup for loop. */
2896 core_optab
= XNEW (templates
);
2897 core_optab
->start
= optab
;
2902 if (optab
->name
== NULL
2903 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2905 /* different name --> ship out current template list;
2906 add to hash table; & begin anew. */
2907 core_optab
->end
= optab
;
2908 hash_err
= hash_insert (op_hash
,
2910 (void *) core_optab
);
2913 as_fatal (_("can't hash %s: %s"),
2917 if (optab
->name
== NULL
)
2919 core_optab
= XNEW (templates
);
2920 core_optab
->start
= optab
;
2925 /* Initialize reg_hash hash table. */
2926 reg_hash
= hash_new ();
2928 const reg_entry
*regtab
;
2929 unsigned int regtab_size
= i386_regtab_size
;
2931 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2933 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2935 as_fatal (_("can't hash %s: %s"),
2941 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2946 for (c
= 0; c
< 256; c
++)
2951 mnemonic_chars
[c
] = c
;
2952 register_chars
[c
] = c
;
2953 operand_chars
[c
] = c
;
2955 else if (ISLOWER (c
))
2957 mnemonic_chars
[c
] = c
;
2958 register_chars
[c
] = c
;
2959 operand_chars
[c
] = c
;
2961 else if (ISUPPER (c
))
2963 mnemonic_chars
[c
] = TOLOWER (c
);
2964 register_chars
[c
] = mnemonic_chars
[c
];
2965 operand_chars
[c
] = c
;
2967 else if (c
== '{' || c
== '}')
2969 mnemonic_chars
[c
] = c
;
2970 operand_chars
[c
] = c
;
2973 if (ISALPHA (c
) || ISDIGIT (c
))
2974 identifier_chars
[c
] = c
;
2977 identifier_chars
[c
] = c
;
2978 operand_chars
[c
] = c
;
2983 identifier_chars
['@'] = '@';
2986 identifier_chars
['?'] = '?';
2987 operand_chars
['?'] = '?';
2989 digit_chars
['-'] = '-';
2990 mnemonic_chars
['_'] = '_';
2991 mnemonic_chars
['-'] = '-';
2992 mnemonic_chars
['.'] = '.';
2993 identifier_chars
['_'] = '_';
2994 identifier_chars
['.'] = '.';
2996 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2997 operand_chars
[(unsigned char) *p
] = *p
;
3000 if (flag_code
== CODE_64BIT
)
3002 #if defined (OBJ_COFF) && defined (TE_PE)
3003 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3006 x86_dwarf2_return_column
= 16;
3008 x86_cie_data_alignment
= -8;
3012 x86_dwarf2_return_column
= 8;
3013 x86_cie_data_alignment
= -4;
3018 i386_print_statistics (FILE *file
)
3020 hash_print_statistics (file
, "i386 opcode", op_hash
);
3021 hash_print_statistics (file
, "i386 register", reg_hash
);
/* NOTE(review): DEBUG386-only dump helpers — pi (print insn), pte
   (print template entry), pe (print expression), ps (print symbol),
   the type_names mask table, and pt (print operand type).  Only built
   when DEBUG386 is defined.  Extraction-garbled fragment; code left
   byte-identical — confirm against upstream tc-i386.c.  */
3026 /* Debugging routines for md_assemble. */
3027 static void pte (insn_template
*);
3028 static void pt (i386_operand_type
);
3029 static void pe (expressionS
*);
3030 static void ps (symbolS
*);
3033 pi (const char *line
, i386_insn
*x
)
3037 fprintf (stdout
, "%s: template ", line
);
3039 fprintf (stdout
, " address: base %s index %s scale %x\n",
3040 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3041 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3042 x
->log2_scale_factor
);
3043 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3044 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3045 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3046 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3047 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3048 (x
->rex
& REX_W
) != 0,
3049 (x
->rex
& REX_R
) != 0,
3050 (x
->rex
& REX_X
) != 0,
3051 (x
->rex
& REX_B
) != 0);
3052 for (j
= 0; j
< x
->operands
; j
++)
3054 fprintf (stdout
, " #%d: ", j
+ 1);
3056 fprintf (stdout
, "\n");
3057 if (x
->types
[j
].bitfield
.class == Reg
3058 || x
->types
[j
].bitfield
.class == RegMMX
3059 || x
->types
[j
].bitfield
.class == RegSIMD
3060 || x
->types
[j
].bitfield
.class == SReg
3061 || x
->types
[j
].bitfield
.class == RegCR
3062 || x
->types
[j
].bitfield
.class == RegDR
3063 || x
->types
[j
].bitfield
.class == RegTR
)
3064 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3065 if (operand_type_check (x
->types
[j
], imm
))
3067 if (operand_type_check (x
->types
[j
], disp
))
3068 pe (x
->op
[j
].disps
);
3073 pte (insn_template
*t
)
3076 fprintf (stdout
, " %d operands ", t
->operands
);
3077 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3078 if (t
->extension_opcode
!= None
)
3079 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3080 if (t
->opcode_modifier
.d
)
3081 fprintf (stdout
, "D");
3082 if (t
->opcode_modifier
.w
)
3083 fprintf (stdout
, "W");
3084 fprintf (stdout
, "\n");
3085 for (j
= 0; j
< t
->operands
; j
++)
3087 fprintf (stdout
, " #%d type ", j
+ 1);
3088 pt (t
->operand_types
[j
]);
3089 fprintf (stdout
, "\n");
3096 fprintf (stdout
, " operation %d\n", e
->X_op
);
3097 fprintf (stdout
, " add_number %ld (%lx)\n",
3098 (long) e
->X_add_number
, (long) e
->X_add_number
);
3099 if (e
->X_add_symbol
)
3101 fprintf (stdout
, " add_symbol ");
3102 ps (e
->X_add_symbol
);
3103 fprintf (stdout
, "\n");
3107 fprintf (stdout
, " op_symbol ");
3108 ps (e
->X_op_symbol
);
3109 fprintf (stdout
, "\n");
3116 fprintf (stdout
, "%s type %s%s",
3118 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3119 segment_name (S_GET_SEGMENT (s
)));
3122 static struct type_name
3124 i386_operand_type mask
;
3127 const type_names
[] =
3129 { OPERAND_TYPE_REG8
, "r8" },
3130 { OPERAND_TYPE_REG16
, "r16" },
3131 { OPERAND_TYPE_REG32
, "r32" },
3132 { OPERAND_TYPE_REG64
, "r64" },
3133 { OPERAND_TYPE_ACC8
, "acc8" },
3134 { OPERAND_TYPE_ACC16
, "acc16" },
3135 { OPERAND_TYPE_ACC32
, "acc32" },
3136 { OPERAND_TYPE_ACC64
, "acc64" },
3137 { OPERAND_TYPE_IMM8
, "i8" },
3138 { OPERAND_TYPE_IMM8
, "i8s" },
3139 { OPERAND_TYPE_IMM16
, "i16" },
3140 { OPERAND_TYPE_IMM32
, "i32" },
3141 { OPERAND_TYPE_IMM32S
, "i32s" },
3142 { OPERAND_TYPE_IMM64
, "i64" },
3143 { OPERAND_TYPE_IMM1
, "i1" },
3144 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3145 { OPERAND_TYPE_DISP8
, "d8" },
3146 { OPERAND_TYPE_DISP16
, "d16" },
3147 { OPERAND_TYPE_DISP32
, "d32" },
3148 { OPERAND_TYPE_DISP32S
, "d32s" },
3149 { OPERAND_TYPE_DISP64
, "d64" },
3150 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3151 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3152 { OPERAND_TYPE_CONTROL
, "control reg" },
3153 { OPERAND_TYPE_TEST
, "test reg" },
3154 { OPERAND_TYPE_DEBUG
, "debug reg" },
3155 { OPERAND_TYPE_FLOATREG
, "FReg" },
3156 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3157 { OPERAND_TYPE_SREG
, "SReg" },
3158 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
3159 { OPERAND_TYPE_REGMMX
, "rMMX" },
3160 { OPERAND_TYPE_REGXMM
, "rXMM" },
3161 { OPERAND_TYPE_REGYMM
, "rYMM" },
3162 { OPERAND_TYPE_REGZMM
, "rZMM" },
3163 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3164 { OPERAND_TYPE_ESSEG
, "es" },
3168 pt (i386_operand_type t
)
3171 i386_operand_type a
;
3173 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3175 a
= operand_type_and (t
, type_names
[j
].mask
);
3176 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3177 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3182 #endif /* DEBUG386 */
/* NOTE(review): reloc — pick the BFD relocation type for a fixup of the
   given SIZE / pcrel / sign, widening some 32-bit relocations to their
   64-bit forms, validating an explicitly requested relocation (OTHER)
   against size and signedness, and falling back to the generic
   BFD_RELOC_{8,16,32,64}[_PCREL] types.  Extraction-garbled fragment;
   code left byte-identical — confirm against upstream tc-i386.c.  */
3184 static bfd_reloc_code_real_type
3185 reloc (unsigned int size
,
3188 bfd_reloc_code_real_type other
)
3190 if (other
!= NO_RELOC
)
3192 reloc_howto_type
*rel
;
3197 case BFD_RELOC_X86_64_GOT32
:
3198 return BFD_RELOC_X86_64_GOT64
;
3200 case BFD_RELOC_X86_64_GOTPLT64
:
3201 return BFD_RELOC_X86_64_GOTPLT64
;
3203 case BFD_RELOC_X86_64_PLTOFF64
:
3204 return BFD_RELOC_X86_64_PLTOFF64
;
3206 case BFD_RELOC_X86_64_GOTPC32
:
3207 other
= BFD_RELOC_X86_64_GOTPC64
;
3209 case BFD_RELOC_X86_64_GOTPCREL
:
3210 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3212 case BFD_RELOC_X86_64_TPOFF32
:
3213 other
= BFD_RELOC_X86_64_TPOFF64
;
3215 case BFD_RELOC_X86_64_DTPOFF32
:
3216 other
= BFD_RELOC_X86_64_DTPOFF64
;
3222 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3223 if (other
== BFD_RELOC_SIZE32
)
3226 other
= BFD_RELOC_SIZE64
;
3229 as_bad (_("there are no pc-relative size relocations"));
3235 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3236 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3239 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3241 as_bad (_("unknown relocation (%u)"), other
);
3242 else if (size
!= bfd_get_reloc_size (rel
))
3243 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3244 bfd_get_reloc_size (rel
),
3246 else if (pcrel
&& !rel
->pc_relative
)
3247 as_bad (_("non-pc-relative relocation for pc-relative field"));
3248 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3250 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3252 as_bad (_("relocated field and relocation type differ in signedness"));
3261 as_bad (_("there are no unsigned pc-relative relocations"));
3264 case 1: return BFD_RELOC_8_PCREL
;
3265 case 2: return BFD_RELOC_16_PCREL
;
3266 case 4: return BFD_RELOC_32_PCREL
;
3267 case 8: return BFD_RELOC_64_PCREL
;
3269 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3276 case 4: return BFD_RELOC_X86_64_32S
;
3281 case 1: return BFD_RELOC_8
;
3282 case 2: return BFD_RELOC_16
;
3283 case 4: return BFD_RELOC_32
;
3284 case 8: return BFD_RELOC_64
;
3286 as_bad (_("cannot do %s %u byte relocation"),
3287 sign
> 0 ? "signed" : "unsigned", size
);
/* NOTE(review): tc_i386_fix_adjustable — decide whether a fixup may be
   converted to be section-relative.  GOT/PLT/TLS/size/vtable relocations
   must keep the original symbol so dynamic relocations stay correct.
   Extraction-garbled fragment; code left byte-identical — confirm
   against upstream tc-i386.c.  */
3293 /* Here we decide which fixups can be adjusted to make them relative to
3294 the beginning of the section instead of the symbol. Basically we need
3295 to make sure that the dynamic relocations are done correctly, so in
3296 some cases we force the original symbol to be used. */
3299 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3301 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3305 /* Don't adjust pc-relative references to merge sections in 64-bit
3307 if (use_rela_relocations
3308 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3312 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3313 and changed later by validate_fix. */
3314 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3315 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3318 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3319 for size relocations. */
3320 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3321 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3322 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3323 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3324 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3325 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3326 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3327 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3328 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3329 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3330 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3331 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3332 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3333 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3334 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3335 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3336 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3337 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3338 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3339 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3340 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3341 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3342 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3343 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3344 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3345 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3346 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3347 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3348 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3349 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3350 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3351 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3352 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify x87 mnemonic MNEMONIC for Intel-syntax operand sizing:
   0 = not a math op (or fxsave/fxrstor), 1 = ordinary FP op,
   2 = integer op (fi...), 3 = control/state op (fldcw, fnstenv, ...).
   Meaningful only for mnemonics that take memory operands.  */
static int
intel_float_operand (const char *mnemonic)
{
  /* Only mnemonics starting with 'f' are of interest.  */
  if (mnemonic[0] != 'f')
    return 0;	/* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here.  */
    case 'i':
      return 2;	/* integer op */

    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3;	/* fldcw/fldenv */
      break;

    case 'n':
      if (mnemonic[2] != 'o')	/* fnop */
	return 3;	/* non-waiting control op */
      break;

    case 'r':
      if (mnemonic[2] == 's')
	return 3;	/* frstor/frstpm */
      break;

    case 's':
      if (mnemonic[2] == 'a')
	return 3;	/* fsave */
      if (mnemonic[2] == 't')
	switch (mnemonic[3])
	  {
	  case 'c':	/* fstcw */
	  case 'd':	/* fstdw */
	  case 'e':	/* fstenv */
	  case 's':	/* fsts[gw] */
	    return 3;
	  }
      break;

    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;	/* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
/* NOTE(review): build_vex_prefix — assemble the 2- or 3-byte VEX prefix
   for the current insn (global `i`), including the operand-swapping
   tricks that allow the shorter 2-byte C5 form, implied-prefix
   selection, vector length and the W/R/X/B bits.  Extraction-garbled
   fragment; code left byte-identical — confirm against upstream
   tc-i386.c.  */
3411 /* Build the VEX prefix. */
3414 build_vex_prefix (const insn_template
*t
)
3416 unsigned int register_specifier
;
3417 unsigned int implied_prefix
;
3418 unsigned int vector_length
;
3421 /* Check register specifier. */
3422 if (i
.vex
.register_specifier
)
3424 register_specifier
=
3425 ~register_number (i
.vex
.register_specifier
) & 0xf;
3426 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3429 register_specifier
= 0xf;
3431 /* Use 2-byte VEX prefix by swapping destination and source operand
3432 if there are more than 1 register operand. */
3433 if (i
.reg_operands
> 1
3434 && i
.vec_encoding
!= vex_encoding_vex3
3435 && i
.dir_encoding
== dir_encoding_default
3436 && i
.operands
== i
.reg_operands
3437 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3438 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3439 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3442 unsigned int xchg
= i
.operands
- 1;
3443 union i386_op temp_op
;
3444 i386_operand_type temp_type
;
3446 temp_type
= i
.types
[xchg
];
3447 i
.types
[xchg
] = i
.types
[0];
3448 i
.types
[0] = temp_type
;
3449 temp_op
= i
.op
[xchg
];
3450 i
.op
[xchg
] = i
.op
[0];
3453 gas_assert (i
.rm
.mode
== 3);
3457 i
.rm
.regmem
= i
.rm
.reg
;
3460 if (i
.tm
.opcode_modifier
.d
)
3461 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3462 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3463 else /* Use the next insn. */
3467 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3468 are no memory operands and at least 3 register ones. */
3469 if (i
.reg_operands
>= 3
3470 && i
.vec_encoding
!= vex_encoding_vex3
3471 && i
.reg_operands
== i
.operands
- i
.imm_operands
3472 && i
.tm
.opcode_modifier
.vex
3473 && i
.tm
.opcode_modifier
.commutative
3474 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3476 && i
.vex
.register_specifier
3477 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3479 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3480 union i386_op temp_op
;
3481 i386_operand_type temp_type
;
3483 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3484 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3485 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3486 &i
.types
[i
.operands
- 3]));
3487 gas_assert (i
.rm
.mode
== 3);
3489 temp_type
= i
.types
[xchg
];
3490 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3491 i
.types
[xchg
+ 1] = temp_type
;
3492 temp_op
= i
.op
[xchg
];
3493 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3494 i
.op
[xchg
+ 1] = temp_op
;
3497 xchg
= i
.rm
.regmem
| 8;
3498 i
.rm
.regmem
= ~register_specifier
& 0xf;
3499 gas_assert (!(i
.rm
.regmem
& 8));
3500 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3501 register_specifier
= ~xchg
& 0xf;
3504 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3505 vector_length
= avxscalar
;
3506 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3512 /* Determine vector length from the last multi-length vector
3515 for (op
= t
->operands
; op
--;)
3516 if (t
->operand_types
[op
].bitfield
.xmmword
3517 && t
->operand_types
[op
].bitfield
.ymmword
3518 && i
.types
[op
].bitfield
.ymmword
)
3525 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3530 case DATA_PREFIX_OPCODE
:
3533 case REPE_PREFIX_OPCODE
:
3536 case REPNE_PREFIX_OPCODE
:
3543 /* Check the REX.W bit and VEXW. */
3544 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3545 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3546 else if (i
.tm
.opcode_modifier
.vexw
)
3547 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3549 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3551 /* Use 2-byte VEX prefix if possible. */
3553 && i
.vec_encoding
!= vex_encoding_vex3
3554 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3555 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3557 /* 2-byte VEX prefix. */
3561 i
.vex
.bytes
[0] = 0xc5;
3563 /* Check the REX.R bit. */
3564 r
= (i
.rex
& REX_R
) ? 0 : 1;
3565 i
.vex
.bytes
[1] = (r
<< 7
3566 | register_specifier
<< 3
3567 | vector_length
<< 2
3572 /* 3-byte VEX prefix. */
3577 switch (i
.tm
.opcode_modifier
.vexopcode
)
3581 i
.vex
.bytes
[0] = 0xc4;
3585 i
.vex
.bytes
[0] = 0xc4;
3589 i
.vex
.bytes
[0] = 0xc4;
3593 i
.vex
.bytes
[0] = 0x8f;
3597 i
.vex
.bytes
[0] = 0x8f;
3601 i
.vex
.bytes
[0] = 0x8f;
3607 /* The high 3 bits of the second VEX byte are 1's compliment
3608 of RXB bits from REX. */
3609 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3611 i
.vex
.bytes
[2] = (w
<< 7
3612 | register_specifier
<< 3
3613 | vector_length
<< 2
3618 static INLINE bfd_boolean
3619 is_evex_encoding (const insn_template
*t
)
3621 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3622 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3623 || t
->opcode_modifier
.sae
;
3626 static INLINE bfd_boolean
3627 is_any_vex_encoding (const insn_template
*t
)
3629 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3630 || is_evex_encoding (t
);
/* NOTE(review): build_evex_prefix — assemble the 4-byte EVEX prefix for
   the current insn (global `i`): register specifier and upper-16
   register encoding via the fourth byte, implied prefix, RXB/R' bits,
   W bit, vector length (with per-operand deduction for EVEXDYN),
   broadcast/rounding bits, and the mask register.  Extraction-garbled
   fragment; code left byte-identical — confirm against upstream
   tc-i386.c.  */
3633 /* Build the EVEX prefix. */
3636 build_evex_prefix (void)
3638 unsigned int register_specifier
;
3639 unsigned int implied_prefix
;
3641 rex_byte vrex_used
= 0;
3643 /* Check register specifier. */
3644 if (i
.vex
.register_specifier
)
3646 gas_assert ((i
.vrex
& REX_X
) == 0);
3648 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3649 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3650 register_specifier
+= 8;
3651 /* The upper 16 registers are encoded in the fourth byte of the
3653 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3654 i
.vex
.bytes
[3] = 0x8;
3655 register_specifier
= ~register_specifier
& 0xf;
3659 register_specifier
= 0xf;
3661 /* Encode upper 16 vector index register in the fourth byte of
3663 if (!(i
.vrex
& REX_X
))
3664 i
.vex
.bytes
[3] = 0x8;
3669 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3674 case DATA_PREFIX_OPCODE
:
3677 case REPE_PREFIX_OPCODE
:
3680 case REPNE_PREFIX_OPCODE
:
3687 /* 4 byte EVEX prefix. */
3689 i
.vex
.bytes
[0] = 0x62;
3692 switch (i
.tm
.opcode_modifier
.vexopcode
)
3708 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3710 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3712 /* The fifth bit of the second EVEX byte is 1's compliment of the
3713 REX_R bit in VREX. */
3714 if (!(i
.vrex
& REX_R
))
3715 i
.vex
.bytes
[1] |= 0x10;
3719 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3721 /* When all operands are registers, the REX_X bit in REX is not
3722 used. We reuse it to encode the upper 16 registers, which is
3723 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3724 as 1's compliment. */
3725 if ((i
.vrex
& REX_B
))
3728 i
.vex
.bytes
[1] &= ~0x40;
3732 /* EVEX instructions shouldn't need the REX prefix. */
3733 i
.vrex
&= ~vrex_used
;
3734 gas_assert (i
.vrex
== 0);
3736 /* Check the REX.W bit and VEXW. */
3737 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3738 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3739 else if (i
.tm
.opcode_modifier
.vexw
)
3740 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3742 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3744 /* Encode the U bit. */
3745 implied_prefix
|= 0x4;
3747 /* The third byte of the EVEX prefix. */
3748 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3750 /* The fourth byte of the EVEX prefix. */
3751 /* The zeroing-masking bit. */
3752 if (i
.mask
&& i
.mask
->zeroing
)
3753 i
.vex
.bytes
[3] |= 0x80;
3755 /* Don't always set the broadcast bit if there is no RC. */
3758 /* Encode the vector length. */
3759 unsigned int vec_length
;
3761 if (!i
.tm
.opcode_modifier
.evex
3762 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3766 /* Determine vector length from the last multi-length vector
3769 for (op
= i
.operands
; op
--;)
3770 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3771 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3772 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3774 if (i
.types
[op
].bitfield
.zmmword
)
3776 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3779 else if (i
.types
[op
].bitfield
.ymmword
)
3781 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3784 else if (i
.types
[op
].bitfield
.xmmword
)
3786 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3789 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3791 switch (i
.broadcast
->bytes
)
3794 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3797 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3800 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3809 if (op
>= MAX_OPERANDS
)
3813 switch (i
.tm
.opcode_modifier
.evex
)
3815 case EVEXLIG
: /* LL' is ignored */
3816 vec_length
= evexlig
<< 5;
3819 vec_length
= 0 << 5;
3822 vec_length
= 1 << 5;
3825 vec_length
= 2 << 5;
3831 i
.vex
.bytes
[3] |= vec_length
;
3832 /* Encode the broadcast bit. */
3834 i
.vex
.bytes
[3] |= 0x10;
3838 if (i
.rounding
->type
!= saeonly
)
3839 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3841 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3844 if (i
.mask
&& i
.mask
->mask
)
3845 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
/* NOTE(review): process_immext — for insns (MONITOR/MWAIT, SVME,
   MONITORX/MWAITX, 3DNow!, some AVX) whose opcode suffix is encoded in
   the immediate-byte position: validate and strip the fixed register
   operands, then fake an imm8 operand from tm.extension_opcode.
   Extraction-garbled fragment; code left byte-identical — confirm
   against upstream tc-i386.c.  */
3849 process_immext (void)
3853 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3856 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3857 with an opcode suffix which is coded in the same place as an
3858 8-bit immediate field would be.
3859 Here we check those operands and remove them afterwards. */
3862 for (x
= 0; x
< i
.operands
; x
++)
3863 if (register_number (i
.op
[x
].regs
) != x
)
3864 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3865 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3871 if (i
.tm
.cpu_flags
.bitfield
.cpumwaitx
&& i
.operands
> 0)
3873 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3874 suffix which is coded in the same place as an 8-bit immediate
3876 Here we check those operands and remove them afterwards. */
3879 if (i
.operands
!= 3)
3882 for (x
= 0; x
< 2; x
++)
3883 if (register_number (i
.op
[x
].regs
) != x
)
3884 goto bad_register_operand
;
3886 /* Check for third operand for mwaitx/monitorx insn. */
3887 if (register_number (i
.op
[x
].regs
)
3888 != (x
+ (i
.tm
.extension_opcode
== 0xfb)))
3890 bad_register_operand
:
3891 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3892 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+1,
3899 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3900 which is coded in the same place as an 8-bit immediate field
3901 would be. Here we fake an 8-bit immediate operand from the
3902 opcode suffix stored in tm.extension_opcode.
3904 AVX instructions also use this encoding, for some of
3905 3 argument instructions. */
3907 gas_assert (i
.imm_operands
<= 1
3909 || (is_any_vex_encoding (&i
.tm
)
3910 && i
.operands
<= 4)));
3912 exp
= &im_expressions
[i
.imm_operands
++];
3913 i
.op
[i
.operands
].imms
= exp
;
3914 i
.types
[i
.operands
] = imm8
;
3916 exp
->X_op
= O_constant
;
3917 exp
->X_add_number
= i
.tm
.extension_opcode
;
3918 i
.tm
.extension_opcode
= None
;
/* NOTE(review): body of check_hle (signature sits in a gap above) —
   validate the xacquire/xrelease (HLE) prefix on the current insn:
   reject insns that don't allow it, require a lock prefix where
   needed, restrict xacquire-only insns, and demand a memory
   destination after xrelease.  Extraction-garbled fragment; code left
   byte-identical — confirm against upstream tc-i386.c.  */
3925 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3930 as_bad (_("invalid instruction `%s' after `%s'"),
3931 i
.tm
.name
, i
.hle_prefix
);
3934 if (i
.prefix
[LOCK_PREFIX
])
3936 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3940 case HLEPrefixRelease
:
3941 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3943 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3947 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
3949 as_bad (_("memory destination needed for instruction `%s'"
3950 " after `xrelease'"), i
.tm
.name
);
3957 /* Try the shortest encoding by shortening operand size. */
3960 optimize_encoding (void)
3964 if (optimize_for_space
3965 && i
.reg_operands
== 1
3966 && i
.imm_operands
== 1
3967 && !i
.types
[1].bitfield
.byte
3968 && i
.op
[0].imms
->X_op
== O_constant
3969 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
3970 && ((i
.tm
.base_opcode
== 0xa8
3971 && i
.tm
.extension_opcode
== None
)
3972 || (i
.tm
.base_opcode
== 0xf6
3973 && i
.tm
.extension_opcode
== 0x0)))
3976 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
3978 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
3979 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
3981 i
.types
[1].bitfield
.byte
= 1;
3982 /* Ignore the suffix. */
3984 if (base_regnum
>= 4
3985 && !(i
.op
[1].regs
->reg_flags
& RegRex
))
3987 /* Handle SP, BP, SI and DI registers. */
3988 if (i
.types
[1].bitfield
.word
)
3990 else if (i
.types
[1].bitfield
.dword
)
3998 else if (flag_code
== CODE_64BIT
3999 && ((i
.types
[1].bitfield
.qword
4000 && i
.reg_operands
== 1
4001 && i
.imm_operands
== 1
4002 && i
.op
[0].imms
->X_op
== O_constant
4003 && ((i
.tm
.base_opcode
== 0xb8
4004 && i
.tm
.extension_opcode
== None
4005 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4006 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4007 && (((i
.tm
.base_opcode
== 0x24
4008 || i
.tm
.base_opcode
== 0xa8)
4009 && i
.tm
.extension_opcode
== None
)
4010 || (i
.tm
.base_opcode
== 0x80
4011 && i
.tm
.extension_opcode
== 0x4)
4012 || ((i
.tm
.base_opcode
== 0xf6
4013 || (i
.tm
.base_opcode
| 1) == 0xc7)
4014 && i
.tm
.extension_opcode
== 0x0)))
4015 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4016 && i
.tm
.base_opcode
== 0x83
4017 && i
.tm
.extension_opcode
== 0x4)))
4018 || (i
.types
[0].bitfield
.qword
4019 && ((i
.reg_operands
== 2
4020 && i
.op
[0].regs
== i
.op
[1].regs
4021 && ((i
.tm
.base_opcode
== 0x30
4022 || i
.tm
.base_opcode
== 0x28)
4023 && i
.tm
.extension_opcode
== None
))
4024 || (i
.reg_operands
== 1
4026 && i
.tm
.base_opcode
== 0x30
4027 && i
.tm
.extension_opcode
== None
)))))
4030 andq $imm31, %r64 -> andl $imm31, %r32
4031 andq $imm7, %r64 -> andl $imm7, %r32
4032 testq $imm31, %r64 -> testl $imm31, %r32
4033 xorq %r64, %r64 -> xorl %r32, %r32
4034 subq %r64, %r64 -> subl %r32, %r32
4035 movq $imm31, %r64 -> movl $imm31, %r32
4036 movq $imm32, %r64 -> movl $imm32, %r32
4038 i
.tm
.opcode_modifier
.norex64
= 1;
4039 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4042 movq $imm31, %r64 -> movl $imm31, %r32
4043 movq $imm32, %r64 -> movl $imm32, %r32
4045 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4046 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4047 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4048 i
.types
[0].bitfield
.imm32
= 1;
4049 i
.types
[0].bitfield
.imm32s
= 0;
4050 i
.types
[0].bitfield
.imm64
= 0;
4051 i
.types
[1].bitfield
.dword
= 1;
4052 i
.types
[1].bitfield
.qword
= 0;
4053 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4056 movq $imm31, %r64 -> movl $imm31, %r32
4058 i
.tm
.base_opcode
= 0xb8;
4059 i
.tm
.extension_opcode
= None
;
4060 i
.tm
.opcode_modifier
.w
= 0;
4061 i
.tm
.opcode_modifier
.shortform
= 1;
4062 i
.tm
.opcode_modifier
.modrm
= 0;
4066 else if (optimize
> 1
4067 && !optimize_for_space
4068 && i
.reg_operands
== 2
4069 && i
.op
[0].regs
== i
.op
[1].regs
4070 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4071 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4072 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4075 andb %rN, %rN -> testb %rN, %rN
4076 andw %rN, %rN -> testw %rN, %rN
4077 andq %rN, %rN -> testq %rN, %rN
4078 orb %rN, %rN -> testb %rN, %rN
4079 orw %rN, %rN -> testw %rN, %rN
4080 orq %rN, %rN -> testq %rN, %rN
4082 and outside of 64-bit mode
4084 andl %rN, %rN -> testl %rN, %rN
4085 orl %rN, %rN -> testl %rN, %rN
4087 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4089 else if (i
.reg_operands
== 3
4090 && i
.op
[0].regs
== i
.op
[1].regs
4091 && !i
.types
[2].bitfield
.xmmword
4092 && (i
.tm
.opcode_modifier
.vex
4093 || ((!i
.mask
|| i
.mask
->zeroing
)
4095 && is_evex_encoding (&i
.tm
)
4096 && (i
.vec_encoding
!= vex_encoding_evex
4097 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4098 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4099 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4100 && i
.types
[2].bitfield
.ymmword
))))
4101 && ((i
.tm
.base_opcode
== 0x55
4102 || i
.tm
.base_opcode
== 0x6655
4103 || i
.tm
.base_opcode
== 0x66df
4104 || i
.tm
.base_opcode
== 0x57
4105 || i
.tm
.base_opcode
== 0x6657
4106 || i
.tm
.base_opcode
== 0x66ef
4107 || i
.tm
.base_opcode
== 0x66f8
4108 || i
.tm
.base_opcode
== 0x66f9
4109 || i
.tm
.base_opcode
== 0x66fa
4110 || i
.tm
.base_opcode
== 0x66fb
4111 || i
.tm
.base_opcode
== 0x42
4112 || i
.tm
.base_opcode
== 0x6642
4113 || i
.tm
.base_opcode
== 0x47
4114 || i
.tm
.base_opcode
== 0x6647)
4115 && i
.tm
.extension_opcode
== None
))
4118 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4120 EVEX VOP %zmmM, %zmmM, %zmmN
4121 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4122 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4123 EVEX VOP %ymmM, %ymmM, %ymmN
4124 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4125 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4126 VEX VOP %ymmM, %ymmM, %ymmN
4127 -> VEX VOP %xmmM, %xmmM, %xmmN
4128 VOP, one of vpandn and vpxor:
4129 VEX VOP %ymmM, %ymmM, %ymmN
4130 -> VEX VOP %xmmM, %xmmM, %xmmN
4131 VOP, one of vpandnd and vpandnq:
4132 EVEX VOP %zmmM, %zmmM, %zmmN
4133 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4134 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4135 EVEX VOP %ymmM, %ymmM, %ymmN
4136 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4137 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4138 VOP, one of vpxord and vpxorq:
4139 EVEX VOP %zmmM, %zmmM, %zmmN
4140 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4141 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4142 EVEX VOP %ymmM, %ymmM, %ymmN
4143 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4144 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4145 VOP, one of kxord and kxorq:
4146 VEX VOP %kM, %kM, %kN
4147 -> VEX kxorw %kM, %kM, %kN
4148 VOP, one of kandnd and kandnq:
4149 VEX VOP %kM, %kM, %kN
4150 -> VEX kandnw %kM, %kM, %kN
4152 if (is_evex_encoding (&i
.tm
))
4154 if (i
.vec_encoding
!= vex_encoding_evex
)
4156 i
.tm
.opcode_modifier
.vex
= VEX128
;
4157 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4158 i
.tm
.opcode_modifier
.evex
= 0;
4160 else if (optimize
> 1)
4161 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4165 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4167 i
.tm
.base_opcode
&= 0xff;
4168 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4171 i
.tm
.opcode_modifier
.vex
= VEX128
;
4173 if (i
.tm
.opcode_modifier
.vex
)
4174 for (j
= 0; j
< 3; j
++)
4176 i
.types
[j
].bitfield
.xmmword
= 1;
4177 i
.types
[j
].bitfield
.ymmword
= 0;
4180 else if (i
.vec_encoding
!= vex_encoding_evex
4181 && !i
.types
[0].bitfield
.zmmword
4182 && !i
.types
[1].bitfield
.zmmword
4185 && is_evex_encoding (&i
.tm
)
4186 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4187 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4188 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4189 || (i
.tm
.base_opcode
& ~4) == 0x66db
4190 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4191 && i
.tm
.extension_opcode
== None
)
4194 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4195 vmovdqu32 and vmovdqu64:
4196 EVEX VOP %xmmM, %xmmN
4197 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4198 EVEX VOP %ymmM, %ymmN
4199 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4201 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4203 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4205 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4207 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4208 VOP, one of vpand, vpandn, vpor, vpxor:
4209 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4210 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4211 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4212 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4213 EVEX VOP{d,q} mem, %xmmM, %xmmN
4214 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4215 EVEX VOP{d,q} mem, %ymmM, %ymmN
4216 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4218 for (j
= 0; j
< i
.operands
; j
++)
4219 if (operand_type_check (i
.types
[j
], disp
)
4220 && i
.op
[j
].disps
->X_op
== O_constant
)
4222 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4223 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4224 bytes, we choose EVEX Disp8 over VEX Disp32. */
4225 int evex_disp8
, vex_disp8
;
4226 unsigned int memshift
= i
.memshift
;
4227 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4229 evex_disp8
= fits_in_disp8 (n
);
4231 vex_disp8
= fits_in_disp8 (n
);
4232 if (evex_disp8
!= vex_disp8
)
4234 i
.memshift
= memshift
;
4238 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4241 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4242 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4243 i
.tm
.opcode_modifier
.vex
4244 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4245 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4246 /* VPAND, VPOR, and VPXOR are commutative. */
4247 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4248 i
.tm
.opcode_modifier
.commutative
= 1;
4249 i
.tm
.opcode_modifier
.evex
= 0;
4250 i
.tm
.opcode_modifier
.masking
= 0;
4251 i
.tm
.opcode_modifier
.broadcast
= 0;
4252 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4255 i
.types
[j
].bitfield
.disp8
4256 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4260 /* This is the guts of the machine-dependent assembler. LINE points to a
4261 machine dependent instruction. This function is supposed to emit
4262 the frags/bytes it assembles to. */
4265 md_assemble (char *line
)
4268 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4269 const insn_template
*t
;
4271 /* Initialize globals. */
4272 memset (&i
, '\0', sizeof (i
));
4273 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4274 i
.reloc
[j
] = NO_RELOC
;
4275 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4276 memset (im_expressions
, '\0', sizeof (im_expressions
));
4277 save_stack_p
= save_stack
;
4279 /* First parse an instruction mnemonic & call i386_operand for the operands.
4280 We assume that the scrubber has arranged it so that line[0] is the valid
4281 start of a (possibly prefixed) mnemonic. */
4283 line
= parse_insn (line
, mnemonic
);
4286 mnem_suffix
= i
.suffix
;
4288 line
= parse_operands (line
, mnemonic
);
4290 xfree (i
.memop1_string
);
4291 i
.memop1_string
= NULL
;
4295 /* Now we've parsed the mnemonic into a set of templates, and have the
4296 operands at hand. */
4298 /* All intel opcodes have reversed operands except for "bound" and
4299 "enter". We also don't reverse intersegment "jmp" and "call"
4300 instructions with 2 immediate operands so that the immediate segment
4301 precedes the offset, as it does when in AT&T mode. */
4304 && (strcmp (mnemonic
, "bound") != 0)
4305 && (strcmp (mnemonic
, "invlpga") != 0)
4306 && !(operand_type_check (i
.types
[0], imm
)
4307 && operand_type_check (i
.types
[1], imm
)))
4310 /* The order of the immediates should be reversed
4311 for 2 immediates extrq and insertq instructions */
4312 if (i
.imm_operands
== 2
4313 && (strcmp (mnemonic
, "extrq") == 0
4314 || strcmp (mnemonic
, "insertq") == 0))
4315 swap_2_operands (0, 1);
4320 /* Don't optimize displacement for movabs since it only takes 64bit
4323 && i
.disp_encoding
!= disp_encoding_32bit
4324 && (flag_code
!= CODE_64BIT
4325 || strcmp (mnemonic
, "movabs") != 0))
4328 /* Next, we find a template that matches the given insn,
4329 making sure the overlap of the given operands types is consistent
4330 with the template operand types. */
4332 if (!(t
= match_template (mnem_suffix
)))
4335 if (sse_check
!= check_none
4336 && !i
.tm
.opcode_modifier
.noavx
4337 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4338 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4339 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4340 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4341 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4342 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4343 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4344 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4345 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4346 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4348 (sse_check
== check_warning
4350 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4353 /* Zap movzx and movsx suffix. The suffix has been set from
4354 "word ptr" or "byte ptr" on the source operand in Intel syntax
4355 or extracted from mnemonic in AT&T syntax. But we'll use
4356 the destination register to choose the suffix for encoding. */
4357 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
4359 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
4360 there is no suffix, the default will be byte extension. */
4361 if (i
.reg_operands
!= 2
4364 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4369 if (i
.tm
.opcode_modifier
.fwait
)
4370 if (!add_prefix (FWAIT_OPCODE
))
4373 /* Check if REP prefix is OK. */
4374 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4376 as_bad (_("invalid instruction `%s' after `%s'"),
4377 i
.tm
.name
, i
.rep_prefix
);
4381 /* Check for lock without a lockable instruction. Destination operand
4382 must be memory unless it is xchg (0x86). */
4383 if (i
.prefix
[LOCK_PREFIX
]
4384 && (!i
.tm
.opcode_modifier
.islockable
4385 || i
.mem_operands
== 0
4386 || (i
.tm
.base_opcode
!= 0x86
4387 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4389 as_bad (_("expecting lockable instruction after `lock'"));
4393 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4394 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4396 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4400 /* Check if HLE prefix is OK. */
4401 if (i
.hle_prefix
&& !check_hle ())
4404 /* Check BND prefix. */
4405 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4406 as_bad (_("expecting valid branch instruction after `bnd'"));
4408 /* Check NOTRACK prefix. */
4409 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4410 as_bad (_("expecting indirect branch instruction after `notrack'"));
4412 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4414 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4415 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4416 else if (flag_code
!= CODE_16BIT
4417 ? i
.prefix
[ADDR_PREFIX
]
4418 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4419 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4422 /* Insert BND prefix. */
4423 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4425 if (!i
.prefix
[BND_PREFIX
])
4426 add_prefix (BND_PREFIX_OPCODE
);
4427 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4429 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4430 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4434 /* Check string instruction segment overrides. */
4435 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
4437 if (!check_string ())
4439 i
.disp_operands
= 0;
4442 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4443 optimize_encoding ();
4445 if (!process_suffix ())
4448 /* Update operand types. */
4449 for (j
= 0; j
< i
.operands
; j
++)
4450 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4452 /* Make still unresolved immediate matches conform to size of immediate
4453 given in i.suffix. */
4454 if (!finalize_imm ())
4457 if (i
.types
[0].bitfield
.imm1
)
4458 i
.imm_operands
= 0; /* kludge for shift insns. */
4460 /* We only need to check those implicit registers for instructions
4461 with 3 operands or less. */
4462 if (i
.operands
<= 3)
4463 for (j
= 0; j
< i
.operands
; j
++)
4464 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4465 && !i
.types
[j
].bitfield
.xmmword
)
4468 /* ImmExt should be processed after SSE2AVX. */
4469 if (!i
.tm
.opcode_modifier
.sse2avx
4470 && i
.tm
.opcode_modifier
.immext
)
4473 /* For insns with operands there are more diddles to do to the opcode. */
4476 if (!process_operands ())
4479 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4481 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4482 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4485 if (is_any_vex_encoding (&i
.tm
))
4487 if (!cpu_arch_flags
.bitfield
.cpui286
)
4489 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4494 if (i
.tm
.opcode_modifier
.vex
)
4495 build_vex_prefix (t
);
4497 build_evex_prefix ();
4500 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4501 instructions may define INT_OPCODE as well, so avoid this corner
4502 case for those instructions that use MODRM. */
4503 if (i
.tm
.base_opcode
== INT_OPCODE
4504 && !i
.tm
.opcode_modifier
.modrm
4505 && i
.op
[0].imms
->X_add_number
== 3)
4507 i
.tm
.base_opcode
= INT3_OPCODE
;
4511 if ((i
.tm
.opcode_modifier
.jump
4512 || i
.tm
.opcode_modifier
.jumpbyte
4513 || i
.tm
.opcode_modifier
.jumpdword
)
4514 && i
.op
[0].disps
->X_op
== O_constant
)
4516 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4517 the absolute address given by the constant. Since ix86 jumps and
4518 calls are pc relative, we need to generate a reloc. */
4519 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4520 i
.op
[0].disps
->X_op
= O_symbol
;
4523 if (i
.tm
.opcode_modifier
.rex64
)
4526 /* For 8 bit registers we need an empty rex prefix. Also if the
4527 instruction already has a prefix, we need to convert old
4528 registers to new ones. */
4530 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4531 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4532 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4533 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4534 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4535 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4540 i
.rex
|= REX_OPCODE
;
4541 for (x
= 0; x
< 2; x
++)
4543 /* Look for 8 bit operand that uses old registers. */
4544 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4545 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4547 /* In case it is "hi" register, give up. */
4548 if (i
.op
[x
].regs
->reg_num
> 3)
4549 as_bad (_("can't encode register '%s%s' in an "
4550 "instruction requiring REX prefix."),
4551 register_prefix
, i
.op
[x
].regs
->reg_name
);
4553 /* Otherwise it is equivalent to the extended register.
4554 Since the encoding doesn't change this is merely
4555 cosmetic cleanup for debug output. */
4557 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4562 if (i
.rex
== 0 && i
.rex_encoding
)
4564 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4565 that uses legacy register. If it is "hi" register, don't add
4566 the REX_OPCODE byte. */
4568 for (x
= 0; x
< 2; x
++)
4569 if (i
.types
[x
].bitfield
.class == Reg
4570 && i
.types
[x
].bitfield
.byte
4571 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4572 && i
.op
[x
].regs
->reg_num
> 3)
4574 i
.rex_encoding
= FALSE
;
4583 add_prefix (REX_OPCODE
| i
.rex
);
4585 /* We are ready to output the insn. */
4590 parse_insn (char *line
, char *mnemonic
)
4593 char *token_start
= l
;
4596 const insn_template
*t
;
4602 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4607 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4609 as_bad (_("no such instruction: `%s'"), token_start
);
4614 if (!is_space_char (*l
)
4615 && *l
!= END_OF_INSN
4617 || (*l
!= PREFIX_SEPARATOR
4620 as_bad (_("invalid character %s in mnemonic"),
4621 output_invalid (*l
));
4624 if (token_start
== l
)
4626 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4627 as_bad (_("expecting prefix; got nothing"));
4629 as_bad (_("expecting mnemonic; got nothing"));
4633 /* Look up instruction (or prefix) via hash table. */
4634 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4636 if (*l
!= END_OF_INSN
4637 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
4638 && current_templates
4639 && current_templates
->start
->opcode_modifier
.isprefix
)
4641 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
4643 as_bad ((flag_code
!= CODE_64BIT
4644 ? _("`%s' is only supported in 64-bit mode")
4645 : _("`%s' is not supported in 64-bit mode")),
4646 current_templates
->start
->name
);
4649 /* If we are in 16-bit mode, do not allow addr16 or data16.
4650 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4651 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
4652 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4653 && flag_code
!= CODE_64BIT
4654 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4655 ^ (flag_code
== CODE_16BIT
)))
4657 as_bad (_("redundant %s prefix"),
4658 current_templates
->start
->name
);
4661 if (current_templates
->start
->opcode_length
== 0)
4663 /* Handle pseudo prefixes. */
4664 switch (current_templates
->start
->base_opcode
)
4668 i
.disp_encoding
= disp_encoding_8bit
;
4672 i
.disp_encoding
= disp_encoding_32bit
;
4676 i
.dir_encoding
= dir_encoding_load
;
4680 i
.dir_encoding
= dir_encoding_store
;
4684 i
.vec_encoding
= vex_encoding_vex2
;
4688 i
.vec_encoding
= vex_encoding_vex3
;
4692 i
.vec_encoding
= vex_encoding_evex
;
4696 i
.rex_encoding
= TRUE
;
4700 i
.no_optimize
= TRUE
;
4708 /* Add prefix, checking for repeated prefixes. */
4709 switch (add_prefix (current_templates
->start
->base_opcode
))
4714 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
4715 i
.notrack_prefix
= current_templates
->start
->name
;
4718 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4719 i
.hle_prefix
= current_templates
->start
->name
;
4720 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4721 i
.bnd_prefix
= current_templates
->start
->name
;
4723 i
.rep_prefix
= current_templates
->start
->name
;
4729 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4736 if (!current_templates
)
4738 /* Deprecated functionality (new code should use pseudo-prefixes instead):
4739 Check if we should swap operand or force 32bit displacement in
4741 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4742 i
.dir_encoding
= dir_encoding_swap
;
4743 else if (mnem_p
- 3 == dot_p
4746 i
.disp_encoding
= disp_encoding_8bit
;
4747 else if (mnem_p
- 4 == dot_p
4751 i
.disp_encoding
= disp_encoding_32bit
;
4756 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4759 if (!current_templates
)
4762 if (mnem_p
> mnemonic
)
4764 /* See if we can get a match by trimming off a suffix. */
4767 case WORD_MNEM_SUFFIX
:
4768 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4769 i
.suffix
= SHORT_MNEM_SUFFIX
;
4772 case BYTE_MNEM_SUFFIX
:
4773 case QWORD_MNEM_SUFFIX
:
4774 i
.suffix
= mnem_p
[-1];
4776 current_templates
= (const templates
*) hash_find (op_hash
,
4779 case SHORT_MNEM_SUFFIX
:
4780 case LONG_MNEM_SUFFIX
:
4783 i
.suffix
= mnem_p
[-1];
4785 current_templates
= (const templates
*) hash_find (op_hash
,
4794 if (intel_float_operand (mnemonic
) == 1)
4795 i
.suffix
= SHORT_MNEM_SUFFIX
;
4797 i
.suffix
= LONG_MNEM_SUFFIX
;
4799 current_templates
= (const templates
*) hash_find (op_hash
,
4806 if (!current_templates
)
4808 as_bad (_("no such instruction: `%s'"), token_start
);
4813 if (current_templates
->start
->opcode_modifier
.jump
4814 || current_templates
->start
->opcode_modifier
.jumpbyte
)
4816 /* Check for a branch hint. We allow ",pt" and ",pn" for
4817 predict taken and predict not taken respectively.
4818 I'm not sure that branch hints actually do anything on loop
4819 and jcxz insns (JumpByte) for current Pentium4 chips. They
4820 may work in the future and it doesn't hurt to accept them
4822 if (l
[0] == ',' && l
[1] == 'p')
4826 if (!add_prefix (DS_PREFIX_OPCODE
))
4830 else if (l
[2] == 'n')
4832 if (!add_prefix (CS_PREFIX_OPCODE
))
4838 /* Any other comma loses. */
4841 as_bad (_("invalid character %s in mnemonic"),
4842 output_invalid (*l
));
4846 /* Check if instruction is supported on specified architecture. */
4848 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4850 supported
|= cpu_flags_match (t
);
4851 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4853 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
4854 as_warn (_("use .code16 to ensure correct addressing mode"));
4860 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4861 as_bad (flag_code
== CODE_64BIT
4862 ? _("`%s' is not supported in 64-bit mode")
4863 : _("`%s' is only supported in 64-bit mode"),
4864 current_templates
->start
->name
);
4866 as_bad (_("`%s' is not supported on `%s%s'"),
4867 current_templates
->start
->name
,
4868 cpu_arch_name
? cpu_arch_name
: default_arch
,
4869 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4875 parse_operands (char *l
, const char *mnemonic
)
4879 /* 1 if operand is pending after ','. */
4880 unsigned int expecting_operand
= 0;
4882 /* Non-zero if operand parens not balanced. */
4883 unsigned int paren_not_balanced
;
4885 while (*l
!= END_OF_INSN
)
4887 /* Skip optional white space before operand. */
4888 if (is_space_char (*l
))
4890 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4892 as_bad (_("invalid character %s before operand %d"),
4893 output_invalid (*l
),
4897 token_start
= l
; /* After white space. */
4898 paren_not_balanced
= 0;
4899 while (paren_not_balanced
|| *l
!= ',')
4901 if (*l
== END_OF_INSN
)
4903 if (paren_not_balanced
)
4906 as_bad (_("unbalanced parenthesis in operand %d."),
4909 as_bad (_("unbalanced brackets in operand %d."),
4914 break; /* we are done */
4916 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4918 as_bad (_("invalid character %s in operand %d"),
4919 output_invalid (*l
),
4926 ++paren_not_balanced
;
4928 --paren_not_balanced
;
4933 ++paren_not_balanced
;
4935 --paren_not_balanced
;
4939 if (l
!= token_start
)
4940 { /* Yes, we've read in another operand. */
4941 unsigned int operand_ok
;
4942 this_operand
= i
.operands
++;
4943 if (i
.operands
> MAX_OPERANDS
)
4945 as_bad (_("spurious operands; (%d operands/instruction max)"),
4949 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4950 /* Now parse operand adding info to 'i' as we go along. */
4951 END_STRING_AND_SAVE (l
);
4953 if (i
.mem_operands
> 1)
4955 as_bad (_("too many memory references for `%s'"),
4962 i386_intel_operand (token_start
,
4963 intel_float_operand (mnemonic
));
4965 operand_ok
= i386_att_operand (token_start
);
4967 RESTORE_END_STRING (l
);
4973 if (expecting_operand
)
4975 expecting_operand_after_comma
:
4976 as_bad (_("expecting operand after ','; got nothing"));
4981 as_bad (_("expecting operand before ','; got nothing"));
4986 /* Now *l must be either ',' or END_OF_INSN. */
4989 if (*++l
== END_OF_INSN
)
4991 /* Just skip it, if it's \n complain. */
4992 goto expecting_operand_after_comma
;
4994 expecting_operand
= 1;
5001 swap_2_operands (int xchg1
, int xchg2
)
5003 union i386_op temp_op
;
5004 i386_operand_type temp_type
;
5005 unsigned int temp_flags
;
5006 enum bfd_reloc_code_real temp_reloc
;
5008 temp_type
= i
.types
[xchg2
];
5009 i
.types
[xchg2
] = i
.types
[xchg1
];
5010 i
.types
[xchg1
] = temp_type
;
5012 temp_flags
= i
.flags
[xchg2
];
5013 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5014 i
.flags
[xchg1
] = temp_flags
;
5016 temp_op
= i
.op
[xchg2
];
5017 i
.op
[xchg2
] = i
.op
[xchg1
];
5018 i
.op
[xchg1
] = temp_op
;
5020 temp_reloc
= i
.reloc
[xchg2
];
5021 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5022 i
.reloc
[xchg1
] = temp_reloc
;
5026 if (i
.mask
->operand
== xchg1
)
5027 i
.mask
->operand
= xchg2
;
5028 else if (i
.mask
->operand
== xchg2
)
5029 i
.mask
->operand
= xchg1
;
5033 if (i
.broadcast
->operand
== xchg1
)
5034 i
.broadcast
->operand
= xchg2
;
5035 else if (i
.broadcast
->operand
== xchg2
)
5036 i
.broadcast
->operand
= xchg1
;
5040 if (i
.rounding
->operand
== xchg1
)
5041 i
.rounding
->operand
= xchg2
;
5042 else if (i
.rounding
->operand
== xchg2
)
5043 i
.rounding
->operand
= xchg1
;
5048 swap_operands (void)
5054 swap_2_operands (1, i
.operands
- 2);
5058 swap_2_operands (0, i
.operands
- 1);
5064 if (i
.mem_operands
== 2)
5066 const seg_entry
*temp_seg
;
5067 temp_seg
= i
.seg
[0];
5068 i
.seg
[0] = i
.seg
[1];
5069 i
.seg
[1] = temp_seg
;
5073 /* Try to ensure constant immediates are represented in the smallest
5078 char guess_suffix
= 0;
5082 guess_suffix
= i
.suffix
;
5083 else if (i
.reg_operands
)
5085 /* Figure out a suffix from the last register operand specified.
5086 We can't do this properly yet, i.e. excluding special register
5087 instances, but the following works for instructions with
5088 immediates. In any case, we can't set i.suffix yet. */
5089 for (op
= i
.operands
; --op
>= 0;)
5090 if (i
.types
[op
].bitfield
.class != Reg
)
5092 else if (i
.types
[op
].bitfield
.byte
)
5094 guess_suffix
= BYTE_MNEM_SUFFIX
;
5097 else if (i
.types
[op
].bitfield
.word
)
5099 guess_suffix
= WORD_MNEM_SUFFIX
;
5102 else if (i
.types
[op
].bitfield
.dword
)
5104 guess_suffix
= LONG_MNEM_SUFFIX
;
5107 else if (i
.types
[op
].bitfield
.qword
)
5109 guess_suffix
= QWORD_MNEM_SUFFIX
;
5113 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5114 guess_suffix
= WORD_MNEM_SUFFIX
;
5116 for (op
= i
.operands
; --op
>= 0;)
5117 if (operand_type_check (i
.types
[op
], imm
))
5119 switch (i
.op
[op
].imms
->X_op
)
5122 /* If a suffix is given, this operand may be shortened. */
5123 switch (guess_suffix
)
5125 case LONG_MNEM_SUFFIX
:
5126 i
.types
[op
].bitfield
.imm32
= 1;
5127 i
.types
[op
].bitfield
.imm64
= 1;
5129 case WORD_MNEM_SUFFIX
:
5130 i
.types
[op
].bitfield
.imm16
= 1;
5131 i
.types
[op
].bitfield
.imm32
= 1;
5132 i
.types
[op
].bitfield
.imm32s
= 1;
5133 i
.types
[op
].bitfield
.imm64
= 1;
5135 case BYTE_MNEM_SUFFIX
:
5136 i
.types
[op
].bitfield
.imm8
= 1;
5137 i
.types
[op
].bitfield
.imm8s
= 1;
5138 i
.types
[op
].bitfield
.imm16
= 1;
5139 i
.types
[op
].bitfield
.imm32
= 1;
5140 i
.types
[op
].bitfield
.imm32s
= 1;
5141 i
.types
[op
].bitfield
.imm64
= 1;
5145 /* If this operand is at most 16 bits, convert it
5146 to a signed 16 bit number before trying to see
5147 whether it will fit in an even smaller size.
5148 This allows a 16-bit operand such as $0xffe0 to
5149 be recognised as within Imm8S range. */
5150 if ((i
.types
[op
].bitfield
.imm16
)
5151 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5153 i
.op
[op
].imms
->X_add_number
=
5154 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5157 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5158 if ((i
.types
[op
].bitfield
.imm32
)
5159 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5162 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5163 ^ ((offsetT
) 1 << 31))
5164 - ((offsetT
) 1 << 31));
5168 = operand_type_or (i
.types
[op
],
5169 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5171 /* We must avoid matching of Imm32 templates when 64bit
5172 only immediate is available. */
5173 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5174 i
.types
[op
].bitfield
.imm32
= 0;
5181 /* Symbols and expressions. */
5183 /* Convert symbolic operand to proper sizes for matching, but don't
5184 prevent matching a set of insns that only supports sizes other
5185 than those matching the insn suffix. */
5187 i386_operand_type mask
, allowed
;
5188 const insn_template
*t
;
5190 operand_type_set (&mask
, 0);
5191 operand_type_set (&allowed
, 0);
5193 for (t
= current_templates
->start
;
5194 t
< current_templates
->end
;
5197 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5198 allowed
= operand_type_and (allowed
, anyimm
);
5200 switch (guess_suffix
)
5202 case QWORD_MNEM_SUFFIX
:
5203 mask
.bitfield
.imm64
= 1;
5204 mask
.bitfield
.imm32s
= 1;
5206 case LONG_MNEM_SUFFIX
:
5207 mask
.bitfield
.imm32
= 1;
5209 case WORD_MNEM_SUFFIX
:
5210 mask
.bitfield
.imm16
= 1;
5212 case BYTE_MNEM_SUFFIX
:
5213 mask
.bitfield
.imm8
= 1;
5218 allowed
= operand_type_and (mask
, allowed
);
5219 if (!operand_type_all_zero (&allowed
))
5220 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5227 /* Try to use the smallest displacement type too. */
5229 optimize_disp (void)
5233 for (op
= i
.operands
; --op
>= 0;)
5234 if (operand_type_check (i
.types
[op
], disp
))
5236 if (i
.op
[op
].disps
->X_op
== O_constant
)
5238 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5240 if (i
.types
[op
].bitfield
.disp16
5241 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5243 /* If this operand is at most 16 bits, convert
5244 to a signed 16 bit number and don't use 64bit
5246 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5247 i
.types
[op
].bitfield
.disp64
= 0;
5250 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5251 if (i
.types
[op
].bitfield
.disp32
5252 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5254 /* If this operand is at most 32 bits, convert
5255 to a signed 32 bit number and don't use 64bit
5257 op_disp
&= (((offsetT
) 2 << 31) - 1);
5258 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5259 i
.types
[op
].bitfield
.disp64
= 0;
5262 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5264 i
.types
[op
].bitfield
.disp8
= 0;
5265 i
.types
[op
].bitfield
.disp16
= 0;
5266 i
.types
[op
].bitfield
.disp32
= 0;
5267 i
.types
[op
].bitfield
.disp32s
= 0;
5268 i
.types
[op
].bitfield
.disp64
= 0;
5272 else if (flag_code
== CODE_64BIT
)
5274 if (fits_in_signed_long (op_disp
))
5276 i
.types
[op
].bitfield
.disp64
= 0;
5277 i
.types
[op
].bitfield
.disp32s
= 1;
5279 if (i
.prefix
[ADDR_PREFIX
]
5280 && fits_in_unsigned_long (op_disp
))
5281 i
.types
[op
].bitfield
.disp32
= 1;
5283 if ((i
.types
[op
].bitfield
.disp32
5284 || i
.types
[op
].bitfield
.disp32s
5285 || i
.types
[op
].bitfield
.disp16
)
5286 && fits_in_disp8 (op_disp
))
5287 i
.types
[op
].bitfield
.disp8
= 1;
5289 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5290 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5292 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5293 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5294 i
.types
[op
].bitfield
.disp8
= 0;
5295 i
.types
[op
].bitfield
.disp16
= 0;
5296 i
.types
[op
].bitfield
.disp32
= 0;
5297 i
.types
[op
].bitfield
.disp32s
= 0;
5298 i
.types
[op
].bitfield
.disp64
= 0;
5301 /* We only support 64bit displacement on constants. */
5302 i
.types
[op
].bitfield
.disp64
= 0;
5306 /* Return 1 if there is a match in broadcast bytes between operand
5307 GIVEN and instruction template T. */
5310 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5312 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5313 && i
.types
[given
].bitfield
.byte
)
5314 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5315 && i
.types
[given
].bitfield
.word
)
5316 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5317 && i
.types
[given
].bitfield
.dword
)
5318 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5319 && i
.types
[given
].bitfield
.qword
));
5322 /* Check if operands are valid for the instruction. */
5325 check_VecOperands (const insn_template
*t
)
5329 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
5331 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5332 any one operand are implicity requiring AVX512VL support if the actual
5333 operand size is YMMword or XMMword. Since this function runs after
5334 template matching, there's no need to check for YMMword/XMMword in
5336 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5337 if (!cpu_flags_all_zero (&cpu
)
5338 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5339 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5341 for (op
= 0; op
< t
->operands
; ++op
)
5343 if (t
->operand_types
[op
].bitfield
.zmmword
5344 && (i
.types
[op
].bitfield
.ymmword
5345 || i
.types
[op
].bitfield
.xmmword
))
5347 i
.error
= unsupported
;
5353 /* Without VSIB byte, we can't have a vector register for index. */
5354 if (!t
->opcode_modifier
.vecsib
5356 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5357 || i
.index_reg
->reg_type
.bitfield
.ymmword
5358 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5360 i
.error
= unsupported_vector_index_register
;
5364 /* Check if default mask is allowed. */
5365 if (t
->opcode_modifier
.nodefmask
5366 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5368 i
.error
= no_default_mask
;
5372 /* For VSIB byte, we need a vector register for index, and all vector
5373 registers must be distinct. */
5374 if (t
->opcode_modifier
.vecsib
)
5377 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5378 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5379 || (t
->opcode_modifier
.vecsib
== VecSIB256
5380 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5381 || (t
->opcode_modifier
.vecsib
== VecSIB512
5382 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5384 i
.error
= invalid_vsib_address
;
5388 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5389 if (i
.reg_operands
== 2 && !i
.mask
)
5391 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5392 gas_assert (i
.types
[0].bitfield
.xmmword
5393 || i
.types
[0].bitfield
.ymmword
);
5394 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5395 gas_assert (i
.types
[2].bitfield
.xmmword
5396 || i
.types
[2].bitfield
.ymmword
);
5397 if (operand_check
== check_none
)
5399 if (register_number (i
.op
[0].regs
)
5400 != register_number (i
.index_reg
)
5401 && register_number (i
.op
[2].regs
)
5402 != register_number (i
.index_reg
)
5403 && register_number (i
.op
[0].regs
)
5404 != register_number (i
.op
[2].regs
))
5406 if (operand_check
== check_error
)
5408 i
.error
= invalid_vector_register_set
;
5411 as_warn (_("mask, index, and destination registers should be distinct"));
5413 else if (i
.reg_operands
== 1 && i
.mask
)
5415 if (i
.types
[1].bitfield
.class == RegSIMD
5416 && (i
.types
[1].bitfield
.xmmword
5417 || i
.types
[1].bitfield
.ymmword
5418 || i
.types
[1].bitfield
.zmmword
)
5419 && (register_number (i
.op
[1].regs
)
5420 == register_number (i
.index_reg
)))
5422 if (operand_check
== check_error
)
5424 i
.error
= invalid_vector_register_set
;
5427 if (operand_check
!= check_none
)
5428 as_warn (_("index and destination registers should be distinct"));
5433 /* Check if broadcast is supported by the instruction and is applied
5434 to the memory operand. */
5437 i386_operand_type type
, overlap
;
5439 /* Check if specified broadcast is supported in this instruction,
5440 and its broadcast bytes match the memory operand. */
5441 op
= i
.broadcast
->operand
;
5442 if (!t
->opcode_modifier
.broadcast
5443 || !(i
.flags
[op
] & Operand_Mem
)
5444 || (!i
.types
[op
].bitfield
.unspecified
5445 && !match_broadcast_size (t
, op
)))
5448 i
.error
= unsupported_broadcast
;
5452 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5453 * i
.broadcast
->type
);
5454 operand_type_set (&type
, 0);
5455 switch (i
.broadcast
->bytes
)
5458 type
.bitfield
.word
= 1;
5461 type
.bitfield
.dword
= 1;
5464 type
.bitfield
.qword
= 1;
5467 type
.bitfield
.xmmword
= 1;
5470 type
.bitfield
.ymmword
= 1;
5473 type
.bitfield
.zmmword
= 1;
5479 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5480 if (operand_type_all_zero (&overlap
))
5483 if (t
->opcode_modifier
.checkregsize
)
5487 type
.bitfield
.baseindex
= 1;
5488 for (j
= 0; j
< i
.operands
; ++j
)
5491 && !operand_type_register_match(i
.types
[j
],
5492 t
->operand_types
[j
],
5494 t
->operand_types
[op
]))
5499 /* If broadcast is supported in this instruction, we need to check if
5500 operand of one-element size isn't specified without broadcast. */
5501 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5503 /* Find memory operand. */
5504 for (op
= 0; op
< i
.operands
; op
++)
5505 if (i
.flags
[op
] & Operand_Mem
)
5507 gas_assert (op
< i
.operands
);
5508 /* Check size of the memory operand. */
5509 if (match_broadcast_size (t
, op
))
5511 i
.error
= broadcast_needed
;
5516 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5518 /* Check if requested masking is supported. */
5521 switch (t
->opcode_modifier
.masking
)
5525 case MERGING_MASKING
:
5526 if (i
.mask
->zeroing
)
5529 i
.error
= unsupported_masking
;
5533 case DYNAMIC_MASKING
:
5534 /* Memory destinations allow only merging masking. */
5535 if (i
.mask
->zeroing
&& i
.mem_operands
)
5537 /* Find memory operand. */
5538 for (op
= 0; op
< i
.operands
; op
++)
5539 if (i
.flags
[op
] & Operand_Mem
)
5541 gas_assert (op
< i
.operands
);
5542 if (op
== i
.operands
- 1)
5544 i
.error
= unsupported_masking
;
5554 /* Check if masking is applied to dest operand. */
5555 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5557 i
.error
= mask_not_on_destination
;
5564 if (!t
->opcode_modifier
.sae
5565 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5567 i
.error
= unsupported_rc_sae
;
5570 /* If the instruction has several immediate operands and one of
5571 them is rounding, the rounding operand should be the last
5572 immediate operand. */
5573 if (i
.imm_operands
> 1
5574 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5576 i
.error
= rc_sae_operand_not_last_imm
;
5581 /* Check vector Disp8 operand. */
5582 if (t
->opcode_modifier
.disp8memshift
5583 && i
.disp_encoding
!= disp_encoding_32bit
)
5586 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
5587 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
5588 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5591 const i386_operand_type
*type
= NULL
;
5594 for (op
= 0; op
< i
.operands
; op
++)
5595 if (i
.flags
[op
] & Operand_Mem
)
5597 if (t
->opcode_modifier
.evex
== EVEXLIG
)
5598 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
5599 else if (t
->operand_types
[op
].bitfield
.xmmword
5600 + t
->operand_types
[op
].bitfield
.ymmword
5601 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
5602 type
= &t
->operand_types
[op
];
5603 else if (!i
.types
[op
].bitfield
.unspecified
)
5604 type
= &i
.types
[op
];
5606 else if (i
.types
[op
].bitfield
.class == RegSIMD
5607 && t
->opcode_modifier
.evex
!= EVEXLIG
)
5609 if (i
.types
[op
].bitfield
.zmmword
)
5611 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
5613 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
5619 if (type
->bitfield
.zmmword
)
5621 else if (type
->bitfield
.ymmword
)
5623 else if (type
->bitfield
.xmmword
)
5627 /* For the check in fits_in_disp8(). */
5628 if (i
.memshift
== 0)
5632 for (op
= 0; op
< i
.operands
; op
++)
5633 if (operand_type_check (i
.types
[op
], disp
)
5634 && i
.op
[op
].disps
->X_op
== O_constant
)
5636 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
5638 i
.types
[op
].bitfield
.disp8
= 1;
5641 i
.types
[op
].bitfield
.disp8
= 0;
5650 /* Check if operands are valid for the instruction. Update VEX
5654 VEX_check_operands (const insn_template
*t
)
5656 if (i
.vec_encoding
== vex_encoding_evex
)
5658 /* This instruction must be encoded with EVEX prefix. */
5659 if (!is_evex_encoding (t
))
5661 i
.error
= unsupported
;
5667 if (!t
->opcode_modifier
.vex
)
5669 /* This instruction template doesn't have VEX prefix. */
5670 if (i
.vec_encoding
!= vex_encoding_default
)
5672 i
.error
= unsupported
;
5678 /* Check the special Imm4 cases; must be the first operand. */
5679 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
5681 if (i
.op
[0].imms
->X_op
!= O_constant
5682 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
5688 /* Turn off Imm<N> so that update_imm won't complain. */
5689 operand_type_set (&i
.types
[0], 0);
5695 static const insn_template
*
5696 match_template (char mnem_suffix
)
5698 /* Points to template once we've found it. */
5699 const insn_template
*t
;
5700 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
5701 i386_operand_type overlap4
;
5702 unsigned int found_reverse_match
;
5703 i386_opcode_modifier suffix_check
;
5704 i386_operand_type operand_types
[MAX_OPERANDS
];
5705 int addr_prefix_disp
;
5707 unsigned int found_cpu_match
, size_match
;
5708 unsigned int check_register
;
5709 enum i386_error specific_error
= 0;
5711 #if MAX_OPERANDS != 5
5712 # error "MAX_OPERANDS must be 5."
5715 found_reverse_match
= 0;
5716 addr_prefix_disp
= -1;
5718 /* Prepare for mnemonic suffix check. */
5719 memset (&suffix_check
, 0, sizeof (suffix_check
));
5720 switch (mnem_suffix
)
5722 case BYTE_MNEM_SUFFIX
:
5723 suffix_check
.no_bsuf
= 1;
5725 case WORD_MNEM_SUFFIX
:
5726 suffix_check
.no_wsuf
= 1;
5728 case SHORT_MNEM_SUFFIX
:
5729 suffix_check
.no_ssuf
= 1;
5731 case LONG_MNEM_SUFFIX
:
5732 suffix_check
.no_lsuf
= 1;
5734 case QWORD_MNEM_SUFFIX
:
5735 suffix_check
.no_qsuf
= 1;
5738 /* NB: In Intel syntax, normally we can check for memory operand
5739 size when there is no mnemonic suffix. But jmp and call have
5740 2 different encodings with Dword memory operand size, one with
5741 No_ldSuf and the other without. i.suffix is set to
5742 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
5743 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
5744 suffix_check
.no_ldsuf
= 1;
5747 /* Must have right number of operands. */
5748 i
.error
= number_of_operands_mismatch
;
5750 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
5752 addr_prefix_disp
= -1;
5753 found_reverse_match
= 0;
5755 if (i
.operands
!= t
->operands
)
5758 /* Check processor support. */
5759 i
.error
= unsupported
;
5760 found_cpu_match
= (cpu_flags_match (t
)
5761 == CPU_FLAGS_PERFECT_MATCH
);
5762 if (!found_cpu_match
)
5765 /* Check AT&T mnemonic. */
5766 i
.error
= unsupported_with_intel_mnemonic
;
5767 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
5770 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
5771 i
.error
= unsupported_syntax
;
5772 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
5773 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
5774 || (intel64
&& t
->opcode_modifier
.amd64
)
5775 || (!intel64
&& t
->opcode_modifier
.intel64
))
5778 /* Check the suffix. */
5779 i
.error
= invalid_instruction_suffix
;
5780 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
5781 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
5782 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
5783 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
5784 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
5785 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
5788 size_match
= operand_size_match (t
);
5792 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5793 operand_types
[j
] = t
->operand_types
[j
];
5795 /* In general, don't allow 64-bit operands in 32-bit mode. */
5796 if (i
.suffix
== QWORD_MNEM_SUFFIX
5797 && flag_code
!= CODE_64BIT
5799 ? (!t
->opcode_modifier
.ignoresize
5800 && !t
->opcode_modifier
.broadcast
5801 && !intel_float_operand (t
->name
))
5802 : intel_float_operand (t
->name
) != 2)
5803 && ((operand_types
[0].bitfield
.class != RegMMX
5804 && operand_types
[0].bitfield
.class != RegSIMD
)
5805 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5806 && operand_types
[t
->operands
> 1].bitfield
.class != RegSIMD
))
5807 && (t
->base_opcode
!= 0x0fc7
5808 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
5811 /* In general, don't allow 32-bit operands on pre-386. */
5812 else if (i
.suffix
== LONG_MNEM_SUFFIX
5813 && !cpu_arch_flags
.bitfield
.cpui386
5815 ? (!t
->opcode_modifier
.ignoresize
5816 && !intel_float_operand (t
->name
))
5817 : intel_float_operand (t
->name
) != 2)
5818 && ((operand_types
[0].bitfield
.class != RegMMX
5819 && operand_types
[0].bitfield
.class != RegSIMD
)
5820 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5821 && operand_types
[t
->operands
> 1].bitfield
.class
5825 /* Do not verify operands when there are none. */
5829 /* We've found a match; break out of loop. */
5833 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5834 into Disp32/Disp16/Disp32 operand. */
5835 if (i
.prefix
[ADDR_PREFIX
] != 0)
5837 /* There should be only one Disp operand. */
5841 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5843 if (operand_types
[j
].bitfield
.disp16
)
5845 addr_prefix_disp
= j
;
5846 operand_types
[j
].bitfield
.disp32
= 1;
5847 operand_types
[j
].bitfield
.disp16
= 0;
5853 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5855 if (operand_types
[j
].bitfield
.disp32
)
5857 addr_prefix_disp
= j
;
5858 operand_types
[j
].bitfield
.disp32
= 0;
5859 operand_types
[j
].bitfield
.disp16
= 1;
5865 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5867 if (operand_types
[j
].bitfield
.disp64
)
5869 addr_prefix_disp
= j
;
5870 operand_types
[j
].bitfield
.disp64
= 0;
5871 operand_types
[j
].bitfield
.disp32
= 1;
5879 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5880 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5883 /* We check register size if needed. */
5884 if (t
->opcode_modifier
.checkregsize
)
5886 check_register
= (1 << t
->operands
) - 1;
5888 check_register
&= ~(1 << i
.broadcast
->operand
);
5893 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5894 switch (t
->operands
)
5897 if (!operand_type_match (overlap0
, i
.types
[0]))
5901 /* xchg %eax, %eax is a special case. It is an alias for nop
5902 only in 32bit mode and we can use opcode 0x90. In 64bit
5903 mode, we can't use 0x90 for xchg %eax, %eax since it should
5904 zero-extend %eax to %rax. */
5905 if (flag_code
== CODE_64BIT
5906 && t
->base_opcode
== 0x90
5907 && i
.types
[0].bitfield
.instance
== Accum
5908 && i
.types
[0].bitfield
.dword
5909 && i
.types
[1].bitfield
.instance
== Accum
5910 && i
.types
[1].bitfield
.dword
)
5912 /* xrelease mov %eax, <disp> is another special case. It must not
5913 match the accumulator-only encoding of mov. */
5914 if (flag_code
!= CODE_64BIT
5916 && t
->base_opcode
== 0xa0
5917 && i
.types
[0].bitfield
.instance
== Accum
5918 && (i
.flags
[1] & Operand_Mem
))
5923 if (!(size_match
& MATCH_STRAIGHT
))
5925 /* Reverse direction of operands if swapping is possible in the first
5926 place (operands need to be symmetric) and
5927 - the load form is requested, and the template is a store form,
5928 - the store form is requested, and the template is a load form,
5929 - the non-default (swapped) form is requested. */
5930 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
5931 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
5932 && !operand_type_all_zero (&overlap1
))
5933 switch (i
.dir_encoding
)
5935 case dir_encoding_load
:
5936 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5937 || t
->opcode_modifier
.regmem
)
5941 case dir_encoding_store
:
5942 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5943 && !t
->opcode_modifier
.regmem
)
5947 case dir_encoding_swap
:
5950 case dir_encoding_default
:
5953 /* If we want store form, we skip the current load. */
5954 if ((i
.dir_encoding
== dir_encoding_store
5955 || i
.dir_encoding
== dir_encoding_swap
)
5956 && i
.mem_operands
== 0
5957 && t
->opcode_modifier
.load
)
5962 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
5963 if (!operand_type_match (overlap0
, i
.types
[0])
5964 || !operand_type_match (overlap1
, i
.types
[1])
5965 || ((check_register
& 3) == 3
5966 && !operand_type_register_match (i
.types
[0],
5971 /* Check if other direction is valid ... */
5972 if (!t
->opcode_modifier
.d
)
5976 if (!(size_match
& MATCH_REVERSE
))
5978 /* Try reversing direction of operands. */
5979 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
5980 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
5981 if (!operand_type_match (overlap0
, i
.types
[0])
5982 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
5984 && !operand_type_register_match (i
.types
[0],
5985 operand_types
[i
.operands
- 1],
5986 i
.types
[i
.operands
- 1],
5989 /* Does not match either direction. */
5992 /* found_reverse_match holds which of D or FloatR
5994 if (!t
->opcode_modifier
.d
)
5995 found_reverse_match
= 0;
5996 else if (operand_types
[0].bitfield
.tbyte
)
5997 found_reverse_match
= Opcode_FloatD
;
5998 else if (operand_types
[0].bitfield
.xmmword
5999 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6000 || operand_types
[0].bitfield
.class == RegMMX
6001 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6002 || is_any_vex_encoding(t
))
6003 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6004 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6006 found_reverse_match
= Opcode_D
;
6007 if (t
->opcode_modifier
.floatr
)
6008 found_reverse_match
|= Opcode_FloatR
;
6012 /* Found a forward 2 operand match here. */
6013 switch (t
->operands
)
6016 overlap4
= operand_type_and (i
.types
[4],
6020 overlap3
= operand_type_and (i
.types
[3],
6024 overlap2
= operand_type_and (i
.types
[2],
6029 switch (t
->operands
)
6032 if (!operand_type_match (overlap4
, i
.types
[4])
6033 || !operand_type_register_match (i
.types
[3],
6040 if (!operand_type_match (overlap3
, i
.types
[3])
6041 || ((check_register
& 0xa) == 0xa
6042 && !operand_type_register_match (i
.types
[1],
6046 || ((check_register
& 0xc) == 0xc
6047 && !operand_type_register_match (i
.types
[2],
6054 /* Here we make use of the fact that there are no
6055 reverse match 3 operand instructions. */
6056 if (!operand_type_match (overlap2
, i
.types
[2])
6057 || ((check_register
& 5) == 5
6058 && !operand_type_register_match (i
.types
[0],
6062 || ((check_register
& 6) == 6
6063 && !operand_type_register_match (i
.types
[1],
6071 /* Found either forward/reverse 2, 3 or 4 operand match here:
6072 slip through to break. */
6074 if (!found_cpu_match
)
6077 /* Check if vector and VEX operands are valid. */
6078 if (check_VecOperands (t
) || VEX_check_operands (t
))
6080 specific_error
= i
.error
;
6084 /* We've found a match; break out of loop. */
6088 if (t
== current_templates
->end
)
6090 /* We found no match. */
6091 const char *err_msg
;
6092 switch (specific_error
? specific_error
: i
.error
)
6096 case operand_size_mismatch
:
6097 err_msg
= _("operand size mismatch");
6099 case operand_type_mismatch
:
6100 err_msg
= _("operand type mismatch");
6102 case register_type_mismatch
:
6103 err_msg
= _("register type mismatch");
6105 case number_of_operands_mismatch
:
6106 err_msg
= _("number of operands mismatch");
6108 case invalid_instruction_suffix
:
6109 err_msg
= _("invalid instruction suffix");
6112 err_msg
= _("constant doesn't fit in 4 bits");
6114 case unsupported_with_intel_mnemonic
:
6115 err_msg
= _("unsupported with Intel mnemonic");
6117 case unsupported_syntax
:
6118 err_msg
= _("unsupported syntax");
6121 as_bad (_("unsupported instruction `%s'"),
6122 current_templates
->start
->name
);
6124 case invalid_vsib_address
:
6125 err_msg
= _("invalid VSIB address");
6127 case invalid_vector_register_set
:
6128 err_msg
= _("mask, index, and destination registers must be distinct");
6130 case unsupported_vector_index_register
:
6131 err_msg
= _("unsupported vector index register");
6133 case unsupported_broadcast
:
6134 err_msg
= _("unsupported broadcast");
6136 case broadcast_needed
:
6137 err_msg
= _("broadcast is needed for operand of such type");
6139 case unsupported_masking
:
6140 err_msg
= _("unsupported masking");
6142 case mask_not_on_destination
:
6143 err_msg
= _("mask not on destination operand");
6145 case no_default_mask
:
6146 err_msg
= _("default mask isn't allowed");
6148 case unsupported_rc_sae
:
6149 err_msg
= _("unsupported static rounding/sae");
6151 case rc_sae_operand_not_last_imm
:
6153 err_msg
= _("RC/SAE operand must precede immediate operands");
6155 err_msg
= _("RC/SAE operand must follow immediate operands");
6157 case invalid_register_operand
:
6158 err_msg
= _("invalid register operand");
6161 as_bad (_("%s for `%s'"), err_msg
,
6162 current_templates
->start
->name
);
6166 if (!quiet_warnings
)
6169 && (i
.types
[0].bitfield
.jumpabsolute
6170 != operand_types
[0].bitfield
.jumpabsolute
))
6172 as_warn (_("indirect %s without `*'"), t
->name
);
6175 if (t
->opcode_modifier
.isprefix
6176 && t
->opcode_modifier
.ignoresize
)
6178 /* Warn them that a data or address size prefix doesn't
6179 affect assembly of the next line of code. */
6180 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6184 /* Copy the template we found. */
6187 if (addr_prefix_disp
!= -1)
6188 i
.tm
.operand_types
[addr_prefix_disp
]
6189 = operand_types
[addr_prefix_disp
];
6191 if (found_reverse_match
)
6193 /* If we found a reverse match we must alter the opcode direction
6194 bit and clear/flip the regmem modifier one. found_reverse_match
6195 holds bits to change (different for int & float insns). */
6197 i
.tm
.base_opcode
^= found_reverse_match
;
6199 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6200 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6202 /* Certain SIMD insns have their load forms specified in the opcode
6203 table, and hence we need to _set_ RegMem instead of clearing it.
6204 We need to avoid setting the bit though on insns like KMOVW. */
6205 i
.tm
.opcode_modifier
.regmem
6206 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6207 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6208 && !i
.tm
.opcode_modifier
.regmem
;
6217 unsigned int mem_op
= i
.flags
[0] & Operand_Mem
? 0 : 1;
6219 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
6221 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
6223 as_bad (_("`%s' operand %d must use `%ses' segment"),
6225 intel_syntax
? i
.tm
.operands
- mem_op
: mem_op
+ 1,
6229 /* There's only ever one segment override allowed per instruction.
6230 This instruction possibly has a legal segment override on the
6231 second operand, so copy the segment to where non-string
6232 instructions store it, allowing common code. */
6233 i
.seg
[0] = i
.seg
[1];
6235 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
6237 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
6239 as_bad (_("`%s' operand %d must use `%ses' segment"),
6241 intel_syntax
? i
.tm
.operands
- mem_op
- 1 : mem_op
+ 2,
6250 process_suffix (void)
6252 /* If matched instruction specifies an explicit instruction mnemonic
6254 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6255 i
.suffix
= WORD_MNEM_SUFFIX
;
6256 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6257 i
.suffix
= LONG_MNEM_SUFFIX
;
6258 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6259 i
.suffix
= QWORD_MNEM_SUFFIX
;
6260 else if (i
.reg_operands
)
6262 /* If there's no instruction mnemonic suffix we try to invent one
6263 based on register operands. */
6266 /* We take i.suffix from the last register operand specified,
6267 Destination register type is more significant than source
6268 register type. crc32 in SSE4.2 prefers source register
6270 if (i
.tm
.base_opcode
== 0xf20f38f0
6271 && i
.types
[0].bitfield
.class == Reg
)
6273 if (i
.types
[0].bitfield
.byte
)
6274 i
.suffix
= BYTE_MNEM_SUFFIX
;
6275 else if (i
.types
[0].bitfield
.word
)
6276 i
.suffix
= WORD_MNEM_SUFFIX
;
6277 else if (i
.types
[0].bitfield
.dword
)
6278 i
.suffix
= LONG_MNEM_SUFFIX
;
6279 else if (i
.types
[0].bitfield
.qword
)
6280 i
.suffix
= QWORD_MNEM_SUFFIX
;
6287 if (i
.tm
.base_opcode
== 0xf20f38f0)
6289 /* We have to know the operand size for crc32. */
6290 as_bad (_("ambiguous memory operand size for `%s`"),
6295 for (op
= i
.operands
; --op
>= 0;)
6296 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6297 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6299 if (i
.types
[op
].bitfield
.class != Reg
)
6301 if (i
.types
[op
].bitfield
.byte
)
6302 i
.suffix
= BYTE_MNEM_SUFFIX
;
6303 else if (i
.types
[op
].bitfield
.word
)
6304 i
.suffix
= WORD_MNEM_SUFFIX
;
6305 else if (i
.types
[op
].bitfield
.dword
)
6306 i
.suffix
= LONG_MNEM_SUFFIX
;
6307 else if (i
.types
[op
].bitfield
.qword
)
6308 i
.suffix
= QWORD_MNEM_SUFFIX
;
6315 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6318 && i
.tm
.opcode_modifier
.ignoresize
6319 && i
.tm
.opcode_modifier
.no_bsuf
)
6321 else if (!check_byte_reg ())
6324 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6327 && i
.tm
.opcode_modifier
.ignoresize
6328 && i
.tm
.opcode_modifier
.no_lsuf
6329 && !i
.tm
.opcode_modifier
.todword
6330 && !i
.tm
.opcode_modifier
.toqword
)
6332 else if (!check_long_reg ())
6335 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6338 && i
.tm
.opcode_modifier
.ignoresize
6339 && i
.tm
.opcode_modifier
.no_qsuf
6340 && !i
.tm
.opcode_modifier
.todword
6341 && !i
.tm
.opcode_modifier
.toqword
)
6343 else if (!check_qword_reg ())
6346 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6349 && i
.tm
.opcode_modifier
.ignoresize
6350 && i
.tm
.opcode_modifier
.no_wsuf
)
6352 else if (!check_word_reg ())
6355 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
6356 /* Do nothing if the instruction is going to ignore the prefix. */
6361 else if (i
.tm
.opcode_modifier
.defaultsize
6363 /* exclude fldenv/frstor/fsave/fstenv */
6364 && i
.tm
.opcode_modifier
.no_ssuf
)
6366 if (stackop_size
== LONG_MNEM_SUFFIX
6367 && i
.tm
.base_opcode
== 0xcf)
6369 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6370 .code16gcc directive to support 16-bit mode with
6371 32-bit address. For IRET without a suffix, generate
6372 16-bit IRET (opcode 0xcf) to return from an interrupt
6374 i
.suffix
= WORD_MNEM_SUFFIX
;
6375 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6378 i
.suffix
= stackop_size
;
6380 else if (intel_syntax
6382 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
6383 || i
.tm
.opcode_modifier
.jumpbyte
6384 || i
.tm
.opcode_modifier
.jumpintersegment
6385 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6386 && i
.tm
.extension_opcode
<= 3)))
6391 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6393 i
.suffix
= QWORD_MNEM_SUFFIX
;
6398 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6399 i
.suffix
= LONG_MNEM_SUFFIX
;
6402 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6403 i
.suffix
= WORD_MNEM_SUFFIX
;
6412 if (i
.tm
.opcode_modifier
.w
)
6414 as_bad (_("no instruction mnemonic suffix given and "
6415 "no register operands; can't size instruction"));
6421 unsigned int suffixes
;
6423 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6424 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6426 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6428 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6430 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6432 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6435 /* There are more than suffix matches. */
6436 if (i
.tm
.opcode_modifier
.w
6437 || ((suffixes
& (suffixes
- 1))
6438 && !i
.tm
.opcode_modifier
.defaultsize
6439 && !i
.tm
.opcode_modifier
.ignoresize
))
6441 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6447 /* Change the opcode based on the operand size given by i.suffix. */
6450 /* Size floating point instruction. */
6451 case LONG_MNEM_SUFFIX
:
6452 if (i
.tm
.opcode_modifier
.floatmf
)
6454 i
.tm
.base_opcode
^= 4;
6458 case WORD_MNEM_SUFFIX
:
6459 case QWORD_MNEM_SUFFIX
:
6460 /* It's not a byte, select word/dword operation. */
6461 if (i
.tm
.opcode_modifier
.w
)
6463 if (i
.tm
.opcode_modifier
.shortform
)
6464 i
.tm
.base_opcode
|= 8;
6466 i
.tm
.base_opcode
|= 1;
6469 case SHORT_MNEM_SUFFIX
:
6470 /* Now select between word & dword operations via the operand
6471 size prefix, except for instructions that will ignore this
6473 if (i
.reg_operands
> 0
6474 && i
.types
[0].bitfield
.class == Reg
6475 && i
.tm
.opcode_modifier
.addrprefixopreg
6476 && (i
.tm
.opcode_modifier
.immext
6477 || i
.operands
== 1))
6479 /* The address size override prefix changes the size of the
6481 if ((flag_code
== CODE_32BIT
6482 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
6483 || (flag_code
!= CODE_32BIT
6484 && i
.op
[0].regs
->reg_type
.bitfield
.dword
))
6485 if (!add_prefix (ADDR_PREFIX_OPCODE
))
6488 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
6489 && !i
.tm
.opcode_modifier
.ignoresize
6490 && !i
.tm
.opcode_modifier
.floatmf
6491 && !is_any_vex_encoding (&i
.tm
)
6492 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
6493 || (flag_code
== CODE_64BIT
6494 && i
.tm
.opcode_modifier
.jumpbyte
)))
6496 unsigned int prefix
= DATA_PREFIX_OPCODE
;
6498 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
6499 prefix
= ADDR_PREFIX_OPCODE
;
6501 if (!add_prefix (prefix
))
6505 /* Set mode64 for an operand. */
6506 if (i
.suffix
== QWORD_MNEM_SUFFIX
6507 && flag_code
== CODE_64BIT
6508 && !i
.tm
.opcode_modifier
.norex64
6509 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6511 && ! (i
.operands
== 2
6512 && i
.tm
.base_opcode
== 0x90
6513 && i
.tm
.extension_opcode
== None
6514 && i
.types
[0].bitfield
.instance
== Accum
6515 && i
.types
[0].bitfield
.qword
6516 && i
.types
[1].bitfield
.instance
== Accum
6517 && i
.types
[1].bitfield
.qword
))
6523 if (i
.reg_operands
!= 0
6525 && i
.tm
.opcode_modifier
.addrprefixopreg
6526 && !i
.tm
.opcode_modifier
.immext
)
6528 /* Check invalid register operand when the address size override
6529 prefix changes the size of register operands. */
6531 enum { need_word
, need_dword
, need_qword
} need
;
6533 if (flag_code
== CODE_32BIT
)
6534 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
6537 if (i
.prefix
[ADDR_PREFIX
])
6540 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
6543 for (op
= 0; op
< i
.operands
; op
++)
6544 if (i
.types
[op
].bitfield
.class == Reg
6545 && ((need
== need_word
6546 && !i
.op
[op
].regs
->reg_type
.bitfield
.word
)
6547 || (need
== need_dword
6548 && !i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
6549 || (need
== need_qword
6550 && !i
.op
[op
].regs
->reg_type
.bitfield
.qword
)))
6552 as_bad (_("invalid register operand size for `%s'"),
6562 check_byte_reg (void)
6566 for (op
= i
.operands
; --op
>= 0;)
6568 /* Skip non-register operands. */
6569 if (i
.types
[op
].bitfield
.class != Reg
)
6572 /* If this is an eight bit register, it's OK. If it's the 16 or
6573 32 bit version of an eight bit register, we will just use the
6574 low portion, and that's OK too. */
6575 if (i
.types
[op
].bitfield
.byte
)
6578 /* I/O port address operands are OK too. */
6579 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
6580 && i
.tm
.operand_types
[op
].bitfield
.word
)
6583 /* crc32 doesn't generate this warning. */
6584 if (i
.tm
.base_opcode
== 0xf20f38f0)
6587 if ((i
.types
[op
].bitfield
.word
6588 || i
.types
[op
].bitfield
.dword
6589 || i
.types
[op
].bitfield
.qword
)
6590 && i
.op
[op
].regs
->reg_num
< 4
6591 /* Prohibit these changes in 64bit mode, since the lowering
6592 would be more complicated. */
6593 && flag_code
!= CODE_64BIT
)
6595 #if REGISTER_WARNINGS
6596 if (!quiet_warnings
)
6597 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6599 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.word
6600 ? REGNAM_AL
- REGNAM_AX
6601 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
6603 i
.op
[op
].regs
->reg_name
,
6608 /* Any other register is bad. */
6609 if (i
.types
[op
].bitfield
.class == Reg
6610 || i
.types
[op
].bitfield
.class == RegMMX
6611 || i
.types
[op
].bitfield
.class == RegSIMD
6612 || i
.types
[op
].bitfield
.class == SReg
6613 || i
.types
[op
].bitfield
.class == RegCR
6614 || i
.types
[op
].bitfield
.class == RegDR
6615 || i
.types
[op
].bitfield
.class == RegTR
)
6617 as_bad (_("`%s%s' not allowed with `%s%c'"),
6619 i
.op
[op
].regs
->reg_name
,
6629 check_long_reg (void)
6633 for (op
= i
.operands
; --op
>= 0;)
6634 /* Skip non-register operands. */
6635 if (i
.types
[op
].bitfield
.class != Reg
)
6637 /* Reject eight bit registers, except where the template requires
6638 them. (eg. movzb) */
6639 else if (i
.types
[op
].bitfield
.byte
6640 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6641 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6642 && (i
.tm
.operand_types
[op
].bitfield
.word
6643 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6645 as_bad (_("`%s%s' not allowed with `%s%c'"),
6647 i
.op
[op
].regs
->reg_name
,
6652 /* Warn if the e prefix on a general reg is missing. */
6653 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6654 && i
.types
[op
].bitfield
.word
6655 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6656 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6657 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6659 /* Prohibit these changes in the 64bit mode, since the
6660 lowering is more complicated. */
6661 if (flag_code
== CODE_64BIT
)
6663 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6664 register_prefix
, i
.op
[op
].regs
->reg_name
,
6668 #if REGISTER_WARNINGS
6669 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6671 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
6672 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6675 /* Warn if the r prefix on a general reg is present. */
6676 else if (i
.types
[op
].bitfield
.qword
6677 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6678 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6679 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6682 && i
.tm
.opcode_modifier
.toqword
6683 && i
.types
[0].bitfield
.class != RegSIMD
)
6685 /* Convert to QWORD. We want REX byte. */
6686 i
.suffix
= QWORD_MNEM_SUFFIX
;
6690 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6691 register_prefix
, i
.op
[op
].regs
->reg_name
,
6700 check_qword_reg (void)
6704 for (op
= i
.operands
; --op
>= 0; )
6705 /* Skip non-register operands. */
6706 if (i
.types
[op
].bitfield
.class != Reg
)
6708 /* Reject eight bit registers, except where the template requires
6709 them. (eg. movzb) */
6710 else if (i
.types
[op
].bitfield
.byte
6711 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6712 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6713 && (i
.tm
.operand_types
[op
].bitfield
.word
6714 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6716 as_bad (_("`%s%s' not allowed with `%s%c'"),
6718 i
.op
[op
].regs
->reg_name
,
6723 /* Warn if the r prefix on a general reg is missing. */
6724 else if ((i
.types
[op
].bitfield
.word
6725 || i
.types
[op
].bitfield
.dword
)
6726 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6727 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6728 && i
.tm
.operand_types
[op
].bitfield
.qword
)
6730 /* Prohibit these changes in the 64bit mode, since the
6731 lowering is more complicated. */
6733 && i
.tm
.opcode_modifier
.todword
6734 && i
.types
[0].bitfield
.class != RegSIMD
)
6736 /* Convert to DWORD. We don't want REX byte. */
6737 i
.suffix
= LONG_MNEM_SUFFIX
;
6741 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6742 register_prefix
, i
.op
[op
].regs
->reg_name
,
6751 check_word_reg (void)
6754 for (op
= i
.operands
; --op
>= 0;)
6755 /* Skip non-register operands. */
6756 if (i
.types
[op
].bitfield
.class != Reg
)
6758 /* Reject eight bit registers, except where the template requires
6759 them. (eg. movzb) */
6760 else if (i
.types
[op
].bitfield
.byte
6761 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6762 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6763 && (i
.tm
.operand_types
[op
].bitfield
.word
6764 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6766 as_bad (_("`%s%s' not allowed with `%s%c'"),
6768 i
.op
[op
].regs
->reg_name
,
6773 /* Warn if the e or r prefix on a general reg is present. */
6774 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6775 && (i
.types
[op
].bitfield
.dword
6776 || i
.types
[op
].bitfield
.qword
)
6777 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6778 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6779 && i
.tm
.operand_types
[op
].bitfield
.word
)
6781 /* Prohibit these changes in the 64bit mode, since the
6782 lowering is more complicated. */
6783 if (flag_code
== CODE_64BIT
)
6785 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6786 register_prefix
, i
.op
[op
].regs
->reg_name
,
6790 #if REGISTER_WARNINGS
6791 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6793 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
6794 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6801 update_imm (unsigned int j
)
6803 i386_operand_type overlap
= i
.types
[j
];
6804 if ((overlap
.bitfield
.imm8
6805 || overlap
.bitfield
.imm8s
6806 || overlap
.bitfield
.imm16
6807 || overlap
.bitfield
.imm32
6808 || overlap
.bitfield
.imm32s
6809 || overlap
.bitfield
.imm64
)
6810 && !operand_type_equal (&overlap
, &imm8
)
6811 && !operand_type_equal (&overlap
, &imm8s
)
6812 && !operand_type_equal (&overlap
, &imm16
)
6813 && !operand_type_equal (&overlap
, &imm32
)
6814 && !operand_type_equal (&overlap
, &imm32s
)
6815 && !operand_type_equal (&overlap
, &imm64
))
6819 i386_operand_type temp
;
6821 operand_type_set (&temp
, 0);
6822 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6824 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
6825 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
6827 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6828 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
6829 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6831 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
6832 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
6835 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
6838 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
6839 || operand_type_equal (&overlap
, &imm16_32
)
6840 || operand_type_equal (&overlap
, &imm16_32s
))
6842 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6847 if (!operand_type_equal (&overlap
, &imm8
)
6848 && !operand_type_equal (&overlap
, &imm8s
)
6849 && !operand_type_equal (&overlap
, &imm16
)
6850 && !operand_type_equal (&overlap
, &imm32
)
6851 && !operand_type_equal (&overlap
, &imm32s
)
6852 && !operand_type_equal (&overlap
, &imm64
))
6854 as_bad (_("no instruction mnemonic suffix given; "
6855 "can't determine immediate size"));
6859 i
.types
[j
] = overlap
;
6869 /* Update the first 2 immediate operands. */
6870 n
= i
.operands
> 2 ? 2 : i
.operands
;
6873 for (j
= 0; j
< n
; j
++)
6874 if (update_imm (j
) == 0)
6877 /* The 3rd operand can't be immediate operand. */
6878 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
6885 process_operands (void)
6887 /* Default segment register this instruction will use for memory
6888 accesses. 0 means unknown. This is only for optimizing out
6889 unnecessary segment overrides. */
6890 const seg_entry
*default_seg
= 0;
6892 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
6894 unsigned int dupl
= i
.operands
;
6895 unsigned int dest
= dupl
- 1;
6898 /* The destination must be an xmm register. */
6899 gas_assert (i
.reg_operands
6900 && MAX_OPERANDS
> dupl
6901 && operand_type_equal (&i
.types
[dest
], ®xmm
));
6903 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6904 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6906 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
6908 /* Keep xmm0 for instructions with VEX prefix and 3
6910 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
6911 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
6916 /* We remove the first xmm0 and keep the number of
6917 operands unchanged, which in fact duplicates the
6919 for (j
= 1; j
< i
.operands
; j
++)
6921 i
.op
[j
- 1] = i
.op
[j
];
6922 i
.types
[j
- 1] = i
.types
[j
];
6923 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6924 i
.flags
[j
- 1] = i
.flags
[j
];
6928 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6930 gas_assert ((MAX_OPERANDS
- 1) > dupl
6931 && (i
.tm
.opcode_modifier
.vexsources
6934 /* Add the implicit xmm0 for instructions with VEX prefix
6936 for (j
= i
.operands
; j
> 0; j
--)
6938 i
.op
[j
] = i
.op
[j
- 1];
6939 i
.types
[j
] = i
.types
[j
- 1];
6940 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6941 i
.flags
[j
] = i
.flags
[j
- 1];
6944 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6945 i
.types
[0] = regxmm
;
6946 i
.tm
.operand_types
[0] = regxmm
;
6949 i
.reg_operands
+= 2;
6954 i
.op
[dupl
] = i
.op
[dest
];
6955 i
.types
[dupl
] = i
.types
[dest
];
6956 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6957 i
.flags
[dupl
] = i
.flags
[dest
];
6966 i
.op
[dupl
] = i
.op
[dest
];
6967 i
.types
[dupl
] = i
.types
[dest
];
6968 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6969 i
.flags
[dupl
] = i
.flags
[dest
];
6972 if (i
.tm
.opcode_modifier
.immext
)
6975 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6976 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6980 for (j
= 1; j
< i
.operands
; j
++)
6982 i
.op
[j
- 1] = i
.op
[j
];
6983 i
.types
[j
- 1] = i
.types
[j
];
6985 /* We need to adjust fields in i.tm since they are used by
6986 build_modrm_byte. */
6987 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6989 i
.flags
[j
- 1] = i
.flags
[j
];
6996 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
6998 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7000 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7001 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7002 regnum
= register_number (i
.op
[1].regs
);
7003 first_reg_in_group
= regnum
& ~3;
7004 last_reg_in_group
= first_reg_in_group
+ 3;
7005 if (regnum
!= first_reg_in_group
)
7006 as_warn (_("source register `%s%s' implicitly denotes"
7007 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7008 register_prefix
, i
.op
[1].regs
->reg_name
,
7009 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7010 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7013 else if (i
.tm
.opcode_modifier
.regkludge
)
7015 /* The imul $imm, %reg instruction is converted into
7016 imul $imm, %reg, %reg, and the clr %reg instruction
7017 is converted into xor %reg, %reg. */
7019 unsigned int first_reg_op
;
7021 if (operand_type_check (i
.types
[0], reg
))
7025 /* Pretend we saw the extra register operand. */
7026 gas_assert (i
.reg_operands
== 1
7027 && i
.op
[first_reg_op
+ 1].regs
== 0);
7028 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7029 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7034 if (i
.tm
.opcode_modifier
.modrm
)
7036 /* The opcode is completed (modulo i.tm.extension_opcode which
7037 must be put into the modrm byte). Now, we make the modrm and
7038 index base bytes based on all the info we've collected. */
7040 default_seg
= build_modrm_byte ();
7042 else if (i
.types
[0].bitfield
.class == SReg
)
7044 if (flag_code
!= CODE_64BIT
7045 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7046 && i
.op
[0].regs
->reg_num
== 1
7047 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7048 && i
.op
[0].regs
->reg_num
< 4)
7050 as_bad (_("you can't `%s %s%s'"),
7051 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7054 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7056 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7057 i
.tm
.opcode_length
= 2;
7059 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7061 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7065 else if (i
.tm
.opcode_modifier
.isstring
)
7067 /* For the string instructions that allow a segment override
7068 on one of their operands, the default segment is ds. */
7071 else if (i
.tm
.opcode_modifier
.shortform
)
7073 /* The register or float register operand is in operand
7075 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7077 /* Register goes in low 3 bits of opcode. */
7078 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7079 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7081 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7083 /* Warn about some common errors, but press on regardless.
7084 The first case can be generated by gcc (<= 2.8.1). */
7085 if (i
.operands
== 2)
7087 /* Reversed arguments on faddp, fsubp, etc. */
7088 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7089 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7090 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7094 /* Extraneous `l' suffix on fp insn. */
7095 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7096 register_prefix
, i
.op
[0].regs
->reg_name
);
7101 if (i
.tm
.base_opcode
== 0x8d /* lea */
7104 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7106 /* If a segment was explicitly specified, and the specified segment
7107 is not the default, use an opcode prefix to select it. If we
7108 never figured out what the default segment is, then default_seg
7109 will be zero at this point, and the specified segment prefix will
7111 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
7113 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7119 static const seg_entry
*
7120 build_modrm_byte (void)
7122 const seg_entry
*default_seg
= 0;
7123 unsigned int source
, dest
;
7126 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7129 unsigned int nds
, reg_slot
;
7132 dest
= i
.operands
- 1;
7135 /* There are 2 kinds of instructions:
7136 1. 5 operands: 4 register operands or 3 register operands
7137 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7138 VexW0 or VexW1. The destination must be either XMM, YMM or
7140 2. 4 operands: 4 register operands or 3 register operands
7141 plus 1 memory operand, with VexXDS. */
7142 gas_assert ((i
.reg_operands
== 4
7143 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7144 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7145 && i
.tm
.opcode_modifier
.vexw
7146 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7148 /* If VexW1 is set, the first non-immediate operand is the source and
7149 the second non-immediate one is encoded in the immediate operand. */
7150 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7152 source
= i
.imm_operands
;
7153 reg_slot
= i
.imm_operands
+ 1;
7157 source
= i
.imm_operands
+ 1;
7158 reg_slot
= i
.imm_operands
;
7161 if (i
.imm_operands
== 0)
7163 /* When there is no immediate operand, generate an 8bit
7164 immediate operand to encode the first operand. */
7165 exp
= &im_expressions
[i
.imm_operands
++];
7166 i
.op
[i
.operands
].imms
= exp
;
7167 i
.types
[i
.operands
] = imm8
;
7170 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7171 exp
->X_op
= O_constant
;
7172 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7173 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7177 gas_assert (i
.imm_operands
== 1);
7178 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7179 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7181 /* Turn on Imm8 again so that output_imm will generate it. */
7182 i
.types
[0].bitfield
.imm8
= 1;
7184 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7185 i
.op
[0].imms
->X_add_number
7186 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7187 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7190 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7191 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7196 /* i.reg_operands MUST be the number of real register operands;
7197 implicit registers do not count. If there are 3 register
7198 operands, it must be a instruction with VexNDS. For a
7199 instruction with VexNDD, the destination register is encoded
7200 in VEX prefix. If there are 4 register operands, it must be
7201 a instruction with VEX prefix and 3 sources. */
7202 if (i
.mem_operands
== 0
7203 && ((i
.reg_operands
== 2
7204 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7205 || (i
.reg_operands
== 3
7206 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7207 || (i
.reg_operands
== 4 && vex_3_sources
)))
7215 /* When there are 3 operands, one of them may be immediate,
7216 which may be the first or the last operand. Otherwise,
7217 the first operand must be shift count register (cl) or it
7218 is an instruction with VexNDS. */
7219 gas_assert (i
.imm_operands
== 1
7220 || (i
.imm_operands
== 0
7221 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7222 || (i
.types
[0].bitfield
.instance
== RegC
7223 && i
.types
[0].bitfield
.byte
))));
7224 if (operand_type_check (i
.types
[0], imm
)
7225 || (i
.types
[0].bitfield
.instance
== RegC
7226 && i
.types
[0].bitfield
.byte
))
7232 /* When there are 4 operands, the first two must be 8bit
7233 immediate operands. The source operand will be the 3rd
7236 For instructions with VexNDS, if the first operand
7237 an imm8, the source operand is the 2nd one. If the last
7238 operand is imm8, the source operand is the first one. */
7239 gas_assert ((i
.imm_operands
== 2
7240 && i
.types
[0].bitfield
.imm8
7241 && i
.types
[1].bitfield
.imm8
)
7242 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7243 && i
.imm_operands
== 1
7244 && (i
.types
[0].bitfield
.imm8
7245 || i
.types
[i
.operands
- 1].bitfield
.imm8
7247 if (i
.imm_operands
== 2)
7251 if (i
.types
[0].bitfield
.imm8
)
7258 if (is_evex_encoding (&i
.tm
))
7260 /* For EVEX instructions, when there are 5 operands, the
7261 first one must be immediate operand. If the second one
7262 is immediate operand, the source operand is the 3th
7263 one. If the last one is immediate operand, the source
7264 operand is the 2nd one. */
7265 gas_assert (i
.imm_operands
== 2
7266 && i
.tm
.opcode_modifier
.sae
7267 && operand_type_check (i
.types
[0], imm
));
7268 if (operand_type_check (i
.types
[1], imm
))
7270 else if (operand_type_check (i
.types
[4], imm
))
7284 /* RC/SAE operand could be between DEST and SRC. That happens
7285 when one operand is GPR and the other one is XMM/YMM/ZMM
7287 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7290 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7292 /* For instructions with VexNDS, the register-only source
7293 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7294 register. It is encoded in VEX prefix. */
7296 i386_operand_type op
;
7299 /* Check register-only source operand when two source
7300 operands are swapped. */
7301 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7302 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7310 op
= i
.tm
.operand_types
[vvvv
];
7311 if ((dest
+ 1) >= i
.operands
7312 || ((op
.bitfield
.class != Reg
7313 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7314 && op
.bitfield
.class != RegSIMD
7315 && !operand_type_equal (&op
, ®mask
)))
7317 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7323 /* One of the register operands will be encoded in the i.rm.reg
7324 field, the other in the combined i.rm.mode and i.rm.regmem
7325 fields. If no form of this instruction supports a memory
7326 destination operand, then we assume the source operand may
7327 sometimes be a memory operand and so we need to store the
7328 destination in the i.rm.reg field. */
7329 if (!i
.tm
.opcode_modifier
.regmem
7330 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7332 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7333 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7334 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7335 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7336 i
.has_regmmx
= TRUE
;
7337 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7338 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7340 if (i
.types
[dest
].bitfield
.zmmword
7341 || i
.types
[source
].bitfield
.zmmword
)
7342 i
.has_regzmm
= TRUE
;
7343 else if (i
.types
[dest
].bitfield
.ymmword
7344 || i
.types
[source
].bitfield
.ymmword
)
7345 i
.has_regymm
= TRUE
;
7347 i
.has_regxmm
= TRUE
;
7349 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7351 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7353 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7355 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7360 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7361 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7362 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7364 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7366 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7368 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7371 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7373 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7376 add_prefix (LOCK_PREFIX_OPCODE
);
7380 { /* If it's not 2 reg operands... */
7385 unsigned int fake_zero_displacement
= 0;
7388 for (op
= 0; op
< i
.operands
; op
++)
7389 if (i
.flags
[op
] & Operand_Mem
)
7391 gas_assert (op
< i
.operands
);
7393 if (i
.tm
.opcode_modifier
.vecsib
)
7395 if (i
.index_reg
->reg_num
== RegIZ
)
7398 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7401 i
.sib
.base
= NO_BASE_REGISTER
;
7402 i
.sib
.scale
= i
.log2_scale_factor
;
7403 i
.types
[op
].bitfield
.disp8
= 0;
7404 i
.types
[op
].bitfield
.disp16
= 0;
7405 i
.types
[op
].bitfield
.disp64
= 0;
7406 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7408 /* Must be 32 bit */
7409 i
.types
[op
].bitfield
.disp32
= 1;
7410 i
.types
[op
].bitfield
.disp32s
= 0;
7414 i
.types
[op
].bitfield
.disp32
= 0;
7415 i
.types
[op
].bitfield
.disp32s
= 1;
7418 i
.sib
.index
= i
.index_reg
->reg_num
;
7419 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7421 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7427 if (i
.base_reg
== 0)
7430 if (!i
.disp_operands
)
7431 fake_zero_displacement
= 1;
7432 if (i
.index_reg
== 0)
7434 i386_operand_type newdisp
;
7436 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7437 /* Operand is just <disp> */
7438 if (flag_code
== CODE_64BIT
)
7440 /* 64bit mode overwrites the 32bit absolute
7441 addressing by RIP relative addressing and
7442 absolute addressing is encoded by one of the
7443 redundant SIB forms. */
7444 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7445 i
.sib
.base
= NO_BASE_REGISTER
;
7446 i
.sib
.index
= NO_INDEX_REGISTER
;
7447 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7449 else if ((flag_code
== CODE_16BIT
)
7450 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7452 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7457 i
.rm
.regmem
= NO_BASE_REGISTER
;
7460 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7461 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7463 else if (!i
.tm
.opcode_modifier
.vecsib
)
7465 /* !i.base_reg && i.index_reg */
7466 if (i
.index_reg
->reg_num
== RegIZ
)
7467 i
.sib
.index
= NO_INDEX_REGISTER
;
7469 i
.sib
.index
= i
.index_reg
->reg_num
;
7470 i
.sib
.base
= NO_BASE_REGISTER
;
7471 i
.sib
.scale
= i
.log2_scale_factor
;
7472 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7473 i
.types
[op
].bitfield
.disp8
= 0;
7474 i
.types
[op
].bitfield
.disp16
= 0;
7475 i
.types
[op
].bitfield
.disp64
= 0;
7476 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7478 /* Must be 32 bit */
7479 i
.types
[op
].bitfield
.disp32
= 1;
7480 i
.types
[op
].bitfield
.disp32s
= 0;
7484 i
.types
[op
].bitfield
.disp32
= 0;
7485 i
.types
[op
].bitfield
.disp32s
= 1;
7487 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7491 /* RIP addressing for 64bit mode. */
7492 else if (i
.base_reg
->reg_num
== RegIP
)
7494 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7495 i
.rm
.regmem
= NO_BASE_REGISTER
;
7496 i
.types
[op
].bitfield
.disp8
= 0;
7497 i
.types
[op
].bitfield
.disp16
= 0;
7498 i
.types
[op
].bitfield
.disp32
= 0;
7499 i
.types
[op
].bitfield
.disp32s
= 1;
7500 i
.types
[op
].bitfield
.disp64
= 0;
7501 i
.flags
[op
] |= Operand_PCrel
;
7502 if (! i
.disp_operands
)
7503 fake_zero_displacement
= 1;
7505 else if (i
.base_reg
->reg_type
.bitfield
.word
)
7507 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7508 switch (i
.base_reg
->reg_num
)
7511 if (i
.index_reg
== 0)
7513 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7514 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
7518 if (i
.index_reg
== 0)
7521 if (operand_type_check (i
.types
[op
], disp
) == 0)
7523 /* fake (%bp) into 0(%bp) */
7524 i
.types
[op
].bitfield
.disp8
= 1;
7525 fake_zero_displacement
= 1;
7528 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7529 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
7531 default: /* (%si) -> 4 or (%di) -> 5 */
7532 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
7534 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7536 else /* i.base_reg and 32/64 bit mode */
7538 if (flag_code
== CODE_64BIT
7539 && operand_type_check (i
.types
[op
], disp
))
7541 i
.types
[op
].bitfield
.disp16
= 0;
7542 i
.types
[op
].bitfield
.disp64
= 0;
7543 if (i
.prefix
[ADDR_PREFIX
] == 0)
7545 i
.types
[op
].bitfield
.disp32
= 0;
7546 i
.types
[op
].bitfield
.disp32s
= 1;
7550 i
.types
[op
].bitfield
.disp32
= 1;
7551 i
.types
[op
].bitfield
.disp32s
= 0;
7555 if (!i
.tm
.opcode_modifier
.vecsib
)
7556 i
.rm
.regmem
= i
.base_reg
->reg_num
;
7557 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
7559 i
.sib
.base
= i
.base_reg
->reg_num
;
7560 /* x86-64 ignores REX prefix bit here to avoid decoder
7562 if (!(i
.base_reg
->reg_flags
& RegRex
)
7563 && (i
.base_reg
->reg_num
== EBP_REG_NUM
7564 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
7566 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
7568 fake_zero_displacement
= 1;
7569 i
.types
[op
].bitfield
.disp8
= 1;
7571 i
.sib
.scale
= i
.log2_scale_factor
;
7572 if (i
.index_reg
== 0)
7574 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7575 /* <disp>(%esp) becomes two byte modrm with no index
7576 register. We've already stored the code for esp
7577 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7578 Any base register besides %esp will not use the
7579 extra modrm byte. */
7580 i
.sib
.index
= NO_INDEX_REGISTER
;
7582 else if (!i
.tm
.opcode_modifier
.vecsib
)
7584 if (i
.index_reg
->reg_num
== RegIZ
)
7585 i
.sib
.index
= NO_INDEX_REGISTER
;
7587 i
.sib
.index
= i
.index_reg
->reg_num
;
7588 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7589 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7594 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
7595 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
7599 if (!fake_zero_displacement
7603 fake_zero_displacement
= 1;
7604 if (i
.disp_encoding
== disp_encoding_8bit
)
7605 i
.types
[op
].bitfield
.disp8
= 1;
7607 i
.types
[op
].bitfield
.disp32
= 1;
7609 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7613 if (fake_zero_displacement
)
7615 /* Fakes a zero displacement assuming that i.types[op]
7616 holds the correct displacement size. */
7619 gas_assert (i
.op
[op
].disps
== 0);
7620 exp
= &disp_expressions
[i
.disp_operands
++];
7621 i
.op
[op
].disps
= exp
;
7622 exp
->X_op
= O_constant
;
7623 exp
->X_add_number
= 0;
7624 exp
->X_add_symbol
= (symbolS
*) 0;
7625 exp
->X_op_symbol
= (symbolS
*) 0;
7633 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
7635 if (operand_type_check (i
.types
[0], imm
))
7636 i
.vex
.register_specifier
= NULL
;
7639 /* VEX.vvvv encodes one of the sources when the first
7640 operand is not an immediate. */
7641 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7642 i
.vex
.register_specifier
= i
.op
[0].regs
;
7644 i
.vex
.register_specifier
= i
.op
[1].regs
;
7647 /* Destination is a XMM register encoded in the ModRM.reg
7649 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
7650 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
7653 /* ModRM.rm and VEX.B encodes the other source. */
7654 if (!i
.mem_operands
)
7658 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7659 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7661 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
7663 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7667 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
7669 i
.vex
.register_specifier
= i
.op
[2].regs
;
7670 if (!i
.mem_operands
)
7673 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7674 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7678 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7679 (if any) based on i.tm.extension_opcode. Again, we must be
7680 careful to make sure that segment/control/debug/test/MMX
7681 registers are coded into the i.rm.reg field. */
7682 else if (i
.reg_operands
)
7685 unsigned int vex_reg
= ~0;
7687 for (op
= 0; op
< i
.operands
; op
++)
7689 if (i
.types
[op
].bitfield
.class == Reg
7690 || i
.types
[op
].bitfield
.class == RegBND
7691 || i
.types
[op
].bitfield
.class == RegMask
7692 || i
.types
[op
].bitfield
.class == SReg
7693 || i
.types
[op
].bitfield
.class == RegCR
7694 || i
.types
[op
].bitfield
.class == RegDR
7695 || i
.types
[op
].bitfield
.class == RegTR
)
7697 if (i
.types
[op
].bitfield
.class == RegSIMD
)
7699 if (i
.types
[op
].bitfield
.zmmword
)
7700 i
.has_regzmm
= TRUE
;
7701 else if (i
.types
[op
].bitfield
.ymmword
)
7702 i
.has_regymm
= TRUE
;
7704 i
.has_regxmm
= TRUE
;
7707 if (i
.types
[op
].bitfield
.class == RegMMX
)
7709 i
.has_regmmx
= TRUE
;
7716 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7718 /* For instructions with VexNDS, the register-only
7719 source operand is encoded in VEX prefix. */
7720 gas_assert (mem
!= (unsigned int) ~0);
7725 gas_assert (op
< i
.operands
);
7729 /* Check register-only source operand when two source
7730 operands are swapped. */
7731 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
7732 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
7736 gas_assert (mem
== (vex_reg
+ 1)
7737 && op
< i
.operands
);
7742 gas_assert (vex_reg
< i
.operands
);
7746 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
7748 /* For instructions with VexNDD, the register destination
7749 is encoded in VEX prefix. */
7750 if (i
.mem_operands
== 0)
7752 /* There is no memory operand. */
7753 gas_assert ((op
+ 2) == i
.operands
);
7758 /* There are only 2 non-immediate operands. */
7759 gas_assert (op
< i
.imm_operands
+ 2
7760 && i
.operands
== i
.imm_operands
+ 2);
7761 vex_reg
= i
.imm_operands
+ 1;
7765 gas_assert (op
< i
.operands
);
7767 if (vex_reg
!= (unsigned int) ~0)
7769 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
7771 if ((type
->bitfield
.class != Reg
7772 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
7773 && type
->bitfield
.class != RegSIMD
7774 && !operand_type_equal (type
, ®mask
))
7777 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
7780 /* Don't set OP operand twice. */
7783 /* If there is an extension opcode to put here, the
7784 register number must be put into the regmem field. */
7785 if (i
.tm
.extension_opcode
!= None
)
7787 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
7788 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7790 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7795 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
7796 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7798 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7803 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7804 must set it to 3 to indicate this is a register operand
7805 in the regmem field. */
7806 if (!i
.mem_operands
)
7810 /* Fill in i.rm.reg field with extension opcode (if any). */
7811 if (i
.tm
.extension_opcode
!= None
)
7812 i
.rm
.reg
= i
.tm
.extension_opcode
;
7818 output_branch (void)
7824 relax_substateT subtype
;
7828 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
7829 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
7832 if (i
.prefix
[DATA_PREFIX
] != 0)
7838 /* Pentium4 branch hints. */
7839 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7840 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7845 if (i
.prefix
[REX_PREFIX
] != 0)
7851 /* BND prefixed jump. */
7852 if (i
.prefix
[BND_PREFIX
] != 0)
7854 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7858 if (i
.prefixes
!= 0 && !intel_syntax
)
7859 as_warn (_("skipping prefixes on this instruction"));
7861 /* It's always a symbol; End frag & setup for relax.
7862 Make sure there is enough room in this frag for the largest
7863 instruction we may generate in md_convert_frag. This is 2
7864 bytes for the opcode and room for the prefix and largest
7866 frag_grow (prefix
+ 2 + 4);
7867 /* Prefix and 1 opcode byte go in fr_fix. */
7868 p
= frag_more (prefix
+ 1);
7869 if (i
.prefix
[DATA_PREFIX
] != 0)
7870 *p
++ = DATA_PREFIX_OPCODE
;
7871 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7872 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7873 *p
++ = i
.prefix
[SEG_PREFIX
];
7874 if (i
.prefix
[REX_PREFIX
] != 0)
7875 *p
++ = i
.prefix
[REX_PREFIX
];
7876 *p
= i
.tm
.base_opcode
;
7878 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7879 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7880 else if (cpu_arch_flags
.bitfield
.cpui386
)
7881 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7883 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7886 sym
= i
.op
[0].disps
->X_add_symbol
;
7887 off
= i
.op
[0].disps
->X_add_number
;
7889 if (i
.op
[0].disps
->X_op
!= O_constant
7890 && i
.op
[0].disps
->X_op
!= O_symbol
)
7892 /* Handle complex expressions. */
7893 sym
= make_expr_symbol (i
.op
[0].disps
);
7897 /* 1 possible extra opcode + 4 byte displacement go in var part.
7898 Pass reloc in fr_var. */
7899 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
7902 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7903 /* Return TRUE iff PLT32 relocation should be used for branching to
7907 need_plt32_p (symbolS
*s
)
7909 /* PLT32 relocation is ELF only. */
7914 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
7915 krtld support it. */
7919 /* Since there is no need to prepare for PLT branch on x86-64, we
7920 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
7921 be used as a marker for 32-bit PC-relative branches. */
7925 /* Weak or undefined symbol need PLT32 relocation. */
7926 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
7929 /* Non-global symbol doesn't need PLT32 relocation. */
7930 if (! S_IS_EXTERNAL (s
))
7933 /* Other global symbols need PLT32 relocation. NB: Symbol with
7934 non-default visibilities are treated as normal global symbol
7935 so that PLT32 relocation can be used as a marker for 32-bit
7936 PC-relative branches. It is useful for linker relaxation. */
7947 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
7949 if (i
.tm
.opcode_modifier
.jumpbyte
)
7951 /* This is a loop or jecxz type instruction. */
7953 if (i
.prefix
[ADDR_PREFIX
] != 0)
7955 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
7958 /* Pentium4 branch hints. */
7959 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7960 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7962 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
7971 if (flag_code
== CODE_16BIT
)
7974 if (i
.prefix
[DATA_PREFIX
] != 0)
7976 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
7986 if (i
.prefix
[REX_PREFIX
] != 0)
7988 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
7992 /* BND prefixed jump. */
7993 if (i
.prefix
[BND_PREFIX
] != 0)
7995 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7999 if (i
.prefixes
!= 0 && !intel_syntax
)
8000 as_warn (_("skipping prefixes on this instruction"));
8002 p
= frag_more (i
.tm
.opcode_length
+ size
);
8003 switch (i
.tm
.opcode_length
)
8006 *p
++ = i
.tm
.base_opcode
>> 8;
8009 *p
++ = i
.tm
.base_opcode
;
8015 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8017 && jump_reloc
== NO_RELOC
8018 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8019 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8022 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8024 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8025 i
.op
[0].disps
, 1, jump_reloc
);
8027 /* All jumps handled here are signed, but don't use a signed limit
8028 check for 32 and 16 bit jumps as we want to allow wrap around at
8029 4G and 64k respectively. */
8031 fixP
->fx_signed
= 1;
8035 output_interseg_jump (void)
8043 if (flag_code
== CODE_16BIT
)
8047 if (i
.prefix
[DATA_PREFIX
] != 0)
8053 if (i
.prefix
[REX_PREFIX
] != 0)
8063 if (i
.prefixes
!= 0 && !intel_syntax
)
8064 as_warn (_("skipping prefixes on this instruction"));
8066 /* 1 opcode; 2 segment; offset */
8067 p
= frag_more (prefix
+ 1 + 2 + size
);
8069 if (i
.prefix
[DATA_PREFIX
] != 0)
8070 *p
++ = DATA_PREFIX_OPCODE
;
8072 if (i
.prefix
[REX_PREFIX
] != 0)
8073 *p
++ = i
.prefix
[REX_PREFIX
];
8075 *p
++ = i
.tm
.base_opcode
;
8076 if (i
.op
[1].imms
->X_op
== O_constant
)
8078 offsetT n
= i
.op
[1].imms
->X_add_number
;
8081 && !fits_in_unsigned_word (n
)
8082 && !fits_in_signed_word (n
))
8084 as_bad (_("16-bit jump out of range"));
8087 md_number_to_chars (p
, n
, size
);
8090 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8091 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8092 if (i
.op
[0].imms
->X_op
!= O_constant
)
8093 as_bad (_("can't handle non absolute segment in `%s'"),
8095 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8098 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8103 asection
*seg
= now_seg
;
8104 subsegT subseg
= now_subseg
;
8106 unsigned int alignment
, align_size_1
;
8107 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8108 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8109 unsigned int padding
;
8111 if (!IS_ELF
|| !x86_used_note
)
8114 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8116 /* The .note.gnu.property section layout:
8118 Field Length Contents
8121 n_descsz 4 The note descriptor size
8122 n_type 4 NT_GNU_PROPERTY_TYPE_0
8124 n_desc n_descsz The program property array
8128 /* Create the .note.gnu.property section. */
8129 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8130 bfd_set_section_flags (sec
,
8137 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8148 bfd_set_section_alignment (sec
, alignment
);
8149 elf_section_type (sec
) = SHT_NOTE
;
8151 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8153 isa_1_descsz_raw
= 4 + 4 + 4;
8154 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8155 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8157 feature_2_descsz_raw
= isa_1_descsz
;
8158 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8160 feature_2_descsz_raw
+= 4 + 4 + 4;
8161 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8162 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8165 descsz
= feature_2_descsz
;
8166 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8167 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8169 /* Write n_namsz. */
8170 md_number_to_chars (p
, (valueT
) 4, 4);
8172 /* Write n_descsz. */
8173 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8176 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8179 memcpy (p
+ 4 * 3, "GNU", 4);
8181 /* Write 4-byte type. */
8182 md_number_to_chars (p
+ 4 * 4,
8183 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8185 /* Write 4-byte data size. */
8186 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8188 /* Write 4-byte data. */
8189 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8191 /* Zero out paddings. */
8192 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8194 memset (p
+ 4 * 7, 0, padding
);
8196 /* Write 4-byte type. */
8197 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8198 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8200 /* Write 4-byte data size. */
8201 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8203 /* Write 4-byte data. */
8204 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8205 (valueT
) x86_feature_2_used
, 4);
8207 /* Zero out paddings. */
8208 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8210 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8212 /* We probably can't restore the current segment, for there likely
8215 subseg_set (seg
, subseg
);
8220 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8221 const char *frag_now_ptr
)
8223 unsigned int len
= 0;
8225 if (start_frag
!= frag_now
)
8227 const fragS
*fr
= start_frag
;
8232 } while (fr
&& fr
!= frag_now
);
8235 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8241 fragS
*insn_start_frag
;
8242 offsetT insn_start_off
;
8244 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8245 if (IS_ELF
&& x86_used_note
)
8247 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
8248 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
8249 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
8250 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
8251 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
8252 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
8253 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
8254 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
8255 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
8256 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
8257 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
8258 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
8259 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
8260 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
8261 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
8262 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
8263 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
8264 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
8265 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
8266 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
8267 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
8268 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
8269 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
8270 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
8271 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
8272 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
8273 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
8274 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
8275 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
8276 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
8277 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
8278 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
8279 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
8280 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
8281 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
8282 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
8283 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
8284 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
8285 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
8286 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
8287 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
8288 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
8289 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
8290 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
8291 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
8292 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
8293 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
8294 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
8295 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
8296 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
8298 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
8299 || i
.tm
.cpu_flags
.bitfield
.cpu287
8300 || i
.tm
.cpu_flags
.bitfield
.cpu387
8301 || i
.tm
.cpu_flags
.bitfield
.cpu687
8302 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
8303 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
8304 /* Don't set GNU_PROPERTY_X86_FEATURE_2_MMX for prefetchtXXX nor
8305 Xfence instructions. */
8306 if (i
.tm
.base_opcode
!= 0xf18
8307 && i
.tm
.base_opcode
!= 0xf0d
8308 && i
.tm
.base_opcode
!= 0xfaef8
8310 || i
.tm
.cpu_flags
.bitfield
.cpummx
8311 || i
.tm
.cpu_flags
.bitfield
.cpua3dnow
8312 || i
.tm
.cpu_flags
.bitfield
.cpua3dnowa
))
8313 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
8315 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
8317 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
8319 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
8320 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
8321 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
8322 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
8323 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
8324 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
8325 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
8326 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
8327 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
8331 /* Tie dwarf2 debug info to the address at the start of the insn.
8332 We can't do this after the insn has been output as the current
8333 frag may have been closed off. eg. by frag_var. */
8334 dwarf2_emit_insn (0);
8336 insn_start_frag
= frag_now
;
8337 insn_start_off
= frag_now_fix ();
8340 if (i
.tm
.opcode_modifier
.jump
)
8342 else if (i
.tm
.opcode_modifier
.jumpbyte
8343 || i
.tm
.opcode_modifier
.jumpdword
)
8345 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
8346 output_interseg_jump ();
8349 /* Output normal instructions here. */
8353 unsigned int prefix
;
8356 && (i
.tm
.base_opcode
== 0xfaee8
8357 || i
.tm
.base_opcode
== 0xfaef0
8358 || i
.tm
.base_opcode
== 0xfaef8))
8360 /* Encode lfence, mfence, and sfence as
8361 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
8362 offsetT val
= 0x240483f0ULL
;
8364 md_number_to_chars (p
, val
, 5);
8368 /* Some processors fail on LOCK prefix. This options makes
8369 assembler ignore LOCK prefix and serves as a workaround. */
8370 if (omit_lock_prefix
)
8372 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
8374 i
.prefix
[LOCK_PREFIX
] = 0;
8377 /* Since the VEX/EVEX prefix contains the implicit prefix, we
8378 don't need the explicit prefix. */
8379 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
8381 switch (i
.tm
.opcode_length
)
8384 if (i
.tm
.base_opcode
& 0xff000000)
8386 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
8387 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
8388 || prefix
!= REPE_PREFIX_OPCODE
8389 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
8390 add_prefix (prefix
);
8394 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
8396 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
8397 add_prefix (prefix
);
8403 /* Check for pseudo prefixes. */
8404 as_bad_where (insn_start_frag
->fr_file
,
8405 insn_start_frag
->fr_line
,
8406 _("pseudo prefix without instruction"));
8412 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8413 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
8414 R_X86_64_GOTTPOFF relocation so that linker can safely
8415 perform IE->LE optimization. */
8416 if (x86_elf_abi
== X86_64_X32_ABI
8418 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
8419 && i
.prefix
[REX_PREFIX
] == 0)
8420 add_prefix (REX_OPCODE
);
8423 /* The prefix bytes. */
8424 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
8426 FRAG_APPEND_1_CHAR (*q
);
8430 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
8435 /* REX byte is encoded in VEX prefix. */
8439 FRAG_APPEND_1_CHAR (*q
);
8442 /* There should be no other prefixes for instructions
8447 /* For EVEX instructions i.vrex should become 0 after
8448 build_evex_prefix. For VEX instructions upper 16 registers
8449 aren't available, so VREX should be 0. */
8452 /* Now the VEX prefix. */
8453 p
= frag_more (i
.vex
.length
);
8454 for (j
= 0; j
< i
.vex
.length
; j
++)
8455 p
[j
] = i
.vex
.bytes
[j
];
8458 /* Now the opcode; be careful about word order here! */
8459 if (i
.tm
.opcode_length
== 1)
8461 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
8465 switch (i
.tm
.opcode_length
)
8469 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
8470 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8474 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8484 /* Put out high byte first: can't use md_number_to_chars! */
8485 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
8486 *p
= i
.tm
.base_opcode
& 0xff;
8489 /* Now the modrm byte and sib byte (if present). */
8490 if (i
.tm
.opcode_modifier
.modrm
)
8492 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
8495 /* If i.rm.regmem == ESP (4)
8496 && i.rm.mode != (Register mode)
8498 ==> need second modrm byte. */
8499 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
8501 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
8502 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
8504 | i
.sib
.scale
<< 6));
8507 if (i
.disp_operands
)
8508 output_disp (insn_start_frag
, insn_start_off
);
8511 output_imm (insn_start_frag
, insn_start_off
);
8514 * frag_now_fix () returning plain abs_section_offset when we're in the
8515 * absolute section, and abs_section_offset not getting updated as data
8516 * gets added to the frag breaks the logic below.
8518 if (now_seg
!= absolute_section
)
8520 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
8522 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
8530 pi ("" /*line*/, &i
);
8532 #endif /* DEBUG386 */
8535 /* Return the size of the displacement operand N. */
8538 disp_size (unsigned int n
)
8542 if (i
.types
[n
].bitfield
.disp64
)
8544 else if (i
.types
[n
].bitfield
.disp8
)
8546 else if (i
.types
[n
].bitfield
.disp16
)
8551 /* Return the size of the immediate operand N. */
8554 imm_size (unsigned int n
)
8557 if (i
.types
[n
].bitfield
.imm64
)
8559 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
8561 else if (i
.types
[n
].bitfield
.imm16
)
8567 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
8572 for (n
= 0; n
< i
.operands
; n
++)
8574 if (operand_type_check (i
.types
[n
], disp
))
8576 if (i
.op
[n
].disps
->X_op
== O_constant
)
8578 int size
= disp_size (n
);
8579 offsetT val
= i
.op
[n
].disps
->X_add_number
;
8581 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
8583 p
= frag_more (size
);
8584 md_number_to_chars (p
, val
, size
);
8588 enum bfd_reloc_code_real reloc_type
;
8589 int size
= disp_size (n
);
8590 int sign
= i
.types
[n
].bitfield
.disp32s
;
8591 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
8594 /* We can't have 8 bit displacement here. */
8595 gas_assert (!i
.types
[n
].bitfield
.disp8
);
8597 /* The PC relative address is computed relative
8598 to the instruction boundary, so in case immediate
8599 fields follows, we need to adjust the value. */
8600 if (pcrel
&& i
.imm_operands
)
8605 for (n1
= 0; n1
< i
.operands
; n1
++)
8606 if (operand_type_check (i
.types
[n1
], imm
))
8608 /* Only one immediate is allowed for PC
8609 relative address. */
8610 gas_assert (sz
== 0);
8612 i
.op
[n
].disps
->X_add_number
-= sz
;
8614 /* We should find the immediate. */
8615 gas_assert (sz
!= 0);
8618 p
= frag_more (size
);
8619 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
8621 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
8622 && (((reloc_type
== BFD_RELOC_32
8623 || reloc_type
== BFD_RELOC_X86_64_32S
8624 || (reloc_type
== BFD_RELOC_64
8626 && (i
.op
[n
].disps
->X_op
== O_symbol
8627 || (i
.op
[n
].disps
->X_op
== O_add
8628 && ((symbol_get_value_expression
8629 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
8631 || reloc_type
== BFD_RELOC_32_PCREL
))
8635 reloc_type
= BFD_RELOC_386_GOTPC
;
8636 i
.op
[n
].imms
->X_add_number
+=
8637 encoding_length (insn_start_frag
, insn_start_off
, p
);
8639 else if (reloc_type
== BFD_RELOC_64
)
8640 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8642 /* Don't do the adjustment for x86-64, as there
8643 the pcrel addressing is relative to the _next_
8644 insn, and that is taken care of in other code. */
8645 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8647 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
8648 size
, i
.op
[n
].disps
, pcrel
,
8650 /* Check for "call/jmp *mem", "mov mem, %reg",
8651 "test %reg, mem" and "binop mem, %reg" where binop
8652 is one of adc, add, and, cmp, or, sbb, sub, xor
8653 instructions without data prefix. Always generate
8654 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
8655 if (i
.prefix
[DATA_PREFIX
] == 0
8656 && (generate_relax_relocations
8659 && i
.rm
.regmem
== 5))
8661 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
8662 && ((i
.operands
== 1
8663 && i
.tm
.base_opcode
== 0xff
8664 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
8666 && (i
.tm
.base_opcode
== 0x8b
8667 || i
.tm
.base_opcode
== 0x85
8668 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
8672 fixP
->fx_tcbit
= i
.rex
!= 0;
8674 && (i
.base_reg
->reg_num
== RegIP
))
8675 fixP
->fx_tcbit2
= 1;
8678 fixP
->fx_tcbit2
= 1;
8686 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
8691 for (n
= 0; n
< i
.operands
; n
++)
8693 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
8694 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
8697 if (operand_type_check (i
.types
[n
], imm
))
8699 if (i
.op
[n
].imms
->X_op
== O_constant
)
8701 int size
= imm_size (n
);
8704 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
8706 p
= frag_more (size
);
8707 md_number_to_chars (p
, val
, size
);
8711 /* Not absolute_section.
8712 Need a 32-bit fixup (don't support 8bit
8713 non-absolute imms). Try to support other
8715 enum bfd_reloc_code_real reloc_type
;
8716 int size
= imm_size (n
);
8719 if (i
.types
[n
].bitfield
.imm32s
8720 && (i
.suffix
== QWORD_MNEM_SUFFIX
8721 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
8726 p
= frag_more (size
);
8727 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
8729 /* This is tough to explain. We end up with this one if we
8730 * have operands that look like
8731 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
8732 * obtain the absolute address of the GOT, and it is strongly
8733 * preferable from a performance point of view to avoid using
8734 * a runtime relocation for this. The actual sequence of
8735 * instructions often look something like:
8740 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
8742 * The call and pop essentially return the absolute address
8743 * of the label .L66 and store it in %ebx. The linker itself
8744 * will ultimately change the first operand of the addl so
8745 * that %ebx points to the GOT, but to keep things simple, the
8746 * .o file must have this operand set so that it generates not
8747 * the absolute address of .L66, but the absolute address of
8748 * itself. This allows the linker itself simply treat a GOTPC
8749 * relocation as asking for a pcrel offset to the GOT to be
8750 * added in, and the addend of the relocation is stored in the
8751 * operand field for the instruction itself.
8753 * Our job here is to fix the operand so that it would add
8754 * the correct offset so that %ebx would point to itself. The
8755 * thing that is tricky is that .-.L66 will point to the
8756 * beginning of the instruction, so we need to further modify
8757 * the operand so that it will point to itself. There are
8758 * other cases where you have something like:
8760 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
8762 * and here no correction would be required. Internally in
8763 * the assembler we treat operands of this form as not being
8764 * pcrel since the '.' is explicitly mentioned, and I wonder
8765 * whether it would simplify matters to do it this way. Who
8766 * knows. In earlier versions of the PIC patches, the
8767 * pcrel_adjust field was used to store the correction, but
8768 * since the expression is not pcrel, I felt it would be
8769 * confusing to do it this way. */
8771 if ((reloc_type
== BFD_RELOC_32
8772 || reloc_type
== BFD_RELOC_X86_64_32S
8773 || reloc_type
== BFD_RELOC_64
)
8775 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
8776 && (i
.op
[n
].imms
->X_op
== O_symbol
8777 || (i
.op
[n
].imms
->X_op
== O_add
8778 && ((symbol_get_value_expression
8779 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
8783 reloc_type
= BFD_RELOC_386_GOTPC
;
8785 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8787 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8788 i
.op
[n
].imms
->X_add_number
+=
8789 encoding_length (insn_start_frag
, insn_start_off
, p
);
8791 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8792 i
.op
[n
].imms
, 0, reloc_type
);
8798 /* x86_cons_fix_new is called via the expression parsing code when a
8799 reloc is needed. We use this hook to get the correct .got reloc. */
8800 static int cons_sign
= -1;
8803 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
8804 expressionS
*exp
, bfd_reloc_code_real_type r
)
8806 r
= reloc (len
, 0, cons_sign
, r
);
8809 if (exp
->X_op
== O_secrel
)
8811 exp
->X_op
= O_symbol
;
8812 r
= BFD_RELOC_32_SECREL
;
8816 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
8819 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
8820 purpose of the `.dc.a' internal pseudo-op. */
8823 x86_address_bytes (void)
8825 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
8827 return stdoutput
->arch_info
->bits_per_address
/ 8;
8830 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
8832 # define lex_got(reloc, adjust, types) NULL
8834 /* Parse operands of the form
8835 <symbol>@GOTOFF+<nnn>
8836 and similar .plt or .got references.
8838 If we find one, set up the correct relocation in RELOC and copy the
8839 input string, minus the `@GOTOFF' into a malloc'd buffer for
8840 parsing by the calling routine. Return this buffer, and if ADJUST
8841 is non-null set it to the length of the string we removed from the
8842 input line. Otherwise return NULL. */
8844 lex_got (enum bfd_reloc_code_real
*rel
,
8846 i386_operand_type
*types
)
8848 /* Some of the relocations depend on the size of what field is to
8849 be relocated. But in our callers i386_immediate and i386_displacement
8850 we don't yet know the operand size (this will be set by insn
8851 matching). Hence we record the word32 relocation here,
8852 and adjust the reloc according to the real size in reloc(). */
8853 static const struct {
8856 const enum bfd_reloc_code_real rel
[2];
8857 const i386_operand_type types64
;
8859 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8860 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
8862 OPERAND_TYPE_IMM32_64
},
8864 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
8865 BFD_RELOC_X86_64_PLTOFF64
},
8866 OPERAND_TYPE_IMM64
},
8867 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
8868 BFD_RELOC_X86_64_PLT32
},
8869 OPERAND_TYPE_IMM32_32S_DISP32
},
8870 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
8871 BFD_RELOC_X86_64_GOTPLT64
},
8872 OPERAND_TYPE_IMM64_DISP64
},
8873 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
8874 BFD_RELOC_X86_64_GOTOFF64
},
8875 OPERAND_TYPE_IMM64_DISP64
},
8876 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
8877 BFD_RELOC_X86_64_GOTPCREL
},
8878 OPERAND_TYPE_IMM32_32S_DISP32
},
8879 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
8880 BFD_RELOC_X86_64_TLSGD
},
8881 OPERAND_TYPE_IMM32_32S_DISP32
},
8882 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
8883 _dummy_first_bfd_reloc_code_real
},
8884 OPERAND_TYPE_NONE
},
8885 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
8886 BFD_RELOC_X86_64_TLSLD
},
8887 OPERAND_TYPE_IMM32_32S_DISP32
},
8888 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
8889 BFD_RELOC_X86_64_GOTTPOFF
},
8890 OPERAND_TYPE_IMM32_32S_DISP32
},
8891 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
8892 BFD_RELOC_X86_64_TPOFF32
},
8893 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8894 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
8895 _dummy_first_bfd_reloc_code_real
},
8896 OPERAND_TYPE_NONE
},
8897 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
8898 BFD_RELOC_X86_64_DTPOFF32
},
8899 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8900 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
8901 _dummy_first_bfd_reloc_code_real
},
8902 OPERAND_TYPE_NONE
},
8903 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
8904 _dummy_first_bfd_reloc_code_real
},
8905 OPERAND_TYPE_NONE
},
8906 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
8907 BFD_RELOC_X86_64_GOT32
},
8908 OPERAND_TYPE_IMM32_32S_64_DISP32
},
8909 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
8910 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
8911 OPERAND_TYPE_IMM32_32S_DISP32
},
8912 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
8913 BFD_RELOC_X86_64_TLSDESC_CALL
},
8914 OPERAND_TYPE_IMM32_32S_DISP32
},
8919 #if defined (OBJ_MAYBE_ELF)
8924 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
8925 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
8928 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
8930 int len
= gotrel
[j
].len
;
8931 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
8933 if (gotrel
[j
].rel
[object_64bit
] != 0)
8936 char *tmpbuf
, *past_reloc
;
8938 *rel
= gotrel
[j
].rel
[object_64bit
];
8942 if (flag_code
!= CODE_64BIT
)
8944 types
->bitfield
.imm32
= 1;
8945 types
->bitfield
.disp32
= 1;
8948 *types
= gotrel
[j
].types64
;
8951 if (j
!= 0 && GOT_symbol
== NULL
)
8952 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
8954 /* The length of the first part of our input line. */
8955 first
= cp
- input_line_pointer
;
8957 /* The second part goes from after the reloc token until
8958 (and including) an end_of_line char or comma. */
8959 past_reloc
= cp
+ 1 + len
;
8961 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
8963 second
= cp
+ 1 - past_reloc
;
8965 /* Allocate and copy string. The trailing NUL shouldn't
8966 be necessary, but be safe. */
8967 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
8968 memcpy (tmpbuf
, input_line_pointer
, first
);
8969 if (second
!= 0 && *past_reloc
!= ' ')
8970 /* Replace the relocation token with ' ', so that
8971 errors like foo@GOTOFF1 will be detected. */
8972 tmpbuf
[first
++] = ' ';
8974 /* Increment length by 1 if the relocation token is
8979 memcpy (tmpbuf
+ first
, past_reloc
, second
);
8980 tmpbuf
[first
+ second
] = '\0';
8984 as_bad (_("@%s reloc is not supported with %d-bit output format"),
8985 gotrel
[j
].str
, 1 << (5 + object_64bit
));
8990 /* Might be a symbol version string. Don't as_bad here. */
8999 /* Parse operands of the form
9000 <symbol>@SECREL32+<nnn>
9002 If we find one, set up the correct relocation in RELOC and copy the
9003 input string, minus the `@SECREL32' into a malloc'd buffer for
9004 parsing by the calling routine. Return this buffer, and if ADJUST
9005 is non-null set it to the length of the string we removed from the
9006 input line. Otherwise return NULL.
9008 This function is copied from the ELF version above adjusted for PE targets. */
9011 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9012 int *adjust ATTRIBUTE_UNUSED
,
9013 i386_operand_type
*types
)
9019 const enum bfd_reloc_code_real rel
[2];
9020 const i386_operand_type types64
;
9024 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9025 BFD_RELOC_32_SECREL
},
9026 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9032 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9033 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9036 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9038 int len
= gotrel
[j
].len
;
9040 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9042 if (gotrel
[j
].rel
[object_64bit
] != 0)
9045 char *tmpbuf
, *past_reloc
;
9047 *rel
= gotrel
[j
].rel
[object_64bit
];
9053 if (flag_code
!= CODE_64BIT
)
9055 types
->bitfield
.imm32
= 1;
9056 types
->bitfield
.disp32
= 1;
9059 *types
= gotrel
[j
].types64
;
9062 /* The length of the first part of our input line. */
9063 first
= cp
- input_line_pointer
;
9065 /* The second part goes from after the reloc token until
9066 (and including) an end_of_line char or comma. */
9067 past_reloc
= cp
+ 1 + len
;
9069 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9071 second
= cp
+ 1 - past_reloc
;
9073 /* Allocate and copy string. The trailing NUL shouldn't
9074 be necessary, but be safe. */
9075 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9076 memcpy (tmpbuf
, input_line_pointer
, first
);
9077 if (second
!= 0 && *past_reloc
!= ' ')
9078 /* Replace the relocation token with ' ', so that
9079 errors like foo@SECLREL321 will be detected. */
9080 tmpbuf
[first
++] = ' ';
9081 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9082 tmpbuf
[first
+ second
] = '\0';
9086 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9087 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9092 /* Might be a symbol version string. Don't as_bad here. */
9098 bfd_reloc_code_real_type
9099 x86_cons (expressionS
*exp
, int size
)
9101 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
9103 intel_syntax
= -intel_syntax
;
9106 if (size
== 4 || (object_64bit
&& size
== 8))
9108 /* Handle @GOTOFF and the like in an expression. */
9110 char *gotfree_input_line
;
9113 save
= input_line_pointer
;
9114 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
9115 if (gotfree_input_line
)
9116 input_line_pointer
= gotfree_input_line
;
9120 if (gotfree_input_line
)
9122 /* expression () has merrily parsed up to the end of line,
9123 or a comma - in the wrong buffer. Transfer how far
9124 input_line_pointer has moved to the right buffer. */
9125 input_line_pointer
= (save
9126 + (input_line_pointer
- gotfree_input_line
)
9128 free (gotfree_input_line
);
9129 if (exp
->X_op
== O_constant
9130 || exp
->X_op
== O_absent
9131 || exp
->X_op
== O_illegal
9132 || exp
->X_op
== O_register
9133 || exp
->X_op
== O_big
)
9135 char c
= *input_line_pointer
;
9136 *input_line_pointer
= 0;
9137 as_bad (_("missing or invalid expression `%s'"), save
);
9138 *input_line_pointer
= c
;
9140 else if ((got_reloc
== BFD_RELOC_386_PLT32
9141 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
9142 && exp
->X_op
!= O_symbol
)
9144 char c
= *input_line_pointer
;
9145 *input_line_pointer
= 0;
9146 as_bad (_("invalid PLT expression `%s'"), save
);
9147 *input_line_pointer
= c
;
9154 intel_syntax
= -intel_syntax
;
9157 i386_intel_simplify (exp
);
9163 signed_cons (int size
)
9165 if (flag_code
== CODE_64BIT
)
9173 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
9180 if (exp
.X_op
== O_symbol
)
9181 exp
.X_op
= O_secrel
;
9183 emit_expr (&exp
, 4);
9185 while (*input_line_pointer
++ == ',');
9187 input_line_pointer
--;
9188 demand_empty_rest_of_line ();
9192 /* Handle Vector operations. */
9195 check_VecOperations (char *op_string
, char *op_end
)
9197 const reg_entry
*mask
;
9202 && (op_end
== NULL
|| op_string
< op_end
))
9205 if (*op_string
== '{')
9209 /* Check broadcasts. */
9210 if (strncmp (op_string
, "1to", 3) == 0)
9215 goto duplicated_vec_op
;
9218 if (*op_string
== '8')
9220 else if (*op_string
== '4')
9222 else if (*op_string
== '2')
9224 else if (*op_string
== '1'
9225 && *(op_string
+1) == '6')
9232 as_bad (_("Unsupported broadcast: `%s'"), saved
);
9237 broadcast_op
.type
= bcst_type
;
9238 broadcast_op
.operand
= this_operand
;
9239 broadcast_op
.bytes
= 0;
9240 i
.broadcast
= &broadcast_op
;
9242 /* Check masking operation. */
9243 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
9245 /* k0 can't be used for write mask. */
9246 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
9248 as_bad (_("`%s%s' can't be used for write mask"),
9249 register_prefix
, mask
->reg_name
);
9255 mask_op
.mask
= mask
;
9256 mask_op
.zeroing
= 0;
9257 mask_op
.operand
= this_operand
;
9263 goto duplicated_vec_op
;
9265 i
.mask
->mask
= mask
;
9267 /* Only "{z}" is allowed here. No need to check
9268 zeroing mask explicitly. */
9269 if (i
.mask
->operand
!= this_operand
)
9271 as_bad (_("invalid write mask `%s'"), saved
);
9278 /* Check zeroing-flag for masking operation. */
9279 else if (*op_string
== 'z')
9283 mask_op
.mask
= NULL
;
9284 mask_op
.zeroing
= 1;
9285 mask_op
.operand
= this_operand
;
9290 if (i
.mask
->zeroing
)
9293 as_bad (_("duplicated `%s'"), saved
);
9297 i
.mask
->zeroing
= 1;
9299 /* Only "{%k}" is allowed here. No need to check mask
9300 register explicitly. */
9301 if (i
.mask
->operand
!= this_operand
)
9303 as_bad (_("invalid zeroing-masking `%s'"),
9312 goto unknown_vec_op
;
9314 if (*op_string
!= '}')
9316 as_bad (_("missing `}' in `%s'"), saved
);
9321 /* Strip whitespace since the addition of pseudo prefixes
9322 changed how the scrubber treats '{'. */
9323 if (is_space_char (*op_string
))
9329 /* We don't know this one. */
9330 as_bad (_("unknown vector operation: `%s'"), saved
);
9334 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
9336 as_bad (_("zeroing-masking only allowed with write mask"));
9344 i386_immediate (char *imm_start
)
9346 char *save_input_line_pointer
;
9347 char *gotfree_input_line
;
9350 i386_operand_type types
;
9352 operand_type_set (&types
, ~0);
9354 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
9356 as_bad (_("at most %d immediate operands are allowed"),
9357 MAX_IMMEDIATE_OPERANDS
);
9361 exp
= &im_expressions
[i
.imm_operands
++];
9362 i
.op
[this_operand
].imms
= exp
;
9364 if (is_space_char (*imm_start
))
9367 save_input_line_pointer
= input_line_pointer
;
9368 input_line_pointer
= imm_start
;
9370 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
9371 if (gotfree_input_line
)
9372 input_line_pointer
= gotfree_input_line
;
9374 exp_seg
= expression (exp
);
9378 /* Handle vector operations. */
9379 if (*input_line_pointer
== '{')
9381 input_line_pointer
= check_VecOperations (input_line_pointer
,
9383 if (input_line_pointer
== NULL
)
9387 if (*input_line_pointer
)
9388 as_bad (_("junk `%s' after expression"), input_line_pointer
);
9390 input_line_pointer
= save_input_line_pointer
;
9391 if (gotfree_input_line
)
9393 free (gotfree_input_line
);
9395 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
9396 exp
->X_op
= O_illegal
;
9399 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
9403 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
9404 i386_operand_type types
, const char *imm_start
)
9406 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
9409 as_bad (_("missing or invalid immediate expression `%s'"),
9413 else if (exp
->X_op
== O_constant
)
9415 /* Size it properly later. */
9416 i
.types
[this_operand
].bitfield
.imm64
= 1;
9417 /* If not 64bit, sign extend val. */
9418 if (flag_code
!= CODE_64BIT
9419 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
9421 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
9423 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9424 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
9425 && exp_seg
!= absolute_section
9426 && exp_seg
!= text_section
9427 && exp_seg
!= data_section
9428 && exp_seg
!= bss_section
9429 && exp_seg
!= undefined_section
9430 && !bfd_is_com_section (exp_seg
))
9432 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
9436 else if (!intel_syntax
&& exp_seg
== reg_section
)
9439 as_bad (_("illegal immediate register operand %s"), imm_start
);
9444 /* This is an address. The size of the address will be
9445 determined later, depending on destination register,
9446 suffix, or the default for the section. */
9447 i
.types
[this_operand
].bitfield
.imm8
= 1;
9448 i
.types
[this_operand
].bitfield
.imm16
= 1;
9449 i
.types
[this_operand
].bitfield
.imm32
= 1;
9450 i
.types
[this_operand
].bitfield
.imm32s
= 1;
9451 i
.types
[this_operand
].bitfield
.imm64
= 1;
9452 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
9460 i386_scale (char *scale
)
9463 char *save
= input_line_pointer
;
9465 input_line_pointer
= scale
;
9466 val
= get_absolute_expression ();
9471 i
.log2_scale_factor
= 0;
9474 i
.log2_scale_factor
= 1;
9477 i
.log2_scale_factor
= 2;
9480 i
.log2_scale_factor
= 3;
9484 char sep
= *input_line_pointer
;
9486 *input_line_pointer
= '\0';
9487 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
9489 *input_line_pointer
= sep
;
9490 input_line_pointer
= save
;
9494 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
9496 as_warn (_("scale factor of %d without an index register"),
9497 1 << i
.log2_scale_factor
);
9498 i
.log2_scale_factor
= 0;
9500 scale
= input_line_pointer
;
9501 input_line_pointer
= save
;
9506 i386_displacement (char *disp_start
, char *disp_end
)
9510 char *save_input_line_pointer
;
9511 char *gotfree_input_line
;
9513 i386_operand_type bigdisp
, types
= anydisp
;
9516 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
9518 as_bad (_("at most %d displacement operands are allowed"),
9519 MAX_MEMORY_OPERANDS
);
9523 operand_type_set (&bigdisp
, 0);
9524 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
9525 || (!current_templates
->start
->opcode_modifier
.jump
9526 && !current_templates
->start
->opcode_modifier
.jumpdword
))
9528 bigdisp
.bitfield
.disp32
= 1;
9529 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
9530 if (flag_code
== CODE_64BIT
)
9534 bigdisp
.bitfield
.disp32s
= 1;
9535 bigdisp
.bitfield
.disp64
= 1;
9538 else if ((flag_code
== CODE_16BIT
) ^ override
)
9540 bigdisp
.bitfield
.disp32
= 0;
9541 bigdisp
.bitfield
.disp16
= 1;
9546 /* For PC-relative branches, the width of the displacement
9547 is dependent upon data size, not address size. */
9548 override
= (i
.prefix
[DATA_PREFIX
] != 0);
9549 if (flag_code
== CODE_64BIT
)
9551 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
9552 bigdisp
.bitfield
.disp16
= 1;
9555 bigdisp
.bitfield
.disp32
= 1;
9556 bigdisp
.bitfield
.disp32s
= 1;
9562 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
9564 : LONG_MNEM_SUFFIX
));
9565 bigdisp
.bitfield
.disp32
= 1;
9566 if ((flag_code
== CODE_16BIT
) ^ override
)
9568 bigdisp
.bitfield
.disp32
= 0;
9569 bigdisp
.bitfield
.disp16
= 1;
9573 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
9576 exp
= &disp_expressions
[i
.disp_operands
];
9577 i
.op
[this_operand
].disps
= exp
;
9579 save_input_line_pointer
= input_line_pointer
;
9580 input_line_pointer
= disp_start
;
9581 END_STRING_AND_SAVE (disp_end
);
9583 #ifndef GCC_ASM_O_HACK
9584 #define GCC_ASM_O_HACK 0
9587 END_STRING_AND_SAVE (disp_end
+ 1);
9588 if (i
.types
[this_operand
].bitfield
.baseIndex
9589 && displacement_string_end
[-1] == '+')
9591 /* This hack is to avoid a warning when using the "o"
9592 constraint within gcc asm statements.
9595 #define _set_tssldt_desc(n,addr,limit,type) \
9596 __asm__ __volatile__ ( \
9598 "movw %w1,2+%0\n\t" \
9600 "movb %b1,4+%0\n\t" \
9601 "movb %4,5+%0\n\t" \
9602 "movb $0,6+%0\n\t" \
9603 "movb %h1,7+%0\n\t" \
9605 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
9607 This works great except that the output assembler ends
9608 up looking a bit weird if it turns out that there is
9609 no offset. You end up producing code that looks like:
9622 So here we provide the missing zero. */
9624 *displacement_string_end
= '0';
9627 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
9628 if (gotfree_input_line
)
9629 input_line_pointer
= gotfree_input_line
;
9631 exp_seg
= expression (exp
);
9634 if (*input_line_pointer
)
9635 as_bad (_("junk `%s' after expression"), input_line_pointer
);
9637 RESTORE_END_STRING (disp_end
+ 1);
9639 input_line_pointer
= save_input_line_pointer
;
9640 if (gotfree_input_line
)
9642 free (gotfree_input_line
);
9644 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
9645 exp
->X_op
= O_illegal
;
9648 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
9650 RESTORE_END_STRING (disp_end
);
9656 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
9657 i386_operand_type types
, const char *disp_start
)
9659 i386_operand_type bigdisp
;
9662 /* We do this to make sure that the section symbol is in
9663 the symbol table. We will ultimately change the relocation
9664 to be relative to the beginning of the section. */
9665 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
9666 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
9667 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
9669 if (exp
->X_op
!= O_symbol
)
9672 if (S_IS_LOCAL (exp
->X_add_symbol
)
9673 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
9674 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
9675 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
9676 exp
->X_op
= O_subtract
;
9677 exp
->X_op_symbol
= GOT_symbol
;
9678 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
9679 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
9680 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
9681 i
.reloc
[this_operand
] = BFD_RELOC_64
;
9683 i
.reloc
[this_operand
] = BFD_RELOC_32
;
9686 else if (exp
->X_op
== O_absent
9687 || exp
->X_op
== O_illegal
9688 || exp
->X_op
== O_big
)
9691 as_bad (_("missing or invalid displacement expression `%s'"),
9696 else if (flag_code
== CODE_64BIT
9697 && !i
.prefix
[ADDR_PREFIX
]
9698 && exp
->X_op
== O_constant
)
9700 /* Since displacement is signed extended to 64bit, don't allow
9701 disp32 and turn off disp32s if they are out of range. */
9702 i
.types
[this_operand
].bitfield
.disp32
= 0;
9703 if (!fits_in_signed_long (exp
->X_add_number
))
9705 i
.types
[this_operand
].bitfield
.disp32s
= 0;
9706 if (i
.types
[this_operand
].bitfield
.baseindex
)
9708 as_bad (_("0x%lx out range of signed 32bit displacement"),
9709 (long) exp
->X_add_number
);
9715 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9716 else if (exp
->X_op
!= O_constant
9717 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
9718 && exp_seg
!= absolute_section
9719 && exp_seg
!= text_section
9720 && exp_seg
!= data_section
9721 && exp_seg
!= bss_section
9722 && exp_seg
!= undefined_section
9723 && !bfd_is_com_section (exp_seg
))
9725 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
9730 /* Check if this is a displacement only operand. */
9731 bigdisp
= i
.types
[this_operand
];
9732 bigdisp
.bitfield
.disp8
= 0;
9733 bigdisp
.bitfield
.disp16
= 0;
9734 bigdisp
.bitfield
.disp32
= 0;
9735 bigdisp
.bitfield
.disp32s
= 0;
9736 bigdisp
.bitfield
.disp64
= 0;
9737 if (operand_type_all_zero (&bigdisp
))
9738 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
9744 /* Return the active addressing mode, taking address override and
9745 registers forming the address into consideration. Update the
9746 address override prefix if necessary. */
9748 static enum flag_code
9749 i386_addressing_mode (void)
9751 enum flag_code addr_mode
;
9753 if (i
.prefix
[ADDR_PREFIX
])
9754 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
9757 addr_mode
= flag_code
;
9759 #if INFER_ADDR_PREFIX
9760 if (i
.mem_operands
== 0)
9762 /* Infer address prefix from the first memory operand. */
9763 const reg_entry
*addr_reg
= i
.base_reg
;
9765 if (addr_reg
== NULL
)
9766 addr_reg
= i
.index_reg
;
9770 if (addr_reg
->reg_type
.bitfield
.dword
)
9771 addr_mode
= CODE_32BIT
;
9772 else if (flag_code
!= CODE_64BIT
9773 && addr_reg
->reg_type
.bitfield
.word
)
9774 addr_mode
= CODE_16BIT
;
9776 if (addr_mode
!= flag_code
)
9778 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
9780 /* Change the size of any displacement too. At most one
9781 of Disp16 or Disp32 is set.
9782 FIXME. There doesn't seem to be any real need for
9783 separate Disp16 and Disp32 flags. The same goes for
9784 Imm16 and Imm32. Removing them would probably clean
9785 up the code quite a lot. */
9786 if (flag_code
!= CODE_64BIT
9787 && (i
.types
[this_operand
].bitfield
.disp16
9788 || i
.types
[this_operand
].bitfield
.disp32
))
9789 i
.types
[this_operand
]
9790 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
9800 /* Make sure the memory operand we've been dealt is valid.
9801 Return 1 on success, 0 on a failure. */
9804 i386_index_check (const char *operand_string
)
9806 const char *kind
= "base/index";
9807 enum flag_code addr_mode
= i386_addressing_mode ();
9809 if (current_templates
->start
->opcode_modifier
.isstring
9810 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
9811 && (current_templates
->end
[-1].opcode_modifier
.isstring
9814 /* Memory operands of string insns are special in that they only allow
9815 a single register (rDI, rSI, or rBX) as their memory address. */
9816 const reg_entry
*expected_reg
;
9817 static const char *di_si
[][2] =
9823 static const char *bx
[] = { "ebx", "bx", "rbx" };
9825 kind
= "string address";
9827 if (current_templates
->start
->opcode_modifier
.repprefixok
)
9829 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
9831 if (!type
.bitfield
.baseindex
9832 || ((!i
.mem_operands
!= !intel_syntax
)
9833 && current_templates
->end
[-1].operand_types
[1]
9834 .bitfield
.baseindex
))
9835 type
= current_templates
->end
[-1].operand_types
[1];
9836 expected_reg
= hash_find (reg_hash
,
9837 di_si
[addr_mode
][type
.bitfield
.esseg
]);
9841 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
9843 if (i
.base_reg
!= expected_reg
9845 || operand_type_check (i
.types
[this_operand
], disp
))
9847 /* The second memory operand must have the same size as
9851 && !((addr_mode
== CODE_64BIT
9852 && i
.base_reg
->reg_type
.bitfield
.qword
)
9853 || (addr_mode
== CODE_32BIT
9854 ? i
.base_reg
->reg_type
.bitfield
.dword
9855 : i
.base_reg
->reg_type
.bitfield
.word
)))
9858 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
9860 intel_syntax
? '[' : '(',
9862 expected_reg
->reg_name
,
9863 intel_syntax
? ']' : ')');
9870 as_bad (_("`%s' is not a valid %s expression"),
9871 operand_string
, kind
);
9876 if (addr_mode
!= CODE_16BIT
)
9878 /* 32-bit/64-bit checks. */
9880 && ((addr_mode
== CODE_64BIT
9881 ? !i
.base_reg
->reg_type
.bitfield
.qword
9882 : !i
.base_reg
->reg_type
.bitfield
.dword
)
9883 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
9884 || i
.base_reg
->reg_num
== RegIZ
))
9886 && !i
.index_reg
->reg_type
.bitfield
.xmmword
9887 && !i
.index_reg
->reg_type
.bitfield
.ymmword
9888 && !i
.index_reg
->reg_type
.bitfield
.zmmword
9889 && ((addr_mode
== CODE_64BIT
9890 ? !i
.index_reg
->reg_type
.bitfield
.qword
9891 : !i
.index_reg
->reg_type
.bitfield
.dword
)
9892 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
9895 /* bndmk, bndldx, and bndstx have special restrictions. */
9896 if (current_templates
->start
->base_opcode
== 0xf30f1b
9897 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
9899 /* They cannot use RIP-relative addressing. */
9900 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9902 as_bad (_("`%s' cannot be used here"), operand_string
);
9906 /* bndldx and bndstx ignore their scale factor. */
9907 if (current_templates
->start
->base_opcode
!= 0xf30f1b
9908 && i
.log2_scale_factor
)
9909 as_warn (_("register scaling is being ignored here"));
9914 /* 16-bit checks. */
9916 && (!i
.base_reg
->reg_type
.bitfield
.word
9917 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
9919 && (!i
.index_reg
->reg_type
.bitfield
.word
9920 || !i
.index_reg
->reg_type
.bitfield
.baseindex
9922 && i
.base_reg
->reg_num
< 6
9923 && i
.index_reg
->reg_num
>= 6
9924 && i
.log2_scale_factor
== 0))))
9931 /* Handle vector immediates. */
9934 RC_SAE_immediate (const char *imm_start
)
9936 unsigned int match_found
, j
;
9937 const char *pstr
= imm_start
;
9945 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
9947 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
9951 rc_op
.type
= RC_NamesTable
[j
].type
;
9952 rc_op
.operand
= this_operand
;
9953 i
.rounding
= &rc_op
;
9957 as_bad (_("duplicated `%s'"), imm_start
);
9960 pstr
+= RC_NamesTable
[j
].len
;
9970 as_bad (_("Missing '}': '%s'"), imm_start
);
9973 /* RC/SAE immediate string should contain nothing more. */;
9976 as_bad (_("Junk after '}': '%s'"), imm_start
);
9980 exp
= &im_expressions
[i
.imm_operands
++];
9981 i
.op
[this_operand
].imms
= exp
;
9983 exp
->X_op
= O_constant
;
9984 exp
->X_add_number
= 0;
9985 exp
->X_add_symbol
= (symbolS
*) 0;
9986 exp
->X_op_symbol
= (symbolS
*) 0;
9988 i
.types
[this_operand
].bitfield
.imm8
= 1;
9992 /* Only string instructions can have a second memory operand, so
9993 reduce current_templates to just those if it contains any. */
9995 maybe_adjust_templates (void)
9997 const insn_template
*t
;
9999 gas_assert (i
.mem_operands
== 1);
10001 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
10002 if (t
->opcode_modifier
.isstring
)
10005 if (t
< current_templates
->end
)
10007 static templates aux_templates
;
10008 bfd_boolean recheck
;
10010 aux_templates
.start
= t
;
10011 for (; t
< current_templates
->end
; ++t
)
10012 if (!t
->opcode_modifier
.isstring
)
10014 aux_templates
.end
= t
;
10016 /* Determine whether to re-check the first memory operand. */
10017 recheck
= (aux_templates
.start
!= current_templates
->start
10018 || t
!= current_templates
->end
);
10020 current_templates
= &aux_templates
;
10024 i
.mem_operands
= 0;
10025 if (i
.memop1_string
!= NULL
10026 && i386_index_check (i
.memop1_string
) == 0)
10028 i
.mem_operands
= 1;
10035 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
10039 i386_att_operand (char *operand_string
)
10041 const reg_entry
*r
;
10043 char *op_string
= operand_string
;
10045 if (is_space_char (*op_string
))
10048 /* We check for an absolute prefix (differentiating,
10049 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
10050 if (*op_string
== ABSOLUTE_PREFIX
)
10053 if (is_space_char (*op_string
))
10055 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
10058 /* Check if operand is a register. */
10059 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
10061 i386_operand_type temp
;
10063 /* Check for a segment override by searching for ':' after a
10064 segment register. */
10065 op_string
= end_op
;
10066 if (is_space_char (*op_string
))
10068 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
10070 switch (r
->reg_num
)
10073 i
.seg
[i
.mem_operands
] = &es
;
10076 i
.seg
[i
.mem_operands
] = &cs
;
10079 i
.seg
[i
.mem_operands
] = &ss
;
10082 i
.seg
[i
.mem_operands
] = &ds
;
10085 i
.seg
[i
.mem_operands
] = &fs
;
10088 i
.seg
[i
.mem_operands
] = &gs
;
10092 /* Skip the ':' and whitespace. */
10094 if (is_space_char (*op_string
))
10097 if (!is_digit_char (*op_string
)
10098 && !is_identifier_char (*op_string
)
10099 && *op_string
!= '('
10100 && *op_string
!= ABSOLUTE_PREFIX
)
10102 as_bad (_("bad memory operand `%s'"), op_string
);
10105 /* Handle case of %es:*foo. */
10106 if (*op_string
== ABSOLUTE_PREFIX
)
10109 if (is_space_char (*op_string
))
10111 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
10113 goto do_memory_reference
;
10116 /* Handle vector operations. */
10117 if (*op_string
== '{')
10119 op_string
= check_VecOperations (op_string
, NULL
);
10120 if (op_string
== NULL
)
10126 as_bad (_("junk `%s' after register"), op_string
);
10129 temp
= r
->reg_type
;
10130 temp
.bitfield
.baseindex
= 0;
10131 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10133 i
.types
[this_operand
].bitfield
.unspecified
= 0;
10134 i
.op
[this_operand
].regs
= r
;
10137 else if (*op_string
== REGISTER_PREFIX
)
10139 as_bad (_("bad register name `%s'"), op_string
);
10142 else if (*op_string
== IMMEDIATE_PREFIX
)
10145 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
10147 as_bad (_("immediate operand illegal with absolute jump"));
10150 if (!i386_immediate (op_string
))
10153 else if (RC_SAE_immediate (operand_string
))
10155 /* If it is a RC or SAE immediate, do nothing. */
10158 else if (is_digit_char (*op_string
)
10159 || is_identifier_char (*op_string
)
10160 || *op_string
== '"'
10161 || *op_string
== '(')
10163 /* This is a memory reference of some sort. */
10166 /* Start and end of displacement string expression (if found). */
10167 char *displacement_string_start
;
10168 char *displacement_string_end
;
10171 do_memory_reference
:
10172 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
10174 if ((i
.mem_operands
== 1
10175 && !current_templates
->start
->opcode_modifier
.isstring
)
10176 || i
.mem_operands
== 2)
10178 as_bad (_("too many memory references for `%s'"),
10179 current_templates
->start
->name
);
10183 /* Check for base index form. We detect the base index form by
10184 looking for an ')' at the end of the operand, searching
10185 for the '(' matching it, and finding a REGISTER_PREFIX or ','
10187 base_string
= op_string
+ strlen (op_string
);
10189 /* Handle vector operations. */
10190 vop_start
= strchr (op_string
, '{');
10191 if (vop_start
&& vop_start
< base_string
)
10193 if (check_VecOperations (vop_start
, base_string
) == NULL
)
10195 base_string
= vop_start
;
10199 if (is_space_char (*base_string
))
10202 /* If we only have a displacement, set-up for it to be parsed later. */
10203 displacement_string_start
= op_string
;
10204 displacement_string_end
= base_string
+ 1;
10206 if (*base_string
== ')')
10209 unsigned int parens_balanced
= 1;
10210 /* We've already checked that the number of left & right ()'s are
10211 equal, so this loop will not be infinite. */
10215 if (*base_string
== ')')
10217 if (*base_string
== '(')
10220 while (parens_balanced
);
10222 temp_string
= base_string
;
10224 /* Skip past '(' and whitespace. */
10226 if (is_space_char (*base_string
))
10229 if (*base_string
== ','
10230 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
10233 displacement_string_end
= temp_string
;
10235 i
.types
[this_operand
].bitfield
.baseindex
= 1;
10239 base_string
= end_op
;
10240 if (is_space_char (*base_string
))
10244 /* There may be an index reg or scale factor here. */
10245 if (*base_string
== ',')
10248 if (is_space_char (*base_string
))
10251 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
10254 base_string
= end_op
;
10255 if (is_space_char (*base_string
))
10257 if (*base_string
== ',')
10260 if (is_space_char (*base_string
))
10263 else if (*base_string
!= ')')
10265 as_bad (_("expecting `,' or `)' "
10266 "after index register in `%s'"),
10271 else if (*base_string
== REGISTER_PREFIX
)
10273 end_op
= strchr (base_string
, ',');
10276 as_bad (_("bad register name `%s'"), base_string
);
10280 /* Check for scale factor. */
10281 if (*base_string
!= ')')
10283 char *end_scale
= i386_scale (base_string
);
10288 base_string
= end_scale
;
10289 if (is_space_char (*base_string
))
10291 if (*base_string
!= ')')
10293 as_bad (_("expecting `)' "
10294 "after scale factor in `%s'"),
10299 else if (!i
.index_reg
)
10301 as_bad (_("expecting index register or scale factor "
10302 "after `,'; got '%c'"),
10307 else if (*base_string
!= ')')
10309 as_bad (_("expecting `,' or `)' "
10310 "after base register in `%s'"),
10315 else if (*base_string
== REGISTER_PREFIX
)
10317 end_op
= strchr (base_string
, ',');
10320 as_bad (_("bad register name `%s'"), base_string
);
10325 /* If there's an expression beginning the operand, parse it,
10326 assuming displacement_string_start and
10327 displacement_string_end are meaningful. */
10328 if (displacement_string_start
!= displacement_string_end
)
10330 if (!i386_displacement (displacement_string_start
,
10331 displacement_string_end
))
10335 /* Special case for (%dx) while doing input/output op. */
10337 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
10338 && i
.base_reg
->reg_type
.bitfield
.word
10339 && i
.index_reg
== 0
10340 && i
.log2_scale_factor
== 0
10341 && i
.seg
[i
.mem_operands
] == 0
10342 && !operand_type_check (i
.types
[this_operand
], disp
))
10344 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
10348 if (i386_index_check (operand_string
) == 0)
10350 i
.flags
[this_operand
] |= Operand_Mem
;
10351 if (i
.mem_operands
== 0)
10352 i
.memop1_string
= xstrdup (operand_string
);
10357 /* It's not a memory operand; argh! */
10358 as_bad (_("invalid char %s beginning operand %d `%s'"),
10359 output_invalid (*op_string
),
10364 return 1; /* Normal return. */
10367 /* Calculate the maximum variable size (i.e., excluding fr_fix)
10368 that an rs_machine_dependent frag may reach. */
10371 i386_frag_max_var (fragS
*frag
)
10373 /* The only relaxable frags are for jumps.
10374 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
10375 gas_assert (frag
->fr_type
== rs_machine_dependent
);
10376 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
10379 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10381 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
10383 /* STT_GNU_IFUNC symbol must go through PLT. */
10384 if ((symbol_get_bfdsym (fr_symbol
)->flags
10385 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
10388 if (!S_IS_EXTERNAL (fr_symbol
))
10389 /* Symbol may be weak or local. */
10390 return !S_IS_WEAK (fr_symbol
);
10392 /* Global symbols with non-default visibility can't be preempted. */
10393 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
10396 if (fr_var
!= NO_RELOC
)
10397 switch ((enum bfd_reloc_code_real
) fr_var
)
10399 case BFD_RELOC_386_PLT32
:
10400 case BFD_RELOC_X86_64_PLT32
:
10401 /* Symbol with PLT relocation may be preempted. */
10407 /* Global symbols with default visibility in a shared library may be
10408 preempted by another definition. */
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

/* NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
						fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Emit the
	 worst-case (d)word form right away and leave a fixup.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      else if (need_plt32_p (fragP->fr_symbol))
	reloc_type = BFD_RELOC_X86_64_PLT32;
#endif
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte form and let the
		 linker/writer complain if it does not reach.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is fully grown; it no longer needs relaxing.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
/* Called after relax() is finished.

   In:	Address of frag.
	fr_type == rs_machine_dependent.
	fr_subtype is what the address relaxed to.

   Out:	Any fixSs and constants are set up.
	Caller will turn frag into a ".space 0".  */

/* NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.
   In particular the COND_JUMP86/BIG16 extension value below was
   re-derived from the displacement arithmetic; confirm against VCS.  */
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Pre-386 CPUs have no 0x0f,0x8N form: negate the
	     condition to branch past an inserted unconditional
	     word-displacement jump.  */
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  extension = 4;	/* 1 displacement + 1 opcode + 2 displacement
				   beyond the original opcode byte.  */
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If size if less then four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     then -2**32.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

/* NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  if (fixP->fx_pcrel)
    {
      /* Convert absolute relocation types to their PC-relative
	 counterparts.  */
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.
	   At runtime we merely add the offset to the actual PLT entry.
	   NB: Subtract the offset size only for jump instructions.  */
	if (fixP->fx_pcrel)
	  value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
/* Turn a floating-point literal of kind TYPE into LITTLENUMs at LITP,
   setting *SIZEP to the number of chars emitted; returns an error
   message or NULL on success (standard md_atof contract).  */
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* This outputs the LITTLENUMs in REVERSE order;
     in accord with the bigendian 386.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
/* Scratch buffer for output_invalid: holds "'c'" or "(0xNN)".  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Format character C for an "invalid character" diagnostic: printable
   characters as 'c', others as (0xNN).  Returns a pointer to a static
   buffer (not reentrant).
   NOTE(review): the ISPRINT guard was restored from the published form
   of this function — verify against version control.  */
static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
/* REG_STRING starts *before* REGISTER_PREFIX.  */

/* Parse a register name at REG_STRING; on success return its reg_entry
   and set *END_OP past the consumed text, otherwise return NULL.  The
   entry is additionally validated against the active cpu_arch_flags
   and flag_code so unavailable registers are rejected.
   NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (!cpu_arch_flags.bitfield.cpu8087
	  && !cpu_arch_flags.bitfield.cpu287
	  && !cpu_arch_flags.bitfield.cpu387)
	return (const reg_entry *) NULL;

      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit GPRs, high segment regs, and control/debug/test registers
     need at least an i386.  */
  if ((r->reg_type.bitfield.dword
       || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
       || r->reg_type.bitfield.class == RegCR
       || r->reg_type.bitfield.class == RegDR
       || r->reg_type.bitfield.class == RegTR)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (!cpu_arch_flags.bitfield.cpuavx512f)
    {
      if (r->reg_type.bitfield.zmmword
	  || r->reg_type.bitfield.class == RegMask)
	return (const reg_entry *) NULL;

      if (!cpu_arch_flags.bitfield.cpuavx)
	{
	  if (r->reg_type.bitfield.ymmword)
	    return (const reg_entry *) NULL;

	  if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
	    return (const reg_entry *) NULL;
	}
    }

  if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg && r->reg_num == RegIZ)
    return (const reg_entry *) NULL;

  /* Upper 16 vector registers are only available with VREX in 64bit
     mode, and require EVEX encoding.  */
  if (r->reg_flags & RegVRex)
    {
      if (!cpu_arch_flags.bitfield.cpuavx512f
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.vec_encoding = vex_encoding_evex;
    }

  /* REX-only or 64-bit registers are invalid outside 64-bit mode,
     except 64-bit control registers with CPU_LM.  */
  if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
      && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The pseudo "flat" segment register is Intel-syntax only.  */
  if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
      && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
/* REG_STRING starts *before* REGISTER_PREFIX.  */

/* Like parse_real_register, but also accepts symbols that were .equ'd
   to a register (symbols living in reg_section).
   NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      input_line_pointer = reg_string;
      c = get_symbol_name (&reg_string);
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  if ((r->reg_flags & RegVRex))
	    i.vec_encoding = vex_encoding_evex;
	  *end_op = input_line_pointer;
	}
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
/* Hook called by the expression parser for an unrecognized NAME.
   Recognizes register names (and, for Intel syntax, Intel-specific
   names); fills in E and returns 1 on success, 0 otherwise.
   NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Temporarily restore the terminator so the register parser sees
     the original text.  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
/* Hook called by the generic expression code for syntax it does not
   understand: handles %-prefixed registers and, for Intel syntax,
   bracketed index expressions.
   NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* Unterminated bracket: report nothing and rewind.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
/* Command-line option handling: short options, long-option codes, and
   the getopt_long table consumed by md_parse_option.
   NOTE(review): gaps in this damaged copy (#else/#endif lines and the
   table terminator) were restored from the published form — verify.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqnO::";
#else
const char *md_shortopts = "qnO::";
#endif

#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
#define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
#define OPTION_MVEXWIG (OPTION_MD_BASE + 26)

struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
  {"mshared", no_argument, NULL, OPTION_MSHARED},
  {"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"mvexwig", required_argument, NULL, OPTION_MVEXWIG},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
  {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
  {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {"mamd64", no_argument, NULL, OPTION_MAMD64},
  {"mintel64", no_argument, NULL, OPTION_MINTEL64},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
/* Handle one command-line option C (a short-option character or one of
   the OPTION_* codes above) with argument ARG.  Returns 1 if the option
   was consumed, 0 if unrecognized; fatal errors for malformed values.
   NOTE(review): this copy is heavily text-damaged; missing lines were
   restored from the published form of this function — verify every
   case against version control before relying on it.  */
int
md_parse_option (int c, const char *arg)
{
  unsigned int j;
  char *arch, *next, *saved;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      if ((arg[0] != 'y' && arg[0] != 'n') || arg[1])
	return 0;
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;

    case OPTION_MSHARED:
      shared = 1;
      break;

    case OPTION_X86_USED_NOTE:
      if (strcasecmp (arg, "yes") == 0)
	x86_used_note = 1;
      else if (strcasecmp (arg, "no") == 0)
	x86_used_note = 0;
      else
	as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
      break;

#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	const char **list, **l;

	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
	char *n, *t;
	const char *s;

	/* Strip `/' from the comment-character set so it can be used
	   as a division operator.  */
	n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      saved = xstrdup (arg);
      arch = saved;
      /* Allow -march=+nosse.  */
      if (*arch == '+')
	arch++;
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  flags = cpu_flags_or (cpu_arch_flags,
					cpu_arch[j].flags);

		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  else
		    cpu_arch_isa_flags
		      = cpu_flags_or (cpu_arch_isa_flags,
				      cpu_arch[j].flags);
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    {
	      /* Disable an ISA extension.  */
	      for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
		if (strcmp (arch, cpu_noarch [j].name) == 0)
		  {
		    i386_cpu_flags flags;

		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_noarch [j].flags);
		    if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		      {
			if (cpu_sub_arch_name)
			  {
			    char *name = cpu_sub_arch_name;
			    cpu_sub_arch_name = concat (name, arch,
							(const char *) NULL);
			    free (name);
			  }
			else
			  cpu_sub_arch_name = xstrdup (arch);
			cpu_arch_flags = flags;
			cpu_arch_isa_flags = flags;
		      }
		    break;
		  }

	      if (j >= ARRAY_SIZE (cpu_noarch))
		j = ARRAY_SIZE (cpu_arch);
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL);
      free (saved);
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    case OPTION_MVEXWIG:
      if (strcmp (arg, "0") == 0)
	vexwig = vexw0;
      else if (strcmp (arg, "1") == 0)
	vexwig = vexw1;
      else
	as_fatal (_("invalid -mvexwig= option: `%s'"), arg);
      break;

    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      if (strcmp (arg, "128") == 0)
	evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
	evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
	evexlig = evexl512;
      else
	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXRCIG:
      if (strcmp (arg, "rne") == 0)
	evexrcig = rne;
      else if (strcmp (arg, "rd") == 0)
	evexrcig = rd;
      else if (strcmp (arg, "ru") == 0)
	evexrcig = ru;
      else if (strcmp (arg, "rz") == 0)
	evexrcig = rz;
      else
	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      if (strcmp (arg, "0") == 0)
	evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
	evexwig = evexw1;
      else
	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

# if defined (TE_PE) || defined (TE_PEP)
    case OPTION_MBIG_OBJ:
      use_big_obj = 1;
      break;
#endif

    case OPTION_MOMIT_LOCK_PREFIX:
      if (strcasecmp (arg, "yes") == 0)
	omit_lock_prefix = 1;
      else if (strcasecmp (arg, "no") == 0)
	omit_lock_prefix = 0;
      else
	as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
      break;

    case OPTION_MFENCE_AS_LOCK_ADD:
      if (strcasecmp (arg, "yes") == 0)
	avoid_fence = 1;
      else if (strcasecmp (arg, "no") == 0)
	avoid_fence = 0;
      else
	as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
      break;

    case OPTION_MRELAX_RELOCATIONS:
      if (strcasecmp (arg, "yes") == 0)
	generate_relax_relocations = 1;
      else if (strcasecmp (arg, "no") == 0)
	generate_relax_relocations = 0;
      else
	as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
      break;

    case OPTION_MAMD64:
      intel64 = 0;
      break;

    case OPTION_MINTEL64:
      intel64 = 1;
      break;

    case 'O':
      if (arg == NULL)
	{
	  optimize = 1;
	  /* Turn off -Os.  */
	  optimize_for_space = 0;
	}
      else if (*arg == 's')
	{
	  optimize_for_space = 1;
	  /* Turn on all encoding optimizations.  */
	  optimize = INT_MAX;
	}
      else
	{
	  optimize = atoi (arg);
	  /* Turn off -Os.  */
	  optimize_for_space = 0;
	}
      break;

    default:
      return 0;
    }
  return 1;
}
/* Template sized to one usage-listing line; show_arch fills it in.
   NOTE(review): the exact number of spaces was restored from the
   published form — verify against version control.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN chars) to the comma-separated list being built in
   MESSAGE at P, flushing a full line to STREAM and restarting at START
   when the remaining room (*LEFT_P) runs out.  Returns the new write
   position and updates *LEFT_P.  */
static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
/* Print to STREAM the list of supported processors (EXT == 0) or ISA
   extensions (EXT != 0); with CHECK set, processors not implementing
   at least i386 are omitted.
   NOTE(review): gaps in this damaged copy were restored from the
   published form of this function — verify against version control.  */
static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is an processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      p = output_message (stream, p, message, start, &left, name, len);
    }

  /* Display disabled extensions.  */
  if (ext)
    for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
      {
	name = cpu_noarch [j].name;
	len = cpu_noarch [j].len;
	p = output_message (stream, p, message, start, &left, name,
			    len);
      }

  *p = '\0';
  fprintf (stream, "%s\n", message);
}
11681 md_show_usage (FILE *stream
)
11683 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11684 fprintf (stream
, _("\
11685 -Qy, -Qn ignored\n\
11686 -V print assembler version number\n\
11689 fprintf (stream
, _("\
11690 -n Do not optimize code alignment\n\
11691 -q quieten some warnings\n"));
11692 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11693 fprintf (stream
, _("\
11696 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11697 || defined (TE_PE) || defined (TE_PEP))
11698 fprintf (stream
, _("\
11699 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
11701 #ifdef SVR4_COMMENT_CHARS
11702 fprintf (stream
, _("\
11703 --divide do not treat `/' as a comment character\n"));
11705 fprintf (stream
, _("\
11706 --divide ignored\n"));
11708 fprintf (stream
, _("\
11709 -march=CPU[,+EXTENSION...]\n\
11710 generate code for CPU and EXTENSION, CPU is one of:\n"));
11711 show_arch (stream
, 0, 1);
11712 fprintf (stream
, _("\
11713 EXTENSION is combination of:\n"));
11714 show_arch (stream
, 1, 0);
11715 fprintf (stream
, _("\
11716 -mtune=CPU optimize for CPU, CPU is one of:\n"));
11717 show_arch (stream
, 0, 0);
11718 fprintf (stream
, _("\
11719 -msse2avx encode SSE instructions with VEX prefix\n"));
11720 fprintf (stream
, _("\
11721 -msse-check=[none|error|warning] (default: warning)\n\
11722 check SSE instructions\n"));
11723 fprintf (stream
, _("\
11724 -moperand-check=[none|error|warning] (default: warning)\n\
11725 check operand combinations for validity\n"));
11726 fprintf (stream
, _("\
11727 -mavxscalar=[128|256] (default: 128)\n\
11728 encode scalar AVX instructions with specific vector\n\
11730 fprintf (stream
, _("\
11731 -mvexwig=[0|1] (default: 0)\n\
11732 encode VEX instructions with specific VEX.W value\n\
11733 for VEX.W bit ignored instructions\n"));
11734 fprintf (stream
, _("\
11735 -mevexlig=[128|256|512] (default: 128)\n\
11736 encode scalar EVEX instructions with specific vector\n\
11738 fprintf (stream
, _("\
11739 -mevexwig=[0|1] (default: 0)\n\
11740 encode EVEX instructions with specific EVEX.W value\n\
11741 for EVEX.W bit ignored instructions\n"));
11742 fprintf (stream
, _("\
11743 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
11744 encode EVEX instructions with specific EVEX.RC value\n\
11745 for SAE-only ignored instructions\n"));
11746 fprintf (stream
, _("\
11747 -mmnemonic=[att|intel] "));
11748 if (SYSV386_COMPAT
)
11749 fprintf (stream
, _("(default: att)\n"));
11751 fprintf (stream
, _("(default: intel)\n"));
11752 fprintf (stream
, _("\
11753 use AT&T/Intel mnemonic\n"));
11754 fprintf (stream
, _("\
11755 -msyntax=[att|intel] (default: att)\n\
11756 use AT&T/Intel syntax\n"));
11757 fprintf (stream
, _("\
11758 -mindex-reg support pseudo index registers\n"));
11759 fprintf (stream
, _("\
11760 -mnaked-reg don't require `%%' prefix for registers\n"));
11761 fprintf (stream
, _("\
11762 -madd-bnd-prefix add BND prefix for all valid branches\n"));
11763 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11764 fprintf (stream
, _("\
11765 -mshared disable branch optimization for shared code\n"));
11766 fprintf (stream
, _("\
11767 -mx86-used-note=[no|yes] "));
11768 if (DEFAULT_X86_USED_NOTE
)
11769 fprintf (stream
, _("(default: yes)\n"));
11771 fprintf (stream
, _("(default: no)\n"));
11772 fprintf (stream
, _("\
11773 generate x86 used ISA and feature properties\n"));
11775 #if defined (TE_PE) || defined (TE_PEP)
11776 fprintf (stream
, _("\
11777 -mbig-obj generate big object files\n"));
11779 fprintf (stream
, _("\
11780 -momit-lock-prefix=[no|yes] (default: no)\n\
11781 strip all lock prefixes\n"));
11782 fprintf (stream
, _("\
11783 -mfence-as-lock-add=[no|yes] (default: no)\n\
11784 encode lfence, mfence and sfence as\n\
11785 lock addl $0x0, (%%{re}sp)\n"));
11786 fprintf (stream
, _("\
11787 -mrelax-relocations=[no|yes] "));
11788 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
11789 fprintf (stream
, _("(default: yes)\n"));
11791 fprintf (stream
, _("(default: no)\n"));
11792 fprintf (stream
, _("\
11793 generate relax relocations\n"));
11794 fprintf (stream
, _("\
11795 -mamd64 accept only AMD64 ISA [default]\n"));
11796 fprintf (stream
, _("\
11797 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Decodes default_arch ("x86_64",
   "x86_64:x32", "i386" or "iamcu") into code/ABI flags, fills in
   default ISA/tune flags when they are still all-zero, then maps the
   output flavour (a.out/COFF/ELF/Mach-O) to a BFD target name.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" selects the LP64 ABI; any suffix (":x32") selects x32.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No -march= was given: force the Intel MCU defaults.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects cannot carry 64-bit relocations.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
11933 md_undefined_symbol (char *name
)
11935 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
11936 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
11937 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
11938 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
11942 if (symbol_find (name
))
11943 as_bad (_("GOT already in symbol table"));
11944 GOT_symbol
= symbol_new (name
, undefined_section
,
11945 (valueT
) 0, &zero_address_frag
);
11952 /* Round up a section size to the appropriate boundary. */
11955 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
11957 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11958 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
11960 /* For a.out, force the section size to be aligned. If we don't do
11961 this, BFD will align it for us, but it will not write out the
11962 final bytes of the section. This may be a bug in BFD, but it is
11963 easier to fix it here since that is how the other a.out targets
11967 align
= bfd_section_alignment (segment
);
11968 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
11975 /* On the i386, PC-relative offsets are relative to the start of the
11976 next instruction. That is, the address of the offset, plus its
11977 size, since the offset is always the last part of the insn. */
11980 md_pcrel_from (fixS
*fixP
)
11982 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11988 s_bss (int ignore ATTRIBUTE_UNUSED
)
11992 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11994 obj_elf_section_change_hook ();
11996 temp
= get_absolute_expression ();
11997 subseg_set (bss_section
, (subsegT
) temp
);
11998 demand_empty_rest_of_line ();
12004 i386_validate_fix (fixS
*fixp
)
12006 if (fixp
->fx_subsy
)
12008 if (fixp
->fx_subsy
== GOT_symbol
)
12010 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
12014 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12015 if (fixp
->fx_tcbit2
)
12016 fixp
->fx_r_type
= (fixp
->fx_tcbit
12017 ? BFD_RELOC_X86_64_REX_GOTPCRELX
12018 : BFD_RELOC_X86_64_GOTPCRELX
);
12021 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
12026 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
12028 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
12030 fixp
->fx_subsy
= 0;
12033 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12034 else if (!object_64bit
)
12036 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
12037 && fixp
->fx_tcbit2
)
12038 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
12044 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
12047 bfd_reloc_code_real_type code
;
12049 switch (fixp
->fx_r_type
)
12051 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12052 case BFD_RELOC_SIZE32
:
12053 case BFD_RELOC_SIZE64
:
12054 if (S_IS_DEFINED (fixp
->fx_addsy
)
12055 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
12057 /* Resolve size relocation against local symbol to size of
12058 the symbol plus addend. */
12059 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
12060 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
12061 && !fits_in_unsigned_long (value
))
12062 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12063 _("symbol size computation overflow"));
12064 fixp
->fx_addsy
= NULL
;
12065 fixp
->fx_subsy
= NULL
;
12066 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
12070 /* Fall through. */
12072 case BFD_RELOC_X86_64_PLT32
:
12073 case BFD_RELOC_X86_64_GOT32
:
12074 case BFD_RELOC_X86_64_GOTPCREL
:
12075 case BFD_RELOC_X86_64_GOTPCRELX
:
12076 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12077 case BFD_RELOC_386_PLT32
:
12078 case BFD_RELOC_386_GOT32
:
12079 case BFD_RELOC_386_GOT32X
:
12080 case BFD_RELOC_386_GOTOFF
:
12081 case BFD_RELOC_386_GOTPC
:
12082 case BFD_RELOC_386_TLS_GD
:
12083 case BFD_RELOC_386_TLS_LDM
:
12084 case BFD_RELOC_386_TLS_LDO_32
:
12085 case BFD_RELOC_386_TLS_IE_32
:
12086 case BFD_RELOC_386_TLS_IE
:
12087 case BFD_RELOC_386_TLS_GOTIE
:
12088 case BFD_RELOC_386_TLS_LE_32
:
12089 case BFD_RELOC_386_TLS_LE
:
12090 case BFD_RELOC_386_TLS_GOTDESC
:
12091 case BFD_RELOC_386_TLS_DESC_CALL
:
12092 case BFD_RELOC_X86_64_TLSGD
:
12093 case BFD_RELOC_X86_64_TLSLD
:
12094 case BFD_RELOC_X86_64_DTPOFF32
:
12095 case BFD_RELOC_X86_64_DTPOFF64
:
12096 case BFD_RELOC_X86_64_GOTTPOFF
:
12097 case BFD_RELOC_X86_64_TPOFF32
:
12098 case BFD_RELOC_X86_64_TPOFF64
:
12099 case BFD_RELOC_X86_64_GOTOFF64
:
12100 case BFD_RELOC_X86_64_GOTPC32
:
12101 case BFD_RELOC_X86_64_GOT64
:
12102 case BFD_RELOC_X86_64_GOTPCREL64
:
12103 case BFD_RELOC_X86_64_GOTPC64
:
12104 case BFD_RELOC_X86_64_GOTPLT64
:
12105 case BFD_RELOC_X86_64_PLTOFF64
:
12106 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12107 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12108 case BFD_RELOC_RVA
:
12109 case BFD_RELOC_VTABLE_ENTRY
:
12110 case BFD_RELOC_VTABLE_INHERIT
:
12112 case BFD_RELOC_32_SECREL
:
12114 code
= fixp
->fx_r_type
;
12116 case BFD_RELOC_X86_64_32S
:
12117 if (!fixp
->fx_pcrel
)
12119 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
12120 code
= fixp
->fx_r_type
;
12123 /* Fall through. */
12125 if (fixp
->fx_pcrel
)
12127 switch (fixp
->fx_size
)
12130 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12131 _("can not do %d byte pc-relative relocation"),
12133 code
= BFD_RELOC_32_PCREL
;
12135 case 1: code
= BFD_RELOC_8_PCREL
; break;
12136 case 2: code
= BFD_RELOC_16_PCREL
; break;
12137 case 4: code
= BFD_RELOC_32_PCREL
; break;
12139 case 8: code
= BFD_RELOC_64_PCREL
; break;
12145 switch (fixp
->fx_size
)
12148 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12149 _("can not do %d byte relocation"),
12151 code
= BFD_RELOC_32
;
12153 case 1: code
= BFD_RELOC_8
; break;
12154 case 2: code
= BFD_RELOC_16
; break;
12155 case 4: code
= BFD_RELOC_32
; break;
12157 case 8: code
= BFD_RELOC_64
; break;
12164 if ((code
== BFD_RELOC_32
12165 || code
== BFD_RELOC_32_PCREL
12166 || code
== BFD_RELOC_X86_64_32S
)
12168 && fixp
->fx_addsy
== GOT_symbol
)
12171 code
= BFD_RELOC_386_GOTPC
;
12173 code
= BFD_RELOC_X86_64_GOTPC32
;
12175 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
12177 && fixp
->fx_addsy
== GOT_symbol
)
12179 code
= BFD_RELOC_X86_64_GOTPC64
;
12182 rel
= XNEW (arelent
);
12183 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
12184 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
12186 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
12188 if (!use_rela_relocations
)
12190 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
12191 vtable entry to be used in the relocation's section offset. */
12192 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
12193 rel
->address
= fixp
->fx_offset
;
12194 #if defined (OBJ_COFF) && defined (TE_PE)
12195 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
12196 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
12201 /* Use the rela in 64bit mode. */
12204 if (disallow_64bit_reloc
)
12207 case BFD_RELOC_X86_64_DTPOFF64
:
12208 case BFD_RELOC_X86_64_TPOFF64
:
12209 case BFD_RELOC_64_PCREL
:
12210 case BFD_RELOC_X86_64_GOTOFF64
:
12211 case BFD_RELOC_X86_64_GOT64
:
12212 case BFD_RELOC_X86_64_GOTPCREL64
:
12213 case BFD_RELOC_X86_64_GOTPC64
:
12214 case BFD_RELOC_X86_64_GOTPLT64
:
12215 case BFD_RELOC_X86_64_PLTOFF64
:
12216 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12217 _("cannot represent relocation type %s in x32 mode"),
12218 bfd_get_reloc_code_name (code
));
12224 if (!fixp
->fx_pcrel
)
12225 rel
->addend
= fixp
->fx_offset
;
12229 case BFD_RELOC_X86_64_PLT32
:
12230 case BFD_RELOC_X86_64_GOT32
:
12231 case BFD_RELOC_X86_64_GOTPCREL
:
12232 case BFD_RELOC_X86_64_GOTPCRELX
:
12233 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12234 case BFD_RELOC_X86_64_TLSGD
:
12235 case BFD_RELOC_X86_64_TLSLD
:
12236 case BFD_RELOC_X86_64_GOTTPOFF
:
12237 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12238 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12239 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
12242 rel
->addend
= (section
->vma
12244 + fixp
->fx_addnumber
12245 + md_pcrel_from (fixp
));
12250 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
12251 if (rel
->howto
== NULL
)
12253 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12254 _("cannot represent relocation type %s"),
12255 bfd_get_reloc_code_name (code
));
12256 /* Set howto to a garbage value so that we can keep going. */
12257 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
12258 gas_assert (rel
->howto
!= NULL
);
12264 #include "tc-i386-intel.c"
12267 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
12269 int saved_naked_reg
;
12270 char saved_register_dot
;
12272 saved_naked_reg
= allow_naked_reg
;
12273 allow_naked_reg
= 1;
12274 saved_register_dot
= register_chars
['.'];
12275 register_chars
['.'] = '.';
12276 allow_pseudo_reg
= 1;
12277 expression_and_evaluate (exp
);
12278 allow_pseudo_reg
= 0;
12279 register_chars
['.'] = saved_register_dot
;
12280 allow_naked_reg
= saved_naked_reg
;
12282 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
12284 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
12286 exp
->X_op
= O_constant
;
12287 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
12288 .dw2_regnum
[flag_code
>> 1];
12291 exp
->X_op
= O_illegal
;
12296 tc_x86_frame_initial_instructions (void)
12298 static unsigned int sp_regno
[2];
12300 if (!sp_regno
[flag_code
>> 1])
12302 char *saved_input
= input_line_pointer
;
12303 char sp
[][4] = {"esp", "rsp"};
12306 input_line_pointer
= sp
[flag_code
>> 1];
12307 tc_x86_parse_to_dw2regnum (&exp
);
12308 gas_assert (exp
.X_op
== O_constant
);
12309 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
12310 input_line_pointer
= saved_input
;
12313 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
12314 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
12318 x86_dwarf2_addr_size (void)
12320 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
12321 if (x86_elf_abi
== X86_64_X32_ABI
)
12324 return bfd_arch_bits_per_address (stdoutput
) / 8;
12328 i386_elf_section_type (const char *str
, size_t len
)
12330 if (flag_code
== CODE_64BIT
12331 && len
== sizeof ("unwind") - 1
12332 && strncmp (str
, "unwind", 6) == 0)
12333 return SHT_X86_64_UNWIND
;
12340 i386_solaris_fix_up_eh_frame (segT sec
)
12342 if (flag_code
== CODE_64BIT
)
12343 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
12349 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
12353 exp
.X_op
= O_secrel
;
12354 exp
.X_add_symbol
= symbol
;
12355 exp
.X_add_number
= 0;
12356 emit_expr (&exp
, size
);
12360 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12361 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
12364 x86_64_section_letter (int letter
, const char **ptr_msg
)
12366 if (flag_code
== CODE_64BIT
)
12369 return SHF_X86_64_LARGE
;
12371 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
12374 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
12379 x86_64_section_word (char *str
, size_t len
)
12381 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
12382 return SHF_X86_64_LARGE
;
12388 handle_large_common (int small ATTRIBUTE_UNUSED
)
12390 if (flag_code
!= CODE_64BIT
)
12392 s_comm_internal (0, elf_common_parse
);
12393 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
12397 static segT lbss_section
;
12398 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
12399 asection
*saved_bss_section
= bss_section
;
12401 if (lbss_section
== NULL
)
12403 flagword applicable
;
12404 segT seg
= now_seg
;
12405 subsegT subseg
= now_subseg
;
12407 /* The .lbss section is for local .largecomm symbols. */
12408 lbss_section
= subseg_new (".lbss", 0);
12409 applicable
= bfd_applicable_section_flags (stdoutput
);
12410 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
12411 seg_info (lbss_section
)->bss
= 1;
12413 subseg_set (seg
, subseg
);
12416 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
12417 bss_section
= lbss_section
;
12419 s_comm_internal (0, elf_common_parse
);
12421 elf_com_section_ptr
= saved_com_section_ptr
;
12422 bss_section
= saved_bss_section
;
12425 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */