1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2015 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 /* Default to compress debug sections for Linux. */
38 enum compressed_debug_section_type flag_compress_debug
39 = COMPRESS_DEBUG_ZLIB
;
42 #ifndef REGISTER_WARNINGS
43 #define REGISTER_WARNINGS 1
46 #ifndef INFER_ADDR_PREFIX
47 #define INFER_ADDR_PREFIX 1
51 #define DEFAULT_ARCH "i386"
56 #define INLINE __inline__
62 /* Prefixes will be emitted in the order defined below.
63 WAIT_PREFIX must be the first prefix since FWAIT is really an
64 instruction, and so must come before any prefixes.
65 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
66 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
72 #define HLE_PREFIX REP_PREFIX
73 #define BND_PREFIX REP_PREFIX
75 #define REX_PREFIX 6 /* must come last. */
76 #define MAX_PREFIXES 7 /* max prefixes per opcode */
78 /* we define the syntax here (modulo base,index,scale syntax) */
79 #define REGISTER_PREFIX '%'
80 #define IMMEDIATE_PREFIX '$'
81 #define ABSOLUTE_PREFIX '*'
83 /* these are the instruction mnemonic suffixes in AT&T syntax or
84 memory operand size in Intel syntax. */
85 #define WORD_MNEM_SUFFIX 'w'
86 #define BYTE_MNEM_SUFFIX 'b'
87 #define SHORT_MNEM_SUFFIX 's'
88 #define LONG_MNEM_SUFFIX 'l'
89 #define QWORD_MNEM_SUFFIX 'q'
90 #define XMMWORD_MNEM_SUFFIX 'x'
91 #define YMMWORD_MNEM_SUFFIX 'y'
92 #define ZMMWORD_MNEM_SUFFIX 'z'
93 /* Intel Syntax. Use a non-ascii letter since it never appears
95 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
97 #define END_OF_INSN '\0'
100 'templates' is for grouping together 'template' structures for opcodes
101 of the same name. This is only used for storing the insns in the grand
102 ole hash table of insns.
103 The templates themselves start at START and range up to (but not including)
108 const insn_template
*start
;
109 const insn_template
*end
;
113 /* 386 operand encoding bytes: see 386 book for details of this. */
116 unsigned int regmem
; /* codes register or memory operand */
117 unsigned int reg
; /* codes register operand (or extended opcode) */
118 unsigned int mode
; /* how to interpret regmem & reg */
122 /* x86-64 extension prefix. */
123 typedef int rex_byte
;
125 /* 386 opcode byte to code indirect addressing. */
134 /* x86 arch names, types and features */
137 const char *name
; /* arch name */
138 unsigned int len
; /* arch string length */
139 enum processor_type type
; /* arch type */
140 i386_cpu_flags flags
; /* cpu feature flags */
141 unsigned int skip
; /* show_arch should skip this. */
142 unsigned int negated
; /* turn off indicated flags. */
146 static void update_code_flag (int, int);
147 static void set_code_flag (int);
148 static void set_16bit_gcc_code_flag (int);
149 static void set_intel_syntax (int);
150 static void set_intel_mnemonic (int);
151 static void set_allow_index_reg (int);
152 static void set_check (int);
153 static void set_cpu_arch (int);
155 static void pe_directive_secrel (int);
157 static void signed_cons (int);
158 static char *output_invalid (int c
);
159 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
161 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
163 static int i386_att_operand (char *);
164 static int i386_intel_operand (char *, int);
165 static int i386_intel_simplify (expressionS
*);
166 static int i386_intel_parse_name (const char *, expressionS
*);
167 static const reg_entry
*parse_register (char *, char **);
168 static char *parse_insn (char *, char *);
169 static char *parse_operands (char *, const char *);
170 static void swap_operands (void);
171 static void swap_2_operands (int, int);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template
*match_template (void);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const seg_entry
*build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS
*, offsetT
);
186 static void output_disp (fragS
*, offsetT
);
188 static void s_bss (int);
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
194 static const char *default_arch
= DEFAULT_ARCH
;
196 /* This struct describes rounding control and SAE in the instruction. */
210 static struct RC_Operation rc_op
;
212 /* The struct describes masking, applied to OPERAND in the instruction.
213 MASK is a pointer to the corresponding mask register. ZEROING tells
214 whether merging or zeroing mask is used. */
215 struct Mask_Operation
217 const reg_entry
*mask
;
218 unsigned int zeroing
;
219 /* The operand where this operation is associated. */
223 static struct Mask_Operation mask_op
;
225 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
227 struct Broadcast_Operation
229 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
232 /* Index of broadcasted operand. */
236 static struct Broadcast_Operation broadcast_op
;
241 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
242 unsigned char bytes
[4];
244 /* Destination or source register specifier. */
245 const reg_entry
*register_specifier
;
248 /* 'md_assemble ()' gathers together information and puts it into a
255 const reg_entry
*regs
;
260 operand_size_mismatch
,
261 operand_type_mismatch
,
262 register_type_mismatch
,
263 number_of_operands_mismatch
,
264 invalid_instruction_suffix
,
267 unsupported_with_intel_mnemonic
,
270 invalid_vsib_address
,
271 invalid_vector_register_set
,
272 unsupported_vector_index_register
,
273 unsupported_broadcast
,
274 broadcast_not_on_src_operand
,
277 mask_not_on_destination
,
280 rc_sae_operand_not_last_imm
,
281 invalid_register_operand
,
287 /* TM holds the template for the insn were currently assembling. */
290 /* SUFFIX holds the instruction size suffix for byte, word, dword
291 or qword, if given. */
294 /* OPERANDS gives the number of given operands. */
295 unsigned int operands
;
297 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
298 of given register, displacement, memory operands and immediate
300 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
302 /* TYPES [i] is the type (see above #defines) which tells us how to
303 use OP[i] for the corresponding operand. */
304 i386_operand_type types
[MAX_OPERANDS
];
306 /* Displacement expression, immediate expression, or register for each
308 union i386_op op
[MAX_OPERANDS
];
310 /* Flags for operands. */
311 unsigned int flags
[MAX_OPERANDS
];
312 #define Operand_PCrel 1
314 /* Relocation type for operand */
315 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
317 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
318 the base index byte below. */
319 const reg_entry
*base_reg
;
320 const reg_entry
*index_reg
;
321 unsigned int log2_scale_factor
;
323 /* SEG gives the seg_entries of this insn. They are zero unless
324 explicit segment overrides are given. */
325 const seg_entry
*seg
[2];
327 /* PREFIX holds all the given prefix opcodes (usually null).
328 PREFIXES is the number of prefix opcodes. */
329 unsigned int prefixes
;
330 unsigned char prefix
[MAX_PREFIXES
];
332 /* RM and SIB are the modrm byte and the sib byte where the
333 addressing modes of this insn are encoded. */
340 /* Masking attributes. */
341 struct Mask_Operation
*mask
;
343 /* Rounding control and SAE attributes. */
344 struct RC_Operation
*rounding
;
346 /* Broadcasting attributes. */
347 struct Broadcast_Operation
*broadcast
;
349 /* Compressed disp8*N attribute. */
350 unsigned int memshift
;
352 /* Swap operand in encoding. */
353 unsigned int swap_operand
;
355 /* Prefer 8bit or 32bit displacement in encoding. */
358 disp_encoding_default
= 0,
364 const char *rep_prefix
;
367 const char *hle_prefix
;
369 /* Have BND prefix. */
370 const char *bnd_prefix
;
372 /* Need VREX to support upper 16 registers. */
376 enum i386_error error
;
379 typedef struct _i386_insn i386_insn
;
381 /* Link RC type with corresponding string, that'll be looked for in
390 static const struct RC_name RC_NamesTable
[] =
392 { rne
, STRING_COMMA_LEN ("rn-sae") },
393 { rd
, STRING_COMMA_LEN ("rd-sae") },
394 { ru
, STRING_COMMA_LEN ("ru-sae") },
395 { rz
, STRING_COMMA_LEN ("rz-sae") },
396 { saeonly
, STRING_COMMA_LEN ("sae") },
399 /* List of chars besides those in app.c:symbol_chars that can start an
400 operand. Used to prevent the scrubber eating vital white-space. */
401 const char extra_symbol_chars
[] = "*%-([{"
410 #if (defined (TE_I386AIX) \
411 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
412 && !defined (TE_GNU) \
413 && !defined (TE_LINUX) \
414 && !defined (TE_NACL) \
415 && !defined (TE_NETWARE) \
416 && !defined (TE_FreeBSD) \
417 && !defined (TE_DragonFly) \
418 && !defined (TE_NetBSD)))
419 /* This array holds the chars that always start a comment. If the
420 pre-processor is disabled, these aren't very useful. The option
421 --divide will remove '/' from this list. */
422 const char *i386_comment_chars
= "#/";
423 #define SVR4_COMMENT_CHARS 1
424 #define PREFIX_SEPARATOR '\\'
427 const char *i386_comment_chars
= "#";
428 #define PREFIX_SEPARATOR '/'
431 /* This array holds the chars that only start a comment at the beginning of
432 a line. If the line seems to have the form '# 123 filename'
433 .line and .file directives will appear in the pre-processed output.
434 Note that input_file.c hand checks for '#' at the beginning of the
435 first line of the input file. This is because the compiler outputs
436 #NO_APP at the beginning of its output.
437 Also note that comments started like this one will always work if
438 '/' isn't otherwise defined. */
439 const char line_comment_chars
[] = "#/";
441 const char line_separator_chars
[] = ";";
443 /* Chars that can be used to separate mant from exp in floating point
445 const char EXP_CHARS
[] = "eE";
447 /* Chars that mean this number is a floating point constant
450 const char FLT_CHARS
[] = "fFdDxX";
452 /* Tables for lexical analysis. */
453 static char mnemonic_chars
[256];
454 static char register_chars
[256];
455 static char operand_chars
[256];
456 static char identifier_chars
[256];
457 static char digit_chars
[256];
459 /* Lexical macros. */
460 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
461 #define is_operand_char(x) (operand_chars[(unsigned char) x])
462 #define is_register_char(x) (register_chars[(unsigned char) x])
463 #define is_space_char(x) ((x) == ' ')
464 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
465 #define is_digit_char(x) (digit_chars[(unsigned char) x])
467 /* All non-digit non-letter characters that may occur in an operand. */
468 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
470 /* md_assemble() always leaves the strings it's passed unaltered. To
471 effect this we maintain a stack of saved characters that we've smashed
472 with '\0's (indicating end of strings for various sub-fields of the
473 assembler instruction). */
474 static char save_stack
[32];
475 static char *save_stack_p
;
476 #define END_STRING_AND_SAVE(s) \
477 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
478 #define RESTORE_END_STRING(s) \
479 do { *(s) = *--save_stack_p; } while (0)
481 /* The instruction we're assembling. */
484 /* Possible templates for current insn. */
485 static const templates
*current_templates
;
487 /* Per instruction expressionS buffers: max displacements & immediates. */
488 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
489 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
491 /* Current operand we are working on. */
492 static int this_operand
= -1;
494 /* We support four different modes. FLAG_CODE variable is used to distinguish
502 static enum flag_code flag_code
;
503 static unsigned int object_64bit
;
504 static unsigned int disallow_64bit_reloc
;
505 static int use_rela_relocations
= 0;
507 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
508 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
509 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
511 /* The ELF ABI to use. */
519 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
522 #if defined (TE_PE) || defined (TE_PEP)
523 /* Use big object file format. */
524 static int use_big_obj
= 0;
527 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
528 /* 1 if not generating code for a shared library. */
529 static int no_shared
= 0;
532 /* 1 for intel syntax,
534 static int intel_syntax
= 0;
536 /* 1 for intel mnemonic,
537 0 if att mnemonic. */
538 static int intel_mnemonic
= !SYSV386_COMPAT
;
540 /* 1 if support old (<= 2.8.1) versions of gcc. */
541 static int old_gcc
= OLDGCC_COMPAT
;
543 /* 1 if pseudo registers are permitted. */
544 static int allow_pseudo_reg
= 0;
546 /* 1 if register prefix % not required. */
547 static int allow_naked_reg
= 0;
549 /* 1 if the assembler should add BND prefix for all control-transferring
550 instructions supporting it, even if this prefix wasn't specified
552 static int add_bnd_prefix
= 0;
554 /* 1 if pseudo index register, eiz/riz, is allowed. */
555 static int allow_index_reg
= 0;
557 /* 1 if the assembler should ignore LOCK prefix, even if it was
558 specified explicitly. */
559 static int omit_lock_prefix
= 0;
561 static enum check_kind
567 sse_check
, operand_check
= check_warning
;
569 /* Register prefix used for error message. */
570 static const char *register_prefix
= "%";
572 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
573 leave, push, and pop instructions so that gcc has the same stack
574 frame as in 32 bit mode. */
575 static char stackop_size
= '\0';
577 /* Non-zero to optimize code alignment. */
578 int optimize_align_code
= 1;
580 /* Non-zero to quieten some warnings. */
581 static int quiet_warnings
= 0;
584 static const char *cpu_arch_name
= NULL
;
585 static char *cpu_sub_arch_name
= NULL
;
587 /* CPU feature flags. */
588 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
590 /* If we have selected a cpu we are generating instructions for. */
591 static int cpu_arch_tune_set
= 0;
593 /* Cpu we are generating instructions for. */
594 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
596 /* CPU feature flags of cpu we are generating instructions for. */
597 static i386_cpu_flags cpu_arch_tune_flags
;
599 /* CPU instruction set architecture used. */
600 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
602 /* CPU feature flags of instruction set architecture used. */
603 i386_cpu_flags cpu_arch_isa_flags
;
605 /* If set, conditional jumps are not automatically promoted to handle
606 larger than a byte offset. */
607 static unsigned int no_cond_jump_promotion
= 0;
609 /* Encode SSE instructions with VEX prefix. */
610 static unsigned int sse2avx
;
612 /* Encode scalar AVX instructions with specific vector length. */
619 /* Encode scalar EVEX LIG instructions with specific vector length. */
627 /* Encode EVEX WIG instructions with specific evex.w. */
634 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
635 static enum rc_type evexrcig
= rne
;
637 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
638 static symbolS
*GOT_symbol
;
640 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
641 unsigned int x86_dwarf2_return_column
;
643 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
644 int x86_cie_data_alignment
;
646 /* Interface to relax_segment.
647 There are 3 major relax states for 386 jump insns because the
648 different types of jumps add different sizes to frags when we're
649 figuring out what sort of jump to choose to reach a given label. */
652 #define UNCOND_JUMP 0
654 #define COND_JUMP86 2
659 #define SMALL16 (SMALL | CODE16)
661 #define BIG16 (BIG | CODE16)
665 #define INLINE __inline__
671 #define ENCODE_RELAX_STATE(type, size) \
672 ((relax_substateT) (((type) << 2) | (size)))
673 #define TYPE_FROM_RELAX_STATE(s) \
675 #define DISP_SIZE_FROM_RELAX_STATE(s) \
676 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
678 /* This table is used by relax_frag to promote short jumps to long
679 ones where necessary. SMALL (short) jumps may be promoted to BIG
680 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
681 don't allow a short jump in a 32 bit code segment to be promoted to
682 a 16 bit offset jump because it's slower (requires data size
683 prefix), and doesn't work, unless the destination is in the bottom
684 64k of the code segment (The top 16 bits of eip are zeroed). */
686 const relax_typeS md_relax_table
[] =
689 1) most positive reach of this state,
690 2) most negative reach of this state,
691 3) how many bytes this mode will have in the variable part of the frag
692 4) which index into the table to try if we can't fit into this one. */
694 /* UNCOND_JUMP states. */
695 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
696 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
697 /* dword jmp adds 4 bytes to frag:
698 0 extra opcode bytes, 4 displacement bytes. */
700 /* word jmp adds 2 bytes to frag:
701 0 extra opcode bytes, 2 displacement bytes. */
704 /* COND_JUMP states. */
705 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
706 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
707 /* dword conditionals add 5 bytes to frag:
708 1 extra opcode byte, 4 displacement bytes. */
710 /* word conditionals add 3 bytes to frag:
711 1 extra opcode byte, 2 displacement bytes. */
714 /* COND_JUMP86 states. */
715 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
716 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
717 /* dword conditionals add 5 bytes to frag:
718 1 extra opcode byte, 4 displacement bytes. */
720 /* word conditionals add 4 bytes to frag:
721 1 displacement byte and a 3 byte long branch insn. */
725 static const arch_entry cpu_arch
[] =
727 /* Do not replace the first two entries - i386_target_format()
728 relies on them being there in this order. */
729 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
730 CPU_GENERIC32_FLAGS
, 0, 0 },
731 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
732 CPU_GENERIC64_FLAGS
, 0, 0 },
733 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
734 CPU_NONE_FLAGS
, 0, 0 },
735 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
736 CPU_I186_FLAGS
, 0, 0 },
737 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
738 CPU_I286_FLAGS
, 0, 0 },
739 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
740 CPU_I386_FLAGS
, 0, 0 },
741 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
742 CPU_I486_FLAGS
, 0, 0 },
743 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
744 CPU_I586_FLAGS
, 0, 0 },
745 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
746 CPU_I686_FLAGS
, 0, 0 },
747 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
748 CPU_I586_FLAGS
, 0, 0 },
749 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
750 CPU_PENTIUMPRO_FLAGS
, 0, 0 },
751 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
752 CPU_P2_FLAGS
, 0, 0 },
753 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
754 CPU_P3_FLAGS
, 0, 0 },
755 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
756 CPU_P4_FLAGS
, 0, 0 },
757 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
758 CPU_CORE_FLAGS
, 0, 0 },
759 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
760 CPU_NOCONA_FLAGS
, 0, 0 },
761 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
762 CPU_CORE_FLAGS
, 1, 0 },
763 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
764 CPU_CORE_FLAGS
, 0, 0 },
765 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
766 CPU_CORE2_FLAGS
, 1, 0 },
767 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
768 CPU_CORE2_FLAGS
, 0, 0 },
769 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
770 CPU_COREI7_FLAGS
, 0, 0 },
771 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
772 CPU_L1OM_FLAGS
, 0, 0 },
773 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
774 CPU_K1OM_FLAGS
, 0, 0 },
775 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
776 CPU_K6_FLAGS
, 0, 0 },
777 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
778 CPU_K6_2_FLAGS
, 0, 0 },
779 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
780 CPU_ATHLON_FLAGS
, 0, 0 },
781 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
782 CPU_K8_FLAGS
, 1, 0 },
783 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
784 CPU_K8_FLAGS
, 0, 0 },
785 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
786 CPU_K8_FLAGS
, 0, 0 },
787 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
788 CPU_AMDFAM10_FLAGS
, 0, 0 },
789 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
790 CPU_BDVER1_FLAGS
, 0, 0 },
791 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
792 CPU_BDVER2_FLAGS
, 0, 0 },
793 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
794 CPU_BDVER3_FLAGS
, 0, 0 },
795 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
796 CPU_BDVER4_FLAGS
, 0, 0 },
797 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
798 CPU_ZNVER1_FLAGS
, 0, 0 },
799 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
800 CPU_BTVER1_FLAGS
, 0, 0 },
801 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
802 CPU_BTVER2_FLAGS
, 0, 0 },
803 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
804 CPU_8087_FLAGS
, 0, 0 },
805 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
806 CPU_287_FLAGS
, 0, 0 },
807 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
808 CPU_387_FLAGS
, 0, 0 },
809 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN
,
810 CPU_ANY87_FLAGS
, 0, 1 },
811 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
812 CPU_MMX_FLAGS
, 0, 0 },
813 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN
,
814 CPU_3DNOWA_FLAGS
, 0, 1 },
815 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
816 CPU_SSE_FLAGS
, 0, 0 },
817 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
818 CPU_SSE2_FLAGS
, 0, 0 },
819 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
820 CPU_SSE3_FLAGS
, 0, 0 },
821 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
822 CPU_SSSE3_FLAGS
, 0, 0 },
823 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
824 CPU_SSE4_1_FLAGS
, 0, 0 },
825 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
826 CPU_SSE4_2_FLAGS
, 0, 0 },
827 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
828 CPU_SSE4_2_FLAGS
, 0, 0 },
829 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN
,
830 CPU_ANY_SSE_FLAGS
, 0, 1 },
831 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
832 CPU_AVX_FLAGS
, 0, 0 },
833 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
834 CPU_AVX2_FLAGS
, 0, 0 },
835 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
836 CPU_AVX512F_FLAGS
, 0, 0 },
837 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
838 CPU_AVX512CD_FLAGS
, 0, 0 },
839 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
840 CPU_AVX512ER_FLAGS
, 0, 0 },
841 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
842 CPU_AVX512PF_FLAGS
, 0, 0 },
843 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
844 CPU_AVX512DQ_FLAGS
, 0, 0 },
845 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
846 CPU_AVX512BW_FLAGS
, 0, 0 },
847 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
848 CPU_AVX512VL_FLAGS
, 0, 0 },
849 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN
,
850 CPU_ANY_AVX_FLAGS
, 0, 1 },
851 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
852 CPU_VMX_FLAGS
, 0, 0 },
853 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
854 CPU_VMFUNC_FLAGS
, 0, 0 },
855 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
856 CPU_SMX_FLAGS
, 0, 0 },
857 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
858 CPU_XSAVE_FLAGS
, 0, 0 },
859 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
860 CPU_XSAVEOPT_FLAGS
, 0, 0 },
861 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
862 CPU_XSAVEC_FLAGS
, 0, 0 },
863 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
864 CPU_XSAVES_FLAGS
, 0, 0 },
865 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
866 CPU_AES_FLAGS
, 0, 0 },
867 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
868 CPU_PCLMUL_FLAGS
, 0, 0 },
869 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
870 CPU_PCLMUL_FLAGS
, 1, 0 },
871 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
872 CPU_FSGSBASE_FLAGS
, 0, 0 },
873 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
874 CPU_RDRND_FLAGS
, 0, 0 },
875 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
876 CPU_F16C_FLAGS
, 0, 0 },
877 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
878 CPU_BMI2_FLAGS
, 0, 0 },
879 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
880 CPU_FMA_FLAGS
, 0, 0 },
881 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
882 CPU_FMA4_FLAGS
, 0, 0 },
883 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
884 CPU_XOP_FLAGS
, 0, 0 },
885 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
886 CPU_LWP_FLAGS
, 0, 0 },
887 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
888 CPU_MOVBE_FLAGS
, 0, 0 },
889 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
890 CPU_CX16_FLAGS
, 0, 0 },
891 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
892 CPU_EPT_FLAGS
, 0, 0 },
893 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
894 CPU_LZCNT_FLAGS
, 0, 0 },
895 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
896 CPU_HLE_FLAGS
, 0, 0 },
897 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
898 CPU_RTM_FLAGS
, 0, 0 },
899 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
900 CPU_INVPCID_FLAGS
, 0, 0 },
901 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
902 CPU_CLFLUSH_FLAGS
, 0, 0 },
903 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
904 CPU_NOP_FLAGS
, 0, 0 },
905 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
906 CPU_SYSCALL_FLAGS
, 0, 0 },
907 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
908 CPU_RDTSCP_FLAGS
, 0, 0 },
909 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
910 CPU_3DNOW_FLAGS
, 0, 0 },
911 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
912 CPU_3DNOWA_FLAGS
, 0, 0 },
913 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
914 CPU_PADLOCK_FLAGS
, 0, 0 },
915 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
916 CPU_SVME_FLAGS
, 1, 0 },
917 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
918 CPU_SVME_FLAGS
, 0, 0 },
919 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
920 CPU_SSE4A_FLAGS
, 0, 0 },
921 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
922 CPU_ABM_FLAGS
, 0, 0 },
923 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
924 CPU_BMI_FLAGS
, 0, 0 },
925 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
926 CPU_TBM_FLAGS
, 0, 0 },
927 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
928 CPU_ADX_FLAGS
, 0, 0 },
929 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
930 CPU_RDSEED_FLAGS
, 0, 0 },
931 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
932 CPU_PRFCHW_FLAGS
, 0, 0 },
933 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
934 CPU_SMAP_FLAGS
, 0, 0 },
935 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
936 CPU_MPX_FLAGS
, 0, 0 },
937 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
938 CPU_SHA_FLAGS
, 0, 0 },
939 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
940 CPU_CLFLUSHOPT_FLAGS
, 0, 0 },
941 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
942 CPU_PREFETCHWT1_FLAGS
, 0, 0 },
943 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
944 CPU_SE1_FLAGS
, 0, 0 },
945 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
946 CPU_CLWB_FLAGS
, 0, 0 },
947 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN
,
948 CPU_PCOMMIT_FLAGS
, 0, 0 },
949 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
950 CPU_AVX512IFMA_FLAGS
, 0, 0 },
951 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
952 CPU_AVX512VBMI_FLAGS
, 0, 0 },
953 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
954 CPU_CLZERO_FLAGS
, 0, 0 },
958 /* Like s_lcomm_internal in gas/read.c but the alignment string
959 is allowed to be optional. */
962 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
969 && *input_line_pointer
== ',')
971 align
= parse_align (needs_align
- 1);
973 if (align
== (addressT
) -1)
988 bss_alloc (symbolP
, size
, align
);
993 pe_lcomm (int needs_align
)
995 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
999 const pseudo_typeS md_pseudo_table
[] =
1001 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1002 {"align", s_align_bytes
, 0},
1004 {"align", s_align_ptwo
, 0},
1006 {"arch", set_cpu_arch
, 0},
1010 {"lcomm", pe_lcomm
, 1},
1012 {"ffloat", float_cons
, 'f'},
1013 {"dfloat", float_cons
, 'd'},
1014 {"tfloat", float_cons
, 'x'},
1016 {"slong", signed_cons
, 4},
1017 {"noopt", s_ignore
, 0},
1018 {"optim", s_ignore
, 0},
1019 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1020 {"code16", set_code_flag
, CODE_16BIT
},
1021 {"code32", set_code_flag
, CODE_32BIT
},
1022 {"code64", set_code_flag
, CODE_64BIT
},
1023 {"intel_syntax", set_intel_syntax
, 1},
1024 {"att_syntax", set_intel_syntax
, 0},
1025 {"intel_mnemonic", set_intel_mnemonic
, 1},
1026 {"att_mnemonic", set_intel_mnemonic
, 0},
1027 {"allow_index_reg", set_allow_index_reg
, 1},
1028 {"disallow_index_reg", set_allow_index_reg
, 0},
1029 {"sse_check", set_check
, 0},
1030 {"operand_check", set_check
, 1},
1031 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1032 {"largecomm", handle_large_common
, 0},
1034 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
1035 {"loc", dwarf2_directive_loc
, 0},
1036 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1039 {"secrel32", pe_directive_secrel
, 0},
1044 /* For interface with expression (). */
1045 extern char *input_line_pointer
;
1047 /* Hash table for instruction mnemonic lookup. */
1048 static struct hash_control
*op_hash
;
1050 /* Hash table for register lookup. */
1051 static struct hash_control
*reg_hash
;
1054 i386_align_code (fragS
*fragP
, int count
)
1056 /* Various efficient no-op patterns for aligning code labels.
1057 Note: Don't try to assemble the instructions in the comments.
1058 0L and 0w are not legal. */
1059 static const char f32_1
[] =
1061 static const char f32_2
[] =
1062 {0x66,0x90}; /* xchg %ax,%ax */
1063 static const char f32_3
[] =
1064 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1065 static const char f32_4
[] =
1066 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1067 static const char f32_5
[] =
1069 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1070 static const char f32_6
[] =
1071 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1072 static const char f32_7
[] =
1073 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1074 static const char f32_8
[] =
1076 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1077 static const char f32_9
[] =
1078 {0x89,0xf6, /* movl %esi,%esi */
1079 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1080 static const char f32_10
[] =
1081 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1082 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1083 static const char f32_11
[] =
1084 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1085 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1086 static const char f32_12
[] =
1087 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1088 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1089 static const char f32_13
[] =
1090 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1091 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1092 static const char f32_14
[] =
1093 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1094 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1095 static const char f16_3
[] =
1096 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
1097 static const char f16_4
[] =
1098 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1099 static const char f16_5
[] =
1101 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1102 static const char f16_6
[] =
1103 {0x89,0xf6, /* mov %si,%si */
1104 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1105 static const char f16_7
[] =
1106 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1107 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1108 static const char f16_8
[] =
1109 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1110 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1111 static const char jump_31
[] =
1112 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1113 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1114 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1115 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1116 static const char *const f32_patt
[] = {
1117 f32_1
, f32_2
, f32_3
, f32_4
, f32_5
, f32_6
, f32_7
, f32_8
,
1118 f32_9
, f32_10
, f32_11
, f32_12
, f32_13
, f32_14
1120 static const char *const f16_patt
[] = {
1121 f32_1
, f32_2
, f16_3
, f16_4
, f16_5
, f16_6
, f16_7
, f16_8
1123 /* nopl (%[re]ax) */
1124 static const char alt_3
[] =
1126 /* nopl 0(%[re]ax) */
1127 static const char alt_4
[] =
1128 {0x0f,0x1f,0x40,0x00};
1129 /* nopl 0(%[re]ax,%[re]ax,1) */
1130 static const char alt_5
[] =
1131 {0x0f,0x1f,0x44,0x00,0x00};
1132 /* nopw 0(%[re]ax,%[re]ax,1) */
1133 static const char alt_6
[] =
1134 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1135 /* nopl 0L(%[re]ax) */
1136 static const char alt_7
[] =
1137 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1138 /* nopl 0L(%[re]ax,%[re]ax,1) */
1139 static const char alt_8
[] =
1140 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1141 /* nopw 0L(%[re]ax,%[re]ax,1) */
1142 static const char alt_9
[] =
1143 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1144 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1145 static const char alt_10
[] =
1146 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1147 static const char *const alt_patt
[] = {
1148 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1152 /* Only align for at least a positive non-zero boundary. */
1153 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1156 /* We need to decide which NOP sequence to use for 32bit and
1157 64bit. When -mtune= is used:
1159 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1160 PROCESSOR_GENERIC32, f32_patt will be used.
1161 2. For the rest, alt_patt will be used.
1163 When -mtune= isn't used, alt_patt will be used if
1164 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1167 When -march= or .arch is used, we can't use anything beyond
1168 cpu_arch_isa_flags. */
1170 if (flag_code
== CODE_16BIT
)
1174 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1176 /* Adjust jump offset. */
1177 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1180 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1181 f16_patt
[count
- 1], count
);
1185 const char *const *patt
= NULL
;
1187 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1189 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1190 switch (cpu_arch_tune
)
1192 case PROCESSOR_UNKNOWN
:
1193 /* We use cpu_arch_isa_flags to check if we SHOULD
1194 optimize with nops. */
1195 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1200 case PROCESSOR_PENTIUM4
:
1201 case PROCESSOR_NOCONA
:
1202 case PROCESSOR_CORE
:
1203 case PROCESSOR_CORE2
:
1204 case PROCESSOR_COREI7
:
1205 case PROCESSOR_L1OM
:
1206 case PROCESSOR_K1OM
:
1207 case PROCESSOR_GENERIC64
:
1209 case PROCESSOR_ATHLON
:
1211 case PROCESSOR_AMDFAM10
:
1213 case PROCESSOR_ZNVER
:
1217 case PROCESSOR_I386
:
1218 case PROCESSOR_I486
:
1219 case PROCESSOR_PENTIUM
:
1220 case PROCESSOR_PENTIUMPRO
:
1221 case PROCESSOR_GENERIC32
:
1228 switch (fragP
->tc_frag_data
.tune
)
1230 case PROCESSOR_UNKNOWN
:
1231 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1232 PROCESSOR_UNKNOWN. */
1236 case PROCESSOR_I386
:
1237 case PROCESSOR_I486
:
1238 case PROCESSOR_PENTIUM
:
1240 case PROCESSOR_ATHLON
:
1242 case PROCESSOR_AMDFAM10
:
1244 case PROCESSOR_ZNVER
:
1246 case PROCESSOR_GENERIC32
:
1247 /* We use cpu_arch_isa_flags to check if we CAN optimize
1249 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1254 case PROCESSOR_PENTIUMPRO
:
1255 case PROCESSOR_PENTIUM4
:
1256 case PROCESSOR_NOCONA
:
1257 case PROCESSOR_CORE
:
1258 case PROCESSOR_CORE2
:
1259 case PROCESSOR_COREI7
:
1260 case PROCESSOR_L1OM
:
1261 case PROCESSOR_K1OM
:
1262 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1267 case PROCESSOR_GENERIC64
:
1273 if (patt
== f32_patt
)
1275 /* If the padding is less than 15 bytes, we use the normal
1276 ones. Otherwise, we use a jump instruction and adjust
1280 /* For 64bit, the limit is 3 bytes. */
1281 if (flag_code
== CODE_64BIT
1282 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1287 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1288 patt
[count
- 1], count
);
1291 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1293 /* Adjust jump offset. */
1294 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1299 /* Maximum length of an instruction is 10 byte. If the
1300 padding is greater than 10 bytes and we don't use jump,
1301 we have to break it into smaller pieces. */
1302 int padding
= count
;
1303 while (padding
> 10)
1306 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1311 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1312 patt
[padding
- 1], padding
);
1315 fragP
->fr_var
= count
;
1319 operand_type_all_zero (const union i386_operand_type
*x
)
1321 switch (ARRAY_SIZE(x
->array
))
1330 return !x
->array
[0];
1337 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1339 switch (ARRAY_SIZE(x
->array
))
1354 operand_type_equal (const union i386_operand_type
*x
,
1355 const union i386_operand_type
*y
)
1357 switch (ARRAY_SIZE(x
->array
))
1360 if (x
->array
[2] != y
->array
[2])
1363 if (x
->array
[1] != y
->array
[1])
1366 return x
->array
[0] == y
->array
[0];
1374 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1376 switch (ARRAY_SIZE(x
->array
))
1385 return !x
->array
[0];
1392 cpu_flags_equal (const union i386_cpu_flags
*x
,
1393 const union i386_cpu_flags
*y
)
1395 switch (ARRAY_SIZE(x
->array
))
1398 if (x
->array
[2] != y
->array
[2])
1401 if (x
->array
[1] != y
->array
[1])
1404 return x
->array
[0] == y
->array
[0];
1412 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1414 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1415 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1418 static INLINE i386_cpu_flags
1419 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1421 switch (ARRAY_SIZE (x
.array
))
1424 x
.array
[2] &= y
.array
[2];
1426 x
.array
[1] &= y
.array
[1];
1428 x
.array
[0] &= y
.array
[0];
1436 static INLINE i386_cpu_flags
1437 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1439 switch (ARRAY_SIZE (x
.array
))
1442 x
.array
[2] |= y
.array
[2];
1444 x
.array
[1] |= y
.array
[1];
1446 x
.array
[0] |= y
.array
[0];
1454 static INLINE i386_cpu_flags
1455 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1457 switch (ARRAY_SIZE (x
.array
))
1460 x
.array
[2] &= ~y
.array
[2];
1462 x
.array
[1] &= ~y
.array
[1];
1464 x
.array
[0] &= ~y
.array
[0];
1472 #define CPU_FLAGS_ARCH_MATCH 0x1
1473 #define CPU_FLAGS_64BIT_MATCH 0x2
1474 #define CPU_FLAGS_AES_MATCH 0x4
1475 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1476 #define CPU_FLAGS_AVX_MATCH 0x10
1478 #define CPU_FLAGS_32BIT_MATCH \
1479 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1480 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1481 #define CPU_FLAGS_PERFECT_MATCH \
1482 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1484 /* Return CPU flags match bits. */
1487 cpu_flags_match (const insn_template
*t
)
1489 i386_cpu_flags x
= t
->cpu_flags
;
1490 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1492 x
.bitfield
.cpu64
= 0;
1493 x
.bitfield
.cpuno64
= 0;
1495 if (cpu_flags_all_zero (&x
))
1497 /* This instruction is available on all archs. */
1498 match
|= CPU_FLAGS_32BIT_MATCH
;
1502 /* This instruction is available only on some archs. */
1503 i386_cpu_flags cpu
= cpu_arch_flags
;
1505 cpu
.bitfield
.cpu64
= 0;
1506 cpu
.bitfield
.cpuno64
= 0;
1507 cpu
= cpu_flags_and (x
, cpu
);
1508 if (!cpu_flags_all_zero (&cpu
))
1510 if (x
.bitfield
.cpuavx
)
1512 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1513 if (cpu
.bitfield
.cpuavx
)
1515 /* Check SSE2AVX. */
1516 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1518 match
|= (CPU_FLAGS_ARCH_MATCH
1519 | CPU_FLAGS_AVX_MATCH
);
1521 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1522 match
|= CPU_FLAGS_AES_MATCH
;
1524 if (!x
.bitfield
.cpupclmul
1525 || cpu
.bitfield
.cpupclmul
)
1526 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1530 match
|= CPU_FLAGS_ARCH_MATCH
;
1533 match
|= CPU_FLAGS_32BIT_MATCH
;
1539 static INLINE i386_operand_type
1540 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1542 switch (ARRAY_SIZE (x
.array
))
1545 x
.array
[2] &= y
.array
[2];
1547 x
.array
[1] &= y
.array
[1];
1549 x
.array
[0] &= y
.array
[0];
1557 static INLINE i386_operand_type
1558 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1560 switch (ARRAY_SIZE (x
.array
))
1563 x
.array
[2] |= y
.array
[2];
1565 x
.array
[1] |= y
.array
[1];
1567 x
.array
[0] |= y
.array
[0];
1575 static INLINE i386_operand_type
1576 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1578 switch (ARRAY_SIZE (x
.array
))
1581 x
.array
[2] ^= y
.array
[2];
1583 x
.array
[1] ^= y
.array
[1];
1585 x
.array
[0] ^= y
.array
[0];
1593 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1594 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1595 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1596 static const i386_operand_type inoutportreg
1597 = OPERAND_TYPE_INOUTPORTREG
;
1598 static const i386_operand_type reg16_inoutportreg
1599 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1600 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1601 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1602 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1603 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1604 static const i386_operand_type anydisp
1605 = OPERAND_TYPE_ANYDISP
;
1606 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1607 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1608 static const i386_operand_type regzmm
= OPERAND_TYPE_REGZMM
;
1609 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1610 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1611 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1612 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1613 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1614 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1615 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1616 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1617 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1618 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1619 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1630 operand_type_check (i386_operand_type t
, enum operand_type c
)
1635 return (t
.bitfield
.reg8
1638 || t
.bitfield
.reg64
);
1641 return (t
.bitfield
.imm8
1645 || t
.bitfield
.imm32s
1646 || t
.bitfield
.imm64
);
1649 return (t
.bitfield
.disp8
1650 || t
.bitfield
.disp16
1651 || t
.bitfield
.disp32
1652 || t
.bitfield
.disp32s
1653 || t
.bitfield
.disp64
);
1656 return (t
.bitfield
.disp8
1657 || t
.bitfield
.disp16
1658 || t
.bitfield
.disp32
1659 || t
.bitfield
.disp32s
1660 || t
.bitfield
.disp64
1661 || t
.bitfield
.baseindex
);
1670 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1671 operand J for instruction template T. */
1674 match_reg_size (const insn_template
*t
, unsigned int j
)
1676 return !((i
.types
[j
].bitfield
.byte
1677 && !t
->operand_types
[j
].bitfield
.byte
)
1678 || (i
.types
[j
].bitfield
.word
1679 && !t
->operand_types
[j
].bitfield
.word
)
1680 || (i
.types
[j
].bitfield
.dword
1681 && !t
->operand_types
[j
].bitfield
.dword
)
1682 || (i
.types
[j
].bitfield
.qword
1683 && !t
->operand_types
[j
].bitfield
.qword
));
1686 /* Return 1 if there is no conflict in any size on operand J for
1687 instruction template T. */
1690 match_mem_size (const insn_template
*t
, unsigned int j
)
1692 return (match_reg_size (t
, j
)
1693 && !((i
.types
[j
].bitfield
.unspecified
1695 && !t
->operand_types
[j
].bitfield
.unspecified
)
1696 || (i
.types
[j
].bitfield
.fword
1697 && !t
->operand_types
[j
].bitfield
.fword
)
1698 || (i
.types
[j
].bitfield
.tbyte
1699 && !t
->operand_types
[j
].bitfield
.tbyte
)
1700 || (i
.types
[j
].bitfield
.xmmword
1701 && !t
->operand_types
[j
].bitfield
.xmmword
)
1702 || (i
.types
[j
].bitfield
.ymmword
1703 && !t
->operand_types
[j
].bitfield
.ymmword
)
1704 || (i
.types
[j
].bitfield
.zmmword
1705 && !t
->operand_types
[j
].bitfield
.zmmword
)));
1708 /* Return 1 if there is no size conflict on any operands for
1709 instruction template T. */
1712 operand_size_match (const insn_template
*t
)
1717 /* Don't check jump instructions. */
1718 if (t
->opcode_modifier
.jump
1719 || t
->opcode_modifier
.jumpbyte
1720 || t
->opcode_modifier
.jumpdword
1721 || t
->opcode_modifier
.jumpintersegment
)
1724 /* Check memory and accumulator operand size. */
1725 for (j
= 0; j
< i
.operands
; j
++)
1727 if (t
->operand_types
[j
].bitfield
.anysize
)
1730 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1736 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1745 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1748 i
.error
= operand_size_mismatch
;
1752 /* Check reverse. */
1753 gas_assert (i
.operands
== 2);
1756 for (j
= 0; j
< 2; j
++)
1758 if (t
->operand_types
[j
].bitfield
.acc
1759 && !match_reg_size (t
, j
? 0 : 1))
1762 if (i
.types
[j
].bitfield
.mem
1763 && !match_mem_size (t
, j
? 0 : 1))
1771 operand_type_match (i386_operand_type overlap
,
1772 i386_operand_type given
)
1774 i386_operand_type temp
= overlap
;
1776 temp
.bitfield
.jumpabsolute
= 0;
1777 temp
.bitfield
.unspecified
= 0;
1778 temp
.bitfield
.byte
= 0;
1779 temp
.bitfield
.word
= 0;
1780 temp
.bitfield
.dword
= 0;
1781 temp
.bitfield
.fword
= 0;
1782 temp
.bitfield
.qword
= 0;
1783 temp
.bitfield
.tbyte
= 0;
1784 temp
.bitfield
.xmmword
= 0;
1785 temp
.bitfield
.ymmword
= 0;
1786 temp
.bitfield
.zmmword
= 0;
1787 if (operand_type_all_zero (&temp
))
1790 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1791 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1795 i
.error
= operand_type_mismatch
;
1799 /* If given types g0 and g1 are registers they must be of the same type
1800 unless the expected operand type register overlap is null.
1801 Note that Acc in a template matches every size of reg. */
1804 operand_type_register_match (i386_operand_type m0
,
1805 i386_operand_type g0
,
1806 i386_operand_type t0
,
1807 i386_operand_type m1
,
1808 i386_operand_type g1
,
1809 i386_operand_type t1
)
1811 if (!operand_type_check (g0
, reg
))
1814 if (!operand_type_check (g1
, reg
))
1817 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1818 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1819 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1820 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1823 if (m0
.bitfield
.acc
)
1825 t0
.bitfield
.reg8
= 1;
1826 t0
.bitfield
.reg16
= 1;
1827 t0
.bitfield
.reg32
= 1;
1828 t0
.bitfield
.reg64
= 1;
1831 if (m1
.bitfield
.acc
)
1833 t1
.bitfield
.reg8
= 1;
1834 t1
.bitfield
.reg16
= 1;
1835 t1
.bitfield
.reg32
= 1;
1836 t1
.bitfield
.reg64
= 1;
1839 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1840 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1841 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1842 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1845 i
.error
= register_type_mismatch
;
1850 static INLINE
unsigned int
1851 register_number (const reg_entry
*r
)
1853 unsigned int nr
= r
->reg_num
;
1855 if (r
->reg_flags
& RegRex
)
1861 static INLINE
unsigned int
1862 mode_from_disp_size (i386_operand_type t
)
1864 if (t
.bitfield
.disp8
|| t
.bitfield
.vec_disp8
)
1866 else if (t
.bitfield
.disp16
1867 || t
.bitfield
.disp32
1868 || t
.bitfield
.disp32s
)
1875 fits_in_signed_byte (addressT num
)
1877 return num
+ 0x80 <= 0xff;
1881 fits_in_unsigned_byte (addressT num
)
1887 fits_in_unsigned_word (addressT num
)
1889 return num
<= 0xffff;
1893 fits_in_signed_word (addressT num
)
1895 return num
+ 0x8000 <= 0xffff;
1899 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
1904 return num
+ 0x80000000 <= 0xffffffff;
1906 } /* fits_in_signed_long() */
1909 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
1914 return num
<= 0xffffffff;
1916 } /* fits_in_unsigned_long() */
1919 fits_in_vec_disp8 (offsetT num
)
1921 int shift
= i
.memshift
;
1927 mask
= (1 << shift
) - 1;
1929 /* Return 0 if NUM isn't properly aligned. */
1933 /* Check if NUM will fit in 8bit after shift. */
1934 return fits_in_signed_byte (num
>> shift
);
1938 fits_in_imm4 (offsetT num
)
1940 return (num
& 0xf) == num
;
1943 static i386_operand_type
1944 smallest_imm_type (offsetT num
)
1946 i386_operand_type t
;
1948 operand_type_set (&t
, 0);
1949 t
.bitfield
.imm64
= 1;
1951 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
1953 /* This code is disabled on the 486 because all the Imm1 forms
1954 in the opcode table are slower on the i486. They're the
1955 versions with the implicitly specified single-position
1956 displacement, which has another syntax if you really want to
1958 t
.bitfield
.imm1
= 1;
1959 t
.bitfield
.imm8
= 1;
1960 t
.bitfield
.imm8s
= 1;
1961 t
.bitfield
.imm16
= 1;
1962 t
.bitfield
.imm32
= 1;
1963 t
.bitfield
.imm32s
= 1;
1965 else if (fits_in_signed_byte (num
))
1967 t
.bitfield
.imm8
= 1;
1968 t
.bitfield
.imm8s
= 1;
1969 t
.bitfield
.imm16
= 1;
1970 t
.bitfield
.imm32
= 1;
1971 t
.bitfield
.imm32s
= 1;
1973 else if (fits_in_unsigned_byte (num
))
1975 t
.bitfield
.imm8
= 1;
1976 t
.bitfield
.imm16
= 1;
1977 t
.bitfield
.imm32
= 1;
1978 t
.bitfield
.imm32s
= 1;
1980 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
1982 t
.bitfield
.imm16
= 1;
1983 t
.bitfield
.imm32
= 1;
1984 t
.bitfield
.imm32s
= 1;
1986 else if (fits_in_signed_long (num
))
1988 t
.bitfield
.imm32
= 1;
1989 t
.bitfield
.imm32s
= 1;
1991 else if (fits_in_unsigned_long (num
))
1992 t
.bitfield
.imm32
= 1;
1998 offset_in_range (offsetT val
, int size
)
2004 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2005 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2006 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2008 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2014 /* If BFD64, sign extend val for 32bit address mode. */
2015 if (flag_code
!= CODE_64BIT
2016 || i
.prefix
[ADDR_PREFIX
])
2017 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2018 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2021 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2023 char buf1
[40], buf2
[40];
2025 sprint_value (buf1
, val
);
2026 sprint_value (buf2
, val
& mask
);
2027 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2041 a. PREFIX_EXIST if attempting to add a prefix where one from the
2042 same class already exists.
2043 b. PREFIX_LOCK if lock prefix is added.
2044 c. PREFIX_REP if rep/repne prefix is added.
2045 d. PREFIX_OTHER if other prefix is added.
2048 static enum PREFIX_GROUP
2049 add_prefix (unsigned int prefix
)
2051 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2054 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2055 && flag_code
== CODE_64BIT
)
2057 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2058 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2059 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2070 case CS_PREFIX_OPCODE
:
2071 case DS_PREFIX_OPCODE
:
2072 case ES_PREFIX_OPCODE
:
2073 case FS_PREFIX_OPCODE
:
2074 case GS_PREFIX_OPCODE
:
2075 case SS_PREFIX_OPCODE
:
2079 case REPNE_PREFIX_OPCODE
:
2080 case REPE_PREFIX_OPCODE
:
2085 case LOCK_PREFIX_OPCODE
:
2094 case ADDR_PREFIX_OPCODE
:
2098 case DATA_PREFIX_OPCODE
:
2102 if (i
.prefix
[q
] != 0)
2110 i
.prefix
[q
] |= prefix
;
2113 as_bad (_("same type of prefix used twice"));
2119 update_code_flag (int value
, int check
)
2121 PRINTF_LIKE ((*as_error
));
2123 flag_code
= (enum flag_code
) value
;
2124 if (flag_code
== CODE_64BIT
)
2126 cpu_arch_flags
.bitfield
.cpu64
= 1;
2127 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2131 cpu_arch_flags
.bitfield
.cpu64
= 0;
2132 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2134 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2137 as_error
= as_fatal
;
2140 (*as_error
) (_("64bit mode not supported on `%s'."),
2141 cpu_arch_name
? cpu_arch_name
: default_arch
);
2143 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2146 as_error
= as_fatal
;
2149 (*as_error
) (_("32bit mode not supported on `%s'."),
2150 cpu_arch_name
? cpu_arch_name
: default_arch
);
2152 stackop_size
= '\0';
/* .code16/.code32/.code64 directive handler: switch code size without
   treating an unsupported mode as fatal.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2162 set_16bit_gcc_code_flag (int new_code_flag
)
2164 flag_code
= (enum flag_code
) new_code_flag
;
2165 if (flag_code
!= CODE_16BIT
)
2167 cpu_arch_flags
.bitfield
.cpu64
= 0;
2168 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2169 stackop_size
= LONG_MNEM_SUFFIX
;
2173 set_intel_syntax (int syntax_flag
)
2175 /* Find out if register prefixing is specified. */
2176 int ask_naked_reg
= 0;
2179 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2181 char *string
= input_line_pointer
;
2182 int e
= get_symbol_end ();
2184 if (strcmp (string
, "prefix") == 0)
2186 else if (strcmp (string
, "noprefix") == 0)
2189 as_bad (_("bad argument to syntax directive."));
2190 *input_line_pointer
= e
;
2192 demand_empty_rest_of_line ();
2194 intel_syntax
= syntax_flag
;
2196 if (ask_naked_reg
== 0)
2197 allow_naked_reg
= (intel_syntax
2198 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2200 allow_naked_reg
= (ask_naked_reg
< 0);
2202 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2204 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2205 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2206 register_prefix
= allow_naked_reg
? "" : "%";
2210 set_intel_mnemonic (int mnemonic_flag
)
2212 intel_mnemonic
= mnemonic_flag
;
2216 set_allow_index_reg (int flag
)
2218 allow_index_reg
= flag
;
2222 set_check (int what
)
2224 enum check_kind
*kind
;
2229 kind
= &operand_check
;
2240 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2242 char *string
= input_line_pointer
;
2243 int e
= get_symbol_end ();
2245 if (strcmp (string
, "none") == 0)
2247 else if (strcmp (string
, "warning") == 0)
2248 *kind
= check_warning
;
2249 else if (strcmp (string
, "error") == 0)
2250 *kind
= check_error
;
2252 as_bad (_("bad argument to %s_check directive."), str
);
2253 *input_line_pointer
= e
;
2256 as_bad (_("missing argument for %s_check directive"), str
);
2258 demand_empty_rest_of_line ();
2262 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2263 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2265 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2266 static const char *arch
;
2268 /* Intel LIOM is only supported on ELF. */
2274 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2275 use default_arch. */
2276 arch
= cpu_arch_name
;
2278 arch
= default_arch
;
2281 /* If we are targeting Intel L1OM, we must enable it. */
2282 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2283 || new_flag
.bitfield
.cpul1om
)
2286 /* If we are targeting Intel K1OM, we must enable it. */
2287 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2288 || new_flag
.bitfield
.cpuk1om
)
2291 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2296 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2300 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2302 char *string
= input_line_pointer
;
2303 int e
= get_symbol_end ();
2305 i386_cpu_flags flags
;
2307 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2309 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2311 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2315 cpu_arch_name
= cpu_arch
[j
].name
;
2316 cpu_sub_arch_name
= NULL
;
2317 cpu_arch_flags
= cpu_arch
[j
].flags
;
2318 if (flag_code
== CODE_64BIT
)
2320 cpu_arch_flags
.bitfield
.cpu64
= 1;
2321 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2325 cpu_arch_flags
.bitfield
.cpu64
= 0;
2326 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2328 cpu_arch_isa
= cpu_arch
[j
].type
;
2329 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2330 if (!cpu_arch_tune_set
)
2332 cpu_arch_tune
= cpu_arch_isa
;
2333 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2338 if (!cpu_arch
[j
].negated
)
2339 flags
= cpu_flags_or (cpu_arch_flags
,
2342 flags
= cpu_flags_and_not (cpu_arch_flags
,
2344 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2346 if (cpu_sub_arch_name
)
2348 char *name
= cpu_sub_arch_name
;
2349 cpu_sub_arch_name
= concat (name
,
2351 (const char *) NULL
);
2355 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2356 cpu_arch_flags
= flags
;
2357 cpu_arch_isa_flags
= flags
;
2359 *input_line_pointer
= e
;
2360 demand_empty_rest_of_line ();
2364 if (j
>= ARRAY_SIZE (cpu_arch
))
2365 as_bad (_("no such architecture: `%s'"), string
);
2367 *input_line_pointer
= e
;
2370 as_bad (_("missing cpu architecture"));
2372 no_cond_jump_promotion
= 0;
2373 if (*input_line_pointer
== ','
2374 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2376 char *string
= ++input_line_pointer
;
2377 int e
= get_symbol_end ();
2379 if (strcmp (string
, "nojumps") == 0)
2380 no_cond_jump_promotion
= 1;
2381 else if (strcmp (string
, "jumps") == 0)
2384 as_bad (_("no such architecture modifier: `%s'"), string
);
2386 *input_line_pointer
= e
;
2389 demand_empty_rest_of_line ();
2392 enum bfd_architecture
2395 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2397 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2398 || flag_code
!= CODE_64BIT
)
2399 as_fatal (_("Intel L1OM is 64bit ELF only"));
2400 return bfd_arch_l1om
;
2402 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2404 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2405 || flag_code
!= CODE_64BIT
)
2406 as_fatal (_("Intel K1OM is 64bit ELF only"));
2407 return bfd_arch_k1om
;
2410 return bfd_arch_i386
;
2416 if (!strncmp (default_arch
, "x86_64", 6))
2418 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2420 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2421 || default_arch
[6] != '\0')
2422 as_fatal (_("Intel L1OM is 64bit ELF only"));
2423 return bfd_mach_l1om
;
2425 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2427 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2428 || default_arch
[6] != '\0')
2429 as_fatal (_("Intel K1OM is 64bit ELF only"));
2430 return bfd_mach_k1om
;
2432 else if (default_arch
[6] == '\0')
2433 return bfd_mach_x86_64
;
2435 return bfd_mach_x64_32
;
2437 else if (!strcmp (default_arch
, "i386"))
2438 return bfd_mach_i386_i386
;
2440 as_fatal (_("unknown architecture"));
2446 const char *hash_err
;
2448 /* Initialize op_hash hash table. */
2449 op_hash
= hash_new ();
2452 const insn_template
*optab
;
2453 templates
*core_optab
;
2455 /* Setup for loop. */
2457 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2458 core_optab
->start
= optab
;
2463 if (optab
->name
== NULL
2464 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2466 /* different name --> ship out current template list;
2467 add to hash table; & begin anew. */
2468 core_optab
->end
= optab
;
2469 hash_err
= hash_insert (op_hash
,
2471 (void *) core_optab
);
2474 as_fatal (_("can't hash %s: %s"),
2478 if (optab
->name
== NULL
)
2480 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2481 core_optab
->start
= optab
;
2486 /* Initialize reg_hash hash table. */
2487 reg_hash
= hash_new ();
2489 const reg_entry
*regtab
;
2490 unsigned int regtab_size
= i386_regtab_size
;
2492 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2494 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2496 as_fatal (_("can't hash %s: %s"),
2502 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2507 for (c
= 0; c
< 256; c
++)
2512 mnemonic_chars
[c
] = c
;
2513 register_chars
[c
] = c
;
2514 operand_chars
[c
] = c
;
2516 else if (ISLOWER (c
))
2518 mnemonic_chars
[c
] = c
;
2519 register_chars
[c
] = c
;
2520 operand_chars
[c
] = c
;
2522 else if (ISUPPER (c
))
2524 mnemonic_chars
[c
] = TOLOWER (c
);
2525 register_chars
[c
] = mnemonic_chars
[c
];
2526 operand_chars
[c
] = c
;
2528 else if (c
== '{' || c
== '}')
2529 operand_chars
[c
] = c
;
2531 if (ISALPHA (c
) || ISDIGIT (c
))
2532 identifier_chars
[c
] = c
;
2535 identifier_chars
[c
] = c
;
2536 operand_chars
[c
] = c
;
2541 identifier_chars
['@'] = '@';
2544 identifier_chars
['?'] = '?';
2545 operand_chars
['?'] = '?';
2547 digit_chars
['-'] = '-';
2548 mnemonic_chars
['_'] = '_';
2549 mnemonic_chars
['-'] = '-';
2550 mnemonic_chars
['.'] = '.';
2551 identifier_chars
['_'] = '_';
2552 identifier_chars
['.'] = '.';
2554 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2555 operand_chars
[(unsigned char) *p
] = *p
;
2558 if (flag_code
== CODE_64BIT
)
2560 #if defined (OBJ_COFF) && defined (TE_PE)
2561 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2564 x86_dwarf2_return_column
= 16;
2566 x86_cie_data_alignment
= -8;
2570 x86_dwarf2_return_column
= 8;
2571 x86_cie_data_alignment
= -4;
2576 i386_print_statistics (FILE *file
)
2578 hash_print_statistics (file
, "i386 opcode", op_hash
);
2579 hash_print_statistics (file
, "i386 register", reg_hash
);
2584 /* Debugging routines for md_assemble. */
2585 static void pte (insn_template
*);
2586 static void pt (i386_operand_type
);
2587 static void pe (expressionS
*);
2588 static void ps (symbolS
*);
2591 pi (char *line
, i386_insn
*x
)
2595 fprintf (stdout
, "%s: template ", line
);
2597 fprintf (stdout
, " address: base %s index %s scale %x\n",
2598 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2599 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2600 x
->log2_scale_factor
);
2601 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2602 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2603 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2604 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2605 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2606 (x
->rex
& REX_W
) != 0,
2607 (x
->rex
& REX_R
) != 0,
2608 (x
->rex
& REX_X
) != 0,
2609 (x
->rex
& REX_B
) != 0);
2610 for (j
= 0; j
< x
->operands
; j
++)
2612 fprintf (stdout
, " #%d: ", j
+ 1);
2614 fprintf (stdout
, "\n");
2615 if (x
->types
[j
].bitfield
.reg8
2616 || x
->types
[j
].bitfield
.reg16
2617 || x
->types
[j
].bitfield
.reg32
2618 || x
->types
[j
].bitfield
.reg64
2619 || x
->types
[j
].bitfield
.regmmx
2620 || x
->types
[j
].bitfield
.regxmm
2621 || x
->types
[j
].bitfield
.regymm
2622 || x
->types
[j
].bitfield
.regzmm
2623 || x
->types
[j
].bitfield
.sreg2
2624 || x
->types
[j
].bitfield
.sreg3
2625 || x
->types
[j
].bitfield
.control
2626 || x
->types
[j
].bitfield
.debug
2627 || x
->types
[j
].bitfield
.test
)
2628 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2629 if (operand_type_check (x
->types
[j
], imm
))
2631 if (operand_type_check (x
->types
[j
], disp
))
2632 pe (x
->op
[j
].disps
);
2637 pte (insn_template
*t
)
2640 fprintf (stdout
, " %d operands ", t
->operands
);
2641 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2642 if (t
->extension_opcode
!= None
)
2643 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2644 if (t
->opcode_modifier
.d
)
2645 fprintf (stdout
, "D");
2646 if (t
->opcode_modifier
.w
)
2647 fprintf (stdout
, "W");
2648 fprintf (stdout
, "\n");
2649 for (j
= 0; j
< t
->operands
; j
++)
2651 fprintf (stdout
, " #%d type ", j
+ 1);
2652 pt (t
->operand_types
[j
]);
2653 fprintf (stdout
, "\n");
2660 fprintf (stdout
, " operation %d\n", e
->X_op
);
2661 fprintf (stdout
, " add_number %ld (%lx)\n",
2662 (long) e
->X_add_number
, (long) e
->X_add_number
);
2663 if (e
->X_add_symbol
)
2665 fprintf (stdout
, " add_symbol ");
2666 ps (e
->X_add_symbol
);
2667 fprintf (stdout
, "\n");
2671 fprintf (stdout
, " op_symbol ");
2672 ps (e
->X_op_symbol
);
2673 fprintf (stdout
, "\n");
2680 fprintf (stdout
, "%s type %s%s",
2682 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2683 segment_name (S_GET_SEGMENT (s
)));
2686 static struct type_name
2688 i386_operand_type mask
;
2691 const type_names
[] =
2693 { OPERAND_TYPE_REG8
, "r8" },
2694 { OPERAND_TYPE_REG16
, "r16" },
2695 { OPERAND_TYPE_REG32
, "r32" },
2696 { OPERAND_TYPE_REG64
, "r64" },
2697 { OPERAND_TYPE_IMM8
, "i8" },
2698 { OPERAND_TYPE_IMM8
, "i8s" },
2699 { OPERAND_TYPE_IMM16
, "i16" },
2700 { OPERAND_TYPE_IMM32
, "i32" },
2701 { OPERAND_TYPE_IMM32S
, "i32s" },
2702 { OPERAND_TYPE_IMM64
, "i64" },
2703 { OPERAND_TYPE_IMM1
, "i1" },
2704 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2705 { OPERAND_TYPE_DISP8
, "d8" },
2706 { OPERAND_TYPE_DISP16
, "d16" },
2707 { OPERAND_TYPE_DISP32
, "d32" },
2708 { OPERAND_TYPE_DISP32S
, "d32s" },
2709 { OPERAND_TYPE_DISP64
, "d64" },
2710 { OPERAND_TYPE_VEC_DISP8
, "Vector d8" },
2711 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2712 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2713 { OPERAND_TYPE_CONTROL
, "control reg" },
2714 { OPERAND_TYPE_TEST
, "test reg" },
2715 { OPERAND_TYPE_DEBUG
, "debug reg" },
2716 { OPERAND_TYPE_FLOATREG
, "FReg" },
2717 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2718 { OPERAND_TYPE_SREG2
, "SReg2" },
2719 { OPERAND_TYPE_SREG3
, "SReg3" },
2720 { OPERAND_TYPE_ACC
, "Acc" },
2721 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2722 { OPERAND_TYPE_REGMMX
, "rMMX" },
2723 { OPERAND_TYPE_REGXMM
, "rXMM" },
2724 { OPERAND_TYPE_REGYMM
, "rYMM" },
2725 { OPERAND_TYPE_REGZMM
, "rZMM" },
2726 { OPERAND_TYPE_REGMASK
, "Mask reg" },
2727 { OPERAND_TYPE_ESSEG
, "es" },
2731 pt (i386_operand_type t
)
2734 i386_operand_type a
;
2736 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2738 a
= operand_type_and (t
, type_names
[j
].mask
);
2739 if (!operand_type_all_zero (&a
))
2740 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2745 #endif /* DEBUG386 */
2747 static bfd_reloc_code_real_type
2748 reloc (unsigned int size
,
2751 bfd_reloc_code_real_type other
)
2753 if (other
!= NO_RELOC
)
2755 reloc_howto_type
*rel
;
2760 case BFD_RELOC_X86_64_GOT32
:
2761 return BFD_RELOC_X86_64_GOT64
;
2763 case BFD_RELOC_X86_64_GOTPLT64
:
2764 return BFD_RELOC_X86_64_GOTPLT64
;
2766 case BFD_RELOC_X86_64_PLTOFF64
:
2767 return BFD_RELOC_X86_64_PLTOFF64
;
2769 case BFD_RELOC_X86_64_GOTPC32
:
2770 other
= BFD_RELOC_X86_64_GOTPC64
;
2772 case BFD_RELOC_X86_64_GOTPCREL
:
2773 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2775 case BFD_RELOC_X86_64_TPOFF32
:
2776 other
= BFD_RELOC_X86_64_TPOFF64
;
2778 case BFD_RELOC_X86_64_DTPOFF32
:
2779 other
= BFD_RELOC_X86_64_DTPOFF64
;
2785 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2786 if (other
== BFD_RELOC_SIZE32
)
2789 other
= BFD_RELOC_SIZE64
;
2792 as_bad (_("there are no pc-relative size relocations"));
2798 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2799 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
2802 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
2804 as_bad (_("unknown relocation (%u)"), other
);
2805 else if (size
!= bfd_get_reloc_size (rel
))
2806 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2807 bfd_get_reloc_size (rel
),
2809 else if (pcrel
&& !rel
->pc_relative
)
2810 as_bad (_("non-pc-relative relocation for pc-relative field"));
2811 else if ((rel
->complain_on_overflow
== complain_overflow_signed
2813 || (rel
->complain_on_overflow
== complain_overflow_unsigned
2815 as_bad (_("relocated field and relocation type differ in signedness"));
2824 as_bad (_("there are no unsigned pc-relative relocations"));
2827 case 1: return BFD_RELOC_8_PCREL
;
2828 case 2: return BFD_RELOC_16_PCREL
;
2829 case 4: return BFD_RELOC_32_PCREL
;
2830 case 8: return BFD_RELOC_64_PCREL
;
2832 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
2839 case 4: return BFD_RELOC_X86_64_32S
;
2844 case 1: return BFD_RELOC_8
;
2845 case 2: return BFD_RELOC_16
;
2846 case 4: return BFD_RELOC_32
;
2847 case 8: return BFD_RELOC_64
;
2849 as_bad (_("cannot do %s %u byte relocation"),
2850 sign
> 0 ? "signed" : "unsigned", size
);
2856 /* Here we decide which fixups can be adjusted to make them relative to
2857 the beginning of the section instead of the symbol. Basically we need
2858 to make sure that the dynamic relocations are done correctly, so in
2859 some cases we force the original symbol to be used. */
2862 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
2864 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2868 /* Don't adjust pc-relative references to merge sections in 64-bit
2870 if (use_rela_relocations
2871 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
2875 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2876 and changed later by validate_fix. */
2877 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
2878 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
2881 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2882 for size relocations. */
2883 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
2884 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
2885 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
2886 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
2887 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
2888 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
2889 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
2890 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
2891 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
2892 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
2893 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
2894 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
2895 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
2896 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
2897 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
2898 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
2899 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
2900 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
2901 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
2902 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
2903 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
2904 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
2905 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
2906 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
2907 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
2908 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
2909 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
2910 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
2911 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
2912 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify an x87 mnemonic for Intel-syntax operand sizing:
   0 = not a float op (or fxsave/fxrstor), 1 = generic float op,
   2 = integer float op (fi*), 3 = control/status op (fldcw, fnstenv,
   fsave, frstor, fst{c,d,e,s}w, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
2971 /* Build the VEX prefix. */
2974 build_vex_prefix (const insn_template
*t
)
2976 unsigned int register_specifier
;
2977 unsigned int implied_prefix
;
2978 unsigned int vector_length
;
2980 /* Check register specifier. */
2981 if (i
.vex
.register_specifier
)
2983 register_specifier
=
2984 ~register_number (i
.vex
.register_specifier
) & 0xf;
2985 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
2988 register_specifier
= 0xf;
2990 /* Use 2-byte VEX prefix by swappping destination and source
2993 && i
.operands
== i
.reg_operands
2994 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
2995 && i
.tm
.opcode_modifier
.s
2998 unsigned int xchg
= i
.operands
- 1;
2999 union i386_op temp_op
;
3000 i386_operand_type temp_type
;
3002 temp_type
= i
.types
[xchg
];
3003 i
.types
[xchg
] = i
.types
[0];
3004 i
.types
[0] = temp_type
;
3005 temp_op
= i
.op
[xchg
];
3006 i
.op
[xchg
] = i
.op
[0];
3009 gas_assert (i
.rm
.mode
== 3);
3013 i
.rm
.regmem
= i
.rm
.reg
;
3016 /* Use the next insn. */
3020 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3021 vector_length
= avxscalar
;
3023 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
3025 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3030 case DATA_PREFIX_OPCODE
:
3033 case REPE_PREFIX_OPCODE
:
3036 case REPNE_PREFIX_OPCODE
:
3043 /* Use 2-byte VEX prefix if possible. */
3044 if (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3045 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
3046 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3048 /* 2-byte VEX prefix. */
3052 i
.vex
.bytes
[0] = 0xc5;
3054 /* Check the REX.R bit. */
3055 r
= (i
.rex
& REX_R
) ? 0 : 1;
3056 i
.vex
.bytes
[1] = (r
<< 7
3057 | register_specifier
<< 3
3058 | vector_length
<< 2
3063 /* 3-byte VEX prefix. */
3068 switch (i
.tm
.opcode_modifier
.vexopcode
)
3072 i
.vex
.bytes
[0] = 0xc4;
3076 i
.vex
.bytes
[0] = 0xc4;
3080 i
.vex
.bytes
[0] = 0xc4;
3084 i
.vex
.bytes
[0] = 0x8f;
3088 i
.vex
.bytes
[0] = 0x8f;
3092 i
.vex
.bytes
[0] = 0x8f;
3098 /* The high 3 bits of the second VEX byte are 1's compliment
3099 of RXB bits from REX. */
3100 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3102 /* Check the REX.W bit. */
3103 w
= (i
.rex
& REX_W
) ? 1 : 0;
3104 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3107 i
.vex
.bytes
[2] = (w
<< 7
3108 | register_specifier
<< 3
3109 | vector_length
<< 2
3114 /* Build the EVEX prefix. */
3117 build_evex_prefix (void)
3119 unsigned int register_specifier
;
3120 unsigned int implied_prefix
;
3122 rex_byte vrex_used
= 0;
3124 /* Check register specifier. */
3125 if (i
.vex
.register_specifier
)
3127 gas_assert ((i
.vrex
& REX_X
) == 0);
3129 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3130 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3131 register_specifier
+= 8;
3132 /* The upper 16 registers are encoded in the fourth byte of the
3134 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3135 i
.vex
.bytes
[3] = 0x8;
3136 register_specifier
= ~register_specifier
& 0xf;
3140 register_specifier
= 0xf;
3142 /* Encode upper 16 vector index register in the fourth byte of
3144 if (!(i
.vrex
& REX_X
))
3145 i
.vex
.bytes
[3] = 0x8;
3150 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3155 case DATA_PREFIX_OPCODE
:
3158 case REPE_PREFIX_OPCODE
:
3161 case REPNE_PREFIX_OPCODE
:
3168 /* 4 byte EVEX prefix. */
3170 i
.vex
.bytes
[0] = 0x62;
3173 switch (i
.tm
.opcode_modifier
.vexopcode
)
3189 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3191 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3193 /* The fifth bit of the second EVEX byte is 1's compliment of the
3194 REX_R bit in VREX. */
3195 if (!(i
.vrex
& REX_R
))
3196 i
.vex
.bytes
[1] |= 0x10;
3200 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3202 /* When all operands are registers, the REX_X bit in REX is not
3203 used. We reuse it to encode the upper 16 registers, which is
3204 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3205 as 1's compliment. */
3206 if ((i
.vrex
& REX_B
))
3209 i
.vex
.bytes
[1] &= ~0x40;
3213 /* EVEX instructions shouldn't need the REX prefix. */
3214 i
.vrex
&= ~vrex_used
;
3215 gas_assert (i
.vrex
== 0);
3217 /* Check the REX.W bit. */
3218 w
= (i
.rex
& REX_W
) ? 1 : 0;
3219 if (i
.tm
.opcode_modifier
.vexw
)
3221 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3224 /* If w is not set it means we are dealing with WIG instruction. */
3227 if (evexwig
== evexw1
)
3231 /* Encode the U bit. */
3232 implied_prefix
|= 0x4;
3234 /* The third byte of the EVEX prefix. */
3235 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3237 /* The fourth byte of the EVEX prefix. */
3238 /* The zeroing-masking bit. */
3239 if (i
.mask
&& i
.mask
->zeroing
)
3240 i
.vex
.bytes
[3] |= 0x80;
3242 /* Don't always set the broadcast bit if there is no RC. */
3245 /* Encode the vector length. */
3246 unsigned int vec_length
;
3248 switch (i
.tm
.opcode_modifier
.evex
)
3250 case EVEXLIG
: /* LL' is ignored */
3251 vec_length
= evexlig
<< 5;
3254 vec_length
= 0 << 5;
3257 vec_length
= 1 << 5;
3260 vec_length
= 2 << 5;
3266 i
.vex
.bytes
[3] |= vec_length
;
3267 /* Encode the broadcast bit. */
3269 i
.vex
.bytes
[3] |= 0x10;
3273 if (i
.rounding
->type
!= saeonly
)
3274 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3276 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3279 if (i
.mask
&& i
.mask
->mask
)
3280 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3284 process_immext (void)
3288 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3291 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3292 with an opcode suffix which is coded in the same place as an
3293 8-bit immediate field would be.
3294 Here we check those operands and remove them afterwards. */
3297 for (x
= 0; x
< i
.operands
; x
++)
3298 if (register_number (i
.op
[x
].regs
) != x
)
3299 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3300 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3306 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3307 which is coded in the same place as an 8-bit immediate field
3308 would be. Here we fake an 8-bit immediate operand from the
3309 opcode suffix stored in tm.extension_opcode.
3311 AVX instructions also use this encoding, for some of
3312 3 argument instructions. */
3314 gas_assert (i
.imm_operands
<= 1
3316 || ((i
.tm
.opcode_modifier
.vex
3317 || i
.tm
.opcode_modifier
.evex
)
3318 && i
.operands
<= 4)));
3320 exp
= &im_expressions
[i
.imm_operands
++];
3321 i
.op
[i
.operands
].imms
= exp
;
3322 i
.types
[i
.operands
] = imm8
;
3324 exp
->X_op
= O_constant
;
3325 exp
->X_add_number
= i
.tm
.extension_opcode
;
3326 i
.tm
.extension_opcode
= None
;
3333 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3338 as_bad (_("invalid instruction `%s' after `%s'"),
3339 i
.tm
.name
, i
.hle_prefix
);
3342 if (i
.prefix
[LOCK_PREFIX
])
3344 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3348 case HLEPrefixRelease
:
3349 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3351 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3355 if (i
.mem_operands
== 0
3356 || !operand_type_check (i
.types
[i
.operands
- 1], anymem
))
3358 as_bad (_("memory destination needed for instruction `%s'"
3359 " after `xrelease'"), i
.tm
.name
);
3366 /* This is the guts of the machine-dependent assembler. LINE points to a
3367 machine dependent instruction. This function is supposed to emit
3368 the frags/bytes it assembles to. */
3371 md_assemble (char *line
)
3374 char mnemonic
[MAX_MNEM_SIZE
];
3375 const insn_template
*t
;
3377 /* Initialize globals. */
3378 memset (&i
, '\0', sizeof (i
));
3379 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3380 i
.reloc
[j
] = NO_RELOC
;
3381 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3382 memset (im_expressions
, '\0', sizeof (im_expressions
));
3383 save_stack_p
= save_stack
;
3385 /* First parse an instruction mnemonic & call i386_operand for the operands.
3386 We assume that the scrubber has arranged it so that line[0] is the valid
3387 start of a (possibly prefixed) mnemonic. */
3389 line
= parse_insn (line
, mnemonic
);
3393 line
= parse_operands (line
, mnemonic
);
3398 /* Now we've parsed the mnemonic into a set of templates, and have the
3399 operands at hand. */
3401 /* All intel opcodes have reversed operands except for "bound" and
3402 "enter". We also don't reverse intersegment "jmp" and "call"
3403 instructions with 2 immediate operands so that the immediate segment
3404 precedes the offset, as it does when in AT&T mode. */
3407 && (strcmp (mnemonic
, "bound") != 0)
3408 && (strcmp (mnemonic
, "invlpga") != 0)
3409 && !(operand_type_check (i
.types
[0], imm
)
3410 && operand_type_check (i
.types
[1], imm
)))
3413 /* The order of the immediates should be reversed
3414 for 2 immediates extrq and insertq instructions */
3415 if (i
.imm_operands
== 2
3416 && (strcmp (mnemonic
, "extrq") == 0
3417 || strcmp (mnemonic
, "insertq") == 0))
3418 swap_2_operands (0, 1);
3423 /* Don't optimize displacement for movabs since it only takes 64bit
3426 && i
.disp_encoding
!= disp_encoding_32bit
3427 && (flag_code
!= CODE_64BIT
3428 || strcmp (mnemonic
, "movabs") != 0))
3431 /* Next, we find a template that matches the given insn,
3432 making sure the overlap of the given operands types is consistent
3433 with the template operand types. */
3435 if (!(t
= match_template ()))
3438 if (sse_check
!= check_none
3439 && !i
.tm
.opcode_modifier
.noavx
3440 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3441 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3442 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3443 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3444 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3445 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3447 (sse_check
== check_warning
3449 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3452 /* Zap movzx and movsx suffix. The suffix has been set from
3453 "word ptr" or "byte ptr" on the source operand in Intel syntax
3454 or extracted from mnemonic in AT&T syntax. But we'll use
3455 the destination register to choose the suffix for encoding. */
3456 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3458 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3459 there is no suffix, the default will be byte extension. */
3460 if (i
.reg_operands
!= 2
3463 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3468 if (i
.tm
.opcode_modifier
.fwait
)
3469 if (!add_prefix (FWAIT_OPCODE
))
3472 /* Check if REP prefix is OK. */
3473 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
3475 as_bad (_("invalid instruction `%s' after `%s'"),
3476 i
.tm
.name
, i
.rep_prefix
);
3480 /* Check for lock without a lockable instruction. Destination operand
3481 must be memory unless it is xchg (0x86). */
3482 if (i
.prefix
[LOCK_PREFIX
]
3483 && (!i
.tm
.opcode_modifier
.islockable
3484 || i
.mem_operands
== 0
3485 || (i
.tm
.base_opcode
!= 0x86
3486 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3488 as_bad (_("expecting lockable instruction after `lock'"));
3492 /* Check if HLE prefix is OK. */
3493 if (i
.hle_prefix
&& !check_hle ())
3496 /* Check BND prefix. */
3497 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
3498 as_bad (_("expecting valid branch instruction after `bnd'"));
3500 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
3501 && flag_code
== CODE_64BIT
3502 && i
.prefix
[ADDR_PREFIX
])
3503 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3505 /* Insert BND prefix. */
3507 && i
.tm
.opcode_modifier
.bndprefixok
3508 && !i
.prefix
[BND_PREFIX
])
3509 add_prefix (BND_PREFIX_OPCODE
);
3511 /* Check string instruction segment overrides. */
3512 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3514 if (!check_string ())
3516 i
.disp_operands
= 0;
3519 if (!process_suffix ())
3522 /* Update operand types. */
3523 for (j
= 0; j
< i
.operands
; j
++)
3524 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3526 /* Make still unresolved immediate matches conform to size of immediate
3527 given in i.suffix. */
3528 if (!finalize_imm ())
3531 if (i
.types
[0].bitfield
.imm1
)
3532 i
.imm_operands
= 0; /* kludge for shift insns. */
3534 /* We only need to check those implicit registers for instructions
3535 with 3 operands or less. */
3536 if (i
.operands
<= 3)
3537 for (j
= 0; j
< i
.operands
; j
++)
3538 if (i
.types
[j
].bitfield
.inoutportreg
3539 || i
.types
[j
].bitfield
.shiftcount
3540 || i
.types
[j
].bitfield
.acc
3541 || i
.types
[j
].bitfield
.floatacc
)
3544 /* ImmExt should be processed after SSE2AVX. */
3545 if (!i
.tm
.opcode_modifier
.sse2avx
3546 && i
.tm
.opcode_modifier
.immext
)
3549 /* For insns with operands there are more diddles to do to the opcode. */
3552 if (!process_operands ())
3555 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3557 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3558 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3561 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
3563 if (flag_code
== CODE_16BIT
)
3565 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3570 if (i
.tm
.opcode_modifier
.vex
)
3571 build_vex_prefix (t
);
3573 build_evex_prefix ();
3576 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3577 instructions may define INT_OPCODE as well, so avoid this corner
3578 case for those instructions that use MODRM. */
3579 if (i
.tm
.base_opcode
== INT_OPCODE
3580 && !i
.tm
.opcode_modifier
.modrm
3581 && i
.op
[0].imms
->X_add_number
== 3)
3583 i
.tm
.base_opcode
= INT3_OPCODE
;
3587 if ((i
.tm
.opcode_modifier
.jump
3588 || i
.tm
.opcode_modifier
.jumpbyte
3589 || i
.tm
.opcode_modifier
.jumpdword
)
3590 && i
.op
[0].disps
->X_op
== O_constant
)
3592 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3593 the absolute address given by the constant. Since ix86 jumps and
3594 calls are pc relative, we need to generate a reloc. */
3595 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3596 i
.op
[0].disps
->X_op
= O_symbol
;
3599 if (i
.tm
.opcode_modifier
.rex64
)
3602 /* For 8 bit registers we need an empty rex prefix. Also if the
3603 instruction already has a prefix, we need to convert old
3604 registers to new ones. */
3606 if ((i
.types
[0].bitfield
.reg8
3607 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3608 || (i
.types
[1].bitfield
.reg8
3609 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3610 || ((i
.types
[0].bitfield
.reg8
3611 || i
.types
[1].bitfield
.reg8
)
3616 i
.rex
|= REX_OPCODE
;
3617 for (x
= 0; x
< 2; x
++)
3619 /* Look for 8 bit operand that uses old registers. */
3620 if (i
.types
[x
].bitfield
.reg8
3621 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3623 /* In case it is "hi" register, give up. */
3624 if (i
.op
[x
].regs
->reg_num
> 3)
3625 as_bad (_("can't encode register '%s%s' in an "
3626 "instruction requiring REX prefix."),
3627 register_prefix
, i
.op
[x
].regs
->reg_name
);
3629 /* Otherwise it is equivalent to the extended register.
3630 Since the encoding doesn't change this is merely
3631 cosmetic cleanup for debug output. */
3633 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3639 add_prefix (REX_OPCODE
| i
.rex
);
3641 /* We are ready to output the insn. */
3646 parse_insn (char *line
, char *mnemonic
)
3649 char *token_start
= l
;
3652 const insn_template
*t
;
3658 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3663 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3665 as_bad (_("no such instruction: `%s'"), token_start
);
3670 if (!is_space_char (*l
)
3671 && *l
!= END_OF_INSN
3673 || (*l
!= PREFIX_SEPARATOR
3676 as_bad (_("invalid character %s in mnemonic"),
3677 output_invalid (*l
));
3680 if (token_start
== l
)
3682 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3683 as_bad (_("expecting prefix; got nothing"));
3685 as_bad (_("expecting mnemonic; got nothing"));
3689 /* Look up instruction (or prefix) via hash table. */
3690 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3692 if (*l
!= END_OF_INSN
3693 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3694 && current_templates
3695 && current_templates
->start
->opcode_modifier
.isprefix
)
3697 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3699 as_bad ((flag_code
!= CODE_64BIT
3700 ? _("`%s' is only supported in 64-bit mode")
3701 : _("`%s' is not supported in 64-bit mode")),
3702 current_templates
->start
->name
);
3705 /* If we are in 16-bit mode, do not allow addr16 or data16.
3706 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3707 if ((current_templates
->start
->opcode_modifier
.size16
3708 || current_templates
->start
->opcode_modifier
.size32
)
3709 && flag_code
!= CODE_64BIT
3710 && (current_templates
->start
->opcode_modifier
.size32
3711 ^ (flag_code
== CODE_16BIT
)))
3713 as_bad (_("redundant %s prefix"),
3714 current_templates
->start
->name
);
3717 /* Add prefix, checking for repeated prefixes. */
3718 switch (add_prefix (current_templates
->start
->base_opcode
))
3723 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
3724 i
.hle_prefix
= current_templates
->start
->name
;
3725 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
3726 i
.bnd_prefix
= current_templates
->start
->name
;
3728 i
.rep_prefix
= current_templates
->start
->name
;
3733 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3740 if (!current_templates
)
3742 /* Check if we should swap operand or force 32bit displacement in
3744 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
3746 else if (mnem_p
- 3 == dot_p
3749 i
.disp_encoding
= disp_encoding_8bit
;
3750 else if (mnem_p
- 4 == dot_p
3754 i
.disp_encoding
= disp_encoding_32bit
;
3759 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3762 if (!current_templates
)
3765 /* See if we can get a match by trimming off a suffix. */
3768 case WORD_MNEM_SUFFIX
:
3769 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
3770 i
.suffix
= SHORT_MNEM_SUFFIX
;
3772 case BYTE_MNEM_SUFFIX
:
3773 case QWORD_MNEM_SUFFIX
:
3774 i
.suffix
= mnem_p
[-1];
3776 current_templates
= (const templates
*) hash_find (op_hash
,
3779 case SHORT_MNEM_SUFFIX
:
3780 case LONG_MNEM_SUFFIX
:
3783 i
.suffix
= mnem_p
[-1];
3785 current_templates
= (const templates
*) hash_find (op_hash
,
3794 if (intel_float_operand (mnemonic
) == 1)
3795 i
.suffix
= SHORT_MNEM_SUFFIX
;
3797 i
.suffix
= LONG_MNEM_SUFFIX
;
3799 current_templates
= (const templates
*) hash_find (op_hash
,
3804 if (!current_templates
)
3806 as_bad (_("no such instruction: `%s'"), token_start
);
3811 if (current_templates
->start
->opcode_modifier
.jump
3812 || current_templates
->start
->opcode_modifier
.jumpbyte
)
3814 /* Check for a branch hint. We allow ",pt" and ",pn" for
3815 predict taken and predict not taken respectively.
3816 I'm not sure that branch hints actually do anything on loop
3817 and jcxz insns (JumpByte) for current Pentium4 chips. They
3818 may work in the future and it doesn't hurt to accept them
3820 if (l
[0] == ',' && l
[1] == 'p')
3824 if (!add_prefix (DS_PREFIX_OPCODE
))
3828 else if (l
[2] == 'n')
3830 if (!add_prefix (CS_PREFIX_OPCODE
))
3836 /* Any other comma loses. */
3839 as_bad (_("invalid character %s in mnemonic"),
3840 output_invalid (*l
));
3844 /* Check if instruction is supported on specified architecture. */
3846 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3848 supported
|= cpu_flags_match (t
);
3849 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
3853 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
3855 as_bad (flag_code
== CODE_64BIT
3856 ? _("`%s' is not supported in 64-bit mode")
3857 : _("`%s' is only supported in 64-bit mode"),
3858 current_templates
->start
->name
);
3861 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
3863 as_bad (_("`%s' is not supported on `%s%s'"),
3864 current_templates
->start
->name
,
3865 cpu_arch_name
? cpu_arch_name
: default_arch
,
3866 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
3871 if (!cpu_arch_flags
.bitfield
.cpui386
3872 && (flag_code
!= CODE_16BIT
))
3874 as_warn (_("use .code16 to ensure correct addressing mode"));
3881 parse_operands (char *l
, const char *mnemonic
)
3885 /* 1 if operand is pending after ','. */
3886 unsigned int expecting_operand
= 0;
3888 /* Non-zero if operand parens not balanced. */
3889 unsigned int paren_not_balanced
;
3891 while (*l
!= END_OF_INSN
)
3893 /* Skip optional white space before operand. */
3894 if (is_space_char (*l
))
3896 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
)
3898 as_bad (_("invalid character %s before operand %d"),
3899 output_invalid (*l
),
3903 token_start
= l
; /* after white space */
3904 paren_not_balanced
= 0;
3905 while (paren_not_balanced
|| *l
!= ',')
3907 if (*l
== END_OF_INSN
)
3909 if (paren_not_balanced
)
3912 as_bad (_("unbalanced parenthesis in operand %d."),
3915 as_bad (_("unbalanced brackets in operand %d."),
3920 break; /* we are done */
3922 else if (!is_operand_char (*l
) && !is_space_char (*l
))
3924 as_bad (_("invalid character %s in operand %d"),
3925 output_invalid (*l
),
3932 ++paren_not_balanced
;
3934 --paren_not_balanced
;
3939 ++paren_not_balanced
;
3941 --paren_not_balanced
;
3945 if (l
!= token_start
)
3946 { /* Yes, we've read in another operand. */
3947 unsigned int operand_ok
;
3948 this_operand
= i
.operands
++;
3949 i
.types
[this_operand
].bitfield
.unspecified
= 1;
3950 if (i
.operands
> MAX_OPERANDS
)
3952 as_bad (_("spurious operands; (%d operands/instruction max)"),
3956 /* Now parse operand adding info to 'i' as we go along. */
3957 END_STRING_AND_SAVE (l
);
3961 i386_intel_operand (token_start
,
3962 intel_float_operand (mnemonic
));
3964 operand_ok
= i386_att_operand (token_start
);
3966 RESTORE_END_STRING (l
);
3972 if (expecting_operand
)
3974 expecting_operand_after_comma
:
3975 as_bad (_("expecting operand after ','; got nothing"));
3980 as_bad (_("expecting operand before ','; got nothing"));
3985 /* Now *l must be either ',' or END_OF_INSN. */
3988 if (*++l
== END_OF_INSN
)
3990 /* Just skip it, if it's \n complain. */
3991 goto expecting_operand_after_comma
;
3993 expecting_operand
= 1;
4000 swap_2_operands (int xchg1
, int xchg2
)
4002 union i386_op temp_op
;
4003 i386_operand_type temp_type
;
4004 enum bfd_reloc_code_real temp_reloc
;
4006 temp_type
= i
.types
[xchg2
];
4007 i
.types
[xchg2
] = i
.types
[xchg1
];
4008 i
.types
[xchg1
] = temp_type
;
4009 temp_op
= i
.op
[xchg2
];
4010 i
.op
[xchg2
] = i
.op
[xchg1
];
4011 i
.op
[xchg1
] = temp_op
;
4012 temp_reloc
= i
.reloc
[xchg2
];
4013 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4014 i
.reloc
[xchg1
] = temp_reloc
;
4018 if (i
.mask
->operand
== xchg1
)
4019 i
.mask
->operand
= xchg2
;
4020 else if (i
.mask
->operand
== xchg2
)
4021 i
.mask
->operand
= xchg1
;
4025 if (i
.broadcast
->operand
== xchg1
)
4026 i
.broadcast
->operand
= xchg2
;
4027 else if (i
.broadcast
->operand
== xchg2
)
4028 i
.broadcast
->operand
= xchg1
;
4032 if (i
.rounding
->operand
== xchg1
)
4033 i
.rounding
->operand
= xchg2
;
4034 else if (i
.rounding
->operand
== xchg2
)
4035 i
.rounding
->operand
= xchg1
;
4040 swap_operands (void)
4046 swap_2_operands (1, i
.operands
- 2);
4049 swap_2_operands (0, i
.operands
- 1);
4055 if (i
.mem_operands
== 2)
4057 const seg_entry
*temp_seg
;
4058 temp_seg
= i
.seg
[0];
4059 i
.seg
[0] = i
.seg
[1];
4060 i
.seg
[1] = temp_seg
;
4064 /* Try to ensure constant immediates are represented in the smallest
4069 char guess_suffix
= 0;
4073 guess_suffix
= i
.suffix
;
4074 else if (i
.reg_operands
)
4076 /* Figure out a suffix from the last register operand specified.
4077 We can't do this properly yet, ie. excluding InOutPortReg,
4078 but the following works for instructions with immediates.
4079 In any case, we can't set i.suffix yet. */
4080 for (op
= i
.operands
; --op
>= 0;)
4081 if (i
.types
[op
].bitfield
.reg8
)
4083 guess_suffix
= BYTE_MNEM_SUFFIX
;
4086 else if (i
.types
[op
].bitfield
.reg16
)
4088 guess_suffix
= WORD_MNEM_SUFFIX
;
4091 else if (i
.types
[op
].bitfield
.reg32
)
4093 guess_suffix
= LONG_MNEM_SUFFIX
;
4096 else if (i
.types
[op
].bitfield
.reg64
)
4098 guess_suffix
= QWORD_MNEM_SUFFIX
;
4102 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4103 guess_suffix
= WORD_MNEM_SUFFIX
;
4105 for (op
= i
.operands
; --op
>= 0;)
4106 if (operand_type_check (i
.types
[op
], imm
))
4108 switch (i
.op
[op
].imms
->X_op
)
4111 /* If a suffix is given, this operand may be shortened. */
4112 switch (guess_suffix
)
4114 case LONG_MNEM_SUFFIX
:
4115 i
.types
[op
].bitfield
.imm32
= 1;
4116 i
.types
[op
].bitfield
.imm64
= 1;
4118 case WORD_MNEM_SUFFIX
:
4119 i
.types
[op
].bitfield
.imm16
= 1;
4120 i
.types
[op
].bitfield
.imm32
= 1;
4121 i
.types
[op
].bitfield
.imm32s
= 1;
4122 i
.types
[op
].bitfield
.imm64
= 1;
4124 case BYTE_MNEM_SUFFIX
:
4125 i
.types
[op
].bitfield
.imm8
= 1;
4126 i
.types
[op
].bitfield
.imm8s
= 1;
4127 i
.types
[op
].bitfield
.imm16
= 1;
4128 i
.types
[op
].bitfield
.imm32
= 1;
4129 i
.types
[op
].bitfield
.imm32s
= 1;
4130 i
.types
[op
].bitfield
.imm64
= 1;
4134 /* If this operand is at most 16 bits, convert it
4135 to a signed 16 bit number before trying to see
4136 whether it will fit in an even smaller size.
4137 This allows a 16-bit operand such as $0xffe0 to
4138 be recognised as within Imm8S range. */
4139 if ((i
.types
[op
].bitfield
.imm16
)
4140 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
4142 i
.op
[op
].imms
->X_add_number
=
4143 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
4145 if ((i
.types
[op
].bitfield
.imm32
)
4146 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4149 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4150 ^ ((offsetT
) 1 << 31))
4151 - ((offsetT
) 1 << 31));
4154 = operand_type_or (i
.types
[op
],
4155 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4157 /* We must avoid matching of Imm32 templates when 64bit
4158 only immediate is available. */
4159 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4160 i
.types
[op
].bitfield
.imm32
= 0;
4167 /* Symbols and expressions. */
4169 /* Convert symbolic operand to proper sizes for matching, but don't
4170 prevent matching a set of insns that only supports sizes other
4171 than those matching the insn suffix. */
4173 i386_operand_type mask
, allowed
;
4174 const insn_template
*t
;
4176 operand_type_set (&mask
, 0);
4177 operand_type_set (&allowed
, 0);
4179 for (t
= current_templates
->start
;
4180 t
< current_templates
->end
;
4182 allowed
= operand_type_or (allowed
,
4183 t
->operand_types
[op
]);
4184 switch (guess_suffix
)
4186 case QWORD_MNEM_SUFFIX
:
4187 mask
.bitfield
.imm64
= 1;
4188 mask
.bitfield
.imm32s
= 1;
4190 case LONG_MNEM_SUFFIX
:
4191 mask
.bitfield
.imm32
= 1;
4193 case WORD_MNEM_SUFFIX
:
4194 mask
.bitfield
.imm16
= 1;
4196 case BYTE_MNEM_SUFFIX
:
4197 mask
.bitfield
.imm8
= 1;
4202 allowed
= operand_type_and (mask
, allowed
);
4203 if (!operand_type_all_zero (&allowed
))
4204 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4211 /* Try to use the smallest displacement type too. */
4213 optimize_disp (void)
4217 for (op
= i
.operands
; --op
>= 0;)
4218 if (operand_type_check (i
.types
[op
], disp
))
4220 if (i
.op
[op
].disps
->X_op
== O_constant
)
4222 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4224 if (i
.types
[op
].bitfield
.disp16
4225 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4227 /* If this operand is at most 16 bits, convert
4228 to a signed 16 bit number and don't use 64bit
4230 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4231 i
.types
[op
].bitfield
.disp64
= 0;
4233 if (i
.types
[op
].bitfield
.disp32
4234 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4236 /* If this operand is at most 32 bits, convert
4237 to a signed 32 bit number and don't use 64bit
4239 op_disp
&= (((offsetT
) 2 << 31) - 1);
4240 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4241 i
.types
[op
].bitfield
.disp64
= 0;
4243 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4245 i
.types
[op
].bitfield
.disp8
= 0;
4246 i
.types
[op
].bitfield
.disp16
= 0;
4247 i
.types
[op
].bitfield
.disp32
= 0;
4248 i
.types
[op
].bitfield
.disp32s
= 0;
4249 i
.types
[op
].bitfield
.disp64
= 0;
4253 else if (flag_code
== CODE_64BIT
)
4255 if (fits_in_signed_long (op_disp
))
4257 i
.types
[op
].bitfield
.disp64
= 0;
4258 i
.types
[op
].bitfield
.disp32s
= 1;
4260 if (i
.prefix
[ADDR_PREFIX
]
4261 && fits_in_unsigned_long (op_disp
))
4262 i
.types
[op
].bitfield
.disp32
= 1;
4264 if ((i
.types
[op
].bitfield
.disp32
4265 || i
.types
[op
].bitfield
.disp32s
4266 || i
.types
[op
].bitfield
.disp16
)
4267 && fits_in_signed_byte (op_disp
))
4268 i
.types
[op
].bitfield
.disp8
= 1;
4270 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4271 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4273 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4274 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4275 i
.types
[op
].bitfield
.disp8
= 0;
4276 i
.types
[op
].bitfield
.disp16
= 0;
4277 i
.types
[op
].bitfield
.disp32
= 0;
4278 i
.types
[op
].bitfield
.disp32s
= 0;
4279 i
.types
[op
].bitfield
.disp64
= 0;
4282 /* We only support 64bit displacement on constants. */
4283 i
.types
[op
].bitfield
.disp64
= 0;
4287 /* Check if operands are valid for the instruction. */
4290 check_VecOperands (const insn_template
*t
)
4294 /* Without VSIB byte, we can't have a vector register for index. */
4295 if (!t
->opcode_modifier
.vecsib
4297 && (i
.index_reg
->reg_type
.bitfield
.regxmm
4298 || i
.index_reg
->reg_type
.bitfield
.regymm
4299 || i
.index_reg
->reg_type
.bitfield
.regzmm
))
4301 i
.error
= unsupported_vector_index_register
;
4305 /* Check if default mask is allowed. */
4306 if (t
->opcode_modifier
.nodefmask
4307 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4309 i
.error
= no_default_mask
;
4313 /* For VSIB byte, we need a vector register for index, and all vector
4314 registers must be distinct. */
4315 if (t
->opcode_modifier
.vecsib
)
4318 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4319 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
4320 || (t
->opcode_modifier
.vecsib
== VecSIB256
4321 && i
.index_reg
->reg_type
.bitfield
.regymm
)
4322 || (t
->opcode_modifier
.vecsib
== VecSIB512
4323 && i
.index_reg
->reg_type
.bitfield
.regzmm
)))
4325 i
.error
= invalid_vsib_address
;
4329 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4330 if (i
.reg_operands
== 2 && !i
.mask
)
4332 gas_assert (i
.types
[0].bitfield
.regxmm
4333 || i
.types
[0].bitfield
.regymm
);
4334 gas_assert (i
.types
[2].bitfield
.regxmm
4335 || i
.types
[2].bitfield
.regymm
);
4336 if (operand_check
== check_none
)
4338 if (register_number (i
.op
[0].regs
)
4339 != register_number (i
.index_reg
)
4340 && register_number (i
.op
[2].regs
)
4341 != register_number (i
.index_reg
)
4342 && register_number (i
.op
[0].regs
)
4343 != register_number (i
.op
[2].regs
))
4345 if (operand_check
== check_error
)
4347 i
.error
= invalid_vector_register_set
;
4350 as_warn (_("mask, index, and destination registers should be distinct"));
4352 else if (i
.reg_operands
== 1 && i
.mask
)
4354 if ((i
.types
[1].bitfield
.regymm
4355 || i
.types
[1].bitfield
.regzmm
)
4356 && (register_number (i
.op
[1].regs
)
4357 == register_number (i
.index_reg
)))
4359 if (operand_check
== check_error
)
4361 i
.error
= invalid_vector_register_set
;
4364 if (operand_check
!= check_none
)
4365 as_warn (_("index and destination registers should be distinct"));
4370 /* Check if broadcast is supported by the instruction and is applied
4371 to the memory operand. */
4374 int broadcasted_opnd_size
;
4376 /* Check if specified broadcast is supported in this instruction,
4377 and it's applied to memory operand of DWORD or QWORD type,
4378 depending on VecESize. */
4379 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
4380 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
4381 || (t
->opcode_modifier
.vecesize
== 0
4382 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
4383 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
4384 || (t
->opcode_modifier
.vecesize
== 1
4385 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
4386 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
4389 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
4390 if (i
.broadcast
->type
== BROADCAST_1TO16
)
4391 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
4392 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
4393 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
4394 else if (i
.broadcast
->type
== BROADCAST_1TO4
)
4395 broadcasted_opnd_size
<<= 2; /* Broadcast 1to4. */
4396 else if (i
.broadcast
->type
== BROADCAST_1TO2
)
4397 broadcasted_opnd_size
<<= 1; /* Broadcast 1to2. */
4401 if ((broadcasted_opnd_size
== 256
4402 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
4403 || (broadcasted_opnd_size
== 512
4404 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
4407 i
.error
= unsupported_broadcast
;
4411 /* If broadcast is supported in this instruction, we need to check if
4412 operand of one-element size isn't specified without broadcast. */
4413 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
4415 /* Find memory operand. */
4416 for (op
= 0; op
< i
.operands
; op
++)
4417 if (operand_type_check (i
.types
[op
], anymem
))
4419 gas_assert (op
< i
.operands
);
4420 /* Check size of the memory operand. */
4421 if ((t
->opcode_modifier
.vecesize
== 0
4422 && i
.types
[op
].bitfield
.dword
)
4423 || (t
->opcode_modifier
.vecesize
== 1
4424 && i
.types
[op
].bitfield
.qword
))
4426 i
.error
= broadcast_needed
;
4431 /* Check if requested masking is supported. */
4433 && (!t
->opcode_modifier
.masking
4435 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
4437 i
.error
= unsupported_masking
;
4441 /* Check if masking is applied to dest operand. */
4442 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
4444 i
.error
= mask_not_on_destination
;
4451 if ((i
.rounding
->type
!= saeonly
4452 && !t
->opcode_modifier
.staticrounding
)
4453 || (i
.rounding
->type
== saeonly
4454 && (t
->opcode_modifier
.staticrounding
4455 || !t
->opcode_modifier
.sae
)))
4457 i
.error
= unsupported_rc_sae
;
4460 /* If the instruction has several immediate operands and one of
4461 them is rounding, the rounding operand should be the last
4462 immediate operand. */
4463 if (i
.imm_operands
> 1
4464 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
4466 i
.error
= rc_sae_operand_not_last_imm
;
4471 /* Check vector Disp8 operand. */
4472 if (t
->opcode_modifier
.disp8memshift
)
4475 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
4477 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
4479 for (op
= 0; op
< i
.operands
; op
++)
4480 if (operand_type_check (i
.types
[op
], disp
)
4481 && i
.op
[op
].disps
->X_op
== O_constant
)
4483 offsetT value
= i
.op
[op
].disps
->X_add_number
;
4484 int vec_disp8_ok
= fits_in_vec_disp8 (value
);
4485 if (t
->operand_types
[op
].bitfield
.vec_disp8
)
4488 i
.types
[op
].bitfield
.vec_disp8
= 1;
4491 /* Vector insn can only have Vec_Disp8/Disp32 in
4492 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4494 i
.types
[op
].bitfield
.disp8
= 0;
4495 if (flag_code
!= CODE_16BIT
)
4496 i
.types
[op
].bitfield
.disp16
= 0;
4499 else if (flag_code
!= CODE_16BIT
)
4501 /* One form of this instruction supports vector Disp8.
4502 Try vector Disp8 if we need to use Disp32. */
4503 if (vec_disp8_ok
&& !fits_in_signed_byte (value
))
4505 i
.error
= try_vector_disp8
;
4517 /* Check if operands are valid for the instruction. Update VEX
4521 VEX_check_operands (const insn_template
*t
)
4523 /* VREX is only valid with EVEX prefix. */
4524 if (i
.need_vrex
&& !t
->opcode_modifier
.evex
)
4526 i
.error
= invalid_register_operand
;
4530 if (!t
->opcode_modifier
.vex
)
4533 /* Only check VEX_Imm4, which must be the first operand. */
4534 if (t
->operand_types
[0].bitfield
.vec_imm4
)
4536 if (i
.op
[0].imms
->X_op
!= O_constant
4537 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
4543 /* Turn off Imm8 so that update_imm won't complain. */
4544 i
.types
[0] = vec_imm4
;
4550 static const insn_template
*
4551 match_template (void)
4553 /* Points to template once we've found it. */
4554 const insn_template
*t
;
4555 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
4556 i386_operand_type overlap4
;
4557 unsigned int found_reverse_match
;
4558 i386_opcode_modifier suffix_check
;
4559 i386_operand_type operand_types
[MAX_OPERANDS
];
4560 int addr_prefix_disp
;
4562 unsigned int found_cpu_match
;
4563 unsigned int check_register
;
4564 enum i386_error specific_error
= 0;
4566 #if MAX_OPERANDS != 5
4567 # error "MAX_OPERANDS must be 5."
4570 found_reverse_match
= 0;
4571 addr_prefix_disp
= -1;
4573 memset (&suffix_check
, 0, sizeof (suffix_check
));
4574 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4575 suffix_check
.no_bsuf
= 1;
4576 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4577 suffix_check
.no_wsuf
= 1;
4578 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
4579 suffix_check
.no_ssuf
= 1;
4580 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4581 suffix_check
.no_lsuf
= 1;
4582 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4583 suffix_check
.no_qsuf
= 1;
4584 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
4585 suffix_check
.no_ldsuf
= 1;
4587 /* Must have right number of operands. */
4588 i
.error
= number_of_operands_mismatch
;
4590 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
4592 addr_prefix_disp
= -1;
4594 if (i
.operands
!= t
->operands
)
4597 /* Check processor support. */
4598 i
.error
= unsupported
;
4599 found_cpu_match
= (cpu_flags_match (t
)
4600 == CPU_FLAGS_PERFECT_MATCH
);
4601 if (!found_cpu_match
)
4604 /* Check old gcc support. */
4605 i
.error
= old_gcc_only
;
4606 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
4609 /* Check AT&T mnemonic. */
4610 i
.error
= unsupported_with_intel_mnemonic
;
4611 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
4614 /* Check AT&T/Intel syntax. */
4615 i
.error
= unsupported_syntax
;
4616 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
4617 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
4620 /* Check the suffix, except for some instructions in intel mode. */
4621 i
.error
= invalid_instruction_suffix
;
4622 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4623 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4624 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4625 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4626 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4627 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4628 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4631 if (!operand_size_match (t
))
4634 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4635 operand_types
[j
] = t
->operand_types
[j
];
4637 /* In general, don't allow 64-bit operands in 32-bit mode. */
4638 if (i
.suffix
== QWORD_MNEM_SUFFIX
4639 && flag_code
!= CODE_64BIT
4641 ? (!t
->opcode_modifier
.ignoresize
4642 && !intel_float_operand (t
->name
))
4643 : intel_float_operand (t
->name
) != 2)
4644 && ((!operand_types
[0].bitfield
.regmmx
4645 && !operand_types
[0].bitfield
.regxmm
4646 && !operand_types
[0].bitfield
.regymm
4647 && !operand_types
[0].bitfield
.regzmm
)
4648 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4649 && operand_types
[t
->operands
> 1].bitfield
.regxmm
4650 && operand_types
[t
->operands
> 1].bitfield
.regymm
4651 && operand_types
[t
->operands
> 1].bitfield
.regzmm
))
4652 && (t
->base_opcode
!= 0x0fc7
4653 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
4656 /* In general, don't allow 32-bit operands on pre-386. */
4657 else if (i
.suffix
== LONG_MNEM_SUFFIX
4658 && !cpu_arch_flags
.bitfield
.cpui386
4660 ? (!t
->opcode_modifier
.ignoresize
4661 && !intel_float_operand (t
->name
))
4662 : intel_float_operand (t
->name
) != 2)
4663 && ((!operand_types
[0].bitfield
.regmmx
4664 && !operand_types
[0].bitfield
.regxmm
)
4665 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4666 && operand_types
[t
->operands
> 1].bitfield
.regxmm
)))
4669 /* Do not verify operands when there are none. */
4673 /* We've found a match; break out of loop. */
4677 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4678 into Disp32/Disp16/Disp32 operand. */
4679 if (i
.prefix
[ADDR_PREFIX
] != 0)
4681 /* There should be only one Disp operand. */
4685 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4687 if (operand_types
[j
].bitfield
.disp16
)
4689 addr_prefix_disp
= j
;
4690 operand_types
[j
].bitfield
.disp32
= 1;
4691 operand_types
[j
].bitfield
.disp16
= 0;
4697 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4699 if (operand_types
[j
].bitfield
.disp32
)
4701 addr_prefix_disp
= j
;
4702 operand_types
[j
].bitfield
.disp32
= 0;
4703 operand_types
[j
].bitfield
.disp16
= 1;
4709 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4711 if (operand_types
[j
].bitfield
.disp64
)
4713 addr_prefix_disp
= j
;
4714 operand_types
[j
].bitfield
.disp64
= 0;
4715 operand_types
[j
].bitfield
.disp32
= 1;
4723 /* We check register size if needed. */
4724 check_register
= t
->opcode_modifier
.checkregsize
;
4725 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
4726 switch (t
->operands
)
4729 if (!operand_type_match (overlap0
, i
.types
[0]))
4733 /* xchg %eax, %eax is a special case. It is an aliase for nop
4734 only in 32bit mode and we can use opcode 0x90. In 64bit
4735 mode, we can't use 0x90 for xchg %eax, %eax since it should
4736 zero-extend %eax to %rax. */
4737 if (flag_code
== CODE_64BIT
4738 && t
->base_opcode
== 0x90
4739 && operand_type_equal (&i
.types
[0], &acc32
)
4740 && operand_type_equal (&i
.types
[1], &acc32
))
4744 /* If we swap operand in encoding, we either match
4745 the next one or reverse direction of operands. */
4746 if (t
->opcode_modifier
.s
)
4748 else if (t
->opcode_modifier
.d
)
4753 /* If we swap operand in encoding, we match the next one. */
4754 if (i
.swap_operand
&& t
->opcode_modifier
.s
)
4758 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
4759 if (!operand_type_match (overlap0
, i
.types
[0])
4760 || !operand_type_match (overlap1
, i
.types
[1])
4762 && !operand_type_register_match (overlap0
, i
.types
[0],
4764 overlap1
, i
.types
[1],
4767 /* Check if other direction is valid ... */
4768 if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
4772 /* Try reversing direction of operands. */
4773 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
4774 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
4775 if (!operand_type_match (overlap0
, i
.types
[0])
4776 || !operand_type_match (overlap1
, i
.types
[1])
4778 && !operand_type_register_match (overlap0
,
4785 /* Does not match either direction. */
4788 /* found_reverse_match holds which of D or FloatDR
4790 if (t
->opcode_modifier
.d
)
4791 found_reverse_match
= Opcode_D
;
4792 else if (t
->opcode_modifier
.floatd
)
4793 found_reverse_match
= Opcode_FloatD
;
4795 found_reverse_match
= 0;
4796 if (t
->opcode_modifier
.floatr
)
4797 found_reverse_match
|= Opcode_FloatR
;
4801 /* Found a forward 2 operand match here. */
4802 switch (t
->operands
)
4805 overlap4
= operand_type_and (i
.types
[4],
4808 overlap3
= operand_type_and (i
.types
[3],
4811 overlap2
= operand_type_and (i
.types
[2],
4816 switch (t
->operands
)
4819 if (!operand_type_match (overlap4
, i
.types
[4])
4820 || !operand_type_register_match (overlap3
,
4828 if (!operand_type_match (overlap3
, i
.types
[3])
4830 && !operand_type_register_match (overlap2
,
4838 /* Here we make use of the fact that there are no
4839 reverse match 3 operand instructions, and all 3
4840 operand instructions only need to be checked for
4841 register consistency between operands 2 and 3. */
4842 if (!operand_type_match (overlap2
, i
.types
[2])
4844 && !operand_type_register_match (overlap1
,
4854 /* Found either forward/reverse 2, 3 or 4 operand match here:
4855 slip through to break. */
4857 if (!found_cpu_match
)
4859 found_reverse_match
= 0;
4863 /* Check if vector and VEX operands are valid. */
4864 if (check_VecOperands (t
) || VEX_check_operands (t
))
4866 specific_error
= i
.error
;
4870 /* We've found a match; break out of loop. */
4874 if (t
== current_templates
->end
)
4876 /* We found no match. */
4877 const char *err_msg
;
4878 switch (specific_error
? specific_error
: i
.error
)
4882 case operand_size_mismatch
:
4883 err_msg
= _("operand size mismatch");
4885 case operand_type_mismatch
:
4886 err_msg
= _("operand type mismatch");
4888 case register_type_mismatch
:
4889 err_msg
= _("register type mismatch");
4891 case number_of_operands_mismatch
:
4892 err_msg
= _("number of operands mismatch");
4894 case invalid_instruction_suffix
:
4895 err_msg
= _("invalid instruction suffix");
4898 err_msg
= _("constant doesn't fit in 4 bits");
4901 err_msg
= _("only supported with old gcc");
4903 case unsupported_with_intel_mnemonic
:
4904 err_msg
= _("unsupported with Intel mnemonic");
4906 case unsupported_syntax
:
4907 err_msg
= _("unsupported syntax");
4910 as_bad (_("unsupported instruction `%s'"),
4911 current_templates
->start
->name
);
4913 case invalid_vsib_address
:
4914 err_msg
= _("invalid VSIB address");
4916 case invalid_vector_register_set
:
4917 err_msg
= _("mask, index, and destination registers must be distinct");
4919 case unsupported_vector_index_register
:
4920 err_msg
= _("unsupported vector index register");
4922 case unsupported_broadcast
:
4923 err_msg
= _("unsupported broadcast");
4925 case broadcast_not_on_src_operand
:
4926 err_msg
= _("broadcast not on source memory operand");
4928 case broadcast_needed
:
4929 err_msg
= _("broadcast is needed for operand of such type");
4931 case unsupported_masking
:
4932 err_msg
= _("unsupported masking");
4934 case mask_not_on_destination
:
4935 err_msg
= _("mask not on destination operand");
4937 case no_default_mask
:
4938 err_msg
= _("default mask isn't allowed");
4940 case unsupported_rc_sae
:
4941 err_msg
= _("unsupported static rounding/sae");
4943 case rc_sae_operand_not_last_imm
:
4945 err_msg
= _("RC/SAE operand must precede immediate operands");
4947 err_msg
= _("RC/SAE operand must follow immediate operands");
4949 case invalid_register_operand
:
4950 err_msg
= _("invalid register operand");
4953 as_bad (_("%s for `%s'"), err_msg
,
4954 current_templates
->start
->name
);
4958 if (!quiet_warnings
)
4961 && (i
.types
[0].bitfield
.jumpabsolute
4962 != operand_types
[0].bitfield
.jumpabsolute
))
4964 as_warn (_("indirect %s without `*'"), t
->name
);
4967 if (t
->opcode_modifier
.isprefix
4968 && t
->opcode_modifier
.ignoresize
)
4970 /* Warn them that a data or address size prefix doesn't
4971 affect assembly of the next line of code. */
4972 as_warn (_("stand-alone `%s' prefix"), t
->name
);
4976 /* Copy the template we found. */
4979 if (addr_prefix_disp
!= -1)
4980 i
.tm
.operand_types
[addr_prefix_disp
]
4981 = operand_types
[addr_prefix_disp
];
4983 if (found_reverse_match
)
4985 /* If we found a reverse match we must alter the opcode
4986 direction bit. found_reverse_match holds bits to change
4987 (different for int & float insns). */
4989 i
.tm
.base_opcode
^= found_reverse_match
;
4991 i
.tm
.operand_types
[0] = operand_types
[1];
4992 i
.tm
.operand_types
[1] = operand_types
[0];
5001 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
5002 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
5004 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
5006 as_bad (_("`%s' operand %d must use `%ses' segment"),
5012 /* There's only ever one segment override allowed per instruction.
5013 This instruction possibly has a legal segment override on the
5014 second operand, so copy the segment to where non-string
5015 instructions store it, allowing common code. */
5016 i
.seg
[0] = i
.seg
[1];
5018 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
5020 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
5022 as_bad (_("`%s' operand %d must use `%ses' segment"),
5033 process_suffix (void)
5035 /* If matched instruction specifies an explicit instruction mnemonic
5037 if (i
.tm
.opcode_modifier
.size16
)
5038 i
.suffix
= WORD_MNEM_SUFFIX
;
5039 else if (i
.tm
.opcode_modifier
.size32
)
5040 i
.suffix
= LONG_MNEM_SUFFIX
;
5041 else if (i
.tm
.opcode_modifier
.size64
)
5042 i
.suffix
= QWORD_MNEM_SUFFIX
;
5043 else if (i
.reg_operands
)
5045 /* If there's no instruction mnemonic suffix we try to invent one
5046 based on register operands. */
5049 /* We take i.suffix from the last register operand specified,
5050 Destination register type is more significant than source
5051 register type. crc32 in SSE4.2 prefers source register
5053 if (i
.tm
.base_opcode
== 0xf20f38f1)
5055 if (i
.types
[0].bitfield
.reg16
)
5056 i
.suffix
= WORD_MNEM_SUFFIX
;
5057 else if (i
.types
[0].bitfield
.reg32
)
5058 i
.suffix
= LONG_MNEM_SUFFIX
;
5059 else if (i
.types
[0].bitfield
.reg64
)
5060 i
.suffix
= QWORD_MNEM_SUFFIX
;
5062 else if (i
.tm
.base_opcode
== 0xf20f38f0)
5064 if (i
.types
[0].bitfield
.reg8
)
5065 i
.suffix
= BYTE_MNEM_SUFFIX
;
5072 if (i
.tm
.base_opcode
== 0xf20f38f1
5073 || i
.tm
.base_opcode
== 0xf20f38f0)
5075 /* We have to know the operand size for crc32. */
5076 as_bad (_("ambiguous memory operand size for `%s`"),
5081 for (op
= i
.operands
; --op
>= 0;)
5082 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5084 if (i
.types
[op
].bitfield
.reg8
)
5086 i
.suffix
= BYTE_MNEM_SUFFIX
;
5089 else if (i
.types
[op
].bitfield
.reg16
)
5091 i
.suffix
= WORD_MNEM_SUFFIX
;
5094 else if (i
.types
[op
].bitfield
.reg32
)
5096 i
.suffix
= LONG_MNEM_SUFFIX
;
5099 else if (i
.types
[op
].bitfield
.reg64
)
5101 i
.suffix
= QWORD_MNEM_SUFFIX
;
5107 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5110 && i
.tm
.opcode_modifier
.ignoresize
5111 && i
.tm
.opcode_modifier
.no_bsuf
)
5113 else if (!check_byte_reg ())
5116 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5119 && i
.tm
.opcode_modifier
.ignoresize
5120 && i
.tm
.opcode_modifier
.no_lsuf
)
5122 else if (!check_long_reg ())
5125 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5128 && i
.tm
.opcode_modifier
.ignoresize
5129 && i
.tm
.opcode_modifier
.no_qsuf
)
5131 else if (!check_qword_reg ())
5134 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5137 && i
.tm
.opcode_modifier
.ignoresize
5138 && i
.tm
.opcode_modifier
.no_wsuf
)
5140 else if (!check_word_reg ())
5143 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
5144 || i
.suffix
== YMMWORD_MNEM_SUFFIX
5145 || i
.suffix
== ZMMWORD_MNEM_SUFFIX
)
5147 /* Skip if the instruction has x/y/z suffix. match_template
5148 should check if it is a valid suffix. */
5150 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
5151 /* Do nothing if the instruction is going to ignore the prefix. */
5156 else if (i
.tm
.opcode_modifier
.defaultsize
5158 /* exclude fldenv/frstor/fsave/fstenv */
5159 && i
.tm
.opcode_modifier
.no_ssuf
)
5161 i
.suffix
= stackop_size
;
5163 else if (intel_syntax
5165 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
5166 || i
.tm
.opcode_modifier
.jumpbyte
5167 || i
.tm
.opcode_modifier
.jumpintersegment
5168 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
5169 && i
.tm
.extension_opcode
<= 3)))
5174 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5176 i
.suffix
= QWORD_MNEM_SUFFIX
;
5180 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5181 i
.suffix
= LONG_MNEM_SUFFIX
;
5184 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5185 i
.suffix
= WORD_MNEM_SUFFIX
;
5194 if (i
.tm
.opcode_modifier
.w
)
5196 as_bad (_("no instruction mnemonic suffix given and "
5197 "no register operands; can't size instruction"));
5203 unsigned int suffixes
;
5205 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
5206 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5208 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5210 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
5212 if (!i
.tm
.opcode_modifier
.no_ssuf
)
5214 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5217 /* There are more than suffix matches. */
5218 if (i
.tm
.opcode_modifier
.w
5219 || ((suffixes
& (suffixes
- 1))
5220 && !i
.tm
.opcode_modifier
.defaultsize
5221 && !i
.tm
.opcode_modifier
.ignoresize
))
5223 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
5229 /* Change the opcode based on the operand size given by i.suffix;
5230 We don't need to change things for byte insns. */
5233 && i
.suffix
!= BYTE_MNEM_SUFFIX
5234 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
5235 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
5236 && i
.suffix
!= ZMMWORD_MNEM_SUFFIX
)
5238 /* It's not a byte, select word/dword operation. */
5239 if (i
.tm
.opcode_modifier
.w
)
5241 if (i
.tm
.opcode_modifier
.shortform
)
5242 i
.tm
.base_opcode
|= 8;
5244 i
.tm
.base_opcode
|= 1;
5247 /* Now select between word & dword operations via the operand
5248 size prefix, except for instructions that will ignore this
5250 if (i
.tm
.opcode_modifier
.addrprefixop0
)
5252 /* The address size override prefix changes the size of the
5254 if ((flag_code
== CODE_32BIT
5255 && i
.op
->regs
[0].reg_type
.bitfield
.reg16
)
5256 || (flag_code
!= CODE_32BIT
5257 && i
.op
->regs
[0].reg_type
.bitfield
.reg32
))
5258 if (!add_prefix (ADDR_PREFIX_OPCODE
))
5261 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
5262 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
5263 && !i
.tm
.opcode_modifier
.ignoresize
5264 && !i
.tm
.opcode_modifier
.floatmf
5265 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
5266 || (flag_code
== CODE_64BIT
5267 && i
.tm
.opcode_modifier
.jumpbyte
)))
5269 unsigned int prefix
= DATA_PREFIX_OPCODE
;
5271 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
5272 prefix
= ADDR_PREFIX_OPCODE
;
5274 if (!add_prefix (prefix
))
5278 /* Set mode64 for an operand. */
5279 if (i
.suffix
== QWORD_MNEM_SUFFIX
5280 && flag_code
== CODE_64BIT
5281 && !i
.tm
.opcode_modifier
.norex64
)
5283 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5284 need rex64. cmpxchg8b is also a special case. */
5285 if (! (i
.operands
== 2
5286 && i
.tm
.base_opcode
== 0x90
5287 && i
.tm
.extension_opcode
== None
5288 && operand_type_equal (&i
.types
[0], &acc64
)
5289 && operand_type_equal (&i
.types
[1], &acc64
))
5290 && ! (i
.operands
== 1
5291 && i
.tm
.base_opcode
== 0xfc7
5292 && i
.tm
.extension_opcode
== 1
5293 && !operand_type_check (i
.types
[0], reg
)
5294 && operand_type_check (i
.types
[0], anymem
)))
5298 /* Size floating point instruction. */
5299 if (i
.suffix
== LONG_MNEM_SUFFIX
)
5300 if (i
.tm
.opcode_modifier
.floatmf
)
5301 i
.tm
.base_opcode
^= 4;
5308 check_byte_reg (void)
5312 for (op
= i
.operands
; --op
>= 0;)
5314 /* If this is an eight bit register, it's OK. If it's the 16 or
5315 32 bit version of an eight bit register, we will just use the
5316 low portion, and that's OK too. */
5317 if (i
.types
[op
].bitfield
.reg8
)
5320 /* I/O port address operands are OK too. */
5321 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5324 /* crc32 doesn't generate this warning. */
5325 if (i
.tm
.base_opcode
== 0xf20f38f0)
5328 if ((i
.types
[op
].bitfield
.reg16
5329 || i
.types
[op
].bitfield
.reg32
5330 || i
.types
[op
].bitfield
.reg64
)
5331 && i
.op
[op
].regs
->reg_num
< 4
5332 /* Prohibit these changes in 64bit mode, since the lowering
5333 would be more complicated. */
5334 && flag_code
!= CODE_64BIT
)
5336 #if REGISTER_WARNINGS
5337 if (!quiet_warnings
)
5338 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5340 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.reg16
5341 ? REGNAM_AL
- REGNAM_AX
5342 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
5344 i
.op
[op
].regs
->reg_name
,
5349 /* Any other register is bad. */
5350 if (i
.types
[op
].bitfield
.reg16
5351 || i
.types
[op
].bitfield
.reg32
5352 || i
.types
[op
].bitfield
.reg64
5353 || i
.types
[op
].bitfield
.regmmx
5354 || i
.types
[op
].bitfield
.regxmm
5355 || i
.types
[op
].bitfield
.regymm
5356 || i
.types
[op
].bitfield
.regzmm
5357 || i
.types
[op
].bitfield
.sreg2
5358 || i
.types
[op
].bitfield
.sreg3
5359 || i
.types
[op
].bitfield
.control
5360 || i
.types
[op
].bitfield
.debug
5361 || i
.types
[op
].bitfield
.test
5362 || i
.types
[op
].bitfield
.floatreg
5363 || i
.types
[op
].bitfield
.floatacc
)
5365 as_bad (_("`%s%s' not allowed with `%s%c'"),
5367 i
.op
[op
].regs
->reg_name
,
5377 check_long_reg (void)
5381 for (op
= i
.operands
; --op
>= 0;)
5382 /* Reject eight bit registers, except where the template requires
5383 them. (eg. movzb) */
5384 if (i
.types
[op
].bitfield
.reg8
5385 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5386 || i
.tm
.operand_types
[op
].bitfield
.reg32
5387 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5389 as_bad (_("`%s%s' not allowed with `%s%c'"),
5391 i
.op
[op
].regs
->reg_name
,
5396 /* Warn if the e prefix on a general reg is missing. */
5397 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5398 && i
.types
[op
].bitfield
.reg16
5399 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5400 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5402 /* Prohibit these changes in the 64bit mode, since the
5403 lowering is more complicated. */
5404 if (flag_code
== CODE_64BIT
)
5406 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5407 register_prefix
, i
.op
[op
].regs
->reg_name
,
5411 #if REGISTER_WARNINGS
5412 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5414 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
5415 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5418 /* Warn if the r prefix on a general reg is present. */
5419 else if (i
.types
[op
].bitfield
.reg64
5420 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5421 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5424 && i
.tm
.opcode_modifier
.toqword
5425 && !i
.types
[0].bitfield
.regxmm
)
5427 /* Convert to QWORD. We want REX byte. */
5428 i
.suffix
= QWORD_MNEM_SUFFIX
;
5432 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5433 register_prefix
, i
.op
[op
].regs
->reg_name
,
5442 check_qword_reg (void)
5446 for (op
= i
.operands
; --op
>= 0; )
5447 /* Reject eight bit registers, except where the template requires
5448 them. (eg. movzb) */
5449 if (i
.types
[op
].bitfield
.reg8
5450 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5451 || i
.tm
.operand_types
[op
].bitfield
.reg32
5452 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5454 as_bad (_("`%s%s' not allowed with `%s%c'"),
5456 i
.op
[op
].regs
->reg_name
,
5461 /* Warn if the r prefix on a general reg is missing. */
5462 else if ((i
.types
[op
].bitfield
.reg16
5463 || i
.types
[op
].bitfield
.reg32
)
5464 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5465 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5467 /* Prohibit these changes in the 64bit mode, since the
5468 lowering is more complicated. */
5470 && i
.tm
.opcode_modifier
.todword
5471 && !i
.types
[0].bitfield
.regxmm
)
5473 /* Convert to DWORD. We don't want REX byte. */
5474 i
.suffix
= LONG_MNEM_SUFFIX
;
5478 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5479 register_prefix
, i
.op
[op
].regs
->reg_name
,
5488 check_word_reg (void)
5491 for (op
= i
.operands
; --op
>= 0;)
5492 /* Reject eight bit registers, except where the template requires
5493 them. (eg. movzb) */
5494 if (i
.types
[op
].bitfield
.reg8
5495 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5496 || i
.tm
.operand_types
[op
].bitfield
.reg32
5497 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5499 as_bad (_("`%s%s' not allowed with `%s%c'"),
5501 i
.op
[op
].regs
->reg_name
,
5506 /* Warn if the e or r prefix on a general reg is present. */
5507 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5508 && (i
.types
[op
].bitfield
.reg32
5509 || i
.types
[op
].bitfield
.reg64
)
5510 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5511 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5513 /* Prohibit these changes in the 64bit mode, since the
5514 lowering is more complicated. */
5515 if (flag_code
== CODE_64BIT
)
5517 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5518 register_prefix
, i
.op
[op
].regs
->reg_name
,
5522 #if REGISTER_WARNINGS
5523 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5525 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
5526 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5533 update_imm (unsigned int j
)
5535 i386_operand_type overlap
= i
.types
[j
];
5536 if ((overlap
.bitfield
.imm8
5537 || overlap
.bitfield
.imm8s
5538 || overlap
.bitfield
.imm16
5539 || overlap
.bitfield
.imm32
5540 || overlap
.bitfield
.imm32s
5541 || overlap
.bitfield
.imm64
)
5542 && !operand_type_equal (&overlap
, &imm8
)
5543 && !operand_type_equal (&overlap
, &imm8s
)
5544 && !operand_type_equal (&overlap
, &imm16
)
5545 && !operand_type_equal (&overlap
, &imm32
)
5546 && !operand_type_equal (&overlap
, &imm32s
)
5547 && !operand_type_equal (&overlap
, &imm64
))
5551 i386_operand_type temp
;
5553 operand_type_set (&temp
, 0);
5554 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5556 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
5557 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
5559 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5560 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
5561 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5563 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
5564 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
5567 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
5570 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
5571 || operand_type_equal (&overlap
, &imm16_32
)
5572 || operand_type_equal (&overlap
, &imm16_32s
))
5574 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5579 if (!operand_type_equal (&overlap
, &imm8
)
5580 && !operand_type_equal (&overlap
, &imm8s
)
5581 && !operand_type_equal (&overlap
, &imm16
)
5582 && !operand_type_equal (&overlap
, &imm32
)
5583 && !operand_type_equal (&overlap
, &imm32s
)
5584 && !operand_type_equal (&overlap
, &imm64
))
5586 as_bad (_("no instruction mnemonic suffix given; "
5587 "can't determine immediate size"));
5591 i
.types
[j
] = overlap
;
5601 /* Update the first 2 immediate operands. */
5602 n
= i
.operands
> 2 ? 2 : i
.operands
;
5605 for (j
= 0; j
< n
; j
++)
5606 if (update_imm (j
) == 0)
5609 /* The 3rd operand can't be immediate operand. */
5610 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
5617 bad_implicit_operand (int xmm
)
5619 const char *ireg
= xmm
? "xmm0" : "ymm0";
5622 as_bad (_("the last operand of `%s' must be `%s%s'"),
5623 i
.tm
.name
, register_prefix
, ireg
);
5625 as_bad (_("the first operand of `%s' must be `%s%s'"),
5626 i
.tm
.name
, register_prefix
, ireg
);
5631 process_operands (void)
5633 /* Default segment register this instruction will use for memory
5634 accesses. 0 means unknown. This is only for optimizing out
5635 unnecessary segment overrides. */
5636 const seg_entry
*default_seg
= 0;
5638 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
5640 unsigned int dupl
= i
.operands
;
5641 unsigned int dest
= dupl
- 1;
5644 /* The destination must be an xmm register. */
5645 gas_assert (i
.reg_operands
5646 && MAX_OPERANDS
> dupl
5647 && operand_type_equal (&i
.types
[dest
], ®xmm
));
5649 if (i
.tm
.opcode_modifier
.firstxmm0
)
5651 /* The first operand is implicit and must be xmm0. */
5652 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5653 if (register_number (i
.op
[0].regs
) != 0)
5654 return bad_implicit_operand (1);
5656 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5658 /* Keep xmm0 for instructions with VEX prefix and 3
5664 /* We remove the first xmm0 and keep the number of
5665 operands unchanged, which in fact duplicates the
5667 for (j
= 1; j
< i
.operands
; j
++)
5669 i
.op
[j
- 1] = i
.op
[j
];
5670 i
.types
[j
- 1] = i
.types
[j
];
5671 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5675 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
5677 gas_assert ((MAX_OPERANDS
- 1) > dupl
5678 && (i
.tm
.opcode_modifier
.vexsources
5681 /* Add the implicit xmm0 for instructions with VEX prefix
5683 for (j
= i
.operands
; j
> 0; j
--)
5685 i
.op
[j
] = i
.op
[j
- 1];
5686 i
.types
[j
] = i
.types
[j
- 1];
5687 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
5690 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
5691 i
.types
[0] = regxmm
;
5692 i
.tm
.operand_types
[0] = regxmm
;
5695 i
.reg_operands
+= 2;
5700 i
.op
[dupl
] = i
.op
[dest
];
5701 i
.types
[dupl
] = i
.types
[dest
];
5702 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5711 i
.op
[dupl
] = i
.op
[dest
];
5712 i
.types
[dupl
] = i
.types
[dest
];
5713 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5716 if (i
.tm
.opcode_modifier
.immext
)
5719 else if (i
.tm
.opcode_modifier
.firstxmm0
)
5723 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5724 gas_assert (i
.reg_operands
5725 && (operand_type_equal (&i
.types
[0], ®xmm
)
5726 || operand_type_equal (&i
.types
[0], ®ymm
)
5727 || operand_type_equal (&i
.types
[0], ®zmm
)));
5728 if (register_number (i
.op
[0].regs
) != 0)
5729 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
5731 for (j
= 1; j
< i
.operands
; j
++)
5733 i
.op
[j
- 1] = i
.op
[j
];
5734 i
.types
[j
- 1] = i
.types
[j
];
5736 /* We need to adjust fields in i.tm since they are used by
5737 build_modrm_byte. */
5738 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5745 else if (i
.tm
.opcode_modifier
.regkludge
)
5747 /* The imul $imm, %reg instruction is converted into
5748 imul $imm, %reg, %reg, and the clr %reg instruction
5749 is converted into xor %reg, %reg. */
5751 unsigned int first_reg_op
;
5753 if (operand_type_check (i
.types
[0], reg
))
5757 /* Pretend we saw the extra register operand. */
5758 gas_assert (i
.reg_operands
== 1
5759 && i
.op
[first_reg_op
+ 1].regs
== 0);
5760 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
5761 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
5766 if (i
.tm
.opcode_modifier
.shortform
)
5768 if (i
.types
[0].bitfield
.sreg2
5769 || i
.types
[0].bitfield
.sreg3
)
5771 if (i
.tm
.base_opcode
== POP_SEG_SHORT
5772 && i
.op
[0].regs
->reg_num
== 1)
5774 as_bad (_("you can't `pop %scs'"), register_prefix
);
5777 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
5778 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
5783 /* The register or float register operand is in operand
5787 if (i
.types
[0].bitfield
.floatreg
5788 || operand_type_check (i
.types
[0], reg
))
5792 /* Register goes in low 3 bits of opcode. */
5793 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
5794 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5796 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5798 /* Warn about some common errors, but press on regardless.
5799 The first case can be generated by gcc (<= 2.8.1). */
5800 if (i
.operands
== 2)
5802 /* Reversed arguments on faddp, fsubp, etc. */
5803 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
5804 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
5805 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
5809 /* Extraneous `l' suffix on fp insn. */
5810 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
5811 register_prefix
, i
.op
[0].regs
->reg_name
);
5816 else if (i
.tm
.opcode_modifier
.modrm
)
5818 /* The opcode is completed (modulo i.tm.extension_opcode which
5819 must be put into the modrm byte). Now, we make the modrm and
5820 index base bytes based on all the info we've collected. */
5822 default_seg
= build_modrm_byte ();
5824 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
5828 else if (i
.tm
.opcode_modifier
.isstring
)
5830 /* For the string instructions that allow a segment override
5831 on one of their operands, the default segment is ds. */
5835 if (i
.tm
.base_opcode
== 0x8d /* lea */
5838 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
5840 /* If a segment was explicitly specified, and the specified segment
5841 is not the default, use an opcode prefix to select it. If we
5842 never figured out what the default segment is, then default_seg
5843 will be zero at this point, and the specified segment prefix will
5845 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
5847 if (!add_prefix (i
.seg
[0]->seg_prefix
))
5853 static const seg_entry
*
5854 build_modrm_byte (void)
5856 const seg_entry
*default_seg
= 0;
5857 unsigned int source
, dest
;
5860 /* The first operand of instructions with VEX prefix and 3 sources
5861 must be VEX_Imm4. */
5862 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
5865 unsigned int nds
, reg_slot
;
5868 if (i
.tm
.opcode_modifier
.veximmext
5869 && i
.tm
.opcode_modifier
.immext
)
5871 dest
= i
.operands
- 2;
5872 gas_assert (dest
== 3);
5875 dest
= i
.operands
- 1;
5878 /* There are 2 kinds of instructions:
5879 1. 5 operands: 4 register operands or 3 register operands
5880 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5881 VexW0 or VexW1. The destination must be either XMM, YMM or
5883 2. 4 operands: 4 register operands or 3 register operands
5884 plus 1 memory operand, VexXDS, and VexImmExt */
5885 gas_assert ((i
.reg_operands
== 4
5886 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
5887 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5888 && (i
.tm
.opcode_modifier
.veximmext
5889 || (i
.imm_operands
== 1
5890 && i
.types
[0].bitfield
.vec_imm4
5891 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
5892 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5893 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
5894 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)
5895 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®zmm
)))));
5897 if (i
.imm_operands
== 0)
5899 /* When there is no immediate operand, generate an 8bit
5900 immediate operand to encode the first operand. */
5901 exp
= &im_expressions
[i
.imm_operands
++];
5902 i
.op
[i
.operands
].imms
= exp
;
5903 i
.types
[i
.operands
] = imm8
;
5905 /* If VexW1 is set, the first operand is the source and
5906 the second operand is encoded in the immediate operand. */
5907 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5918 /* FMA swaps REG and NDS. */
5919 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
5927 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5929 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5931 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5933 exp
->X_op
= O_constant
;
5934 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
5935 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
5939 unsigned int imm_slot
;
5941 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5943 /* If VexW0 is set, the third operand is the source and
5944 the second operand is encoded in the immediate
5951 /* VexW1 is set, the second operand is the source and
5952 the third operand is encoded in the immediate
5958 if (i
.tm
.opcode_modifier
.immext
)
5960 /* When ImmExt is set, the immdiate byte is the last
5962 imm_slot
= i
.operands
- 1;
5970 /* Turn on Imm8 so that output_imm will generate it. */
5971 i
.types
[imm_slot
].bitfield
.imm8
= 1;
5974 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5976 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5978 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5980 i
.op
[imm_slot
].imms
->X_add_number
5981 |= register_number (i
.op
[reg_slot
].regs
) << 4;
5982 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
5985 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
5986 || operand_type_equal (&i
.tm
.operand_types
[nds
],
5988 || operand_type_equal (&i
.tm
.operand_types
[nds
],
5990 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
5995 /* i.reg_operands MUST be the number of real register operands;
5996 implicit registers do not count. If there are 3 register
5997 operands, it must be a instruction with VexNDS. For a
5998 instruction with VexNDD, the destination register is encoded
5999 in VEX prefix. If there are 4 register operands, it must be
6000 a instruction with VEX prefix and 3 sources. */
6001 if (i
.mem_operands
== 0
6002 && ((i
.reg_operands
== 2
6003 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
6004 || (i
.reg_operands
== 3
6005 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6006 || (i
.reg_operands
== 4 && vex_3_sources
)))
6014 /* When there are 3 operands, one of them may be immediate,
6015 which may be the first or the last operand. Otherwise,
6016 the first operand must be shift count register (cl) or it
6017 is an instruction with VexNDS. */
6018 gas_assert (i
.imm_operands
== 1
6019 || (i
.imm_operands
== 0
6020 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6021 || i
.types
[0].bitfield
.shiftcount
)));
6022 if (operand_type_check (i
.types
[0], imm
)
6023 || i
.types
[0].bitfield
.shiftcount
)
6029 /* When there are 4 operands, the first two must be 8bit
6030 immediate operands. The source operand will be the 3rd
6033 For instructions with VexNDS, if the first operand
6034 an imm8, the source operand is the 2nd one. If the last
6035 operand is imm8, the source operand is the first one. */
6036 gas_assert ((i
.imm_operands
== 2
6037 && i
.types
[0].bitfield
.imm8
6038 && i
.types
[1].bitfield
.imm8
)
6039 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6040 && i
.imm_operands
== 1
6041 && (i
.types
[0].bitfield
.imm8
6042 || i
.types
[i
.operands
- 1].bitfield
.imm8
6044 if (i
.imm_operands
== 2)
6048 if (i
.types
[0].bitfield
.imm8
)
6055 if (i
.tm
.opcode_modifier
.evex
)
6057 /* For EVEX instructions, when there are 5 operands, the
6058 first one must be immediate operand. If the second one
6059 is immediate operand, the source operand is the 3th
6060 one. If the last one is immediate operand, the source
6061 operand is the 2nd one. */
6062 gas_assert (i
.imm_operands
== 2
6063 && i
.tm
.opcode_modifier
.sae
6064 && operand_type_check (i
.types
[0], imm
));
6065 if (operand_type_check (i
.types
[1], imm
))
6067 else if (operand_type_check (i
.types
[4], imm
))
6081 /* RC/SAE operand could be between DEST and SRC. That happens
6082 when one operand is GPR and the other one is XMM/YMM/ZMM
6084 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6087 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6089 /* For instructions with VexNDS, the register-only source
6090 operand must be 32/64bit integer, XMM, YMM or ZMM
6091 register. It is encoded in VEX prefix. We need to
6092 clear RegMem bit before calling operand_type_equal. */
6094 i386_operand_type op
;
6097 /* Check register-only source operand when two source
6098 operands are swapped. */
6099 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6100 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6108 op
= i
.tm
.operand_types
[vvvv
];
6109 op
.bitfield
.regmem
= 0;
6110 if ((dest
+ 1) >= i
.operands
6111 || (!op
.bitfield
.reg32
6112 && op
.bitfield
.reg64
6113 && !operand_type_equal (&op
, ®xmm
)
6114 && !operand_type_equal (&op
, ®ymm
)
6115 && !operand_type_equal (&op
, ®zmm
)
6116 && !operand_type_equal (&op
, ®mask
)))
6118 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6124 /* One of the register operands will be encoded in the i.tm.reg
6125 field, the other in the combined i.tm.mode and i.tm.regmem
6126 fields. If no form of this instruction supports a memory
6127 destination operand, then we assume the source operand may
6128 sometimes be a memory operand and so we need to store the
6129 destination in the i.rm.reg field. */
6130 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6131 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6133 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6134 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6135 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6137 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6139 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6141 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6146 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6147 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6148 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6150 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6152 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6154 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6157 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6159 if (!i
.types
[0].bitfield
.control
6160 && !i
.types
[1].bitfield
.control
)
6162 i
.rex
&= ~(REX_R
| REX_B
);
6163 add_prefix (LOCK_PREFIX_OPCODE
);
6167 { /* If it's not 2 reg operands... */
6172 unsigned int fake_zero_displacement
= 0;
6175 for (op
= 0; op
< i
.operands
; op
++)
6176 if (operand_type_check (i
.types
[op
], anymem
))
6178 gas_assert (op
< i
.operands
);
6180 if (i
.tm
.opcode_modifier
.vecsib
)
6182 if (i
.index_reg
->reg_num
== RegEiz
6183 || i
.index_reg
->reg_num
== RegRiz
)
6186 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6189 i
.sib
.base
= NO_BASE_REGISTER
;
6190 i
.sib
.scale
= i
.log2_scale_factor
;
6191 /* No Vec_Disp8 if there is no base. */
6192 i
.types
[op
].bitfield
.vec_disp8
= 0;
6193 i
.types
[op
].bitfield
.disp8
= 0;
6194 i
.types
[op
].bitfield
.disp16
= 0;
6195 i
.types
[op
].bitfield
.disp64
= 0;
6196 if (flag_code
!= CODE_64BIT
)
6198 /* Must be 32 bit */
6199 i
.types
[op
].bitfield
.disp32
= 1;
6200 i
.types
[op
].bitfield
.disp32s
= 0;
6204 i
.types
[op
].bitfield
.disp32
= 0;
6205 i
.types
[op
].bitfield
.disp32s
= 1;
6208 i
.sib
.index
= i
.index_reg
->reg_num
;
6209 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6211 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6217 if (i
.base_reg
== 0)
6220 if (!i
.disp_operands
)
6222 fake_zero_displacement
= 1;
6223 /* Instructions with VSIB byte need 32bit displacement
6224 if there is no base register. */
6225 if (i
.tm
.opcode_modifier
.vecsib
)
6226 i
.types
[op
].bitfield
.disp32
= 1;
6228 if (i
.index_reg
== 0)
6230 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6231 /* Operand is just <disp> */
6232 if (flag_code
== CODE_64BIT
)
6234 /* 64bit mode overwrites the 32bit absolute
6235 addressing by RIP relative addressing and
6236 absolute addressing is encoded by one of the
6237 redundant SIB forms. */
6238 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6239 i
.sib
.base
= NO_BASE_REGISTER
;
6240 i
.sib
.index
= NO_INDEX_REGISTER
;
6241 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
6242 ? disp32s
: disp32
);
6244 else if ((flag_code
== CODE_16BIT
)
6245 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6247 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6248 i
.types
[op
] = disp16
;
6252 i
.rm
.regmem
= NO_BASE_REGISTER
;
6253 i
.types
[op
] = disp32
;
6256 else if (!i
.tm
.opcode_modifier
.vecsib
)
6258 /* !i.base_reg && i.index_reg */
6259 if (i
.index_reg
->reg_num
== RegEiz
6260 || i
.index_reg
->reg_num
== RegRiz
)
6261 i
.sib
.index
= NO_INDEX_REGISTER
;
6263 i
.sib
.index
= i
.index_reg
->reg_num
;
6264 i
.sib
.base
= NO_BASE_REGISTER
;
6265 i
.sib
.scale
= i
.log2_scale_factor
;
6266 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6267 /* No Vec_Disp8 if there is no base. */
6268 i
.types
[op
].bitfield
.vec_disp8
= 0;
6269 i
.types
[op
].bitfield
.disp8
= 0;
6270 i
.types
[op
].bitfield
.disp16
= 0;
6271 i
.types
[op
].bitfield
.disp64
= 0;
6272 if (flag_code
!= CODE_64BIT
)
6274 /* Must be 32 bit */
6275 i
.types
[op
].bitfield
.disp32
= 1;
6276 i
.types
[op
].bitfield
.disp32s
= 0;
6280 i
.types
[op
].bitfield
.disp32
= 0;
6281 i
.types
[op
].bitfield
.disp32s
= 1;
6283 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6287 /* RIP addressing for 64bit mode. */
6288 else if (i
.base_reg
->reg_num
== RegRip
||
6289 i
.base_reg
->reg_num
== RegEip
)
6291 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6292 i
.rm
.regmem
= NO_BASE_REGISTER
;
6293 i
.types
[op
].bitfield
.disp8
= 0;
6294 i
.types
[op
].bitfield
.disp16
= 0;
6295 i
.types
[op
].bitfield
.disp32
= 0;
6296 i
.types
[op
].bitfield
.disp32s
= 1;
6297 i
.types
[op
].bitfield
.disp64
= 0;
6298 i
.types
[op
].bitfield
.vec_disp8
= 0;
6299 i
.flags
[op
] |= Operand_PCrel
;
6300 if (! i
.disp_operands
)
6301 fake_zero_displacement
= 1;
6303 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
6305 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6306 switch (i
.base_reg
->reg_num
)
6309 if (i
.index_reg
== 0)
6311 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6312 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6316 if (i
.index_reg
== 0)
6319 if (operand_type_check (i
.types
[op
], disp
) == 0)
6321 /* fake (%bp) into 0(%bp) */
6322 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6323 i
.types
[op
].bitfield
.vec_disp8
= 1;
6325 i
.types
[op
].bitfield
.disp8
= 1;
6326 fake_zero_displacement
= 1;
6329 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6330 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6332 default: /* (%si) -> 4 or (%di) -> 5 */
6333 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6335 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6337 else /* i.base_reg and 32/64 bit mode */
6339 if (flag_code
== CODE_64BIT
6340 && operand_type_check (i
.types
[op
], disp
))
6342 i386_operand_type temp
;
6343 operand_type_set (&temp
, 0);
6344 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
6345 temp
.bitfield
.vec_disp8
6346 = i
.types
[op
].bitfield
.vec_disp8
;
6348 if (i
.prefix
[ADDR_PREFIX
] == 0)
6349 i
.types
[op
].bitfield
.disp32s
= 1;
6351 i
.types
[op
].bitfield
.disp32
= 1;
6354 if (!i
.tm
.opcode_modifier
.vecsib
)
6355 i
.rm
.regmem
= i
.base_reg
->reg_num
;
6356 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
6358 i
.sib
.base
= i
.base_reg
->reg_num
;
6359 /* x86-64 ignores REX prefix bit here to avoid decoder
6361 if (!(i
.base_reg
->reg_flags
& RegRex
)
6362 && (i
.base_reg
->reg_num
== EBP_REG_NUM
6363 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
6365 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
6367 fake_zero_displacement
= 1;
6368 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6369 i
.types
[op
].bitfield
.vec_disp8
= 1;
6371 i
.types
[op
].bitfield
.disp8
= 1;
6373 i
.sib
.scale
= i
.log2_scale_factor
;
6374 if (i
.index_reg
== 0)
6376 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6377 /* <disp>(%esp) becomes two byte modrm with no index
6378 register. We've already stored the code for esp
6379 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6380 Any base register besides %esp will not use the
6381 extra modrm byte. */
6382 i
.sib
.index
= NO_INDEX_REGISTER
;
6384 else if (!i
.tm
.opcode_modifier
.vecsib
)
6386 if (i
.index_reg
->reg_num
== RegEiz
6387 || i
.index_reg
->reg_num
== RegRiz
)
6388 i
.sib
.index
= NO_INDEX_REGISTER
;
6390 i
.sib
.index
= i
.index_reg
->reg_num
;
6391 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6392 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6397 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6398 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
6402 if (!fake_zero_displacement
6406 fake_zero_displacement
= 1;
6407 if (i
.disp_encoding
== disp_encoding_8bit
)
6408 i
.types
[op
].bitfield
.disp8
= 1;
6410 i
.types
[op
].bitfield
.disp32
= 1;
6412 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6416 if (fake_zero_displacement
)
6418 /* Fakes a zero displacement assuming that i.types[op]
6419 holds the correct displacement size. */
6422 gas_assert (i
.op
[op
].disps
== 0);
6423 exp
= &disp_expressions
[i
.disp_operands
++];
6424 i
.op
[op
].disps
= exp
;
6425 exp
->X_op
= O_constant
;
6426 exp
->X_add_number
= 0;
6427 exp
->X_add_symbol
= (symbolS
*) 0;
6428 exp
->X_op_symbol
= (symbolS
*) 0;
6436 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
6438 if (operand_type_check (i
.types
[0], imm
))
6439 i
.vex
.register_specifier
= NULL
;
6442 /* VEX.vvvv encodes one of the sources when the first
6443 operand is not an immediate. */
6444 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6445 i
.vex
.register_specifier
= i
.op
[0].regs
;
6447 i
.vex
.register_specifier
= i
.op
[1].regs
;
6450 /* Destination is a XMM register encoded in the ModRM.reg
6452 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
6453 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
6456 /* ModRM.rm and VEX.B encodes the other source. */
6457 if (!i
.mem_operands
)
6461 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6462 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6464 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
6466 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6470 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
6472 i
.vex
.register_specifier
= i
.op
[2].regs
;
6473 if (!i
.mem_operands
)
6476 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6477 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6481 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6482 (if any) based on i.tm.extension_opcode. Again, we must be
6483 careful to make sure that segment/control/debug/test/MMX
6484 registers are coded into the i.rm.reg field. */
6485 else if (i
.reg_operands
)
6488 unsigned int vex_reg
= ~0;
6490 for (op
= 0; op
< i
.operands
; op
++)
6491 if (i
.types
[op
].bitfield
.reg8
6492 || i
.types
[op
].bitfield
.reg16
6493 || i
.types
[op
].bitfield
.reg32
6494 || i
.types
[op
].bitfield
.reg64
6495 || i
.types
[op
].bitfield
.regmmx
6496 || i
.types
[op
].bitfield
.regxmm
6497 || i
.types
[op
].bitfield
.regymm
6498 || i
.types
[op
].bitfield
.regbnd
6499 || i
.types
[op
].bitfield
.regzmm
6500 || i
.types
[op
].bitfield
.regmask
6501 || i
.types
[op
].bitfield
.sreg2
6502 || i
.types
[op
].bitfield
.sreg3
6503 || i
.types
[op
].bitfield
.control
6504 || i
.types
[op
].bitfield
.debug
6505 || i
.types
[op
].bitfield
.test
)
6510 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6512 /* For instructions with VexNDS, the register-only
6513 source operand is encoded in VEX prefix. */
6514 gas_assert (mem
!= (unsigned int) ~0);
6519 gas_assert (op
< i
.operands
);
6523 /* Check register-only source operand when two source
6524 operands are swapped. */
6525 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
6526 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
6530 gas_assert (mem
== (vex_reg
+ 1)
6531 && op
< i
.operands
);
6536 gas_assert (vex_reg
< i
.operands
);
6540 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
6542 /* For instructions with VexNDD, the register destination
6543 is encoded in VEX prefix. */
6544 if (i
.mem_operands
== 0)
6546 /* There is no memory operand. */
6547 gas_assert ((op
+ 2) == i
.operands
);
6552 /* There are only 2 operands. */
6553 gas_assert (op
< 2 && i
.operands
== 2);
6558 gas_assert (op
< i
.operands
);
6560 if (vex_reg
!= (unsigned int) ~0)
6562 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
6564 if (type
->bitfield
.reg32
!= 1
6565 && type
->bitfield
.reg64
!= 1
6566 && !operand_type_equal (type
, ®xmm
)
6567 && !operand_type_equal (type
, ®ymm
)
6568 && !operand_type_equal (type
, ®zmm
)
6569 && !operand_type_equal (type
, ®mask
))
6572 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
6575 /* Don't set OP operand twice. */
6578 /* If there is an extension opcode to put here, the
6579 register number must be put into the regmem field. */
6580 if (i
.tm
.extension_opcode
!= None
)
6582 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
6583 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6585 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6590 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
6591 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6593 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6598 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6599 must set it to 3 to indicate this is a register operand
6600 in the regmem field. */
6601 if (!i
.mem_operands
)
6605 /* Fill in i.rm.reg field with extension opcode (if any). */
6606 if (i
.tm
.extension_opcode
!= None
)
6607 i
.rm
.reg
= i
.tm
.extension_opcode
;
6613 output_branch (void)
6619 relax_substateT subtype
;
6623 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
6624 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
6627 if (i
.prefix
[DATA_PREFIX
] != 0)
6633 /* Pentium4 branch hints. */
6634 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6635 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6640 if (i
.prefix
[REX_PREFIX
] != 0)
6646 /* BND prefixed jump. */
6647 if (i
.prefix
[BND_PREFIX
] != 0)
6649 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6653 if (i
.prefixes
!= 0 && !intel_syntax
)
6654 as_warn (_("skipping prefixes on this instruction"));
6656 /* It's always a symbol; End frag & setup for relax.
6657 Make sure there is enough room in this frag for the largest
6658 instruction we may generate in md_convert_frag. This is 2
6659 bytes for the opcode and room for the prefix and largest
6661 frag_grow (prefix
+ 2 + 4);
6662 /* Prefix and 1 opcode byte go in fr_fix. */
6663 p
= frag_more (prefix
+ 1);
6664 if (i
.prefix
[DATA_PREFIX
] != 0)
6665 *p
++ = DATA_PREFIX_OPCODE
;
6666 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
6667 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
6668 *p
++ = i
.prefix
[SEG_PREFIX
];
6669 if (i
.prefix
[REX_PREFIX
] != 0)
6670 *p
++ = i
.prefix
[REX_PREFIX
];
6671 *p
= i
.tm
.base_opcode
;
6673 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
6674 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
6675 else if (cpu_arch_flags
.bitfield
.cpui386
)
6676 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
6678 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
6681 sym
= i
.op
[0].disps
->X_add_symbol
;
6682 off
= i
.op
[0].disps
->X_add_number
;
6684 if (i
.op
[0].disps
->X_op
!= O_constant
6685 && i
.op
[0].disps
->X_op
!= O_symbol
)
6687 /* Handle complex expressions. */
6688 sym
= make_expr_symbol (i
.op
[0].disps
);
6692 /* 1 possible extra opcode + 4 byte displacement go in var part.
6693 Pass reloc in fr_var. */
6694 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
6704 if (i
.tm
.opcode_modifier
.jumpbyte
)
6706 /* This is a loop or jecxz type instruction. */
6708 if (i
.prefix
[ADDR_PREFIX
] != 0)
6710 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
6713 /* Pentium4 branch hints. */
6714 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6715 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6717 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
6726 if (flag_code
== CODE_16BIT
)
6729 if (i
.prefix
[DATA_PREFIX
] != 0)
6731 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
6741 if (i
.prefix
[REX_PREFIX
] != 0)
6743 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
6747 /* BND prefixed jump. */
6748 if (i
.prefix
[BND_PREFIX
] != 0)
6750 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6754 if (i
.prefixes
!= 0 && !intel_syntax
)
6755 as_warn (_("skipping prefixes on this instruction"));
6757 p
= frag_more (i
.tm
.opcode_length
+ size
);
6758 switch (i
.tm
.opcode_length
)
6761 *p
++ = i
.tm
.base_opcode
>> 8;
6763 *p
++ = i
.tm
.base_opcode
;
6769 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6770 i
.op
[0].disps
, 1, reloc (size
, 1, 1, i
.reloc
[0]));
6772 /* All jumps handled here are signed, but don't use a signed limit
6773 check for 32 and 16 bit jumps as we want to allow wrap around at
6774 4G and 64k respectively. */
6776 fixP
->fx_signed
= 1;
6780 output_interseg_jump (void)
6788 if (flag_code
== CODE_16BIT
)
6792 if (i
.prefix
[DATA_PREFIX
] != 0)
6798 if (i
.prefix
[REX_PREFIX
] != 0)
6808 if (i
.prefixes
!= 0 && !intel_syntax
)
6809 as_warn (_("skipping prefixes on this instruction"));
6811 /* 1 opcode; 2 segment; offset */
6812 p
= frag_more (prefix
+ 1 + 2 + size
);
6814 if (i
.prefix
[DATA_PREFIX
] != 0)
6815 *p
++ = DATA_PREFIX_OPCODE
;
6817 if (i
.prefix
[REX_PREFIX
] != 0)
6818 *p
++ = i
.prefix
[REX_PREFIX
];
6820 *p
++ = i
.tm
.base_opcode
;
6821 if (i
.op
[1].imms
->X_op
== O_constant
)
6823 offsetT n
= i
.op
[1].imms
->X_add_number
;
6826 && !fits_in_unsigned_word (n
)
6827 && !fits_in_signed_word (n
))
6829 as_bad (_("16-bit jump out of range"));
6832 md_number_to_chars (p
, n
, size
);
6835 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6836 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
6837 if (i
.op
[0].imms
->X_op
!= O_constant
)
6838 as_bad (_("can't handle non absolute segment in `%s'"),
6840 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
6846 fragS
*insn_start_frag
;
6847 offsetT insn_start_off
;
6849 /* Tie dwarf2 debug info to the address at the start of the insn.
6850 We can't do this after the insn has been output as the current
6851 frag may have been closed off. eg. by frag_var. */
6852 dwarf2_emit_insn (0);
6854 insn_start_frag
= frag_now
;
6855 insn_start_off
= frag_now_fix ();
6858 if (i
.tm
.opcode_modifier
.jump
)
6860 else if (i
.tm
.opcode_modifier
.jumpbyte
6861 || i
.tm
.opcode_modifier
.jumpdword
)
6863 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
6864 output_interseg_jump ();
6867 /* Output normal instructions here. */
6871 unsigned int prefix
;
6873 /* Some processors fail on LOCK prefix. This options makes
6874 assembler ignore LOCK prefix and serves as a workaround. */
6875 if (omit_lock_prefix
)
6877 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
6879 i
.prefix
[LOCK_PREFIX
] = 0;
6882 /* Since the VEX/EVEX prefix contains the implicit prefix, we
6883 don't need the explicit prefix. */
6884 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
6886 switch (i
.tm
.opcode_length
)
6889 if (i
.tm
.base_opcode
& 0xff000000)
6891 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
6896 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
6898 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
6899 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
6902 if (prefix
!= REPE_PREFIX_OPCODE
6903 || (i
.prefix
[REP_PREFIX
]
6904 != REPE_PREFIX_OPCODE
))
6905 add_prefix (prefix
);
6908 add_prefix (prefix
);
6917 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6918 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
6919 R_X86_64_GOTTPOFF relocation so that linker can safely
6920 perform IE->LE optimization. */
6921 if (x86_elf_abi
== X86_64_X32_ABI
6923 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
6924 && i
.prefix
[REX_PREFIX
] == 0)
6925 add_prefix (REX_OPCODE
);
6928 /* The prefix bytes. */
6929 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
6931 FRAG_APPEND_1_CHAR (*q
);
6935 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
6940 /* REX byte is encoded in VEX prefix. */
6944 FRAG_APPEND_1_CHAR (*q
);
6947 /* There should be no other prefixes for instructions
6952 /* For EVEX instructions i.vrex should become 0 after
6953 build_evex_prefix. For VEX instructions upper 16 registers
6954 aren't available, so VREX should be 0. */
6957 /* Now the VEX prefix. */
6958 p
= frag_more (i
.vex
.length
);
6959 for (j
= 0; j
< i
.vex
.length
; j
++)
6960 p
[j
] = i
.vex
.bytes
[j
];
6963 /* Now the opcode; be careful about word order here! */
6964 if (i
.tm
.opcode_length
== 1)
6966 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
6970 switch (i
.tm
.opcode_length
)
6974 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
6975 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
6979 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
6989 /* Put out high byte first: can't use md_number_to_chars! */
6990 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
6991 *p
= i
.tm
.base_opcode
& 0xff;
6994 /* Now the modrm byte and sib byte (if present). */
6995 if (i
.tm
.opcode_modifier
.modrm
)
6997 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
7000 /* If i.rm.regmem == ESP (4)
7001 && i.rm.mode != (Register mode)
7003 ==> need second modrm byte. */
7004 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
7006 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
7007 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7009 | i
.sib
.scale
<< 6));
7012 if (i
.disp_operands
)
7013 output_disp (insn_start_frag
, insn_start_off
);
7016 output_imm (insn_start_frag
, insn_start_off
);
7022 pi ("" /*line*/, &i
);
7024 #endif /* DEBUG386 */
7027 /* Return the size of the displacement operand N. */
7030 disp_size (unsigned int n
)
7034 /* Vec_Disp8 has to be 8bit. */
7035 if (i
.types
[n
].bitfield
.vec_disp8
)
7037 else if (i
.types
[n
].bitfield
.disp64
)
7039 else if (i
.types
[n
].bitfield
.disp8
)
7041 else if (i
.types
[n
].bitfield
.disp16
)
7046 /* Return the size of the immediate operand N. */
7049 imm_size (unsigned int n
)
7052 if (i
.types
[n
].bitfield
.imm64
)
7054 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
7056 else if (i
.types
[n
].bitfield
.imm16
)
7062 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
7067 for (n
= 0; n
< i
.operands
; n
++)
7069 if (i
.types
[n
].bitfield
.vec_disp8
7070 || operand_type_check (i
.types
[n
], disp
))
7072 if (i
.op
[n
].disps
->X_op
== O_constant
)
7074 int size
= disp_size (n
);
7075 offsetT val
= i
.op
[n
].disps
->X_add_number
;
7077 if (i
.types
[n
].bitfield
.vec_disp8
)
7079 val
= offset_in_range (val
, size
);
7080 p
= frag_more (size
);
7081 md_number_to_chars (p
, val
, size
);
7085 enum bfd_reloc_code_real reloc_type
;
7086 int size
= disp_size (n
);
7087 int sign
= i
.types
[n
].bitfield
.disp32s
;
7088 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
7090 /* We can't have 8 bit displacement here. */
7091 gas_assert (!i
.types
[n
].bitfield
.disp8
);
7093 /* The PC relative address is computed relative
7094 to the instruction boundary, so in case immediate
7095 fields follows, we need to adjust the value. */
7096 if (pcrel
&& i
.imm_operands
)
7101 for (n1
= 0; n1
< i
.operands
; n1
++)
7102 if (operand_type_check (i
.types
[n1
], imm
))
7104 /* Only one immediate is allowed for PC
7105 relative address. */
7106 gas_assert (sz
== 0);
7108 i
.op
[n
].disps
->X_add_number
-= sz
;
7110 /* We should find the immediate. */
7111 gas_assert (sz
!= 0);
7114 p
= frag_more (size
);
7115 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
7117 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
7118 && (((reloc_type
== BFD_RELOC_32
7119 || reloc_type
== BFD_RELOC_X86_64_32S
7120 || (reloc_type
== BFD_RELOC_64
7122 && (i
.op
[n
].disps
->X_op
== O_symbol
7123 || (i
.op
[n
].disps
->X_op
== O_add
7124 && ((symbol_get_value_expression
7125 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
7127 || reloc_type
== BFD_RELOC_32_PCREL
))
7131 if (insn_start_frag
== frag_now
)
7132 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7137 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7138 for (fr
= insn_start_frag
->fr_next
;
7139 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7141 add
+= p
- frag_now
->fr_literal
;
7146 reloc_type
= BFD_RELOC_386_GOTPC
;
7147 i
.op
[n
].imms
->X_add_number
+= add
;
7149 else if (reloc_type
== BFD_RELOC_64
)
7150 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7152 /* Don't do the adjustment for x86-64, as there
7153 the pcrel addressing is relative to the _next_
7154 insn, and that is taken care of in other code. */
7155 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7157 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7158 i
.op
[n
].disps
, pcrel
, reloc_type
);
7165 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
7170 for (n
= 0; n
< i
.operands
; n
++)
7172 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7173 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
7176 if (operand_type_check (i
.types
[n
], imm
))
7178 if (i
.op
[n
].imms
->X_op
== O_constant
)
7180 int size
= imm_size (n
);
7183 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
7185 p
= frag_more (size
);
7186 md_number_to_chars (p
, val
, size
);
7190 /* Not absolute_section.
7191 Need a 32-bit fixup (don't support 8bit
7192 non-absolute imms). Try to support other
7194 enum bfd_reloc_code_real reloc_type
;
7195 int size
= imm_size (n
);
7198 if (i
.types
[n
].bitfield
.imm32s
7199 && (i
.suffix
== QWORD_MNEM_SUFFIX
7200 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
7205 p
= frag_more (size
);
7206 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
7208 /* This is tough to explain. We end up with this one if we
7209 * have operands that look like
7210 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7211 * obtain the absolute address of the GOT, and it is strongly
7212 * preferable from a performance point of view to avoid using
7213 * a runtime relocation for this. The actual sequence of
7214 * instructions often look something like:
7219 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7221 * The call and pop essentially return the absolute address
7222 * of the label .L66 and store it in %ebx. The linker itself
7223 * will ultimately change the first operand of the addl so
7224 * that %ebx points to the GOT, but to keep things simple, the
7225 * .o file must have this operand set so that it generates not
7226 * the absolute address of .L66, but the absolute address of
7227 * itself. This allows the linker itself simply treat a GOTPC
7228 * relocation as asking for a pcrel offset to the GOT to be
7229 * added in, and the addend of the relocation is stored in the
7230 * operand field for the instruction itself.
7232 * Our job here is to fix the operand so that it would add
7233 * the correct offset so that %ebx would point to itself. The
7234 * thing that is tricky is that .-.L66 will point to the
7235 * beginning of the instruction, so we need to further modify
7236 * the operand so that it will point to itself. There are
7237 * other cases where you have something like:
7239 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7241 * and here no correction would be required. Internally in
7242 * the assembler we treat operands of this form as not being
7243 * pcrel since the '.' is explicitly mentioned, and I wonder
7244 * whether it would simplify matters to do it this way. Who
7245 * knows. In earlier versions of the PIC patches, the
7246 * pcrel_adjust field was used to store the correction, but
7247 * since the expression is not pcrel, I felt it would be
7248 * confusing to do it this way. */
7250 if ((reloc_type
== BFD_RELOC_32
7251 || reloc_type
== BFD_RELOC_X86_64_32S
7252 || reloc_type
== BFD_RELOC_64
)
7254 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
7255 && (i
.op
[n
].imms
->X_op
== O_symbol
7256 || (i
.op
[n
].imms
->X_op
== O_add
7257 && ((symbol_get_value_expression
7258 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
7263 if (insn_start_frag
== frag_now
)
7264 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7269 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7270 for (fr
= insn_start_frag
->fr_next
;
7271 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7273 add
+= p
- frag_now
->fr_literal
;
7277 reloc_type
= BFD_RELOC_386_GOTPC
;
7279 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7281 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7282 i
.op
[n
].imms
->X_add_number
+= add
;
7284 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7285 i
.op
[n
].imms
, 0, reloc_type
);
7291 /* x86_cons_fix_new is called via the expression parsing code when a
7292 reloc is needed. We use this hook to get the correct .got reloc. */
7293 static int cons_sign
= -1;
7296 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
7297 expressionS
*exp
, bfd_reloc_code_real_type r
)
7299 r
= reloc (len
, 0, cons_sign
, r
);
7302 if (exp
->X_op
== O_secrel
)
7304 exp
->X_op
= O_symbol
;
7305 r
= BFD_RELOC_32_SECREL
;
7309 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
7312 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7313 purpose of the `.dc.a' internal pseudo-op. */
7316 x86_address_bytes (void)
7318 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
7320 return stdoutput
->arch_info
->bits_per_address
/ 8;
7323 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7325 # define lex_got(reloc, adjust, types) NULL
7327 /* Parse operands of the form
7328 <symbol>@GOTOFF+<nnn>
7329 and similar .plt or .got references.
7331 If we find one, set up the correct relocation in RELOC and copy the
7332 input string, minus the `@GOTOFF' into a malloc'd buffer for
7333 parsing by the calling routine. Return this buffer, and if ADJUST
7334 is non-null set it to the length of the string we removed from the
7335 input line. Otherwise return NULL. */
7337 lex_got (enum bfd_reloc_code_real
*rel
,
7339 i386_operand_type
*types
)
7341 /* Some of the relocations depend on the size of what field is to
7342 be relocated. But in our callers i386_immediate and i386_displacement
7343 we don't yet know the operand size (this will be set by insn
7344 matching). Hence we record the word32 relocation here,
7345 and adjust the reloc according to the real size in reloc(). */
7346 static const struct {
7349 const enum bfd_reloc_code_real rel
[2];
7350 const i386_operand_type types64
;
7352 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7353 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
7355 OPERAND_TYPE_IMM32_64
},
7357 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
7358 BFD_RELOC_X86_64_PLTOFF64
},
7359 OPERAND_TYPE_IMM64
},
7360 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
7361 BFD_RELOC_X86_64_PLT32
},
7362 OPERAND_TYPE_IMM32_32S_DISP32
},
7363 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
7364 BFD_RELOC_X86_64_GOTPLT64
},
7365 OPERAND_TYPE_IMM64_DISP64
},
7366 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
7367 BFD_RELOC_X86_64_GOTOFF64
},
7368 OPERAND_TYPE_IMM64_DISP64
},
7369 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
7370 BFD_RELOC_X86_64_GOTPCREL
},
7371 OPERAND_TYPE_IMM32_32S_DISP32
},
7372 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
7373 BFD_RELOC_X86_64_TLSGD
},
7374 OPERAND_TYPE_IMM32_32S_DISP32
},
7375 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
7376 _dummy_first_bfd_reloc_code_real
},
7377 OPERAND_TYPE_NONE
},
7378 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
7379 BFD_RELOC_X86_64_TLSLD
},
7380 OPERAND_TYPE_IMM32_32S_DISP32
},
7381 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
7382 BFD_RELOC_X86_64_GOTTPOFF
},
7383 OPERAND_TYPE_IMM32_32S_DISP32
},
7384 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
7385 BFD_RELOC_X86_64_TPOFF32
},
7386 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7387 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
7388 _dummy_first_bfd_reloc_code_real
},
7389 OPERAND_TYPE_NONE
},
7390 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
7391 BFD_RELOC_X86_64_DTPOFF32
},
7392 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7393 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
7394 _dummy_first_bfd_reloc_code_real
},
7395 OPERAND_TYPE_NONE
},
7396 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
7397 _dummy_first_bfd_reloc_code_real
},
7398 OPERAND_TYPE_NONE
},
7399 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
7400 BFD_RELOC_X86_64_GOT32
},
7401 OPERAND_TYPE_IMM32_32S_64_DISP32
},
7402 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
7403 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
7404 OPERAND_TYPE_IMM32_32S_DISP32
},
7405 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
7406 BFD_RELOC_X86_64_TLSDESC_CALL
},
7407 OPERAND_TYPE_IMM32_32S_DISP32
},
7412 #if defined (OBJ_MAYBE_ELF)
7417 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7418 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7421 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7423 int len
= gotrel
[j
].len
;
7424 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7426 if (gotrel
[j
].rel
[object_64bit
] != 0)
7429 char *tmpbuf
, *past_reloc
;
7431 *rel
= gotrel
[j
].rel
[object_64bit
];
7435 if (flag_code
!= CODE_64BIT
)
7437 types
->bitfield
.imm32
= 1;
7438 types
->bitfield
.disp32
= 1;
7441 *types
= gotrel
[j
].types64
;
7444 if (j
!= 0 && GOT_symbol
== NULL
)
7445 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
7447 /* The length of the first part of our input line. */
7448 first
= cp
- input_line_pointer
;
7450 /* The second part goes from after the reloc token until
7451 (and including) an end_of_line char or comma. */
7452 past_reloc
= cp
+ 1 + len
;
7454 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7456 second
= cp
+ 1 - past_reloc
;
7458 /* Allocate and copy string. The trailing NUL shouldn't
7459 be necessary, but be safe. */
7460 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7461 memcpy (tmpbuf
, input_line_pointer
, first
);
7462 if (second
!= 0 && *past_reloc
!= ' ')
7463 /* Replace the relocation token with ' ', so that
7464 errors like foo@GOTOFF1 will be detected. */
7465 tmpbuf
[first
++] = ' ';
7467 /* Increment length by 1 if the relocation token is
7472 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7473 tmpbuf
[first
+ second
] = '\0';
7477 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7478 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7483 /* Might be a symbol version string. Don't as_bad here. */
7492 /* Parse operands of the form
7493 <symbol>@SECREL32+<nnn>
7495 If we find one, set up the correct relocation in RELOC and copy the
7496 input string, minus the `@SECREL32' into a malloc'd buffer for
7497 parsing by the calling routine. Return this buffer, and if ADJUST
7498 is non-null set it to the length of the string we removed from the
7499 input line. Otherwise return NULL.
7501 This function is copied from the ELF version above adjusted for PE targets. */
7504 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
7505 int *adjust ATTRIBUTE_UNUSED
,
7506 i386_operand_type
*types
)
7512 const enum bfd_reloc_code_real rel
[2];
7513 const i386_operand_type types64
;
7517 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
7518 BFD_RELOC_32_SECREL
},
7519 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7525 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7526 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7529 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7531 int len
= gotrel
[j
].len
;
7533 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7535 if (gotrel
[j
].rel
[object_64bit
] != 0)
7538 char *tmpbuf
, *past_reloc
;
7540 *rel
= gotrel
[j
].rel
[object_64bit
];
7546 if (flag_code
!= CODE_64BIT
)
7548 types
->bitfield
.imm32
= 1;
7549 types
->bitfield
.disp32
= 1;
7552 *types
= gotrel
[j
].types64
;
7555 /* The length of the first part of our input line. */
7556 first
= cp
- input_line_pointer
;
7558 /* The second part goes from after the reloc token until
7559 (and including) an end_of_line char or comma. */
7560 past_reloc
= cp
+ 1 + len
;
7562 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7564 second
= cp
+ 1 - past_reloc
;
7566 /* Allocate and copy string. The trailing NUL shouldn't
7567 be necessary, but be safe. */
7568 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7569 memcpy (tmpbuf
, input_line_pointer
, first
);
7570 if (second
!= 0 && *past_reloc
!= ' ')
7571 /* Replace the relocation token with ' ', so that
7572 errors like foo@SECLREL321 will be detected. */
7573 tmpbuf
[first
++] = ' ';
7574 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7575 tmpbuf
[first
+ second
] = '\0';
7579 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7580 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7585 /* Might be a symbol version string. Don't as_bad here. */
7591 bfd_reloc_code_real_type
7592 x86_cons (expressionS
*exp
, int size
)
7594 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
7596 intel_syntax
= -intel_syntax
;
7599 if (size
== 4 || (object_64bit
&& size
== 8))
7601 /* Handle @GOTOFF and the like in an expression. */
7603 char *gotfree_input_line
;
7606 save
= input_line_pointer
;
7607 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
7608 if (gotfree_input_line
)
7609 input_line_pointer
= gotfree_input_line
;
7613 if (gotfree_input_line
)
7615 /* expression () has merrily parsed up to the end of line,
7616 or a comma - in the wrong buffer. Transfer how far
7617 input_line_pointer has moved to the right buffer. */
7618 input_line_pointer
= (save
7619 + (input_line_pointer
- gotfree_input_line
)
7621 free (gotfree_input_line
);
7622 if (exp
->X_op
== O_constant
7623 || exp
->X_op
== O_absent
7624 || exp
->X_op
== O_illegal
7625 || exp
->X_op
== O_register
7626 || exp
->X_op
== O_big
)
7628 char c
= *input_line_pointer
;
7629 *input_line_pointer
= 0;
7630 as_bad (_("missing or invalid expression `%s'"), save
);
7631 *input_line_pointer
= c
;
7638 intel_syntax
= -intel_syntax
;
7641 i386_intel_simplify (exp
);
7647 signed_cons (int size
)
7649 if (flag_code
== CODE_64BIT
)
7657 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
7664 if (exp
.X_op
== O_symbol
)
7665 exp
.X_op
= O_secrel
;
7667 emit_expr (&exp
, 4);
7669 while (*input_line_pointer
++ == ',');
7671 input_line_pointer
--;
7672 demand_empty_rest_of_line ();
7676 /* Handle Vector operations. */
7679 check_VecOperations (char *op_string
, char *op_end
)
7681 const reg_entry
*mask
;
7686 && (op_end
== NULL
|| op_string
< op_end
))
7689 if (*op_string
== '{')
7693 /* Check broadcasts. */
7694 if (strncmp (op_string
, "1to", 3) == 0)
7699 goto duplicated_vec_op
;
7702 if (*op_string
== '8')
7703 bcst_type
= BROADCAST_1TO8
;
7704 else if (*op_string
== '4')
7705 bcst_type
= BROADCAST_1TO4
;
7706 else if (*op_string
== '2')
7707 bcst_type
= BROADCAST_1TO2
;
7708 else if (*op_string
== '1'
7709 && *(op_string
+1) == '6')
7711 bcst_type
= BROADCAST_1TO16
;
7716 as_bad (_("Unsupported broadcast: `%s'"), saved
);
7721 broadcast_op
.type
= bcst_type
;
7722 broadcast_op
.operand
= this_operand
;
7723 i
.broadcast
= &broadcast_op
;
7725 /* Check masking operation. */
7726 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
7728 /* k0 can't be used for write mask. */
7729 if (mask
->reg_num
== 0)
7731 as_bad (_("`%s' can't be used for write mask"),
7738 mask_op
.mask
= mask
;
7739 mask_op
.zeroing
= 0;
7740 mask_op
.operand
= this_operand
;
7746 goto duplicated_vec_op
;
7748 i
.mask
->mask
= mask
;
7750 /* Only "{z}" is allowed here. No need to check
7751 zeroing mask explicitly. */
7752 if (i
.mask
->operand
!= this_operand
)
7754 as_bad (_("invalid write mask `%s'"), saved
);
7761 /* Check zeroing-flag for masking operation. */
7762 else if (*op_string
== 'z')
7766 mask_op
.mask
= NULL
;
7767 mask_op
.zeroing
= 1;
7768 mask_op
.operand
= this_operand
;
7773 if (i
.mask
->zeroing
)
7776 as_bad (_("duplicated `%s'"), saved
);
7780 i
.mask
->zeroing
= 1;
7782 /* Only "{%k}" is allowed here. No need to check mask
7783 register explicitly. */
7784 if (i
.mask
->operand
!= this_operand
)
7786 as_bad (_("invalid zeroing-masking `%s'"),
7795 goto unknown_vec_op
;
7797 if (*op_string
!= '}')
7799 as_bad (_("missing `}' in `%s'"), saved
);
7806 /* We don't know this one. */
7807 as_bad (_("unknown vector operation: `%s'"), saved
);
7815 i386_immediate (char *imm_start
)
7817 char *save_input_line_pointer
;
7818 char *gotfree_input_line
;
7821 i386_operand_type types
;
7823 operand_type_set (&types
, ~0);
7825 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
7827 as_bad (_("at most %d immediate operands are allowed"),
7828 MAX_IMMEDIATE_OPERANDS
);
7832 exp
= &im_expressions
[i
.imm_operands
++];
7833 i
.op
[this_operand
].imms
= exp
;
7835 if (is_space_char (*imm_start
))
7838 save_input_line_pointer
= input_line_pointer
;
7839 input_line_pointer
= imm_start
;
7841 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
7842 if (gotfree_input_line
)
7843 input_line_pointer
= gotfree_input_line
;
7845 exp_seg
= expression (exp
);
7849 /* Handle vector operations. */
7850 if (*input_line_pointer
== '{')
7852 input_line_pointer
= check_VecOperations (input_line_pointer
,
7854 if (input_line_pointer
== NULL
)
7858 if (*input_line_pointer
)
7859 as_bad (_("junk `%s' after expression"), input_line_pointer
);
7861 input_line_pointer
= save_input_line_pointer
;
7862 if (gotfree_input_line
)
7864 free (gotfree_input_line
);
7866 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
7867 exp
->X_op
= O_illegal
;
7870 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
7874 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
7875 i386_operand_type types
, const char *imm_start
)
7877 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
7880 as_bad (_("missing or invalid immediate expression `%s'"),
7884 else if (exp
->X_op
== O_constant
)
7886 /* Size it properly later. */
7887 i
.types
[this_operand
].bitfield
.imm64
= 1;
7888 /* If not 64bit, sign extend val. */
7889 if (flag_code
!= CODE_64BIT
7890 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
7892 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
7894 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7895 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
7896 && exp_seg
!= absolute_section
7897 && exp_seg
!= text_section
7898 && exp_seg
!= data_section
7899 && exp_seg
!= bss_section
7900 && exp_seg
!= undefined_section
7901 && !bfd_is_com_section (exp_seg
))
7903 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
7907 else if (!intel_syntax
&& exp_seg
== reg_section
)
7910 as_bad (_("illegal immediate register operand %s"), imm_start
);
7915 /* This is an address. The size of the address will be
7916 determined later, depending on destination register,
7917 suffix, or the default for the section. */
7918 i
.types
[this_operand
].bitfield
.imm8
= 1;
7919 i
.types
[this_operand
].bitfield
.imm16
= 1;
7920 i
.types
[this_operand
].bitfield
.imm32
= 1;
7921 i
.types
[this_operand
].bitfield
.imm32s
= 1;
7922 i
.types
[this_operand
].bitfield
.imm64
= 1;
7923 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
7931 i386_scale (char *scale
)
7934 char *save
= input_line_pointer
;
7936 input_line_pointer
= scale
;
7937 val
= get_absolute_expression ();
7942 i
.log2_scale_factor
= 0;
7945 i
.log2_scale_factor
= 1;
7948 i
.log2_scale_factor
= 2;
7951 i
.log2_scale_factor
= 3;
7955 char sep
= *input_line_pointer
;
7957 *input_line_pointer
= '\0';
7958 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7960 *input_line_pointer
= sep
;
7961 input_line_pointer
= save
;
7965 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
7967 as_warn (_("scale factor of %d without an index register"),
7968 1 << i
.log2_scale_factor
);
7969 i
.log2_scale_factor
= 0;
7971 scale
= input_line_pointer
;
7972 input_line_pointer
= save
;
7977 i386_displacement (char *disp_start
, char *disp_end
)
7981 char *save_input_line_pointer
;
7982 char *gotfree_input_line
;
7984 i386_operand_type bigdisp
, types
= anydisp
;
7987 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
7989 as_bad (_("at most %d displacement operands are allowed"),
7990 MAX_MEMORY_OPERANDS
);
7994 operand_type_set (&bigdisp
, 0);
7995 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
7996 || (!current_templates
->start
->opcode_modifier
.jump
7997 && !current_templates
->start
->opcode_modifier
.jumpdword
))
7999 bigdisp
.bitfield
.disp32
= 1;
8000 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
8001 if (flag_code
== CODE_64BIT
)
8005 bigdisp
.bitfield
.disp32s
= 1;
8006 bigdisp
.bitfield
.disp64
= 1;
8009 else if ((flag_code
== CODE_16BIT
) ^ override
)
8011 bigdisp
.bitfield
.disp32
= 0;
8012 bigdisp
.bitfield
.disp16
= 1;
8017 /* For PC-relative branches, the width of the displacement
8018 is dependent upon data size, not address size. */
8019 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8020 if (flag_code
== CODE_64BIT
)
8022 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8023 bigdisp
.bitfield
.disp16
= 1;
8026 bigdisp
.bitfield
.disp32
= 1;
8027 bigdisp
.bitfield
.disp32s
= 1;
8033 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8035 : LONG_MNEM_SUFFIX
));
8036 bigdisp
.bitfield
.disp32
= 1;
8037 if ((flag_code
== CODE_16BIT
) ^ override
)
8039 bigdisp
.bitfield
.disp32
= 0;
8040 bigdisp
.bitfield
.disp16
= 1;
8044 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8047 exp
= &disp_expressions
[i
.disp_operands
];
8048 i
.op
[this_operand
].disps
= exp
;
8050 save_input_line_pointer
= input_line_pointer
;
8051 input_line_pointer
= disp_start
;
8052 END_STRING_AND_SAVE (disp_end
);
8054 #ifndef GCC_ASM_O_HACK
8055 #define GCC_ASM_O_HACK 0
8058 END_STRING_AND_SAVE (disp_end
+ 1);
8059 if (i
.types
[this_operand
].bitfield
.baseIndex
8060 && displacement_string_end
[-1] == '+')
8062 /* This hack is to avoid a warning when using the "o"
8063 constraint within gcc asm statements.
8066 #define _set_tssldt_desc(n,addr,limit,type) \
8067 __asm__ __volatile__ ( \
8069 "movw %w1,2+%0\n\t" \
8071 "movb %b1,4+%0\n\t" \
8072 "movb %4,5+%0\n\t" \
8073 "movb $0,6+%0\n\t" \
8074 "movb %h1,7+%0\n\t" \
8076 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8078 This works great except that the output assembler ends
8079 up looking a bit weird if it turns out that there is
8080 no offset. You end up producing code that looks like:
8093 So here we provide the missing zero. */
8095 *displacement_string_end
= '0';
8098 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8099 if (gotfree_input_line
)
8100 input_line_pointer
= gotfree_input_line
;
8102 exp_seg
= expression (exp
);
8105 if (*input_line_pointer
)
8106 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8108 RESTORE_END_STRING (disp_end
+ 1);
8110 input_line_pointer
= save_input_line_pointer
;
8111 if (gotfree_input_line
)
8113 free (gotfree_input_line
);
8115 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8116 exp
->X_op
= O_illegal
;
8119 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8121 RESTORE_END_STRING (disp_end
);
8127 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8128 i386_operand_type types
, const char *disp_start
)
8130 i386_operand_type bigdisp
;
8133 /* We do this to make sure that the section symbol is in
8134 the symbol table. We will ultimately change the relocation
8135 to be relative to the beginning of the section. */
8136 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8137 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8138 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8140 if (exp
->X_op
!= O_symbol
)
8143 if (S_IS_LOCAL (exp
->X_add_symbol
)
8144 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8145 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8146 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8147 exp
->X_op
= O_subtract
;
8148 exp
->X_op_symbol
= GOT_symbol
;
8149 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8150 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8151 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8152 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8154 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8157 else if (exp
->X_op
== O_absent
8158 || exp
->X_op
== O_illegal
8159 || exp
->X_op
== O_big
)
8162 as_bad (_("missing or invalid displacement expression `%s'"),
8167 else if (flag_code
== CODE_64BIT
8168 && !i
.prefix
[ADDR_PREFIX
]
8169 && exp
->X_op
== O_constant
)
8171 /* Since displacement is signed extended to 64bit, don't allow
8172 disp32 and turn off disp32s if they are out of range. */
8173 i
.types
[this_operand
].bitfield
.disp32
= 0;
8174 if (!fits_in_signed_long (exp
->X_add_number
))
8176 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8177 if (i
.types
[this_operand
].bitfield
.baseindex
)
8179 as_bad (_("0x%lx out range of signed 32bit displacement"),
8180 (long) exp
->X_add_number
);
8186 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8187 else if (exp
->X_op
!= O_constant
8188 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8189 && exp_seg
!= absolute_section
8190 && exp_seg
!= text_section
8191 && exp_seg
!= data_section
8192 && exp_seg
!= bss_section
8193 && exp_seg
!= undefined_section
8194 && !bfd_is_com_section (exp_seg
))
8196 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8201 /* Check if this is a displacement only operand. */
8202 bigdisp
= i
.types
[this_operand
];
8203 bigdisp
.bitfield
.disp8
= 0;
8204 bigdisp
.bitfield
.disp16
= 0;
8205 bigdisp
.bitfield
.disp32
= 0;
8206 bigdisp
.bitfield
.disp32s
= 0;
8207 bigdisp
.bitfield
.disp64
= 0;
8208 if (operand_type_all_zero (&bigdisp
))
8209 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8215 /* Make sure the memory operand we've been dealt is valid.
8216 Return 1 on success, 0 on a failure. */
8219 i386_index_check (const char *operand_string
)
8221 const char *kind
= "base/index";
8222 enum flag_code addr_mode
;
8224 if (i
.prefix
[ADDR_PREFIX
])
8225 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8228 addr_mode
= flag_code
;
8230 #if INFER_ADDR_PREFIX
8231 if (i
.mem_operands
== 0)
8233 /* Infer address prefix from the first memory operand. */
8234 const reg_entry
*addr_reg
= i
.base_reg
;
8236 if (addr_reg
== NULL
)
8237 addr_reg
= i
.index_reg
;
8241 if (addr_reg
->reg_num
== RegEip
8242 || addr_reg
->reg_num
== RegEiz
8243 || addr_reg
->reg_type
.bitfield
.reg32
)
8244 addr_mode
= CODE_32BIT
;
8245 else if (flag_code
!= CODE_64BIT
8246 && addr_reg
->reg_type
.bitfield
.reg16
)
8247 addr_mode
= CODE_16BIT
;
8249 if (addr_mode
!= flag_code
)
8251 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
8253 /* Change the size of any displacement too. At most one
8254 of Disp16 or Disp32 is set.
8255 FIXME. There doesn't seem to be any real need for
8256 separate Disp16 and Disp32 flags. The same goes for
8257 Imm16 and Imm32. Removing them would probably clean
8258 up the code quite a lot. */
8259 if (flag_code
!= CODE_64BIT
8260 && (i
.types
[this_operand
].bitfield
.disp16
8261 || i
.types
[this_operand
].bitfield
.disp32
))
8262 i
.types
[this_operand
]
8263 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
8270 if (current_templates
->start
->opcode_modifier
.isstring
8271 && !current_templates
->start
->opcode_modifier
.immext
8272 && (current_templates
->end
[-1].opcode_modifier
.isstring
8275 /* Memory operands of string insns are special in that they only allow
8276 a single register (rDI, rSI, or rBX) as their memory address. */
8277 const reg_entry
*expected_reg
;
8278 static const char *di_si
[][2] =
8284 static const char *bx
[] = { "ebx", "bx", "rbx" };
8286 kind
= "string address";
8288 if (current_templates
->start
->opcode_modifier
.w
)
8290 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
8292 if (!type
.bitfield
.baseindex
8293 || ((!i
.mem_operands
!= !intel_syntax
)
8294 && current_templates
->end
[-1].operand_types
[1]
8295 .bitfield
.baseindex
))
8296 type
= current_templates
->end
[-1].operand_types
[1];
8297 expected_reg
= hash_find (reg_hash
,
8298 di_si
[addr_mode
][type
.bitfield
.esseg
]);
8302 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
8304 if (i
.base_reg
!= expected_reg
8306 || operand_type_check (i
.types
[this_operand
], disp
))
8308 /* The second memory operand must have the same size as
8312 && !((addr_mode
== CODE_64BIT
8313 && i
.base_reg
->reg_type
.bitfield
.reg64
)
8314 || (addr_mode
== CODE_32BIT
8315 ? i
.base_reg
->reg_type
.bitfield
.reg32
8316 : i
.base_reg
->reg_type
.bitfield
.reg16
)))
8319 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8321 intel_syntax
? '[' : '(',
8323 expected_reg
->reg_name
,
8324 intel_syntax
? ']' : ')');
8331 as_bad (_("`%s' is not a valid %s expression"),
8332 operand_string
, kind
);
8337 if (addr_mode
!= CODE_16BIT
)
8339 /* 32-bit/64-bit checks. */
8341 && (addr_mode
== CODE_64BIT
8342 ? !i
.base_reg
->reg_type
.bitfield
.reg64
8343 : !i
.base_reg
->reg_type
.bitfield
.reg32
)
8345 || (i
.base_reg
->reg_num
8346 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
8348 && !i
.index_reg
->reg_type
.bitfield
.regxmm
8349 && !i
.index_reg
->reg_type
.bitfield
.regymm
8350 && !i
.index_reg
->reg_type
.bitfield
.regzmm
8351 && ((addr_mode
== CODE_64BIT
8352 ? !(i
.index_reg
->reg_type
.bitfield
.reg64
8353 || i
.index_reg
->reg_num
== RegRiz
)
8354 : !(i
.index_reg
->reg_type
.bitfield
.reg32
8355 || i
.index_reg
->reg_num
== RegEiz
))
8356 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
8361 /* 16-bit checks. */
8363 && (!i
.base_reg
->reg_type
.bitfield
.reg16
8364 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
8366 && (!i
.index_reg
->reg_type
.bitfield
.reg16
8367 || !i
.index_reg
->reg_type
.bitfield
.baseindex
8369 && i
.base_reg
->reg_num
< 6
8370 && i
.index_reg
->reg_num
>= 6
8371 && i
.log2_scale_factor
== 0))))
8378 /* Handle vector immediates. */
8381 RC_SAE_immediate (const char *imm_start
)
8383 unsigned int match_found
, j
;
8384 const char *pstr
= imm_start
;
8392 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
8394 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
8398 rc_op
.type
= RC_NamesTable
[j
].type
;
8399 rc_op
.operand
= this_operand
;
8400 i
.rounding
= &rc_op
;
8404 as_bad (_("duplicated `%s'"), imm_start
);
8407 pstr
+= RC_NamesTable
[j
].len
;
8417 as_bad (_("Missing '}': '%s'"), imm_start
);
8420 /* RC/SAE immediate string should contain nothing more. */;
8423 as_bad (_("Junk after '}': '%s'"), imm_start
);
8427 exp
= &im_expressions
[i
.imm_operands
++];
8428 i
.op
[this_operand
].imms
= exp
;
8430 exp
->X_op
= O_constant
;
8431 exp
->X_add_number
= 0;
8432 exp
->X_add_symbol
= (symbolS
*) 0;
8433 exp
->X_op_symbol
= (symbolS
*) 0;
8435 i
.types
[this_operand
].bitfield
.imm8
= 1;
8439 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8443 i386_att_operand (char *operand_string
)
8447 char *op_string
= operand_string
;
8449 if (is_space_char (*op_string
))
8452 /* We check for an absolute prefix (differentiating,
8453 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8454 if (*op_string
== ABSOLUTE_PREFIX
)
8457 if (is_space_char (*op_string
))
8459 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8462 /* Check if operand is a register. */
8463 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
8465 i386_operand_type temp
;
8467 /* Check for a segment override by searching for ':' after a
8468 segment register. */
8470 if (is_space_char (*op_string
))
8472 if (*op_string
== ':'
8473 && (r
->reg_type
.bitfield
.sreg2
8474 || r
->reg_type
.bitfield
.sreg3
))
8479 i
.seg
[i
.mem_operands
] = &es
;
8482 i
.seg
[i
.mem_operands
] = &cs
;
8485 i
.seg
[i
.mem_operands
] = &ss
;
8488 i
.seg
[i
.mem_operands
] = &ds
;
8491 i
.seg
[i
.mem_operands
] = &fs
;
8494 i
.seg
[i
.mem_operands
] = &gs
;
8498 /* Skip the ':' and whitespace. */
8500 if (is_space_char (*op_string
))
8503 if (!is_digit_char (*op_string
)
8504 && !is_identifier_char (*op_string
)
8505 && *op_string
!= '('
8506 && *op_string
!= ABSOLUTE_PREFIX
)
8508 as_bad (_("bad memory operand `%s'"), op_string
);
8511 /* Handle case of %es:*foo. */
8512 if (*op_string
== ABSOLUTE_PREFIX
)
8515 if (is_space_char (*op_string
))
8517 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8519 goto do_memory_reference
;
8522 /* Handle vector operations. */
8523 if (*op_string
== '{')
8525 op_string
= check_VecOperations (op_string
, NULL
);
8526 if (op_string
== NULL
)
8532 as_bad (_("junk `%s' after register"), op_string
);
8536 temp
.bitfield
.baseindex
= 0;
8537 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8539 i
.types
[this_operand
].bitfield
.unspecified
= 0;
8540 i
.op
[this_operand
].regs
= r
;
8543 else if (*op_string
== REGISTER_PREFIX
)
8545 as_bad (_("bad register name `%s'"), op_string
);
8548 else if (*op_string
== IMMEDIATE_PREFIX
)
8551 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
8553 as_bad (_("immediate operand illegal with absolute jump"));
8556 if (!i386_immediate (op_string
))
8559 else if (RC_SAE_immediate (operand_string
))
8561 /* If it is a RC or SAE immediate, do nothing. */
8564 else if (is_digit_char (*op_string
)
8565 || is_identifier_char (*op_string
)
8566 || *op_string
== '(')
8568 /* This is a memory reference of some sort. */
8571 /* Start and end of displacement string expression (if found). */
8572 char *displacement_string_start
;
8573 char *displacement_string_end
;
8576 do_memory_reference
:
8577 if ((i
.mem_operands
== 1
8578 && !current_templates
->start
->opcode_modifier
.isstring
)
8579 || i
.mem_operands
== 2)
8581 as_bad (_("too many memory references for `%s'"),
8582 current_templates
->start
->name
);
8586 /* Check for base index form. We detect the base index form by
8587 looking for an ')' at the end of the operand, searching
8588 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8590 base_string
= op_string
+ strlen (op_string
);
8592 /* Handle vector operations. */
8593 vop_start
= strchr (op_string
, '{');
8594 if (vop_start
&& vop_start
< base_string
)
8596 if (check_VecOperations (vop_start
, base_string
) == NULL
)
8598 base_string
= vop_start
;
8602 if (is_space_char (*base_string
))
8605 /* If we only have a displacement, set-up for it to be parsed later. */
8606 displacement_string_start
= op_string
;
8607 displacement_string_end
= base_string
+ 1;
8609 if (*base_string
== ')')
8612 unsigned int parens_balanced
= 1;
8613 /* We've already checked that the number of left & right ()'s are
8614 equal, so this loop will not be infinite. */
8618 if (*base_string
== ')')
8620 if (*base_string
== '(')
8623 while (parens_balanced
);
8625 temp_string
= base_string
;
8627 /* Skip past '(' and whitespace. */
8629 if (is_space_char (*base_string
))
8632 if (*base_string
== ','
8633 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
8636 displacement_string_end
= temp_string
;
8638 i
.types
[this_operand
].bitfield
.baseindex
= 1;
8642 base_string
= end_op
;
8643 if (is_space_char (*base_string
))
8647 /* There may be an index reg or scale factor here. */
8648 if (*base_string
== ',')
8651 if (is_space_char (*base_string
))
8654 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
8657 base_string
= end_op
;
8658 if (is_space_char (*base_string
))
8660 if (*base_string
== ',')
8663 if (is_space_char (*base_string
))
8666 else if (*base_string
!= ')')
8668 as_bad (_("expecting `,' or `)' "
8669 "after index register in `%s'"),
8674 else if (*base_string
== REGISTER_PREFIX
)
8676 end_op
= strchr (base_string
, ',');
8679 as_bad (_("bad register name `%s'"), base_string
);
8683 /* Check for scale factor. */
8684 if (*base_string
!= ')')
8686 char *end_scale
= i386_scale (base_string
);
8691 base_string
= end_scale
;
8692 if (is_space_char (*base_string
))
8694 if (*base_string
!= ')')
8696 as_bad (_("expecting `)' "
8697 "after scale factor in `%s'"),
8702 else if (!i
.index_reg
)
8704 as_bad (_("expecting index register or scale factor "
8705 "after `,'; got '%c'"),
8710 else if (*base_string
!= ')')
8712 as_bad (_("expecting `,' or `)' "
8713 "after base register in `%s'"),
8718 else if (*base_string
== REGISTER_PREFIX
)
8720 end_op
= strchr (base_string
, ',');
8723 as_bad (_("bad register name `%s'"), base_string
);
8728 /* If there's an expression beginning the operand, parse it,
8729 assuming displacement_string_start and
8730 displacement_string_end are meaningful. */
8731 if (displacement_string_start
!= displacement_string_end
)
8733 if (!i386_displacement (displacement_string_start
,
8734 displacement_string_end
))
8738 /* Special case for (%dx) while doing input/output op. */
8740 && operand_type_equal (&i
.base_reg
->reg_type
,
8741 ®16_inoutportreg
)
8743 && i
.log2_scale_factor
== 0
8744 && i
.seg
[i
.mem_operands
] == 0
8745 && !operand_type_check (i
.types
[this_operand
], disp
))
8747 i
.types
[this_operand
] = inoutportreg
;
8751 if (i386_index_check (operand_string
) == 0)
8753 i
.types
[this_operand
].bitfield
.mem
= 1;
8758 /* It's not a memory operand; argh! */
8759 as_bad (_("invalid char %s beginning operand %d `%s'"),
8760 output_invalid (*op_string
),
8765 return 1; /* Normal return. */
8768 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8769 that an rs_machine_dependent frag may reach. */
8772 i386_frag_max_var (fragS
*frag
)
8774 /* The only relaxable frags are for jumps.
8775 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8776 gas_assert (frag
->fr_type
== rs_machine_dependent
);
8777 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
8780 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8782 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
)
8784 /* STT_GNU_IFUNC symbol must go through PLT. */
8785 if ((symbol_get_bfdsym (fr_symbol
)->flags
8786 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
8789 if (!S_IS_EXTERNAL (fr_symbol
))
8790 /* Symbol may be weak or local. */
8791 return !S_IS_WEAK (fr_symbol
);
8793 /* Non-weak symbols won't be preempted. */
8797 /* Global symbols with default visibility in a shared library may be
8798 preempted by another definition. */
8799 return ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
;
8803 /* md_estimate_size_before_relax()
8805 Called just before relax() for rs_machine_dependent frags. The x86
8806 assembler uses these frags to handle variable size jump
8809 Any symbol that is now undefined will not become defined.
8810 Return the correct fr_subtype in the frag.
8811 Return the initial "guess for variable size of frag" to caller.
8812 The guess is actually the growth beyond the fixed part. Whatever
8813 we do to grow the fixed or variable part contributes to our
8817 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
8819 /* We've already got fragP->fr_subtype right; all we have to do is
8820 check for un-relaxable symbols. On an ELF system, we can't relax
8821 an externally visible symbol, because it may be overridden by a
8823 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
8824 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8826 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
))
8828 #if defined (OBJ_COFF) && defined (TE_PE)
8829 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
8830 && S_IS_WEAK (fragP
->fr_symbol
))
8834 /* Symbol is undefined in this segment, or we need to keep a
8835 reloc so that weak symbols can be overridden. */
8836 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
8837 enum bfd_reloc_code_real reloc_type
;
8838 unsigned char *opcode
;
8841 if (fragP
->fr_var
!= NO_RELOC
)
8842 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
8844 reloc_type
= BFD_RELOC_16_PCREL
;
8846 reloc_type
= BFD_RELOC_32_PCREL
;
8848 old_fr_fix
= fragP
->fr_fix
;
8849 opcode
= (unsigned char *) fragP
->fr_opcode
;
8851 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
8854 /* Make jmp (0xeb) a (d)word displacement jump. */
8856 fragP
->fr_fix
+= size
;
8857 fix_new (fragP
, old_fr_fix
, size
,
8859 fragP
->fr_offset
, 1,
8865 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
8867 /* Negate the condition, and branch past an
8868 unconditional jump. */
8871 /* Insert an unconditional jump. */
8873 /* We added two extra opcode bytes, and have a two byte
8875 fragP
->fr_fix
+= 2 + 2;
8876 fix_new (fragP
, old_fr_fix
+ 2, 2,
8878 fragP
->fr_offset
, 1,
8885 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
8890 fixP
= fix_new (fragP
, old_fr_fix
, 1,
8892 fragP
->fr_offset
, 1,
8894 fixP
->fx_signed
= 1;
8898 /* This changes the byte-displacement jump 0x7N
8899 to the (d)word-displacement jump 0x0f,0x8N. */
8900 opcode
[1] = opcode
[0] + 0x10;
8901 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8902 /* We've added an opcode byte. */
8903 fragP
->fr_fix
+= 1 + size
;
8904 fix_new (fragP
, old_fr_fix
+ 1, size
,
8906 fragP
->fr_offset
, 1,
8911 BAD_CASE (fragP
->fr_subtype
);
8915 return fragP
->fr_fix
- old_fr_fix
;
8918 /* Guess size depending on current relax state. Initially the relax
8919 state will correspond to a short jump and we return 1, because
8920 the variable part of the frag (the branch offset) is one byte
8921 long. However, we can relax a section more than once and in that
8922 case we must either set fr_subtype back to the unrelaxed state,
8923 or return the value for the appropriate branch. */
8924 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
8927 /* Called after relax() is finished.
8929 In: Address of frag.
8930 fr_type == rs_machine_dependent.
8931 fr_subtype is what the address relaxed to.
8933 Out: Any fixSs and constants are set up.
8934 Caller will turn frag into a ".space 0". */
8937 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
8940 unsigned char *opcode
;
8941 unsigned char *where_to_put_displacement
= NULL
;
8942 offsetT target_address
;
8943 offsetT opcode_address
;
8944 unsigned int extension
= 0;
8945 offsetT displacement_from_opcode_start
;
8947 opcode
= (unsigned char *) fragP
->fr_opcode
;
8949 /* Address we want to reach in file space. */
8950 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
8952 /* Address opcode resides at in file space. */
8953 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
8955 /* Displacement from opcode start to fill into instruction. */
8956 displacement_from_opcode_start
= target_address
- opcode_address
;
8958 if ((fragP
->fr_subtype
& BIG
) == 0)
8960 /* Don't have to change opcode. */
8961 extension
= 1; /* 1 opcode + 1 displacement */
8962 where_to_put_displacement
= &opcode
[1];
8966 if (no_cond_jump_promotion
8967 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
8968 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
8969 _("long jump required"));
8971 switch (fragP
->fr_subtype
)
8973 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
8974 extension
= 4; /* 1 opcode + 4 displacement */
8976 where_to_put_displacement
= &opcode
[1];
8979 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
8980 extension
= 2; /* 1 opcode + 2 displacement */
8982 where_to_put_displacement
= &opcode
[1];
8985 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
8986 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
8987 extension
= 5; /* 2 opcode + 4 displacement */
8988 opcode
[1] = opcode
[0] + 0x10;
8989 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8990 where_to_put_displacement
= &opcode
[2];
8993 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
8994 extension
= 3; /* 2 opcode + 2 displacement */
8995 opcode
[1] = opcode
[0] + 0x10;
8996 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8997 where_to_put_displacement
= &opcode
[2];
9000 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
9005 where_to_put_displacement
= &opcode
[3];
9009 BAD_CASE (fragP
->fr_subtype
);
9014 /* If size if less then four we are sure that the operand fits,
9015 but if it's 4, then it could be that the displacement is larger
9017 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
9019 && ((addressT
) (displacement_from_opcode_start
- extension
9020 + ((addressT
) 1 << 31))
9021 > (((addressT
) 2 << 31) - 1)))
9023 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
9024 _("jump target out of range"));
9025 /* Make us emit 0. */
9026 displacement_from_opcode_start
= extension
;
9028 /* Now put displacement after opcode. */
9029 md_number_to_chars ((char *) where_to_put_displacement
,
9030 (valueT
) (displacement_from_opcode_start
- extension
),
9031 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9032 fragP
->fr_fix
+= extension
;
9035 /* Apply a fixup (fixP) to segment data, once it has been determined
9036 by our caller that we have all the info we need to fix it up.
9038 Parameter valP is the pointer to the value of the bits.
9040 On the 386, immediates, displacements, and data pointers are all in
9041 the same (little-endian) format, so we don't need to care about which
9045 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9047 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9048 valueT value
= *valP
;
9050 #if !defined (TE_Mach)
9053 switch (fixP
->fx_r_type
)
9059 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9062 case BFD_RELOC_X86_64_32S
:
9063 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9066 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9069 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9074 if (fixP
->fx_addsy
!= NULL
9075 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9076 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9077 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9078 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
9079 && !use_rela_relocations
)
9081 /* This is a hack. There should be a better way to handle this.
9082 This covers for the fact that bfd_install_relocation will
9083 subtract the current location (for partial_inplace, PC relative
9084 relocations); see more below. */
9088 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9091 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9093 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9096 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9099 || (symbol_section_p (fixP
->fx_addsy
)
9100 && sym_seg
!= absolute_section
))
9101 && !generic_force_reloc (fixP
))
9103 /* Yes, we add the values in twice. This is because
9104 bfd_install_relocation subtracts them out again. I think
9105 bfd_install_relocation is broken, but I don't dare change
9107 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9111 #if defined (OBJ_COFF) && defined (TE_PE)
9112 /* For some reason, the PE format does not store a
9113 section address offset for a PC relative symbol. */
9114 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9115 || S_IS_WEAK (fixP
->fx_addsy
))
9116 value
+= md_pcrel_from (fixP
);
9119 #if defined (OBJ_COFF) && defined (TE_PE)
9120 if (fixP
->fx_addsy
!= NULL
9121 && S_IS_WEAK (fixP
->fx_addsy
)
9122 /* PR 16858: Do not modify weak function references. */
9123 && ! fixP
->fx_pcrel
)
9125 #if !defined (TE_PEP)
9126 /* For x86 PE weak function symbols are neither PC-relative
9127 nor do they set S_IS_FUNCTION. So the only reliable way
9128 to detect them is to check the flags of their containing
9130 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
9131 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
9135 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9139 /* Fix a few things - the dynamic linker expects certain values here,
9140 and we must not disappoint it. */
9141 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9142 if (IS_ELF
&& fixP
->fx_addsy
)
9143 switch (fixP
->fx_r_type
)
9145 case BFD_RELOC_386_PLT32
:
9146 case BFD_RELOC_X86_64_PLT32
:
9147 /* Make the jump instruction point to the address of the operand. At
9148 runtime we merely add the offset to the actual PLT entry. */
9152 case BFD_RELOC_386_TLS_GD
:
9153 case BFD_RELOC_386_TLS_LDM
:
9154 case BFD_RELOC_386_TLS_IE_32
:
9155 case BFD_RELOC_386_TLS_IE
:
9156 case BFD_RELOC_386_TLS_GOTIE
:
9157 case BFD_RELOC_386_TLS_GOTDESC
:
9158 case BFD_RELOC_X86_64_TLSGD
:
9159 case BFD_RELOC_X86_64_TLSLD
:
9160 case BFD_RELOC_X86_64_GOTTPOFF
:
9161 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9162 value
= 0; /* Fully resolved at runtime. No addend. */
9164 case BFD_RELOC_386_TLS_LE
:
9165 case BFD_RELOC_386_TLS_LDO_32
:
9166 case BFD_RELOC_386_TLS_LE_32
:
9167 case BFD_RELOC_X86_64_DTPOFF32
:
9168 case BFD_RELOC_X86_64_DTPOFF64
:
9169 case BFD_RELOC_X86_64_TPOFF32
:
9170 case BFD_RELOC_X86_64_TPOFF64
:
9171 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9174 case BFD_RELOC_386_TLS_DESC_CALL
:
9175 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9176 value
= 0; /* Fully resolved at runtime. No addend. */
9177 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9181 case BFD_RELOC_386_GOT32
:
9182 case BFD_RELOC_X86_64_GOT32
:
9183 value
= 0; /* Fully resolved at runtime. No addend. */
9186 case BFD_RELOC_VTABLE_INHERIT
:
9187 case BFD_RELOC_VTABLE_ENTRY
:
9194 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9196 #endif /* !defined (TE_Mach) */
9198 /* Are we finished with this relocation now? */
9199 if (fixP
->fx_addsy
== NULL
)
9201 #if defined (OBJ_COFF) && defined (TE_PE)
9202 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9205 /* Remember value for tc_gen_reloc. */
9206 fixP
->fx_addnumber
= value
;
9207 /* Clear out the frag for now. */
9211 else if (use_rela_relocations
)
9213 fixP
->fx_no_overflow
= 1;
9214 /* Remember value for tc_gen_reloc. */
9215 fixP
->fx_addnumber
= value
;
9219 md_number_to_chars (p
, value
, fixP
->fx_size
);
9223 md_atof (int type
, char *litP
, int *sizeP
)
9225 /* This outputs the LITTLENUMs in REVERSE order;
9226 in accord with the bigendian 386. */
9227 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
9230 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
9233 output_invalid (int c
)
9236 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
9239 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
9240 "(0x%x)", (unsigned char) c
);
9241 return output_invalid_buf
;
9244 /* REG_STRING starts *before* REGISTER_PREFIX. */
9246 static const reg_entry
*
9247 parse_real_register (char *reg_string
, char **end_op
)
9249 char *s
= reg_string
;
9251 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
9254 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9255 if (*s
== REGISTER_PREFIX
)
9258 if (is_space_char (*s
))
9262 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
9264 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
9265 return (const reg_entry
*) NULL
;
9269 /* For naked regs, make sure that we are not dealing with an identifier.
9270 This prevents confusing an identifier like `eax_var' with register
9272 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
9273 return (const reg_entry
*) NULL
;
9277 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
9279 /* Handle floating point regs, allowing spaces in the (i) part. */
9280 if (r
== i386_regtab
/* %st is first entry of table */)
9282 if (is_space_char (*s
))
9287 if (is_space_char (*s
))
9289 if (*s
>= '0' && *s
<= '7')
9293 if (is_space_char (*s
))
9298 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
9303 /* We have "%st(" then garbage. */
9304 return (const reg_entry
*) NULL
;
9308 if (r
== NULL
|| allow_pseudo_reg
)
9311 if (operand_type_all_zero (&r
->reg_type
))
9312 return (const reg_entry
*) NULL
;
9314 if ((r
->reg_type
.bitfield
.reg32
9315 || r
->reg_type
.bitfield
.sreg3
9316 || r
->reg_type
.bitfield
.control
9317 || r
->reg_type
.bitfield
.debug
9318 || r
->reg_type
.bitfield
.test
)
9319 && !cpu_arch_flags
.bitfield
.cpui386
)
9320 return (const reg_entry
*) NULL
;
9322 if (r
->reg_type
.bitfield
.floatreg
9323 && !cpu_arch_flags
.bitfield
.cpu8087
9324 && !cpu_arch_flags
.bitfield
.cpu287
9325 && !cpu_arch_flags
.bitfield
.cpu387
)
9326 return (const reg_entry
*) NULL
;
9328 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
9329 return (const reg_entry
*) NULL
;
9331 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpusse
)
9332 return (const reg_entry
*) NULL
;
9334 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuavx
)
9335 return (const reg_entry
*) NULL
;
9337 if ((r
->reg_type
.bitfield
.regzmm
|| r
->reg_type
.bitfield
.regmask
)
9338 && !cpu_arch_flags
.bitfield
.cpuavx512f
)
9339 return (const reg_entry
*) NULL
;
9341 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9342 if (!allow_index_reg
9343 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
9344 return (const reg_entry
*) NULL
;
9346 /* Upper 16 vector register is only available with VREX in 64bit
9348 if ((r
->reg_flags
& RegVRex
))
9350 if (!cpu_arch_flags
.bitfield
.cpuvrex
9351 || flag_code
!= CODE_64BIT
)
9352 return (const reg_entry
*) NULL
;
9357 if (((r
->reg_flags
& (RegRex64
| RegRex
))
9358 || r
->reg_type
.bitfield
.reg64
)
9359 && (!cpu_arch_flags
.bitfield
.cpulm
9360 || !operand_type_equal (&r
->reg_type
, &control
))
9361 && flag_code
!= CODE_64BIT
)
9362 return (const reg_entry
*) NULL
;
9364 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
9365 return (const reg_entry
*) NULL
;
9370 /* REG_STRING starts *before* REGISTER_PREFIX. */
9372 static const reg_entry
*
9373 parse_register (char *reg_string
, char **end_op
)
9377 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
9378 r
= parse_real_register (reg_string
, end_op
);
9383 char *save
= input_line_pointer
;
9387 input_line_pointer
= reg_string
;
9388 c
= get_symbol_end ();
9389 symbolP
= symbol_find (reg_string
);
9390 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
9392 const expressionS
*e
= symbol_get_value_expression (symbolP
);
9394 know (e
->X_op
== O_register
);
9395 know (e
->X_add_number
>= 0
9396 && (valueT
) e
->X_add_number
< i386_regtab_size
);
9397 r
= i386_regtab
+ e
->X_add_number
;
9398 if ((r
->reg_flags
& RegVRex
))
9400 *end_op
= input_line_pointer
;
9402 *input_line_pointer
= c
;
9403 input_line_pointer
= save
;
9409 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
9412 char *end
= input_line_pointer
;
9415 r
= parse_register (name
, &input_line_pointer
);
9416 if (r
&& end
<= input_line_pointer
)
9418 *nextcharP
= *input_line_pointer
;
9419 *input_line_pointer
= 0;
9420 e
->X_op
= O_register
;
9421 e
->X_add_number
= r
- i386_regtab
;
9424 input_line_pointer
= end
;
9426 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
9430 md_operand (expressionS
*e
)
9435 switch (*input_line_pointer
)
9437 case REGISTER_PREFIX
:
9438 r
= parse_real_register (input_line_pointer
, &end
);
9441 e
->X_op
= O_register
;
9442 e
->X_add_number
= r
- i386_regtab
;
9443 input_line_pointer
= end
;
9448 gas_assert (intel_syntax
);
9449 end
= input_line_pointer
++;
9451 if (*input_line_pointer
== ']')
9453 ++input_line_pointer
;
9454 e
->X_op_symbol
= make_expr_symbol (e
);
9455 e
->X_add_symbol
= NULL
;
9456 e
->X_add_number
= 0;
9462 input_line_pointer
= end
;
9469 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9470 const char *md_shortopts
= "kVQ:sqn";
9472 const char *md_shortopts
= "qn";
9475 #define OPTION_32 (OPTION_MD_BASE + 0)
9476 #define OPTION_64 (OPTION_MD_BASE + 1)
9477 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9478 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9479 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9480 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9481 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9482 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9483 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9484 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9485 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9486 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9487 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9488 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9489 #define OPTION_X32 (OPTION_MD_BASE + 14)
9490 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9491 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9492 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9493 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9494 #define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9495 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9496 #define OPTION_MNO_SHARED (OPTION_MD_BASE + 21)
9498 struct option md_longopts
[] =
9500 {"32", no_argument
, NULL
, OPTION_32
},
9501 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9502 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9503 {"64", no_argument
, NULL
, OPTION_64
},
9505 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9506 {"x32", no_argument
, NULL
, OPTION_X32
},
9507 {"mno-shared", no_argument
, NULL
, OPTION_MNO_SHARED
},
9509 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
9510 {"march", required_argument
, NULL
, OPTION_MARCH
},
9511 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
9512 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
9513 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
9514 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
9515 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
9516 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
9517 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
9518 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
9519 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
9520 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
9521 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
9522 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
9523 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
9524 # if defined (TE_PE) || defined (TE_PEP)
9525 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
9527 {"momit-lock-prefix", required_argument
, NULL
, OPTION_OMIT_LOCK_PREFIX
},
9528 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
9529 {NULL
, no_argument
, NULL
, 0}
9531 size_t md_longopts_size
= sizeof (md_longopts
);
9534 md_parse_option (int c
, char *arg
)
9542 optimize_align_code
= 0;
9549 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9550 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9551 should be emitted or not. FIXME: Not implemented. */
9555 /* -V: SVR4 argument to print version ID. */
9557 print_version_id ();
9560 /* -k: Ignore for FreeBSD compatibility. */
9565 /* -s: On i386 Solaris, this tells the native assembler to use
9566 .stab instead of .stab.excl. We always use .stab anyhow. */
9569 case OPTION_MNO_SHARED
:
9573 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9574 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9577 const char **list
, **l
;
9579 list
= bfd_target_list ();
9580 for (l
= list
; *l
!= NULL
; l
++)
9581 if (CONST_STRNEQ (*l
, "elf64-x86-64")
9582 || strcmp (*l
, "coff-x86-64") == 0
9583 || strcmp (*l
, "pe-x86-64") == 0
9584 || strcmp (*l
, "pei-x86-64") == 0
9585 || strcmp (*l
, "mach-o-x86-64") == 0)
9587 default_arch
= "x86_64";
9591 as_fatal (_("no compiled in support for x86_64"));
9597 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9601 const char **list
, **l
;
9603 list
= bfd_target_list ();
9604 for (l
= list
; *l
!= NULL
; l
++)
9605 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
9607 default_arch
= "x86_64:32";
9611 as_fatal (_("no compiled in support for 32bit x86_64"));
9615 as_fatal (_("32bit x86_64 is only supported for ELF"));
9620 default_arch
= "i386";
9624 #ifdef SVR4_COMMENT_CHARS
9629 n
= (char *) xmalloc (strlen (i386_comment_chars
) + 1);
9631 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
9635 i386_comment_chars
= n
;
9641 arch
= xstrdup (arg
);
9645 as_fatal (_("invalid -march= option: `%s'"), arg
);
9646 next
= strchr (arch
, '+');
9649 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9651 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
9654 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
9657 cpu_arch_name
= cpu_arch
[j
].name
;
9658 cpu_sub_arch_name
= NULL
;
9659 cpu_arch_flags
= cpu_arch
[j
].flags
;
9660 cpu_arch_isa
= cpu_arch
[j
].type
;
9661 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
9662 if (!cpu_arch_tune_set
)
9664 cpu_arch_tune
= cpu_arch_isa
;
9665 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
9669 else if (*cpu_arch
[j
].name
== '.'
9670 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
9672 /* ISA entension. */
9673 i386_cpu_flags flags
;
9675 if (!cpu_arch
[j
].negated
)
9676 flags
= cpu_flags_or (cpu_arch_flags
,
9679 flags
= cpu_flags_and_not (cpu_arch_flags
,
9681 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
9683 if (cpu_sub_arch_name
)
9685 char *name
= cpu_sub_arch_name
;
9686 cpu_sub_arch_name
= concat (name
,
9688 (const char *) NULL
);
9692 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
9693 cpu_arch_flags
= flags
;
9694 cpu_arch_isa_flags
= flags
;
9700 if (j
>= ARRAY_SIZE (cpu_arch
))
9701 as_fatal (_("invalid -march= option: `%s'"), arg
);
9705 while (next
!= NULL
);
9710 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9711 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9713 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
9715 cpu_arch_tune_set
= 1;
9716 cpu_arch_tune
= cpu_arch
[j
].type
;
9717 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
9721 if (j
>= ARRAY_SIZE (cpu_arch
))
9722 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9725 case OPTION_MMNEMONIC
:
9726 if (strcasecmp (arg
, "att") == 0)
9728 else if (strcasecmp (arg
, "intel") == 0)
9731 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
9734 case OPTION_MSYNTAX
:
9735 if (strcasecmp (arg
, "att") == 0)
9737 else if (strcasecmp (arg
, "intel") == 0)
9740 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
9743 case OPTION_MINDEX_REG
:
9744 allow_index_reg
= 1;
9747 case OPTION_MNAKED_REG
:
9748 allow_naked_reg
= 1;
9751 case OPTION_MOLD_GCC
:
9755 case OPTION_MSSE2AVX
:
9759 case OPTION_MSSE_CHECK
:
9760 if (strcasecmp (arg
, "error") == 0)
9761 sse_check
= check_error
;
9762 else if (strcasecmp (arg
, "warning") == 0)
9763 sse_check
= check_warning
;
9764 else if (strcasecmp (arg
, "none") == 0)
9765 sse_check
= check_none
;
9767 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
9770 case OPTION_MOPERAND_CHECK
:
9771 if (strcasecmp (arg
, "error") == 0)
9772 operand_check
= check_error
;
9773 else if (strcasecmp (arg
, "warning") == 0)
9774 operand_check
= check_warning
;
9775 else if (strcasecmp (arg
, "none") == 0)
9776 operand_check
= check_none
;
9778 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
9781 case OPTION_MAVXSCALAR
:
9782 if (strcasecmp (arg
, "128") == 0)
9784 else if (strcasecmp (arg
, "256") == 0)
9787 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
9790 case OPTION_MADD_BND_PREFIX
:
9794 case OPTION_MEVEXLIG
:
9795 if (strcmp (arg
, "128") == 0)
9797 else if (strcmp (arg
, "256") == 0)
9799 else if (strcmp (arg
, "512") == 0)
9802 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
9805 case OPTION_MEVEXRCIG
:
9806 if (strcmp (arg
, "rne") == 0)
9808 else if (strcmp (arg
, "rd") == 0)
9810 else if (strcmp (arg
, "ru") == 0)
9812 else if (strcmp (arg
, "rz") == 0)
9815 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
9818 case OPTION_MEVEXWIG
:
9819 if (strcmp (arg
, "0") == 0)
9821 else if (strcmp (arg
, "1") == 0)
9824 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
9827 # if defined (TE_PE) || defined (TE_PEP)
9828 case OPTION_MBIG_OBJ
:
9833 case OPTION_OMIT_LOCK_PREFIX
:
9834 if (strcasecmp (arg
, "yes") == 0)
9835 omit_lock_prefix
= 1;
9836 else if (strcasecmp (arg
, "no") == 0)
9837 omit_lock_prefix
= 0;
9839 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
9848 #define MESSAGE_TEMPLATE \
9852 show_arch (FILE *stream
, int ext
, int check
)
9854 static char message
[] = MESSAGE_TEMPLATE
;
9855 char *start
= message
+ 27;
9857 int size
= sizeof (MESSAGE_TEMPLATE
);
9864 left
= size
- (start
- message
);
9865 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9867 /* Should it be skipped? */
9868 if (cpu_arch
[j
].skip
)
9871 name
= cpu_arch
[j
].name
;
9872 len
= cpu_arch
[j
].len
;
9875 /* It is an extension. Skip if we aren't asked to show it. */
9886 /* It is an processor. Skip if we show only extension. */
9889 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
9891 /* It is an impossible processor - skip. */
9895 /* Reserve 2 spaces for ", " or ",\0" */
9898 /* Check if there is any room. */
9906 p
= mempcpy (p
, name
, len
);
9910 /* Output the current message now and start a new one. */
9913 fprintf (stream
, "%s\n", message
);
9915 left
= size
- (start
- message
) - len
- 2;
9917 gas_assert (left
>= 0);
9919 p
= mempcpy (p
, name
, len
);
9924 fprintf (stream
, "%s\n", message
);
9928 md_show_usage (FILE *stream
)
9930 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9931 fprintf (stream
, _("\
9933 -V print assembler version number\n\
9936 fprintf (stream
, _("\
9937 -n Do not optimize code alignment\n\
9938 -q quieten some warnings\n"));
9939 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9940 fprintf (stream
, _("\
9943 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9944 || defined (TE_PE) || defined (TE_PEP))
9945 fprintf (stream
, _("\
9946 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
9948 #ifdef SVR4_COMMENT_CHARS
9949 fprintf (stream
, _("\
9950 --divide do not treat `/' as a comment character\n"));
9952 fprintf (stream
, _("\
9953 --divide ignored\n"));
9955 fprintf (stream
, _("\
9956 -march=CPU[,+EXTENSION...]\n\
9957 generate code for CPU and EXTENSION, CPU is one of:\n"));
9958 show_arch (stream
, 0, 1);
9959 fprintf (stream
, _("\
9960 EXTENSION is combination of:\n"));
9961 show_arch (stream
, 1, 0);
9962 fprintf (stream
, _("\
9963 -mtune=CPU optimize for CPU, CPU is one of:\n"));
9964 show_arch (stream
, 0, 0);
9965 fprintf (stream
, _("\
9966 -msse2avx encode SSE instructions with VEX prefix\n"));
9967 fprintf (stream
, _("\
9968 -msse-check=[none|error|warning]\n\
9969 check SSE instructions\n"));
9970 fprintf (stream
, _("\
9971 -moperand-check=[none|error|warning]\n\
9972 check operand combinations for validity\n"));
9973 fprintf (stream
, _("\
9974 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
9976 fprintf (stream
, _("\
9977 -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
9979 fprintf (stream
, _("\
9980 -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
9981 for EVEX.W bit ignored instructions\n"));
9982 fprintf (stream
, _("\
9983 -mevexrcig=[rne|rd|ru|rz]\n\
9984 encode EVEX instructions with specific EVEX.RC value\n\
9985 for SAE-only ignored instructions\n"));
9986 fprintf (stream
, _("\
9987 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
9988 fprintf (stream
, _("\
9989 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
9990 fprintf (stream
, _("\
9991 -mindex-reg support pseudo index registers\n"));
9992 fprintf (stream
, _("\
9993 -mnaked-reg don't require `%%' prefix for registers\n"));
9994 fprintf (stream
, _("\
9995 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
9996 fprintf (stream
, _("\
9997 -madd-bnd-prefix add BND prefix for all valid branches\n"));
9998 fprintf (stream
, _("\
9999 -mno-shared enable branch optimization for non shared code\n"));
10000 # if defined (TE_PE) || defined (TE_PEP)
10001 fprintf (stream
, _("\
10002 -mbig-obj generate big object files\n"));
10004 fprintf (stream
, _("\
10005 -momit-lock-prefix=[no|yes]\n\
10006 strip all lock prefixes\n"));
10009 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10010 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10011 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10013 /* Pick the target format to use. */
10016 i386_target_format (void)
10018 if (!strncmp (default_arch
, "x86_64", 6))
10020 update_code_flag (CODE_64BIT
, 1);
10021 if (default_arch
[6] == '\0')
10022 x86_elf_abi
= X86_64_ABI
;
10024 x86_elf_abi
= X86_64_X32_ABI
;
10026 else if (!strcmp (default_arch
, "i386"))
10027 update_code_flag (CODE_32BIT
, 1);
10029 as_fatal (_("unknown architecture"));
10031 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
10032 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
10033 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
10034 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
10036 switch (OUTPUT_FLAVOR
)
10038 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
10039 case bfd_target_aout_flavour
:
10040 return AOUT_TARGET_FORMAT
;
10042 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
10043 # if defined (TE_PE) || defined (TE_PEP)
10044 case bfd_target_coff_flavour
:
10045 if (flag_code
== CODE_64BIT
)
10046 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
10049 # elif defined (TE_GO32)
10050 case bfd_target_coff_flavour
:
10051 return "coff-go32";
10053 case bfd_target_coff_flavour
:
10054 return "coff-i386";
10057 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10058 case bfd_target_elf_flavour
:
10060 const char *format
;
10062 switch (x86_elf_abi
)
10065 format
= ELF_TARGET_FORMAT
;
10068 use_rela_relocations
= 1;
10070 format
= ELF_TARGET_FORMAT64
;
10072 case X86_64_X32_ABI
:
10073 use_rela_relocations
= 1;
10075 disallow_64bit_reloc
= 1;
10076 format
= ELF_TARGET_FORMAT32
;
10079 if (cpu_arch_isa
== PROCESSOR_L1OM
)
10081 if (x86_elf_abi
!= X86_64_ABI
)
10082 as_fatal (_("Intel L1OM is 64bit only"));
10083 return ELF_TARGET_L1OM_FORMAT
;
10085 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
10087 if (x86_elf_abi
!= X86_64_ABI
)
10088 as_fatal (_("Intel K1OM is 64bit only"));
10089 return ELF_TARGET_K1OM_FORMAT
;
10095 #if defined (OBJ_MACH_O)
10096 case bfd_target_mach_o_flavour
:
10097 if (flag_code
== CODE_64BIT
)
10099 use_rela_relocations
= 1;
10101 return "mach-o-x86-64";
10104 return "mach-o-i386";
10112 #endif /* OBJ_MAYBE_ more than one */
10115 md_undefined_symbol (char *name
)
10117 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
10118 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
10119 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
10120 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
10124 if (symbol_find (name
))
10125 as_bad (_("GOT already in symbol table"));
10126 GOT_symbol
= symbol_new (name
, undefined_section
,
10127 (valueT
) 0, &zero_address_frag
);
10134 /* Round up a section size to the appropriate boundary. */
10137 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
10139 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10140 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
10142 /* For a.out, force the section size to be aligned. If we don't do
10143 this, BFD will align it for us, but it will not write out the
10144 final bytes of the section. This may be a bug in BFD, but it is
10145 easier to fix it here since that is how the other a.out targets
10149 align
= bfd_get_section_alignment (stdoutput
, segment
);
10150 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
10157 /* On the i386, PC-relative offsets are relative to the start of the
10158 next instruction. That is, the address of the offset, plus its
10159 size, since the offset is always the last part of the insn. */
10162 md_pcrel_from (fixS
*fixP
)
10164 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10170 s_bss (int ignore ATTRIBUTE_UNUSED
)
10174 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10176 obj_elf_section_change_hook ();
10178 temp
= get_absolute_expression ();
10179 subseg_set (bss_section
, (subsegT
) temp
);
10180 demand_empty_rest_of_line ();
10186 i386_validate_fix (fixS
*fixp
)
10188 if (fixp
->fx_subsy
&& fixp
->fx_subsy
== GOT_symbol
)
10190 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
10194 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
10199 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
10201 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
10203 fixp
->fx_subsy
= 0;
10208 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
10211 bfd_reloc_code_real_type code
;
10213 switch (fixp
->fx_r_type
)
10215 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10216 case BFD_RELOC_SIZE32
:
10217 case BFD_RELOC_SIZE64
:
10218 if (S_IS_DEFINED (fixp
->fx_addsy
)
10219 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
10221 /* Resolve size relocation against local symbol to size of
10222 the symbol plus addend. */
10223 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
10224 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
10225 && !fits_in_unsigned_long (value
))
10226 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10227 _("symbol size computation overflow"));
10228 fixp
->fx_addsy
= NULL
;
10229 fixp
->fx_subsy
= NULL
;
10230 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
10235 case BFD_RELOC_X86_64_PLT32
:
10236 case BFD_RELOC_X86_64_GOT32
:
10237 case BFD_RELOC_X86_64_GOTPCREL
:
10238 case BFD_RELOC_386_PLT32
:
10239 case BFD_RELOC_386_GOT32
:
10240 case BFD_RELOC_386_GOTOFF
:
10241 case BFD_RELOC_386_GOTPC
:
10242 case BFD_RELOC_386_TLS_GD
:
10243 case BFD_RELOC_386_TLS_LDM
:
10244 case BFD_RELOC_386_TLS_LDO_32
:
10245 case BFD_RELOC_386_TLS_IE_32
:
10246 case BFD_RELOC_386_TLS_IE
:
10247 case BFD_RELOC_386_TLS_GOTIE
:
10248 case BFD_RELOC_386_TLS_LE_32
:
10249 case BFD_RELOC_386_TLS_LE
:
10250 case BFD_RELOC_386_TLS_GOTDESC
:
10251 case BFD_RELOC_386_TLS_DESC_CALL
:
10252 case BFD_RELOC_X86_64_TLSGD
:
10253 case BFD_RELOC_X86_64_TLSLD
:
10254 case BFD_RELOC_X86_64_DTPOFF32
:
10255 case BFD_RELOC_X86_64_DTPOFF64
:
10256 case BFD_RELOC_X86_64_GOTTPOFF
:
10257 case BFD_RELOC_X86_64_TPOFF32
:
10258 case BFD_RELOC_X86_64_TPOFF64
:
10259 case BFD_RELOC_X86_64_GOTOFF64
:
10260 case BFD_RELOC_X86_64_GOTPC32
:
10261 case BFD_RELOC_X86_64_GOT64
:
10262 case BFD_RELOC_X86_64_GOTPCREL64
:
10263 case BFD_RELOC_X86_64_GOTPC64
:
10264 case BFD_RELOC_X86_64_GOTPLT64
:
10265 case BFD_RELOC_X86_64_PLTOFF64
:
10266 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10267 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10268 case BFD_RELOC_RVA
:
10269 case BFD_RELOC_VTABLE_ENTRY
:
10270 case BFD_RELOC_VTABLE_INHERIT
:
10272 case BFD_RELOC_32_SECREL
:
10274 code
= fixp
->fx_r_type
;
10276 case BFD_RELOC_X86_64_32S
:
10277 if (!fixp
->fx_pcrel
)
10279 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10280 code
= fixp
->fx_r_type
;
10284 if (fixp
->fx_pcrel
)
10286 switch (fixp
->fx_size
)
10289 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10290 _("can not do %d byte pc-relative relocation"),
10292 code
= BFD_RELOC_32_PCREL
;
10294 case 1: code
= BFD_RELOC_8_PCREL
; break;
10295 case 2: code
= BFD_RELOC_16_PCREL
; break;
10296 case 4: code
= BFD_RELOC_32_PCREL
; break;
10298 case 8: code
= BFD_RELOC_64_PCREL
; break;
10304 switch (fixp
->fx_size
)
10307 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10308 _("can not do %d byte relocation"),
10310 code
= BFD_RELOC_32
;
10312 case 1: code
= BFD_RELOC_8
; break;
10313 case 2: code
= BFD_RELOC_16
; break;
10314 case 4: code
= BFD_RELOC_32
; break;
10316 case 8: code
= BFD_RELOC_64
; break;
10323 if ((code
== BFD_RELOC_32
10324 || code
== BFD_RELOC_32_PCREL
10325 || code
== BFD_RELOC_X86_64_32S
)
10327 && fixp
->fx_addsy
== GOT_symbol
)
10330 code
= BFD_RELOC_386_GOTPC
;
10332 code
= BFD_RELOC_X86_64_GOTPC32
;
10334 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
10336 && fixp
->fx_addsy
== GOT_symbol
)
10338 code
= BFD_RELOC_X86_64_GOTPC64
;
10341 rel
= (arelent
*) xmalloc (sizeof (arelent
));
10342 rel
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
10343 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
10345 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
10347 if (!use_rela_relocations
)
10349 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10350 vtable entry to be used in the relocation's section offset. */
10351 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
10352 rel
->address
= fixp
->fx_offset
;
10353 #if defined (OBJ_COFF) && defined (TE_PE)
10354 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
10355 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
10360 /* Use the rela in 64bit mode. */
10363 if (disallow_64bit_reloc
)
10366 case BFD_RELOC_X86_64_DTPOFF64
:
10367 case BFD_RELOC_X86_64_TPOFF64
:
10368 case BFD_RELOC_64_PCREL
:
10369 case BFD_RELOC_X86_64_GOTOFF64
:
10370 case BFD_RELOC_X86_64_GOT64
:
10371 case BFD_RELOC_X86_64_GOTPCREL64
:
10372 case BFD_RELOC_X86_64_GOTPC64
:
10373 case BFD_RELOC_X86_64_GOTPLT64
:
10374 case BFD_RELOC_X86_64_PLTOFF64
:
10375 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10376 _("cannot represent relocation type %s in x32 mode"),
10377 bfd_get_reloc_code_name (code
));
10383 if (!fixp
->fx_pcrel
)
10384 rel
->addend
= fixp
->fx_offset
;
10388 case BFD_RELOC_X86_64_PLT32
:
10389 case BFD_RELOC_X86_64_GOT32
:
10390 case BFD_RELOC_X86_64_GOTPCREL
:
10391 case BFD_RELOC_X86_64_TLSGD
:
10392 case BFD_RELOC_X86_64_TLSLD
:
10393 case BFD_RELOC_X86_64_GOTTPOFF
:
10394 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10395 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10396 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
10399 rel
->addend
= (section
->vma
10401 + fixp
->fx_addnumber
10402 + md_pcrel_from (fixp
));
10407 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
10408 if (rel
->howto
== NULL
)
10410 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10411 _("cannot represent relocation type %s"),
10412 bfd_get_reloc_code_name (code
));
10413 /* Set howto to a garbage value so that we can keep going. */
10414 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
10415 gas_assert (rel
->howto
!= NULL
);
10421 #include "tc-i386-intel.c"
10424 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
10426 int saved_naked_reg
;
10427 char saved_register_dot
;
10429 saved_naked_reg
= allow_naked_reg
;
10430 allow_naked_reg
= 1;
10431 saved_register_dot
= register_chars
['.'];
10432 register_chars
['.'] = '.';
10433 allow_pseudo_reg
= 1;
10434 expression_and_evaluate (exp
);
10435 allow_pseudo_reg
= 0;
10436 register_chars
['.'] = saved_register_dot
;
10437 allow_naked_reg
= saved_naked_reg
;
10439 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
10441 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
10443 exp
->X_op
= O_constant
;
10444 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
10445 .dw2_regnum
[flag_code
>> 1];
10448 exp
->X_op
= O_illegal
;
10453 tc_x86_frame_initial_instructions (void)
10455 static unsigned int sp_regno
[2];
10457 if (!sp_regno
[flag_code
>> 1])
10459 char *saved_input
= input_line_pointer
;
10460 char sp
[][4] = {"esp", "rsp"};
10463 input_line_pointer
= sp
[flag_code
>> 1];
10464 tc_x86_parse_to_dw2regnum (&exp
);
10465 gas_assert (exp
.X_op
== O_constant
);
10466 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
10467 input_line_pointer
= saved_input
;
10470 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
10471 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
10475 x86_dwarf2_addr_size (void)
10477 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10478 if (x86_elf_abi
== X86_64_X32_ABI
)
10481 return bfd_arch_bits_per_address (stdoutput
) / 8;
10485 i386_elf_section_type (const char *str
, size_t len
)
10487 if (flag_code
== CODE_64BIT
10488 && len
== sizeof ("unwind") - 1
10489 && strncmp (str
, "unwind", 6) == 0)
10490 return SHT_X86_64_UNWIND
;
10497 i386_solaris_fix_up_eh_frame (segT sec
)
10499 if (flag_code
== CODE_64BIT
)
10500 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
10506 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
10510 exp
.X_op
= O_secrel
;
10511 exp
.X_add_symbol
= symbol
;
10512 exp
.X_add_number
= 0;
10513 emit_expr (&exp
, size
);
10517 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10518 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10521 x86_64_section_letter (int letter
, char **ptr_msg
)
10523 if (flag_code
== CODE_64BIT
)
10526 return SHF_X86_64_LARGE
;
10528 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10531 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
10536 x86_64_section_word (char *str
, size_t len
)
10538 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
10539 return SHF_X86_64_LARGE
;
10545 handle_large_common (int small ATTRIBUTE_UNUSED
)
10547 if (flag_code
!= CODE_64BIT
)
10549 s_comm_internal (0, elf_common_parse
);
10550 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10554 static segT lbss_section
;
10555 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
10556 asection
*saved_bss_section
= bss_section
;
10558 if (lbss_section
== NULL
)
10560 flagword applicable
;
10561 segT seg
= now_seg
;
10562 subsegT subseg
= now_subseg
;
10564 /* The .lbss section is for local .largecomm symbols. */
10565 lbss_section
= subseg_new (".lbss", 0);
10566 applicable
= bfd_applicable_section_flags (stdoutput
);
10567 bfd_set_section_flags (stdoutput
, lbss_section
,
10568 applicable
& SEC_ALLOC
);
10569 seg_info (lbss_section
)->bss
= 1;
10571 subseg_set (seg
, subseg
);
10574 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
10575 bss_section
= lbss_section
;
10577 s_comm_internal (0, elf_common_parse
);
10579 elf_com_section_ptr
= saved_com_section_ptr
;
10580 bss_section
= saved_bss_section
;
10583 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */