1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2015 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 /* Default to compress debug sections for Linux.  Non-zero enables
   compression of debug sections in the output; NOTE(review): presumably
   overridable via a command-line option — confirm in md_parse_option.  */
38 int flag_compress_debug
= 1;
41 #ifndef REGISTER_WARNINGS
42 #define REGISTER_WARNINGS 1
45 #ifndef INFER_ADDR_PREFIX
46 #define INFER_ADDR_PREFIX 1
50 #define DEFAULT_ARCH "i386"
55 #define INLINE __inline__
61 /* Prefixes will be emitted in the order defined below.
62 WAIT_PREFIX must be the first prefix since FWAIT really is an
63 instruction, and so must come before any prefixes.
64 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
65 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
71 #define HLE_PREFIX REP_PREFIX
72 #define BND_PREFIX REP_PREFIX
74 #define REX_PREFIX 6 /* must come last. */
75 #define MAX_PREFIXES 7 /* max prefixes per opcode */
77 /* we define the syntax here (modulo base,index,scale syntax) */
78 #define REGISTER_PREFIX '%'
79 #define IMMEDIATE_PREFIX '$'
80 #define ABSOLUTE_PREFIX '*'
82 /* these are the instruction mnemonic suffixes in AT&T syntax or
83 memory operand size in Intel syntax. */
84 #define WORD_MNEM_SUFFIX 'w'
85 #define BYTE_MNEM_SUFFIX 'b'
86 #define SHORT_MNEM_SUFFIX 's'
87 #define LONG_MNEM_SUFFIX 'l'
88 #define QWORD_MNEM_SUFFIX 'q'
89 #define XMMWORD_MNEM_SUFFIX 'x'
90 #define YMMWORD_MNEM_SUFFIX 'y'
91 #define ZMMWORD_MNEM_SUFFIX 'z'
92 /* Intel Syntax.  Use a non-ascii letter since it never appears
in register names.  */
94 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
96 #define END_OF_INSN '\0'
99 'templates' is for grouping together 'template' structures for opcodes
100 of the same name. This is only used for storing the insns in the grand
101 ole hash table of insns.
102 The templates themselves start at START and range up to (but not including)
107 const insn_template
*start
;
108 const insn_template
*end
;
112 /* 386 operand encoding bytes: see 386 book for details of this. */
115 unsigned int regmem
; /* codes register or memory operand */
116 unsigned int reg
; /* codes register operand (or extended opcode) */
117 unsigned int mode
; /* how to interpret regmem & reg */
121 /* x86-64 extension prefix. */
122 typedef int rex_byte
;
124 /* 386 opcode byte to code indirect addressing. */
133 /* x86 arch names, types and features */
136 const char *name
; /* arch name */
137 unsigned int len
; /* arch string length */
138 enum processor_type type
; /* arch type */
139 i386_cpu_flags flags
; /* cpu feature flags */
140 unsigned int skip
; /* show_arch should skip this. */
141 unsigned int negated
; /* turn off indicated flags. */
145 static void update_code_flag (int, int);
146 static void set_code_flag (int);
147 static void set_16bit_gcc_code_flag (int);
148 static void set_intel_syntax (int);
149 static void set_intel_mnemonic (int);
150 static void set_allow_index_reg (int);
151 static void set_check (int);
152 static void set_cpu_arch (int);
154 static void pe_directive_secrel (int);
156 static void signed_cons (int);
157 static char *output_invalid (int c
);
158 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
160 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
162 static int i386_att_operand (char *);
163 static int i386_intel_operand (char *, int);
164 static int i386_intel_simplify (expressionS
*);
165 static int i386_intel_parse_name (const char *, expressionS
*);
166 static const reg_entry
*parse_register (char *, char **);
167 static char *parse_insn (char *, char *);
168 static char *parse_operands (char *, const char *);
169 static void swap_operands (void);
170 static void swap_2_operands (int, int);
171 static void optimize_imm (void);
172 static void optimize_disp (void);
173 static const insn_template
*match_template (void);
174 static int check_string (void);
175 static int process_suffix (void);
176 static int check_byte_reg (void);
177 static int check_long_reg (void);
178 static int check_qword_reg (void);
179 static int check_word_reg (void);
180 static int finalize_imm (void);
181 static int process_operands (void);
182 static const seg_entry
*build_modrm_byte (void);
183 static void output_insn (void);
184 static void output_imm (fragS
*, offsetT
);
185 static void output_disp (fragS
*, offsetT
);
187 static void s_bss (int);
189 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
190 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
193 static const char *default_arch
= DEFAULT_ARCH
;
195 /* This struct describes rounding control and SAE in the instruction. */
209 static struct RC_Operation rc_op
;
211 /* The struct describes masking, applied to OPERAND in the instruction.
212 MASK is a pointer to the corresponding mask register. ZEROING tells
213 whether merging or zeroing mask is used. */
214 struct Mask_Operation
216 const reg_entry
*mask
;
217 unsigned int zeroing
;
218 /* The operand where this operation is associated. */
222 static struct Mask_Operation mask_op
;
224 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
226 struct Broadcast_Operation
228 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
231 /* Index of broadcasted operand. */
235 static struct Broadcast_Operation broadcast_op
;
240 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
241 unsigned char bytes
[4];
243 /* Destination or source register specifier. */
244 const reg_entry
*register_specifier
;
247 /* 'md_assemble ()' gathers together information and puts it into a
254 const reg_entry
*regs
;
259 operand_size_mismatch
,
260 operand_type_mismatch
,
261 register_type_mismatch
,
262 number_of_operands_mismatch
,
263 invalid_instruction_suffix
,
266 unsupported_with_intel_mnemonic
,
269 invalid_vsib_address
,
270 invalid_vector_register_set
,
271 unsupported_vector_index_register
,
272 unsupported_broadcast
,
273 broadcast_not_on_src_operand
,
276 mask_not_on_destination
,
279 rc_sae_operand_not_last_imm
,
280 invalid_register_operand
,
286 /* TM holds the template for the insn were currently assembling. */
289 /* SUFFIX holds the instruction size suffix for byte, word, dword
290 or qword, if given. */
293 /* OPERANDS gives the number of given operands. */
294 unsigned int operands
;
296 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
297 of given register, displacement, memory operands and immediate
299 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
301 /* TYPES [i] is the type (see above #defines) which tells us how to
302 use OP[i] for the corresponding operand. */
303 i386_operand_type types
[MAX_OPERANDS
];
305 /* Displacement expression, immediate expression, or register for each
307 union i386_op op
[MAX_OPERANDS
];
309 /* Flags for operands. */
310 unsigned int flags
[MAX_OPERANDS
];
311 #define Operand_PCrel 1
313 /* Relocation type for operand */
314 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
316 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
317 the base index byte below. */
318 const reg_entry
*base_reg
;
319 const reg_entry
*index_reg
;
320 unsigned int log2_scale_factor
;
322 /* SEG gives the seg_entries of this insn. They are zero unless
323 explicit segment overrides are given. */
324 const seg_entry
*seg
[2];
326 /* PREFIX holds all the given prefix opcodes (usually null).
327 PREFIXES is the number of prefix opcodes. */
328 unsigned int prefixes
;
329 unsigned char prefix
[MAX_PREFIXES
];
331 /* RM and SIB are the modrm byte and the sib byte where the
332 addressing modes of this insn are encoded. */
339 /* Masking attributes. */
340 struct Mask_Operation
*mask
;
342 /* Rounding control and SAE attributes. */
343 struct RC_Operation
*rounding
;
345 /* Broadcasting attributes. */
346 struct Broadcast_Operation
*broadcast
;
348 /* Compressed disp8*N attribute. */
349 unsigned int memshift
;
351 /* Swap operand in encoding. */
352 unsigned int swap_operand
;
354 /* Prefer 8bit or 32bit displacement in encoding. */
357 disp_encoding_default
= 0,
363 const char *rep_prefix
;
366 const char *hle_prefix
;
368 /* Have BND prefix. */
369 const char *bnd_prefix
;
371 /* Need VREX to support upper 16 registers. */
375 enum i386_error error
;
378 typedef struct _i386_insn i386_insn
;
380 /* Link RC type with corresponding string, that'll be looked for in
389 static const struct RC_name RC_NamesTable
[] =
391 { rne
, STRING_COMMA_LEN ("rn-sae") },
392 { rd
, STRING_COMMA_LEN ("rd-sae") },
393 { ru
, STRING_COMMA_LEN ("ru-sae") },
394 { rz
, STRING_COMMA_LEN ("rz-sae") },
395 { saeonly
, STRING_COMMA_LEN ("sae") },
398 /* List of chars besides those in app.c:symbol_chars that can start an
399 operand. Used to prevent the scrubber eating vital white-space. */
400 const char extra_symbol_chars
[] = "*%-([{"
409 #if (defined (TE_I386AIX) \
410 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
411 && !defined (TE_GNU) \
412 && !defined (TE_LINUX) \
413 && !defined (TE_NACL) \
414 && !defined (TE_NETWARE) \
415 && !defined (TE_FreeBSD) \
416 && !defined (TE_DragonFly) \
417 && !defined (TE_NetBSD)))
418 /* This array holds the chars that always start a comment. If the
419 pre-processor is disabled, these aren't very useful. The option
420 --divide will remove '/' from this list. */
421 const char *i386_comment_chars
= "#/";
422 #define SVR4_COMMENT_CHARS 1
423 #define PREFIX_SEPARATOR '\\'
426 const char *i386_comment_chars
= "#";
427 #define PREFIX_SEPARATOR '/'
430 /* This array holds the chars that only start a comment at the beginning of
431 a line. If the line seems to have the form '# 123 filename'
432 .line and .file directives will appear in the pre-processed output.
433 Note that input_file.c hand checks for '#' at the beginning of the
434 first line of the input file. This is because the compiler outputs
435 #NO_APP at the beginning of its output.
436 Also note that comments started like this one will always work if
437 '/' isn't otherwise defined. */
438 const char line_comment_chars
[] = "#/";
440 const char line_separator_chars
[] = ";";
442 /* Chars that can be used to separate mant from exp in floating point
   numbers.  */
444 const char EXP_CHARS
[] = "eE";
446 /* Chars that mean this number is a floating point constant,
   as in 0f12.456 or 0d1.2345e12.  */
449 const char FLT_CHARS
[] = "fFdDxX";
451 /* Tables for lexical analysis.  Each is a 256-entry map indexed by
   (unsigned char); a non-zero entry means the character belongs to the
   class.  Consulted by the is_*_char macros below.  */
452 static char mnemonic_chars
[256];  /* chars accepted in an insn mnemonic */
453 static char register_chars
[256];  /* chars accepted in a register name */
454 static char operand_chars
[256];  /* chars accepted in an operand */
455 static char identifier_chars
[256];  /* chars accepted in an identifier */
456 static char digit_chars
[256];  /* decimal digit chars */
/* Lexical macros.

   Each classifies a character by indexing the corresponding 256-entry
   lookup table defined above.  The argument is parenthesized before the
   cast so that an expression argument (e.g. `c + 1') is converted as a
   whole rather than having the cast bind to its first operand only
   (CERT PRE01-C).  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
466 /* All non-digit non-letter characters that may occur in an operand. */
467 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
469 /* md_assemble() always leaves the strings it's passed unaltered. To
470 effect this we maintain a stack of saved characters that we've smashed
471 with '\0's (indicating end of strings for various sub-fields of the
472 assembler instruction). */
473 static char save_stack
[32];
474 static char *save_stack_p
;
/* Overwrite *S with '\0', pushing the displaced character onto
   SAVE_STACK.  Must be paired with RESTORE_END_STRING in LIFO order.
   NOTE(review): no overflow/underflow checks here — callers must keep
   pushes and pops balanced and never exceed 32 saved characters.  */
475 #define END_STRING_AND_SAVE(s) \
476 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo END_STRING_AND_SAVE: pop the saved character back into *S.  */
477 #define RESTORE_END_STRING(s) \
478 do { *(s) = *--save_stack_p; } while (0)
480 /* The instruction we're assembling. */
483 /* Possible templates for current insn. */
484 static const templates
*current_templates
;
486 /* Per instruction expressionS buffers: max displacements & immediates. */
487 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
488 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
490 /* Current operand we are working on.  NOTE(review): -1 presumably
   means "no operand being parsed" — confirm against the parser.  */
491 static int this_operand
= -1;
493 /* We support four different modes. FLAG_CODE variable is used to distinguish
501 static enum flag_code flag_code
;
502 static unsigned int object_64bit
;
503 static unsigned int disallow_64bit_reloc
;
504 static int use_rela_relocations
= 0;
506 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
507 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
508 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
510 /* The ELF ABI to use. */
518 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
521 #if defined (TE_PE) || defined (TE_PEP)
522 /* Use big object file format. */
523 static int use_big_obj
= 0;
526 /* 1 for intel syntax,
528 static int intel_syntax
= 0;
530 /* 1 for intel mnemonic,
531 0 if att mnemonic. */
532 static int intel_mnemonic
= !SYSV386_COMPAT
;
534 /* 1 if support old (<= 2.8.1) versions of gcc. */
535 static int old_gcc
= OLDGCC_COMPAT
;
537 /* 1 if pseudo registers are permitted. */
538 static int allow_pseudo_reg
= 0;
540 /* 1 if register prefix % not required. */
541 static int allow_naked_reg
= 0;
543 /* 1 if the assembler should add BND prefix for all control-transferring
544 instructions supporting it, even if this prefix wasn't specified
explicitly.  */
546 static int add_bnd_prefix
= 0;
548 /* 1 if pseudo index register, eiz/riz, is allowed.  */
549 static int allow_index_reg
= 0;
551 /* 1 if the assembler should ignore LOCK prefix, even if it was
552 specified explicitly. */
553 static int omit_lock_prefix
= 0;
555 static enum check_kind
561 sse_check
, operand_check
= check_warning
;
563 /* Register prefix used for error message. */
564 static const char *register_prefix
= "%";
566 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
567 leave, push, and pop instructions so that gcc has the same stack
568 frame as in 32 bit mode. */
569 static char stackop_size
= '\0';
571 /* Non-zero to optimize code alignment. */
572 int optimize_align_code
= 1;
574 /* Non-zero to quieten some warnings. */
575 static int quiet_warnings
= 0;
578 static const char *cpu_arch_name
= NULL
;
579 static char *cpu_sub_arch_name
= NULL
;
581 /* CPU feature flags. */
582 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
584 /* If we have selected a cpu we are generating instructions for. */
585 static int cpu_arch_tune_set
= 0;
587 /* Cpu we are generating instructions for. */
588 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
590 /* CPU feature flags of cpu we are generating instructions for. */
591 static i386_cpu_flags cpu_arch_tune_flags
;
593 /* CPU instruction set architecture used. */
594 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
596 /* CPU feature flags of instruction set architecture used. */
597 i386_cpu_flags cpu_arch_isa_flags
;
599 /* If set, conditional jumps are not automatically promoted to handle
600 larger than a byte offset. */
601 static unsigned int no_cond_jump_promotion
= 0;
603 /* Encode SSE instructions with VEX prefix. */
604 static unsigned int sse2avx
;
606 /* Encode scalar AVX instructions with specific vector length. */
613 /* Encode scalar EVEX LIG instructions with specific vector length. */
621 /* Encode EVEX WIG instructions with specific evex.w. */
628 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
629 static enum rc_type evexrcig
= rne
;
631 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
632 static symbolS
*GOT_symbol
;
634 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
635 unsigned int x86_dwarf2_return_column
;
637 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
638 int x86_cie_data_alignment
;
640 /* Interface to relax_segment.
641 There are 3 major relax states for 386 jump insns because the
642 different types of jumps add different sizes to frags when we're
643 figuring out what sort of jump to choose to reach a given label. */
646 #define UNCOND_JUMP 0
648 #define COND_JUMP86 2
653 #define SMALL16 (SMALL | CODE16)
655 #define BIG16 (BIG | CODE16)
659 #define INLINE __inline__
665 #define ENCODE_RELAX_STATE(type, size) \
666 ((relax_substateT) (((type) << 2) | (size)))
667 #define TYPE_FROM_RELAX_STATE(s) \
669 #define DISP_SIZE_FROM_RELAX_STATE(s) \
670 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
672 /* This table is used by relax_frag to promote short jumps to long
673 ones where necessary. SMALL (short) jumps may be promoted to BIG
674 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
675 don't allow a short jump in a 32 bit code segment to be promoted to
676 a 16 bit offset jump because it's slower (requires data size
677 prefix), and doesn't work, unless the destination is in the bottom
678 64k of the code segment (The top 16 bits of eip are zeroed). */
680 const relax_typeS md_relax_table
[] =
683 1) most positive reach of this state,
684 2) most negative reach of this state,
685 3) how many bytes this mode will have in the variable part of the frag
686 4) which index into the table to try if we can't fit into this one. */
688 /* UNCOND_JUMP states. */
689 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
690 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
691 /* dword jmp adds 4 bytes to frag:
692 0 extra opcode bytes, 4 displacement bytes. */
694 /* word jmp adds 2 bytes to frag:
695 0 extra opcode bytes, 2 displacement bytes. */
698 /* COND_JUMP states. */
699 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
700 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
701 /* dword conditionals adds 5 bytes to frag:
702 1 extra opcode byte, 4 displacement bytes. */
704 /* word conditionals add 3 bytes to frag:
705 1 extra opcode byte, 2 displacement bytes. */
708 /* COND_JUMP86 states. */
709 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
710 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
711 /* dword conditionals adds 5 bytes to frag:
712 1 extra opcode byte, 4 displacement bytes. */
714 /* word conditionals add 4 bytes to frag:
715 1 displacement byte and a 3 byte long branch insn. */
719 static const arch_entry cpu_arch
[] =
721 /* Do not replace the first two entries - i386_target_format()
722 relies on them being there in this order. */
723 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
724 CPU_GENERIC32_FLAGS
, 0, 0 },
725 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
726 CPU_GENERIC64_FLAGS
, 0, 0 },
727 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
728 CPU_NONE_FLAGS
, 0, 0 },
729 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
730 CPU_I186_FLAGS
, 0, 0 },
731 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
732 CPU_I286_FLAGS
, 0, 0 },
733 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
734 CPU_I386_FLAGS
, 0, 0 },
735 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
736 CPU_I486_FLAGS
, 0, 0 },
737 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
738 CPU_I586_FLAGS
, 0, 0 },
739 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
740 CPU_I686_FLAGS
, 0, 0 },
741 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
742 CPU_I586_FLAGS
, 0, 0 },
743 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
744 CPU_PENTIUMPRO_FLAGS
, 0, 0 },
745 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
746 CPU_P2_FLAGS
, 0, 0 },
747 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
748 CPU_P3_FLAGS
, 0, 0 },
749 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
750 CPU_P4_FLAGS
, 0, 0 },
751 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
752 CPU_CORE_FLAGS
, 0, 0 },
753 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
754 CPU_NOCONA_FLAGS
, 0, 0 },
755 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
756 CPU_CORE_FLAGS
, 1, 0 },
757 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
758 CPU_CORE_FLAGS
, 0, 0 },
759 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
760 CPU_CORE2_FLAGS
, 1, 0 },
761 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
762 CPU_CORE2_FLAGS
, 0, 0 },
763 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
764 CPU_COREI7_FLAGS
, 0, 0 },
765 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
766 CPU_L1OM_FLAGS
, 0, 0 },
767 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
768 CPU_K1OM_FLAGS
, 0, 0 },
769 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
770 CPU_K6_FLAGS
, 0, 0 },
771 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
772 CPU_K6_2_FLAGS
, 0, 0 },
773 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
774 CPU_ATHLON_FLAGS
, 0, 0 },
775 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
776 CPU_K8_FLAGS
, 1, 0 },
777 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
778 CPU_K8_FLAGS
, 0, 0 },
779 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
780 CPU_K8_FLAGS
, 0, 0 },
781 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
782 CPU_AMDFAM10_FLAGS
, 0, 0 },
783 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
784 CPU_BDVER1_FLAGS
, 0, 0 },
785 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
786 CPU_BDVER2_FLAGS
, 0, 0 },
787 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
788 CPU_BDVER3_FLAGS
, 0, 0 },
789 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
790 CPU_BDVER4_FLAGS
, 0, 0 },
791 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
792 CPU_ZNVER1_FLAGS
, 0, 0 },
793 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
794 CPU_BTVER1_FLAGS
, 0, 0 },
795 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
796 CPU_BTVER2_FLAGS
, 0, 0 },
797 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
798 CPU_8087_FLAGS
, 0, 0 },
799 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
800 CPU_287_FLAGS
, 0, 0 },
801 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
802 CPU_387_FLAGS
, 0, 0 },
803 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN
,
804 CPU_ANY87_FLAGS
, 0, 1 },
805 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
806 CPU_MMX_FLAGS
, 0, 0 },
807 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN
,
808 CPU_3DNOWA_FLAGS
, 0, 1 },
809 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
810 CPU_SSE_FLAGS
, 0, 0 },
811 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
812 CPU_SSE2_FLAGS
, 0, 0 },
813 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
814 CPU_SSE3_FLAGS
, 0, 0 },
815 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
816 CPU_SSSE3_FLAGS
, 0, 0 },
817 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
818 CPU_SSE4_1_FLAGS
, 0, 0 },
819 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
820 CPU_SSE4_2_FLAGS
, 0, 0 },
821 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
822 CPU_SSE4_2_FLAGS
, 0, 0 },
823 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN
,
824 CPU_ANY_SSE_FLAGS
, 0, 1 },
825 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
826 CPU_AVX_FLAGS
, 0, 0 },
827 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
828 CPU_AVX2_FLAGS
, 0, 0 },
829 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
830 CPU_AVX512F_FLAGS
, 0, 0 },
831 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
832 CPU_AVX512CD_FLAGS
, 0, 0 },
833 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
834 CPU_AVX512ER_FLAGS
, 0, 0 },
835 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
836 CPU_AVX512PF_FLAGS
, 0, 0 },
837 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
838 CPU_AVX512DQ_FLAGS
, 0, 0 },
839 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
840 CPU_AVX512BW_FLAGS
, 0, 0 },
841 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
842 CPU_AVX512VL_FLAGS
, 0, 0 },
843 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN
,
844 CPU_ANY_AVX_FLAGS
, 0, 1 },
845 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
846 CPU_VMX_FLAGS
, 0, 0 },
847 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
848 CPU_VMFUNC_FLAGS
, 0, 0 },
849 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
850 CPU_SMX_FLAGS
, 0, 0 },
851 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
852 CPU_XSAVE_FLAGS
, 0, 0 },
853 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
854 CPU_XSAVEOPT_FLAGS
, 0, 0 },
855 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
856 CPU_XSAVEC_FLAGS
, 0, 0 },
857 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
858 CPU_XSAVES_FLAGS
, 0, 0 },
859 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
860 CPU_AES_FLAGS
, 0, 0 },
861 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
862 CPU_PCLMUL_FLAGS
, 0, 0 },
863 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
864 CPU_PCLMUL_FLAGS
, 1, 0 },
865 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
866 CPU_FSGSBASE_FLAGS
, 0, 0 },
867 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
868 CPU_RDRND_FLAGS
, 0, 0 },
869 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
870 CPU_F16C_FLAGS
, 0, 0 },
871 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
872 CPU_BMI2_FLAGS
, 0, 0 },
873 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
874 CPU_FMA_FLAGS
, 0, 0 },
875 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
876 CPU_FMA4_FLAGS
, 0, 0 },
877 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
878 CPU_XOP_FLAGS
, 0, 0 },
879 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
880 CPU_LWP_FLAGS
, 0, 0 },
881 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
882 CPU_MOVBE_FLAGS
, 0, 0 },
883 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
884 CPU_CX16_FLAGS
, 0, 0 },
885 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
886 CPU_EPT_FLAGS
, 0, 0 },
887 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
888 CPU_LZCNT_FLAGS
, 0, 0 },
889 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
890 CPU_HLE_FLAGS
, 0, 0 },
891 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
892 CPU_RTM_FLAGS
, 0, 0 },
893 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
894 CPU_INVPCID_FLAGS
, 0, 0 },
895 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
896 CPU_CLFLUSH_FLAGS
, 0, 0 },
897 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
898 CPU_NOP_FLAGS
, 0, 0 },
899 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
900 CPU_SYSCALL_FLAGS
, 0, 0 },
901 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
902 CPU_RDTSCP_FLAGS
, 0, 0 },
903 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
904 CPU_3DNOW_FLAGS
, 0, 0 },
905 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
906 CPU_3DNOWA_FLAGS
, 0, 0 },
907 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
908 CPU_PADLOCK_FLAGS
, 0, 0 },
909 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
910 CPU_SVME_FLAGS
, 1, 0 },
911 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
912 CPU_SVME_FLAGS
, 0, 0 },
913 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
914 CPU_SSE4A_FLAGS
, 0, 0 },
915 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
916 CPU_ABM_FLAGS
, 0, 0 },
917 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
918 CPU_BMI_FLAGS
, 0, 0 },
919 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
920 CPU_TBM_FLAGS
, 0, 0 },
921 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
922 CPU_ADX_FLAGS
, 0, 0 },
923 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
924 CPU_RDSEED_FLAGS
, 0, 0 },
925 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
926 CPU_PRFCHW_FLAGS
, 0, 0 },
927 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
928 CPU_SMAP_FLAGS
, 0, 0 },
929 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
930 CPU_MPX_FLAGS
, 0, 0 },
931 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
932 CPU_SHA_FLAGS
, 0, 0 },
933 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
934 CPU_CLFLUSHOPT_FLAGS
, 0, 0 },
935 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
936 CPU_PREFETCHWT1_FLAGS
, 0, 0 },
937 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
938 CPU_SE1_FLAGS
, 0, 0 },
939 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
940 CPU_CLWB_FLAGS
, 0, 0 },
941 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN
,
942 CPU_PCOMMIT_FLAGS
, 0, 0 },
943 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
944 CPU_AVX512IFMA_FLAGS
, 0, 0 },
945 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
946 CPU_AVX512VBMI_FLAGS
, 0, 0 },
947 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
948 CPU_CLZERO_FLAGS
, 0, 0 },
952 /* Like s_lcomm_internal in gas/read.c but the alignment string
953 is allowed to be optional. */
956 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
963 && *input_line_pointer
== ',')
965 align
= parse_align (needs_align
- 1);
967 if (align
== (addressT
) -1)
982 bss_alloc (symbolP
, size
, align
);
987 pe_lcomm (int needs_align
)
989 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
993 const pseudo_typeS md_pseudo_table
[] =
995 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
996 {"align", s_align_bytes
, 0},
998 {"align", s_align_ptwo
, 0},
1000 {"arch", set_cpu_arch
, 0},
1004 {"lcomm", pe_lcomm
, 1},
1006 {"ffloat", float_cons
, 'f'},
1007 {"dfloat", float_cons
, 'd'},
1008 {"tfloat", float_cons
, 'x'},
1010 {"slong", signed_cons
, 4},
1011 {"noopt", s_ignore
, 0},
1012 {"optim", s_ignore
, 0},
1013 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1014 {"code16", set_code_flag
, CODE_16BIT
},
1015 {"code32", set_code_flag
, CODE_32BIT
},
1016 {"code64", set_code_flag
, CODE_64BIT
},
1017 {"intel_syntax", set_intel_syntax
, 1},
1018 {"att_syntax", set_intel_syntax
, 0},
1019 {"intel_mnemonic", set_intel_mnemonic
, 1},
1020 {"att_mnemonic", set_intel_mnemonic
, 0},
1021 {"allow_index_reg", set_allow_index_reg
, 1},
1022 {"disallow_index_reg", set_allow_index_reg
, 0},
1023 {"sse_check", set_check
, 0},
1024 {"operand_check", set_check
, 1},
1025 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1026 {"largecomm", handle_large_common
, 0},
1028 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
1029 {"loc", dwarf2_directive_loc
, 0},
1030 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1033 {"secrel32", pe_directive_secrel
, 0},
1038 /* For interface with expression (). */
1039 extern char *input_line_pointer
;
1041 /* Hash table for instruction mnemonic lookup. */
1042 static struct hash_control
*op_hash
;
1044 /* Hash table for register lookup. */
1045 static struct hash_control
*reg_hash
;
1048 i386_align_code (fragS
*fragP
, int count
)
1050 /* Various efficient no-op patterns for aligning code labels.
1051 Note: Don't try to assemble the instructions in the comments.
1052 0L and 0w are not legal. */
1053 static const char f32_1
[] =
1055 static const char f32_2
[] =
1056 {0x66,0x90}; /* xchg %ax,%ax */
1057 static const char f32_3
[] =
1058 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1059 static const char f32_4
[] =
1060 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1061 static const char f32_5
[] =
1063 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1064 static const char f32_6
[] =
1065 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1066 static const char f32_7
[] =
1067 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1068 static const char f32_8
[] =
1070 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1071 static const char f32_9
[] =
1072 {0x89,0xf6, /* movl %esi,%esi */
1073 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1074 static const char f32_10
[] =
1075 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1076 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1077 static const char f32_11
[] =
1078 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1079 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1080 static const char f32_12
[] =
1081 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1082 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1083 static const char f32_13
[] =
1084 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1085 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1086 static const char f32_14
[] =
1087 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1088 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1089 static const char f16_3
[] =
1090 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
1091 static const char f16_4
[] =
1092 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1093 static const char f16_5
[] =
1095 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1096 static const char f16_6
[] =
1097 {0x89,0xf6, /* mov %si,%si */
1098 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1099 static const char f16_7
[] =
1100 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1101 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1102 static const char f16_8
[] =
1103 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1104 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1105 static const char jump_31
[] =
1106 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1107 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1108 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1109 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1110 static const char *const f32_patt
[] = {
1111 f32_1
, f32_2
, f32_3
, f32_4
, f32_5
, f32_6
, f32_7
, f32_8
,
1112 f32_9
, f32_10
, f32_11
, f32_12
, f32_13
, f32_14
1114 static const char *const f16_patt
[] = {
1115 f32_1
, f32_2
, f16_3
, f16_4
, f16_5
, f16_6
, f16_7
, f16_8
1117 /* nopl (%[re]ax) */
1118 static const char alt_3
[] =
1120 /* nopl 0(%[re]ax) */
1121 static const char alt_4
[] =
1122 {0x0f,0x1f,0x40,0x00};
1123 /* nopl 0(%[re]ax,%[re]ax,1) */
1124 static const char alt_5
[] =
1125 {0x0f,0x1f,0x44,0x00,0x00};
1126 /* nopw 0(%[re]ax,%[re]ax,1) */
1127 static const char alt_6
[] =
1128 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1129 /* nopl 0L(%[re]ax) */
1130 static const char alt_7
[] =
1131 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1132 /* nopl 0L(%[re]ax,%[re]ax,1) */
1133 static const char alt_8
[] =
1134 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1135 /* nopw 0L(%[re]ax,%[re]ax,1) */
1136 static const char alt_9
[] =
1137 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1138 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1139 static const char alt_10
[] =
1140 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1141 static const char *const alt_patt
[] = {
1142 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1146 /* Only align for at least a positive non-zero boundary. */
1147 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1150 /* We need to decide which NOP sequence to use for 32bit and
1151 64bit. When -mtune= is used:
1153 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1154 PROCESSOR_GENERIC32, f32_patt will be used.
1155 2. For the rest, alt_patt will be used.
1157 When -mtune= isn't used, alt_patt will be used if
1158 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1161 When -march= or .arch is used, we can't use anything beyond
1162 cpu_arch_isa_flags. */
1164 if (flag_code
== CODE_16BIT
)
1168 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1170 /* Adjust jump offset. */
1171 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1174 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1175 f16_patt
[count
- 1], count
);
1179 const char *const *patt
= NULL
;
1181 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1183 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1184 switch (cpu_arch_tune
)
1186 case PROCESSOR_UNKNOWN
:
1187 /* We use cpu_arch_isa_flags to check if we SHOULD
1188 optimize with nops. */
1189 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1194 case PROCESSOR_PENTIUM4
:
1195 case PROCESSOR_NOCONA
:
1196 case PROCESSOR_CORE
:
1197 case PROCESSOR_CORE2
:
1198 case PROCESSOR_COREI7
:
1199 case PROCESSOR_L1OM
:
1200 case PROCESSOR_K1OM
:
1201 case PROCESSOR_GENERIC64
:
1203 case PROCESSOR_ATHLON
:
1205 case PROCESSOR_AMDFAM10
:
1207 case PROCESSOR_ZNVER
:
1211 case PROCESSOR_I386
:
1212 case PROCESSOR_I486
:
1213 case PROCESSOR_PENTIUM
:
1214 case PROCESSOR_PENTIUMPRO
:
1215 case PROCESSOR_GENERIC32
:
1222 switch (fragP
->tc_frag_data
.tune
)
1224 case PROCESSOR_UNKNOWN
:
1225 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1226 PROCESSOR_UNKNOWN. */
1230 case PROCESSOR_I386
:
1231 case PROCESSOR_I486
:
1232 case PROCESSOR_PENTIUM
:
1234 case PROCESSOR_ATHLON
:
1236 case PROCESSOR_AMDFAM10
:
1238 case PROCESSOR_ZNVER
:
1240 case PROCESSOR_GENERIC32
:
1241 /* We use cpu_arch_isa_flags to check if we CAN optimize
1243 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1248 case PROCESSOR_PENTIUMPRO
:
1249 case PROCESSOR_PENTIUM4
:
1250 case PROCESSOR_NOCONA
:
1251 case PROCESSOR_CORE
:
1252 case PROCESSOR_CORE2
:
1253 case PROCESSOR_COREI7
:
1254 case PROCESSOR_L1OM
:
1255 case PROCESSOR_K1OM
:
1256 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1261 case PROCESSOR_GENERIC64
:
1267 if (patt
== f32_patt
)
1269 /* If the padding is less than 15 bytes, we use the normal
1270 ones. Otherwise, we use a jump instruction and adjust
1274 /* For 64bit, the limit is 3 bytes. */
1275 if (flag_code
== CODE_64BIT
1276 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1281 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1282 patt
[count
- 1], count
);
1285 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1287 /* Adjust jump offset. */
1288 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1293 /* Maximum length of an instruction is 10 byte. If the
1294 padding is greater than 10 bytes and we don't use jump,
1295 we have to break it into smaller pieces. */
1296 int padding
= count
;
1297 while (padding
> 10)
1300 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1305 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1306 patt
[padding
- 1], padding
);
1309 fragP
->fr_var
= count
;
1313 operand_type_all_zero (const union i386_operand_type
*x
)
1315 switch (ARRAY_SIZE(x
->array
))
1324 return !x
->array
[0];
1331 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1333 switch (ARRAY_SIZE(x
->array
))
1348 operand_type_equal (const union i386_operand_type
*x
,
1349 const union i386_operand_type
*y
)
1351 switch (ARRAY_SIZE(x
->array
))
1354 if (x
->array
[2] != y
->array
[2])
1357 if (x
->array
[1] != y
->array
[1])
1360 return x
->array
[0] == y
->array
[0];
1368 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1370 switch (ARRAY_SIZE(x
->array
))
1379 return !x
->array
[0];
1386 cpu_flags_equal (const union i386_cpu_flags
*x
,
1387 const union i386_cpu_flags
*y
)
1389 switch (ARRAY_SIZE(x
->array
))
1392 if (x
->array
[2] != y
->array
[2])
1395 if (x
->array
[1] != y
->array
[1])
1398 return x
->array
[0] == y
->array
[0];
1406 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1408 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1409 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1412 static INLINE i386_cpu_flags
1413 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1415 switch (ARRAY_SIZE (x
.array
))
1418 x
.array
[2] &= y
.array
[2];
1420 x
.array
[1] &= y
.array
[1];
1422 x
.array
[0] &= y
.array
[0];
1430 static INLINE i386_cpu_flags
1431 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1433 switch (ARRAY_SIZE (x
.array
))
1436 x
.array
[2] |= y
.array
[2];
1438 x
.array
[1] |= y
.array
[1];
1440 x
.array
[0] |= y
.array
[0];
1448 static INLINE i386_cpu_flags
1449 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1451 switch (ARRAY_SIZE (x
.array
))
1454 x
.array
[2] &= ~y
.array
[2];
1456 x
.array
[1] &= ~y
.array
[1];
1458 x
.array
[0] &= ~y
.array
[0];
1466 #define CPU_FLAGS_ARCH_MATCH 0x1
1467 #define CPU_FLAGS_64BIT_MATCH 0x2
1468 #define CPU_FLAGS_AES_MATCH 0x4
1469 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1470 #define CPU_FLAGS_AVX_MATCH 0x10
1472 #define CPU_FLAGS_32BIT_MATCH \
1473 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1474 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1475 #define CPU_FLAGS_PERFECT_MATCH \
1476 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1478 /* Return CPU flags match bits. */
1481 cpu_flags_match (const insn_template
*t
)
1483 i386_cpu_flags x
= t
->cpu_flags
;
1484 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1486 x
.bitfield
.cpu64
= 0;
1487 x
.bitfield
.cpuno64
= 0;
1489 if (cpu_flags_all_zero (&x
))
1491 /* This instruction is available on all archs. */
1492 match
|= CPU_FLAGS_32BIT_MATCH
;
1496 /* This instruction is available only on some archs. */
1497 i386_cpu_flags cpu
= cpu_arch_flags
;
1499 cpu
.bitfield
.cpu64
= 0;
1500 cpu
.bitfield
.cpuno64
= 0;
1501 cpu
= cpu_flags_and (x
, cpu
);
1502 if (!cpu_flags_all_zero (&cpu
))
1504 if (x
.bitfield
.cpuavx
)
1506 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1507 if (cpu
.bitfield
.cpuavx
)
1509 /* Check SSE2AVX. */
1510 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1512 match
|= (CPU_FLAGS_ARCH_MATCH
1513 | CPU_FLAGS_AVX_MATCH
);
1515 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1516 match
|= CPU_FLAGS_AES_MATCH
;
1518 if (!x
.bitfield
.cpupclmul
1519 || cpu
.bitfield
.cpupclmul
)
1520 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1524 match
|= CPU_FLAGS_ARCH_MATCH
;
1527 match
|= CPU_FLAGS_32BIT_MATCH
;
1533 static INLINE i386_operand_type
1534 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1536 switch (ARRAY_SIZE (x
.array
))
1539 x
.array
[2] &= y
.array
[2];
1541 x
.array
[1] &= y
.array
[1];
1543 x
.array
[0] &= y
.array
[0];
1551 static INLINE i386_operand_type
1552 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1554 switch (ARRAY_SIZE (x
.array
))
1557 x
.array
[2] |= y
.array
[2];
1559 x
.array
[1] |= y
.array
[1];
1561 x
.array
[0] |= y
.array
[0];
1569 static INLINE i386_operand_type
1570 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1572 switch (ARRAY_SIZE (x
.array
))
1575 x
.array
[2] ^= y
.array
[2];
1577 x
.array
[1] ^= y
.array
[1];
1579 x
.array
[0] ^= y
.array
[0];
1587 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1588 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1589 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1590 static const i386_operand_type inoutportreg
1591 = OPERAND_TYPE_INOUTPORTREG
;
1592 static const i386_operand_type reg16_inoutportreg
1593 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1594 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1595 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1596 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1597 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1598 static const i386_operand_type anydisp
1599 = OPERAND_TYPE_ANYDISP
;
1600 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1601 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1602 static const i386_operand_type regzmm
= OPERAND_TYPE_REGZMM
;
1603 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1604 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1605 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1606 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1607 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1608 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1609 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1610 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1611 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1612 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1613 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1624 operand_type_check (i386_operand_type t
, enum operand_type c
)
1629 return (t
.bitfield
.reg8
1632 || t
.bitfield
.reg64
);
1635 return (t
.bitfield
.imm8
1639 || t
.bitfield
.imm32s
1640 || t
.bitfield
.imm64
);
1643 return (t
.bitfield
.disp8
1644 || t
.bitfield
.disp16
1645 || t
.bitfield
.disp32
1646 || t
.bitfield
.disp32s
1647 || t
.bitfield
.disp64
);
1650 return (t
.bitfield
.disp8
1651 || t
.bitfield
.disp16
1652 || t
.bitfield
.disp32
1653 || t
.bitfield
.disp32s
1654 || t
.bitfield
.disp64
1655 || t
.bitfield
.baseindex
);
1664 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1665 operand J for instruction template T. */
1668 match_reg_size (const insn_template
*t
, unsigned int j
)
1670 return !((i
.types
[j
].bitfield
.byte
1671 && !t
->operand_types
[j
].bitfield
.byte
)
1672 || (i
.types
[j
].bitfield
.word
1673 && !t
->operand_types
[j
].bitfield
.word
)
1674 || (i
.types
[j
].bitfield
.dword
1675 && !t
->operand_types
[j
].bitfield
.dword
)
1676 || (i
.types
[j
].bitfield
.qword
1677 && !t
->operand_types
[j
].bitfield
.qword
));
1680 /* Return 1 if there is no conflict in any size on operand J for
1681 instruction template T. */
1684 match_mem_size (const insn_template
*t
, unsigned int j
)
1686 return (match_reg_size (t
, j
)
1687 && !((i
.types
[j
].bitfield
.unspecified
1688 && !t
->operand_types
[j
].bitfield
.unspecified
)
1689 || (i
.types
[j
].bitfield
.fword
1690 && !t
->operand_types
[j
].bitfield
.fword
)
1691 || (i
.types
[j
].bitfield
.tbyte
1692 && !t
->operand_types
[j
].bitfield
.tbyte
)
1693 || (i
.types
[j
].bitfield
.xmmword
1694 && !t
->operand_types
[j
].bitfield
.xmmword
)
1695 || (i
.types
[j
].bitfield
.ymmword
1696 && !t
->operand_types
[j
].bitfield
.ymmword
)
1697 || (i
.types
[j
].bitfield
.zmmword
1698 && !t
->operand_types
[j
].bitfield
.zmmword
)));
1701 /* Return 1 if there is no size conflict on any operands for
1702 instruction template T. */
1705 operand_size_match (const insn_template
*t
)
1710 /* Don't check jump instructions. */
1711 if (t
->opcode_modifier
.jump
1712 || t
->opcode_modifier
.jumpbyte
1713 || t
->opcode_modifier
.jumpdword
1714 || t
->opcode_modifier
.jumpintersegment
)
1717 /* Check memory and accumulator operand size. */
1718 for (j
= 0; j
< i
.operands
; j
++)
1720 if (t
->operand_types
[j
].bitfield
.anysize
)
1723 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1729 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1738 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1741 i
.error
= operand_size_mismatch
;
1745 /* Check reverse. */
1746 gas_assert (i
.operands
== 2);
1749 for (j
= 0; j
< 2; j
++)
1751 if (t
->operand_types
[j
].bitfield
.acc
1752 && !match_reg_size (t
, j
? 0 : 1))
1755 if (i
.types
[j
].bitfield
.mem
1756 && !match_mem_size (t
, j
? 0 : 1))
1764 operand_type_match (i386_operand_type overlap
,
1765 i386_operand_type given
)
1767 i386_operand_type temp
= overlap
;
1769 temp
.bitfield
.jumpabsolute
= 0;
1770 temp
.bitfield
.unspecified
= 0;
1771 temp
.bitfield
.byte
= 0;
1772 temp
.bitfield
.word
= 0;
1773 temp
.bitfield
.dword
= 0;
1774 temp
.bitfield
.fword
= 0;
1775 temp
.bitfield
.qword
= 0;
1776 temp
.bitfield
.tbyte
= 0;
1777 temp
.bitfield
.xmmword
= 0;
1778 temp
.bitfield
.ymmword
= 0;
1779 temp
.bitfield
.zmmword
= 0;
1780 if (operand_type_all_zero (&temp
))
1783 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1784 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1788 i
.error
= operand_type_mismatch
;
1792 /* If given types g0 and g1 are registers they must be of the same type
1793 unless the expected operand type register overlap is null.
1794 Note that Acc in a template matches every size of reg. */
1797 operand_type_register_match (i386_operand_type m0
,
1798 i386_operand_type g0
,
1799 i386_operand_type t0
,
1800 i386_operand_type m1
,
1801 i386_operand_type g1
,
1802 i386_operand_type t1
)
1804 if (!operand_type_check (g0
, reg
))
1807 if (!operand_type_check (g1
, reg
))
1810 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1811 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1812 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1813 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1816 if (m0
.bitfield
.acc
)
1818 t0
.bitfield
.reg8
= 1;
1819 t0
.bitfield
.reg16
= 1;
1820 t0
.bitfield
.reg32
= 1;
1821 t0
.bitfield
.reg64
= 1;
1824 if (m1
.bitfield
.acc
)
1826 t1
.bitfield
.reg8
= 1;
1827 t1
.bitfield
.reg16
= 1;
1828 t1
.bitfield
.reg32
= 1;
1829 t1
.bitfield
.reg64
= 1;
1832 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1833 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1834 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1835 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1838 i
.error
= register_type_mismatch
;
1843 static INLINE
unsigned int
1844 register_number (const reg_entry
*r
)
1846 unsigned int nr
= r
->reg_num
;
1848 if (r
->reg_flags
& RegRex
)
1854 static INLINE
unsigned int
1855 mode_from_disp_size (i386_operand_type t
)
1857 if (t
.bitfield
.disp8
|| t
.bitfield
.vec_disp8
)
1859 else if (t
.bitfield
.disp16
1860 || t
.bitfield
.disp32
1861 || t
.bitfield
.disp32s
)
1868 fits_in_signed_byte (addressT num
)
1870 return num
+ 0x80 <= 0xff;
1874 fits_in_unsigned_byte (addressT num
)
1880 fits_in_unsigned_word (addressT num
)
1882 return num
<= 0xffff;
1886 fits_in_signed_word (addressT num
)
1888 return num
+ 0x8000 <= 0xffff;
1892 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
1897 return num
+ 0x80000000 <= 0xffffffff;
1899 } /* fits_in_signed_long() */
1902 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
1907 return num
<= 0xffffffff;
1909 } /* fits_in_unsigned_long() */
1912 fits_in_vec_disp8 (offsetT num
)
1914 int shift
= i
.memshift
;
1920 mask
= (1 << shift
) - 1;
1922 /* Return 0 if NUM isn't properly aligned. */
1926 /* Check if NUM will fit in 8bit after shift. */
1927 return fits_in_signed_byte (num
>> shift
);
1931 fits_in_imm4 (offsetT num
)
1933 return (num
& 0xf) == num
;
1936 static i386_operand_type
1937 smallest_imm_type (offsetT num
)
1939 i386_operand_type t
;
1941 operand_type_set (&t
, 0);
1942 t
.bitfield
.imm64
= 1;
1944 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
1946 /* This code is disabled on the 486 because all the Imm1 forms
1947 in the opcode table are slower on the i486. They're the
1948 versions with the implicitly specified single-position
1949 displacement, which has another syntax if you really want to
1951 t
.bitfield
.imm1
= 1;
1952 t
.bitfield
.imm8
= 1;
1953 t
.bitfield
.imm8s
= 1;
1954 t
.bitfield
.imm16
= 1;
1955 t
.bitfield
.imm32
= 1;
1956 t
.bitfield
.imm32s
= 1;
1958 else if (fits_in_signed_byte (num
))
1960 t
.bitfield
.imm8
= 1;
1961 t
.bitfield
.imm8s
= 1;
1962 t
.bitfield
.imm16
= 1;
1963 t
.bitfield
.imm32
= 1;
1964 t
.bitfield
.imm32s
= 1;
1966 else if (fits_in_unsigned_byte (num
))
1968 t
.bitfield
.imm8
= 1;
1969 t
.bitfield
.imm16
= 1;
1970 t
.bitfield
.imm32
= 1;
1971 t
.bitfield
.imm32s
= 1;
1973 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
1975 t
.bitfield
.imm16
= 1;
1976 t
.bitfield
.imm32
= 1;
1977 t
.bitfield
.imm32s
= 1;
1979 else if (fits_in_signed_long (num
))
1981 t
.bitfield
.imm32
= 1;
1982 t
.bitfield
.imm32s
= 1;
1984 else if (fits_in_unsigned_long (num
))
1985 t
.bitfield
.imm32
= 1;
1991 offset_in_range (offsetT val
, int size
)
1997 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
1998 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
1999 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2001 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2007 /* If BFD64, sign extend val for 32bit address mode. */
2008 if (flag_code
!= CODE_64BIT
2009 || i
.prefix
[ADDR_PREFIX
])
2010 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2011 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2014 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2016 char buf1
[40], buf2
[40];
2018 sprint_value (buf1
, val
);
2019 sprint_value (buf2
, val
& mask
);
2020 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2034 a. PREFIX_EXIST if attempting to add a prefix where one from the
2035 same class already exists.
2036 b. PREFIX_LOCK if lock prefix is added.
2037 c. PREFIX_REP if rep/repne prefix is added.
2038 d. PREFIX_OTHER if other prefix is added.
2041 static enum PREFIX_GROUP
2042 add_prefix (unsigned int prefix
)
2044 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2047 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2048 && flag_code
== CODE_64BIT
)
2050 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2051 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2052 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2063 case CS_PREFIX_OPCODE
:
2064 case DS_PREFIX_OPCODE
:
2065 case ES_PREFIX_OPCODE
:
2066 case FS_PREFIX_OPCODE
:
2067 case GS_PREFIX_OPCODE
:
2068 case SS_PREFIX_OPCODE
:
2072 case REPNE_PREFIX_OPCODE
:
2073 case REPE_PREFIX_OPCODE
:
2078 case LOCK_PREFIX_OPCODE
:
2087 case ADDR_PREFIX_OPCODE
:
2091 case DATA_PREFIX_OPCODE
:
2095 if (i
.prefix
[q
] != 0)
2103 i
.prefix
[q
] |= prefix
;
2106 as_bad (_("same type of prefix used twice"));
2112 update_code_flag (int value
, int check
)
2114 PRINTF_LIKE ((*as_error
));
2116 flag_code
= (enum flag_code
) value
;
2117 if (flag_code
== CODE_64BIT
)
2119 cpu_arch_flags
.bitfield
.cpu64
= 1;
2120 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2124 cpu_arch_flags
.bitfield
.cpu64
= 0;
2125 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2127 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2130 as_error
= as_fatal
;
2133 (*as_error
) (_("64bit mode not supported on `%s'."),
2134 cpu_arch_name
? cpu_arch_name
: default_arch
);
2136 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2139 as_error
= as_fatal
;
2142 (*as_error
) (_("32bit mode not supported on `%s'."),
2143 cpu_arch_name
? cpu_arch_name
: default_arch
);
2145 stackop_size
= '\0';
/* Directive handler: switch code mode without the fatal check.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2155 set_16bit_gcc_code_flag (int new_code_flag
)
2157 flag_code
= (enum flag_code
) new_code_flag
;
2158 if (flag_code
!= CODE_16BIT
)
2160 cpu_arch_flags
.bitfield
.cpu64
= 0;
2161 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2162 stackop_size
= LONG_MNEM_SUFFIX
;
2166 set_intel_syntax (int syntax_flag
)
2168 /* Find out if register prefixing is specified. */
2169 int ask_naked_reg
= 0;
2172 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2174 char *string
= input_line_pointer
;
2175 int e
= get_symbol_end ();
2177 if (strcmp (string
, "prefix") == 0)
2179 else if (strcmp (string
, "noprefix") == 0)
2182 as_bad (_("bad argument to syntax directive."));
2183 *input_line_pointer
= e
;
2185 demand_empty_rest_of_line ();
2187 intel_syntax
= syntax_flag
;
2189 if (ask_naked_reg
== 0)
2190 allow_naked_reg
= (intel_syntax
2191 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2193 allow_naked_reg
= (ask_naked_reg
< 0);
2195 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2197 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2198 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2199 register_prefix
= allow_naked_reg
? "" : "%";
2203 set_intel_mnemonic (int mnemonic_flag
)
2205 intel_mnemonic
= mnemonic_flag
;
2209 set_allow_index_reg (int flag
)
2211 allow_index_reg
= flag
;
2215 set_check (int what
)
2217 enum check_kind
*kind
;
2222 kind
= &operand_check
;
2233 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2235 char *string
= input_line_pointer
;
2236 int e
= get_symbol_end ();
2238 if (strcmp (string
, "none") == 0)
2240 else if (strcmp (string
, "warning") == 0)
2241 *kind
= check_warning
;
2242 else if (strcmp (string
, "error") == 0)
2243 *kind
= check_error
;
2245 as_bad (_("bad argument to %s_check directive."), str
);
2246 *input_line_pointer
= e
;
2249 as_bad (_("missing argument for %s_check directive"), str
);
2251 demand_empty_rest_of_line ();
2255 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2256 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2258 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2259 static const char *arch
;
2261 /* Intel LIOM is only supported on ELF. */
2267 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2268 use default_arch. */
2269 arch
= cpu_arch_name
;
2271 arch
= default_arch
;
2274 /* If we are targeting Intel L1OM, we must enable it. */
2275 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2276 || new_flag
.bitfield
.cpul1om
)
2279 /* If we are targeting Intel K1OM, we must enable it. */
2280 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2281 || new_flag
.bitfield
.cpuk1om
)
2284 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2289 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2293 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2295 char *string
= input_line_pointer
;
2296 int e
= get_symbol_end ();
2298 i386_cpu_flags flags
;
2300 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2302 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2304 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2308 cpu_arch_name
= cpu_arch
[j
].name
;
2309 cpu_sub_arch_name
= NULL
;
2310 cpu_arch_flags
= cpu_arch
[j
].flags
;
2311 if (flag_code
== CODE_64BIT
)
2313 cpu_arch_flags
.bitfield
.cpu64
= 1;
2314 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2318 cpu_arch_flags
.bitfield
.cpu64
= 0;
2319 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2321 cpu_arch_isa
= cpu_arch
[j
].type
;
2322 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2323 if (!cpu_arch_tune_set
)
2325 cpu_arch_tune
= cpu_arch_isa
;
2326 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2331 if (!cpu_arch
[j
].negated
)
2332 flags
= cpu_flags_or (cpu_arch_flags
,
2335 flags
= cpu_flags_and_not (cpu_arch_flags
,
2337 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2339 if (cpu_sub_arch_name
)
2341 char *name
= cpu_sub_arch_name
;
2342 cpu_sub_arch_name
= concat (name
,
2344 (const char *) NULL
);
2348 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2349 cpu_arch_flags
= flags
;
2350 cpu_arch_isa_flags
= flags
;
2352 *input_line_pointer
= e
;
2353 demand_empty_rest_of_line ();
2357 if (j
>= ARRAY_SIZE (cpu_arch
))
2358 as_bad (_("no such architecture: `%s'"), string
);
2360 *input_line_pointer
= e
;
2363 as_bad (_("missing cpu architecture"));
2365 no_cond_jump_promotion
= 0;
2366 if (*input_line_pointer
== ','
2367 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2369 char *string
= ++input_line_pointer
;
2370 int e
= get_symbol_end ();
2372 if (strcmp (string
, "nojumps") == 0)
2373 no_cond_jump_promotion
= 1;
2374 else if (strcmp (string
, "jumps") == 0)
2377 as_bad (_("no such architecture modifier: `%s'"), string
);
2379 *input_line_pointer
= e
;
2382 demand_empty_rest_of_line ();
2385 enum bfd_architecture
2388 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2390 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2391 || flag_code
!= CODE_64BIT
)
2392 as_fatal (_("Intel L1OM is 64bit ELF only"));
2393 return bfd_arch_l1om
;
2395 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2397 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2398 || flag_code
!= CODE_64BIT
)
2399 as_fatal (_("Intel K1OM is 64bit ELF only"));
2400 return bfd_arch_k1om
;
2403 return bfd_arch_i386
;
2409 if (!strncmp (default_arch
, "x86_64", 6))
2411 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2413 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2414 || default_arch
[6] != '\0')
2415 as_fatal (_("Intel L1OM is 64bit ELF only"));
2416 return bfd_mach_l1om
;
2418 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2420 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2421 || default_arch
[6] != '\0')
2422 as_fatal (_("Intel K1OM is 64bit ELF only"));
2423 return bfd_mach_k1om
;
2425 else if (default_arch
[6] == '\0')
2426 return bfd_mach_x86_64
;
2428 return bfd_mach_x64_32
;
2430 else if (!strcmp (default_arch
, "i386"))
2431 return bfd_mach_i386_i386
;
2433 as_fatal (_("unknown architecture"));
2439 const char *hash_err
;
2441 /* Initialize op_hash hash table. */
2442 op_hash
= hash_new ();
2445 const insn_template
*optab
;
2446 templates
*core_optab
;
2448 /* Setup for loop. */
2450 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2451 core_optab
->start
= optab
;
2456 if (optab
->name
== NULL
2457 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2459 /* different name --> ship out current template list;
2460 add to hash table; & begin anew. */
2461 core_optab
->end
= optab
;
2462 hash_err
= hash_insert (op_hash
,
2464 (void *) core_optab
);
2467 as_fatal (_("can't hash %s: %s"),
2471 if (optab
->name
== NULL
)
2473 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2474 core_optab
->start
= optab
;
2479 /* Initialize reg_hash hash table. */
2480 reg_hash
= hash_new ();
2482 const reg_entry
*regtab
;
2483 unsigned int regtab_size
= i386_regtab_size
;
2485 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2487 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2489 as_fatal (_("can't hash %s: %s"),
2495 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2500 for (c
= 0; c
< 256; c
++)
2505 mnemonic_chars
[c
] = c
;
2506 register_chars
[c
] = c
;
2507 operand_chars
[c
] = c
;
2509 else if (ISLOWER (c
))
2511 mnemonic_chars
[c
] = c
;
2512 register_chars
[c
] = c
;
2513 operand_chars
[c
] = c
;
2515 else if (ISUPPER (c
))
2517 mnemonic_chars
[c
] = TOLOWER (c
);
2518 register_chars
[c
] = mnemonic_chars
[c
];
2519 operand_chars
[c
] = c
;
2521 else if (c
== '{' || c
== '}')
2522 operand_chars
[c
] = c
;
2524 if (ISALPHA (c
) || ISDIGIT (c
))
2525 identifier_chars
[c
] = c
;
2528 identifier_chars
[c
] = c
;
2529 operand_chars
[c
] = c
;
2534 identifier_chars
['@'] = '@';
2537 identifier_chars
['?'] = '?';
2538 operand_chars
['?'] = '?';
2540 digit_chars
['-'] = '-';
2541 mnemonic_chars
['_'] = '_';
2542 mnemonic_chars
['-'] = '-';
2543 mnemonic_chars
['.'] = '.';
2544 identifier_chars
['_'] = '_';
2545 identifier_chars
['.'] = '.';
2547 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2548 operand_chars
[(unsigned char) *p
] = *p
;
2551 if (flag_code
== CODE_64BIT
)
2553 #if defined (OBJ_COFF) && defined (TE_PE)
2554 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2557 x86_dwarf2_return_column
= 16;
2559 x86_cie_data_alignment
= -8;
2563 x86_dwarf2_return_column
= 8;
2564 x86_cie_data_alignment
= -4;
2569 i386_print_statistics (FILE *file
)
2571 hash_print_statistics (file
, "i386 opcode", op_hash
);
2572 hash_print_statistics (file
, "i386 register", reg_hash
);
2577 /* Debugging routines for md_assemble. */
2578 static void pte (insn_template
*);
2579 static void pt (i386_operand_type
);
2580 static void pe (expressionS
*);
2581 static void ps (symbolS
*);
2584 pi (char *line
, i386_insn
*x
)
2588 fprintf (stdout
, "%s: template ", line
);
2590 fprintf (stdout
, " address: base %s index %s scale %x\n",
2591 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2592 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2593 x
->log2_scale_factor
);
2594 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2595 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2596 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2597 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2598 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2599 (x
->rex
& REX_W
) != 0,
2600 (x
->rex
& REX_R
) != 0,
2601 (x
->rex
& REX_X
) != 0,
2602 (x
->rex
& REX_B
) != 0);
2603 for (j
= 0; j
< x
->operands
; j
++)
2605 fprintf (stdout
, " #%d: ", j
+ 1);
2607 fprintf (stdout
, "\n");
2608 if (x
->types
[j
].bitfield
.reg8
2609 || x
->types
[j
].bitfield
.reg16
2610 || x
->types
[j
].bitfield
.reg32
2611 || x
->types
[j
].bitfield
.reg64
2612 || x
->types
[j
].bitfield
.regmmx
2613 || x
->types
[j
].bitfield
.regxmm
2614 || x
->types
[j
].bitfield
.regymm
2615 || x
->types
[j
].bitfield
.regzmm
2616 || x
->types
[j
].bitfield
.sreg2
2617 || x
->types
[j
].bitfield
.sreg3
2618 || x
->types
[j
].bitfield
.control
2619 || x
->types
[j
].bitfield
.debug
2620 || x
->types
[j
].bitfield
.test
)
2621 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2622 if (operand_type_check (x
->types
[j
], imm
))
2624 if (operand_type_check (x
->types
[j
], disp
))
2625 pe (x
->op
[j
].disps
);
2630 pte (insn_template
*t
)
2633 fprintf (stdout
, " %d operands ", t
->operands
);
2634 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2635 if (t
->extension_opcode
!= None
)
2636 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2637 if (t
->opcode_modifier
.d
)
2638 fprintf (stdout
, "D");
2639 if (t
->opcode_modifier
.w
)
2640 fprintf (stdout
, "W");
2641 fprintf (stdout
, "\n");
2642 for (j
= 0; j
< t
->operands
; j
++)
2644 fprintf (stdout
, " #%d type ", j
+ 1);
2645 pt (t
->operand_types
[j
]);
2646 fprintf (stdout
, "\n");
2653 fprintf (stdout
, " operation %d\n", e
->X_op
);
2654 fprintf (stdout
, " add_number %ld (%lx)\n",
2655 (long) e
->X_add_number
, (long) e
->X_add_number
);
2656 if (e
->X_add_symbol
)
2658 fprintf (stdout
, " add_symbol ");
2659 ps (e
->X_add_symbol
);
2660 fprintf (stdout
, "\n");
2664 fprintf (stdout
, " op_symbol ");
2665 ps (e
->X_op_symbol
);
2666 fprintf (stdout
, "\n");
2673 fprintf (stdout
, "%s type %s%s",
2675 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2676 segment_name (S_GET_SEGMENT (s
)));
2679 static struct type_name
2681 i386_operand_type mask
;
2684 const type_names
[] =
2686 { OPERAND_TYPE_REG8
, "r8" },
2687 { OPERAND_TYPE_REG16
, "r16" },
2688 { OPERAND_TYPE_REG32
, "r32" },
2689 { OPERAND_TYPE_REG64
, "r64" },
2690 { OPERAND_TYPE_IMM8
, "i8" },
2691 { OPERAND_TYPE_IMM8
, "i8s" },
2692 { OPERAND_TYPE_IMM16
, "i16" },
2693 { OPERAND_TYPE_IMM32
, "i32" },
2694 { OPERAND_TYPE_IMM32S
, "i32s" },
2695 { OPERAND_TYPE_IMM64
, "i64" },
2696 { OPERAND_TYPE_IMM1
, "i1" },
2697 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2698 { OPERAND_TYPE_DISP8
, "d8" },
2699 { OPERAND_TYPE_DISP16
, "d16" },
2700 { OPERAND_TYPE_DISP32
, "d32" },
2701 { OPERAND_TYPE_DISP32S
, "d32s" },
2702 { OPERAND_TYPE_DISP64
, "d64" },
2703 { OPERAND_TYPE_VEC_DISP8
, "Vector d8" },
2704 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2705 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2706 { OPERAND_TYPE_CONTROL
, "control reg" },
2707 { OPERAND_TYPE_TEST
, "test reg" },
2708 { OPERAND_TYPE_DEBUG
, "debug reg" },
2709 { OPERAND_TYPE_FLOATREG
, "FReg" },
2710 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2711 { OPERAND_TYPE_SREG2
, "SReg2" },
2712 { OPERAND_TYPE_SREG3
, "SReg3" },
2713 { OPERAND_TYPE_ACC
, "Acc" },
2714 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2715 { OPERAND_TYPE_REGMMX
, "rMMX" },
2716 { OPERAND_TYPE_REGXMM
, "rXMM" },
2717 { OPERAND_TYPE_REGYMM
, "rYMM" },
2718 { OPERAND_TYPE_REGZMM
, "rZMM" },
2719 { OPERAND_TYPE_REGMASK
, "Mask reg" },
2720 { OPERAND_TYPE_ESSEG
, "es" },
2724 pt (i386_operand_type t
)
2727 i386_operand_type a
;
2729 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2731 a
= operand_type_and (t
, type_names
[j
].mask
);
2732 if (!operand_type_all_zero (&a
))
2733 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2738 #endif /* DEBUG386 */
2740 static bfd_reloc_code_real_type
2741 reloc (unsigned int size
,
2744 bfd_reloc_code_real_type other
)
2746 if (other
!= NO_RELOC
)
2748 reloc_howto_type
*rel
;
2753 case BFD_RELOC_X86_64_GOT32
:
2754 return BFD_RELOC_X86_64_GOT64
;
2756 case BFD_RELOC_X86_64_GOTPLT64
:
2757 return BFD_RELOC_X86_64_GOTPLT64
;
2759 case BFD_RELOC_X86_64_PLTOFF64
:
2760 return BFD_RELOC_X86_64_PLTOFF64
;
2762 case BFD_RELOC_X86_64_GOTPC32
:
2763 other
= BFD_RELOC_X86_64_GOTPC64
;
2765 case BFD_RELOC_X86_64_GOTPCREL
:
2766 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2768 case BFD_RELOC_X86_64_TPOFF32
:
2769 other
= BFD_RELOC_X86_64_TPOFF64
;
2771 case BFD_RELOC_X86_64_DTPOFF32
:
2772 other
= BFD_RELOC_X86_64_DTPOFF64
;
2778 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2779 if (other
== BFD_RELOC_SIZE32
)
2782 other
= BFD_RELOC_SIZE64
;
2785 as_bad (_("there are no pc-relative size relocations"));
2791 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2792 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
2795 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
2797 as_bad (_("unknown relocation (%u)"), other
);
2798 else if (size
!= bfd_get_reloc_size (rel
))
2799 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2800 bfd_get_reloc_size (rel
),
2802 else if (pcrel
&& !rel
->pc_relative
)
2803 as_bad (_("non-pc-relative relocation for pc-relative field"));
2804 else if ((rel
->complain_on_overflow
== complain_overflow_signed
2806 || (rel
->complain_on_overflow
== complain_overflow_unsigned
2808 as_bad (_("relocated field and relocation type differ in signedness"));
2817 as_bad (_("there are no unsigned pc-relative relocations"));
2820 case 1: return BFD_RELOC_8_PCREL
;
2821 case 2: return BFD_RELOC_16_PCREL
;
2822 case 4: return BFD_RELOC_32_PCREL
;
2823 case 8: return BFD_RELOC_64_PCREL
;
2825 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
2832 case 4: return BFD_RELOC_X86_64_32S
;
2837 case 1: return BFD_RELOC_8
;
2838 case 2: return BFD_RELOC_16
;
2839 case 4: return BFD_RELOC_32
;
2840 case 8: return BFD_RELOC_64
;
2842 as_bad (_("cannot do %s %u byte relocation"),
2843 sign
> 0 ? "signed" : "unsigned", size
);
2849 /* Here we decide which fixups can be adjusted to make them relative to
2850 the beginning of the section instead of the symbol. Basically we need
2851 to make sure that the dynamic relocations are done correctly, so in
2852 some cases we force the original symbol to be used. */
2855 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
2857 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2861 /* Don't adjust pc-relative references to merge sections in 64-bit
2863 if (use_rela_relocations
2864 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
2868 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2869 and changed later by validate_fix. */
2870 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
2871 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
2874 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2875 for size relocations. */
2876 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
2877 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
2878 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
2879 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
2880 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
2881 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
2882 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
2883 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
2884 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
2885 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
2886 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
2887 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
2888 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
2889 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
2890 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
2891 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
2892 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
2893 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
2894 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
2895 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
2896 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
2897 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
2898 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
2899 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
2900 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
2901 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
2902 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
2903 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
2904 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
2905 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify MNEMONIC for Intel-syntax operand sizing:
     0 - not an x87 math operation (or fxsave/fxrstor)
     1 - x87 real (floating-point) operation
     2 - x87 integer operation (fi...)
     3 - x87 control/state operation (fldcw, fnstenv, fsave, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code).  */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
2964 /* Build the VEX prefix. */
2967 build_vex_prefix (const insn_template
*t
)
2969 unsigned int register_specifier
;
2970 unsigned int implied_prefix
;
2971 unsigned int vector_length
;
2973 /* Check register specifier. */
2974 if (i
.vex
.register_specifier
)
2976 register_specifier
=
2977 ~register_number (i
.vex
.register_specifier
) & 0xf;
2978 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
2981 register_specifier
= 0xf;
2983 /* Use 2-byte VEX prefix by swappping destination and source
2986 && i
.operands
== i
.reg_operands
2987 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
2988 && i
.tm
.opcode_modifier
.s
2991 unsigned int xchg
= i
.operands
- 1;
2992 union i386_op temp_op
;
2993 i386_operand_type temp_type
;
2995 temp_type
= i
.types
[xchg
];
2996 i
.types
[xchg
] = i
.types
[0];
2997 i
.types
[0] = temp_type
;
2998 temp_op
= i
.op
[xchg
];
2999 i
.op
[xchg
] = i
.op
[0];
3002 gas_assert (i
.rm
.mode
== 3);
3006 i
.rm
.regmem
= i
.rm
.reg
;
3009 /* Use the next insn. */
3013 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3014 vector_length
= avxscalar
;
3016 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
3018 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3023 case DATA_PREFIX_OPCODE
:
3026 case REPE_PREFIX_OPCODE
:
3029 case REPNE_PREFIX_OPCODE
:
3036 /* Use 2-byte VEX prefix if possible. */
3037 if (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3038 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
3039 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3041 /* 2-byte VEX prefix. */
3045 i
.vex
.bytes
[0] = 0xc5;
3047 /* Check the REX.R bit. */
3048 r
= (i
.rex
& REX_R
) ? 0 : 1;
3049 i
.vex
.bytes
[1] = (r
<< 7
3050 | register_specifier
<< 3
3051 | vector_length
<< 2
3056 /* 3-byte VEX prefix. */
3061 switch (i
.tm
.opcode_modifier
.vexopcode
)
3065 i
.vex
.bytes
[0] = 0xc4;
3069 i
.vex
.bytes
[0] = 0xc4;
3073 i
.vex
.bytes
[0] = 0xc4;
3077 i
.vex
.bytes
[0] = 0x8f;
3081 i
.vex
.bytes
[0] = 0x8f;
3085 i
.vex
.bytes
[0] = 0x8f;
3091 /* The high 3 bits of the second VEX byte are 1's compliment
3092 of RXB bits from REX. */
3093 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3095 /* Check the REX.W bit. */
3096 w
= (i
.rex
& REX_W
) ? 1 : 0;
3097 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3100 i
.vex
.bytes
[2] = (w
<< 7
3101 | register_specifier
<< 3
3102 | vector_length
<< 2
3107 /* Build the EVEX prefix. */
3110 build_evex_prefix (void)
3112 unsigned int register_specifier
;
3113 unsigned int implied_prefix
;
3115 rex_byte vrex_used
= 0;
3117 /* Check register specifier. */
3118 if (i
.vex
.register_specifier
)
3120 gas_assert ((i
.vrex
& REX_X
) == 0);
3122 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3123 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3124 register_specifier
+= 8;
3125 /* The upper 16 registers are encoded in the fourth byte of the
3127 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3128 i
.vex
.bytes
[3] = 0x8;
3129 register_specifier
= ~register_specifier
& 0xf;
3133 register_specifier
= 0xf;
3135 /* Encode upper 16 vector index register in the fourth byte of
3137 if (!(i
.vrex
& REX_X
))
3138 i
.vex
.bytes
[3] = 0x8;
3143 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3148 case DATA_PREFIX_OPCODE
:
3151 case REPE_PREFIX_OPCODE
:
3154 case REPNE_PREFIX_OPCODE
:
3161 /* 4 byte EVEX prefix. */
3163 i
.vex
.bytes
[0] = 0x62;
3166 switch (i
.tm
.opcode_modifier
.vexopcode
)
3182 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3184 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3186 /* The fifth bit of the second EVEX byte is 1's compliment of the
3187 REX_R bit in VREX. */
3188 if (!(i
.vrex
& REX_R
))
3189 i
.vex
.bytes
[1] |= 0x10;
3193 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3195 /* When all operands are registers, the REX_X bit in REX is not
3196 used. We reuse it to encode the upper 16 registers, which is
3197 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3198 as 1's compliment. */
3199 if ((i
.vrex
& REX_B
))
3202 i
.vex
.bytes
[1] &= ~0x40;
3206 /* EVEX instructions shouldn't need the REX prefix. */
3207 i
.vrex
&= ~vrex_used
;
3208 gas_assert (i
.vrex
== 0);
3210 /* Check the REX.W bit. */
3211 w
= (i
.rex
& REX_W
) ? 1 : 0;
3212 if (i
.tm
.opcode_modifier
.vexw
)
3214 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3217 /* If w is not set it means we are dealing with WIG instruction. */
3220 if (evexwig
== evexw1
)
3224 /* Encode the U bit. */
3225 implied_prefix
|= 0x4;
3227 /* The third byte of the EVEX prefix. */
3228 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3230 /* The fourth byte of the EVEX prefix. */
3231 /* The zeroing-masking bit. */
3232 if (i
.mask
&& i
.mask
->zeroing
)
3233 i
.vex
.bytes
[3] |= 0x80;
3235 /* Don't always set the broadcast bit if there is no RC. */
3238 /* Encode the vector length. */
3239 unsigned int vec_length
;
3241 switch (i
.tm
.opcode_modifier
.evex
)
3243 case EVEXLIG
: /* LL' is ignored */
3244 vec_length
= evexlig
<< 5;
3247 vec_length
= 0 << 5;
3250 vec_length
= 1 << 5;
3253 vec_length
= 2 << 5;
3259 i
.vex
.bytes
[3] |= vec_length
;
3260 /* Encode the broadcast bit. */
3262 i
.vex
.bytes
[3] |= 0x10;
3266 if (i
.rounding
->type
!= saeonly
)
3267 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3269 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3272 if (i
.mask
&& i
.mask
->mask
)
3273 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3277 process_immext (void)
3281 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3284 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3285 with an opcode suffix which is coded in the same place as an
3286 8-bit immediate field would be.
3287 Here we check those operands and remove them afterwards. */
3290 for (x
= 0; x
< i
.operands
; x
++)
3291 if (register_number (i
.op
[x
].regs
) != x
)
3292 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3293 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3299 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3300 which is coded in the same place as an 8-bit immediate field
3301 would be. Here we fake an 8-bit immediate operand from the
3302 opcode suffix stored in tm.extension_opcode.
3304 AVX instructions also use this encoding, for some of
3305 3 argument instructions. */
3307 gas_assert (i
.imm_operands
<= 1
3309 || ((i
.tm
.opcode_modifier
.vex
3310 || i
.tm
.opcode_modifier
.evex
)
3311 && i
.operands
<= 4)));
3313 exp
= &im_expressions
[i
.imm_operands
++];
3314 i
.op
[i
.operands
].imms
= exp
;
3315 i
.types
[i
.operands
] = imm8
;
3317 exp
->X_op
= O_constant
;
3318 exp
->X_add_number
= i
.tm
.extension_opcode
;
3319 i
.tm
.extension_opcode
= None
;
3326 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3331 as_bad (_("invalid instruction `%s' after `%s'"),
3332 i
.tm
.name
, i
.hle_prefix
);
3335 if (i
.prefix
[LOCK_PREFIX
])
3337 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3341 case HLEPrefixRelease
:
3342 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3344 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3348 if (i
.mem_operands
== 0
3349 || !operand_type_check (i
.types
[i
.operands
- 1], anymem
))
3351 as_bad (_("memory destination needed for instruction `%s'"
3352 " after `xrelease'"), i
.tm
.name
);
3359 /* This is the guts of the machine-dependent assembler. LINE points to a
3360 machine dependent instruction. This function is supposed to emit
3361 the frags/bytes it assembles to. */
3364 md_assemble (char *line
)
3367 char mnemonic
[MAX_MNEM_SIZE
];
3368 const insn_template
*t
;
3370 /* Initialize globals. */
3371 memset (&i
, '\0', sizeof (i
));
3372 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3373 i
.reloc
[j
] = NO_RELOC
;
3374 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3375 memset (im_expressions
, '\0', sizeof (im_expressions
));
3376 save_stack_p
= save_stack
;
3378 /* First parse an instruction mnemonic & call i386_operand for the operands.
3379 We assume that the scrubber has arranged it so that line[0] is the valid
3380 start of a (possibly prefixed) mnemonic. */
3382 line
= parse_insn (line
, mnemonic
);
3386 line
= parse_operands (line
, mnemonic
);
3391 /* Now we've parsed the mnemonic into a set of templates, and have the
3392 operands at hand. */
3394 /* All intel opcodes have reversed operands except for "bound" and
3395 "enter". We also don't reverse intersegment "jmp" and "call"
3396 instructions with 2 immediate operands so that the immediate segment
3397 precedes the offset, as it does when in AT&T mode. */
3400 && (strcmp (mnemonic
, "bound") != 0)
3401 && (strcmp (mnemonic
, "invlpga") != 0)
3402 && !(operand_type_check (i
.types
[0], imm
)
3403 && operand_type_check (i
.types
[1], imm
)))
3406 /* The order of the immediates should be reversed
3407 for 2 immediates extrq and insertq instructions */
3408 if (i
.imm_operands
== 2
3409 && (strcmp (mnemonic
, "extrq") == 0
3410 || strcmp (mnemonic
, "insertq") == 0))
3411 swap_2_operands (0, 1);
3416 /* Don't optimize displacement for movabs since it only takes 64bit
3419 && i
.disp_encoding
!= disp_encoding_32bit
3420 && (flag_code
!= CODE_64BIT
3421 || strcmp (mnemonic
, "movabs") != 0))
3424 /* Next, we find a template that matches the given insn,
3425 making sure the overlap of the given operands types is consistent
3426 with the template operand types. */
3428 if (!(t
= match_template ()))
3431 if (sse_check
!= check_none
3432 && !i
.tm
.opcode_modifier
.noavx
3433 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3434 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3435 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3436 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3437 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3438 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3440 (sse_check
== check_warning
3442 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3445 /* Zap movzx and movsx suffix. The suffix has been set from
3446 "word ptr" or "byte ptr" on the source operand in Intel syntax
3447 or extracted from mnemonic in AT&T syntax. But we'll use
3448 the destination register to choose the suffix for encoding. */
3449 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3451 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3452 there is no suffix, the default will be byte extension. */
3453 if (i
.reg_operands
!= 2
3456 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3461 if (i
.tm
.opcode_modifier
.fwait
)
3462 if (!add_prefix (FWAIT_OPCODE
))
3465 /* Check if REP prefix is OK. */
3466 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
3468 as_bad (_("invalid instruction `%s' after `%s'"),
3469 i
.tm
.name
, i
.rep_prefix
);
3473 /* Check for lock without a lockable instruction. Destination operand
3474 must be memory unless it is xchg (0x86). */
3475 if (i
.prefix
[LOCK_PREFIX
]
3476 && (!i
.tm
.opcode_modifier
.islockable
3477 || i
.mem_operands
== 0
3478 || (i
.tm
.base_opcode
!= 0x86
3479 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3481 as_bad (_("expecting lockable instruction after `lock'"));
3485 /* Check if HLE prefix is OK. */
3486 if (i
.hle_prefix
&& !check_hle ())
3489 /* Check BND prefix. */
3490 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
3491 as_bad (_("expecting valid branch instruction after `bnd'"));
3493 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
3494 && flag_code
== CODE_64BIT
3495 && i
.prefix
[ADDR_PREFIX
])
3496 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3498 /* Insert BND prefix. */
3500 && i
.tm
.opcode_modifier
.bndprefixok
3501 && !i
.prefix
[BND_PREFIX
])
3502 add_prefix (BND_PREFIX_OPCODE
);
3504 /* Check string instruction segment overrides. */
3505 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3507 if (!check_string ())
3509 i
.disp_operands
= 0;
3512 if (!process_suffix ())
3515 /* Update operand types. */
3516 for (j
= 0; j
< i
.operands
; j
++)
3517 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3519 /* Make still unresolved immediate matches conform to size of immediate
3520 given in i.suffix. */
3521 if (!finalize_imm ())
3524 if (i
.types
[0].bitfield
.imm1
)
3525 i
.imm_operands
= 0; /* kludge for shift insns. */
3527 /* We only need to check those implicit registers for instructions
3528 with 3 operands or less. */
3529 if (i
.operands
<= 3)
3530 for (j
= 0; j
< i
.operands
; j
++)
3531 if (i
.types
[j
].bitfield
.inoutportreg
3532 || i
.types
[j
].bitfield
.shiftcount
3533 || i
.types
[j
].bitfield
.acc
3534 || i
.types
[j
].bitfield
.floatacc
)
3537 /* ImmExt should be processed after SSE2AVX. */
3538 if (!i
.tm
.opcode_modifier
.sse2avx
3539 && i
.tm
.opcode_modifier
.immext
)
3542 /* For insns with operands there are more diddles to do to the opcode. */
3545 if (!process_operands ())
3548 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3550 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3551 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3554 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
3556 if (flag_code
== CODE_16BIT
)
3558 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3563 if (i
.tm
.opcode_modifier
.vex
)
3564 build_vex_prefix (t
);
3566 build_evex_prefix ();
3569 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3570 instructions may define INT_OPCODE as well, so avoid this corner
3571 case for those instructions that use MODRM. */
3572 if (i
.tm
.base_opcode
== INT_OPCODE
3573 && !i
.tm
.opcode_modifier
.modrm
3574 && i
.op
[0].imms
->X_add_number
== 3)
3576 i
.tm
.base_opcode
= INT3_OPCODE
;
3580 if ((i
.tm
.opcode_modifier
.jump
3581 || i
.tm
.opcode_modifier
.jumpbyte
3582 || i
.tm
.opcode_modifier
.jumpdword
)
3583 && i
.op
[0].disps
->X_op
== O_constant
)
3585 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3586 the absolute address given by the constant. Since ix86 jumps and
3587 calls are pc relative, we need to generate a reloc. */
3588 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3589 i
.op
[0].disps
->X_op
= O_symbol
;
3592 if (i
.tm
.opcode_modifier
.rex64
)
3595 /* For 8 bit registers we need an empty rex prefix. Also if the
3596 instruction already has a prefix, we need to convert old
3597 registers to new ones. */
3599 if ((i
.types
[0].bitfield
.reg8
3600 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3601 || (i
.types
[1].bitfield
.reg8
3602 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3603 || ((i
.types
[0].bitfield
.reg8
3604 || i
.types
[1].bitfield
.reg8
)
3609 i
.rex
|= REX_OPCODE
;
3610 for (x
= 0; x
< 2; x
++)
3612 /* Look for 8 bit operand that uses old registers. */
3613 if (i
.types
[x
].bitfield
.reg8
3614 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3616 /* In case it is "hi" register, give up. */
3617 if (i
.op
[x
].regs
->reg_num
> 3)
3618 as_bad (_("can't encode register '%s%s' in an "
3619 "instruction requiring REX prefix."),
3620 register_prefix
, i
.op
[x
].regs
->reg_name
);
3622 /* Otherwise it is equivalent to the extended register.
3623 Since the encoding doesn't change this is merely
3624 cosmetic cleanup for debug output. */
3626 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3632 add_prefix (REX_OPCODE
| i
.rex
);
3634 /* We are ready to output the insn. */
3639 parse_insn (char *line
, char *mnemonic
)
3642 char *token_start
= l
;
3645 const insn_template
*t
;
3651 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3656 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3658 as_bad (_("no such instruction: `%s'"), token_start
);
3663 if (!is_space_char (*l
)
3664 && *l
!= END_OF_INSN
3666 || (*l
!= PREFIX_SEPARATOR
3669 as_bad (_("invalid character %s in mnemonic"),
3670 output_invalid (*l
));
3673 if (token_start
== l
)
3675 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3676 as_bad (_("expecting prefix; got nothing"));
3678 as_bad (_("expecting mnemonic; got nothing"));
3682 /* Look up instruction (or prefix) via hash table. */
3683 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3685 if (*l
!= END_OF_INSN
3686 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3687 && current_templates
3688 && current_templates
->start
->opcode_modifier
.isprefix
)
3690 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3692 as_bad ((flag_code
!= CODE_64BIT
3693 ? _("`%s' is only supported in 64-bit mode")
3694 : _("`%s' is not supported in 64-bit mode")),
3695 current_templates
->start
->name
);
3698 /* If we are in 16-bit mode, do not allow addr16 or data16.
3699 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3700 if ((current_templates
->start
->opcode_modifier
.size16
3701 || current_templates
->start
->opcode_modifier
.size32
)
3702 && flag_code
!= CODE_64BIT
3703 && (current_templates
->start
->opcode_modifier
.size32
3704 ^ (flag_code
== CODE_16BIT
)))
3706 as_bad (_("redundant %s prefix"),
3707 current_templates
->start
->name
);
3710 /* Add prefix, checking for repeated prefixes. */
3711 switch (add_prefix (current_templates
->start
->base_opcode
))
3716 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
3717 i
.hle_prefix
= current_templates
->start
->name
;
3718 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
3719 i
.bnd_prefix
= current_templates
->start
->name
;
3721 i
.rep_prefix
= current_templates
->start
->name
;
3726 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3733 if (!current_templates
)
3735 /* Check if we should swap operand or force 32bit displacement in
3737 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
3739 else if (mnem_p
- 3 == dot_p
3742 i
.disp_encoding
= disp_encoding_8bit
;
3743 else if (mnem_p
- 4 == dot_p
3747 i
.disp_encoding
= disp_encoding_32bit
;
3752 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3755 if (!current_templates
)
3758 /* See if we can get a match by trimming off a suffix. */
3761 case WORD_MNEM_SUFFIX
:
3762 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
3763 i
.suffix
= SHORT_MNEM_SUFFIX
;
3765 case BYTE_MNEM_SUFFIX
:
3766 case QWORD_MNEM_SUFFIX
:
3767 i
.suffix
= mnem_p
[-1];
3769 current_templates
= (const templates
*) hash_find (op_hash
,
3772 case SHORT_MNEM_SUFFIX
:
3773 case LONG_MNEM_SUFFIX
:
3776 i
.suffix
= mnem_p
[-1];
3778 current_templates
= (const templates
*) hash_find (op_hash
,
3787 if (intel_float_operand (mnemonic
) == 1)
3788 i
.suffix
= SHORT_MNEM_SUFFIX
;
3790 i
.suffix
= LONG_MNEM_SUFFIX
;
3792 current_templates
= (const templates
*) hash_find (op_hash
,
3797 if (!current_templates
)
3799 as_bad (_("no such instruction: `%s'"), token_start
);
3804 if (current_templates
->start
->opcode_modifier
.jump
3805 || current_templates
->start
->opcode_modifier
.jumpbyte
)
3807 /* Check for a branch hint. We allow ",pt" and ",pn" for
3808 predict taken and predict not taken respectively.
3809 I'm not sure that branch hints actually do anything on loop
3810 and jcxz insns (JumpByte) for current Pentium4 chips. They
3811 may work in the future and it doesn't hurt to accept them
3813 if (l
[0] == ',' && l
[1] == 'p')
3817 if (!add_prefix (DS_PREFIX_OPCODE
))
3821 else if (l
[2] == 'n')
3823 if (!add_prefix (CS_PREFIX_OPCODE
))
3829 /* Any other comma loses. */
3832 as_bad (_("invalid character %s in mnemonic"),
3833 output_invalid (*l
));
3837 /* Check if instruction is supported on specified architecture. */
3839 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3841 supported
|= cpu_flags_match (t
);
3842 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
3846 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
3848 as_bad (flag_code
== CODE_64BIT
3849 ? _("`%s' is not supported in 64-bit mode")
3850 : _("`%s' is only supported in 64-bit mode"),
3851 current_templates
->start
->name
);
3854 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
3856 as_bad (_("`%s' is not supported on `%s%s'"),
3857 current_templates
->start
->name
,
3858 cpu_arch_name
? cpu_arch_name
: default_arch
,
3859 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
3864 if (!cpu_arch_flags
.bitfield
.cpui386
3865 && (flag_code
!= CODE_16BIT
))
3867 as_warn (_("use .code16 to ensure correct addressing mode"));
3874 parse_operands (char *l
, const char *mnemonic
)
3878 /* 1 if operand is pending after ','. */
3879 unsigned int expecting_operand
= 0;
3881 /* Non-zero if operand parens not balanced. */
3882 unsigned int paren_not_balanced
;
3884 while (*l
!= END_OF_INSN
)
3886 /* Skip optional white space before operand. */
3887 if (is_space_char (*l
))
3889 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
)
3891 as_bad (_("invalid character %s before operand %d"),
3892 output_invalid (*l
),
3896 token_start
= l
; /* after white space */
3897 paren_not_balanced
= 0;
3898 while (paren_not_balanced
|| *l
!= ',')
3900 if (*l
== END_OF_INSN
)
3902 if (paren_not_balanced
)
3905 as_bad (_("unbalanced parenthesis in operand %d."),
3908 as_bad (_("unbalanced brackets in operand %d."),
3913 break; /* we are done */
3915 else if (!is_operand_char (*l
) && !is_space_char (*l
))
3917 as_bad (_("invalid character %s in operand %d"),
3918 output_invalid (*l
),
3925 ++paren_not_balanced
;
3927 --paren_not_balanced
;
3932 ++paren_not_balanced
;
3934 --paren_not_balanced
;
3938 if (l
!= token_start
)
3939 { /* Yes, we've read in another operand. */
3940 unsigned int operand_ok
;
3941 this_operand
= i
.operands
++;
3942 i
.types
[this_operand
].bitfield
.unspecified
= 1;
3943 if (i
.operands
> MAX_OPERANDS
)
3945 as_bad (_("spurious operands; (%d operands/instruction max)"),
3949 /* Now parse operand adding info to 'i' as we go along. */
3950 END_STRING_AND_SAVE (l
);
3954 i386_intel_operand (token_start
,
3955 intel_float_operand (mnemonic
));
3957 operand_ok
= i386_att_operand (token_start
);
3959 RESTORE_END_STRING (l
);
3965 if (expecting_operand
)
3967 expecting_operand_after_comma
:
3968 as_bad (_("expecting operand after ','; got nothing"));
3973 as_bad (_("expecting operand before ','; got nothing"));
3978 /* Now *l must be either ',' or END_OF_INSN. */
3981 if (*++l
== END_OF_INSN
)
3983 /* Just skip it, if it's \n complain. */
3984 goto expecting_operand_after_comma
;
3986 expecting_operand
= 1;
3993 swap_2_operands (int xchg1
, int xchg2
)
3995 union i386_op temp_op
;
3996 i386_operand_type temp_type
;
3997 enum bfd_reloc_code_real temp_reloc
;
3999 temp_type
= i
.types
[xchg2
];
4000 i
.types
[xchg2
] = i
.types
[xchg1
];
4001 i
.types
[xchg1
] = temp_type
;
4002 temp_op
= i
.op
[xchg2
];
4003 i
.op
[xchg2
] = i
.op
[xchg1
];
4004 i
.op
[xchg1
] = temp_op
;
4005 temp_reloc
= i
.reloc
[xchg2
];
4006 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4007 i
.reloc
[xchg1
] = temp_reloc
;
4011 if (i
.mask
->operand
== xchg1
)
4012 i
.mask
->operand
= xchg2
;
4013 else if (i
.mask
->operand
== xchg2
)
4014 i
.mask
->operand
= xchg1
;
4018 if (i
.broadcast
->operand
== xchg1
)
4019 i
.broadcast
->operand
= xchg2
;
4020 else if (i
.broadcast
->operand
== xchg2
)
4021 i
.broadcast
->operand
= xchg1
;
4025 if (i
.rounding
->operand
== xchg1
)
4026 i
.rounding
->operand
= xchg2
;
4027 else if (i
.rounding
->operand
== xchg2
)
4028 i
.rounding
->operand
= xchg1
;
4033 swap_operands (void)
4039 swap_2_operands (1, i
.operands
- 2);
4042 swap_2_operands (0, i
.operands
- 1);
4048 if (i
.mem_operands
== 2)
4050 const seg_entry
*temp_seg
;
4051 temp_seg
= i
.seg
[0];
4052 i
.seg
[0] = i
.seg
[1];
4053 i
.seg
[1] = temp_seg
;
4057 /* Try to ensure constant immediates are represented in the smallest
4062 char guess_suffix
= 0;
4066 guess_suffix
= i
.suffix
;
4067 else if (i
.reg_operands
)
4069 /* Figure out a suffix from the last register operand specified.
4070 We can't do this properly yet, ie. excluding InOutPortReg,
4071 but the following works for instructions with immediates.
4072 In any case, we can't set i.suffix yet. */
4073 for (op
= i
.operands
; --op
>= 0;)
4074 if (i
.types
[op
].bitfield
.reg8
)
4076 guess_suffix
= BYTE_MNEM_SUFFIX
;
4079 else if (i
.types
[op
].bitfield
.reg16
)
4081 guess_suffix
= WORD_MNEM_SUFFIX
;
4084 else if (i
.types
[op
].bitfield
.reg32
)
4086 guess_suffix
= LONG_MNEM_SUFFIX
;
4089 else if (i
.types
[op
].bitfield
.reg64
)
4091 guess_suffix
= QWORD_MNEM_SUFFIX
;
4095 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4096 guess_suffix
= WORD_MNEM_SUFFIX
;
4098 for (op
= i
.operands
; --op
>= 0;)
4099 if (operand_type_check (i
.types
[op
], imm
))
4101 switch (i
.op
[op
].imms
->X_op
)
4104 /* If a suffix is given, this operand may be shortened. */
4105 switch (guess_suffix
)
4107 case LONG_MNEM_SUFFIX
:
4108 i
.types
[op
].bitfield
.imm32
= 1;
4109 i
.types
[op
].bitfield
.imm64
= 1;
4111 case WORD_MNEM_SUFFIX
:
4112 i
.types
[op
].bitfield
.imm16
= 1;
4113 i
.types
[op
].bitfield
.imm32
= 1;
4114 i
.types
[op
].bitfield
.imm32s
= 1;
4115 i
.types
[op
].bitfield
.imm64
= 1;
4117 case BYTE_MNEM_SUFFIX
:
4118 i
.types
[op
].bitfield
.imm8
= 1;
4119 i
.types
[op
].bitfield
.imm8s
= 1;
4120 i
.types
[op
].bitfield
.imm16
= 1;
4121 i
.types
[op
].bitfield
.imm32
= 1;
4122 i
.types
[op
].bitfield
.imm32s
= 1;
4123 i
.types
[op
].bitfield
.imm64
= 1;
4127 /* If this operand is at most 16 bits, convert it
4128 to a signed 16 bit number before trying to see
4129 whether it will fit in an even smaller size.
4130 This allows a 16-bit operand such as $0xffe0 to
4131 be recognised as within Imm8S range. */
4132 if ((i
.types
[op
].bitfield
.imm16
)
4133 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
4135 i
.op
[op
].imms
->X_add_number
=
4136 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
4138 if ((i
.types
[op
].bitfield
.imm32
)
4139 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4142 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4143 ^ ((offsetT
) 1 << 31))
4144 - ((offsetT
) 1 << 31));
4147 = operand_type_or (i
.types
[op
],
4148 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4150 /* We must avoid matching of Imm32 templates when 64bit
4151 only immediate is available. */
4152 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4153 i
.types
[op
].bitfield
.imm32
= 0;
4160 /* Symbols and expressions. */
4162 /* Convert symbolic operand to proper sizes for matching, but don't
4163 prevent matching a set of insns that only supports sizes other
4164 than those matching the insn suffix. */
4166 i386_operand_type mask
, allowed
;
4167 const insn_template
*t
;
4169 operand_type_set (&mask
, 0);
4170 operand_type_set (&allowed
, 0);
4172 for (t
= current_templates
->start
;
4173 t
< current_templates
->end
;
4175 allowed
= operand_type_or (allowed
,
4176 t
->operand_types
[op
]);
4177 switch (guess_suffix
)
4179 case QWORD_MNEM_SUFFIX
:
4180 mask
.bitfield
.imm64
= 1;
4181 mask
.bitfield
.imm32s
= 1;
4183 case LONG_MNEM_SUFFIX
:
4184 mask
.bitfield
.imm32
= 1;
4186 case WORD_MNEM_SUFFIX
:
4187 mask
.bitfield
.imm16
= 1;
4189 case BYTE_MNEM_SUFFIX
:
4190 mask
.bitfield
.imm8
= 1;
4195 allowed
= operand_type_and (mask
, allowed
);
4196 if (!operand_type_all_zero (&allowed
))
4197 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4204 /* Try to use the smallest displacement type too. */
4206 optimize_disp (void)
4210 for (op
= i
.operands
; --op
>= 0;)
4211 if (operand_type_check (i
.types
[op
], disp
))
4213 if (i
.op
[op
].disps
->X_op
== O_constant
)
4215 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4217 if (i
.types
[op
].bitfield
.disp16
4218 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4220 /* If this operand is at most 16 bits, convert
4221 to a signed 16 bit number and don't use 64bit
4223 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4224 i
.types
[op
].bitfield
.disp64
= 0;
4226 if (i
.types
[op
].bitfield
.disp32
4227 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4229 /* If this operand is at most 32 bits, convert
4230 to a signed 32 bit number and don't use 64bit
4232 op_disp
&= (((offsetT
) 2 << 31) - 1);
4233 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4234 i
.types
[op
].bitfield
.disp64
= 0;
4236 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4238 i
.types
[op
].bitfield
.disp8
= 0;
4239 i
.types
[op
].bitfield
.disp16
= 0;
4240 i
.types
[op
].bitfield
.disp32
= 0;
4241 i
.types
[op
].bitfield
.disp32s
= 0;
4242 i
.types
[op
].bitfield
.disp64
= 0;
4246 else if (flag_code
== CODE_64BIT
)
4248 if (fits_in_signed_long (op_disp
))
4250 i
.types
[op
].bitfield
.disp64
= 0;
4251 i
.types
[op
].bitfield
.disp32s
= 1;
4253 if (i
.prefix
[ADDR_PREFIX
]
4254 && fits_in_unsigned_long (op_disp
))
4255 i
.types
[op
].bitfield
.disp32
= 1;
4257 if ((i
.types
[op
].bitfield
.disp32
4258 || i
.types
[op
].bitfield
.disp32s
4259 || i
.types
[op
].bitfield
.disp16
)
4260 && fits_in_signed_byte (op_disp
))
4261 i
.types
[op
].bitfield
.disp8
= 1;
4263 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4264 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4266 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4267 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4268 i
.types
[op
].bitfield
.disp8
= 0;
4269 i
.types
[op
].bitfield
.disp16
= 0;
4270 i
.types
[op
].bitfield
.disp32
= 0;
4271 i
.types
[op
].bitfield
.disp32s
= 0;
4272 i
.types
[op
].bitfield
.disp64
= 0;
4275 /* We only support 64bit displacement on constants. */
4276 i
.types
[op
].bitfield
.disp64
= 0;
4280 /* Check if operands are valid for the instruction. */
4283 check_VecOperands (const insn_template
*t
)
4287 /* Without VSIB byte, we can't have a vector register for index. */
4288 if (!t
->opcode_modifier
.vecsib
4290 && (i
.index_reg
->reg_type
.bitfield
.regxmm
4291 || i
.index_reg
->reg_type
.bitfield
.regymm
4292 || i
.index_reg
->reg_type
.bitfield
.regzmm
))
4294 i
.error
= unsupported_vector_index_register
;
4298 /* Check if default mask is allowed. */
4299 if (t
->opcode_modifier
.nodefmask
4300 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4302 i
.error
= no_default_mask
;
4306 /* For VSIB byte, we need a vector register for index, and all vector
4307 registers must be distinct. */
4308 if (t
->opcode_modifier
.vecsib
)
4311 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4312 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
4313 || (t
->opcode_modifier
.vecsib
== VecSIB256
4314 && i
.index_reg
->reg_type
.bitfield
.regymm
)
4315 || (t
->opcode_modifier
.vecsib
== VecSIB512
4316 && i
.index_reg
->reg_type
.bitfield
.regzmm
)))
4318 i
.error
= invalid_vsib_address
;
4322 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4323 if (i
.reg_operands
== 2 && !i
.mask
)
4325 gas_assert (i
.types
[0].bitfield
.regxmm
4326 || i
.types
[0].bitfield
.regymm
);
4327 gas_assert (i
.types
[2].bitfield
.regxmm
4328 || i
.types
[2].bitfield
.regymm
);
4329 if (operand_check
== check_none
)
4331 if (register_number (i
.op
[0].regs
)
4332 != register_number (i
.index_reg
)
4333 && register_number (i
.op
[2].regs
)
4334 != register_number (i
.index_reg
)
4335 && register_number (i
.op
[0].regs
)
4336 != register_number (i
.op
[2].regs
))
4338 if (operand_check
== check_error
)
4340 i
.error
= invalid_vector_register_set
;
4343 as_warn (_("mask, index, and destination registers should be distinct"));
4345 else if (i
.reg_operands
== 1 && i
.mask
)
4347 if ((i
.types
[1].bitfield
.regymm
4348 || i
.types
[1].bitfield
.regzmm
)
4349 && (register_number (i
.op
[1].regs
)
4350 == register_number (i
.index_reg
)))
4352 if (operand_check
== check_error
)
4354 i
.error
= invalid_vector_register_set
;
4357 if (operand_check
!= check_none
)
4358 as_warn (_("index and destination registers should be distinct"));
4363 /* Check if broadcast is supported by the instruction and is applied
4364 to the memory operand. */
4367 int broadcasted_opnd_size
;
4369 /* Check if specified broadcast is supported in this instruction,
4370 and it's applied to memory operand of DWORD or QWORD type,
4371 depending on VecESize. */
4372 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
4373 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
4374 || (t
->opcode_modifier
.vecesize
== 0
4375 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
4376 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
4377 || (t
->opcode_modifier
.vecesize
== 1
4378 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
4379 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
4382 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
4383 if (i
.broadcast
->type
== BROADCAST_1TO16
)
4384 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
4385 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
4386 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
4387 else if (i
.broadcast
->type
== BROADCAST_1TO4
)
4388 broadcasted_opnd_size
<<= 2; /* Broadcast 1to4. */
4389 else if (i
.broadcast
->type
== BROADCAST_1TO2
)
4390 broadcasted_opnd_size
<<= 1; /* Broadcast 1to2. */
4394 if ((broadcasted_opnd_size
== 256
4395 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
4396 || (broadcasted_opnd_size
== 512
4397 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
4400 i
.error
= unsupported_broadcast
;
4404 /* If broadcast is supported in this instruction, we need to check if
4405 operand of one-element size isn't specified without broadcast. */
4406 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
4408 /* Find memory operand. */
4409 for (op
= 0; op
< i
.operands
; op
++)
4410 if (operand_type_check (i
.types
[op
], anymem
))
4412 gas_assert (op
< i
.operands
);
4413 /* Check size of the memory operand. */
4414 if ((t
->opcode_modifier
.vecesize
== 0
4415 && i
.types
[op
].bitfield
.dword
)
4416 || (t
->opcode_modifier
.vecesize
== 1
4417 && i
.types
[op
].bitfield
.qword
))
4419 i
.error
= broadcast_needed
;
4424 /* Check if requested masking is supported. */
4426 && (!t
->opcode_modifier
.masking
4428 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
4430 i
.error
= unsupported_masking
;
4434 /* Check if masking is applied to dest operand. */
4435 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
4437 i
.error
= mask_not_on_destination
;
4444 if ((i
.rounding
->type
!= saeonly
4445 && !t
->opcode_modifier
.staticrounding
)
4446 || (i
.rounding
->type
== saeonly
4447 && (t
->opcode_modifier
.staticrounding
4448 || !t
->opcode_modifier
.sae
)))
4450 i
.error
= unsupported_rc_sae
;
4453 /* If the instruction has several immediate operands and one of
4454 them is rounding, the rounding operand should be the last
4455 immediate operand. */
4456 if (i
.imm_operands
> 1
4457 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
4459 i
.error
= rc_sae_operand_not_last_imm
;
4464 /* Check vector Disp8 operand. */
4465 if (t
->opcode_modifier
.disp8memshift
)
4468 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
4470 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
4472 for (op
= 0; op
< i
.operands
; op
++)
4473 if (operand_type_check (i
.types
[op
], disp
)
4474 && i
.op
[op
].disps
->X_op
== O_constant
)
4476 offsetT value
= i
.op
[op
].disps
->X_add_number
;
4477 int vec_disp8_ok
= fits_in_vec_disp8 (value
);
4478 if (t
->operand_types
[op
].bitfield
.vec_disp8
)
4481 i
.types
[op
].bitfield
.vec_disp8
= 1;
4484 /* Vector insn can only have Vec_Disp8/Disp32 in
4485 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4487 i
.types
[op
].bitfield
.disp8
= 0;
4488 if (flag_code
!= CODE_16BIT
)
4489 i
.types
[op
].bitfield
.disp16
= 0;
4492 else if (flag_code
!= CODE_16BIT
)
4494 /* One form of this instruction supports vector Disp8.
4495 Try vector Disp8 if we need to use Disp32. */
4496 if (vec_disp8_ok
&& !fits_in_signed_byte (value
))
4498 i
.error
= try_vector_disp8
;
4510 /* Check if operands are valid for the instruction. Update VEX
4514 VEX_check_operands (const insn_template
*t
)
4516 /* VREX is only valid with EVEX prefix. */
4517 if (i
.need_vrex
&& !t
->opcode_modifier
.evex
)
4519 i
.error
= invalid_register_operand
;
4523 if (!t
->opcode_modifier
.vex
)
4526 /* Only check VEX_Imm4, which must be the first operand. */
4527 if (t
->operand_types
[0].bitfield
.vec_imm4
)
4529 if (i
.op
[0].imms
->X_op
!= O_constant
4530 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
4536 /* Turn off Imm8 so that update_imm won't complain. */
4537 i
.types
[0] = vec_imm4
;
4543 static const insn_template
*
4544 match_template (void)
4546 /* Points to template once we've found it. */
4547 const insn_template
*t
;
4548 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
4549 i386_operand_type overlap4
;
4550 unsigned int found_reverse_match
;
4551 i386_opcode_modifier suffix_check
;
4552 i386_operand_type operand_types
[MAX_OPERANDS
];
4553 int addr_prefix_disp
;
4555 unsigned int found_cpu_match
;
4556 unsigned int check_register
;
4557 enum i386_error specific_error
= 0;
4559 #if MAX_OPERANDS != 5
4560 # error "MAX_OPERANDS must be 5."
4563 found_reverse_match
= 0;
4564 addr_prefix_disp
= -1;
4566 memset (&suffix_check
, 0, sizeof (suffix_check
));
4567 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4568 suffix_check
.no_bsuf
= 1;
4569 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4570 suffix_check
.no_wsuf
= 1;
4571 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
4572 suffix_check
.no_ssuf
= 1;
4573 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4574 suffix_check
.no_lsuf
= 1;
4575 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4576 suffix_check
.no_qsuf
= 1;
4577 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
4578 suffix_check
.no_ldsuf
= 1;
4580 /* Must have right number of operands. */
4581 i
.error
= number_of_operands_mismatch
;
4583 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
4585 addr_prefix_disp
= -1;
4587 if (i
.operands
!= t
->operands
)
4590 /* Check processor support. */
4591 i
.error
= unsupported
;
4592 found_cpu_match
= (cpu_flags_match (t
)
4593 == CPU_FLAGS_PERFECT_MATCH
);
4594 if (!found_cpu_match
)
4597 /* Check old gcc support. */
4598 i
.error
= old_gcc_only
;
4599 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
4602 /* Check AT&T mnemonic. */
4603 i
.error
= unsupported_with_intel_mnemonic
;
4604 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
4607 /* Check AT&T/Intel syntax. */
4608 i
.error
= unsupported_syntax
;
4609 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
4610 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
4613 /* Check the suffix, except for some instructions in intel mode. */
4614 i
.error
= invalid_instruction_suffix
;
4615 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4616 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4617 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4618 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4619 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4620 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4621 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4624 if (!operand_size_match (t
))
4627 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4628 operand_types
[j
] = t
->operand_types
[j
];
4630 /* In general, don't allow 64-bit operands in 32-bit mode. */
4631 if (i
.suffix
== QWORD_MNEM_SUFFIX
4632 && flag_code
!= CODE_64BIT
4634 ? (!t
->opcode_modifier
.ignoresize
4635 && !intel_float_operand (t
->name
))
4636 : intel_float_operand (t
->name
) != 2)
4637 && ((!operand_types
[0].bitfield
.regmmx
4638 && !operand_types
[0].bitfield
.regxmm
4639 && !operand_types
[0].bitfield
.regymm
4640 && !operand_types
[0].bitfield
.regzmm
)
4641 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4642 && operand_types
[t
->operands
> 1].bitfield
.regxmm
4643 && operand_types
[t
->operands
> 1].bitfield
.regymm
4644 && operand_types
[t
->operands
> 1].bitfield
.regzmm
))
4645 && (t
->base_opcode
!= 0x0fc7
4646 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
4649 /* In general, don't allow 32-bit operands on pre-386. */
4650 else if (i
.suffix
== LONG_MNEM_SUFFIX
4651 && !cpu_arch_flags
.bitfield
.cpui386
4653 ? (!t
->opcode_modifier
.ignoresize
4654 && !intel_float_operand (t
->name
))
4655 : intel_float_operand (t
->name
) != 2)
4656 && ((!operand_types
[0].bitfield
.regmmx
4657 && !operand_types
[0].bitfield
.regxmm
)
4658 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4659 && operand_types
[t
->operands
> 1].bitfield
.regxmm
)))
4662 /* Do not verify operands when there are none. */
4666 /* We've found a match; break out of loop. */
4670 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4671 into Disp32/Disp16/Disp32 operand. */
4672 if (i
.prefix
[ADDR_PREFIX
] != 0)
4674 /* There should be only one Disp operand. */
4678 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4680 if (operand_types
[j
].bitfield
.disp16
)
4682 addr_prefix_disp
= j
;
4683 operand_types
[j
].bitfield
.disp32
= 1;
4684 operand_types
[j
].bitfield
.disp16
= 0;
4690 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4692 if (operand_types
[j
].bitfield
.disp32
)
4694 addr_prefix_disp
= j
;
4695 operand_types
[j
].bitfield
.disp32
= 0;
4696 operand_types
[j
].bitfield
.disp16
= 1;
4702 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4704 if (operand_types
[j
].bitfield
.disp64
)
4706 addr_prefix_disp
= j
;
4707 operand_types
[j
].bitfield
.disp64
= 0;
4708 operand_types
[j
].bitfield
.disp32
= 1;
4716 /* We check register size if needed. */
4717 check_register
= t
->opcode_modifier
.checkregsize
;
4718 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
4719 switch (t
->operands
)
4722 if (!operand_type_match (overlap0
, i
.types
[0]))
4726 /* xchg %eax, %eax is a special case. It is an aliase for nop
4727 only in 32bit mode and we can use opcode 0x90. In 64bit
4728 mode, we can't use 0x90 for xchg %eax, %eax since it should
4729 zero-extend %eax to %rax. */
4730 if (flag_code
== CODE_64BIT
4731 && t
->base_opcode
== 0x90
4732 && operand_type_equal (&i
.types
[0], &acc32
)
4733 && operand_type_equal (&i
.types
[1], &acc32
))
4737 /* If we swap operand in encoding, we either match
4738 the next one or reverse direction of operands. */
4739 if (t
->opcode_modifier
.s
)
4741 else if (t
->opcode_modifier
.d
)
4746 /* If we swap operand in encoding, we match the next one. */
4747 if (i
.swap_operand
&& t
->opcode_modifier
.s
)
4751 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
4752 if (!operand_type_match (overlap0
, i
.types
[0])
4753 || !operand_type_match (overlap1
, i
.types
[1])
4755 && !operand_type_register_match (overlap0
, i
.types
[0],
4757 overlap1
, i
.types
[1],
4760 /* Check if other direction is valid ... */
4761 if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
4765 /* Try reversing direction of operands. */
4766 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
4767 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
4768 if (!operand_type_match (overlap0
, i
.types
[0])
4769 || !operand_type_match (overlap1
, i
.types
[1])
4771 && !operand_type_register_match (overlap0
,
4778 /* Does not match either direction. */
4781 /* found_reverse_match holds which of D or FloatDR
4783 if (t
->opcode_modifier
.d
)
4784 found_reverse_match
= Opcode_D
;
4785 else if (t
->opcode_modifier
.floatd
)
4786 found_reverse_match
= Opcode_FloatD
;
4788 found_reverse_match
= 0;
4789 if (t
->opcode_modifier
.floatr
)
4790 found_reverse_match
|= Opcode_FloatR
;
4794 /* Found a forward 2 operand match here. */
4795 switch (t
->operands
)
4798 overlap4
= operand_type_and (i
.types
[4],
4801 overlap3
= operand_type_and (i
.types
[3],
4804 overlap2
= operand_type_and (i
.types
[2],
4809 switch (t
->operands
)
4812 if (!operand_type_match (overlap4
, i
.types
[4])
4813 || !operand_type_register_match (overlap3
,
4821 if (!operand_type_match (overlap3
, i
.types
[3])
4823 && !operand_type_register_match (overlap2
,
4831 /* Here we make use of the fact that there are no
4832 reverse match 3 operand instructions, and all 3
4833 operand instructions only need to be checked for
4834 register consistency between operands 2 and 3. */
4835 if (!operand_type_match (overlap2
, i
.types
[2])
4837 && !operand_type_register_match (overlap1
,
4847 /* Found either forward/reverse 2, 3 or 4 operand match here:
4848 slip through to break. */
4850 if (!found_cpu_match
)
4852 found_reverse_match
= 0;
4856 /* Check if vector and VEX operands are valid. */
4857 if (check_VecOperands (t
) || VEX_check_operands (t
))
4859 specific_error
= i
.error
;
4863 /* We've found a match; break out of loop. */
4867 if (t
== current_templates
->end
)
4869 /* We found no match. */
4870 const char *err_msg
;
4871 switch (specific_error
? specific_error
: i
.error
)
4875 case operand_size_mismatch
:
4876 err_msg
= _("operand size mismatch");
4878 case operand_type_mismatch
:
4879 err_msg
= _("operand type mismatch");
4881 case register_type_mismatch
:
4882 err_msg
= _("register type mismatch");
4884 case number_of_operands_mismatch
:
4885 err_msg
= _("number of operands mismatch");
4887 case invalid_instruction_suffix
:
4888 err_msg
= _("invalid instruction suffix");
4891 err_msg
= _("constant doesn't fit in 4 bits");
4894 err_msg
= _("only supported with old gcc");
4896 case unsupported_with_intel_mnemonic
:
4897 err_msg
= _("unsupported with Intel mnemonic");
4899 case unsupported_syntax
:
4900 err_msg
= _("unsupported syntax");
4903 as_bad (_("unsupported instruction `%s'"),
4904 current_templates
->start
->name
);
4906 case invalid_vsib_address
:
4907 err_msg
= _("invalid VSIB address");
4909 case invalid_vector_register_set
:
4910 err_msg
= _("mask, index, and destination registers must be distinct");
4912 case unsupported_vector_index_register
:
4913 err_msg
= _("unsupported vector index register");
4915 case unsupported_broadcast
:
4916 err_msg
= _("unsupported broadcast");
4918 case broadcast_not_on_src_operand
:
4919 err_msg
= _("broadcast not on source memory operand");
4921 case broadcast_needed
:
4922 err_msg
= _("broadcast is needed for operand of such type");
4924 case unsupported_masking
:
4925 err_msg
= _("unsupported masking");
4927 case mask_not_on_destination
:
4928 err_msg
= _("mask not on destination operand");
4930 case no_default_mask
:
4931 err_msg
= _("default mask isn't allowed");
4933 case unsupported_rc_sae
:
4934 err_msg
= _("unsupported static rounding/sae");
4936 case rc_sae_operand_not_last_imm
:
4938 err_msg
= _("RC/SAE operand must precede immediate operands");
4940 err_msg
= _("RC/SAE operand must follow immediate operands");
4942 case invalid_register_operand
:
4943 err_msg
= _("invalid register operand");
4946 as_bad (_("%s for `%s'"), err_msg
,
4947 current_templates
->start
->name
);
4951 if (!quiet_warnings
)
4954 && (i
.types
[0].bitfield
.jumpabsolute
4955 != operand_types
[0].bitfield
.jumpabsolute
))
4957 as_warn (_("indirect %s without `*'"), t
->name
);
4960 if (t
->opcode_modifier
.isprefix
4961 && t
->opcode_modifier
.ignoresize
)
4963 /* Warn them that a data or address size prefix doesn't
4964 affect assembly of the next line of code. */
4965 as_warn (_("stand-alone `%s' prefix"), t
->name
);
4969 /* Copy the template we found. */
4972 if (addr_prefix_disp
!= -1)
4973 i
.tm
.operand_types
[addr_prefix_disp
]
4974 = operand_types
[addr_prefix_disp
];
4976 if (found_reverse_match
)
4978 /* If we found a reverse match we must alter the opcode
4979 direction bit. found_reverse_match holds bits to change
4980 (different for int & float insns). */
4982 i
.tm
.base_opcode
^= found_reverse_match
;
4984 i
.tm
.operand_types
[0] = operand_types
[1];
4985 i
.tm
.operand_types
[1] = operand_types
[0];
4994 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
4995 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
4997 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
4999 as_bad (_("`%s' operand %d must use `%ses' segment"),
5005 /* There's only ever one segment override allowed per instruction.
5006 This instruction possibly has a legal segment override on the
5007 second operand, so copy the segment to where non-string
5008 instructions store it, allowing common code. */
5009 i
.seg
[0] = i
.seg
[1];
5011 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
5013 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
5015 as_bad (_("`%s' operand %d must use `%ses' segment"),
5026 process_suffix (void)
5028 /* If matched instruction specifies an explicit instruction mnemonic
5030 if (i
.tm
.opcode_modifier
.size16
)
5031 i
.suffix
= WORD_MNEM_SUFFIX
;
5032 else if (i
.tm
.opcode_modifier
.size32
)
5033 i
.suffix
= LONG_MNEM_SUFFIX
;
5034 else if (i
.tm
.opcode_modifier
.size64
)
5035 i
.suffix
= QWORD_MNEM_SUFFIX
;
5036 else if (i
.reg_operands
)
5038 /* If there's no instruction mnemonic suffix we try to invent one
5039 based on register operands. */
5042 /* We take i.suffix from the last register operand specified,
5043 Destination register type is more significant than source
5044 register type. crc32 in SSE4.2 prefers source register
5046 if (i
.tm
.base_opcode
== 0xf20f38f1)
5048 if (i
.types
[0].bitfield
.reg16
)
5049 i
.suffix
= WORD_MNEM_SUFFIX
;
5050 else if (i
.types
[0].bitfield
.reg32
)
5051 i
.suffix
= LONG_MNEM_SUFFIX
;
5052 else if (i
.types
[0].bitfield
.reg64
)
5053 i
.suffix
= QWORD_MNEM_SUFFIX
;
5055 else if (i
.tm
.base_opcode
== 0xf20f38f0)
5057 if (i
.types
[0].bitfield
.reg8
)
5058 i
.suffix
= BYTE_MNEM_SUFFIX
;
5065 if (i
.tm
.base_opcode
== 0xf20f38f1
5066 || i
.tm
.base_opcode
== 0xf20f38f0)
5068 /* We have to know the operand size for crc32. */
5069 as_bad (_("ambiguous memory operand size for `%s`"),
5074 for (op
= i
.operands
; --op
>= 0;)
5075 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5077 if (i
.types
[op
].bitfield
.reg8
)
5079 i
.suffix
= BYTE_MNEM_SUFFIX
;
5082 else if (i
.types
[op
].bitfield
.reg16
)
5084 i
.suffix
= WORD_MNEM_SUFFIX
;
5087 else if (i
.types
[op
].bitfield
.reg32
)
5089 i
.suffix
= LONG_MNEM_SUFFIX
;
5092 else if (i
.types
[op
].bitfield
.reg64
)
5094 i
.suffix
= QWORD_MNEM_SUFFIX
;
5100 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5103 && i
.tm
.opcode_modifier
.ignoresize
5104 && i
.tm
.opcode_modifier
.no_bsuf
)
5106 else if (!check_byte_reg ())
5109 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5112 && i
.tm
.opcode_modifier
.ignoresize
5113 && i
.tm
.opcode_modifier
.no_lsuf
)
5115 else if (!check_long_reg ())
5118 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5121 && i
.tm
.opcode_modifier
.ignoresize
5122 && i
.tm
.opcode_modifier
.no_qsuf
)
5124 else if (!check_qword_reg ())
5127 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5130 && i
.tm
.opcode_modifier
.ignoresize
5131 && i
.tm
.opcode_modifier
.no_wsuf
)
5133 else if (!check_word_reg ())
5136 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
5137 || i
.suffix
== YMMWORD_MNEM_SUFFIX
5138 || i
.suffix
== ZMMWORD_MNEM_SUFFIX
)
5140 /* Skip if the instruction has x/y/z suffix. match_template
5141 should check if it is a valid suffix. */
5143 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
5144 /* Do nothing if the instruction is going to ignore the prefix. */
5149 else if (i
.tm
.opcode_modifier
.defaultsize
5151 /* exclude fldenv/frstor/fsave/fstenv */
5152 && i
.tm
.opcode_modifier
.no_ssuf
)
5154 i
.suffix
= stackop_size
;
5156 else if (intel_syntax
5158 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
5159 || i
.tm
.opcode_modifier
.jumpbyte
5160 || i
.tm
.opcode_modifier
.jumpintersegment
5161 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
5162 && i
.tm
.extension_opcode
<= 3)))
5167 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5169 i
.suffix
= QWORD_MNEM_SUFFIX
;
5173 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5174 i
.suffix
= LONG_MNEM_SUFFIX
;
5177 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5178 i
.suffix
= WORD_MNEM_SUFFIX
;
5187 if (i
.tm
.opcode_modifier
.w
)
5189 as_bad (_("no instruction mnemonic suffix given and "
5190 "no register operands; can't size instruction"));
5196 unsigned int suffixes
;
5198 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
5199 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5201 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5203 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
5205 if (!i
.tm
.opcode_modifier
.no_ssuf
)
5207 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5210 /* There are more than suffix matches. */
5211 if (i
.tm
.opcode_modifier
.w
5212 || ((suffixes
& (suffixes
- 1))
5213 && !i
.tm
.opcode_modifier
.defaultsize
5214 && !i
.tm
.opcode_modifier
.ignoresize
))
5216 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
5222 /* Change the opcode based on the operand size given by i.suffix;
5223 We don't need to change things for byte insns. */
5226 && i
.suffix
!= BYTE_MNEM_SUFFIX
5227 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
5228 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
5229 && i
.suffix
!= ZMMWORD_MNEM_SUFFIX
)
5231 /* It's not a byte, select word/dword operation. */
5232 if (i
.tm
.opcode_modifier
.w
)
5234 if (i
.tm
.opcode_modifier
.shortform
)
5235 i
.tm
.base_opcode
|= 8;
5237 i
.tm
.base_opcode
|= 1;
5240 /* Now select between word & dword operations via the operand
5241 size prefix, except for instructions that will ignore this
5243 if (i
.tm
.opcode_modifier
.addrprefixop0
)
5245 /* The address size override prefix changes the size of the
5247 if ((flag_code
== CODE_32BIT
5248 && i
.op
->regs
[0].reg_type
.bitfield
.reg16
)
5249 || (flag_code
!= CODE_32BIT
5250 && i
.op
->regs
[0].reg_type
.bitfield
.reg32
))
5251 if (!add_prefix (ADDR_PREFIX_OPCODE
))
5254 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
5255 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
5256 && !i
.tm
.opcode_modifier
.ignoresize
5257 && !i
.tm
.opcode_modifier
.floatmf
5258 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
5259 || (flag_code
== CODE_64BIT
5260 && i
.tm
.opcode_modifier
.jumpbyte
)))
5262 unsigned int prefix
= DATA_PREFIX_OPCODE
;
5264 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
5265 prefix
= ADDR_PREFIX_OPCODE
;
5267 if (!add_prefix (prefix
))
5271 /* Set mode64 for an operand. */
5272 if (i
.suffix
== QWORD_MNEM_SUFFIX
5273 && flag_code
== CODE_64BIT
5274 && !i
.tm
.opcode_modifier
.norex64
)
5276 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5277 need rex64. cmpxchg8b is also a special case. */
5278 if (! (i
.operands
== 2
5279 && i
.tm
.base_opcode
== 0x90
5280 && i
.tm
.extension_opcode
== None
5281 && operand_type_equal (&i
.types
[0], &acc64
)
5282 && operand_type_equal (&i
.types
[1], &acc64
))
5283 && ! (i
.operands
== 1
5284 && i
.tm
.base_opcode
== 0xfc7
5285 && i
.tm
.extension_opcode
== 1
5286 && !operand_type_check (i
.types
[0], reg
)
5287 && operand_type_check (i
.types
[0], anymem
)))
5291 /* Size floating point instruction. */
5292 if (i
.suffix
== LONG_MNEM_SUFFIX
)
5293 if (i
.tm
.opcode_modifier
.floatmf
)
5294 i
.tm
.base_opcode
^= 4;
5301 check_byte_reg (void)
5305 for (op
= i
.operands
; --op
>= 0;)
5307 /* If this is an eight bit register, it's OK. If it's the 16 or
5308 32 bit version of an eight bit register, we will just use the
5309 low portion, and that's OK too. */
5310 if (i
.types
[op
].bitfield
.reg8
)
5313 /* I/O port address operands are OK too. */
5314 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5317 /* crc32 doesn't generate this warning. */
5318 if (i
.tm
.base_opcode
== 0xf20f38f0)
5321 if ((i
.types
[op
].bitfield
.reg16
5322 || i
.types
[op
].bitfield
.reg32
5323 || i
.types
[op
].bitfield
.reg64
)
5324 && i
.op
[op
].regs
->reg_num
< 4
5325 /* Prohibit these changes in 64bit mode, since the lowering
5326 would be more complicated. */
5327 && flag_code
!= CODE_64BIT
)
5329 #if REGISTER_WARNINGS
5330 if (!quiet_warnings
)
5331 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5333 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.reg16
5334 ? REGNAM_AL
- REGNAM_AX
5335 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
5337 i
.op
[op
].regs
->reg_name
,
5342 /* Any other register is bad. */
5343 if (i
.types
[op
].bitfield
.reg16
5344 || i
.types
[op
].bitfield
.reg32
5345 || i
.types
[op
].bitfield
.reg64
5346 || i
.types
[op
].bitfield
.regmmx
5347 || i
.types
[op
].bitfield
.regxmm
5348 || i
.types
[op
].bitfield
.regymm
5349 || i
.types
[op
].bitfield
.regzmm
5350 || i
.types
[op
].bitfield
.sreg2
5351 || i
.types
[op
].bitfield
.sreg3
5352 || i
.types
[op
].bitfield
.control
5353 || i
.types
[op
].bitfield
.debug
5354 || i
.types
[op
].bitfield
.test
5355 || i
.types
[op
].bitfield
.floatreg
5356 || i
.types
[op
].bitfield
.floatacc
)
5358 as_bad (_("`%s%s' not allowed with `%s%c'"),
5360 i
.op
[op
].regs
->reg_name
,
5370 check_long_reg (void)
5374 for (op
= i
.operands
; --op
>= 0;)
5375 /* Reject eight bit registers, except where the template requires
5376 them. (eg. movzb) */
5377 if (i
.types
[op
].bitfield
.reg8
5378 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5379 || i
.tm
.operand_types
[op
].bitfield
.reg32
5380 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5382 as_bad (_("`%s%s' not allowed with `%s%c'"),
5384 i
.op
[op
].regs
->reg_name
,
5389 /* Warn if the e prefix on a general reg is missing. */
5390 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5391 && i
.types
[op
].bitfield
.reg16
5392 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5393 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5395 /* Prohibit these changes in the 64bit mode, since the
5396 lowering is more complicated. */
5397 if (flag_code
== CODE_64BIT
)
5399 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5400 register_prefix
, i
.op
[op
].regs
->reg_name
,
5404 #if REGISTER_WARNINGS
5405 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5407 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
5408 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5411 /* Warn if the r prefix on a general reg is present. */
5412 else if (i
.types
[op
].bitfield
.reg64
5413 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5414 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5417 && i
.tm
.opcode_modifier
.toqword
5418 && !i
.types
[0].bitfield
.regxmm
)
5420 /* Convert to QWORD. We want REX byte. */
5421 i
.suffix
= QWORD_MNEM_SUFFIX
;
5425 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5426 register_prefix
, i
.op
[op
].regs
->reg_name
,
5435 check_qword_reg (void)
5439 for (op
= i
.operands
; --op
>= 0; )
5440 /* Reject eight bit registers, except where the template requires
5441 them. (eg. movzb) */
5442 if (i
.types
[op
].bitfield
.reg8
5443 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5444 || i
.tm
.operand_types
[op
].bitfield
.reg32
5445 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5447 as_bad (_("`%s%s' not allowed with `%s%c'"),
5449 i
.op
[op
].regs
->reg_name
,
5454 /* Warn if the r prefix on a general reg is missing. */
5455 else if ((i
.types
[op
].bitfield
.reg16
5456 || i
.types
[op
].bitfield
.reg32
)
5457 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5458 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5460 /* Prohibit these changes in the 64bit mode, since the
5461 lowering is more complicated. */
5463 && i
.tm
.opcode_modifier
.todword
5464 && !i
.types
[0].bitfield
.regxmm
)
5466 /* Convert to DWORD. We don't want REX byte. */
5467 i
.suffix
= LONG_MNEM_SUFFIX
;
5471 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5472 register_prefix
, i
.op
[op
].regs
->reg_name
,
5481 check_word_reg (void)
5484 for (op
= i
.operands
; --op
>= 0;)
5485 /* Reject eight bit registers, except where the template requires
5486 them. (eg. movzb) */
5487 if (i
.types
[op
].bitfield
.reg8
5488 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5489 || i
.tm
.operand_types
[op
].bitfield
.reg32
5490 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5492 as_bad (_("`%s%s' not allowed with `%s%c'"),
5494 i
.op
[op
].regs
->reg_name
,
5499 /* Warn if the e or r prefix on a general reg is present. */
5500 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5501 && (i
.types
[op
].bitfield
.reg32
5502 || i
.types
[op
].bitfield
.reg64
)
5503 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5504 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5506 /* Prohibit these changes in the 64bit mode, since the
5507 lowering is more complicated. */
5508 if (flag_code
== CODE_64BIT
)
5510 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5511 register_prefix
, i
.op
[op
].regs
->reg_name
,
5515 #if REGISTER_WARNINGS
5516 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5518 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
5519 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5526 update_imm (unsigned int j
)
5528 i386_operand_type overlap
= i
.types
[j
];
5529 if ((overlap
.bitfield
.imm8
5530 || overlap
.bitfield
.imm8s
5531 || overlap
.bitfield
.imm16
5532 || overlap
.bitfield
.imm32
5533 || overlap
.bitfield
.imm32s
5534 || overlap
.bitfield
.imm64
)
5535 && !operand_type_equal (&overlap
, &imm8
)
5536 && !operand_type_equal (&overlap
, &imm8s
)
5537 && !operand_type_equal (&overlap
, &imm16
)
5538 && !operand_type_equal (&overlap
, &imm32
)
5539 && !operand_type_equal (&overlap
, &imm32s
)
5540 && !operand_type_equal (&overlap
, &imm64
))
5544 i386_operand_type temp
;
5546 operand_type_set (&temp
, 0);
5547 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5549 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
5550 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
5552 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5553 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
5554 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5556 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
5557 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
5560 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
5563 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
5564 || operand_type_equal (&overlap
, &imm16_32
)
5565 || operand_type_equal (&overlap
, &imm16_32s
))
5567 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5572 if (!operand_type_equal (&overlap
, &imm8
)
5573 && !operand_type_equal (&overlap
, &imm8s
)
5574 && !operand_type_equal (&overlap
, &imm16
)
5575 && !operand_type_equal (&overlap
, &imm32
)
5576 && !operand_type_equal (&overlap
, &imm32s
)
5577 && !operand_type_equal (&overlap
, &imm64
))
5579 as_bad (_("no instruction mnemonic suffix given; "
5580 "can't determine immediate size"));
5584 i
.types
[j
] = overlap
;
5594 /* Update the first 2 immediate operands. */
5595 n
= i
.operands
> 2 ? 2 : i
.operands
;
5598 for (j
= 0; j
< n
; j
++)
5599 if (update_imm (j
) == 0)
5602 /* The 3rd operand can't be immediate operand. */
5603 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
5610 bad_implicit_operand (int xmm
)
5612 const char *ireg
= xmm
? "xmm0" : "ymm0";
5615 as_bad (_("the last operand of `%s' must be `%s%s'"),
5616 i
.tm
.name
, register_prefix
, ireg
);
5618 as_bad (_("the first operand of `%s' must be `%s%s'"),
5619 i
.tm
.name
, register_prefix
, ireg
);
5624 process_operands (void)
5626 /* Default segment register this instruction will use for memory
5627 accesses. 0 means unknown. This is only for optimizing out
5628 unnecessary segment overrides. */
5629 const seg_entry
*default_seg
= 0;
5631 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
5633 unsigned int dupl
= i
.operands
;
5634 unsigned int dest
= dupl
- 1;
5637 /* The destination must be an xmm register. */
5638 gas_assert (i
.reg_operands
5639 && MAX_OPERANDS
> dupl
5640 && operand_type_equal (&i
.types
[dest
], ®xmm
));
5642 if (i
.tm
.opcode_modifier
.firstxmm0
)
5644 /* The first operand is implicit and must be xmm0. */
5645 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5646 if (register_number (i
.op
[0].regs
) != 0)
5647 return bad_implicit_operand (1);
5649 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5651 /* Keep xmm0 for instructions with VEX prefix and 3
5657 /* We remove the first xmm0 and keep the number of
5658 operands unchanged, which in fact duplicates the
5660 for (j
= 1; j
< i
.operands
; j
++)
5662 i
.op
[j
- 1] = i
.op
[j
];
5663 i
.types
[j
- 1] = i
.types
[j
];
5664 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5668 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
5670 gas_assert ((MAX_OPERANDS
- 1) > dupl
5671 && (i
.tm
.opcode_modifier
.vexsources
5674 /* Add the implicit xmm0 for instructions with VEX prefix
5676 for (j
= i
.operands
; j
> 0; j
--)
5678 i
.op
[j
] = i
.op
[j
- 1];
5679 i
.types
[j
] = i
.types
[j
- 1];
5680 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
5683 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
5684 i
.types
[0] = regxmm
;
5685 i
.tm
.operand_types
[0] = regxmm
;
5688 i
.reg_operands
+= 2;
5693 i
.op
[dupl
] = i
.op
[dest
];
5694 i
.types
[dupl
] = i
.types
[dest
];
5695 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5704 i
.op
[dupl
] = i
.op
[dest
];
5705 i
.types
[dupl
] = i
.types
[dest
];
5706 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5709 if (i
.tm
.opcode_modifier
.immext
)
5712 else if (i
.tm
.opcode_modifier
.firstxmm0
)
5716 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5717 gas_assert (i
.reg_operands
5718 && (operand_type_equal (&i
.types
[0], ®xmm
)
5719 || operand_type_equal (&i
.types
[0], ®ymm
)
5720 || operand_type_equal (&i
.types
[0], ®zmm
)));
5721 if (register_number (i
.op
[0].regs
) != 0)
5722 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
5724 for (j
= 1; j
< i
.operands
; j
++)
5726 i
.op
[j
- 1] = i
.op
[j
];
5727 i
.types
[j
- 1] = i
.types
[j
];
5729 /* We need to adjust fields in i.tm since they are used by
5730 build_modrm_byte. */
5731 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5738 else if (i
.tm
.opcode_modifier
.regkludge
)
5740 /* The imul $imm, %reg instruction is converted into
5741 imul $imm, %reg, %reg, and the clr %reg instruction
5742 is converted into xor %reg, %reg. */
5744 unsigned int first_reg_op
;
5746 if (operand_type_check (i
.types
[0], reg
))
5750 /* Pretend we saw the extra register operand. */
5751 gas_assert (i
.reg_operands
== 1
5752 && i
.op
[first_reg_op
+ 1].regs
== 0);
5753 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
5754 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
5759 if (i
.tm
.opcode_modifier
.shortform
)
5761 if (i
.types
[0].bitfield
.sreg2
5762 || i
.types
[0].bitfield
.sreg3
)
5764 if (i
.tm
.base_opcode
== POP_SEG_SHORT
5765 && i
.op
[0].regs
->reg_num
== 1)
5767 as_bad (_("you can't `pop %scs'"), register_prefix
);
5770 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
5771 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
5776 /* The register or float register operand is in operand
5780 if (i
.types
[0].bitfield
.floatreg
5781 || operand_type_check (i
.types
[0], reg
))
5785 /* Register goes in low 3 bits of opcode. */
5786 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
5787 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5789 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5791 /* Warn about some common errors, but press on regardless.
5792 The first case can be generated by gcc (<= 2.8.1). */
5793 if (i
.operands
== 2)
5795 /* Reversed arguments on faddp, fsubp, etc. */
5796 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
5797 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
5798 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
5802 /* Extraneous `l' suffix on fp insn. */
5803 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
5804 register_prefix
, i
.op
[0].regs
->reg_name
);
5809 else if (i
.tm
.opcode_modifier
.modrm
)
5811 /* The opcode is completed (modulo i.tm.extension_opcode which
5812 must be put into the modrm byte). Now, we make the modrm and
5813 index base bytes based on all the info we've collected. */
5815 default_seg
= build_modrm_byte ();
5817 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
5821 else if (i
.tm
.opcode_modifier
.isstring
)
5823 /* For the string instructions that allow a segment override
5824 on one of their operands, the default segment is ds. */
5828 if (i
.tm
.base_opcode
== 0x8d /* lea */
5831 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
5833 /* If a segment was explicitly specified, and the specified segment
5834 is not the default, use an opcode prefix to select it. If we
5835 never figured out what the default segment is, then default_seg
5836 will be zero at this point, and the specified segment prefix will
5838 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
5840 if (!add_prefix (i
.seg
[0]->seg_prefix
))
5846 static const seg_entry
*
5847 build_modrm_byte (void)
5849 const seg_entry
*default_seg
= 0;
5850 unsigned int source
, dest
;
5853 /* The first operand of instructions with VEX prefix and 3 sources
5854 must be VEX_Imm4. */
5855 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
5858 unsigned int nds
, reg_slot
;
5861 if (i
.tm
.opcode_modifier
.veximmext
5862 && i
.tm
.opcode_modifier
.immext
)
5864 dest
= i
.operands
- 2;
5865 gas_assert (dest
== 3);
5868 dest
= i
.operands
- 1;
5871 /* There are 2 kinds of instructions:
5872 1. 5 operands: 4 register operands or 3 register operands
5873 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5874 VexW0 or VexW1. The destination must be either XMM, YMM or
5876 2. 4 operands: 4 register operands or 3 register operands
5877 plus 1 memory operand, VexXDS, and VexImmExt */
5878 gas_assert ((i
.reg_operands
== 4
5879 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
5880 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5881 && (i
.tm
.opcode_modifier
.veximmext
5882 || (i
.imm_operands
== 1
5883 && i
.types
[0].bitfield
.vec_imm4
5884 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
5885 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5886 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
5887 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)
5888 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®zmm
)))));
5890 if (i
.imm_operands
== 0)
5892 /* When there is no immediate operand, generate an 8bit
5893 immediate operand to encode the first operand. */
5894 exp
= &im_expressions
[i
.imm_operands
++];
5895 i
.op
[i
.operands
].imms
= exp
;
5896 i
.types
[i
.operands
] = imm8
;
5898 /* If VexW1 is set, the first operand is the source and
5899 the second operand is encoded in the immediate operand. */
5900 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5911 /* FMA swaps REG and NDS. */
5912 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
5920 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5922 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5924 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5926 exp
->X_op
= O_constant
;
5927 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
5928 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
5932 unsigned int imm_slot
;
5934 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5936 /* If VexW0 is set, the third operand is the source and
5937 the second operand is encoded in the immediate
5944 /* VexW1 is set, the second operand is the source and
5945 the third operand is encoded in the immediate
5951 if (i
.tm
.opcode_modifier
.immext
)
5953 /* When ImmExt is set, the immdiate byte is the last
5955 imm_slot
= i
.operands
- 1;
5963 /* Turn on Imm8 so that output_imm will generate it. */
5964 i
.types
[imm_slot
].bitfield
.imm8
= 1;
5967 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5969 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5971 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5973 i
.op
[imm_slot
].imms
->X_add_number
5974 |= register_number (i
.op
[reg_slot
].regs
) << 4;
5975 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
5978 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
5979 || operand_type_equal (&i
.tm
.operand_types
[nds
],
5981 || operand_type_equal (&i
.tm
.operand_types
[nds
],
5983 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
5988 /* i.reg_operands MUST be the number of real register operands;
5989 implicit registers do not count. If there are 3 register
5990 operands, it must be a instruction with VexNDS. For a
5991 instruction with VexNDD, the destination register is encoded
5992 in VEX prefix. If there are 4 register operands, it must be
5993 a instruction with VEX prefix and 3 sources. */
5994 if (i
.mem_operands
== 0
5995 && ((i
.reg_operands
== 2
5996 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
5997 || (i
.reg_operands
== 3
5998 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
5999 || (i
.reg_operands
== 4 && vex_3_sources
)))
6007 /* When there are 3 operands, one of them may be immediate,
6008 which may be the first or the last operand. Otherwise,
6009 the first operand must be shift count register (cl) or it
6010 is an instruction with VexNDS. */
6011 gas_assert (i
.imm_operands
== 1
6012 || (i
.imm_operands
== 0
6013 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6014 || i
.types
[0].bitfield
.shiftcount
)));
6015 if (operand_type_check (i
.types
[0], imm
)
6016 || i
.types
[0].bitfield
.shiftcount
)
6022 /* When there are 4 operands, the first two must be 8bit
6023 immediate operands. The source operand will be the 3rd
6026 For instructions with VexNDS, if the first operand
6027 an imm8, the source operand is the 2nd one. If the last
6028 operand is imm8, the source operand is the first one. */
6029 gas_assert ((i
.imm_operands
== 2
6030 && i
.types
[0].bitfield
.imm8
6031 && i
.types
[1].bitfield
.imm8
)
6032 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6033 && i
.imm_operands
== 1
6034 && (i
.types
[0].bitfield
.imm8
6035 || i
.types
[i
.operands
- 1].bitfield
.imm8
6037 if (i
.imm_operands
== 2)
6041 if (i
.types
[0].bitfield
.imm8
)
6048 if (i
.tm
.opcode_modifier
.evex
)
6050 /* For EVEX instructions, when there are 5 operands, the
6051 first one must be immediate operand. If the second one
6052 is immediate operand, the source operand is the 3th
6053 one. If the last one is immediate operand, the source
6054 operand is the 2nd one. */
6055 gas_assert (i
.imm_operands
== 2
6056 && i
.tm
.opcode_modifier
.sae
6057 && operand_type_check (i
.types
[0], imm
));
6058 if (operand_type_check (i
.types
[1], imm
))
6060 else if (operand_type_check (i
.types
[4], imm
))
6074 /* RC/SAE operand could be between DEST and SRC. That happens
6075 when one operand is GPR and the other one is XMM/YMM/ZMM
6077 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6080 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6082 /* For instructions with VexNDS, the register-only source
6083 operand must be 32/64bit integer, XMM, YMM or ZMM
6084 register. It is encoded in VEX prefix. We need to
6085 clear RegMem bit before calling operand_type_equal. */
6087 i386_operand_type op
;
6090 /* Check register-only source operand when two source
6091 operands are swapped. */
6092 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6093 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6101 op
= i
.tm
.operand_types
[vvvv
];
6102 op
.bitfield
.regmem
= 0;
6103 if ((dest
+ 1) >= i
.operands
6104 || (!op
.bitfield
.reg32
6105 && op
.bitfield
.reg64
6106 && !operand_type_equal (&op
, ®xmm
)
6107 && !operand_type_equal (&op
, ®ymm
)
6108 && !operand_type_equal (&op
, ®zmm
)
6109 && !operand_type_equal (&op
, ®mask
)))
6111 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6117 /* One of the register operands will be encoded in the i.tm.reg
6118 field, the other in the combined i.tm.mode and i.tm.regmem
6119 fields. If no form of this instruction supports a memory
6120 destination operand, then we assume the source operand may
6121 sometimes be a memory operand and so we need to store the
6122 destination in the i.rm.reg field. */
6123 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6124 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6126 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6127 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6128 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6130 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6132 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6134 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6139 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6140 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6141 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6143 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6145 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6147 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6150 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6152 if (!i
.types
[0].bitfield
.control
6153 && !i
.types
[1].bitfield
.control
)
6155 i
.rex
&= ~(REX_R
| REX_B
);
6156 add_prefix (LOCK_PREFIX_OPCODE
);
6160 { /* If it's not 2 reg operands... */
6165 unsigned int fake_zero_displacement
= 0;
6168 for (op
= 0; op
< i
.operands
; op
++)
6169 if (operand_type_check (i
.types
[op
], anymem
))
6171 gas_assert (op
< i
.operands
);
6173 if (i
.tm
.opcode_modifier
.vecsib
)
6175 if (i
.index_reg
->reg_num
== RegEiz
6176 || i
.index_reg
->reg_num
== RegRiz
)
6179 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6182 i
.sib
.base
= NO_BASE_REGISTER
;
6183 i
.sib
.scale
= i
.log2_scale_factor
;
6184 /* No Vec_Disp8 if there is no base. */
6185 i
.types
[op
].bitfield
.vec_disp8
= 0;
6186 i
.types
[op
].bitfield
.disp8
= 0;
6187 i
.types
[op
].bitfield
.disp16
= 0;
6188 i
.types
[op
].bitfield
.disp64
= 0;
6189 if (flag_code
!= CODE_64BIT
)
6191 /* Must be 32 bit */
6192 i
.types
[op
].bitfield
.disp32
= 1;
6193 i
.types
[op
].bitfield
.disp32s
= 0;
6197 i
.types
[op
].bitfield
.disp32
= 0;
6198 i
.types
[op
].bitfield
.disp32s
= 1;
6201 i
.sib
.index
= i
.index_reg
->reg_num
;
6202 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6204 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6210 if (i
.base_reg
== 0)
6213 if (!i
.disp_operands
)
6215 fake_zero_displacement
= 1;
6216 /* Instructions with VSIB byte need 32bit displacement
6217 if there is no base register. */
6218 if (i
.tm
.opcode_modifier
.vecsib
)
6219 i
.types
[op
].bitfield
.disp32
= 1;
6221 if (i
.index_reg
== 0)
6223 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6224 /* Operand is just <disp> */
6225 if (flag_code
== CODE_64BIT
)
6227 /* 64bit mode overwrites the 32bit absolute
6228 addressing by RIP relative addressing and
6229 absolute addressing is encoded by one of the
6230 redundant SIB forms. */
6231 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6232 i
.sib
.base
= NO_BASE_REGISTER
;
6233 i
.sib
.index
= NO_INDEX_REGISTER
;
6234 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
6235 ? disp32s
: disp32
);
6237 else if ((flag_code
== CODE_16BIT
)
6238 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6240 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6241 i
.types
[op
] = disp16
;
6245 i
.rm
.regmem
= NO_BASE_REGISTER
;
6246 i
.types
[op
] = disp32
;
6249 else if (!i
.tm
.opcode_modifier
.vecsib
)
6251 /* !i.base_reg && i.index_reg */
6252 if (i
.index_reg
->reg_num
== RegEiz
6253 || i
.index_reg
->reg_num
== RegRiz
)
6254 i
.sib
.index
= NO_INDEX_REGISTER
;
6256 i
.sib
.index
= i
.index_reg
->reg_num
;
6257 i
.sib
.base
= NO_BASE_REGISTER
;
6258 i
.sib
.scale
= i
.log2_scale_factor
;
6259 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6260 /* No Vec_Disp8 if there is no base. */
6261 i
.types
[op
].bitfield
.vec_disp8
= 0;
6262 i
.types
[op
].bitfield
.disp8
= 0;
6263 i
.types
[op
].bitfield
.disp16
= 0;
6264 i
.types
[op
].bitfield
.disp64
= 0;
6265 if (flag_code
!= CODE_64BIT
)
6267 /* Must be 32 bit */
6268 i
.types
[op
].bitfield
.disp32
= 1;
6269 i
.types
[op
].bitfield
.disp32s
= 0;
6273 i
.types
[op
].bitfield
.disp32
= 0;
6274 i
.types
[op
].bitfield
.disp32s
= 1;
6276 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6280 /* RIP addressing for 64bit mode. */
6281 else if (i
.base_reg
->reg_num
== RegRip
||
6282 i
.base_reg
->reg_num
== RegEip
)
6284 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6285 i
.rm
.regmem
= NO_BASE_REGISTER
;
6286 i
.types
[op
].bitfield
.disp8
= 0;
6287 i
.types
[op
].bitfield
.disp16
= 0;
6288 i
.types
[op
].bitfield
.disp32
= 0;
6289 i
.types
[op
].bitfield
.disp32s
= 1;
6290 i
.types
[op
].bitfield
.disp64
= 0;
6291 i
.types
[op
].bitfield
.vec_disp8
= 0;
6292 i
.flags
[op
] |= Operand_PCrel
;
6293 if (! i
.disp_operands
)
6294 fake_zero_displacement
= 1;
6296 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
6298 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6299 switch (i
.base_reg
->reg_num
)
6302 if (i
.index_reg
== 0)
6304 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6305 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6309 if (i
.index_reg
== 0)
6312 if (operand_type_check (i
.types
[op
], disp
) == 0)
6314 /* fake (%bp) into 0(%bp) */
6315 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6316 i
.types
[op
].bitfield
.vec_disp8
= 1;
6318 i
.types
[op
].bitfield
.disp8
= 1;
6319 fake_zero_displacement
= 1;
6322 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6323 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6325 default: /* (%si) -> 4 or (%di) -> 5 */
6326 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6328 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6330 else /* i.base_reg and 32/64 bit mode */
6332 if (flag_code
== CODE_64BIT
6333 && operand_type_check (i
.types
[op
], disp
))
6335 i386_operand_type temp
;
6336 operand_type_set (&temp
, 0);
6337 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
6338 temp
.bitfield
.vec_disp8
6339 = i
.types
[op
].bitfield
.vec_disp8
;
6341 if (i
.prefix
[ADDR_PREFIX
] == 0)
6342 i
.types
[op
].bitfield
.disp32s
= 1;
6344 i
.types
[op
].bitfield
.disp32
= 1;
6347 if (!i
.tm
.opcode_modifier
.vecsib
)
6348 i
.rm
.regmem
= i
.base_reg
->reg_num
;
6349 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
6351 i
.sib
.base
= i
.base_reg
->reg_num
;
6352 /* x86-64 ignores REX prefix bit here to avoid decoder
6354 if (!(i
.base_reg
->reg_flags
& RegRex
)
6355 && (i
.base_reg
->reg_num
== EBP_REG_NUM
6356 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
6358 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
6360 fake_zero_displacement
= 1;
6361 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6362 i
.types
[op
].bitfield
.vec_disp8
= 1;
6364 i
.types
[op
].bitfield
.disp8
= 1;
6366 i
.sib
.scale
= i
.log2_scale_factor
;
6367 if (i
.index_reg
== 0)
6369 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6370 /* <disp>(%esp) becomes two byte modrm with no index
6371 register. We've already stored the code for esp
6372 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6373 Any base register besides %esp will not use the
6374 extra modrm byte. */
6375 i
.sib
.index
= NO_INDEX_REGISTER
;
6377 else if (!i
.tm
.opcode_modifier
.vecsib
)
6379 if (i
.index_reg
->reg_num
== RegEiz
6380 || i
.index_reg
->reg_num
== RegRiz
)
6381 i
.sib
.index
= NO_INDEX_REGISTER
;
6383 i
.sib
.index
= i
.index_reg
->reg_num
;
6384 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6385 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6390 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6391 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
6395 if (!fake_zero_displacement
6399 fake_zero_displacement
= 1;
6400 if (i
.disp_encoding
== disp_encoding_8bit
)
6401 i
.types
[op
].bitfield
.disp8
= 1;
6403 i
.types
[op
].bitfield
.disp32
= 1;
6405 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6409 if (fake_zero_displacement
)
6411 /* Fakes a zero displacement assuming that i.types[op]
6412 holds the correct displacement size. */
6415 gas_assert (i
.op
[op
].disps
== 0);
6416 exp
= &disp_expressions
[i
.disp_operands
++];
6417 i
.op
[op
].disps
= exp
;
6418 exp
->X_op
= O_constant
;
6419 exp
->X_add_number
= 0;
6420 exp
->X_add_symbol
= (symbolS
*) 0;
6421 exp
->X_op_symbol
= (symbolS
*) 0;
6429 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
6431 if (operand_type_check (i
.types
[0], imm
))
6432 i
.vex
.register_specifier
= NULL
;
6435 /* VEX.vvvv encodes one of the sources when the first
6436 operand is not an immediate. */
6437 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6438 i
.vex
.register_specifier
= i
.op
[0].regs
;
6440 i
.vex
.register_specifier
= i
.op
[1].regs
;
6443 /* Destination is a XMM register encoded in the ModRM.reg
6445 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
6446 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
6449 /* ModRM.rm and VEX.B encodes the other source. */
6450 if (!i
.mem_operands
)
6454 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6455 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6457 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
6459 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6463 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
6465 i
.vex
.register_specifier
= i
.op
[2].regs
;
6466 if (!i
.mem_operands
)
6469 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6470 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6474 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6475 (if any) based on i.tm.extension_opcode. Again, we must be
6476 careful to make sure that segment/control/debug/test/MMX
6477 registers are coded into the i.rm.reg field. */
6478 else if (i
.reg_operands
)
6481 unsigned int vex_reg
= ~0;
6483 for (op
= 0; op
< i
.operands
; op
++)
6484 if (i
.types
[op
].bitfield
.reg8
6485 || i
.types
[op
].bitfield
.reg16
6486 || i
.types
[op
].bitfield
.reg32
6487 || i
.types
[op
].bitfield
.reg64
6488 || i
.types
[op
].bitfield
.regmmx
6489 || i
.types
[op
].bitfield
.regxmm
6490 || i
.types
[op
].bitfield
.regymm
6491 || i
.types
[op
].bitfield
.regbnd
6492 || i
.types
[op
].bitfield
.regzmm
6493 || i
.types
[op
].bitfield
.regmask
6494 || i
.types
[op
].bitfield
.sreg2
6495 || i
.types
[op
].bitfield
.sreg3
6496 || i
.types
[op
].bitfield
.control
6497 || i
.types
[op
].bitfield
.debug
6498 || i
.types
[op
].bitfield
.test
)
6503 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6505 /* For instructions with VexNDS, the register-only
6506 source operand is encoded in VEX prefix. */
6507 gas_assert (mem
!= (unsigned int) ~0);
6512 gas_assert (op
< i
.operands
);
6516 /* Check register-only source operand when two source
6517 operands are swapped. */
6518 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
6519 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
6523 gas_assert (mem
== (vex_reg
+ 1)
6524 && op
< i
.operands
);
6529 gas_assert (vex_reg
< i
.operands
);
6533 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
6535 /* For instructions with VexNDD, the register destination
6536 is encoded in VEX prefix. */
6537 if (i
.mem_operands
== 0)
6539 /* There is no memory operand. */
6540 gas_assert ((op
+ 2) == i
.operands
);
6545 /* There are only 2 operands. */
6546 gas_assert (op
< 2 && i
.operands
== 2);
6551 gas_assert (op
< i
.operands
);
6553 if (vex_reg
!= (unsigned int) ~0)
6555 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
6557 if (type
->bitfield
.reg32
!= 1
6558 && type
->bitfield
.reg64
!= 1
6559 && !operand_type_equal (type
, ®xmm
)
6560 && !operand_type_equal (type
, ®ymm
)
6561 && !operand_type_equal (type
, ®zmm
)
6562 && !operand_type_equal (type
, ®mask
))
6565 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
6568 /* Don't set OP operand twice. */
6571 /* If there is an extension opcode to put here, the
6572 register number must be put into the regmem field. */
6573 if (i
.tm
.extension_opcode
!= None
)
6575 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
6576 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6578 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6583 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
6584 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6586 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6591 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6592 must set it to 3 to indicate this is a register operand
6593 in the regmem field. */
6594 if (!i
.mem_operands
)
6598 /* Fill in i.rm.reg field with extension opcode (if any). */
6599 if (i
.tm
.extension_opcode
!= None
)
6600 i
.rm
.reg
= i
.tm
.extension_opcode
;
6606 output_branch (void)
6612 relax_substateT subtype
;
6616 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
6617 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
6620 if (i
.prefix
[DATA_PREFIX
] != 0)
6626 /* Pentium4 branch hints. */
6627 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6628 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6633 if (i
.prefix
[REX_PREFIX
] != 0)
6639 /* BND prefixed jump. */
6640 if (i
.prefix
[BND_PREFIX
] != 0)
6642 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6646 if (i
.prefixes
!= 0 && !intel_syntax
)
6647 as_warn (_("skipping prefixes on this instruction"));
6649 /* It's always a symbol; End frag & setup for relax.
6650 Make sure there is enough room in this frag for the largest
6651 instruction we may generate in md_convert_frag. This is 2
6652 bytes for the opcode and room for the prefix and largest
6654 frag_grow (prefix
+ 2 + 4);
6655 /* Prefix and 1 opcode byte go in fr_fix. */
6656 p
= frag_more (prefix
+ 1);
6657 if (i
.prefix
[DATA_PREFIX
] != 0)
6658 *p
++ = DATA_PREFIX_OPCODE
;
6659 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
6660 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
6661 *p
++ = i
.prefix
[SEG_PREFIX
];
6662 if (i
.prefix
[REX_PREFIX
] != 0)
6663 *p
++ = i
.prefix
[REX_PREFIX
];
6664 *p
= i
.tm
.base_opcode
;
6666 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
6667 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
6668 else if (cpu_arch_flags
.bitfield
.cpui386
)
6669 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
6671 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
6674 sym
= i
.op
[0].disps
->X_add_symbol
;
6675 off
= i
.op
[0].disps
->X_add_number
;
6677 if (i
.op
[0].disps
->X_op
!= O_constant
6678 && i
.op
[0].disps
->X_op
!= O_symbol
)
6680 /* Handle complex expressions. */
6681 sym
= make_expr_symbol (i
.op
[0].disps
);
6685 /* 1 possible extra opcode + 4 byte displacement go in var part.
6686 Pass reloc in fr_var. */
6687 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
6697 if (i
.tm
.opcode_modifier
.jumpbyte
)
6699 /* This is a loop or jecxz type instruction. */
6701 if (i
.prefix
[ADDR_PREFIX
] != 0)
6703 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
6706 /* Pentium4 branch hints. */
6707 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6708 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6710 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
6719 if (flag_code
== CODE_16BIT
)
6722 if (i
.prefix
[DATA_PREFIX
] != 0)
6724 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
6734 if (i
.prefix
[REX_PREFIX
] != 0)
6736 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
6740 /* BND prefixed jump. */
6741 if (i
.prefix
[BND_PREFIX
] != 0)
6743 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6747 if (i
.prefixes
!= 0 && !intel_syntax
)
6748 as_warn (_("skipping prefixes on this instruction"));
6750 p
= frag_more (i
.tm
.opcode_length
+ size
);
6751 switch (i
.tm
.opcode_length
)
6754 *p
++ = i
.tm
.base_opcode
>> 8;
6756 *p
++ = i
.tm
.base_opcode
;
6762 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6763 i
.op
[0].disps
, 1, reloc (size
, 1, 1, i
.reloc
[0]));
6765 /* All jumps handled here are signed, but don't use a signed limit
6766 check for 32 and 16 bit jumps as we want to allow wrap around at
6767 4G and 64k respectively. */
6769 fixP
->fx_signed
= 1;
6773 output_interseg_jump (void)
6781 if (flag_code
== CODE_16BIT
)
6785 if (i
.prefix
[DATA_PREFIX
] != 0)
6791 if (i
.prefix
[REX_PREFIX
] != 0)
6801 if (i
.prefixes
!= 0 && !intel_syntax
)
6802 as_warn (_("skipping prefixes on this instruction"));
6804 /* 1 opcode; 2 segment; offset */
6805 p
= frag_more (prefix
+ 1 + 2 + size
);
6807 if (i
.prefix
[DATA_PREFIX
] != 0)
6808 *p
++ = DATA_PREFIX_OPCODE
;
6810 if (i
.prefix
[REX_PREFIX
] != 0)
6811 *p
++ = i
.prefix
[REX_PREFIX
];
6813 *p
++ = i
.tm
.base_opcode
;
6814 if (i
.op
[1].imms
->X_op
== O_constant
)
6816 offsetT n
= i
.op
[1].imms
->X_add_number
;
6819 && !fits_in_unsigned_word (n
)
6820 && !fits_in_signed_word (n
))
6822 as_bad (_("16-bit jump out of range"));
6825 md_number_to_chars (p
, n
, size
);
6828 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6829 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
6830 if (i
.op
[0].imms
->X_op
!= O_constant
)
6831 as_bad (_("can't handle non absolute segment in `%s'"),
6833 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
6839 fragS
*insn_start_frag
;
6840 offsetT insn_start_off
;
6842 /* Tie dwarf2 debug info to the address at the start of the insn.
6843 We can't do this after the insn has been output as the current
6844 frag may have been closed off. eg. by frag_var. */
6845 dwarf2_emit_insn (0);
6847 insn_start_frag
= frag_now
;
6848 insn_start_off
= frag_now_fix ();
6851 if (i
.tm
.opcode_modifier
.jump
)
6853 else if (i
.tm
.opcode_modifier
.jumpbyte
6854 || i
.tm
.opcode_modifier
.jumpdword
)
6856 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
6857 output_interseg_jump ();
6860 /* Output normal instructions here. */
6864 unsigned int prefix
;
6866 /* Some processors fail on LOCK prefix. This options makes
6867 assembler ignore LOCK prefix and serves as a workaround. */
6868 if (omit_lock_prefix
)
6870 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
6872 i
.prefix
[LOCK_PREFIX
] = 0;
6875 /* Since the VEX/EVEX prefix contains the implicit prefix, we
6876 don't need the explicit prefix. */
6877 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
6879 switch (i
.tm
.opcode_length
)
6882 if (i
.tm
.base_opcode
& 0xff000000)
6884 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
6889 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
6891 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
6892 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
6895 if (prefix
!= REPE_PREFIX_OPCODE
6896 || (i
.prefix
[REP_PREFIX
]
6897 != REPE_PREFIX_OPCODE
))
6898 add_prefix (prefix
);
6901 add_prefix (prefix
);
6910 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6911 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
6912 R_X86_64_GOTTPOFF relocation so that linker can safely
6913 perform IE->LE optimization. */
6914 if (x86_elf_abi
== X86_64_X32_ABI
6916 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
6917 && i
.prefix
[REX_PREFIX
] == 0)
6918 add_prefix (REX_OPCODE
);
6921 /* The prefix bytes. */
6922 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
6924 FRAG_APPEND_1_CHAR (*q
);
6928 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
6933 /* REX byte is encoded in VEX prefix. */
6937 FRAG_APPEND_1_CHAR (*q
);
6940 /* There should be no other prefixes for instructions
6945 /* For EVEX instructions i.vrex should become 0 after
6946 build_evex_prefix. For VEX instructions upper 16 registers
6947 aren't available, so VREX should be 0. */
6950 /* Now the VEX prefix. */
6951 p
= frag_more (i
.vex
.length
);
6952 for (j
= 0; j
< i
.vex
.length
; j
++)
6953 p
[j
] = i
.vex
.bytes
[j
];
6956 /* Now the opcode; be careful about word order here! */
6957 if (i
.tm
.opcode_length
== 1)
6959 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
6963 switch (i
.tm
.opcode_length
)
6967 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
6968 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
6972 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
6982 /* Put out high byte first: can't use md_number_to_chars! */
6983 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
6984 *p
= i
.tm
.base_opcode
& 0xff;
6987 /* Now the modrm byte and sib byte (if present). */
6988 if (i
.tm
.opcode_modifier
.modrm
)
6990 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
6993 /* If i.rm.regmem == ESP (4)
6994 && i.rm.mode != (Register mode)
6996 ==> need second modrm byte. */
6997 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
6999 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
7000 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7002 | i
.sib
.scale
<< 6));
7005 if (i
.disp_operands
)
7006 output_disp (insn_start_frag
, insn_start_off
);
7009 output_imm (insn_start_frag
, insn_start_off
);
7015 pi ("" /*line*/, &i
);
7017 #endif /* DEBUG386 */
7020 /* Return the size of the displacement operand N. */
7023 disp_size (unsigned int n
)
7027 /* Vec_Disp8 has to be 8bit. */
7028 if (i
.types
[n
].bitfield
.vec_disp8
)
7030 else if (i
.types
[n
].bitfield
.disp64
)
7032 else if (i
.types
[n
].bitfield
.disp8
)
7034 else if (i
.types
[n
].bitfield
.disp16
)
7039 /* Return the size of the immediate operand N. */
7042 imm_size (unsigned int n
)
7045 if (i
.types
[n
].bitfield
.imm64
)
7047 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
7049 else if (i
.types
[n
].bitfield
.imm16
)
7055 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
7060 for (n
= 0; n
< i
.operands
; n
++)
7062 if (i
.types
[n
].bitfield
.vec_disp8
7063 || operand_type_check (i
.types
[n
], disp
))
7065 if (i
.op
[n
].disps
->X_op
== O_constant
)
7067 int size
= disp_size (n
);
7068 offsetT val
= i
.op
[n
].disps
->X_add_number
;
7070 if (i
.types
[n
].bitfield
.vec_disp8
)
7072 val
= offset_in_range (val
, size
);
7073 p
= frag_more (size
);
7074 md_number_to_chars (p
, val
, size
);
7078 enum bfd_reloc_code_real reloc_type
;
7079 int size
= disp_size (n
);
7080 int sign
= i
.types
[n
].bitfield
.disp32s
;
7081 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
7083 /* We can't have 8 bit displacement here. */
7084 gas_assert (!i
.types
[n
].bitfield
.disp8
);
7086 /* The PC relative address is computed relative
7087 to the instruction boundary, so in case immediate
7088 fields follows, we need to adjust the value. */
7089 if (pcrel
&& i
.imm_operands
)
7094 for (n1
= 0; n1
< i
.operands
; n1
++)
7095 if (operand_type_check (i
.types
[n1
], imm
))
7097 /* Only one immediate is allowed for PC
7098 relative address. */
7099 gas_assert (sz
== 0);
7101 i
.op
[n
].disps
->X_add_number
-= sz
;
7103 /* We should find the immediate. */
7104 gas_assert (sz
!= 0);
7107 p
= frag_more (size
);
7108 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
7110 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
7111 && (((reloc_type
== BFD_RELOC_32
7112 || reloc_type
== BFD_RELOC_X86_64_32S
7113 || (reloc_type
== BFD_RELOC_64
7115 && (i
.op
[n
].disps
->X_op
== O_symbol
7116 || (i
.op
[n
].disps
->X_op
== O_add
7117 && ((symbol_get_value_expression
7118 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
7120 || reloc_type
== BFD_RELOC_32_PCREL
))
7124 if (insn_start_frag
== frag_now
)
7125 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7130 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7131 for (fr
= insn_start_frag
->fr_next
;
7132 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7134 add
+= p
- frag_now
->fr_literal
;
7139 reloc_type
= BFD_RELOC_386_GOTPC
;
7140 i
.op
[n
].imms
->X_add_number
+= add
;
7142 else if (reloc_type
== BFD_RELOC_64
)
7143 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7145 /* Don't do the adjustment for x86-64, as there
7146 the pcrel addressing is relative to the _next_
7147 insn, and that is taken care of in other code. */
7148 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7150 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7151 i
.op
[n
].disps
, pcrel
, reloc_type
);
7158 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
7163 for (n
= 0; n
< i
.operands
; n
++)
7165 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7166 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
7169 if (operand_type_check (i
.types
[n
], imm
))
7171 if (i
.op
[n
].imms
->X_op
== O_constant
)
7173 int size
= imm_size (n
);
7176 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
7178 p
= frag_more (size
);
7179 md_number_to_chars (p
, val
, size
);
7183 /* Not absolute_section.
7184 Need a 32-bit fixup (don't support 8bit
7185 non-absolute imms). Try to support other
7187 enum bfd_reloc_code_real reloc_type
;
7188 int size
= imm_size (n
);
7191 if (i
.types
[n
].bitfield
.imm32s
7192 && (i
.suffix
== QWORD_MNEM_SUFFIX
7193 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
7198 p
= frag_more (size
);
7199 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
7201 /* This is tough to explain. We end up with this one if we
7202 * have operands that look like
7203 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7204 * obtain the absolute address of the GOT, and it is strongly
7205 * preferable from a performance point of view to avoid using
7206 * a runtime relocation for this. The actual sequence of
7207 * instructions often look something like:
7212 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7214 * The call and pop essentially return the absolute address
7215 * of the label .L66 and store it in %ebx. The linker itself
7216 * will ultimately change the first operand of the addl so
7217 * that %ebx points to the GOT, but to keep things simple, the
7218 * .o file must have this operand set so that it generates not
7219 * the absolute address of .L66, but the absolute address of
7220 * itself. This allows the linker itself simply treat a GOTPC
7221 * relocation as asking for a pcrel offset to the GOT to be
7222 * added in, and the addend of the relocation is stored in the
7223 * operand field for the instruction itself.
7225 * Our job here is to fix the operand so that it would add
7226 * the correct offset so that %ebx would point to itself. The
7227 * thing that is tricky is that .-.L66 will point to the
7228 * beginning of the instruction, so we need to further modify
7229 * the operand so that it will point to itself. There are
7230 * other cases where you have something like:
7232 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7234 * and here no correction would be required. Internally in
7235 * the assembler we treat operands of this form as not being
7236 * pcrel since the '.' is explicitly mentioned, and I wonder
7237 * whether it would simplify matters to do it this way. Who
7238 * knows. In earlier versions of the PIC patches, the
7239 * pcrel_adjust field was used to store the correction, but
7240 * since the expression is not pcrel, I felt it would be
7241 * confusing to do it this way. */
7243 if ((reloc_type
== BFD_RELOC_32
7244 || reloc_type
== BFD_RELOC_X86_64_32S
7245 || reloc_type
== BFD_RELOC_64
)
7247 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
7248 && (i
.op
[n
].imms
->X_op
== O_symbol
7249 || (i
.op
[n
].imms
->X_op
== O_add
7250 && ((symbol_get_value_expression
7251 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
7256 if (insn_start_frag
== frag_now
)
7257 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7262 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7263 for (fr
= insn_start_frag
->fr_next
;
7264 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7266 add
+= p
- frag_now
->fr_literal
;
7270 reloc_type
= BFD_RELOC_386_GOTPC
;
7272 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7274 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7275 i
.op
[n
].imms
->X_add_number
+= add
;
7277 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7278 i
.op
[n
].imms
, 0, reloc_type
);
7284 /* x86_cons_fix_new is called via the expression parsing code when a
7285 reloc is needed. We use this hook to get the correct .got reloc. */
7286 static int cons_sign
= -1;
7289 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
7290 expressionS
*exp
, bfd_reloc_code_real_type r
)
7292 r
= reloc (len
, 0, cons_sign
, r
);
7295 if (exp
->X_op
== O_secrel
)
7297 exp
->X_op
= O_symbol
;
7298 r
= BFD_RELOC_32_SECREL
;
7302 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
7305 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7306 purpose of the `.dc.a' internal pseudo-op. */
7309 x86_address_bytes (void)
7311 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
7313 return stdoutput
->arch_info
->bits_per_address
/ 8;
7316 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7318 # define lex_got(reloc, adjust, types) NULL
7320 /* Parse operands of the form
7321 <symbol>@GOTOFF+<nnn>
7322 and similar .plt or .got references.
7324 If we find one, set up the correct relocation in RELOC and copy the
7325 input string, minus the `@GOTOFF' into a malloc'd buffer for
7326 parsing by the calling routine. Return this buffer, and if ADJUST
7327 is non-null set it to the length of the string we removed from the
7328 input line. Otherwise return NULL. */
7330 lex_got (enum bfd_reloc_code_real
*rel
,
7332 i386_operand_type
*types
)
7334 /* Some of the relocations depend on the size of what field is to
7335 be relocated. But in our callers i386_immediate and i386_displacement
7336 we don't yet know the operand size (this will be set by insn
7337 matching). Hence we record the word32 relocation here,
7338 and adjust the reloc according to the real size in reloc(). */
7339 static const struct {
7342 const enum bfd_reloc_code_real rel
[2];
7343 const i386_operand_type types64
;
7345 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7346 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
7348 OPERAND_TYPE_IMM32_64
},
7350 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
7351 BFD_RELOC_X86_64_PLTOFF64
},
7352 OPERAND_TYPE_IMM64
},
7353 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
7354 BFD_RELOC_X86_64_PLT32
},
7355 OPERAND_TYPE_IMM32_32S_DISP32
},
7356 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
7357 BFD_RELOC_X86_64_GOTPLT64
},
7358 OPERAND_TYPE_IMM64_DISP64
},
7359 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
7360 BFD_RELOC_X86_64_GOTOFF64
},
7361 OPERAND_TYPE_IMM64_DISP64
},
7362 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
7363 BFD_RELOC_X86_64_GOTPCREL
},
7364 OPERAND_TYPE_IMM32_32S_DISP32
},
7365 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
7366 BFD_RELOC_X86_64_TLSGD
},
7367 OPERAND_TYPE_IMM32_32S_DISP32
},
7368 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
7369 _dummy_first_bfd_reloc_code_real
},
7370 OPERAND_TYPE_NONE
},
7371 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
7372 BFD_RELOC_X86_64_TLSLD
},
7373 OPERAND_TYPE_IMM32_32S_DISP32
},
7374 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
7375 BFD_RELOC_X86_64_GOTTPOFF
},
7376 OPERAND_TYPE_IMM32_32S_DISP32
},
7377 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
7378 BFD_RELOC_X86_64_TPOFF32
},
7379 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7380 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
7381 _dummy_first_bfd_reloc_code_real
},
7382 OPERAND_TYPE_NONE
},
7383 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
7384 BFD_RELOC_X86_64_DTPOFF32
},
7385 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7386 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
7387 _dummy_first_bfd_reloc_code_real
},
7388 OPERAND_TYPE_NONE
},
7389 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
7390 _dummy_first_bfd_reloc_code_real
},
7391 OPERAND_TYPE_NONE
},
7392 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
7393 BFD_RELOC_X86_64_GOT32
},
7394 OPERAND_TYPE_IMM32_32S_64_DISP32
},
7395 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
7396 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
7397 OPERAND_TYPE_IMM32_32S_DISP32
},
7398 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
7399 BFD_RELOC_X86_64_TLSDESC_CALL
},
7400 OPERAND_TYPE_IMM32_32S_DISP32
},
7405 #if defined (OBJ_MAYBE_ELF)
7410 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7411 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7414 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7416 int len
= gotrel
[j
].len
;
7417 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7419 if (gotrel
[j
].rel
[object_64bit
] != 0)
7422 char *tmpbuf
, *past_reloc
;
7424 *rel
= gotrel
[j
].rel
[object_64bit
];
7428 if (flag_code
!= CODE_64BIT
)
7430 types
->bitfield
.imm32
= 1;
7431 types
->bitfield
.disp32
= 1;
7434 *types
= gotrel
[j
].types64
;
7437 if (j
!= 0 && GOT_symbol
== NULL
)
7438 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
7440 /* The length of the first part of our input line. */
7441 first
= cp
- input_line_pointer
;
7443 /* The second part goes from after the reloc token until
7444 (and including) an end_of_line char or comma. */
7445 past_reloc
= cp
+ 1 + len
;
7447 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7449 second
= cp
+ 1 - past_reloc
;
7451 /* Allocate and copy string. The trailing NUL shouldn't
7452 be necessary, but be safe. */
7453 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7454 memcpy (tmpbuf
, input_line_pointer
, first
);
7455 if (second
!= 0 && *past_reloc
!= ' ')
7456 /* Replace the relocation token with ' ', so that
7457 errors like foo@GOTOFF1 will be detected. */
7458 tmpbuf
[first
++] = ' ';
7460 /* Increment length by 1 if the relocation token is
7465 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7466 tmpbuf
[first
+ second
] = '\0';
7470 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7471 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7476 /* Might be a symbol version string. Don't as_bad here. */
7485 /* Parse operands of the form
7486 <symbol>@SECREL32+<nnn>
7488 If we find one, set up the correct relocation in RELOC and copy the
7489 input string, minus the `@SECREL32' into a malloc'd buffer for
7490 parsing by the calling routine. Return this buffer, and if ADJUST
7491 is non-null set it to the length of the string we removed from the
7492 input line. Otherwise return NULL.
7494 This function is copied from the ELF version above adjusted for PE targets. */
7497 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
7498 int *adjust ATTRIBUTE_UNUSED
,
7499 i386_operand_type
*types
)
7505 const enum bfd_reloc_code_real rel
[2];
7506 const i386_operand_type types64
;
7510 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
7511 BFD_RELOC_32_SECREL
},
7512 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7518 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7519 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7522 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7524 int len
= gotrel
[j
].len
;
7526 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7528 if (gotrel
[j
].rel
[object_64bit
] != 0)
7531 char *tmpbuf
, *past_reloc
;
7533 *rel
= gotrel
[j
].rel
[object_64bit
];
7539 if (flag_code
!= CODE_64BIT
)
7541 types
->bitfield
.imm32
= 1;
7542 types
->bitfield
.disp32
= 1;
7545 *types
= gotrel
[j
].types64
;
7548 /* The length of the first part of our input line. */
7549 first
= cp
- input_line_pointer
;
7551 /* The second part goes from after the reloc token until
7552 (and including) an end_of_line char or comma. */
7553 past_reloc
= cp
+ 1 + len
;
7555 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7557 second
= cp
+ 1 - past_reloc
;
7559 /* Allocate and copy string. The trailing NUL shouldn't
7560 be necessary, but be safe. */
7561 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7562 memcpy (tmpbuf
, input_line_pointer
, first
);
7563 if (second
!= 0 && *past_reloc
!= ' ')
7564 /* Replace the relocation token with ' ', so that
7565 errors like foo@SECLREL321 will be detected. */
7566 tmpbuf
[first
++] = ' ';
7567 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7568 tmpbuf
[first
+ second
] = '\0';
7572 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7573 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7578 /* Might be a symbol version string. Don't as_bad here. */
7584 bfd_reloc_code_real_type
7585 x86_cons (expressionS
*exp
, int size
)
7587 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
7589 intel_syntax
= -intel_syntax
;
7592 if (size
== 4 || (object_64bit
&& size
== 8))
7594 /* Handle @GOTOFF and the like in an expression. */
7596 char *gotfree_input_line
;
7599 save
= input_line_pointer
;
7600 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
7601 if (gotfree_input_line
)
7602 input_line_pointer
= gotfree_input_line
;
7606 if (gotfree_input_line
)
7608 /* expression () has merrily parsed up to the end of line,
7609 or a comma - in the wrong buffer. Transfer how far
7610 input_line_pointer has moved to the right buffer. */
7611 input_line_pointer
= (save
7612 + (input_line_pointer
- gotfree_input_line
)
7614 free (gotfree_input_line
);
7615 if (exp
->X_op
== O_constant
7616 || exp
->X_op
== O_absent
7617 || exp
->X_op
== O_illegal
7618 || exp
->X_op
== O_register
7619 || exp
->X_op
== O_big
)
7621 char c
= *input_line_pointer
;
7622 *input_line_pointer
= 0;
7623 as_bad (_("missing or invalid expression `%s'"), save
);
7624 *input_line_pointer
= c
;
7631 intel_syntax
= -intel_syntax
;
7634 i386_intel_simplify (exp
);
7640 signed_cons (int size
)
7642 if (flag_code
== CODE_64BIT
)
7650 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
7657 if (exp
.X_op
== O_symbol
)
7658 exp
.X_op
= O_secrel
;
7660 emit_expr (&exp
, 4);
7662 while (*input_line_pointer
++ == ',');
7664 input_line_pointer
--;
7665 demand_empty_rest_of_line ();
7669 /* Handle Vector operations. */
7672 check_VecOperations (char *op_string
, char *op_end
)
7674 const reg_entry
*mask
;
7679 && (op_end
== NULL
|| op_string
< op_end
))
7682 if (*op_string
== '{')
7686 /* Check broadcasts. */
7687 if (strncmp (op_string
, "1to", 3) == 0)
7692 goto duplicated_vec_op
;
7695 if (*op_string
== '8')
7696 bcst_type
= BROADCAST_1TO8
;
7697 else if (*op_string
== '4')
7698 bcst_type
= BROADCAST_1TO4
;
7699 else if (*op_string
== '2')
7700 bcst_type
= BROADCAST_1TO2
;
7701 else if (*op_string
== '1'
7702 && *(op_string
+1) == '6')
7704 bcst_type
= BROADCAST_1TO16
;
7709 as_bad (_("Unsupported broadcast: `%s'"), saved
);
7714 broadcast_op
.type
= bcst_type
;
7715 broadcast_op
.operand
= this_operand
;
7716 i
.broadcast
= &broadcast_op
;
7718 /* Check masking operation. */
7719 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
7721 /* k0 can't be used for write mask. */
7722 if (mask
->reg_num
== 0)
7724 as_bad (_("`%s' can't be used for write mask"),
7731 mask_op
.mask
= mask
;
7732 mask_op
.zeroing
= 0;
7733 mask_op
.operand
= this_operand
;
7739 goto duplicated_vec_op
;
7741 i
.mask
->mask
= mask
;
7743 /* Only "{z}" is allowed here. No need to check
7744 zeroing mask explicitly. */
7745 if (i
.mask
->operand
!= this_operand
)
7747 as_bad (_("invalid write mask `%s'"), saved
);
7754 /* Check zeroing-flag for masking operation. */
7755 else if (*op_string
== 'z')
7759 mask_op
.mask
= NULL
;
7760 mask_op
.zeroing
= 1;
7761 mask_op
.operand
= this_operand
;
7766 if (i
.mask
->zeroing
)
7769 as_bad (_("duplicated `%s'"), saved
);
7773 i
.mask
->zeroing
= 1;
7775 /* Only "{%k}" is allowed here. No need to check mask
7776 register explicitly. */
7777 if (i
.mask
->operand
!= this_operand
)
7779 as_bad (_("invalid zeroing-masking `%s'"),
7788 goto unknown_vec_op
;
7790 if (*op_string
!= '}')
7792 as_bad (_("missing `}' in `%s'"), saved
);
7799 /* We don't know this one. */
7800 as_bad (_("unknown vector operation: `%s'"), saved
);
7808 i386_immediate (char *imm_start
)
7810 char *save_input_line_pointer
;
7811 char *gotfree_input_line
;
7814 i386_operand_type types
;
7816 operand_type_set (&types
, ~0);
7818 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
7820 as_bad (_("at most %d immediate operands are allowed"),
7821 MAX_IMMEDIATE_OPERANDS
);
7825 exp
= &im_expressions
[i
.imm_operands
++];
7826 i
.op
[this_operand
].imms
= exp
;
7828 if (is_space_char (*imm_start
))
7831 save_input_line_pointer
= input_line_pointer
;
7832 input_line_pointer
= imm_start
;
7834 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
7835 if (gotfree_input_line
)
7836 input_line_pointer
= gotfree_input_line
;
7838 exp_seg
= expression (exp
);
7842 /* Handle vector operations. */
7843 if (*input_line_pointer
== '{')
7845 input_line_pointer
= check_VecOperations (input_line_pointer
,
7847 if (input_line_pointer
== NULL
)
7851 if (*input_line_pointer
)
7852 as_bad (_("junk `%s' after expression"), input_line_pointer
);
7854 input_line_pointer
= save_input_line_pointer
;
7855 if (gotfree_input_line
)
7857 free (gotfree_input_line
);
7859 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
7860 exp
->X_op
= O_illegal
;
7863 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
7867 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
7868 i386_operand_type types
, const char *imm_start
)
7870 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
7873 as_bad (_("missing or invalid immediate expression `%s'"),
7877 else if (exp
->X_op
== O_constant
)
7879 /* Size it properly later. */
7880 i
.types
[this_operand
].bitfield
.imm64
= 1;
7881 /* If not 64bit, sign extend val. */
7882 if (flag_code
!= CODE_64BIT
7883 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
7885 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
7887 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7888 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
7889 && exp_seg
!= absolute_section
7890 && exp_seg
!= text_section
7891 && exp_seg
!= data_section
7892 && exp_seg
!= bss_section
7893 && exp_seg
!= undefined_section
7894 && !bfd_is_com_section (exp_seg
))
7896 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
7900 else if (!intel_syntax
&& exp_seg
== reg_section
)
7903 as_bad (_("illegal immediate register operand %s"), imm_start
);
7908 /* This is an address. The size of the address will be
7909 determined later, depending on destination register,
7910 suffix, or the default for the section. */
7911 i
.types
[this_operand
].bitfield
.imm8
= 1;
7912 i
.types
[this_operand
].bitfield
.imm16
= 1;
7913 i
.types
[this_operand
].bitfield
.imm32
= 1;
7914 i
.types
[this_operand
].bitfield
.imm32s
= 1;
7915 i
.types
[this_operand
].bitfield
.imm64
= 1;
7916 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
7924 i386_scale (char *scale
)
7927 char *save
= input_line_pointer
;
7929 input_line_pointer
= scale
;
7930 val
= get_absolute_expression ();
7935 i
.log2_scale_factor
= 0;
7938 i
.log2_scale_factor
= 1;
7941 i
.log2_scale_factor
= 2;
7944 i
.log2_scale_factor
= 3;
7948 char sep
= *input_line_pointer
;
7950 *input_line_pointer
= '\0';
7951 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7953 *input_line_pointer
= sep
;
7954 input_line_pointer
= save
;
7958 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
7960 as_warn (_("scale factor of %d without an index register"),
7961 1 << i
.log2_scale_factor
);
7962 i
.log2_scale_factor
= 0;
7964 scale
= input_line_pointer
;
7965 input_line_pointer
= save
;
7970 i386_displacement (char *disp_start
, char *disp_end
)
7974 char *save_input_line_pointer
;
7975 char *gotfree_input_line
;
7977 i386_operand_type bigdisp
, types
= anydisp
;
7980 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
7982 as_bad (_("at most %d displacement operands are allowed"),
7983 MAX_MEMORY_OPERANDS
);
7987 operand_type_set (&bigdisp
, 0);
7988 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
7989 || (!current_templates
->start
->opcode_modifier
.jump
7990 && !current_templates
->start
->opcode_modifier
.jumpdword
))
7992 bigdisp
.bitfield
.disp32
= 1;
7993 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
7994 if (flag_code
== CODE_64BIT
)
7998 bigdisp
.bitfield
.disp32s
= 1;
7999 bigdisp
.bitfield
.disp64
= 1;
8002 else if ((flag_code
== CODE_16BIT
) ^ override
)
8004 bigdisp
.bitfield
.disp32
= 0;
8005 bigdisp
.bitfield
.disp16
= 1;
8010 /* For PC-relative branches, the width of the displacement
8011 is dependent upon data size, not address size. */
8012 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8013 if (flag_code
== CODE_64BIT
)
8015 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8016 bigdisp
.bitfield
.disp16
= 1;
8019 bigdisp
.bitfield
.disp32
= 1;
8020 bigdisp
.bitfield
.disp32s
= 1;
8026 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8028 : LONG_MNEM_SUFFIX
));
8029 bigdisp
.bitfield
.disp32
= 1;
8030 if ((flag_code
== CODE_16BIT
) ^ override
)
8032 bigdisp
.bitfield
.disp32
= 0;
8033 bigdisp
.bitfield
.disp16
= 1;
8037 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8040 exp
= &disp_expressions
[i
.disp_operands
];
8041 i
.op
[this_operand
].disps
= exp
;
8043 save_input_line_pointer
= input_line_pointer
;
8044 input_line_pointer
= disp_start
;
8045 END_STRING_AND_SAVE (disp_end
);
8047 #ifndef GCC_ASM_O_HACK
8048 #define GCC_ASM_O_HACK 0
8051 END_STRING_AND_SAVE (disp_end
+ 1);
8052 if (i
.types
[this_operand
].bitfield
.baseIndex
8053 && displacement_string_end
[-1] == '+')
8055 /* This hack is to avoid a warning when using the "o"
8056 constraint within gcc asm statements.
8059 #define _set_tssldt_desc(n,addr,limit,type) \
8060 __asm__ __volatile__ ( \
8062 "movw %w1,2+%0\n\t" \
8064 "movb %b1,4+%0\n\t" \
8065 "movb %4,5+%0\n\t" \
8066 "movb $0,6+%0\n\t" \
8067 "movb %h1,7+%0\n\t" \
8069 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8071 This works great except that the output assembler ends
8072 up looking a bit weird if it turns out that there is
8073 no offset. You end up producing code that looks like:
8086 So here we provide the missing zero. */
8088 *displacement_string_end
= '0';
8091 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8092 if (gotfree_input_line
)
8093 input_line_pointer
= gotfree_input_line
;
8095 exp_seg
= expression (exp
);
8098 if (*input_line_pointer
)
8099 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8101 RESTORE_END_STRING (disp_end
+ 1);
8103 input_line_pointer
= save_input_line_pointer
;
8104 if (gotfree_input_line
)
8106 free (gotfree_input_line
);
8108 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8109 exp
->X_op
= O_illegal
;
8112 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8114 RESTORE_END_STRING (disp_end
);
8120 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8121 i386_operand_type types
, const char *disp_start
)
8123 i386_operand_type bigdisp
;
8126 /* We do this to make sure that the section symbol is in
8127 the symbol table. We will ultimately change the relocation
8128 to be relative to the beginning of the section. */
8129 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8130 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8131 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8133 if (exp
->X_op
!= O_symbol
)
8136 if (S_IS_LOCAL (exp
->X_add_symbol
)
8137 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8138 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8139 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8140 exp
->X_op
= O_subtract
;
8141 exp
->X_op_symbol
= GOT_symbol
;
8142 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8143 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8144 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8145 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8147 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8150 else if (exp
->X_op
== O_absent
8151 || exp
->X_op
== O_illegal
8152 || exp
->X_op
== O_big
)
8155 as_bad (_("missing or invalid displacement expression `%s'"),
8160 else if (flag_code
== CODE_64BIT
8161 && !i
.prefix
[ADDR_PREFIX
]
8162 && exp
->X_op
== O_constant
)
8164 /* Since displacement is signed extended to 64bit, don't allow
8165 disp32 and turn off disp32s if they are out of range. */
8166 i
.types
[this_operand
].bitfield
.disp32
= 0;
8167 if (!fits_in_signed_long (exp
->X_add_number
))
8169 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8170 if (i
.types
[this_operand
].bitfield
.baseindex
)
8172 as_bad (_("0x%lx out range of signed 32bit displacement"),
8173 (long) exp
->X_add_number
);
8179 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8180 else if (exp
->X_op
!= O_constant
8181 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8182 && exp_seg
!= absolute_section
8183 && exp_seg
!= text_section
8184 && exp_seg
!= data_section
8185 && exp_seg
!= bss_section
8186 && exp_seg
!= undefined_section
8187 && !bfd_is_com_section (exp_seg
))
8189 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8194 /* Check if this is a displacement only operand. */
8195 bigdisp
= i
.types
[this_operand
];
8196 bigdisp
.bitfield
.disp8
= 0;
8197 bigdisp
.bitfield
.disp16
= 0;
8198 bigdisp
.bitfield
.disp32
= 0;
8199 bigdisp
.bitfield
.disp32s
= 0;
8200 bigdisp
.bitfield
.disp64
= 0;
8201 if (operand_type_all_zero (&bigdisp
))
8202 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8208 /* Make sure the memory operand we've been dealt is valid.
8209 Return 1 on success, 0 on a failure. */
8212 i386_index_check (const char *operand_string
)
8214 const char *kind
= "base/index";
8215 enum flag_code addr_mode
;
8217 if (i
.prefix
[ADDR_PREFIX
])
8218 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8221 addr_mode
= flag_code
;
8223 #if INFER_ADDR_PREFIX
8224 if (i
.mem_operands
== 0)
8226 /* Infer address prefix from the first memory operand. */
8227 const reg_entry
*addr_reg
= i
.base_reg
;
8229 if (addr_reg
== NULL
)
8230 addr_reg
= i
.index_reg
;
8234 if (addr_reg
->reg_num
== RegEip
8235 || addr_reg
->reg_num
== RegEiz
8236 || addr_reg
->reg_type
.bitfield
.reg32
)
8237 addr_mode
= CODE_32BIT
;
8238 else if (flag_code
!= CODE_64BIT
8239 && addr_reg
->reg_type
.bitfield
.reg16
)
8240 addr_mode
= CODE_16BIT
;
8242 if (addr_mode
!= flag_code
)
8244 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
8246 /* Change the size of any displacement too. At most one
8247 of Disp16 or Disp32 is set.
8248 FIXME. There doesn't seem to be any real need for
8249 separate Disp16 and Disp32 flags. The same goes for
8250 Imm16 and Imm32. Removing them would probably clean
8251 up the code quite a lot. */
8252 if (flag_code
!= CODE_64BIT
8253 && (i
.types
[this_operand
].bitfield
.disp16
8254 || i
.types
[this_operand
].bitfield
.disp32
))
8255 i
.types
[this_operand
]
8256 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
8263 if (current_templates
->start
->opcode_modifier
.isstring
8264 && !current_templates
->start
->opcode_modifier
.immext
8265 && (current_templates
->end
[-1].opcode_modifier
.isstring
8268 /* Memory operands of string insns are special in that they only allow
8269 a single register (rDI, rSI, or rBX) as their memory address. */
8270 const reg_entry
*expected_reg
;
8271 static const char *di_si
[][2] =
8277 static const char *bx
[] = { "ebx", "bx", "rbx" };
8279 kind
= "string address";
8281 if (current_templates
->start
->opcode_modifier
.w
)
8283 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
8285 if (!type
.bitfield
.baseindex
8286 || ((!i
.mem_operands
!= !intel_syntax
)
8287 && current_templates
->end
[-1].operand_types
[1]
8288 .bitfield
.baseindex
))
8289 type
= current_templates
->end
[-1].operand_types
[1];
8290 expected_reg
= hash_find (reg_hash
,
8291 di_si
[addr_mode
][type
.bitfield
.esseg
]);
8295 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
8297 if (i
.base_reg
!= expected_reg
8299 || operand_type_check (i
.types
[this_operand
], disp
))
8301 /* The second memory operand must have the same size as
8305 && !((addr_mode
== CODE_64BIT
8306 && i
.base_reg
->reg_type
.bitfield
.reg64
)
8307 || (addr_mode
== CODE_32BIT
8308 ? i
.base_reg
->reg_type
.bitfield
.reg32
8309 : i
.base_reg
->reg_type
.bitfield
.reg16
)))
8312 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8314 intel_syntax
? '[' : '(',
8316 expected_reg
->reg_name
,
8317 intel_syntax
? ']' : ')');
8324 as_bad (_("`%s' is not a valid %s expression"),
8325 operand_string
, kind
);
8330 if (addr_mode
!= CODE_16BIT
)
8332 /* 32-bit/64-bit checks. */
8334 && (addr_mode
== CODE_64BIT
8335 ? !i
.base_reg
->reg_type
.bitfield
.reg64
8336 : !i
.base_reg
->reg_type
.bitfield
.reg32
)
8338 || (i
.base_reg
->reg_num
8339 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
8341 && !i
.index_reg
->reg_type
.bitfield
.regxmm
8342 && !i
.index_reg
->reg_type
.bitfield
.regymm
8343 && !i
.index_reg
->reg_type
.bitfield
.regzmm
8344 && ((addr_mode
== CODE_64BIT
8345 ? !(i
.index_reg
->reg_type
.bitfield
.reg64
8346 || i
.index_reg
->reg_num
== RegRiz
)
8347 : !(i
.index_reg
->reg_type
.bitfield
.reg32
8348 || i
.index_reg
->reg_num
== RegEiz
))
8349 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
8354 /* 16-bit checks. */
8356 && (!i
.base_reg
->reg_type
.bitfield
.reg16
8357 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
8359 && (!i
.index_reg
->reg_type
.bitfield
.reg16
8360 || !i
.index_reg
->reg_type
.bitfield
.baseindex
8362 && i
.base_reg
->reg_num
< 6
8363 && i
.index_reg
->reg_num
>= 6
8364 && i
.log2_scale_factor
== 0))))
8371 /* Handle vector immediates. */
8374 RC_SAE_immediate (const char *imm_start
)
8376 unsigned int match_found
, j
;
8377 const char *pstr
= imm_start
;
8385 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
8387 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
8391 rc_op
.type
= RC_NamesTable
[j
].type
;
8392 rc_op
.operand
= this_operand
;
8393 i
.rounding
= &rc_op
;
8397 as_bad (_("duplicated `%s'"), imm_start
);
8400 pstr
+= RC_NamesTable
[j
].len
;
8410 as_bad (_("Missing '}': '%s'"), imm_start
);
8413 /* RC/SAE immediate string should contain nothing more. */;
8416 as_bad (_("Junk after '}': '%s'"), imm_start
);
8420 exp
= &im_expressions
[i
.imm_operands
++];
8421 i
.op
[this_operand
].imms
= exp
;
8423 exp
->X_op
= O_constant
;
8424 exp
->X_add_number
= 0;
8425 exp
->X_add_symbol
= (symbolS
*) 0;
8426 exp
->X_op_symbol
= (symbolS
*) 0;
8428 i
.types
[this_operand
].bitfield
.imm8
= 1;
8432 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8436 i386_att_operand (char *operand_string
)
8440 char *op_string
= operand_string
;
8442 if (is_space_char (*op_string
))
8445 /* We check for an absolute prefix (differentiating,
8446 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8447 if (*op_string
== ABSOLUTE_PREFIX
)
8450 if (is_space_char (*op_string
))
8452 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8455 /* Check if operand is a register. */
8456 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
8458 i386_operand_type temp
;
8460 /* Check for a segment override by searching for ':' after a
8461 segment register. */
8463 if (is_space_char (*op_string
))
8465 if (*op_string
== ':'
8466 && (r
->reg_type
.bitfield
.sreg2
8467 || r
->reg_type
.bitfield
.sreg3
))
8472 i
.seg
[i
.mem_operands
] = &es
;
8475 i
.seg
[i
.mem_operands
] = &cs
;
8478 i
.seg
[i
.mem_operands
] = &ss
;
8481 i
.seg
[i
.mem_operands
] = &ds
;
8484 i
.seg
[i
.mem_operands
] = &fs
;
8487 i
.seg
[i
.mem_operands
] = &gs
;
8491 /* Skip the ':' and whitespace. */
8493 if (is_space_char (*op_string
))
8496 if (!is_digit_char (*op_string
)
8497 && !is_identifier_char (*op_string
)
8498 && *op_string
!= '('
8499 && *op_string
!= ABSOLUTE_PREFIX
)
8501 as_bad (_("bad memory operand `%s'"), op_string
);
8504 /* Handle case of %es:*foo. */
8505 if (*op_string
== ABSOLUTE_PREFIX
)
8508 if (is_space_char (*op_string
))
8510 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8512 goto do_memory_reference
;
8515 /* Handle vector operations. */
8516 if (*op_string
== '{')
8518 op_string
= check_VecOperations (op_string
, NULL
);
8519 if (op_string
== NULL
)
8525 as_bad (_("junk `%s' after register"), op_string
);
8529 temp
.bitfield
.baseindex
= 0;
8530 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8532 i
.types
[this_operand
].bitfield
.unspecified
= 0;
8533 i
.op
[this_operand
].regs
= r
;
8536 else if (*op_string
== REGISTER_PREFIX
)
8538 as_bad (_("bad register name `%s'"), op_string
);
8541 else if (*op_string
== IMMEDIATE_PREFIX
)
8544 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
8546 as_bad (_("immediate operand illegal with absolute jump"));
8549 if (!i386_immediate (op_string
))
8552 else if (RC_SAE_immediate (operand_string
))
8554 /* If it is a RC or SAE immediate, do nothing. */
8557 else if (is_digit_char (*op_string
)
8558 || is_identifier_char (*op_string
)
8559 || *op_string
== '(')
8561 /* This is a memory reference of some sort. */
8564 /* Start and end of displacement string expression (if found). */
8565 char *displacement_string_start
;
8566 char *displacement_string_end
;
8569 do_memory_reference
:
8570 if ((i
.mem_operands
== 1
8571 && !current_templates
->start
->opcode_modifier
.isstring
)
8572 || i
.mem_operands
== 2)
8574 as_bad (_("too many memory references for `%s'"),
8575 current_templates
->start
->name
);
8579 /* Check for base index form. We detect the base index form by
8580 looking for an ')' at the end of the operand, searching
8581 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8583 base_string
= op_string
+ strlen (op_string
);
8585 /* Handle vector operations. */
8586 vop_start
= strchr (op_string
, '{');
8587 if (vop_start
&& vop_start
< base_string
)
8589 if (check_VecOperations (vop_start
, base_string
) == NULL
)
8591 base_string
= vop_start
;
8595 if (is_space_char (*base_string
))
8598 /* If we only have a displacement, set-up for it to be parsed later. */
8599 displacement_string_start
= op_string
;
8600 displacement_string_end
= base_string
+ 1;
8602 if (*base_string
== ')')
8605 unsigned int parens_balanced
= 1;
8606 /* We've already checked that the number of left & right ()'s are
8607 equal, so this loop will not be infinite. */
8611 if (*base_string
== ')')
8613 if (*base_string
== '(')
8616 while (parens_balanced
);
8618 temp_string
= base_string
;
8620 /* Skip past '(' and whitespace. */
8622 if (is_space_char (*base_string
))
8625 if (*base_string
== ','
8626 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
8629 displacement_string_end
= temp_string
;
8631 i
.types
[this_operand
].bitfield
.baseindex
= 1;
8635 base_string
= end_op
;
8636 if (is_space_char (*base_string
))
8640 /* There may be an index reg or scale factor here. */
8641 if (*base_string
== ',')
8644 if (is_space_char (*base_string
))
8647 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
8650 base_string
= end_op
;
8651 if (is_space_char (*base_string
))
8653 if (*base_string
== ',')
8656 if (is_space_char (*base_string
))
8659 else if (*base_string
!= ')')
8661 as_bad (_("expecting `,' or `)' "
8662 "after index register in `%s'"),
8667 else if (*base_string
== REGISTER_PREFIX
)
8669 end_op
= strchr (base_string
, ',');
8672 as_bad (_("bad register name `%s'"), base_string
);
8676 /* Check for scale factor. */
8677 if (*base_string
!= ')')
8679 char *end_scale
= i386_scale (base_string
);
8684 base_string
= end_scale
;
8685 if (is_space_char (*base_string
))
8687 if (*base_string
!= ')')
8689 as_bad (_("expecting `)' "
8690 "after scale factor in `%s'"),
8695 else if (!i
.index_reg
)
8697 as_bad (_("expecting index register or scale factor "
8698 "after `,'; got '%c'"),
8703 else if (*base_string
!= ')')
8705 as_bad (_("expecting `,' or `)' "
8706 "after base register in `%s'"),
8711 else if (*base_string
== REGISTER_PREFIX
)
8713 end_op
= strchr (base_string
, ',');
8716 as_bad (_("bad register name `%s'"), base_string
);
8721 /* If there's an expression beginning the operand, parse it,
8722 assuming displacement_string_start and
8723 displacement_string_end are meaningful. */
8724 if (displacement_string_start
!= displacement_string_end
)
8726 if (!i386_displacement (displacement_string_start
,
8727 displacement_string_end
))
8731 /* Special case for (%dx) while doing input/output op. */
8733 && operand_type_equal (&i
.base_reg
->reg_type
,
8734 ®16_inoutportreg
)
8736 && i
.log2_scale_factor
== 0
8737 && i
.seg
[i
.mem_operands
] == 0
8738 && !operand_type_check (i
.types
[this_operand
], disp
))
8740 i
.types
[this_operand
] = inoutportreg
;
8744 if (i386_index_check (operand_string
) == 0)
8746 i
.types
[this_operand
].bitfield
.mem
= 1;
8751 /* It's not a memory operand; argh! */
8752 as_bad (_("invalid char %s beginning operand %d `%s'"),
8753 output_invalid (*op_string
),
8758 return 1; /* Normal return. */
8761 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8762 that an rs_machine_dependent frag may reach. */
8765 i386_frag_max_var (fragS
*frag
)
8767 /* The only relaxable frags are for jumps.
8768 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8769 gas_assert (frag
->fr_type
== rs_machine_dependent
);
8770 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
8773 /* md_estimate_size_before_relax()
8775 Called just before relax() for rs_machine_dependent frags. The x86
8776 assembler uses these frags to handle variable size jump
8779 Any symbol that is now undefined will not become defined.
8780 Return the correct fr_subtype in the frag.
8781 Return the initial "guess for variable size of frag" to caller.
8782 The guess is actually the growth beyond the fixed part. Whatever
8783 we do to grow the fixed or variable part contributes to our
8787 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
8789 /* We've already got fragP->fr_subtype right; all we have to do is
8790 check for un-relaxable symbols. On an ELF system, we can't relax
8791 an externally visible symbol, because it may be overridden by a
8793 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
8794 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8796 && (S_IS_EXTERNAL (fragP
->fr_symbol
)
8797 || S_IS_WEAK (fragP
->fr_symbol
)
8798 || ((symbol_get_bfdsym (fragP
->fr_symbol
)->flags
8799 & BSF_GNU_INDIRECT_FUNCTION
))))
8801 #if defined (OBJ_COFF) && defined (TE_PE)
8802 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
8803 && S_IS_WEAK (fragP
->fr_symbol
))
8807 /* Symbol is undefined in this segment, or we need to keep a
8808 reloc so that weak symbols can be overridden. */
8809 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
8810 enum bfd_reloc_code_real reloc_type
;
8811 unsigned char *opcode
;
8814 if (fragP
->fr_var
!= NO_RELOC
)
8815 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
8817 reloc_type
= BFD_RELOC_16_PCREL
;
8819 reloc_type
= BFD_RELOC_32_PCREL
;
8821 old_fr_fix
= fragP
->fr_fix
;
8822 opcode
= (unsigned char *) fragP
->fr_opcode
;
8824 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
8827 /* Make jmp (0xeb) a (d)word displacement jump. */
8829 fragP
->fr_fix
+= size
;
8830 fix_new (fragP
, old_fr_fix
, size
,
8832 fragP
->fr_offset
, 1,
8838 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
8840 /* Negate the condition, and branch past an
8841 unconditional jump. */
8844 /* Insert an unconditional jump. */
8846 /* We added two extra opcode bytes, and have a two byte
8848 fragP
->fr_fix
+= 2 + 2;
8849 fix_new (fragP
, old_fr_fix
+ 2, 2,
8851 fragP
->fr_offset
, 1,
8858 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
8863 fixP
= fix_new (fragP
, old_fr_fix
, 1,
8865 fragP
->fr_offset
, 1,
8867 fixP
->fx_signed
= 1;
8871 /* This changes the byte-displacement jump 0x7N
8872 to the (d)word-displacement jump 0x0f,0x8N. */
8873 opcode
[1] = opcode
[0] + 0x10;
8874 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8875 /* We've added an opcode byte. */
8876 fragP
->fr_fix
+= 1 + size
;
8877 fix_new (fragP
, old_fr_fix
+ 1, size
,
8879 fragP
->fr_offset
, 1,
8884 BAD_CASE (fragP
->fr_subtype
);
8888 return fragP
->fr_fix
- old_fr_fix
;
8891 /* Guess size depending on current relax state. Initially the relax
8892 state will correspond to a short jump and we return 1, because
8893 the variable part of the frag (the branch offset) is one byte
8894 long. However, we can relax a section more than once and in that
8895 case we must either set fr_subtype back to the unrelaxed state,
8896 or return the value for the appropriate branch. */
8897 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
8900 /* Called after relax() is finished.
8902 In: Address of frag.
8903 fr_type == rs_machine_dependent.
8904 fr_subtype is what the address relaxed to.
8906 Out: Any fixSs and constants are set up.
8907 Caller will turn frag into a ".space 0". */
8910 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
8913 unsigned char *opcode
;
8914 unsigned char *where_to_put_displacement
= NULL
;
8915 offsetT target_address
;
8916 offsetT opcode_address
;
8917 unsigned int extension
= 0;
8918 offsetT displacement_from_opcode_start
;
8920 opcode
= (unsigned char *) fragP
->fr_opcode
;
8922 /* Address we want to reach in file space. */
8923 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
8925 /* Address opcode resides at in file space. */
8926 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
8928 /* Displacement from opcode start to fill into instruction. */
8929 displacement_from_opcode_start
= target_address
- opcode_address
;
8931 if ((fragP
->fr_subtype
& BIG
) == 0)
8933 /* Don't have to change opcode. */
8934 extension
= 1; /* 1 opcode + 1 displacement */
8935 where_to_put_displacement
= &opcode
[1];
8939 if (no_cond_jump_promotion
8940 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
8941 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
8942 _("long jump required"));
8944 switch (fragP
->fr_subtype
)
8946 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
8947 extension
= 4; /* 1 opcode + 4 displacement */
8949 where_to_put_displacement
= &opcode
[1];
8952 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
8953 extension
= 2; /* 1 opcode + 2 displacement */
8955 where_to_put_displacement
= &opcode
[1];
8958 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
8959 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
8960 extension
= 5; /* 2 opcode + 4 displacement */
8961 opcode
[1] = opcode
[0] + 0x10;
8962 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8963 where_to_put_displacement
= &opcode
[2];
8966 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
8967 extension
= 3; /* 2 opcode + 2 displacement */
8968 opcode
[1] = opcode
[0] + 0x10;
8969 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8970 where_to_put_displacement
= &opcode
[2];
8973 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
8978 where_to_put_displacement
= &opcode
[3];
8982 BAD_CASE (fragP
->fr_subtype
);
8987 /* If size if less then four we are sure that the operand fits,
8988 but if it's 4, then it could be that the displacement is larger
8990 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
8992 && ((addressT
) (displacement_from_opcode_start
- extension
8993 + ((addressT
) 1 << 31))
8994 > (((addressT
) 2 << 31) - 1)))
8996 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
8997 _("jump target out of range"));
8998 /* Make us emit 0. */
8999 displacement_from_opcode_start
= extension
;
9001 /* Now put displacement after opcode. */
9002 md_number_to_chars ((char *) where_to_put_displacement
,
9003 (valueT
) (displacement_from_opcode_start
- extension
),
9004 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9005 fragP
->fr_fix
+= extension
;
9008 /* Apply a fixup (fixP) to segment data, once it has been determined
9009 by our caller that we have all the info we need to fix it up.
9011 Parameter valP is the pointer to the value of the bits.
9013 On the 386, immediates, displacements, and data pointers are all in
9014 the same (little-endian) format, so we don't need to care about which
9018 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9020 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9021 valueT value
= *valP
;
9023 #if !defined (TE_Mach)
9026 switch (fixP
->fx_r_type
)
9032 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9035 case BFD_RELOC_X86_64_32S
:
9036 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9039 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9042 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9047 if (fixP
->fx_addsy
!= NULL
9048 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9049 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9050 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9051 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
9052 && !use_rela_relocations
)
9054 /* This is a hack. There should be a better way to handle this.
9055 This covers for the fact that bfd_install_relocation will
9056 subtract the current location (for partial_inplace, PC relative
9057 relocations); see more below. */
9061 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9064 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9066 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9069 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9072 || (symbol_section_p (fixP
->fx_addsy
)
9073 && sym_seg
!= absolute_section
))
9074 && !generic_force_reloc (fixP
))
9076 /* Yes, we add the values in twice. This is because
9077 bfd_install_relocation subtracts them out again. I think
9078 bfd_install_relocation is broken, but I don't dare change
9080 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9084 #if defined (OBJ_COFF) && defined (TE_PE)
9085 /* For some reason, the PE format does not store a
9086 section address offset for a PC relative symbol. */
9087 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9088 || S_IS_WEAK (fixP
->fx_addsy
))
9089 value
+= md_pcrel_from (fixP
);
9092 #if defined (OBJ_COFF) && defined (TE_PE)
9093 if (fixP
->fx_addsy
!= NULL
9094 && S_IS_WEAK (fixP
->fx_addsy
)
9095 /* PR 16858: Do not modify weak function references. */
9096 && ! fixP
->fx_pcrel
)
9098 #if !defined (TE_PEP)
9099 /* For x86 PE weak function symbols are neither PC-relative
9100 nor do they set S_IS_FUNCTION. So the only reliable way
9101 to detect them is to check the flags of their containing
9103 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
9104 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
9108 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9112 /* Fix a few things - the dynamic linker expects certain values here,
9113 and we must not disappoint it. */
9114 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9115 if (IS_ELF
&& fixP
->fx_addsy
)
9116 switch (fixP
->fx_r_type
)
9118 case BFD_RELOC_386_PLT32
:
9119 case BFD_RELOC_X86_64_PLT32
:
9120 /* Make the jump instruction point to the address of the operand. At
9121 runtime we merely add the offset to the actual PLT entry. */
9125 case BFD_RELOC_386_TLS_GD
:
9126 case BFD_RELOC_386_TLS_LDM
:
9127 case BFD_RELOC_386_TLS_IE_32
:
9128 case BFD_RELOC_386_TLS_IE
:
9129 case BFD_RELOC_386_TLS_GOTIE
:
9130 case BFD_RELOC_386_TLS_GOTDESC
:
9131 case BFD_RELOC_X86_64_TLSGD
:
9132 case BFD_RELOC_X86_64_TLSLD
:
9133 case BFD_RELOC_X86_64_GOTTPOFF
:
9134 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9135 value
= 0; /* Fully resolved at runtime. No addend. */
9137 case BFD_RELOC_386_TLS_LE
:
9138 case BFD_RELOC_386_TLS_LDO_32
:
9139 case BFD_RELOC_386_TLS_LE_32
:
9140 case BFD_RELOC_X86_64_DTPOFF32
:
9141 case BFD_RELOC_X86_64_DTPOFF64
:
9142 case BFD_RELOC_X86_64_TPOFF32
:
9143 case BFD_RELOC_X86_64_TPOFF64
:
9144 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9147 case BFD_RELOC_386_TLS_DESC_CALL
:
9148 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9149 value
= 0; /* Fully resolved at runtime. No addend. */
9150 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9154 case BFD_RELOC_386_GOT32
:
9155 case BFD_RELOC_X86_64_GOT32
:
9156 value
= 0; /* Fully resolved at runtime. No addend. */
9159 case BFD_RELOC_VTABLE_INHERIT
:
9160 case BFD_RELOC_VTABLE_ENTRY
:
9167 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9169 #endif /* !defined (TE_Mach) */
9171 /* Are we finished with this relocation now? */
9172 if (fixP
->fx_addsy
== NULL
)
9174 #if defined (OBJ_COFF) && defined (TE_PE)
9175 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9178 /* Remember value for tc_gen_reloc. */
9179 fixP
->fx_addnumber
= value
;
9180 /* Clear out the frag for now. */
9184 else if (use_rela_relocations
)
9186 fixP
->fx_no_overflow
= 1;
9187 /* Remember value for tc_gen_reloc. */
9188 fixP
->fx_addnumber
= value
;
9192 md_number_to_chars (p
, value
, fixP
->fx_size
);
9196 md_atof (int type
, char *litP
, int *sizeP
)
9198 /* This outputs the LITTLENUMs in REVERSE order;
9199 in accord with the bigendian 386. */
9200 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Buffer for output_invalid: large enough for "'c'" or "(0xNN)".  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for an error message: printable characters are
   quoted, everything else is shown as "(0xNN)".  Returns a pointer to
   a static buffer (not reentrant, fine for diagnostics).  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
9217 /* REG_STRING starts *before* REGISTER_PREFIX. */
9219 static const reg_entry
*
9220 parse_real_register (char *reg_string
, char **end_op
)
9222 char *s
= reg_string
;
9224 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
9227 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9228 if (*s
== REGISTER_PREFIX
)
9231 if (is_space_char (*s
))
9235 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
9237 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
9238 return (const reg_entry
*) NULL
;
9242 /* For naked regs, make sure that we are not dealing with an identifier.
9243 This prevents confusing an identifier like `eax_var' with register
9245 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
9246 return (const reg_entry
*) NULL
;
9250 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
9252 /* Handle floating point regs, allowing spaces in the (i) part. */
9253 if (r
== i386_regtab
/* %st is first entry of table */)
9255 if (is_space_char (*s
))
9260 if (is_space_char (*s
))
9262 if (*s
>= '0' && *s
<= '7')
9266 if (is_space_char (*s
))
9271 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
9276 /* We have "%st(" then garbage. */
9277 return (const reg_entry
*) NULL
;
9281 if (r
== NULL
|| allow_pseudo_reg
)
9284 if (operand_type_all_zero (&r
->reg_type
))
9285 return (const reg_entry
*) NULL
;
9287 if ((r
->reg_type
.bitfield
.reg32
9288 || r
->reg_type
.bitfield
.sreg3
9289 || r
->reg_type
.bitfield
.control
9290 || r
->reg_type
.bitfield
.debug
9291 || r
->reg_type
.bitfield
.test
)
9292 && !cpu_arch_flags
.bitfield
.cpui386
)
9293 return (const reg_entry
*) NULL
;
9295 if (r
->reg_type
.bitfield
.floatreg
9296 && !cpu_arch_flags
.bitfield
.cpu8087
9297 && !cpu_arch_flags
.bitfield
.cpu287
9298 && !cpu_arch_flags
.bitfield
.cpu387
)
9299 return (const reg_entry
*) NULL
;
9301 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
9302 return (const reg_entry
*) NULL
;
9304 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpusse
)
9305 return (const reg_entry
*) NULL
;
9307 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuavx
)
9308 return (const reg_entry
*) NULL
;
9310 if ((r
->reg_type
.bitfield
.regzmm
|| r
->reg_type
.bitfield
.regmask
)
9311 && !cpu_arch_flags
.bitfield
.cpuavx512f
)
9312 return (const reg_entry
*) NULL
;
9314 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9315 if (!allow_index_reg
9316 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
9317 return (const reg_entry
*) NULL
;
9319 /* Upper 16 vector register is only available with VREX in 64bit
9321 if ((r
->reg_flags
& RegVRex
))
9323 if (!cpu_arch_flags
.bitfield
.cpuvrex
9324 || flag_code
!= CODE_64BIT
)
9325 return (const reg_entry
*) NULL
;
9330 if (((r
->reg_flags
& (RegRex64
| RegRex
))
9331 || r
->reg_type
.bitfield
.reg64
)
9332 && (!cpu_arch_flags
.bitfield
.cpulm
9333 || !operand_type_equal (&r
->reg_type
, &control
))
9334 && flag_code
!= CODE_64BIT
)
9335 return (const reg_entry
*) NULL
;
9337 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
9338 return (const reg_entry
*) NULL
;
9343 /* REG_STRING starts *before* REGISTER_PREFIX. */
9345 static const reg_entry
*
9346 parse_register (char *reg_string
, char **end_op
)
9350 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
9351 r
= parse_real_register (reg_string
, end_op
);
9356 char *save
= input_line_pointer
;
9360 input_line_pointer
= reg_string
;
9361 c
= get_symbol_end ();
9362 symbolP
= symbol_find (reg_string
);
9363 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
9365 const expressionS
*e
= symbol_get_value_expression (symbolP
);
9367 know (e
->X_op
== O_register
);
9368 know (e
->X_add_number
>= 0
9369 && (valueT
) e
->X_add_number
< i386_regtab_size
);
9370 r
= i386_regtab
+ e
->X_add_number
;
9371 if ((r
->reg_flags
& RegVRex
))
9373 *end_op
= input_line_pointer
;
9375 *input_line_pointer
= c
;
9376 input_line_pointer
= save
;
9382 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
9385 char *end
= input_line_pointer
;
9388 r
= parse_register (name
, &input_line_pointer
);
9389 if (r
&& end
<= input_line_pointer
)
9391 *nextcharP
= *input_line_pointer
;
9392 *input_line_pointer
= 0;
9393 e
->X_op
= O_register
;
9394 e
->X_add_number
= r
- i386_regtab
;
9397 input_line_pointer
= end
;
9399 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
9403 md_operand (expressionS
*e
)
9408 switch (*input_line_pointer
)
9410 case REGISTER_PREFIX
:
9411 r
= parse_real_register (input_line_pointer
, &end
);
9414 e
->X_op
= O_register
;
9415 e
->X_add_number
= r
- i386_regtab
;
9416 input_line_pointer
= end
;
9421 gas_assert (intel_syntax
);
9422 end
= input_line_pointer
++;
9424 if (*input_line_pointer
== ']')
9426 ++input_line_pointer
;
9427 e
->X_op_symbol
= make_expr_symbol (e
);
9428 e
->X_add_symbol
= NULL
;
9429 e
->X_add_number
= 0;
9435 input_line_pointer
= end
;
9442 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9443 const char *md_shortopts
= "kVQ:sqn";
9445 const char *md_shortopts
= "qn";
9448 #define OPTION_32 (OPTION_MD_BASE + 0)
9449 #define OPTION_64 (OPTION_MD_BASE + 1)
9450 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9451 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9452 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9453 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9454 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9455 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9456 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9457 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9458 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9459 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9460 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9461 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9462 #define OPTION_X32 (OPTION_MD_BASE + 14)
9463 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9464 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9465 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9466 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9467 #define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9468 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9470 struct option md_longopts
[] =
9472 {"32", no_argument
, NULL
, OPTION_32
},
9473 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9474 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9475 {"64", no_argument
, NULL
, OPTION_64
},
9477 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9478 {"x32", no_argument
, NULL
, OPTION_X32
},
9480 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
9481 {"march", required_argument
, NULL
, OPTION_MARCH
},
9482 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
9483 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
9484 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
9485 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
9486 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
9487 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
9488 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
9489 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
9490 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
9491 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
9492 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
9493 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
9494 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
9495 # if defined (TE_PE) || defined (TE_PEP)
9496 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
9498 {"momit-lock-prefix", required_argument
, NULL
, OPTION_OMIT_LOCK_PREFIX
},
9499 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
9500 {NULL
, no_argument
, NULL
, 0}
9502 size_t md_longopts_size
= sizeof (md_longopts
);
9505 md_parse_option (int c
, char *arg
)
9513 optimize_align_code
= 0;
9520 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9521 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9522 should be emitted or not. FIXME: Not implemented. */
9526 /* -V: SVR4 argument to print version ID. */
9528 print_version_id ();
9531 /* -k: Ignore for FreeBSD compatibility. */
9536 /* -s: On i386 Solaris, this tells the native assembler to use
9537 .stab instead of .stab.excl. We always use .stab anyhow. */
9540 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9541 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9544 const char **list
, **l
;
9546 list
= bfd_target_list ();
9547 for (l
= list
; *l
!= NULL
; l
++)
9548 if (CONST_STRNEQ (*l
, "elf64-x86-64")
9549 || strcmp (*l
, "coff-x86-64") == 0
9550 || strcmp (*l
, "pe-x86-64") == 0
9551 || strcmp (*l
, "pei-x86-64") == 0
9552 || strcmp (*l
, "mach-o-x86-64") == 0)
9554 default_arch
= "x86_64";
9558 as_fatal (_("no compiled in support for x86_64"));
9564 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9568 const char **list
, **l
;
9570 list
= bfd_target_list ();
9571 for (l
= list
; *l
!= NULL
; l
++)
9572 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
9574 default_arch
= "x86_64:32";
9578 as_fatal (_("no compiled in support for 32bit x86_64"));
9582 as_fatal (_("32bit x86_64 is only supported for ELF"));
9587 default_arch
= "i386";
9591 #ifdef SVR4_COMMENT_CHARS
9596 n
= (char *) xmalloc (strlen (i386_comment_chars
) + 1);
9598 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
9602 i386_comment_chars
= n
;
9608 arch
= xstrdup (arg
);
9612 as_fatal (_("invalid -march= option: `%s'"), arg
);
9613 next
= strchr (arch
, '+');
9616 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9618 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
9621 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
9624 cpu_arch_name
= cpu_arch
[j
].name
;
9625 cpu_sub_arch_name
= NULL
;
9626 cpu_arch_flags
= cpu_arch
[j
].flags
;
9627 cpu_arch_isa
= cpu_arch
[j
].type
;
9628 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
9629 if (!cpu_arch_tune_set
)
9631 cpu_arch_tune
= cpu_arch_isa
;
9632 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
9636 else if (*cpu_arch
[j
].name
== '.'
9637 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
9639 /* ISA entension. */
9640 i386_cpu_flags flags
;
9642 if (!cpu_arch
[j
].negated
)
9643 flags
= cpu_flags_or (cpu_arch_flags
,
9646 flags
= cpu_flags_and_not (cpu_arch_flags
,
9648 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
9650 if (cpu_sub_arch_name
)
9652 char *name
= cpu_sub_arch_name
;
9653 cpu_sub_arch_name
= concat (name
,
9655 (const char *) NULL
);
9659 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
9660 cpu_arch_flags
= flags
;
9661 cpu_arch_isa_flags
= flags
;
9667 if (j
>= ARRAY_SIZE (cpu_arch
))
9668 as_fatal (_("invalid -march= option: `%s'"), arg
);
9672 while (next
!= NULL
);
9677 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9678 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9680 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
9682 cpu_arch_tune_set
= 1;
9683 cpu_arch_tune
= cpu_arch
[j
].type
;
9684 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
9688 if (j
>= ARRAY_SIZE (cpu_arch
))
9689 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9692 case OPTION_MMNEMONIC
:
9693 if (strcasecmp (arg
, "att") == 0)
9695 else if (strcasecmp (arg
, "intel") == 0)
9698 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
9701 case OPTION_MSYNTAX
:
9702 if (strcasecmp (arg
, "att") == 0)
9704 else if (strcasecmp (arg
, "intel") == 0)
9707 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
9710 case OPTION_MINDEX_REG
:
9711 allow_index_reg
= 1;
9714 case OPTION_MNAKED_REG
:
9715 allow_naked_reg
= 1;
9718 case OPTION_MOLD_GCC
:
9722 case OPTION_MSSE2AVX
:
9726 case OPTION_MSSE_CHECK
:
9727 if (strcasecmp (arg
, "error") == 0)
9728 sse_check
= check_error
;
9729 else if (strcasecmp (arg
, "warning") == 0)
9730 sse_check
= check_warning
;
9731 else if (strcasecmp (arg
, "none") == 0)
9732 sse_check
= check_none
;
9734 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
9737 case OPTION_MOPERAND_CHECK
:
9738 if (strcasecmp (arg
, "error") == 0)
9739 operand_check
= check_error
;
9740 else if (strcasecmp (arg
, "warning") == 0)
9741 operand_check
= check_warning
;
9742 else if (strcasecmp (arg
, "none") == 0)
9743 operand_check
= check_none
;
9745 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
9748 case OPTION_MAVXSCALAR
:
9749 if (strcasecmp (arg
, "128") == 0)
9751 else if (strcasecmp (arg
, "256") == 0)
9754 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
9757 case OPTION_MADD_BND_PREFIX
:
9761 case OPTION_MEVEXLIG
:
9762 if (strcmp (arg
, "128") == 0)
9764 else if (strcmp (arg
, "256") == 0)
9766 else if (strcmp (arg
, "512") == 0)
9769 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
9772 case OPTION_MEVEXRCIG
:
9773 if (strcmp (arg
, "rne") == 0)
9775 else if (strcmp (arg
, "rd") == 0)
9777 else if (strcmp (arg
, "ru") == 0)
9779 else if (strcmp (arg
, "rz") == 0)
9782 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
9785 case OPTION_MEVEXWIG
:
9786 if (strcmp (arg
, "0") == 0)
9788 else if (strcmp (arg
, "1") == 0)
9791 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
9794 # if defined (TE_PE) || defined (TE_PEP)
9795 case OPTION_MBIG_OBJ
:
9800 case OPTION_OMIT_LOCK_PREFIX
:
9801 if (strcasecmp (arg
, "yes") == 0)
9802 omit_lock_prefix
= 1;
9803 else if (strcasecmp (arg
, "no") == 0)
9804 omit_lock_prefix
= 0;
9806 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
9815 #define MESSAGE_TEMPLATE \
9819 show_arch (FILE *stream
, int ext
, int check
)
9821 static char message
[] = MESSAGE_TEMPLATE
;
9822 char *start
= message
+ 27;
9824 int size
= sizeof (MESSAGE_TEMPLATE
);
9831 left
= size
- (start
- message
);
9832 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9834 /* Should it be skipped? */
9835 if (cpu_arch
[j
].skip
)
9838 name
= cpu_arch
[j
].name
;
9839 len
= cpu_arch
[j
].len
;
9842 /* It is an extension. Skip if we aren't asked to show it. */
9853 /* It is an processor. Skip if we show only extension. */
9856 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
9858 /* It is an impossible processor - skip. */
9862 /* Reserve 2 spaces for ", " or ",\0" */
9865 /* Check if there is any room. */
9873 p
= mempcpy (p
, name
, len
);
9877 /* Output the current message now and start a new one. */
9880 fprintf (stream
, "%s\n", message
);
9882 left
= size
- (start
- message
) - len
- 2;
9884 gas_assert (left
>= 0);
9886 p
= mempcpy (p
, name
, len
);
9891 fprintf (stream
, "%s\n", message
);
9895 md_show_usage (FILE *stream
)
9897 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9898 fprintf (stream
, _("\
9900 -V print assembler version number\n\
9903 fprintf (stream
, _("\
9904 -n Do not optimize code alignment\n\
9905 -q quieten some warnings\n"));
9906 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9907 fprintf (stream
, _("\
9910 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9911 || defined (TE_PE) || defined (TE_PEP))
9912 fprintf (stream
, _("\
9913 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
9915 #ifdef SVR4_COMMENT_CHARS
9916 fprintf (stream
, _("\
9917 --divide do not treat `/' as a comment character\n"));
9919 fprintf (stream
, _("\
9920 --divide ignored\n"));
9922 fprintf (stream
, _("\
9923 -march=CPU[,+EXTENSION...]\n\
9924 generate code for CPU and EXTENSION, CPU is one of:\n"));
9925 show_arch (stream
, 0, 1);
9926 fprintf (stream
, _("\
9927 EXTENSION is combination of:\n"));
9928 show_arch (stream
, 1, 0);
9929 fprintf (stream
, _("\
9930 -mtune=CPU optimize for CPU, CPU is one of:\n"));
9931 show_arch (stream
, 0, 0);
9932 fprintf (stream
, _("\
9933 -msse2avx encode SSE instructions with VEX prefix\n"));
9934 fprintf (stream
, _("\
9935 -msse-check=[none|error|warning]\n\
9936 check SSE instructions\n"));
9937 fprintf (stream
, _("\
9938 -moperand-check=[none|error|warning]\n\
9939 check operand combinations for validity\n"));
9940 fprintf (stream
, _("\
9941 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
9943 fprintf (stream
, _("\
9944 -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
9946 fprintf (stream
, _("\
9947 -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
9948 for EVEX.W bit ignored instructions\n"));
9949 fprintf (stream
, _("\
9950 -mevexrcig=[rne|rd|ru|rz]\n\
9951 encode EVEX instructions with specific EVEX.RC value\n\
9952 for SAE-only ignored instructions\n"));
9953 fprintf (stream
, _("\
9954 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
9955 fprintf (stream
, _("\
9956 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
9957 fprintf (stream
, _("\
9958 -mindex-reg support pseudo index registers\n"));
9959 fprintf (stream
, _("\
9960 -mnaked-reg don't require `%%' prefix for registers\n"));
9961 fprintf (stream
, _("\
9962 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
9963 fprintf (stream
, _("\
9964 -madd-bnd-prefix add BND prefix for all valid branches\n"));
9965 # if defined (TE_PE) || defined (TE_PEP)
9966 fprintf (stream
, _("\
9967 -mbig-obj generate big object files\n"));
9969 fprintf (stream
, _("\
9970 -momit-lock-prefix=[no|yes]\n\
9971 strip all lock prefixes\n"));
9974 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
9975 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9976 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9978 /* Pick the target format to use. */
9981 i386_target_format (void)
9983 if (!strncmp (default_arch
, "x86_64", 6))
9985 update_code_flag (CODE_64BIT
, 1);
9986 if (default_arch
[6] == '\0')
9987 x86_elf_abi
= X86_64_ABI
;
9989 x86_elf_abi
= X86_64_X32_ABI
;
9991 else if (!strcmp (default_arch
, "i386"))
9992 update_code_flag (CODE_32BIT
, 1);
9994 as_fatal (_("unknown architecture"));
9996 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
9997 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
9998 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
9999 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
10001 switch (OUTPUT_FLAVOR
)
10003 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
10004 case bfd_target_aout_flavour
:
10005 return AOUT_TARGET_FORMAT
;
10007 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
10008 # if defined (TE_PE) || defined (TE_PEP)
10009 case bfd_target_coff_flavour
:
10010 if (flag_code
== CODE_64BIT
)
10011 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
10014 # elif defined (TE_GO32)
10015 case bfd_target_coff_flavour
:
10016 return "coff-go32";
10018 case bfd_target_coff_flavour
:
10019 return "coff-i386";
10022 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10023 case bfd_target_elf_flavour
:
10025 const char *format
;
10027 switch (x86_elf_abi
)
10030 format
= ELF_TARGET_FORMAT
;
10033 use_rela_relocations
= 1;
10035 format
= ELF_TARGET_FORMAT64
;
10037 case X86_64_X32_ABI
:
10038 use_rela_relocations
= 1;
10040 disallow_64bit_reloc
= 1;
10041 format
= ELF_TARGET_FORMAT32
;
10044 if (cpu_arch_isa
== PROCESSOR_L1OM
)
10046 if (x86_elf_abi
!= X86_64_ABI
)
10047 as_fatal (_("Intel L1OM is 64bit only"));
10048 return ELF_TARGET_L1OM_FORMAT
;
10050 if (cpu_arch_isa
== PROCESSOR_K1OM
)
10052 if (x86_elf_abi
!= X86_64_ABI
)
10053 as_fatal (_("Intel K1OM is 64bit only"));
10054 return ELF_TARGET_K1OM_FORMAT
;
10060 #if defined (OBJ_MACH_O)
10061 case bfd_target_mach_o_flavour
:
10062 if (flag_code
== CODE_64BIT
)
10064 use_rela_relocations
= 1;
10066 return "mach-o-x86-64";
10069 return "mach-o-i386";
10077 #endif /* OBJ_MAYBE_ more than one */
10079 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
10081 i386_elf_emit_arch_note (void)
10083 if (IS_ELF
&& cpu_arch_name
!= NULL
)
10086 asection
*seg
= now_seg
;
10087 subsegT subseg
= now_subseg
;
10088 Elf_Internal_Note i_note
;
10089 Elf_External_Note e_note
;
10090 asection
*note_secp
;
10093 /* Create the .note section. */
10094 note_secp
= subseg_new (".note", 0);
10095 bfd_set_section_flags (stdoutput
,
10097 SEC_HAS_CONTENTS
| SEC_READONLY
);
10099 /* Process the arch string. */
10100 len
= strlen (cpu_arch_name
);
10102 i_note
.namesz
= len
+ 1;
10104 i_note
.type
= NT_ARCH
;
10105 p
= frag_more (sizeof (e_note
.namesz
));
10106 md_number_to_chars (p
, (valueT
) i_note
.namesz
, sizeof (e_note
.namesz
));
10107 p
= frag_more (sizeof (e_note
.descsz
));
10108 md_number_to_chars (p
, (valueT
) i_note
.descsz
, sizeof (e_note
.descsz
));
10109 p
= frag_more (sizeof (e_note
.type
));
10110 md_number_to_chars (p
, (valueT
) i_note
.type
, sizeof (e_note
.type
));
10111 p
= frag_more (len
+ 1);
10112 strcpy (p
, cpu_arch_name
);
10114 frag_align (2, 0, 0);
10116 subseg_set (seg
, subseg
);
10122 md_undefined_symbol (char *name
)
10124 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
10125 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
10126 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
10127 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
10131 if (symbol_find (name
))
10132 as_bad (_("GOT already in symbol table"));
10133 GOT_symbol
= symbol_new (name
, undefined_section
,
10134 (valueT
) 0, &zero_address_frag
);
10141 /* Round up a section size to the appropriate boundary. */
10144 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
10146 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10147 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
10149 /* For a.out, force the section size to be aligned. If we don't do
10150 this, BFD will align it for us, but it will not write out the
10151 final bytes of the section. This may be a bug in BFD, but it is
10152 easier to fix it here since that is how the other a.out targets
10156 align
= bfd_get_section_alignment (stdoutput
, segment
);
10157 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
10164 /* On the i386, PC-relative offsets are relative to the start of the
10165 next instruction. That is, the address of the offset, plus its
10166 size, since the offset is always the last part of the insn. */
10169 md_pcrel_from (fixS
*fixP
)
10171 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10177 s_bss (int ignore ATTRIBUTE_UNUSED
)
10181 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10183 obj_elf_section_change_hook ();
10185 temp
= get_absolute_expression ();
10186 subseg_set (bss_section
, (subsegT
) temp
);
10187 demand_empty_rest_of_line ();
10193 i386_validate_fix (fixS
*fixp
)
10195 if (fixp
->fx_subsy
&& fixp
->fx_subsy
== GOT_symbol
)
10197 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
10201 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
10206 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
10208 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
10210 fixp
->fx_subsy
= 0;
10215 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
10218 bfd_reloc_code_real_type code
;
10220 switch (fixp
->fx_r_type
)
10222 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10223 case BFD_RELOC_SIZE32
:
10224 case BFD_RELOC_SIZE64
:
10225 if (S_IS_DEFINED (fixp
->fx_addsy
)
10226 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
10228 /* Resolve size relocation against local symbol to size of
10229 the symbol plus addend. */
10230 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
10231 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
10232 && !fits_in_unsigned_long (value
))
10233 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10234 _("symbol size computation overflow"));
10235 fixp
->fx_addsy
= NULL
;
10236 fixp
->fx_subsy
= NULL
;
10237 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
10242 case BFD_RELOC_X86_64_PLT32
:
10243 case BFD_RELOC_X86_64_GOT32
:
10244 case BFD_RELOC_X86_64_GOTPCREL
:
10245 case BFD_RELOC_386_PLT32
:
10246 case BFD_RELOC_386_GOT32
:
10247 case BFD_RELOC_386_GOTOFF
:
10248 case BFD_RELOC_386_GOTPC
:
10249 case BFD_RELOC_386_TLS_GD
:
10250 case BFD_RELOC_386_TLS_LDM
:
10251 case BFD_RELOC_386_TLS_LDO_32
:
10252 case BFD_RELOC_386_TLS_IE_32
:
10253 case BFD_RELOC_386_TLS_IE
:
10254 case BFD_RELOC_386_TLS_GOTIE
:
10255 case BFD_RELOC_386_TLS_LE_32
:
10256 case BFD_RELOC_386_TLS_LE
:
10257 case BFD_RELOC_386_TLS_GOTDESC
:
10258 case BFD_RELOC_386_TLS_DESC_CALL
:
10259 case BFD_RELOC_X86_64_TLSGD
:
10260 case BFD_RELOC_X86_64_TLSLD
:
10261 case BFD_RELOC_X86_64_DTPOFF32
:
10262 case BFD_RELOC_X86_64_DTPOFF64
:
10263 case BFD_RELOC_X86_64_GOTTPOFF
:
10264 case BFD_RELOC_X86_64_TPOFF32
:
10265 case BFD_RELOC_X86_64_TPOFF64
:
10266 case BFD_RELOC_X86_64_GOTOFF64
:
10267 case BFD_RELOC_X86_64_GOTPC32
:
10268 case BFD_RELOC_X86_64_GOT64
:
10269 case BFD_RELOC_X86_64_GOTPCREL64
:
10270 case BFD_RELOC_X86_64_GOTPC64
:
10271 case BFD_RELOC_X86_64_GOTPLT64
:
10272 case BFD_RELOC_X86_64_PLTOFF64
:
10273 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10274 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10275 case BFD_RELOC_RVA
:
10276 case BFD_RELOC_VTABLE_ENTRY
:
10277 case BFD_RELOC_VTABLE_INHERIT
:
10279 case BFD_RELOC_32_SECREL
:
10281 code
= fixp
->fx_r_type
;
10283 case BFD_RELOC_X86_64_32S
:
10284 if (!fixp
->fx_pcrel
)
10286 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10287 code
= fixp
->fx_r_type
;
10291 if (fixp
->fx_pcrel
)
10293 switch (fixp
->fx_size
)
10296 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10297 _("can not do %d byte pc-relative relocation"),
10299 code
= BFD_RELOC_32_PCREL
;
10301 case 1: code
= BFD_RELOC_8_PCREL
; break;
10302 case 2: code
= BFD_RELOC_16_PCREL
; break;
10303 case 4: code
= BFD_RELOC_32_PCREL
; break;
10305 case 8: code
= BFD_RELOC_64_PCREL
; break;
10311 switch (fixp
->fx_size
)
10314 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10315 _("can not do %d byte relocation"),
10317 code
= BFD_RELOC_32
;
10319 case 1: code
= BFD_RELOC_8
; break;
10320 case 2: code
= BFD_RELOC_16
; break;
10321 case 4: code
= BFD_RELOC_32
; break;
10323 case 8: code
= BFD_RELOC_64
; break;
10330 if ((code
== BFD_RELOC_32
10331 || code
== BFD_RELOC_32_PCREL
10332 || code
== BFD_RELOC_X86_64_32S
)
10334 && fixp
->fx_addsy
== GOT_symbol
)
10337 code
= BFD_RELOC_386_GOTPC
;
10339 code
= BFD_RELOC_X86_64_GOTPC32
;
10341 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
10343 && fixp
->fx_addsy
== GOT_symbol
)
10345 code
= BFD_RELOC_X86_64_GOTPC64
;
10348 rel
= (arelent
*) xmalloc (sizeof (arelent
));
10349 rel
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
10350 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
10352 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
10354 if (!use_rela_relocations
)
10356 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10357 vtable entry to be used in the relocation's section offset. */
10358 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
10359 rel
->address
= fixp
->fx_offset
;
10360 #if defined (OBJ_COFF) && defined (TE_PE)
10361 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
10362 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
10367 /* Use the rela in 64bit mode. */
10370 if (disallow_64bit_reloc
)
10373 case BFD_RELOC_X86_64_DTPOFF64
:
10374 case BFD_RELOC_X86_64_TPOFF64
:
10375 case BFD_RELOC_64_PCREL
:
10376 case BFD_RELOC_X86_64_GOTOFF64
:
10377 case BFD_RELOC_X86_64_GOT64
:
10378 case BFD_RELOC_X86_64_GOTPCREL64
:
10379 case BFD_RELOC_X86_64_GOTPC64
:
10380 case BFD_RELOC_X86_64_GOTPLT64
:
10381 case BFD_RELOC_X86_64_PLTOFF64
:
10382 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10383 _("cannot represent relocation type %s in x32 mode"),
10384 bfd_get_reloc_code_name (code
));
10390 if (!fixp
->fx_pcrel
)
10391 rel
->addend
= fixp
->fx_offset
;
10395 case BFD_RELOC_X86_64_PLT32
:
10396 case BFD_RELOC_X86_64_GOT32
:
10397 case BFD_RELOC_X86_64_GOTPCREL
:
10398 case BFD_RELOC_X86_64_TLSGD
:
10399 case BFD_RELOC_X86_64_TLSLD
:
10400 case BFD_RELOC_X86_64_GOTTPOFF
:
10401 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10402 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10403 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
10406 rel
->addend
= (section
->vma
10408 + fixp
->fx_addnumber
10409 + md_pcrel_from (fixp
));
10414 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
10415 if (rel
->howto
== NULL
)
10417 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10418 _("cannot represent relocation type %s"),
10419 bfd_get_reloc_code_name (code
));
10420 /* Set howto to a garbage value so that we can keep going. */
10421 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
10422 gas_assert (rel
->howto
!= NULL
);
10428 #include "tc-i386-intel.c"
10431 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
10433 int saved_naked_reg
;
10434 char saved_register_dot
;
10436 saved_naked_reg
= allow_naked_reg
;
10437 allow_naked_reg
= 1;
10438 saved_register_dot
= register_chars
['.'];
10439 register_chars
['.'] = '.';
10440 allow_pseudo_reg
= 1;
10441 expression_and_evaluate (exp
);
10442 allow_pseudo_reg
= 0;
10443 register_chars
['.'] = saved_register_dot
;
10444 allow_naked_reg
= saved_naked_reg
;
10446 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
10448 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
10450 exp
->X_op
= O_constant
;
10451 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
10452 .dw2_regnum
[flag_code
>> 1];
10455 exp
->X_op
= O_illegal
;
10460 tc_x86_frame_initial_instructions (void)
10462 static unsigned int sp_regno
[2];
10464 if (!sp_regno
[flag_code
>> 1])
10466 char *saved_input
= input_line_pointer
;
10467 char sp
[][4] = {"esp", "rsp"};
10470 input_line_pointer
= sp
[flag_code
>> 1];
10471 tc_x86_parse_to_dw2regnum (&exp
);
10472 gas_assert (exp
.X_op
== O_constant
);
10473 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
10474 input_line_pointer
= saved_input
;
10477 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
10478 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
10482 x86_dwarf2_addr_size (void)
10484 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10485 if (x86_elf_abi
== X86_64_X32_ABI
)
10488 return bfd_arch_bits_per_address (stdoutput
) / 8;
10492 i386_elf_section_type (const char *str
, size_t len
)
10494 if (flag_code
== CODE_64BIT
10495 && len
== sizeof ("unwind") - 1
10496 && strncmp (str
, "unwind", 6) == 0)
10497 return SHT_X86_64_UNWIND
;
10504 i386_solaris_fix_up_eh_frame (segT sec
)
10506 if (flag_code
== CODE_64BIT
)
10507 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
10513 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
10517 exp
.X_op
= O_secrel
;
10518 exp
.X_add_symbol
= symbol
;
10519 exp
.X_add_number
= 0;
10520 emit_expr (&exp
, size
);
10524 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10525 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10528 x86_64_section_letter (int letter
, char **ptr_msg
)
10530 if (flag_code
== CODE_64BIT
)
10533 return SHF_X86_64_LARGE
;
10535 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10538 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
10543 x86_64_section_word (char *str
, size_t len
)
10545 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
10546 return SHF_X86_64_LARGE
;
10552 handle_large_common (int small ATTRIBUTE_UNUSED
)
10554 if (flag_code
!= CODE_64BIT
)
10556 s_comm_internal (0, elf_common_parse
);
10557 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10561 static segT lbss_section
;
10562 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
10563 asection
*saved_bss_section
= bss_section
;
10565 if (lbss_section
== NULL
)
10567 flagword applicable
;
10568 segT seg
= now_seg
;
10569 subsegT subseg
= now_subseg
;
10571 /* The .lbss section is for local .largecomm symbols. */
10572 lbss_section
= subseg_new (".lbss", 0);
10573 applicable
= bfd_applicable_section_flags (stdoutput
);
10574 bfd_set_section_flags (stdoutput
, lbss_section
,
10575 applicable
& SEC_ALLOC
);
10576 seg_info (lbss_section
)->bss
= 1;
10578 subseg_set (seg
, subseg
);
10581 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
10582 bss_section
= lbss_section
;
10584 s_comm_internal (0, elf_common_parse
);
10586 elf_com_section_ptr
= saved_com_section_ptr
;
10587 bss_section
= saved_bss_section
;
10590 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */