1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2020 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifdef HAVE_LIMITS_H
37 #include <limits.h>
38 #else
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
41 #endif
42 #ifndef INT_MAX
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
44 #endif
45 #endif
46
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
49 #endif
50
51 #ifndef DEFAULT_ARCH
52 #define DEFAULT_ARCH "i386"
53 #endif
54
55 #ifndef INLINE
56 #if __GNUC__ >= 2
57 #define INLINE __inline__
58 #else
59 #define INLINE
60 #endif
61 #endif
62
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
68 #define WAIT_PREFIX 0
69 #define SEG_PREFIX 1
70 #define ADDR_PREFIX 2
71 #define DATA_PREFIX 3
72 #define REP_PREFIX 4
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
75 #define LOCK_PREFIX 5
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
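/* Illustrative sketch (editorial, not part of the original file): for
   "lock addw %ax, %fs:2(%ebx)" assembled in 32-bit mode, the prefix
   bytes land in their slots as
	prefix[SEG_PREFIX]  = 0x64	(%fs segment override)
	prefix[DATA_PREFIX] = 0x66	(16-bit operand size)
	prefix[LOCK_PREFIX] = 0xf0
   and are emitted in ascending slot order, i.e. 64 66 f0 before the
   opcode.  */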
78
79 /* we define the syntax here (modulo base,index,scale syntax) */
80 #define REGISTER_PREFIX '%'
81 #define IMMEDIATE_PREFIX '$'
82 #define ABSOLUTE_PREFIX '*'
83
84 /* these are the instruction mnemonic suffixes in AT&T syntax or
85 memory operand size in Intel syntax. */
86 #define WORD_MNEM_SUFFIX 'w'
87 #define BYTE_MNEM_SUFFIX 'b'
88 #define SHORT_MNEM_SUFFIX 's'
89 #define LONG_MNEM_SUFFIX 'l'
90 #define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
92 in instructions. */
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
94
95 #define END_OF_INSN '\0'
96
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
99
100 /*
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
105 END.
106 */
107 typedef struct
108 {
109 const insn_template *start;
110 const insn_template *end;
111 }
112 templates;
113
114 /* 386 operand encoding bytes: see 386 book for details of this. */
115 typedef struct
116 {
117 unsigned int regmem; /* codes register or memory operand */
118 unsigned int reg; /* codes register operand (or extended opcode) */
119 unsigned int mode; /* how to interpret regmem & reg */
120 }
121 modrm_byte;
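/* Illustrative sketch (editorial): the three fields above are packed
   into one byte as (mode << 6) | (reg << 3) | regmem.  For example
   "mov %eax, %ebx" uses opcode 0x89 with mode = 3 (register direct),
   reg = 0 (%eax) and regmem = 3 (%ebx), giving the ModRM byte 0xc3.  */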
122
123 /* x86-64 extension prefix. */
124 typedef int rex_byte;
125
126 /* 386 opcode byte to code indirect addressing. */
127 typedef struct
128 {
129 unsigned base;
130 unsigned index;
131 unsigned scale;
132 }
133 sib_byte;
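/* Illustrative sketch (editorial): a SIB byte packs as
   (scale << 6) | (index << 3) | base.  The memory operand
   "(%eax,%ecx,4)" gives scale = 2 (log2 of 4), index = 1 (%ecx) and
   base = 0 (%eax), i.e. the SIB byte 0x88.  */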
134
135 /* x86 arch names, types and features */
136 typedef struct
137 {
138 const char *name; /* arch name */
139 unsigned int len; /* arch string length */
140 enum processor_type type; /* arch type */
141 i386_cpu_flags flags; /* cpu feature flags */
142 unsigned int skip; /* show_arch should skip this. */
143 }
144 arch_entry;
145
146 /* Used to turn off indicated flags. */
147 typedef struct
148 {
149 const char *name; /* arch name */
150 unsigned int len; /* arch string length */
151 i386_cpu_flags flags; /* cpu feature flags */
152 }
153 noarch_entry;
154
155 static void update_code_flag (int, int);
156 static void set_code_flag (int);
157 static void set_16bit_gcc_code_flag (int);
158 static void set_intel_syntax (int);
159 static void set_intel_mnemonic (int);
160 static void set_allow_index_reg (int);
161 static void set_check (int);
162 static void set_cpu_arch (int);
163 #ifdef TE_PE
164 static void pe_directive_secrel (int);
165 #endif
166 static void signed_cons (int);
167 static char *output_invalid (int c);
168 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
169 const char *);
170 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
171 const char *);
172 static int i386_att_operand (char *);
173 static int i386_intel_operand (char *, int);
174 static int i386_intel_simplify (expressionS *);
175 static int i386_intel_parse_name (const char *, expressionS *);
176 static const reg_entry *parse_register (char *, char **);
177 static char *parse_insn (char *, char *);
178 static char *parse_operands (char *, const char *);
179 static void swap_operands (void);
180 static void swap_2_operands (int, int);
181 static enum flag_code i386_addressing_mode (void);
182 static void optimize_imm (void);
183 static void optimize_disp (void);
184 static const insn_template *match_template (char);
185 static int check_string (void);
186 static int process_suffix (void);
187 static int check_byte_reg (void);
188 static int check_long_reg (void);
189 static int check_qword_reg (void);
190 static int check_word_reg (void);
191 static int finalize_imm (void);
192 static int process_operands (void);
193 static const seg_entry *build_modrm_byte (void);
194 static void output_insn (void);
195 static void output_imm (fragS *, offsetT);
196 static void output_disp (fragS *, offsetT);
197 #ifndef I386COFF
198 static void s_bss (int);
199 #endif
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED);
202
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note = DEFAULT_X86_USED_NOTE;
209 #endif
210
211 static const char *default_arch = DEFAULT_ARCH;
212
213 /* parse_register() returns this when a register alias cannot be used. */
214 static const reg_entry bad_reg = { "<bad>", OPERAND_TYPE_NONE, 0, 0,
215 { Dw2Inval, Dw2Inval } };
216
217 /* This struct describes rounding control and SAE in the instruction. */
218 struct RC_Operation
219 {
220 enum rc_type
221 {
222 rne = 0,
223 rd,
224 ru,
225 rz,
226 saeonly
227 } type;
228 int operand;
229 };
230
231 static struct RC_Operation rc_op;
232
233 /* The struct describes masking, applied to OPERAND in the instruction.
234 MASK is a pointer to the corresponding mask register. ZEROING tells
235 whether merging or zeroing mask is used. */
236 struct Mask_Operation
237 {
238 const reg_entry *mask;
239 unsigned int zeroing;
240 /* The operand where this operation is associated. */
241 int operand;
242 };
243
244 static struct Mask_Operation mask_op;
245
246 /* The struct describes broadcasting, applied to OPERAND. TYPE is
247 the N in a {1toN} broadcast factor. */
248 struct Broadcast_Operation
249 {
250 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
251 int type;
252
253 /* Index of broadcasted operand. */
254 int operand;
255
256 /* Number of bytes to broadcast. */
257 int bytes;
258 };
259
260 static struct Broadcast_Operation broadcast_op;
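/* Illustrative sketch (editorial assumption about typical contents):
   for an operand such as "4(%rax){1to16}" one would expect type = 16
   (the N from {1toN}), operand = the index of that memory operand,
   and bytes = 4 for a dword element.  */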
261
262 /* VEX prefix. */
263 typedef struct
264 {
265 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
266 unsigned char bytes[4];
267 unsigned int length;
268 /* Destination or source register specifier. */
269 const reg_entry *register_specifier;
270 } vex_prefix;
271
272 /* 'md_assemble ()' gathers together information and puts it into an
273 i386_insn. */
274
275 union i386_op
276 {
277 expressionS *disps;
278 expressionS *imms;
279 const reg_entry *regs;
280 };
281
282 enum i386_error
283 {
284 operand_size_mismatch,
285 operand_type_mismatch,
286 register_type_mismatch,
287 number_of_operands_mismatch,
288 invalid_instruction_suffix,
289 bad_imm4,
290 unsupported_with_intel_mnemonic,
291 unsupported_syntax,
292 unsupported,
293 invalid_vsib_address,
294 invalid_vector_register_set,
295 unsupported_vector_index_register,
296 unsupported_broadcast,
297 broadcast_needed,
298 unsupported_masking,
299 mask_not_on_destination,
300 no_default_mask,
301 unsupported_rc_sae,
302 rc_sae_operand_not_last_imm,
303 invalid_register_operand,
304 };
305
306 struct _i386_insn
307 {
308 /* TM holds the template for the insn we're currently assembling. */
309 insn_template tm;
310
311 /* SUFFIX holds the instruction size suffix for byte, word, dword
312 or qword, if given. */
313 char suffix;
314
315 /* OPERANDS gives the number of given operands. */
316 unsigned int operands;
317
318 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
319 of given register, displacement, memory operands and immediate
320 operands. */
321 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
322
323 /* TYPES [i] is the type (see above #defines) which tells us how to
324 use OP[i] for the corresponding operand. */
325 i386_operand_type types[MAX_OPERANDS];
326
327 /* Displacement expression, immediate expression, or register for each
328 operand. */
329 union i386_op op[MAX_OPERANDS];
330
331 /* Flags for operands. */
332 unsigned int flags[MAX_OPERANDS];
333 #define Operand_PCrel 1
334 #define Operand_Mem 2
335
336 /* Relocation type for operand */
337 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
338
339 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
340 the base index byte below. */
341 const reg_entry *base_reg;
342 const reg_entry *index_reg;
343 unsigned int log2_scale_factor;
344
345 /* SEG gives the seg_entries of this insn. They are zero unless
346 explicit segment overrides are given. */
347 const seg_entry *seg[2];
348
349 /* Copied first memory operand string, for re-checking. */
350 char *memop1_string;
351
352 /* PREFIX holds all the given prefix opcodes (usually null).
353 PREFIXES is the number of prefix opcodes. */
354 unsigned int prefixes;
355 unsigned char prefix[MAX_PREFIXES];
356
357 /* Register is in low 3 bits of opcode. */
358 bfd_boolean short_form;
359
360 /* The operand to a branch insn indicates an absolute branch. */
361 bfd_boolean jumpabsolute;
362
363 /* Has MMX register operands. */
364 bfd_boolean has_regmmx;
365
366 /* Has XMM register operands. */
367 bfd_boolean has_regxmm;
368
369 /* Has YMM register operands. */
370 bfd_boolean has_regymm;
371
372 /* Has ZMM register operands. */
373 bfd_boolean has_regzmm;
374
375 /* Has GOTPC or TLS relocation. */
376 bfd_boolean has_gotpc_tls_reloc;
377
378 /* RM and SIB are the modrm byte and the sib byte where the
379 addressing modes of this insn are encoded. */
380 modrm_byte rm;
381 rex_byte rex;
382 rex_byte vrex;
383 sib_byte sib;
384 vex_prefix vex;
385
386 /* Masking attributes. */
387 struct Mask_Operation *mask;
388
389 /* Rounding control and SAE attributes. */
390 struct RC_Operation *rounding;
391
392 /* Broadcasting attributes. */
393 struct Broadcast_Operation *broadcast;
394
395 /* Compressed disp8*N attribute. */
396 unsigned int memshift;
397
398 /* Prefer load or store in encoding. */
399 enum
400 {
401 dir_encoding_default = 0,
402 dir_encoding_load,
403 dir_encoding_store,
404 dir_encoding_swap
405 } dir_encoding;
406
407 /* Prefer 8bit or 32bit displacement in encoding. */
408 enum
409 {
410 disp_encoding_default = 0,
411 disp_encoding_8bit,
412 disp_encoding_32bit
413 } disp_encoding;
414
415 /* Prefer the REX byte in encoding. */
416 bfd_boolean rex_encoding;
417
418 /* Disable instruction size optimization. */
419 bfd_boolean no_optimize;
420
421 /* How to encode vector instructions. */
422 enum
423 {
424 vex_encoding_default = 0,
425 vex_encoding_vex,
426 vex_encoding_vex3,
427 vex_encoding_evex,
428 vex_encoding_error
429 } vec_encoding;
430
431 /* REP prefix. */
432 const char *rep_prefix;
433
434 /* HLE prefix. */
435 const char *hle_prefix;
436
437 /* Have BND prefix. */
438 const char *bnd_prefix;
439
440 /* Have NOTRACK prefix. */
441 const char *notrack_prefix;
442
443 /* Error message. */
444 enum i386_error error;
445 };
446
447 typedef struct _i386_insn i386_insn;
448
449 /* Link each RC type with its corresponding string, which will be
450 looked for in the assembly source. */
451 struct RC_name
452 {
453 enum rc_type type;
454 const char *name;
455 unsigned int len;
456 };
457
458 static const struct RC_name RC_NamesTable[] =
459 {
460 { rne, STRING_COMMA_LEN ("rn-sae") },
461 { rd, STRING_COMMA_LEN ("rd-sae") },
462 { ru, STRING_COMMA_LEN ("ru-sae") },
463 { rz, STRING_COMMA_LEN ("rz-sae") },
464 { saeonly, STRING_COMMA_LEN ("sae") },
465 };
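/* Illustrative sketch (editorial): these strings appear in operands
   such as "vaddps {rn-sae}, %zmm0, %zmm1, %zmm2"; scanning the table
   with the stored lengths maps "rn-sae" to rne, "sae" to saeonly, and
   so on.  */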
466
467 /* List of chars besides those in app.c:symbol_chars that can start an
468 operand. Used to prevent the scrubber eating vital white-space. */
469 const char extra_symbol_chars[] = "*%-([{}"
470 #ifdef LEX_AT
471 "@"
472 #endif
473 #ifdef LEX_QM
474 "?"
475 #endif
476 ;
477
478 #if (defined (TE_I386AIX) \
479 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
480 && !defined (TE_GNU) \
481 && !defined (TE_LINUX) \
482 && !defined (TE_FreeBSD) \
483 && !defined (TE_DragonFly) \
484 && !defined (TE_NetBSD)))
485 /* This array holds the chars that always start a comment. If the
486 pre-processor is disabled, these aren't very useful. The option
487 --divide will remove '/' from this list. */
488 const char *i386_comment_chars = "#/";
489 #define SVR4_COMMENT_CHARS 1
490 #define PREFIX_SEPARATOR '\\'
491
492 #else
493 const char *i386_comment_chars = "#";
494 #define PREFIX_SEPARATOR '/'
495 #endif
496
497 /* This array holds the chars that only start a comment at the beginning of
498 a line. If the line seems to have the form '# 123 filename',
499 .line and .file directives will appear in the pre-processed output.
500 Note that input_file.c hand checks for '#' at the beginning of the
501 first line of the input file. This is because the compiler outputs
502 #NO_APP at the beginning of its output.
503 Also note that comments started like this one will always work if
504 '/' isn't otherwise defined. */
505 const char line_comment_chars[] = "#/";
506
507 const char line_separator_chars[] = ";";
508
509 /* Chars that can be used to separate mant from exp in floating point
510 nums. */
511 const char EXP_CHARS[] = "eE";
512
513 /* Chars that mean this number is a floating point constant
514 As in 0f12.456
515 or 0d1.2345e12. */
516 const char FLT_CHARS[] = "fFdDxX";
517
518 /* Tables for lexical analysis. */
519 static char mnemonic_chars[256];
520 static char register_chars[256];
521 static char operand_chars[256];
522 static char identifier_chars[256];
523 static char digit_chars[256];
524
525 /* Lexical macros. */
526 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
527 #define is_operand_char(x) (operand_chars[(unsigned char) x])
528 #define is_register_char(x) (register_chars[(unsigned char) x])
529 #define is_space_char(x) ((x) == ' ')
530 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
531 #define is_digit_char(x) (digit_chars[(unsigned char) x])
532
533 /* All non-digit non-letter characters that may occur in an operand. */
534 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
535
536 /* md_assemble() always leaves the strings it's passed unaltered. To
537 effect this we maintain a stack of saved characters that we've smashed
538 with '\0's (indicating end of strings for various sub-fields of the
539 assembler instruction). */
540 static char save_stack[32];
541 static char *save_stack_p;
542 #define END_STRING_AND_SAVE(s) \
543 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
544 #define RESTORE_END_STRING(s) \
545 do { *(s) = *--save_stack_p; } while (0)
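/* Illustrative sketch (editorial; assumes save_stack_p has been reset
   to the start of save_stack, as md_assemble does before parsing):

	char buf[] = "eax,ebx";
	char *comma = buf + 3;

	END_STRING_AND_SAVE (comma);	// buf now reads "eax"; ',' saved
	// ... look up the register name "eax" ...
	RESTORE_END_STRING (comma);	// buf is "eax,ebx" again
*/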
546
547 /* The instruction we're assembling. */
548 static i386_insn i;
549
550 /* Possible templates for current insn. */
551 static const templates *current_templates;
552
553 /* Per instruction expressionS buffers: max displacements & immediates. */
554 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
555 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
556
557 /* Current operand we are working on. */
558 static int this_operand = -1;
559
560 /* We support four different modes (the fourth being .code16gcc, a
561 variant of 16-bit mode). The FLAG_CODE variable distinguishes them. */
562
563 enum flag_code {
564 CODE_32BIT,
565 CODE_16BIT,
566 CODE_64BIT };
567
568 static enum flag_code flag_code;
569 static unsigned int object_64bit;
570 static unsigned int disallow_64bit_reloc;
571 static int use_rela_relocations = 0;
572 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
573 static const char *tls_get_addr;
574
575 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
576 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
577 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
578
579 /* The ELF ABI to use. */
580 enum x86_elf_abi
581 {
582 I386_ABI,
583 X86_64_ABI,
584 X86_64_X32_ABI
585 };
586
587 static enum x86_elf_abi x86_elf_abi = I386_ABI;
588 #endif
589
590 #if defined (TE_PE) || defined (TE_PEP)
591 /* Use big object file format. */
592 static int use_big_obj = 0;
593 #endif
594
595 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
596 /* 1 if generating code for a shared library. */
597 static int shared = 0;
598 #endif
599
600 /* 1 for intel syntax,
601 0 if att syntax. */
602 static int intel_syntax = 0;
603
604 static enum x86_64_isa
605 {
606 amd64 = 1, /* AMD64 ISA. */
607 intel64 /* Intel64 ISA. */
608 } isa64;
609
610 /* 1 for intel mnemonic,
611 0 if att mnemonic. */
612 static int intel_mnemonic = !SYSV386_COMPAT;
613
614 /* 1 if pseudo registers are permitted. */
615 static int allow_pseudo_reg = 0;
616
617 /* 1 if register prefix % not required. */
618 static int allow_naked_reg = 0;
619
620 /* 1 if the assembler should add BND prefix for all control-transferring
621 instructions supporting it, even if this prefix wasn't specified
622 explicitly. */
623 static int add_bnd_prefix = 0;
624
625 /* 1 if pseudo index register, eiz/riz, is allowed. */
626 static int allow_index_reg = 0;
627
628 /* 1 if the assembler should ignore LOCK prefix, even if it was
629 specified explicitly. */
630 static int omit_lock_prefix = 0;
631
632 /* 1 if the assembler should encode lfence, mfence, and sfence as
633 "lock addl $0, (%{re}sp)". */
634 static int avoid_fence = 0;
635
636 /* 1 if lfence should be inserted after every load. */
637 static int lfence_after_load = 0;
638
639 /* Non-zero if lfence should be inserted before indirect branch. */
640 static enum lfence_before_indirect_branch_kind
641 {
642 lfence_branch_none = 0,
643 lfence_branch_register,
644 lfence_branch_memory,
645 lfence_branch_all
646 }
647 lfence_before_indirect_branch;
648
649 /* Non-zero if lfence should be inserted before ret. */
650 static enum lfence_before_ret_kind
651 {
652 lfence_before_ret_none = 0,
653 lfence_before_ret_not,
654 lfence_before_ret_or,
655 lfence_before_ret_shl
656 }
657 lfence_before_ret;
658
659 /* Kind of the previous item: a directive such as .byte, a prefix, or an ordinary instruction. */
660 static struct
661 {
662 segT seg;
663 const char *file;
664 const char *name;
665 unsigned int line;
666 enum last_insn_kind
667 {
668 last_insn_other = 0,
669 last_insn_directive,
670 last_insn_prefix
671 } kind;
672 } last_insn;
673
674 /* 1 if the assembler should generate relax relocations. */
675
676 static int generate_relax_relocations
677 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
678
679 static enum check_kind
680 {
681 check_none = 0,
682 check_warning,
683 check_error
684 }
685 sse_check, operand_check = check_warning;
686
687 /* Non-zero if branches should be aligned within power of 2 boundary. */
688 static int align_branch_power = 0;
689
690 /* Types of branches to align. */
691 enum align_branch_kind
692 {
693 align_branch_none = 0,
694 align_branch_jcc = 1,
695 align_branch_fused = 2,
696 align_branch_jmp = 3,
697 align_branch_call = 4,
698 align_branch_indirect = 5,
699 align_branch_ret = 6
700 };
701
702 /* Type bits of branches to align. */
703 enum align_branch_bit
704 {
705 align_branch_jcc_bit = 1 << align_branch_jcc,
706 align_branch_fused_bit = 1 << align_branch_fused,
707 align_branch_jmp_bit = 1 << align_branch_jmp,
708 align_branch_call_bit = 1 << align_branch_call,
709 align_branch_indirect_bit = 1 << align_branch_indirect,
710 align_branch_ret_bit = 1 << align_branch_ret
711 };
712
713 static unsigned int align_branch = (align_branch_jcc_bit
714 | align_branch_fused_bit
715 | align_branch_jmp_bit);
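/* I.e. the default mask is (1 << 1) | (1 << 2) | (1 << 3) = 0xe:
   conditional jumps, fused compare-and-jump pairs and unconditional
   jumps are aligned by default; calls, indirect branches and rets
   are not.  */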
716
717 /* Types of condition jump used by macro-fusion. */
718 enum mf_jcc_kind
719 {
720 mf_jcc_jo = 0, /* base opcode 0x70 */
721 mf_jcc_jc, /* base opcode 0x72 */
722 mf_jcc_je, /* base opcode 0x74 */
723 mf_jcc_jna, /* base opcode 0x76 */
724 mf_jcc_js, /* base opcode 0x78 */
725 mf_jcc_jp, /* base opcode 0x7a */
726 mf_jcc_jl, /* base opcode 0x7c */
727 mf_jcc_jle, /* base opcode 0x7e */
728 };
729
730 /* Types of compare flag-modifying instructions used by macro-fusion. */
731 enum mf_cmp_kind
732 {
733 mf_cmp_test_and, /* test/cmp */
734 mf_cmp_alu_cmp, /* add/sub/cmp */
735 mf_cmp_incdec /* inc/dec */
736 };
737
738 /* The maximum padding size for a fused jcc. A CMP-like instruction
739 can be 9 bytes and a jcc can be 6 bytes. Leave room just in case for
740 prefixes. */
741 #define MAX_FUSED_JCC_PADDING_SIZE 20
742
743 /* The maximum number of prefixes added for an instruction. */
744 static unsigned int align_branch_prefix_size = 5;
745
746 /* Optimization:
747 1. Clear the REX_W bit with register operand if possible.
748 2. Above plus use 128bit vector instruction to clear the full vector
749 register.
750 */
751 static int optimize = 0;
752
753 /* Optimization:
754 1. Clear the REX_W bit with register operand if possible.
755 2. Above plus use 128bit vector instruction to clear the full vector
756 register.
757 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
758 "testb $imm7,%r8".
759 */
760 static int optimize_for_space = 0;
761
762 /* Register prefix used for error message. */
763 static const char *register_prefix = "%";
764
765 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
766 leave, push, and pop instructions so that gcc has the same stack
767 frame as in 32 bit mode. */
768 static char stackop_size = '\0';
769
770 /* Non-zero to optimize code alignment. */
771 int optimize_align_code = 1;
772
773 /* Non-zero to quieten some warnings. */
774 static int quiet_warnings = 0;
775
776 /* CPU name. */
777 static const char *cpu_arch_name = NULL;
778 static char *cpu_sub_arch_name = NULL;
779
780 /* CPU feature flags. */
781 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
782
783 /* If we have selected a cpu we are generating instructions for. */
784 static int cpu_arch_tune_set = 0;
785
786 /* Cpu we are generating instructions for. */
787 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
788
789 /* CPU feature flags of cpu we are generating instructions for. */
790 static i386_cpu_flags cpu_arch_tune_flags;
791
792 /* CPU instruction set architecture used. */
793 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
794
795 /* CPU feature flags of instruction set architecture used. */
796 i386_cpu_flags cpu_arch_isa_flags;
797
798 /* If set, conditional jumps are not automatically promoted to handle
799 larger than a byte offset. */
800 static unsigned int no_cond_jump_promotion = 0;
801
802 /* Encode SSE instructions with VEX prefix. */
803 static unsigned int sse2avx;
804
805 /* Encode scalar AVX instructions with specific vector length. */
806 static enum
807 {
808 vex128 = 0,
809 vex256
810 } avxscalar;
811
812 /* Encode VEX WIG instructions with specific vex.w. */
813 static enum
814 {
815 vexw0 = 0,
816 vexw1
817 } vexwig;
818
819 /* Encode scalar EVEX LIG instructions with specific vector length. */
820 static enum
821 {
822 evexl128 = 0,
823 evexl256,
824 evexl512
825 } evexlig;
826
827 /* Encode EVEX WIG instructions with specific evex.w. */
828 static enum
829 {
830 evexw0 = 0,
831 evexw1
832 } evexwig;
833
834 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
835 static enum rc_type evexrcig = rne;
836
837 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
838 static symbolS *GOT_symbol;
839
840 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
841 unsigned int x86_dwarf2_return_column;
842
843 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
844 int x86_cie_data_alignment;
845
846 /* Interface to relax_segment.
847 There are 3 major relax states for 386 jump insns because the
848 different types of jumps add different sizes to frags when we're
849 figuring out what sort of jump to choose to reach a given label.
850
851 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
852 branches which are handled by md_estimate_size_before_relax() and
853 i386_generic_table_relax_frag(). */
854
855 /* Types. */
856 #define UNCOND_JUMP 0
857 #define COND_JUMP 1
858 #define COND_JUMP86 2
859 #define BRANCH_PADDING 3
860 #define BRANCH_PREFIX 4
861 #define FUSED_JCC_PADDING 5
862
863 /* Sizes. */
864 #define CODE16 1
865 #define SMALL 0
866 #define SMALL16 (SMALL | CODE16)
867 #define BIG 2
868 #define BIG16 (BIG | CODE16)
869
870 #ifndef INLINE
871 #ifdef __GNUC__
872 #define INLINE __inline__
873 #else
874 #define INLINE
875 #endif
876 #endif
877
878 #define ENCODE_RELAX_STATE(type, size) \
879 ((relax_substateT) (((type) << 2) | (size)))
880 #define TYPE_FROM_RELAX_STATE(s) \
881 ((s) >> 2)
882 #define DISP_SIZE_FROM_RELAX_STATE(s) \
883 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
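/* Worked example (editorial): ENCODE_RELAX_STATE (COND_JUMP, BIG) is
   (1 << 2) | 2 = 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) yields 4, i.e. a 32-bit
   displacement.  */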
884
885 /* This table is used by relax_frag to promote short jumps to long
886 ones where necessary. SMALL (short) jumps may be promoted to BIG
887 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
888 don't allow a short jump in a 32 bit code segment to be promoted to
889 a 16 bit offset jump because it's slower (requires data size
890 prefix), and doesn't work, unless the destination is in the bottom
891 64k of the code segment (The top 16 bits of eip are zeroed). */
892
893 const relax_typeS md_relax_table[] =
894 {
895 /* The fields are:
896 1) most positive reach of this state,
897 2) most negative reach of this state,
898 3) how many bytes this mode will have in the variable part of the frag
899 4) which index into the table to try if we can't fit into this one. */
900
901 /* UNCOND_JUMP states. */
902 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
903 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
904 /* dword jmp adds 4 bytes to frag:
905 0 extra opcode bytes, 4 displacement bytes. */
906 {0, 0, 4, 0},
907 /* word jmp adds 2 bytes to frag:
908 0 extra opcode bytes, 2 displacement bytes. */
909 {0, 0, 2, 0},
910
911 /* COND_JUMP states. */
912 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
913 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
914 /* dword conditionals add 5 bytes to frag:
915 1 extra opcode byte, 4 displacement bytes. */
916 {0, 0, 5, 0},
917 /* word conditionals add 3 bytes to frag:
918 1 extra opcode byte, 2 displacement bytes. */
919 {0, 0, 3, 0},
920
921 /* COND_JUMP86 states. */
922 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
923 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
924 /* dword conditionals add 5 bytes to frag:
925 1 extra opcode byte, 4 displacement bytes. */
926 {0, 0, 5, 0},
927 /* word conditionals add 4 bytes to frag:
928 1 displacement byte and a 3 byte long branch insn. */
929 {0, 0, 4, 0}
930 };
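/* Worked example (editorial): a frag in state
   ENCODE_RELAX_STATE (COND_JUMP, SMALL), index 4 above, reaches
   [-127, +128] with a 1-byte displacement.  If its target is farther
   away, relaxation moves it to the state named in the fourth field,
   ENCODE_RELAX_STATE (COND_JUMP, BIG) = 6, whose variable part takes
   5 bytes: 1 extra opcode byte plus a 4-byte displacement.  */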
931
932 static const arch_entry cpu_arch[] =
933 {
934 /* Do not replace the first two entries - i386_target_format()
935 relies on them being there in this order. */
936 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
937 CPU_GENERIC32_FLAGS, 0 },
938 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
939 CPU_GENERIC64_FLAGS, 0 },
940 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
941 CPU_NONE_FLAGS, 0 },
942 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
943 CPU_I186_FLAGS, 0 },
944 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
945 CPU_I286_FLAGS, 0 },
946 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
947 CPU_I386_FLAGS, 0 },
948 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
949 CPU_I486_FLAGS, 0 },
950 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
951 CPU_I586_FLAGS, 0 },
952 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
953 CPU_I686_FLAGS, 0 },
954 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
955 CPU_I586_FLAGS, 0 },
956 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
957 CPU_PENTIUMPRO_FLAGS, 0 },
958 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
959 CPU_P2_FLAGS, 0 },
960 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
961 CPU_P3_FLAGS, 0 },
962 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
963 CPU_P4_FLAGS, 0 },
964 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
965 CPU_CORE_FLAGS, 0 },
966 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
967 CPU_NOCONA_FLAGS, 0 },
968 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
969 CPU_CORE_FLAGS, 1 },
970 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
971 CPU_CORE_FLAGS, 0 },
972 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
973 CPU_CORE2_FLAGS, 1 },
974 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
975 CPU_CORE2_FLAGS, 0 },
976 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
977 CPU_COREI7_FLAGS, 0 },
978 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
979 CPU_L1OM_FLAGS, 0 },
980 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
981 CPU_K1OM_FLAGS, 0 },
982 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
983 CPU_IAMCU_FLAGS, 0 },
984 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
985 CPU_K6_FLAGS, 0 },
986 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
987 CPU_K6_2_FLAGS, 0 },
988 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
989 CPU_ATHLON_FLAGS, 0 },
990 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
991 CPU_K8_FLAGS, 1 },
992 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
993 CPU_K8_FLAGS, 0 },
994 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
995 CPU_K8_FLAGS, 0 },
996 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
997 CPU_AMDFAM10_FLAGS, 0 },
998 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
999 CPU_BDVER1_FLAGS, 0 },
1000 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
1001 CPU_BDVER2_FLAGS, 0 },
1002 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
1003 CPU_BDVER3_FLAGS, 0 },
1004 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
1005 CPU_BDVER4_FLAGS, 0 },
1006 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
1007 CPU_ZNVER1_FLAGS, 0 },
1008 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
1009 CPU_ZNVER2_FLAGS, 0 },
1010 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
1011 CPU_BTVER1_FLAGS, 0 },
1012 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
1013 CPU_BTVER2_FLAGS, 0 },
1014 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
1015 CPU_8087_FLAGS, 0 },
1016 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
1017 CPU_287_FLAGS, 0 },
1018 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
1019 CPU_387_FLAGS, 0 },
1020 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
1021 CPU_687_FLAGS, 0 },
1022 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN,
1023 CPU_CMOV_FLAGS, 0 },
1024 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN,
1025 CPU_FXSR_FLAGS, 0 },
1026 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
1027 CPU_MMX_FLAGS, 0 },
1028 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
1029 CPU_SSE_FLAGS, 0 },
1030 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
1031 CPU_SSE2_FLAGS, 0 },
1032 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
1033 CPU_SSE3_FLAGS, 0 },
1034 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
1035 CPU_SSE4A_FLAGS, 0 },
1036 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
1037 CPU_SSSE3_FLAGS, 0 },
1038 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
1039 CPU_SSE4_1_FLAGS, 0 },
1040 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
1041 CPU_SSE4_2_FLAGS, 0 },
1042 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
1043 CPU_SSE4_2_FLAGS, 0 },
1044 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
1045 CPU_AVX_FLAGS, 0 },
1046 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
1047 CPU_AVX2_FLAGS, 0 },
1048 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
1049 CPU_AVX512F_FLAGS, 0 },
1050 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
1051 CPU_AVX512CD_FLAGS, 0 },
1052 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
1053 CPU_AVX512ER_FLAGS, 0 },
1054 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
1055 CPU_AVX512PF_FLAGS, 0 },
1056 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
1057 CPU_AVX512DQ_FLAGS, 0 },
1058 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
1059 CPU_AVX512BW_FLAGS, 0 },
1060 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
1061 CPU_AVX512VL_FLAGS, 0 },
1062 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
1063 CPU_VMX_FLAGS, 0 },
1064 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
1065 CPU_VMFUNC_FLAGS, 0 },
1066 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
1067 CPU_SMX_FLAGS, 0 },
1068 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
1069 CPU_XSAVE_FLAGS, 0 },
1070 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
1071 CPU_XSAVEOPT_FLAGS, 0 },
1072 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
1073 CPU_XSAVEC_FLAGS, 0 },
1074 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
1075 CPU_XSAVES_FLAGS, 0 },
1076 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
1077 CPU_AES_FLAGS, 0 },
1078 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
1079 CPU_PCLMUL_FLAGS, 0 },
1080 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
1081 CPU_PCLMUL_FLAGS, 1 },
1082 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
1083 CPU_FSGSBASE_FLAGS, 0 },
1084 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
1085 CPU_RDRND_FLAGS, 0 },
1086 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
1087 CPU_F16C_FLAGS, 0 },
1088 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
1089 CPU_BMI2_FLAGS, 0 },
1090 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
1091 CPU_FMA_FLAGS, 0 },
1092 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
1093 CPU_FMA4_FLAGS, 0 },
1094 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
1095 CPU_XOP_FLAGS, 0 },
1096 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
1097 CPU_LWP_FLAGS, 0 },
1098 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
1099 CPU_MOVBE_FLAGS, 0 },
1100 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
1101 CPU_CX16_FLAGS, 0 },
1102 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
1103 CPU_EPT_FLAGS, 0 },
1104 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
1105 CPU_LZCNT_FLAGS, 0 },
1106 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN,
1107 CPU_POPCNT_FLAGS, 0 },
1108 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
1109 CPU_HLE_FLAGS, 0 },
1110 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
1111 CPU_RTM_FLAGS, 0 },
1112 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
1113 CPU_INVPCID_FLAGS, 0 },
1114 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
1115 CPU_CLFLUSH_FLAGS, 0 },
1116 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
1117 CPU_NOP_FLAGS, 0 },
1118 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
1119 CPU_SYSCALL_FLAGS, 0 },
1120 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
1121 CPU_RDTSCP_FLAGS, 0 },
1122 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
1123 CPU_3DNOW_FLAGS, 0 },
1124 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
1125 CPU_3DNOWA_FLAGS, 0 },
1126 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
1127 CPU_PADLOCK_FLAGS, 0 },
1128 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
1129 CPU_SVME_FLAGS, 1 },
1130 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
1131 CPU_SVME_FLAGS, 0 },
1132 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
1133 CPU_SSE4A_FLAGS, 0 },
1134 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
1135 CPU_ABM_FLAGS, 0 },
1136 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
1137 CPU_BMI_FLAGS, 0 },
1138 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
1139 CPU_TBM_FLAGS, 0 },
1140 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
1141 CPU_ADX_FLAGS, 0 },
1142 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
1143 CPU_RDSEED_FLAGS, 0 },
1144 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
1145 CPU_PRFCHW_FLAGS, 0 },
1146 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
1147 CPU_SMAP_FLAGS, 0 },
1148 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
1149 CPU_MPX_FLAGS, 0 },
1150 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
1151 CPU_SHA_FLAGS, 0 },
1152 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
1153 CPU_CLFLUSHOPT_FLAGS, 0 },
1154 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
1155 CPU_PREFETCHWT1_FLAGS, 0 },
1156 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
1157 CPU_SE1_FLAGS, 0 },
1158 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
1159 CPU_CLWB_FLAGS, 0 },
1160 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
1161 CPU_AVX512IFMA_FLAGS, 0 },
1162 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
1163 CPU_AVX512VBMI_FLAGS, 0 },
1164 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN,
1165 CPU_AVX512_4FMAPS_FLAGS, 0 },
1166 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN,
1167 CPU_AVX512_4VNNIW_FLAGS, 0 },
1168 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN,
1169 CPU_AVX512_VPOPCNTDQ_FLAGS, 0 },
1170 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN,
1171 CPU_AVX512_VBMI2_FLAGS, 0 },
1172 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN,
1173 CPU_AVX512_VNNI_FLAGS, 0 },
1174 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN,
1175 CPU_AVX512_BITALG_FLAGS, 0 },
1176 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
1177 CPU_CLZERO_FLAGS, 0 },
1178 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
1179 CPU_MWAITX_FLAGS, 0 },
1180 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
1181 CPU_OSPKE_FLAGS, 0 },
1182 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
1183 CPU_RDPID_FLAGS, 0 },
1184 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN,
1185 CPU_PTWRITE_FLAGS, 0 },
1186 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN,
1187 CPU_IBT_FLAGS, 0 },
1188 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN,
1189 CPU_SHSTK_FLAGS, 0 },
1190 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN,
1191 CPU_GFNI_FLAGS, 0 },
1192 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN,
1193 CPU_VAES_FLAGS, 0 },
1194 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN,
1195 CPU_VPCLMULQDQ_FLAGS, 0 },
1196 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN,
1197 CPU_WBNOINVD_FLAGS, 0 },
1198 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN,
1199 CPU_PCONFIG_FLAGS, 0 },
1200 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN,
1201 CPU_WAITPKG_FLAGS, 0 },
1202 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
1203 CPU_CLDEMOTE_FLAGS, 0 },
1204 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
1205 CPU_MOVDIRI_FLAGS, 0 },
1206 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
1207 CPU_MOVDIR64B_FLAGS, 0 },
1208 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN,
1209 CPU_AVX512_BF16_FLAGS, 0 },
1210 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN,
1211 CPU_AVX512_VP2INTERSECT_FLAGS, 0 },
1212 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN,
1213 CPU_ENQCMD_FLAGS, 0 },
1214 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN,
1215 CPU_SERIALIZE_FLAGS, 0 },
1216 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN,
1217 CPU_RDPRU_FLAGS, 0 },
1218 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN,
1219 CPU_MCOMMIT_FLAGS, 0 },
1220 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN,
1221 CPU_SEV_ES_FLAGS, 0 },
1222 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN,
1223 CPU_TSXLDTRK_FLAGS, 0 },
1224 };
1225
1226 static const noarch_entry cpu_noarch[] =
1227 {
1228 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS },
1229 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS },
1230 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS },
1231 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS },
1232 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS },
1233 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS },
1234 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS },
1235 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
1236 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
1237 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS },
1238 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS },
1239 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS },
1240 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS },
1241 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS },
1242 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS },
1243 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS },
1244 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS },
1245 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
1246 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
1247 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
1248 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
1249 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
1250 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
1251 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
1252 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
1253 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
1254 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS },
1255 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS },
1256 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS },
1257 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
1258 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS },
1259 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
1260 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
1261 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
1262 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
1263 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
1264 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
1265 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1266 CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
1267 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
1268 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS },
1269 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS },
1270 };
1271
1272 #ifdef I386COFF
1273 /* Like s_lcomm_internal in gas/read.c but the alignment string
1274 is allowed to be optional. */
1275
1276 static symbolS *
1277 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
1278 {
1279 addressT align = 0;
1280
1281 SKIP_WHITESPACE ();
1282
1283 if (needs_align
1284 && *input_line_pointer == ',')
1285 {
1286 align = parse_align (needs_align - 1);
1287
1288 if (align == (addressT) -1)
1289 return NULL;
1290 }
1291 else
1292 {
1293 if (size >= 8)
1294 align = 3;
1295 else if (size >= 4)
1296 align = 2;
1297 else if (size >= 2)
1298 align = 1;
1299 else
1300 align = 0;
1301 }
1302
1303 bss_alloc (symbolP, size, align);
1304 return symbolP;
1305 }
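/* Worked example (editorial): bss_alloc takes a log2 alignment, so in
   the default case above a 6-byte object falls into "size >= 4" and
   gets align = 2 (4-byte alignment), while anything of 8 bytes or
   more gets align = 3 (8-byte alignment).  */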
1306
1307 static void
1308 pe_lcomm (int needs_align)
1309 {
1310 s_comm_internal (needs_align * 2, pe_lcomm_internal);
1311 }
1312 #endif
1313
1314 const pseudo_typeS md_pseudo_table[] =
1315 {
1316 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1317 {"align", s_align_bytes, 0},
1318 #else
1319 {"align", s_align_ptwo, 0},
1320 #endif
1321 {"arch", set_cpu_arch, 0},
1322 #ifndef I386COFF
1323 {"bss", s_bss, 0},
1324 #else
1325 {"lcomm", pe_lcomm, 1},
1326 #endif
1327 {"ffloat", float_cons, 'f'},
1328 {"dfloat", float_cons, 'd'},
1329 {"tfloat", float_cons, 'x'},
1330 {"value", cons, 2},
1331 {"slong", signed_cons, 4},
1332 {"noopt", s_ignore, 0},
1333 {"optim", s_ignore, 0},
1334 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
1335 {"code16", set_code_flag, CODE_16BIT},
1336 {"code32", set_code_flag, CODE_32BIT},
1337 #ifdef BFD64
1338 {"code64", set_code_flag, CODE_64BIT},
1339 #endif
1340 {"intel_syntax", set_intel_syntax, 1},
1341 {"att_syntax", set_intel_syntax, 0},
1342 {"intel_mnemonic", set_intel_mnemonic, 1},
1343 {"att_mnemonic", set_intel_mnemonic, 0},
1344 {"allow_index_reg", set_allow_index_reg, 1},
1345 {"disallow_index_reg", set_allow_index_reg, 0},
1346 {"sse_check", set_check, 0},
1347 {"operand_check", set_check, 1},
1348 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1349 {"largecomm", handle_large_common, 0},
1350 #else
1351 {"file", dwarf2_directive_file, 0},
1352 {"loc", dwarf2_directive_loc, 0},
1353 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
1354 #endif
1355 #ifdef TE_PE
1356 {"secrel32", pe_directive_secrel, 0},
1357 #endif
1358 {0, 0, 0}
1359 };
1360
1361 /* For interface with expression (). */
1362 extern char *input_line_pointer;
1363
1364 /* Hash table for instruction mnemonic lookup. */
1365 static struct hash_control *op_hash;
1366
1367 /* Hash table for register lookup. */
1368 static struct hash_control *reg_hash;
1369 \f
1370 /* Various efficient no-op patterns for aligning code labels.
1371 Note: Don't try to assemble the instructions in the comments.
1372 0L and 0w are not legal. */
1373 static const unsigned char f32_1[] =
1374 {0x90}; /* nop */
1375 static const unsigned char f32_2[] =
1376 {0x66,0x90}; /* xchg %ax,%ax */
1377 static const unsigned char f32_3[] =
1378 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1379 static const unsigned char f32_4[] =
1380 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1381 static const unsigned char f32_6[] =
1382 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1383 static const unsigned char f32_7[] =
1384 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1385 static const unsigned char f16_3[] =
1386 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1387 static const unsigned char f16_4[] =
1388 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1389 static const unsigned char jump_disp8[] =
1390 {0xeb}; /* jmp disp8 */
1391 static const unsigned char jump32_disp32[] =
1392 {0xe9}; /* jmp disp32 */
1393 static const unsigned char jump16_disp32[] =
1394 {0x66,0xe9}; /* jmp disp32 */
1395 /* 32-bit NOP patterns. */
1396 static const unsigned char *const f32_patt[] = {
1397 f32_1, f32_2, f32_3, f32_4, NULL, f32_6, f32_7
1398 };
1399 /* 16-bit NOP patterns. */
1400 static const unsigned char *const f16_patt[] = {
1401 f32_1, f32_2, f16_3, f16_4
1402 };
1403 /* nopl (%[re]ax) */
1404 static const unsigned char alt_3[] =
1405 {0x0f,0x1f,0x00};
1406 /* nopl 0(%[re]ax) */
1407 static const unsigned char alt_4[] =
1408 {0x0f,0x1f,0x40,0x00};
1409 /* nopl 0(%[re]ax,%[re]ax,1) */
1410 static const unsigned char alt_5[] =
1411 {0x0f,0x1f,0x44,0x00,0x00};
1412 /* nopw 0(%[re]ax,%[re]ax,1) */
1413 static const unsigned char alt_6[] =
1414 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1415 /* nopl 0L(%[re]ax) */
1416 static const unsigned char alt_7[] =
1417 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1418 /* nopl 0L(%[re]ax,%[re]ax,1) */
1419 static const unsigned char alt_8[] =
1420 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1421 /* nopw 0L(%[re]ax,%[re]ax,1) */
1422 static const unsigned char alt_9[] =
1423 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1424 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1425 static const unsigned char alt_10[] =
1426 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1427 /* data16 nopw %cs:0L(%eax,%eax,1) */
1428 static const unsigned char alt_11[] =
1429 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1430 /* 32-bit and 64-bit NOP patterns. */
1431 static const unsigned char *const alt_patt[] = {
1432 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1433 alt_9, alt_10, alt_11
1434 };
1435
1436 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1437 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1438
1439 static void
1440 i386_output_nops (char *where, const unsigned char *const *patt,
1441 int count, int max_single_nop_size)
1442
1443 {
1444 /* Place the longer NOP first. */
1445 int last;
1446 int offset;
1447 const unsigned char *nops;
1448
1449 if (max_single_nop_size < 1)
1450 {
1451 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1452 max_single_nop_size);
1453 return;
1454 }
1455
1456 nops = patt[max_single_nop_size - 1];
1457
1458 /* Use the smaller one if the requested one isn't available. */
1459 if (nops == NULL)
1460 {
1461 max_single_nop_size--;
1462 nops = patt[max_single_nop_size - 1];
1463 }
1464
1465 last = count % max_single_nop_size;
1466
1467 count -= last;
1468 for (offset = 0; offset < count; offset += max_single_nop_size)
1469 memcpy (where + offset, nops, max_single_nop_size);
1470
1471 if (last)
1472 {
1473 nops = patt[last - 1];
1474 if (nops == NULL)
1475 {
1476 /* Use the smaller one plus one-byte NOP if the needed one
1477 isn't available. */
1478 last--;
1479 nops = patt[last - 1];
1480 memcpy (where + offset, nops, last);
1481 where[offset + last] = *patt[0];
1482 }
1483 else
1484 memcpy (where + offset, nops, last);
1485 }
1486 }
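/* Worked example (editorial): with patt = alt_patt and
   max_single_nop_size = 4, a request for count = 11 bytes splits as
   last = 11 % 4 = 3, so two copies of the 4-byte alt_4 pattern are
   emitted, followed by the 3-byte alt_3.  */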
1487
1488 static INLINE int
1489 fits_in_imm7 (offsetT num)
1490 {
1491 return (num & 0x7f) == num;
1492 }
1493
1494 static INLINE int
1495 fits_in_imm31 (offsetT num)
1496 {
1497 return (num & 0x7fffffff) == num;
1498 }
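/* Illustrative sketch (editorial): both mask tests only pass for
   non-negative values, e.g. fits_in_imm7 (127) is true while
   fits_in_imm7 (128) and fits_in_imm7 (-1) are both false.  */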
1499
1500 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1501 single NOP instruction LIMIT. */
1502
1503 void
1504 i386_generate_nops (fragS *fragP, char *where, offsetT count, int limit)
1505 {
1506 const unsigned char *const *patt = NULL;
1507 int max_single_nop_size;
1508 /* Maximum number of NOPs before switching to jump over NOPs. */
1509 int max_number_of_nops;
1510
1511 switch (fragP->fr_type)
1512 {
1513 case rs_fill_nop:
1514 case rs_align_code:
1515 break;
1516 case rs_machine_dependent:
1517 /* Allow NOP padding for jumps and calls. */
1518 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
1519 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
1520 break;
1521 /* Fall through. */
1522 default:
1523 return;
1524 }
1525
1526 /* We need to decide which NOP sequence to use for 32bit and
1527 64bit. When -mtune= is used:
1528
1529 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1530 PROCESSOR_GENERIC32, f32_patt will be used.
1531 2. For the rest, alt_patt will be used.
1532
1533 When -mtune= isn't used, alt_patt will be used if
1534 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1535 be used.
1536
1537 When -march= or .arch is used, we can't use anything beyond
1538 cpu_arch_isa_flags. */
1539
1540 if (flag_code == CODE_16BIT)
1541 {
1542 patt = f16_patt;
1543 max_single_nop_size = sizeof (f16_patt) / sizeof (f16_patt[0]);
1544 /* Limit number of NOPs to 2 in 16-bit mode. */
1545 max_number_of_nops = 2;
1546 }
1547 else
1548 {
1549 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1550 {
1551 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1552 switch (cpu_arch_tune)
1553 {
1554 case PROCESSOR_UNKNOWN:
1555 /* We use cpu_arch_isa_flags to check if we SHOULD
1556 optimize with nops. */
1557 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1558 patt = alt_patt;
1559 else
1560 patt = f32_patt;
1561 break;
1562 case PROCESSOR_PENTIUM4:
1563 case PROCESSOR_NOCONA:
1564 case PROCESSOR_CORE:
1565 case PROCESSOR_CORE2:
1566 case PROCESSOR_COREI7:
1567 case PROCESSOR_L1OM:
1568 case PROCESSOR_K1OM:
1569 case PROCESSOR_GENERIC64:
1570 case PROCESSOR_K6:
1571 case PROCESSOR_ATHLON:
1572 case PROCESSOR_K8:
1573 case PROCESSOR_AMDFAM10:
1574 case PROCESSOR_BD:
1575 case PROCESSOR_ZNVER:
1576 case PROCESSOR_BT:
1577 patt = alt_patt;
1578 break;
1579 case PROCESSOR_I386:
1580 case PROCESSOR_I486:
1581 case PROCESSOR_PENTIUM:
1582 case PROCESSOR_PENTIUMPRO:
1583 case PROCESSOR_IAMCU:
1584 case PROCESSOR_GENERIC32:
1585 patt = f32_patt;
1586 break;
1587 }
1588 }
1589 else
1590 {
1591 switch (fragP->tc_frag_data.tune)
1592 {
1593 case PROCESSOR_UNKNOWN:
1594 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1595 PROCESSOR_UNKNOWN. */
1596 abort ();
1597 break;
1598
1599 case PROCESSOR_I386:
1600 case PROCESSOR_I486:
1601 case PROCESSOR_PENTIUM:
1602 case PROCESSOR_IAMCU:
1603 case PROCESSOR_K6:
1604 case PROCESSOR_ATHLON:
1605 case PROCESSOR_K8:
1606 case PROCESSOR_AMDFAM10:
1607 case PROCESSOR_BD:
1608 case PROCESSOR_ZNVER:
1609 case PROCESSOR_BT:
1610 case PROCESSOR_GENERIC32:
1611 /* We use cpu_arch_isa_flags to check if we CAN optimize
1612 with nops. */
1613 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1614 patt = alt_patt;
1615 else
1616 patt = f32_patt;
1617 break;
1618 case PROCESSOR_PENTIUMPRO:
1619 case PROCESSOR_PENTIUM4:
1620 case PROCESSOR_NOCONA:
1621 case PROCESSOR_CORE:
1622 case PROCESSOR_CORE2:
1623 case PROCESSOR_COREI7:
1624 case PROCESSOR_L1OM:
1625 case PROCESSOR_K1OM:
1626 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1627 patt = alt_patt;
1628 else
1629 patt = f32_patt;
1630 break;
1631 case PROCESSOR_GENERIC64:
1632 patt = alt_patt;
1633 break;
1634 }
1635 }
1636
1637 if (patt == f32_patt)
1638 {
1639 max_single_nop_size = sizeof (f32_patt) / sizeof (f32_patt[0]);
1640 /* Limit number of NOPs to 2 for older processors. */
1641 max_number_of_nops = 2;
1642 }
1643 else
1644 {
1645 max_single_nop_size = sizeof (alt_patt) / sizeof (alt_patt[0]);
1646 /* Limit number of NOPs to 7 for newer processors. */
1647 max_number_of_nops = 7;
1648 }
1649 }
1650
1651 if (limit == 0)
1652 limit = max_single_nop_size;
1653
1654 if (fragP->fr_type == rs_fill_nop)
1655 {
1656 /* Output NOPs for .nop directive. */
1657 if (limit > max_single_nop_size)
1658 {
1659 as_bad_where (fragP->fr_file, fragP->fr_line,
1660 _("invalid single nop size: %d "
1661 "(expect within [0, %d])"),
1662 limit, max_single_nop_size);
1663 return;
1664 }
1665 }
1666 else if (fragP->fr_type != rs_machine_dependent)
1667 fragP->fr_var = count;
1668
1669 if ((count / max_single_nop_size) > max_number_of_nops)
1670 {
1671 /* Generate jump over NOPs. */
1672 offsetT disp = count - 2;
1673 if (fits_in_imm7 (disp))
1674 {
1675 /* Use "jmp disp8" if possible. */
1676 count = disp;
1677 where[0] = jump_disp8[0];
1678 where[1] = count;
1679 where += 2;
1680 }
1681 else
1682 {
1683 unsigned int size_of_jump;
1684
1685 if (flag_code == CODE_16BIT)
1686 {
1687 where[0] = jump16_disp32[0];
1688 where[1] = jump16_disp32[1];
1689 size_of_jump = 2;
1690 }
1691 else
1692 {
1693 where[0] = jump32_disp32[0];
1694 size_of_jump = 1;
1695 }
1696
1697 count -= size_of_jump + 4;
1698 if (!fits_in_imm31 (count))
1699 {
1700 as_bad_where (fragP->fr_file, fragP->fr_line,
1701 _("jump over nop padding out of range"));
1702 return;
1703 }
1704
1705 md_number_to_chars (where + size_of_jump, count, 4);
1706 where += size_of_jump + 4;
1707 }
1708 }
1709
1710 /* Generate multiple NOPs. */
1711 i386_output_nops (where, patt, count, limit);
1712 }
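/* Worked example (editorial): with alt_patt, limit = 11 and
   max_number_of_nops = 7, a request for count = 100 bytes has
   100 / 11 = 9 > 7, so a 2-byte "jmp disp8" (eb 62) is emitted first
   and the remaining 98 bytes of NOPs are skipped by the jump.  */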
1713
1714 static INLINE int
1715 operand_type_all_zero (const union i386_operand_type *x)
1716 {
1717 switch (ARRAY_SIZE(x->array))
1718 {
1719 case 3:
1720 if (x->array[2])
1721 return 0;
1722 /* Fall through. */
1723 case 2:
1724 if (x->array[1])
1725 return 0;
1726 /* Fall through. */
1727 case 1:
1728 return !x->array[0];
1729 default:
1730 abort ();
1731 }
1732 }
1733
1734 static INLINE void
1735 operand_type_set (union i386_operand_type *x, unsigned int v)
1736 {
1737 switch (ARRAY_SIZE(x->array))
1738 {
1739 case 3:
1740 x->array[2] = v;
1741 /* Fall through. */
1742 case 2:
1743 x->array[1] = v;
1744 /* Fall through. */
1745 case 1:
1746 x->array[0] = v;
1747
1748 break;
1749 default:
1750 abort ();
1751 }
1752
1753 x->bitfield.class = ClassNone;
1754 x->bitfield.instance = InstanceNone;
1755 }
1756
1757 static INLINE int
1758 operand_type_equal (const union i386_operand_type *x,
1759 const union i386_operand_type *y)
1760 {
1761 switch (ARRAY_SIZE(x->array))
1762 {
1763 case 3:
1764 if (x->array[2] != y->array[2])
1765 return 0;
1766 /* Fall through. */
1767 case 2:
1768 if (x->array[1] != y->array[1])
1769 return 0;
1770 /* Fall through. */
1771 case 1:
1772 return x->array[0] == y->array[0];
1774 default:
1775 abort ();
1776 }
1777 }
1778
1779 static INLINE int
1780 cpu_flags_all_zero (const union i386_cpu_flags *x)
1781 {
1782 switch (ARRAY_SIZE (x->array))
1783 {
1784 case 4:
1785 if (x->array[3])
1786 return 0;
1787 /* Fall through. */
1788 case 3:
1789 if (x->array[2])
1790 return 0;
1791 /* Fall through. */
1792 case 2:
1793 if (x->array[1])
1794 return 0;
1795 /* Fall through. */
1796 case 1:
1797 return !x->array[0];
1798 default:
1799 abort ();
1800 }
1801 }
1802
1803 static INLINE int
1804 cpu_flags_equal (const union i386_cpu_flags *x,
1805 const union i386_cpu_flags *y)
1806 {
1807 switch (ARRAY_SIZE (x->array))
1808 {
1809 case 4:
1810 if (x->array[3] != y->array[3])
1811 return 0;
1812 /* Fall through. */
1813 case 3:
1814 if (x->array[2] != y->array[2])
1815 return 0;
1816 /* Fall through. */
1817 case 2:
1818 if (x->array[1] != y->array[1])
1819 return 0;
1820 /* Fall through. */
1821 case 1:
1822 return x->array[0] == y->array[0];
1824 default:
1825 abort ();
1826 }
1827 }
1828
1829 static INLINE int
1830 cpu_flags_check_cpu64 (i386_cpu_flags f)
1831 {
1832 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1833 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1834 }
1835
1836 static INLINE i386_cpu_flags
1837 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1838 {
1839 switch (ARRAY_SIZE (x.array))
1840 {
1841 case 4:
1842 x.array [3] &= y.array [3];
1843 /* Fall through. */
1844 case 3:
1845 x.array [2] &= y.array [2];
1846 /* Fall through. */
1847 case 2:
1848 x.array [1] &= y.array [1];
1849 /* Fall through. */
1850 case 1:
1851 x.array [0] &= y.array [0];
1852 break;
1853 default:
1854 abort ();
1855 }
1856 return x;
1857 }
1858
1859 static INLINE i386_cpu_flags
1860 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1861 {
1862 switch (ARRAY_SIZE (x.array))
1863 {
1864 case 4:
1865 x.array [3] |= y.array [3];
1866 /* Fall through. */
1867 case 3:
1868 x.array [2] |= y.array [2];
1869 /* Fall through. */
1870 case 2:
1871 x.array [1] |= y.array [1];
1872 /* Fall through. */
1873 case 1:
1874 x.array [0] |= y.array [0];
1875 break;
1876 default:
1877 abort ();
1878 }
1879 return x;
1880 }
1881
1882 static INLINE i386_cpu_flags
1883 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1884 {
1885 switch (ARRAY_SIZE (x.array))
1886 {
1887 case 4:
1888 x.array [3] &= ~y.array [3];
1889 /* Fall through. */
1890 case 3:
1891 x.array [2] &= ~y.array [2];
1892 /* Fall through. */
1893 case 2:
1894 x.array [1] &= ~y.array [1];
1895 /* Fall through. */
1896 case 1:
1897 x.array [0] &= ~y.array [0];
1898 break;
1899 default:
1900 abort ();
1901 }
1902 return x;
1903 }
1904
1905 static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
1906
1907 #define CPU_FLAGS_ARCH_MATCH 0x1
1908 #define CPU_FLAGS_64BIT_MATCH 0x2
1909
1910 #define CPU_FLAGS_PERFECT_MATCH \
1911 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1912
1913 /* Return CPU flags match bits. */
1914
1915 static int
1916 cpu_flags_match (const insn_template *t)
1917 {
1918 i386_cpu_flags x = t->cpu_flags;
1919 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1920
1921 x.bitfield.cpu64 = 0;
1922 x.bitfield.cpuno64 = 0;
1923
1924 if (cpu_flags_all_zero (&x))
1925 {
1926 /* This instruction is available on all archs. */
1927 match |= CPU_FLAGS_ARCH_MATCH;
1928 }
1929 else
1930 {
1931 /* This instruction is available only on some archs. */
1932 i386_cpu_flags cpu = cpu_arch_flags;
1933
1934 /* AVX512VL is not a standalone feature - match it and then strip it. */
1935 if (x.bitfield.cpuavx512vl && !cpu.bitfield.cpuavx512vl)
1936 return match;
1937 x.bitfield.cpuavx512vl = 0;
1938
1939 cpu = cpu_flags_and (x, cpu);
1940 if (!cpu_flags_all_zero (&cpu))
1941 {
1942 if (x.bitfield.cpuavx)
1943 {
1944 /* We need to check a few extra flags with AVX. */
1945 if (cpu.bitfield.cpuavx
1946 && (!t->opcode_modifier.sse2avx
1947 || (sse2avx && !i.prefix[DATA_PREFIX]))
1948 && (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1949 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1950 && (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
1951 match |= CPU_FLAGS_ARCH_MATCH;
1952 }
1953 else if (x.bitfield.cpuavx512f)
1954 {
1955 /* We need to check a few extra flags with AVX512F. */
1956 if (cpu.bitfield.cpuavx512f
1957 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1958 && (!x.bitfield.cpuvaes || cpu.bitfield.cpuvaes)
1959 && (!x.bitfield.cpuvpclmulqdq || cpu.bitfield.cpuvpclmulqdq))
1960 match |= CPU_FLAGS_ARCH_MATCH;
1961 }
1962 else
1963 match |= CPU_FLAGS_ARCH_MATCH;
1964 }
1965 }
1966 return match;
1967 }
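
/* For example, a template with empty cpu_flags (available everywhere)
   yields CPU_FLAGS_PERFECT_MATCH here, while in 64-bit mode a template
   carrying CpuNo64 fails cpu_flags_check_cpu64 and can never get
   CPU_FLAGS_64BIT_MATCH, so the caller rejects it. */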
1968
1969 static INLINE i386_operand_type
1970 operand_type_and (i386_operand_type x, i386_operand_type y)
1971 {
1972 if (x.bitfield.class != y.bitfield.class)
1973 x.bitfield.class = ClassNone;
1974 if (x.bitfield.instance != y.bitfield.instance)
1975 x.bitfield.instance = InstanceNone;
1976
1977 switch (ARRAY_SIZE (x.array))
1978 {
1979 case 3:
1980 x.array [2] &= y.array [2];
1981 /* Fall through. */
1982 case 2:
1983 x.array [1] &= y.array [1];
1984 /* Fall through. */
1985 case 1:
1986 x.array [0] &= y.array [0];
1987 break;
1988 default:
1989 abort ();
1990 }
1991 return x;
1992 }
1993
1994 static INLINE i386_operand_type
1995 operand_type_and_not (i386_operand_type x, i386_operand_type y)
1996 {
1997 gas_assert (y.bitfield.class == ClassNone);
1998 gas_assert (y.bitfield.instance == InstanceNone);
1999
2000 switch (ARRAY_SIZE (x.array))
2001 {
2002 case 3:
2003 x.array [2] &= ~y.array [2];
2004 /* Fall through. */
2005 case 2:
2006 x.array [1] &= ~y.array [1];
2007 /* Fall through. */
2008 case 1:
2009 x.array [0] &= ~y.array [0];
2010 break;
2011 default:
2012 abort ();
2013 }
2014 return x;
2015 }
2016
2017 static INLINE i386_operand_type
2018 operand_type_or (i386_operand_type x, i386_operand_type y)
2019 {
2020 gas_assert (x.bitfield.class == ClassNone
2021 || y.bitfield.class == ClassNone
2022 || x.bitfield.class == y.bitfield.class);
2023 gas_assert (x.bitfield.instance == InstanceNone
2024 || y.bitfield.instance == InstanceNone
2025 || x.bitfield.instance == y.bitfield.instance);
2026
2027 switch (ARRAY_SIZE (x.array))
2028 {
2029 case 3:
2030 x.array [2] |= y.array [2];
2031 /* Fall through. */
2032 case 2:
2033 x.array [1] |= y.array [1];
2034 /* Fall through. */
2035 case 1:
2036 x.array [0] |= y.array [0];
2037 break;
2038 default:
2039 abort ();
2040 }
2041 return x;
2042 }
2043
2044 static INLINE i386_operand_type
2045 operand_type_xor (i386_operand_type x, i386_operand_type y)
2046 {
2047 gas_assert (y.bitfield.class == ClassNone);
2048 gas_assert (y.bitfield.instance == InstanceNone);
2049
2050 switch (ARRAY_SIZE (x.array))
2051 {
2052 case 3:
2053 x.array [2] ^= y.array [2];
2054 /* Fall through. */
2055 case 2:
2056 x.array [1] ^= y.array [1];
2057 /* Fall through. */
2058 case 1:
2059 x.array [0] ^= y.array [0];
2060 break;
2061 default:
2062 abort ();
2063 }
2064 return x;
2065 }
2066
2067 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
2068 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
2069 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
2070 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
2071 static const i386_operand_type anydisp = OPERAND_TYPE_ANYDISP;
2072 static const i386_operand_type anyimm = OPERAND_TYPE_ANYIMM;
2073 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
2074 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
2075 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
2076 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
2077 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
2078 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
2079 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
2080 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
2081 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
2082 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
2083 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
2084
2085 enum operand_type
2086 {
2087 reg,
2088 imm,
2089 disp,
2090 anymem
2091 };
2092
2093 static INLINE int
2094 operand_type_check (i386_operand_type t, enum operand_type c)
2095 {
2096 switch (c)
2097 {
2098 case reg:
2099 return t.bitfield.class == Reg;
2100
2101 case imm:
2102 return (t.bitfield.imm8
2103 || t.bitfield.imm8s
2104 || t.bitfield.imm16
2105 || t.bitfield.imm32
2106 || t.bitfield.imm32s
2107 || t.bitfield.imm64);
2108
2109 case disp:
2110 return (t.bitfield.disp8
2111 || t.bitfield.disp16
2112 || t.bitfield.disp32
2113 || t.bitfield.disp32s
2114 || t.bitfield.disp64);
2115
2116 case anymem:
2117 return (t.bitfield.disp8
2118 || t.bitfield.disp16
2119 || t.bitfield.disp32
2120 || t.bitfield.disp32s
2121 || t.bitfield.disp64
2122 || t.bitfield.baseindex);
2123
2124 default:
2125 abort ();
2126 }
2127
2128 return 0;
2129 }
2130
2131 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2132 between operand GIVEN and operand WANTED for instruction template T. */
2133
2134 static INLINE int
2135 match_operand_size (const insn_template *t, unsigned int wanted,
2136 unsigned int given)
2137 {
2138 return !((i.types[given].bitfield.byte
2139 && !t->operand_types[wanted].bitfield.byte)
2140 || (i.types[given].bitfield.word
2141 && !t->operand_types[wanted].bitfield.word)
2142 || (i.types[given].bitfield.dword
2143 && !t->operand_types[wanted].bitfield.dword)
2144 || (i.types[given].bitfield.qword
2145 && !t->operand_types[wanted].bitfield.qword)
2146 || (i.types[given].bitfield.tbyte
2147 && !t->operand_types[wanted].bitfield.tbyte));
2148 }
2149
2150 /* Return 1 if there is no conflict in SIMD register between operand
2151 GIVEN and operand WANTED for instruction template T. */
2152
2153 static INLINE int
2154 match_simd_size (const insn_template *t, unsigned int wanted,
2155 unsigned int given)
2156 {
2157 return !((i.types[given].bitfield.xmmword
2158 && !t->operand_types[wanted].bitfield.xmmword)
2159 || (i.types[given].bitfield.ymmword
2160 && !t->operand_types[wanted].bitfield.ymmword)
2161 || (i.types[given].bitfield.zmmword
2162 && !t->operand_types[wanted].bitfield.zmmword));
2163 }
2164
2165 /* Return 1 if there is no conflict in any size between operand GIVEN
2166 and operand WANTED for instruction template T. */
2167
2168 static INLINE int
2169 match_mem_size (const insn_template *t, unsigned int wanted,
2170 unsigned int given)
2171 {
2172 return (match_operand_size (t, wanted, given)
2173 && !((i.types[given].bitfield.unspecified
2174 && !i.broadcast
2175 && !t->operand_types[wanted].bitfield.unspecified)
2176 || (i.types[given].bitfield.fword
2177 && !t->operand_types[wanted].bitfield.fword)
2178 /* For scalar opcode templates to allow register and memory
2179 operands at the same time, some special casing is needed
2180 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2181 down-conversion vpmov*. */
2182 || ((t->operand_types[wanted].bitfield.class == RegSIMD
2183 && t->operand_types[wanted].bitfield.byte
2184 + t->operand_types[wanted].bitfield.word
2185 + t->operand_types[wanted].bitfield.dword
2186 + t->operand_types[wanted].bitfield.qword
2187 > !!t->opcode_modifier.broadcast)
2188 ? (i.types[given].bitfield.xmmword
2189 || i.types[given].bitfield.ymmword
2190 || i.types[given].bitfield.zmmword)
2191 : !match_simd_size (t, wanted, given))));
2192 }
2193
2194 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2195 operands for instruction template T, and it has MATCH_REVERSE set if there
2196 is no size conflict on any operands for the template with operands reversed
2197 (and the template allows for reversing in the first place). */
2198
2199 #define MATCH_STRAIGHT 1
2200 #define MATCH_REVERSE 2
2201
2202 static INLINE unsigned int
2203 operand_size_match (const insn_template *t)
2204 {
2205 unsigned int j, match = MATCH_STRAIGHT;
2206
2207 /* Don't check non-absolute jump instructions. */
2208 if (t->opcode_modifier.jump
2209 && t->opcode_modifier.jump != JUMP_ABSOLUTE)
2210 return match;
2211
2212 /* Check memory and accumulator operand size. */
2213 for (j = 0; j < i.operands; j++)
2214 {
2215 if (i.types[j].bitfield.class != Reg
2216 && i.types[j].bitfield.class != RegSIMD
2217 && t->opcode_modifier.anysize)
2218 continue;
2219
2220 if (t->operand_types[j].bitfield.class == Reg
2221 && !match_operand_size (t, j, j))
2222 {
2223 match = 0;
2224 break;
2225 }
2226
2227 if (t->operand_types[j].bitfield.class == RegSIMD
2228 && !match_simd_size (t, j, j))
2229 {
2230 match = 0;
2231 break;
2232 }
2233
2234 if (t->operand_types[j].bitfield.instance == Accum
2235 && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
2236 {
2237 match = 0;
2238 break;
2239 }
2240
2241 if ((i.flags[j] & Operand_Mem) && !match_mem_size (t, j, j))
2242 {
2243 match = 0;
2244 break;
2245 }
2246 }
2247
2248 if (!t->opcode_modifier.d)
2249 {
2250 mismatch:
2251 if (!match)
2252 i.error = operand_size_mismatch;
2253 return match;
2254 }
2255
2256 /* Check reverse. */
2257 gas_assert (i.operands >= 2 && i.operands <= 3);
2258
2259 for (j = 0; j < i.operands; j++)
2260 {
2261 unsigned int given = i.operands - j - 1;
2262
2263 if (t->operand_types[j].bitfield.class == Reg
2264 && !match_operand_size (t, j, given))
2265 goto mismatch;
2266
2267 if (t->operand_types[j].bitfield.class == RegSIMD
2268 && !match_simd_size (t, j, given))
2269 goto mismatch;
2270
2271 if (t->operand_types[j].bitfield.instance == Accum
2272 && (!match_operand_size (t, j, given)
2273 || !match_simd_size (t, j, given)))
2274 goto mismatch;
2275
2276 if ((i.flags[given] & Operand_Mem) && !match_mem_size (t, j, given))
2277 goto mismatch;
2278 }
2279
2280 return match | MATCH_REVERSE;
2281 }
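
/* For a template with the D (direction) modifier and two same-size
   register operands, e.g. "mov %eax, %ebx", both checks above succeed
   and the result is MATCH_STRAIGHT | MATCH_REVERSE, letting the caller
   choose either encoding direction. */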
2282
2283 static INLINE int
2284 operand_type_match (i386_operand_type overlap,
2285 i386_operand_type given)
2286 {
2287 i386_operand_type temp = overlap;
2288
2289 temp.bitfield.unspecified = 0;
2290 temp.bitfield.byte = 0;
2291 temp.bitfield.word = 0;
2292 temp.bitfield.dword = 0;
2293 temp.bitfield.fword = 0;
2294 temp.bitfield.qword = 0;
2295 temp.bitfield.tbyte = 0;
2296 temp.bitfield.xmmword = 0;
2297 temp.bitfield.ymmword = 0;
2298 temp.bitfield.zmmword = 0;
2299 if (operand_type_all_zero (&temp))
2300 goto mismatch;
2301
2302 if (given.bitfield.baseindex == overlap.bitfield.baseindex)
2303 return 1;
2304
2305 mismatch:
2306 i.error = operand_type_mismatch;
2307 return 0;
2308 }
2309
2310 /* If the given types g0 and g1 are registers, they must be of the same
2311 type unless the expected operand type register overlap is null.
2312 Some Intel syntax memory operand size checking also happens here. */
2313
2314 static INLINE int
2315 operand_type_register_match (i386_operand_type g0,
2316 i386_operand_type t0,
2317 i386_operand_type g1,
2318 i386_operand_type t1)
2319 {
2320 if (g0.bitfield.class != Reg
2321 && g0.bitfield.class != RegSIMD
2322 && (!operand_type_check (g0, anymem)
2323 || g0.bitfield.unspecified
2324 || (t0.bitfield.class != Reg
2325 && t0.bitfield.class != RegSIMD)))
2326 return 1;
2327
2328 if (g1.bitfield.class != Reg
2329 && g1.bitfield.class != RegSIMD
2330 && (!operand_type_check (g1, anymem)
2331 || g1.bitfield.unspecified
2332 || (t1.bitfield.class != Reg
2333 && t1.bitfield.class != RegSIMD)))
2334 return 1;
2335
2336 if (g0.bitfield.byte == g1.bitfield.byte
2337 && g0.bitfield.word == g1.bitfield.word
2338 && g0.bitfield.dword == g1.bitfield.dword
2339 && g0.bitfield.qword == g1.bitfield.qword
2340 && g0.bitfield.xmmword == g1.bitfield.xmmword
2341 && g0.bitfield.ymmword == g1.bitfield.ymmword
2342 && g0.bitfield.zmmword == g1.bitfield.zmmword)
2343 return 1;
2344
2345 if (!(t0.bitfield.byte & t1.bitfield.byte)
2346 && !(t0.bitfield.word & t1.bitfield.word)
2347 && !(t0.bitfield.dword & t1.bitfield.dword)
2348 && !(t0.bitfield.qword & t1.bitfield.qword)
2349 && !(t0.bitfield.xmmword & t1.bitfield.xmmword)
2350 && !(t0.bitfield.ymmword & t1.bitfield.ymmword)
2351 && !(t0.bitfield.zmmword & t1.bitfield.zmmword))
2352 return 1;
2353
2354 i.error = register_type_mismatch;
2355
2356 return 0;
2357 }
2358
2359 static INLINE unsigned int
2360 register_number (const reg_entry *r)
2361 {
2362 unsigned int nr = r->reg_num;
2363
2364 if (r->reg_flags & RegRex)
2365 nr += 8;
2366
2367 if (r->reg_flags & RegVRex)
2368 nr += 16;
2369
2370 return nr;
2371 }
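
/* For example, %ebx yields 3, %r12 yields 4 + 8 == 12 (RegRex), and
   %xmm20 yields 4 + 16 == 20 (RegVRex); %xmm28 carries both flags,
   giving 4 + 8 + 16 == 28. */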
2372
2373 static INLINE unsigned int
2374 mode_from_disp_size (i386_operand_type t)
2375 {
2376 if (t.bitfield.disp8)
2377 return 1;
2378 else if (t.bitfield.disp16
2379 || t.bitfield.disp32
2380 || t.bitfield.disp32s)
2381 return 2;
2382 else
2383 return 0;
2384 }
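
/* The value returned above is the ModRM.mod field to use: 1 selects a
   disp8, 2 a disp16/disp32, and 0 no displacement at all. */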
2385
2386 static INLINE int
2387 fits_in_signed_byte (addressT num)
2388 {
2389 return num + 0x80 <= 0xff;
2390 }
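
/* The unsigned addition above deliberately wraps: adding 0x80 maps the
   signed range [-0x80, 0x7f] onto [0, 0xff], so a single unsigned
   comparison suffices and no signed overflow can occur. The other
   fits_in_* helpers below use the same trick. */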
2391
2392 static INLINE int
2393 fits_in_unsigned_byte (addressT num)
2394 {
2395 return num <= 0xff;
2396 }
2397
2398 static INLINE int
2399 fits_in_unsigned_word (addressT num)
2400 {
2401 return num <= 0xffff;
2402 }
2403
2404 static INLINE int
2405 fits_in_signed_word (addressT num)
2406 {
2407 return num + 0x8000 <= 0xffff;
2408 }
2409
2410 static INLINE int
2411 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
2412 {
2413 #ifndef BFD64
2414 return 1;
2415 #else
2416 return num + 0x80000000 <= 0xffffffff;
2417 #endif
2418 } /* fits_in_signed_long() */
2419
2420 static INLINE int
2421 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
2422 {
2423 #ifndef BFD64
2424 return 1;
2425 #else
2426 return num <= 0xffffffff;
2427 #endif
2428 } /* fits_in_unsigned_long() */
2429
2430 static INLINE int
2431 fits_in_disp8 (offsetT num)
2432 {
2433 int shift = i.memshift;
2434 unsigned int mask;
2435
2436 if (shift == -1)
2437 abort ();
2438
2439 mask = (1 << shift) - 1;
2440
2441 /* Return 0 if NUM isn't properly aligned. */
2442 if ((num & mask))
2443 return 0;
2444
2445 /* Check if NUM will fit in 8bit after shift. */
2446 return fits_in_signed_byte (num >> shift);
2447 }
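
/* Example of the compressed disp8*N encoding this implements: with
   i.memshift == 6 (a full 64-byte vector memory operand), a
   displacement of 0x100 is 64-byte aligned and 0x100 >> 6 == 4 fits in
   a signed byte, so it can be encoded as the single byte 4; 0x20 is
   not 64-byte aligned and needs a full disp32 instead. */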
2448
2449 static INLINE int
2450 fits_in_imm4 (offsetT num)
2451 {
2452 return (num & 0xf) == num;
2453 }
2454
2455 static i386_operand_type
2456 smallest_imm_type (offsetT num)
2457 {
2458 i386_operand_type t;
2459
2460 operand_type_set (&t, 0);
2461 t.bitfield.imm64 = 1;
2462
2463 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
2464 {
2465 /* This code is disabled on the 486 because all the Imm1 forms
2466 in the opcode table are slower on the i486. They're the
2467 versions with the implicitly specified single-position
2468 displacement, which has another syntax if you really want to
2469 use that form. */
2470 t.bitfield.imm1 = 1;
2471 t.bitfield.imm8 = 1;
2472 t.bitfield.imm8s = 1;
2473 t.bitfield.imm16 = 1;
2474 t.bitfield.imm32 = 1;
2475 t.bitfield.imm32s = 1;
2476 }
2477 else if (fits_in_signed_byte (num))
2478 {
2479 t.bitfield.imm8 = 1;
2480 t.bitfield.imm8s = 1;
2481 t.bitfield.imm16 = 1;
2482 t.bitfield.imm32 = 1;
2483 t.bitfield.imm32s = 1;
2484 }
2485 else if (fits_in_unsigned_byte (num))
2486 {
2487 t.bitfield.imm8 = 1;
2488 t.bitfield.imm16 = 1;
2489 t.bitfield.imm32 = 1;
2490 t.bitfield.imm32s = 1;
2491 }
2492 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2493 {
2494 t.bitfield.imm16 = 1;
2495 t.bitfield.imm32 = 1;
2496 t.bitfield.imm32s = 1;
2497 }
2498 else if (fits_in_signed_long (num))
2499 {
2500 t.bitfield.imm32 = 1;
2501 t.bitfield.imm32s = 1;
2502 }
2503 else if (fits_in_unsigned_long (num))
2504 t.bitfield.imm32 = 1;
2505
2506 return t;
2507 }
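
/* For instance, smallest_imm_type (128) sets imm8 (it fits as an
   unsigned byte) but not imm8s, together with imm16/imm32/imm32s,
   while smallest_imm_type (0x10000) only adds imm32 and imm32s; imm64
   is always set up front. */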
2508
2509 static offsetT
2510 offset_in_range (offsetT val, int size)
2511 {
2512 addressT mask;
2513
2514 switch (size)
2515 {
2516 case 1: mask = ((addressT) 1 << 8) - 1; break;
2517 case 2: mask = ((addressT) 1 << 16) - 1; break;
2518 case 4: mask = ((addressT) 2 << 31) - 1; break;
2519 #ifdef BFD64
2520 case 8: mask = ((addressT) 2 << 63) - 1; break;
2521 #endif
2522 default: abort ();
2523 }
2524
2525 #ifdef BFD64
2526 /* If BFD64, sign extend val for 32bit address mode. */
2527 if (flag_code != CODE_64BIT
2528 || i.prefix[ADDR_PREFIX])
2529 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2530 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2531 #endif
2532
2533 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2534 {
2535 char buf1[40], buf2[40];
2536
2537 sprint_value (buf1, val);
2538 sprint_value (buf2, val & mask);
2539 as_warn (_("%s shortened to %s"), buf1, buf2);
2540 }
2541 return val & mask;
2542 }
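
/* For example, offset_in_range (0x12345, 1) masks the value down to
   0x45 and issues a "shortened to" warning, since the discarded bits
   are neither all zero nor all one. */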
2543
2544 enum PREFIX_GROUP
2545 {
2546 PREFIX_EXIST = 0,
2547 PREFIX_LOCK,
2548 PREFIX_REP,
2549 PREFIX_DS,
2550 PREFIX_OTHER
2551 };
2552
2553 /* Returns
2554 a. PREFIX_EXIST if attempting to add a prefix where one from the
2555 same class already exists.
2556 b. PREFIX_LOCK if lock prefix is added.
2557 c. PREFIX_REP if rep/repne prefix is added.
2558 d. PREFIX_DS if ds prefix is added.
2559 e. PREFIX_OTHER if other prefix is added.
2560 */
2561
2562 static enum PREFIX_GROUP
2563 add_prefix (unsigned int prefix)
2564 {
2565 enum PREFIX_GROUP ret = PREFIX_OTHER;
2566 unsigned int q;
2567
2568 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2569 && flag_code == CODE_64BIT)
2570 {
2571 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2572 || (i.prefix[REX_PREFIX] & prefix & REX_R)
2573 || (i.prefix[REX_PREFIX] & prefix & REX_X)
2574 || (i.prefix[REX_PREFIX] & prefix & REX_B))
2575 ret = PREFIX_EXIST;
2576 q = REX_PREFIX;
2577 }
2578 else
2579 {
2580 switch (prefix)
2581 {
2582 default:
2583 abort ();
2584
2585 case DS_PREFIX_OPCODE:
2586 ret = PREFIX_DS;
2587 /* Fall through. */
2588 case CS_PREFIX_OPCODE:
2589 case ES_PREFIX_OPCODE:
2590 case FS_PREFIX_OPCODE:
2591 case GS_PREFIX_OPCODE:
2592 case SS_PREFIX_OPCODE:
2593 q = SEG_PREFIX;
2594 break;
2595
2596 case REPNE_PREFIX_OPCODE:
2597 case REPE_PREFIX_OPCODE:
2598 q = REP_PREFIX;
2599 ret = PREFIX_REP;
2600 break;
2601
2602 case LOCK_PREFIX_OPCODE:
2603 q = LOCK_PREFIX;
2604 ret = PREFIX_LOCK;
2605 break;
2606
2607 case FWAIT_OPCODE:
2608 q = WAIT_PREFIX;
2609 break;
2610
2611 case ADDR_PREFIX_OPCODE:
2612 q = ADDR_PREFIX;
2613 break;
2614
2615 case DATA_PREFIX_OPCODE:
2616 q = DATA_PREFIX;
2617 break;
2618 }
2619 if (i.prefix[q] != 0)
2620 ret = PREFIX_EXIST;
2621 }
2622
2623 if (ret)
2624 {
2625 if (!i.prefix[q])
2626 ++i.prefixes;
2627 i.prefix[q] |= prefix;
2628 }
2629 else
2630 as_bad (_("same type of prefix used twice"));
2631
2632 return ret;
2633 }
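
/* Typical use: assembling "lock addl $1, (%eax)" calls
   add_prefix (LOCK_PREFIX_OPCODE), which records the 0xf0 byte in
   i.prefix[LOCK_PREFIX] and returns PREFIX_LOCK; a second lock prefix
   on the same insn would return PREFIX_EXIST and report "same type of
   prefix used twice". */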
2634
2635 static void
2636 update_code_flag (int value, int check)
2637 {
2638 PRINTF_LIKE ((*as_error));
2639
2640 flag_code = (enum flag_code) value;
2641 if (flag_code == CODE_64BIT)
2642 {
2643 cpu_arch_flags.bitfield.cpu64 = 1;
2644 cpu_arch_flags.bitfield.cpuno64 = 0;
2645 }
2646 else
2647 {
2648 cpu_arch_flags.bitfield.cpu64 = 0;
2649 cpu_arch_flags.bitfield.cpuno64 = 1;
2650 }
2651 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2652 {
2653 if (check)
2654 as_error = as_fatal;
2655 else
2656 as_error = as_bad;
2657 (*as_error) (_("64bit mode not supported on `%s'."),
2658 cpu_arch_name ? cpu_arch_name : default_arch);
2659 }
2660 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2661 {
2662 if (check)
2663 as_error = as_fatal;
2664 else
2665 as_error = as_bad;
2666 (*as_error) (_("32bit mode not supported on `%s'."),
2667 cpu_arch_name ? cpu_arch_name : default_arch);
2668 }
2669 stackop_size = '\0';
2670 }
2671
2672 static void
2673 set_code_flag (int value)
2674 {
2675 update_code_flag (value, 0);
2676 }
2677
2678 static void
2679 set_16bit_gcc_code_flag (int new_code_flag)
2680 {
2681 flag_code = (enum flag_code) new_code_flag;
2682 if (flag_code != CODE_16BIT)
2683 abort ();
2684 cpu_arch_flags.bitfield.cpu64 = 0;
2685 cpu_arch_flags.bitfield.cpuno64 = 1;
2686 stackop_size = LONG_MNEM_SUFFIX;
2687 }
2688
2689 static void
2690 set_intel_syntax (int syntax_flag)
2691 {
2692 /* Find out if register prefixing is specified. */
2693 int ask_naked_reg = 0;
2694
2695 SKIP_WHITESPACE ();
2696 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2697 {
2698 char *string;
2699 int e = get_symbol_name (&string);
2700
2701 if (strcmp (string, "prefix") == 0)
2702 ask_naked_reg = 1;
2703 else if (strcmp (string, "noprefix") == 0)
2704 ask_naked_reg = -1;
2705 else
2706 as_bad (_("bad argument to syntax directive."));
2707 (void) restore_line_pointer (e);
2708 }
2709 demand_empty_rest_of_line ();
2710
2711 intel_syntax = syntax_flag;
2712
2713 if (ask_naked_reg == 0)
2714 allow_naked_reg = (intel_syntax
2715 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2716 else
2717 allow_naked_reg = (ask_naked_reg < 0);
2718
2719 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2720
2721 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2722 identifier_chars['$'] = intel_syntax ? '$' : 0;
2723 register_prefix = allow_naked_reg ? "" : "%";
2724 }
2725
2726 static void
2727 set_intel_mnemonic (int mnemonic_flag)
2728 {
2729 intel_mnemonic = mnemonic_flag;
2730 }
2731
2732 static void
2733 set_allow_index_reg (int flag)
2734 {
2735 allow_index_reg = flag;
2736 }
2737
2738 static void
2739 set_check (int what)
2740 {
2741 enum check_kind *kind;
2742 const char *str;
2743
2744 if (what)
2745 {
2746 kind = &operand_check;
2747 str = "operand";
2748 }
2749 else
2750 {
2751 kind = &sse_check;
2752 str = "sse";
2753 }
2754
2755 SKIP_WHITESPACE ();
2756
2757 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2758 {
2759 char *string;
2760 int e = get_symbol_name (&string);
2761
2762 if (strcmp (string, "none") == 0)
2763 *kind = check_none;
2764 else if (strcmp (string, "warning") == 0)
2765 *kind = check_warning;
2766 else if (strcmp (string, "error") == 0)
2767 *kind = check_error;
2768 else
2769 as_bad (_("bad argument to %s_check directive."), str);
2770 (void) restore_line_pointer (e);
2771 }
2772 else
2773 as_bad (_("missing argument for %s_check directive"), str);
2774
2775 demand_empty_rest_of_line ();
2776 }
2777
2778 static void
2779 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2780 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2781 {
2782 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2783 static const char *arch;
2784
2785 /* Intel MCU, L1OM and K1OM are only supported on ELF. */
2786 if (!IS_ELF)
2787 return;
2788
2789 if (!arch)
2790 {
2791 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2792 use default_arch. */
2793 arch = cpu_arch_name;
2794 if (!arch)
2795 arch = default_arch;
2796 }
2797
2798 /* If we are targeting Intel MCU, we must enable it. */
2799 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
2800 || new_flag.bitfield.cpuiamcu)
2801 return;
2802
2803 /* If we are targeting Intel L1OM, we must enable it. */
2804 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2805 || new_flag.bitfield.cpul1om)
2806 return;
2807
2808 /* If we are targeting Intel K1OM, we must enable it. */
2809 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2810 || new_flag.bitfield.cpuk1om)
2811 return;
2812
2813 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2814 #endif
2815 }
2816
2817 static void
2818 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2819 {
2820 SKIP_WHITESPACE ();
2821
2822 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2823 {
2824 char *string;
2825 int e = get_symbol_name (&string);
2826 unsigned int j;
2827 i386_cpu_flags flags;
2828
2829 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2830 {
2831 if (strcmp (string, cpu_arch[j].name) == 0)
2832 {
2833 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2834
2835 if (*string != '.')
2836 {
2837 cpu_arch_name = cpu_arch[j].name;
2838 cpu_sub_arch_name = NULL;
2839 cpu_arch_flags = cpu_arch[j].flags;
2840 if (flag_code == CODE_64BIT)
2841 {
2842 cpu_arch_flags.bitfield.cpu64 = 1;
2843 cpu_arch_flags.bitfield.cpuno64 = 0;
2844 }
2845 else
2846 {
2847 cpu_arch_flags.bitfield.cpu64 = 0;
2848 cpu_arch_flags.bitfield.cpuno64 = 1;
2849 }
2850 cpu_arch_isa = cpu_arch[j].type;
2851 cpu_arch_isa_flags = cpu_arch[j].flags;
2852 if (!cpu_arch_tune_set)
2853 {
2854 cpu_arch_tune = cpu_arch_isa;
2855 cpu_arch_tune_flags = cpu_arch_isa_flags;
2856 }
2857 break;
2858 }
2859
2860 flags = cpu_flags_or (cpu_arch_flags,
2861 cpu_arch[j].flags);
2862
2863 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2864 {
2865 if (cpu_sub_arch_name)
2866 {
2867 char *name = cpu_sub_arch_name;
2868 cpu_sub_arch_name = concat (name,
2869 cpu_arch[j].name,
2870 (const char *) NULL);
2871 free (name);
2872 }
2873 else
2874 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2875 cpu_arch_flags = flags;
2876 cpu_arch_isa_flags = flags;
2877 }
2878 else
2879 cpu_arch_isa_flags
2880 = cpu_flags_or (cpu_arch_isa_flags,
2881 cpu_arch[j].flags);
2882 (void) restore_line_pointer (e);
2883 demand_empty_rest_of_line ();
2884 return;
2885 }
2886 }
2887
2888 if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
2889 {
2890 /* Disable an ISA extension. */
2891 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
2892 if (strcmp (string + 1, cpu_noarch [j].name) == 0)
2893 {
2894 flags = cpu_flags_and_not (cpu_arch_flags,
2895 cpu_noarch[j].flags);
2896 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2897 {
2898 if (cpu_sub_arch_name)
2899 {
2900 char *name = cpu_sub_arch_name;
2901 cpu_sub_arch_name = concat (name, string,
2902 (const char *) NULL);
2903 free (name);
2904 }
2905 else
2906 cpu_sub_arch_name = xstrdup (string);
2907 cpu_arch_flags = flags;
2908 cpu_arch_isa_flags = flags;
2909 }
2910 (void) restore_line_pointer (e);
2911 demand_empty_rest_of_line ();
2912 return;
2913 }
2914
2915 j = ARRAY_SIZE (cpu_arch);
2916 }
2917
2918 if (j >= ARRAY_SIZE (cpu_arch))
2919 as_bad (_("no such architecture: `%s'"), string);
2920
2921 *input_line_pointer = e;
2922 }
2923 else
2924 as_bad (_("missing cpu architecture"));
2925
2926 no_cond_jump_promotion = 0;
2927 if (*input_line_pointer == ','
2928 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2929 {
2930 char *string;
2931 char e;
2932
2933 ++input_line_pointer;
2934 e = get_symbol_name (&string);
2935
2936 if (strcmp (string, "nojumps") == 0)
2937 no_cond_jump_promotion = 1;
2938 else if (strcmp (string, "jumps") == 0)
2939 ;
2940 else
2941 as_bad (_("no such architecture modifier: `%s'"), string);
2942
2943 (void) restore_line_pointer (e);
2944 }
2945
2946 demand_empty_rest_of_line ();
2947 }
2948
2949 enum bfd_architecture
2950 i386_arch (void)
2951 {
2952 if (cpu_arch_isa == PROCESSOR_L1OM)
2953 {
2954 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2955 || flag_code != CODE_64BIT)
2956 as_fatal (_("Intel L1OM is 64bit ELF only"));
2957 return bfd_arch_l1om;
2958 }
2959 else if (cpu_arch_isa == PROCESSOR_K1OM)
2960 {
2961 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2962 || flag_code != CODE_64BIT)
2963 as_fatal (_("Intel K1OM is 64bit ELF only"));
2964 return bfd_arch_k1om;
2965 }
2966 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2967 {
2968 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2969 || flag_code == CODE_64BIT)
2970 as_fatal (_("Intel MCU is 32bit ELF only"));
2971 return bfd_arch_iamcu;
2972 }
2973 else
2974 return bfd_arch_i386;
2975 }
2976
2977 unsigned long
2978 i386_mach (void)
2979 {
2980 if (!strncmp (default_arch, "x86_64", 6))
2981 {
2982 if (cpu_arch_isa == PROCESSOR_L1OM)
2983 {
2984 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2985 || default_arch[6] != '\0')
2986 as_fatal (_("Intel L1OM is 64bit ELF only"));
2987 return bfd_mach_l1om;
2988 }
2989 else if (cpu_arch_isa == PROCESSOR_K1OM)
2990 {
2991 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2992 || default_arch[6] != '\0')
2993 as_fatal (_("Intel K1OM is 64bit ELF only"));
2994 return bfd_mach_k1om;
2995 }
2996 else if (default_arch[6] == '\0')
2997 return bfd_mach_x86_64;
2998 else
2999 return bfd_mach_x64_32;
3000 }
3001 else if (!strcmp (default_arch, "i386")
3002 || !strcmp (default_arch, "iamcu"))
3003 {
3004 if (cpu_arch_isa == PROCESSOR_IAMCU)
3005 {
3006 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
3007 as_fatal (_("Intel MCU is 32bit ELF only"));
3008 return bfd_mach_i386_iamcu;
3009 }
3010 else
3011 return bfd_mach_i386_i386;
3012 }
3013 else
3014 as_fatal (_("unknown architecture"));
3015 }
3016 \f
3017 void
3018 md_begin (void)
3019 {
3020 const char *hash_err;
3021
3022 /* Support pseudo prefixes like {disp32}. */
3023 lex_type ['{'] = LEX_BEGIN_NAME;
3024
3025 /* Initialize op_hash hash table. */
3026 op_hash = hash_new ();
3027
3028 {
3029 const insn_template *optab;
3030 templates *core_optab;
3031
3032 /* Setup for loop. */
3033 optab = i386_optab;
3034 core_optab = XNEW (templates);
3035 core_optab->start = optab;
3036
3037 while (1)
3038 {
3039 ++optab;
3040 if (optab->name == NULL
3041 || strcmp (optab->name, (optab - 1)->name) != 0)
3042 {
3043 /* different name --> ship out current template list;
3044 add to hash table; & begin anew. */
3045 core_optab->end = optab;
3046 hash_err = hash_insert (op_hash,
3047 (optab - 1)->name,
3048 (void *) core_optab);
3049 if (hash_err)
3050 {
3051 as_fatal (_("can't hash %s: %s"),
3052 (optab - 1)->name,
3053 hash_err);
3054 }
3055 if (optab->name == NULL)
3056 break;
3057 core_optab = XNEW (templates);
3058 core_optab->start = optab;
3059 }
3060 }
3061 }
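
/* After this loop, looking up e.g. "add" in op_hash yields a single
   `templates' entry whose start/end brackets all consecutive "add"
   templates in i386_optab, since the opcode table keeps same-named
   templates adjacent. */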
3062
3063 /* Initialize reg_hash hash table. */
3064 reg_hash = hash_new ();
3065 {
3066 const reg_entry *regtab;
3067 unsigned int regtab_size = i386_regtab_size;
3068
3069 for (regtab = i386_regtab; regtab_size--; regtab++)
3070 {
3071 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
3072 if (hash_err)
3073 as_fatal (_("can't hash %s: %s"),
3074 regtab->reg_name,
3075 hash_err);
3076 }
3077 }
3078
3079 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3080 {
3081 int c;
3082 char *p;
3083
3084 for (c = 0; c < 256; c++)
3085 {
3086 if (ISDIGIT (c))
3087 {
3088 digit_chars[c] = c;
3089 mnemonic_chars[c] = c;
3090 register_chars[c] = c;
3091 operand_chars[c] = c;
3092 }
3093 else if (ISLOWER (c))
3094 {
3095 mnemonic_chars[c] = c;
3096 register_chars[c] = c;
3097 operand_chars[c] = c;
3098 }
3099 else if (ISUPPER (c))
3100 {
3101 mnemonic_chars[c] = TOLOWER (c);
3102 register_chars[c] = mnemonic_chars[c];
3103 operand_chars[c] = c;
3104 }
3105 else if (c == '{' || c == '}')
3106 {
3107 mnemonic_chars[c] = c;
3108 operand_chars[c] = c;
3109 }
3110
3111 if (ISALPHA (c) || ISDIGIT (c))
3112 identifier_chars[c] = c;
3113 else if (c >= 128)
3114 {
3115 identifier_chars[c] = c;
3116 operand_chars[c] = c;
3117 }
3118 }
3119
3120 #ifdef LEX_AT
3121 identifier_chars['@'] = '@';
3122 #endif
3123 #ifdef LEX_QM
3124 identifier_chars['?'] = '?';
3125 operand_chars['?'] = '?';
3126 #endif
3127 digit_chars['-'] = '-';
3128 mnemonic_chars['_'] = '_';
3129 mnemonic_chars['-'] = '-';
3130 mnemonic_chars['.'] = '.';
3131 identifier_chars['_'] = '_';
3132 identifier_chars['.'] = '.';
3133
3134 for (p = operand_special_chars; *p != '\0'; p++)
3135 operand_chars[(unsigned char) *p] = *p;
3136 }
3137
3138 if (flag_code == CODE_64BIT)
3139 {
3140 #if defined (OBJ_COFF) && defined (TE_PE)
3141 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
3142 ? 32 : 16);
3143 #else
3144 x86_dwarf2_return_column = 16;
3145 #endif
3146 x86_cie_data_alignment = -8;
3147 }
3148 else
3149 {
3150 x86_dwarf2_return_column = 8;
3151 x86_cie_data_alignment = -4;
3152 }
3153
3154 /* NB: a FUSED_JCC_PADDING frag must have sufficient room so that it
3155 can be turned into a BRANCH_PREFIX frag. */
3156 if (align_branch_prefix_size > MAX_FUSED_JCC_PADDING_SIZE)
3157 abort ();
3158 }
3159
3160 void
3161 i386_print_statistics (FILE *file)
3162 {
3163 hash_print_statistics (file, "i386 opcode", op_hash);
3164 hash_print_statistics (file, "i386 register", reg_hash);
3165 }
3166 \f
3167 #ifdef DEBUG386
3168
3169 /* Debugging routines for md_assemble. */
3170 static void pte (insn_template *);
3171 static void pt (i386_operand_type);
3172 static void pe (expressionS *);
3173 static void ps (symbolS *);
3174
3175 static void
3176 pi (const char *line, i386_insn *x)
3177 {
3178 unsigned int j;
3179
3180 fprintf (stdout, "%s: template ", line);
3181 pte (&x->tm);
3182 fprintf (stdout, " address: base %s index %s scale %x\n",
3183 x->base_reg ? x->base_reg->reg_name : "none",
3184 x->index_reg ? x->index_reg->reg_name : "none",
3185 x->log2_scale_factor);
3186 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
3187 x->rm.mode, x->rm.reg, x->rm.regmem);
3188 fprintf (stdout, " sib: base %x index %x scale %x\n",
3189 x->sib.base, x->sib.index, x->sib.scale);
3190 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
3191 (x->rex & REX_W) != 0,
3192 (x->rex & REX_R) != 0,
3193 (x->rex & REX_X) != 0,
3194 (x->rex & REX_B) != 0);
3195 for (j = 0; j < x->operands; j++)
3196 {
3197 fprintf (stdout, " #%d: ", j + 1);
3198 pt (x->types[j]);
3199 fprintf (stdout, "\n");
3200 if (x->types[j].bitfield.class == Reg
3201 || x->types[j].bitfield.class == RegMMX
3202 || x->types[j].bitfield.class == RegSIMD
3203 || x->types[j].bitfield.class == RegMask
3204 || x->types[j].bitfield.class == SReg
3205 || x->types[j].bitfield.class == RegCR
3206 || x->types[j].bitfield.class == RegDR
3207 || x->types[j].bitfield.class == RegTR
3208 || x->types[j].bitfield.class == RegBND)
3209 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
3210 if (operand_type_check (x->types[j], imm))
3211 pe (x->op[j].imms);
3212 if (operand_type_check (x->types[j], disp))
3213 pe (x->op[j].disps);
3214 }
3215 }
3216
3217 static void
3218 pte (insn_template *t)
3219 {
3220 unsigned int j;
3221 fprintf (stdout, " %d operands ", t->operands);
3222 fprintf (stdout, "opcode %x ", t->base_opcode);
3223 if (t->extension_opcode != None)
3224 fprintf (stdout, "ext %x ", t->extension_opcode);
3225 if (t->opcode_modifier.d)
3226 fprintf (stdout, "D");
3227 if (t->opcode_modifier.w)
3228 fprintf (stdout, "W");
3229 fprintf (stdout, "\n");
3230 for (j = 0; j < t->operands; j++)
3231 {
3232 fprintf (stdout, " #%d type ", j + 1);
3233 pt (t->operand_types[j]);
3234 fprintf (stdout, "\n");
3235 }
3236 }
3237
3238 static void
3239 pe (expressionS *e)
3240 {
3241 fprintf (stdout, " operation %d\n", e->X_op);
3242 fprintf (stdout, " add_number %ld (%lx)\n",
3243 (long) e->X_add_number, (long) e->X_add_number);
3244 if (e->X_add_symbol)
3245 {
3246 fprintf (stdout, " add_symbol ");
3247 ps (e->X_add_symbol);
3248 fprintf (stdout, "\n");
3249 }
3250 if (e->X_op_symbol)
3251 {
3252 fprintf (stdout, " op_symbol ");
3253 ps (e->X_op_symbol);
3254 fprintf (stdout, "\n");
3255 }
3256 }
3257
3258 static void
3259 ps (symbolS *s)
3260 {
3261 fprintf (stdout, "%s type %s%s",
3262 S_GET_NAME (s),
3263 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
3264 segment_name (S_GET_SEGMENT (s)));
3265 }
3266
3267 static struct type_name
3268 {
3269 i386_operand_type mask;
3270 const char *name;
3271 }
3272 const type_names[] =
3273 {
3274 { OPERAND_TYPE_REG8, "r8" },
3275 { OPERAND_TYPE_REG16, "r16" },
3276 { OPERAND_TYPE_REG32, "r32" },
3277 { OPERAND_TYPE_REG64, "r64" },
3278 { OPERAND_TYPE_ACC8, "acc8" },
3279 { OPERAND_TYPE_ACC16, "acc16" },
3280 { OPERAND_TYPE_ACC32, "acc32" },
3281 { OPERAND_TYPE_ACC64, "acc64" },
3282 { OPERAND_TYPE_IMM8, "i8" },
3283 { OPERAND_TYPE_IMM8S, "i8s" },
3284 { OPERAND_TYPE_IMM16, "i16" },
3285 { OPERAND_TYPE_IMM32, "i32" },
3286 { OPERAND_TYPE_IMM32S, "i32s" },
3287 { OPERAND_TYPE_IMM64, "i64" },
3288 { OPERAND_TYPE_IMM1, "i1" },
3289 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
3290 { OPERAND_TYPE_DISP8, "d8" },
3291 { OPERAND_TYPE_DISP16, "d16" },
3292 { OPERAND_TYPE_DISP32, "d32" },
3293 { OPERAND_TYPE_DISP32S, "d32s" },
3294 { OPERAND_TYPE_DISP64, "d64" },
3295 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
3296 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
3297 { OPERAND_TYPE_CONTROL, "control reg" },
3298 { OPERAND_TYPE_TEST, "test reg" },
3299 { OPERAND_TYPE_DEBUG, "debug reg" },
3300 { OPERAND_TYPE_FLOATREG, "FReg" },
3301 { OPERAND_TYPE_FLOATACC, "FAcc" },
3302 { OPERAND_TYPE_SREG, "SReg" },
3303 { OPERAND_TYPE_REGMMX, "rMMX" },
3304 { OPERAND_TYPE_REGXMM, "rXMM" },
3305 { OPERAND_TYPE_REGYMM, "rYMM" },
3306 { OPERAND_TYPE_REGZMM, "rZMM" },
3307 { OPERAND_TYPE_REGMASK, "Mask reg" },
3308 };
3309
3310 static void
3311 pt (i386_operand_type t)
3312 {
3313 unsigned int j;
3314 i386_operand_type a;
3315
3316 for (j = 0; j < ARRAY_SIZE (type_names); j++)
3317 {
3318 a = operand_type_and (t, type_names[j].mask);
3319 if (operand_type_equal (&a, &type_names[j].mask))
3320 fprintf (stdout, "%s, ", type_names[j].name);
3321 }
3322 fflush (stdout);
3323 }
3324
3325 #endif /* DEBUG386 */
3326 \f
3327 static bfd_reloc_code_real_type
3328 reloc (unsigned int size,
3329 int pcrel,
3330 int sign,
3331 bfd_reloc_code_real_type other)
3332 {
3333 if (other != NO_RELOC)
3334 {
3335 reloc_howto_type *rel;
3336
3337 if (size == 8)
3338 switch (other)
3339 {
3340 case BFD_RELOC_X86_64_GOT32:
3341 return BFD_RELOC_X86_64_GOT64;
3343 case BFD_RELOC_X86_64_GOTPLT64:
3344 return BFD_RELOC_X86_64_GOTPLT64;
3346 case BFD_RELOC_X86_64_PLTOFF64:
3347 return BFD_RELOC_X86_64_PLTOFF64;
3349 case BFD_RELOC_X86_64_GOTPC32:
3350 other = BFD_RELOC_X86_64_GOTPC64;
3351 break;
3352 case BFD_RELOC_X86_64_GOTPCREL:
3353 other = BFD_RELOC_X86_64_GOTPCREL64;
3354 break;
3355 case BFD_RELOC_X86_64_TPOFF32:
3356 other = BFD_RELOC_X86_64_TPOFF64;
3357 break;
3358 case BFD_RELOC_X86_64_DTPOFF32:
3359 other = BFD_RELOC_X86_64_DTPOFF64;
3360 break;
3361 default:
3362 break;
3363 }
3364
3365 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3366 if (other == BFD_RELOC_SIZE32)
3367 {
3368 if (size == 8)
3369 other = BFD_RELOC_SIZE64;
3370 if (pcrel)
3371 {
3372 as_bad (_("there are no pc-relative size relocations"));
3373 return NO_RELOC;
3374 }
3375 }
3376 #endif
3377
3378 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3379 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
3380 sign = -1;
3381
3382 rel = bfd_reloc_type_lookup (stdoutput, other);
3383 if (!rel)
3384 as_bad (_("unknown relocation (%u)"), other);
3385 else if (size != bfd_get_reloc_size (rel))
3386 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3387 bfd_get_reloc_size (rel),
3388 size);
3389 else if (pcrel && !rel->pc_relative)
3390 as_bad (_("non-pc-relative relocation for pc-relative field"));
3391 else if ((rel->complain_on_overflow == complain_overflow_signed
3392 && !sign)
3393 || (rel->complain_on_overflow == complain_overflow_unsigned
3394 && sign > 0))
3395 as_bad (_("relocated field and relocation type differ in signedness"));
3396 else
3397 return other;
3398 return NO_RELOC;
3399 }
3400
3401 if (pcrel)
3402 {
3403 if (!sign)
3404 as_bad (_("there are no unsigned pc-relative relocations"));
3405 switch (size)
3406 {
3407 case 1: return BFD_RELOC_8_PCREL;
3408 case 2: return BFD_RELOC_16_PCREL;
3409 case 4: return BFD_RELOC_32_PCREL;
3410 case 8: return BFD_RELOC_64_PCREL;
3411 }
3412 as_bad (_("cannot do %u byte pc-relative relocation"), size);
3413 }
3414 else
3415 {
3416 if (sign > 0)
3417 switch (size)
3418 {
3419 case 4: return BFD_RELOC_X86_64_32S;
3420 }
3421 else
3422 switch (size)
3423 {
3424 case 1: return BFD_RELOC_8;
3425 case 2: return BFD_RELOC_16;
3426 case 4: return BFD_RELOC_32;
3427 case 8: return BFD_RELOC_64;
3428 }
3429 as_bad (_("cannot do %s %u byte relocation"),
3430 sign > 0 ? "signed" : "unsigned", size);
3431 }
3432
3433 return NO_RELOC;
3434 }
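
/* A few sample mappings: reloc (4, 1, 1, NO_RELOC) gives
   BFD_RELOC_32_PCREL, reloc (4, 0, 1, NO_RELOC) gives
   BFD_RELOC_X86_64_32S, and reloc (1, 0, 0, NO_RELOC) gives
   BFD_RELOC_8. */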
3435
3436 /* Here we decide which fixups can be adjusted to make them relative to
3437 the beginning of the section instead of the symbol. Basically we need
3438 to make sure that the dynamic relocations are done correctly, so in
3439 some cases we force the original symbol to be used. */
3440
3441 int
3442 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
3443 {
3444 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3445 if (!IS_ELF)
3446 return 1;
3447
3448 /* Don't adjust pc-relative references to merge sections in 64-bit
3449 mode. */
3450 if (use_rela_relocations
3451 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
3452 && fixP->fx_pcrel)
3453 return 0;
3454
3455 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3456 and changed later by validate_fix. */
3457 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
3458 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
3459 return 0;
3460
3461 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3462 for size relocations. */
3463 if (fixP->fx_r_type == BFD_RELOC_SIZE32
3464 || fixP->fx_r_type == BFD_RELOC_SIZE64
3465 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
3466 || fixP->fx_r_type == BFD_RELOC_386_GOT32
3467 || fixP->fx_r_type == BFD_RELOC_386_GOT32X
3468 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
3469 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
3470 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
3471 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
3472 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
3473 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
3474 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
3475 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
3476 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
3477 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
3478 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
3479 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
3480 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
3481 || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
3482 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
3483 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
3484 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
3485 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
3486 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
3487 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
3488 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
3489 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
3490 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
3491 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
3492 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
3493 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
3494 return 0;
3495 #endif
3496 return 1;
3497 }
3498
3499 static int
3500 intel_float_operand (const char *mnemonic)
3501 {
3502 /* Note that the value returned is meaningful only for opcodes with (memory)
3503 operands, hence the code here is free to improperly handle opcodes that
3504 have no operands (for better performance and smaller code). */
3505
3506 if (mnemonic[0] != 'f')
3507 return 0; /* non-math */
3508
3509 switch (mnemonic[1])
3510 {
3511 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3512 the fs segment override prefix are not currently handled, because
3513 no call path can get opcodes without operands here. */
3514 case 'i':
3515 return 2 /* integer op */;
3516 case 'l':
3517 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
3518 return 3; /* fldcw/fldenv */
3519 break;
3520 case 'n':
3521 if (mnemonic[2] != 'o' /* fnop */)
3522 return 3; /* non-waiting control op */
3523 break;
3524 case 'r':
3525 if (mnemonic[2] == 's')
3526 return 3; /* frstor/frstpm */
3527 break;
3528 case 's':
3529 if (mnemonic[2] == 'a')
3530 return 3; /* fsave */
3531 if (mnemonic[2] == 't')
3532 {
3533 switch (mnemonic[3])
3534 {
3535 case 'c': /* fstcw */
3536 case 'd': /* fstdw */
3537 case 'e': /* fstenv */
3538 case 's': /* fsts[gw] */
3539 return 3;
3540 }
3541 }
3542 break;
3543 case 'x':
3544 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3545 return 0; /* fxsave/fxrstor are not really math ops */
3546 break;
3547 }
3548
3549 return 1;
3550 }
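
/* Summarizing the returns above: 0 for non-math or pseudo ops such as
   fxsave, 1 for ordinary FPU arithmetic such as fadd, 2 for integer
   forms such as fiadd/fild, and 3 for control ops such as fldcw or
   fnstsw. */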
3551
3552 /* Build the VEX prefix. */
3553
3554 static void
3555 build_vex_prefix (const insn_template *t)
3556 {
3557 unsigned int register_specifier;
3558 unsigned int implied_prefix;
3559 unsigned int vector_length;
3560 unsigned int w;
3561
3562 /* Check register specifier. */
3563 if (i.vex.register_specifier)
3564 {
3565 register_specifier =
3566 ~register_number (i.vex.register_specifier) & 0xf;
3567 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3568 }
3569 else
3570 register_specifier = 0xf;
3571
3572 /* Use the 2-byte VEX prefix by swapping destination and source
3573 operands if there is more than one register operand. */
3574 if (i.reg_operands > 1
3575 && i.vec_encoding != vex_encoding_vex3
3576 && i.dir_encoding == dir_encoding_default
3577 && i.operands == i.reg_operands
3578 && operand_type_equal (&i.types[0], &i.types[i.operands - 1])
3579 && i.tm.opcode_modifier.vexopcode == VEX0F
3580 && (i.tm.opcode_modifier.load || i.tm.opcode_modifier.d)
3581 && i.rex == REX_B)
3582 {
3583 unsigned int xchg = i.operands - 1;
3584 union i386_op temp_op;
3585 i386_operand_type temp_type;
3586
3587 temp_type = i.types[xchg];
3588 i.types[xchg] = i.types[0];
3589 i.types[0] = temp_type;
3590 temp_op = i.op[xchg];
3591 i.op[xchg] = i.op[0];
3592 i.op[0] = temp_op;
3593
3594 gas_assert (i.rm.mode == 3);
3595
3596 i.rex = REX_R;
3597 xchg = i.rm.regmem;
3598 i.rm.regmem = i.rm.reg;
3599 i.rm.reg = xchg;
3600
3601 if (i.tm.opcode_modifier.d)
3602 i.tm.base_opcode ^= (i.tm.base_opcode & 0xee) != 0x6e
3603 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
3604 else /* Use the next insn. */
3605 i.tm = t[1];
3606 }
3607
3608 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3609 are no memory operands and at least 3 register ones. */
3610 if (i.reg_operands >= 3
3611 && i.vec_encoding != vex_encoding_vex3
3612 && i.reg_operands == i.operands - i.imm_operands
3613 && i.tm.opcode_modifier.vex
3614 && i.tm.opcode_modifier.commutative
3615 && (i.tm.opcode_modifier.sse2avx || optimize > 1)
3616 && i.rex == REX_B
3617 && i.vex.register_specifier
3618 && !(i.vex.register_specifier->reg_flags & RegRex))
3619 {
3620 unsigned int xchg = i.operands - i.reg_operands;
3621 union i386_op temp_op;
3622 i386_operand_type temp_type;
3623
3624 gas_assert (i.tm.opcode_modifier.vexopcode == VEX0F);
3625 gas_assert (!i.tm.opcode_modifier.sae);
3626 gas_assert (operand_type_equal (&i.types[i.operands - 2],
3627 &i.types[i.operands - 3]));
3628 gas_assert (i.rm.mode == 3);
3629
3630 temp_type = i.types[xchg];
3631 i.types[xchg] = i.types[xchg + 1];
3632 i.types[xchg + 1] = temp_type;
3633 temp_op = i.op[xchg];
3634 i.op[xchg] = i.op[xchg + 1];
3635 i.op[xchg + 1] = temp_op;
3636
3637 i.rex = 0;
3638 xchg = i.rm.regmem | 8;
3639 i.rm.regmem = ~register_specifier & 0xf;
3640 gas_assert (!(i.rm.regmem & 8));
3641 i.vex.register_specifier += xchg - i.rm.regmem;
3642 register_specifier = ~xchg & 0xf;
3643 }
3644
3645 if (i.tm.opcode_modifier.vex == VEXScalar)
3646 vector_length = avxscalar;
3647 else if (i.tm.opcode_modifier.vex == VEX256)
3648 vector_length = 1;
3649 else
3650 {
3651 unsigned int op;
3652
3653 /* Determine vector length from the last multi-length vector
3654 operand. */
3655 vector_length = 0;
3656 for (op = t->operands; op--;)
3657 if (t->operand_types[op].bitfield.xmmword
3658 && t->operand_types[op].bitfield.ymmword
3659 && i.types[op].bitfield.ymmword)
3660 {
3661 vector_length = 1;
3662 break;
3663 }
3664 }
3665
3666 switch ((i.tm.base_opcode >> (i.tm.opcode_length << 3)) & 0xff)
3667 {
3668 case 0:
3669 implied_prefix = 0;
3670 break;
3671 case DATA_PREFIX_OPCODE:
3672 implied_prefix = 1;
3673 break;
3674 case REPE_PREFIX_OPCODE:
3675 implied_prefix = 2;
3676 break;
3677 case REPNE_PREFIX_OPCODE:
3678 implied_prefix = 3;
3679 break;
3680 default:
3681 abort ();
3682 }
3683
3684 /* Check the REX.W bit and VEXW. */
3685 if (i.tm.opcode_modifier.vexw == VEXWIG)
3686 w = (vexwig == vexw1 || (i.rex & REX_W)) ? 1 : 0;
3687 else if (i.tm.opcode_modifier.vexw)
3688 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3689 else
3690 w = (flag_code == CODE_64BIT ? i.rex & REX_W : vexwig == vexw1) ? 1 : 0;
3691
3692 /* Use 2-byte VEX prefix if possible. */
3693 if (w == 0
3694 && i.vec_encoding != vex_encoding_vex3
3695 && i.tm.opcode_modifier.vexopcode == VEX0F
3696 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3697 {
3698 /* 2-byte VEX prefix. */
3699 unsigned int r;
3700
3701 i.vex.length = 2;
3702 i.vex.bytes[0] = 0xc5;
3703
3704 /* Check the REX.R bit. */
3705 r = (i.rex & REX_R) ? 0 : 1;
3706 i.vex.bytes[1] = (r << 7
3707 | register_specifier << 3
3708 | vector_length << 2
3709 | implied_prefix);
3710 }
3711 else
3712 {
3713 /* 3-byte VEX prefix. */
3714 unsigned int m;
3715
3716 i.vex.length = 3;
3717
3718 switch (i.tm.opcode_modifier.vexopcode)
3719 {
3720 case VEX0F:
3721 m = 0x1;
3722 i.vex.bytes[0] = 0xc4;
3723 break;
3724 case VEX0F38:
3725 m = 0x2;
3726 i.vex.bytes[0] = 0xc4;
3727 break;
3728 case VEX0F3A:
3729 m = 0x3;
3730 i.vex.bytes[0] = 0xc4;
3731 break;
3732 case XOP08:
3733 m = 0x8;
3734 i.vex.bytes[0] = 0x8f;
3735 break;
3736 case XOP09:
3737 m = 0x9;
3738 i.vex.bytes[0] = 0x8f;
3739 break;
3740 case XOP0A:
3741 m = 0xa;
3742 i.vex.bytes[0] = 0x8f;
3743 break;
3744 default:
3745 abort ();
3746 }
3747
3748 /* The high 3 bits of the second VEX byte are 1's complement
3749 of the RXB bits from REX. */
3750 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3751
3752 i.vex.bytes[2] = (w << 7
3753 | register_specifier << 3
3754 | vector_length << 2
3755 | implied_prefix);
3756 }
3757 }
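
/* The bytes assembled above follow the SDM layout:
   2-byte VEX: 0xc5, R vvvv L pp
   3-byte VEX: 0xc4, R X B mmmmm, W vvvv L pp
   with R/X/B the complemented REX bits, vvvv the complemented second
   source register, L the vector length, and pp the implied prefix
   (none/66/F3/F2). */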
3758
3759 static INLINE bfd_boolean
3760 is_evex_encoding (const insn_template *t)
3761 {
3762 return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
3763 || t->opcode_modifier.broadcast || t->opcode_modifier.masking
3764 || t->opcode_modifier.sae;
3765 }
3766
3767 static INLINE bfd_boolean
3768 is_any_vex_encoding (const insn_template *t)
3769 {
3770 return t->opcode_modifier.vex || t->opcode_modifier.vexopcode
3771 || is_evex_encoding (t);
3772 }
3773
3774 /* Build the EVEX prefix. */
3775
3776 static void
3777 build_evex_prefix (void)
3778 {
3779 unsigned int register_specifier;
3780 unsigned int implied_prefix;
3781 unsigned int m, w;
3782 rex_byte vrex_used = 0;
3783
3784 /* Check register specifier. */
3785 if (i.vex.register_specifier)
3786 {
3787 gas_assert ((i.vrex & REX_X) == 0);
3788
3789 register_specifier = i.vex.register_specifier->reg_num;
3790 if ((i.vex.register_specifier->reg_flags & RegRex))
3791 register_specifier += 8;
3792 /* The upper 16 registers are encoded in the fourth byte of the
3793 EVEX prefix. */
3794 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3795 i.vex.bytes[3] = 0x8;
3796 register_specifier = ~register_specifier & 0xf;
3797 }
3798 else
3799 {
3800 register_specifier = 0xf;
3801
3802 /* Encode upper 16 vector index register in the fourth byte of
3803 the EVEX prefix. */
3804 if (!(i.vrex & REX_X))
3805 i.vex.bytes[3] = 0x8;
3806 else
3807 vrex_used |= REX_X;
3808 }
3809
3810 switch ((i.tm.base_opcode >> 8) & 0xff)
3811 {
3812 case 0:
3813 implied_prefix = 0;
3814 break;
3815 case DATA_PREFIX_OPCODE:
3816 implied_prefix = 1;
3817 break;
3818 case REPE_PREFIX_OPCODE:
3819 implied_prefix = 2;
3820 break;
3821 case REPNE_PREFIX_OPCODE:
3822 implied_prefix = 3;
3823 break;
3824 default:
3825 abort ();
3826 }
3827
3828 /* 4 byte EVEX prefix. */
3829 i.vex.length = 4;
3830 i.vex.bytes[0] = 0x62;
3831
3832 /* mmmm bits. */
3833 switch (i.tm.opcode_modifier.vexopcode)
3834 {
3835 case VEX0F:
3836 m = 1;
3837 break;
3838 case VEX0F38:
3839 m = 2;
3840 break;
3841 case VEX0F3A:
3842 m = 3;
3843 break;
3844 default:
3845 abort ();
3847 }
3848
3849 /* The high 3 bits of the second EVEX byte are 1's complement of the
3850 RXB bits from REX. */
3851 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3852
3853 /* The fifth bit of the second EVEX byte is 1's complement of the
3854 REX_R bit in VREX. */
3855 if (!(i.vrex & REX_R))
3856 i.vex.bytes[1] |= 0x10;
3857 else
3858 vrex_used |= REX_R;
3859
3860 if ((i.reg_operands + i.imm_operands) == i.operands)
3861 {
3862 /* When all operands are registers, the REX_X bit in REX is not
3863 used. We reuse it to encode the upper 16 registers, which is
3864 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3865 as 1's complement. */
3866 if ((i.vrex & REX_B))
3867 {
3868 vrex_used |= REX_B;
3869 i.vex.bytes[1] &= ~0x40;
3870 }
3871 }
3872
3873 /* EVEX instructions shouldn't need the REX prefix. */
3874 i.vrex &= ~vrex_used;
3875 gas_assert (i.vrex == 0);
3876
3877 /* Check the REX.W bit and VEXW. */
3878 if (i.tm.opcode_modifier.vexw == VEXWIG)
3879 w = (evexwig == evexw1 || (i.rex & REX_W)) ? 1 : 0;
3880 else if (i.tm.opcode_modifier.vexw)
3881 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3882 else
3883 w = (flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1) ? 1 : 0;
3884
3885 /* Encode the U bit. */
3886 implied_prefix |= 0x4;
3887
3888 /* The third byte of the EVEX prefix. */
3889 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3890
3891 /* The fourth byte of the EVEX prefix. */
3892 /* The zeroing-masking bit. */
3893 if (i.mask && i.mask->zeroing)
3894 i.vex.bytes[3] |= 0x80;
3895
3896 /* Without RC, only set the broadcast bit when actually broadcasting. */
3897 if (!i.rounding)
3898 {
3899 /* Encode the vector length. */
3900 unsigned int vec_length;
3901
3902 if (!i.tm.opcode_modifier.evex
3903 || i.tm.opcode_modifier.evex == EVEXDYN)
3904 {
3905 unsigned int op;
3906
3907 /* Determine vector length from the last multi-length vector
3908 operand. */
3909 for (op = i.operands; op--;)
3910 if (i.tm.operand_types[op].bitfield.xmmword
3911 + i.tm.operand_types[op].bitfield.ymmword
3912 + i.tm.operand_types[op].bitfield.zmmword > 1)
3913 {
3914 if (i.types[op].bitfield.zmmword)
3915 {
3916 i.tm.opcode_modifier.evex = EVEX512;
3917 break;
3918 }
3919 else if (i.types[op].bitfield.ymmword)
3920 {
3921 i.tm.opcode_modifier.evex = EVEX256;
3922 break;
3923 }
3924 else if (i.types[op].bitfield.xmmword)
3925 {
3926 i.tm.opcode_modifier.evex = EVEX128;
3927 break;
3928 }
3929 else if (i.broadcast && (int) op == i.broadcast->operand)
3930 {
3931 switch (i.broadcast->bytes)
3932 {
3933 case 64:
3934 i.tm.opcode_modifier.evex = EVEX512;
3935 break;
3936 case 32:
3937 i.tm.opcode_modifier.evex = EVEX256;
3938 break;
3939 case 16:
3940 i.tm.opcode_modifier.evex = EVEX128;
3941 break;
3942 default:
3943 abort ();
3944 }
3945 break;
3946 }
3947 }
3948
3949 if (op >= MAX_OPERANDS)
3950 abort ();
3951 }
3952
3953 switch (i.tm.opcode_modifier.evex)
3954 {
3955 case EVEXLIG: /* LL' is ignored */
3956 vec_length = evexlig << 5;
3957 break;
3958 case EVEX128:
3959 vec_length = 0 << 5;
3960 break;
3961 case EVEX256:
3962 vec_length = 1 << 5;
3963 break;
3964 case EVEX512:
3965 vec_length = 2 << 5;
3966 break;
3967 default:
3968 abort ();
3969 break;
3970 }
3971 i.vex.bytes[3] |= vec_length;
3972 /* Encode the broadcast bit. */
3973 if (i.broadcast)
3974 i.vex.bytes[3] |= 0x10;
3975 }
3976 else
3977 {
3978 if (i.rounding->type != saeonly)
3979 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3980 else
3981 i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3982 }
3983
3984 if (i.mask && i.mask->mask)
3985 i.vex.bytes[3] |= i.mask->mask->reg_num;
3986 }
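/* A worked example, assuming the layout sketched above: AT&T
   "vaddps %zmm1, %zmm2, %zmm3" comes out as 62 f1 6c 48 58 d9, i.e.
   0x62, then 0xf1 (RXBR' all inverted to 1, mm = 01 for the 0f map),
   then 0x6c (W = 0, vvvv = ~2, U = 1, pp = 00), then 0x48 (L'L = 10
   for 512-bit), followed by the 0x58 opcode and ModRM byte 0xd9. */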
3987
3988 static void
3989 process_immext (void)
3990 {
3991 expressionS *exp;
3992
3993 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3994 which is coded in the same place as an 8-bit immediate field
3995 would be. Here we fake an 8-bit immediate operand from the
3996 opcode suffix stored in tm.extension_opcode.
3997
3998 AVX instructions also use this encoding for some
3999 3-argument instructions. */
4000
4001 gas_assert (i.imm_operands <= 1
4002 && (i.operands <= 2
4003 || (is_any_vex_encoding (&i.tm)
4004 && i.operands <= 4)));
4005
4006 exp = &im_expressions[i.imm_operands++];
4007 i.op[i.operands].imms = exp;
4008 i.types[i.operands] = imm8;
4009 i.operands++;
4010 exp->X_op = O_constant;
4011 exp->X_add_number = i.tm.extension_opcode;
4012 i.tm.extension_opcode = None;
4013 }
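/* For illustration (not taken from this file's tables): the 3DNow!
   "pfadd %mm1, %mm0" is 0f 0f c1 9e, where the 0x9e opcode suffix
   travels in the imm8 slot that the fake immediate above provides. */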
4014
4015
4016 static int
4017 check_hle (void)
4018 {
4019 switch (i.tm.opcode_modifier.hleprefixok)
4020 {
4021 default:
4022 abort ();
4023 case HLEPrefixNone:
4024 as_bad (_("invalid instruction `%s' after `%s'"),
4025 i.tm.name, i.hle_prefix);
4026 return 0;
4027 case HLEPrefixLock:
4028 if (i.prefix[LOCK_PREFIX])
4029 return 1;
4030 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
4031 return 0;
4032 case HLEPrefixAny:
4033 return 1;
4034 case HLEPrefixRelease:
4035 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
4036 {
4037 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4038 i.tm.name);
4039 return 0;
4040 }
4041 if (i.mem_operands == 0 || !(i.flags[i.operands - 1] & Operand_Mem))
4042 {
4043 as_bad (_("memory destination needed for instruction `%s'"
4044 " after `xrelease'"), i.tm.name);
4045 return 0;
4046 }
4047 return 1;
4048 }
4049 }
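/* E.g. "xacquire lock incl (%rax)" passes the HLEPrefixLock case above,
   while plain "xacquire incl (%rax)" is rejected with "missing `lock'"
   (example assumed from the checks above, not from a testcase). */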
4050
4051 /* Try the shortest encoding by shortening operand size. */
4052
4053 static void
4054 optimize_encoding (void)
4055 {
4056 unsigned int j;
4057
4058 if (optimize_for_space
4059 && !is_any_vex_encoding (&i.tm)
4060 && i.reg_operands == 1
4061 && i.imm_operands == 1
4062 && !i.types[1].bitfield.byte
4063 && i.op[0].imms->X_op == O_constant
4064 && fits_in_imm7 (i.op[0].imms->X_add_number)
4065 && (i.tm.base_opcode == 0xa8
4066 || (i.tm.base_opcode == 0xf6
4067 && i.tm.extension_opcode == 0x0)))
4068 {
4069 /* Optimize: -Os:
4070 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4071 */
4072 unsigned int base_regnum = i.op[1].regs->reg_num;
4073 if (flag_code == CODE_64BIT || base_regnum < 4)
4074 {
4075 i.types[1].bitfield.byte = 1;
4076 /* Ignore the suffix. */
4077 i.suffix = 0;
4078 /* Convert to byte registers. */
4079 if (i.types[1].bitfield.word)
4080 j = 16;
4081 else if (i.types[1].bitfield.dword)
4082 j = 32;
4083 else
4084 j = 48;
4085 if (!(i.op[1].regs->reg_flags & RegRex) && base_regnum < 4)
4086 j += 8;
4087 i.op[1].regs -= j;
4088 }
4089 }
4090 else if (flag_code == CODE_64BIT
4091 && !is_any_vex_encoding (&i.tm)
4092 && ((i.types[1].bitfield.qword
4093 && i.reg_operands == 1
4094 && i.imm_operands == 1
4095 && i.op[0].imms->X_op == O_constant
4096 && ((i.tm.base_opcode == 0xb8
4097 && i.tm.extension_opcode == None
4098 && fits_in_unsigned_long (i.op[0].imms->X_add_number))
4099 || (fits_in_imm31 (i.op[0].imms->X_add_number)
4100 && ((i.tm.base_opcode == 0x24
4101 || i.tm.base_opcode == 0xa8)
4102 || (i.tm.base_opcode == 0x80
4103 && i.tm.extension_opcode == 0x4)
4104 || ((i.tm.base_opcode == 0xf6
4105 || (i.tm.base_opcode | 1) == 0xc7)
4106 && i.tm.extension_opcode == 0x0)))
4107 || (fits_in_imm7 (i.op[0].imms->X_add_number)
4108 && i.tm.base_opcode == 0x83
4109 && i.tm.extension_opcode == 0x4)))
4110 || (i.types[0].bitfield.qword
4111 && ((i.reg_operands == 2
4112 && i.op[0].regs == i.op[1].regs
4113 && (i.tm.base_opcode == 0x30
4114 || i.tm.base_opcode == 0x28))
4115 || (i.reg_operands == 1
4116 && i.operands == 1
4117 && i.tm.base_opcode == 0x30)))))
4118 {
4119 /* Optimize: -O:
4120 andq $imm31, %r64 -> andl $imm31, %r32
4121 andq $imm7, %r64 -> andl $imm7, %r32
4122 testq $imm31, %r64 -> testl $imm31, %r32
4123 xorq %r64, %r64 -> xorl %r32, %r32
4124 subq %r64, %r64 -> subl %r32, %r32
4125 movq $imm31, %r64 -> movl $imm31, %r32
4126 movq $imm32, %r64 -> movl $imm32, %r32
4127 */
4128 i.tm.opcode_modifier.norex64 = 1;
4129 if (i.tm.base_opcode == 0xb8 || (i.tm.base_opcode | 1) == 0xc7)
4130 {
4131 /* Handle
4132 movq $imm31, %r64 -> movl $imm31, %r32
4133 movq $imm32, %r64 -> movl $imm32, %r32
4134 */
4135 i.tm.operand_types[0].bitfield.imm32 = 1;
4136 i.tm.operand_types[0].bitfield.imm32s = 0;
4137 i.tm.operand_types[0].bitfield.imm64 = 0;
4138 i.types[0].bitfield.imm32 = 1;
4139 i.types[0].bitfield.imm32s = 0;
4140 i.types[0].bitfield.imm64 = 0;
4141 i.types[1].bitfield.dword = 1;
4142 i.types[1].bitfield.qword = 0;
4143 if ((i.tm.base_opcode | 1) == 0xc7)
4144 {
4145 /* Handle
4146 movq $imm31, %r64 -> movl $imm31, %r32
4147 */
4148 i.tm.base_opcode = 0xb8;
4149 i.tm.extension_opcode = None;
4150 i.tm.opcode_modifier.w = 0;
4151 i.tm.opcode_modifier.modrm = 0;
4152 }
4153 }
4154 }
4155 else if (optimize > 1
4156 && !optimize_for_space
4157 && !is_any_vex_encoding (&i.tm)
4158 && i.reg_operands == 2
4159 && i.op[0].regs == i.op[1].regs
4160 && ((i.tm.base_opcode & ~(Opcode_D | 1)) == 0x8
4161 || (i.tm.base_opcode & ~(Opcode_D | 1)) == 0x20)
4162 && (flag_code != CODE_64BIT || !i.types[0].bitfield.dword))
4163 {
4164 /* Optimize: -O2:
4165 andb %rN, %rN -> testb %rN, %rN
4166 andw %rN, %rN -> testw %rN, %rN
4167 andq %rN, %rN -> testq %rN, %rN
4168 orb %rN, %rN -> testb %rN, %rN
4169 orw %rN, %rN -> testw %rN, %rN
4170 orq %rN, %rN -> testq %rN, %rN
4171
4172 and outside of 64-bit mode
4173
4174 andl %rN, %rN -> testl %rN, %rN
4175 orl %rN, %rN -> testl %rN, %rN
4176 */
4177 i.tm.base_opcode = 0x84 | (i.tm.base_opcode & 1);
4178 }
4179 else if (i.reg_operands == 3
4180 && i.op[0].regs == i.op[1].regs
4181 && !i.types[2].bitfield.xmmword
4182 && (i.tm.opcode_modifier.vex
4183 || ((!i.mask || i.mask->zeroing)
4184 && !i.rounding
4185 && is_evex_encoding (&i.tm)
4186 && (i.vec_encoding != vex_encoding_evex
4187 || cpu_arch_isa_flags.bitfield.cpuavx512vl
4188 || i.tm.cpu_flags.bitfield.cpuavx512vl
4189 || (i.tm.operand_types[2].bitfield.zmmword
4190 && i.types[2].bitfield.ymmword))))
4191 && ((i.tm.base_opcode == 0x55
4192 || i.tm.base_opcode == 0x6655
4193 || i.tm.base_opcode == 0x66df
4194 || i.tm.base_opcode == 0x57
4195 || i.tm.base_opcode == 0x6657
4196 || i.tm.base_opcode == 0x66ef
4197 || i.tm.base_opcode == 0x66f8
4198 || i.tm.base_opcode == 0x66f9
4199 || i.tm.base_opcode == 0x66fa
4200 || i.tm.base_opcode == 0x66fb
4201 || i.tm.base_opcode == 0x42
4202 || i.tm.base_opcode == 0x6642
4203 || i.tm.base_opcode == 0x47
4204 || i.tm.base_opcode == 0x6647)
4205 && i.tm.extension_opcode == None))
4206 {
4207 /* Optimize: -O1:
4208 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4209 vpsubq and vpsubw:
4210 EVEX VOP %zmmM, %zmmM, %zmmN
4211 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4212 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4213 EVEX VOP %ymmM, %ymmM, %ymmN
4214 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4215 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4216 VEX VOP %ymmM, %ymmM, %ymmN
4217 -> VEX VOP %xmmM, %xmmM, %xmmN
4218 VOP, one of vpandn and vpxor:
4219 VEX VOP %ymmM, %ymmM, %ymmN
4220 -> VEX VOP %xmmM, %xmmM, %xmmN
4221 VOP, one of vpandnd and vpandnq:
4222 EVEX VOP %zmmM, %zmmM, %zmmN
4223 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4224 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4225 EVEX VOP %ymmM, %ymmM, %ymmN
4226 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4227 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4228 VOP, one of vpxord and vpxorq:
4229 EVEX VOP %zmmM, %zmmM, %zmmN
4230 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4231 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4232 EVEX VOP %ymmM, %ymmM, %ymmN
4233 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4234 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4235 VOP, one of kxord and kxorq:
4236 VEX VOP %kM, %kM, %kN
4237 -> VEX kxorw %kM, %kM, %kN
4238 VOP, one of kandnd and kandnq:
4239 VEX VOP %kM, %kM, %kN
4240 -> VEX kandnw %kM, %kM, %kN
4241 */
4242 if (is_evex_encoding (&i.tm))
4243 {
4244 if (i.vec_encoding != vex_encoding_evex)
4245 {
4246 i.tm.opcode_modifier.vex = VEX128;
4247 i.tm.opcode_modifier.vexw = VEXW0;
4248 i.tm.opcode_modifier.evex = 0;
4249 }
4250 else if (optimize > 1)
4251 i.tm.opcode_modifier.evex = EVEX128;
4252 else
4253 return;
4254 }
4255 else if (i.tm.operand_types[0].bitfield.class == RegMask)
4256 {
4257 i.tm.base_opcode &= 0xff;
4258 i.tm.opcode_modifier.vexw = VEXW0;
4259 }
4260 else
4261 i.tm.opcode_modifier.vex = VEX128;
4262
4263 if (i.tm.opcode_modifier.vex)
4264 for (j = 0; j < 3; j++)
4265 {
4266 i.types[j].bitfield.xmmword = 1;
4267 i.types[j].bitfield.ymmword = 0;
4268 }
4269 }
4270 else if (i.vec_encoding != vex_encoding_evex
4271 && !i.types[0].bitfield.zmmword
4272 && !i.types[1].bitfield.zmmword
4273 && !i.mask
4274 && !i.broadcast
4275 && is_evex_encoding (&i.tm)
4276 && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
4277 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
4278 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
4279 || (i.tm.base_opcode & ~4) == 0x66db
4280 || (i.tm.base_opcode & ~4) == 0x66eb)
4281 && i.tm.extension_opcode == None)
4282 {
4283 /* Optimize: -O1:
4284 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4285 vmovdqu32 and vmovdqu64:
4286 EVEX VOP %xmmM, %xmmN
4287 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4288 EVEX VOP %ymmM, %ymmN
4289 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4290 EVEX VOP %xmmM, mem
4291 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4292 EVEX VOP %ymmM, mem
4293 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4294 EVEX VOP mem, %xmmN
4295 -> VEX vmovdqa|vmovdqu mem, %xmmN (N < 16)
4296 EVEX VOP mem, %ymmN
4297 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4298 VOP, one of vpand, vpandn, vpor, vpxor:
4299 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4300 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4301 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4302 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4303 EVEX VOP{d,q} mem, %xmmM, %xmmN
4304 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4305 EVEX VOP{d,q} mem, %ymmM, %ymmN
4306 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4307 */
4308 for (j = 0; j < i.operands; j++)
4309 if (operand_type_check (i.types[j], disp)
4310 && i.op[j].disps->X_op == O_constant)
4311 {
4312 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4313 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4314 bytes, we choose EVEX Disp8 over VEX Disp32. */
4315 int evex_disp8, vex_disp8;
4316 unsigned int memshift = i.memshift;
4317 offsetT n = i.op[j].disps->X_add_number;
4318
4319 evex_disp8 = fits_in_disp8 (n);
4320 i.memshift = 0;
4321 vex_disp8 = fits_in_disp8 (n);
4322 if (evex_disp8 != vex_disp8)
4323 {
4324 i.memshift = memshift;
4325 return;
4326 }
4327
4328 i.types[j].bitfield.disp8 = vex_disp8;
4329 break;
4330 }
4331 if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
4332 i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
4333 i.tm.opcode_modifier.vex
4334 = i.types[0].bitfield.ymmword ? VEX256 : VEX128;
4335 i.tm.opcode_modifier.vexw = VEXW0;
4336 /* VPAND, VPOR, and VPXOR are commutative. */
4337 if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
4338 i.tm.opcode_modifier.commutative = 1;
4339 i.tm.opcode_modifier.evex = 0;
4340 i.tm.opcode_modifier.masking = 0;
4341 i.tm.opcode_modifier.broadcast = 0;
4342 i.tm.opcode_modifier.disp8memshift = 0;
4343 i.memshift = 0;
4344 if (j < i.operands)
4345 i.types[j].bitfield.disp8
4346 = fits_in_disp8 (i.op[j].disps->X_add_number);
4347 }
4348 }
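/* These rewrites only run when requested on the command line; e.g.
   "as -O2" enables the and/or -> test folding above, and "as -Os"
   additionally takes the optimize_for_space path that shrinks
   test $imm7 to its byte-register form. */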
4349
4350 /* Return non-zero for load instruction. */
4351
4352 static int
4353 load_insn_p (void)
4354 {
4355 unsigned int dest;
4356 int any_vex_p = is_any_vex_encoding (&i.tm);
4357 unsigned int base_opcode = i.tm.base_opcode | 1;
4358
4359 if (!any_vex_p)
4360 {
4361 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4362 prefetcht1, prefetcht2, prefetchw, bndmk, bndcl, bndcu, bndcn,
4363 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4364 if (i.tm.opcode_modifier.anysize)
4365 return 0;
4366
4367 /* pop, popf, popa. */
4368 if (strcmp (i.tm.name, "pop") == 0
4369 || i.tm.base_opcode == 0x9d
4370 || i.tm.base_opcode == 0x61)
4371 return 1;
4372
4373 /* movs, cmps, lods, scas. */
4374 if ((i.tm.base_opcode | 0xb) == 0xaf)
4375 return 1;
4376
4377 /* outs, xlatb. */
4378 if (base_opcode == 0x6f
4379 || i.tm.base_opcode == 0xd7)
4380 return 1;
4381 /* NB: AMD-specific insns with implicit memory operands are
4382 intentionally not covered. */
4383 }
4384
4385 /* No memory operand. */
4386 if (!i.mem_operands)
4387 return 0;
4388
4389 if (any_vex_p)
4390 {
4391 /* vldmxcsr. */
4392 if (i.tm.base_opcode == 0xae
4393 && i.tm.opcode_modifier.vex
4394 && i.tm.opcode_modifier.vexopcode == VEX0F
4395 && i.tm.extension_opcode == 2)
4396 return 1;
4397 }
4398 else
4399 {
4400 /* test, not, neg, mul, imul, div, idiv. */
4401 if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7)
4402 && i.tm.extension_opcode != 1)
4403 return 1;
4404
4405 /* inc, dec. */
4406 if (base_opcode == 0xff && i.tm.extension_opcode <= 1)
4407 return 1;
4408
4409 /* add, or, adc, sbb, and, sub, xor, cmp. */
4410 if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83)
4411 return 1;
4412
4413 /* bt, bts, btr, btc. */
4414 if (i.tm.base_opcode == 0xfba
4415 && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))
4416 return 1;
4417
4418 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4419 if ((base_opcode == 0xc1
4420 || (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3))
4421 && i.tm.extension_opcode != 6)
4422 return 1;
4423
4424 /* cmpxchg8b, cmpxchg16b, xrstors. */
4425 if (i.tm.base_opcode == 0xfc7
4426 && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3))
4427 return 1;
4428
4429 /* fxrstor, ldmxcsr, xrstor. */
4430 if (i.tm.base_opcode == 0xfae
4431 && (i.tm.extension_opcode == 1
4432 || i.tm.extension_opcode == 2
4433 || i.tm.extension_opcode == 5))
4434 return 1;
4435
4436 /* lgdt, lidt, lmsw. */
4437 if (i.tm.base_opcode == 0xf01
4438 && (i.tm.extension_opcode == 2
4439 || i.tm.extension_opcode == 3
4440 || i.tm.extension_opcode == 6))
4441 return 1;
4442
4443 /* vmptrld */
4444 if (i.tm.base_opcode == 0xfc7
4445 && i.tm.extension_opcode == 6)
4446 return 1;
4447
4448 /* Check for x87 instructions. */
4449 if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf)
4450 {
4451 /* Skip fst, fstp, fstenv, fstcw. */
4452 if (i.tm.base_opcode == 0xd9
4453 && (i.tm.extension_opcode == 2
4454 || i.tm.extension_opcode == 3
4455 || i.tm.extension_opcode == 6
4456 || i.tm.extension_opcode == 7))
4457 return 0;
4458
4459 /* Skip fisttp, fist, fistp, fstp. */
4460 if (i.tm.base_opcode == 0xdb
4461 && (i.tm.extension_opcode == 1
4462 || i.tm.extension_opcode == 2
4463 || i.tm.extension_opcode == 3
4464 || i.tm.extension_opcode == 7))
4465 return 0;
4466
4467 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4468 if (i.tm.base_opcode == 0xdd
4469 && (i.tm.extension_opcode == 1
4470 || i.tm.extension_opcode == 2
4471 || i.tm.extension_opcode == 3
4472 || i.tm.extension_opcode == 6
4473 || i.tm.extension_opcode == 7))
4474 return 0;
4475
4476 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4477 if (i.tm.base_opcode == 0xdf
4478 && (i.tm.extension_opcode == 1
4479 || i.tm.extension_opcode == 2
4480 || i.tm.extension_opcode == 3
4481 || i.tm.extension_opcode == 6
4482 || i.tm.extension_opcode == 7))
4483 return 0;
4484
4485 return 1;
4486 }
4487 }
4488
4489 dest = i.operands - 1;
4490
4491 /* Check fake imm8 operand and 3 source operands. */
4492 if ((i.tm.opcode_modifier.immext
4493 || i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4494 && i.types[dest].bitfield.imm8)
4495 dest--;
4496
4497 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */
4498 if (!any_vex_p
4499 && (base_opcode == 0x1
4500 || base_opcode == 0x9
4501 || base_opcode == 0x11
4502 || base_opcode == 0x19
4503 || base_opcode == 0x21
4504 || base_opcode == 0x29
4505 || base_opcode == 0x31
4506 || base_opcode == 0x39
4507 || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87)
4508 || base_opcode == 0xfc1))
4509 return 1;
4510
4511 /* Check for load instruction. */
4512 return (i.types[dest].bitfield.class != ClassNone
4513 || i.types[dest].bitfield.instance == Accum);
4514 }
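/* Example classifications under the checks above: "movl (%rax), %ebx"
   and "addl (%rax), %ebx" have a register destination and count as
   loads, while "movl %ebx, (%rax)" does not. */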
4515
4516 /* Output lfence (0x0f 0xae 0xe8) after the instruction. */
4517
4518 static void
4519 insert_lfence_after (void)
4520 {
4521 if (lfence_after_load && load_insn_p ())
4522 {
4523 /* There are also two REP string instructions that require
4524 special treatment. Specifically, the compare string (CMPS)
4525 and scan string (SCAS) instructions set EFLAGS in a manner
4526 that depends on the data being compared/scanned. When used
4527 with a REP prefix, the number of iterations may therefore
4528 vary depending on this data. If the data is a program secret
4529 chosen by the adversary using an LVI method,
4530 then this data-dependent behavior may leak some aspect
4531 of the secret. */
4532 if (((i.tm.base_opcode | 0x1) == 0xa7
4533 || (i.tm.base_opcode | 0x1) == 0xaf)
4534 && i.prefix[REP_PREFIX])
4535 {
4536 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4537 i.tm.name);
4538 }
4539 char *p = frag_more (3);
4540 *p++ = 0xf;
4541 *p++ = 0xae;
4542 *p = 0xe8;
4543 }
4544 }
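/* E.g. with -mlfence-after-load=yes, "movl (%rax), %eax" is emitted as
   "movl (%rax), %eax; lfence", the lfence being the three bytes
   appended above. */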
4545
4546 /* Output lfence (0x0f 0xae 0xe8) before the instruction. */
4547
4548 static void
4549 insert_lfence_before (void)
4550 {
4551 char *p;
4552
4553 if (is_any_vex_encoding (&i.tm))
4554 return;
4555
4556 if (i.tm.base_opcode == 0xff
4557 && (i.tm.extension_opcode == 2 || i.tm.extension_opcode == 4))
4558 {
4559 /* Insert lfence before indirect branch if needed. */
4560
4561 if (lfence_before_indirect_branch == lfence_branch_none)
4562 return;
4563
4564 if (i.operands != 1)
4565 abort ();
4566
4567 if (i.reg_operands == 1)
4568 {
4569 /* Indirect branch via register. Don't insert lfence with
4570 -mlfence-after-load=yes. */
4571 if (lfence_after_load
4572 || lfence_before_indirect_branch == lfence_branch_memory)
4573 return;
4574 }
4575 else if (i.mem_operands == 1
4576 && lfence_before_indirect_branch != lfence_branch_register)
4577 {
4578 as_warn (_("indirect `%s` with memory operand should be avoided"),
4579 i.tm.name);
4580 return;
4581 }
4582 else
4583 return;
4584
4585 if (last_insn.kind != last_insn_other
4586 && last_insn.seg == now_seg)
4587 {
4588 as_warn_where (last_insn.file, last_insn.line,
4589 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4590 last_insn.name, i.tm.name);
4591 return;
4592 }
4593
4594 p = frag_more (3);
4595 *p++ = 0xf;
4596 *p++ = 0xae;
4597 *p = 0xe8;
4598 return;
4599 }
4600
4601 /* Output or/not/shl and lfence before near ret. */
4602 if (lfence_before_ret != lfence_before_ret_none
4603 && (i.tm.base_opcode == 0xc2
4604 || i.tm.base_opcode == 0xc3))
4605 {
4606 if (last_insn.kind != last_insn_other
4607 && last_insn.seg == now_seg)
4608 {
4609 as_warn_where (last_insn.file, last_insn.line,
4610 _("`%s` skips -mlfence-before-ret on `%s`"),
4611 last_insn.name, i.tm.name);
4612 return;
4613 }
4614
4615 /* Near ret ignores the operand size override in 64-bit code. */
4616 char prefix = flag_code == CODE_64BIT
4617 ? 0x48
4618 : i.prefix[DATA_PREFIX] ? 0x66 : 0x0;
4619
4620 if (lfence_before_ret == lfence_before_ret_not)
4621 {
4622 /* not: 0xf71424, may add prefix
4623 for operand size override or 64-bit code. */
4624 p = frag_more ((prefix ? 2 : 0) + 6 + 3);
4625 if (prefix)
4626 *p++ = prefix;
4627 *p++ = 0xf7;
4628 *p++ = 0x14;
4629 *p++ = 0x24;
4630 if (prefix)
4631 *p++ = prefix;
4632 *p++ = 0xf7;
4633 *p++ = 0x14;
4634 *p++ = 0x24;
4635 }
4636 else
4637 {
4638 p = frag_more ((prefix ? 1 : 0) + 4 + 3);
4639 if (prefix)
4640 *p++ = prefix;
4641 if (lfence_before_ret == lfence_before_ret_or)
4642 {
4643 /* or: 0x830c2400, may add prefix
4644 for operand size override or 64-bit code. */
4645 *p++ = 0x83;
4646 *p++ = 0x0c;
4647 }
4648 else
4649 {
4650 /* shl: 0xc1242400, may add prefix
4651 for operand size override or 64-bit code. */
4652 *p++ = 0xc1;
4653 *p++ = 0x24;
4654 }
4655
4656 *p++ = 0x24;
4657 *p++ = 0x0;
4658 }
4659
4660 *p++ = 0xf;
4661 *p++ = 0xae;
4662 *p = 0xe8;
4663 }
4664 }
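/* E.g. with -mlfence-before-ret=or in 64-bit code, a near "ret" is
   preceded by "orq $0x0, (%rsp); lfence", i.e. the 48 83 0c 24 00
   sequence built above followed by 0f ae e8. */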
4665
4666 /* This is the guts of the machine-dependent assembler. LINE points to a
4667 machine dependent instruction. This function is supposed to emit
4668 the frags/bytes it assembles to. */
4669
4670 void
4671 md_assemble (char *line)
4672 {
4673 unsigned int j;
4674 char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
4675 const insn_template *t;
4676
4677 /* Initialize globals. */
4678 memset (&i, '\0', sizeof (i));
4679 for (j = 0; j < MAX_OPERANDS; j++)
4680 i.reloc[j] = NO_RELOC;
4681 memset (disp_expressions, '\0', sizeof (disp_expressions));
4682 memset (im_expressions, '\0', sizeof (im_expressions));
4683 save_stack_p = save_stack;
4684
4685 /* First parse an instruction mnemonic & call i386_operand for the operands.
4686 We assume that the scrubber has arranged it so that line[0] is the valid
4687 start of a (possibly prefixed) mnemonic. */
4688
4689 line = parse_insn (line, mnemonic);
4690 if (line == NULL)
4691 return;
4692 mnem_suffix = i.suffix;
4693
4694 line = parse_operands (line, mnemonic);
4695 this_operand = -1;
4696 xfree (i.memop1_string);
4697 i.memop1_string = NULL;
4698 if (line == NULL)
4699 return;
4700
4701 /* Now we've parsed the mnemonic into a set of templates, and have the
4702 operands at hand. */
4703
4704 /* All Intel opcodes have reversed operands except for "bound", "enter",
4705 "monitor*", "mwait*", "tpause", and "umwait". We also don't reverse
4706 intersegment "jmp" and "call" instructions with 2 immediate operands so
4707 that the immediate segment precedes the offset, as it does when in AT&T
4708 mode. */
4709 if (intel_syntax
4710 && i.operands > 1
4711 && (strcmp (mnemonic, "bound") != 0)
4712 && (strcmp (mnemonic, "invlpga") != 0)
4713 && (strncmp (mnemonic, "monitor", 7) != 0)
4714 && (strncmp (mnemonic, "mwait", 5) != 0)
4715 && (strcmp (mnemonic, "tpause") != 0)
4716 && (strcmp (mnemonic, "umwait") != 0)
4717 && !(operand_type_check (i.types[0], imm)
4718 && operand_type_check (i.types[1], imm)))
4719 swap_operands ();
4720
4721 /* The order of the immediates should be reversed for the
4722 two-immediate extrq and insertq instructions. */
4723 if (i.imm_operands == 2
4724 && (strcmp (mnemonic, "extrq") == 0
4725 || strcmp (mnemonic, "insertq") == 0))
4726 swap_2_operands (0, 1);
4727
4728 if (i.imm_operands)
4729 optimize_imm ();
4730
4731 /* Don't optimize displacement for movabs since it only takes 64bit
4732 displacement. */
4733 if (i.disp_operands
4734 && i.disp_encoding != disp_encoding_32bit
4735 && (flag_code != CODE_64BIT
4736 || strcmp (mnemonic, "movabs") != 0))
4737 optimize_disp ();
4738
4739 /* Next, we find a template that matches the given insn,
4740 making sure the overlap of the given operands types is consistent
4741 with the template operand types. */
4742
4743 if (!(t = match_template (mnem_suffix)))
4744 return;
4745
4746 if (sse_check != check_none
4747 && !i.tm.opcode_modifier.noavx
4748 && !i.tm.cpu_flags.bitfield.cpuavx
4749 && !i.tm.cpu_flags.bitfield.cpuavx512f
4750 && (i.tm.cpu_flags.bitfield.cpusse
4751 || i.tm.cpu_flags.bitfield.cpusse2
4752 || i.tm.cpu_flags.bitfield.cpusse3
4753 || i.tm.cpu_flags.bitfield.cpussse3
4754 || i.tm.cpu_flags.bitfield.cpusse4_1
4755 || i.tm.cpu_flags.bitfield.cpusse4_2
4756 || i.tm.cpu_flags.bitfield.cpupclmul
4757 || i.tm.cpu_flags.bitfield.cpuaes
4758 || i.tm.cpu_flags.bitfield.cpusha
4759 || i.tm.cpu_flags.bitfield.cpugfni))
4760 {
4761 (sse_check == check_warning
4762 ? as_warn
4763 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
4764 }
4765
4766 if (i.tm.opcode_modifier.fwait)
4767 if (!add_prefix (FWAIT_OPCODE))
4768 return;
4769
4770 /* Check if REP prefix is OK. */
4771 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
4772 {
4773 as_bad (_("invalid instruction `%s' after `%s'"),
4774 i.tm.name, i.rep_prefix);
4775 return;
4776 }
4777
4778 /* Check for lock without a lockable instruction. Destination operand
4779 must be memory unless it is xchg (0x86). */
4780 if (i.prefix[LOCK_PREFIX]
4781 && (!i.tm.opcode_modifier.islockable
4782 || i.mem_operands == 0
4783 || (i.tm.base_opcode != 0x86
4784 && !(i.flags[i.operands - 1] & Operand_Mem))))
4785 {
4786 as_bad (_("expecting lockable instruction after `lock'"));
4787 return;
4788 }
4789
4790 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4791 if (i.prefix[DATA_PREFIX]
4792 && (is_any_vex_encoding (&i.tm)
4793 || i.tm.operand_types[i.imm_operands].bitfield.class >= RegMMX
4794 || i.tm.operand_types[i.imm_operands + 1].bitfield.class >= RegMMX))
4795 {
4796 as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
4797 return;
4798 }
4799
4800 /* Check if HLE prefix is OK. */
4801 if (i.hle_prefix && !check_hle ())
4802 return;
4803
4804 /* Check BND prefix. */
4805 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
4806 as_bad (_("expecting valid branch instruction after `bnd'"));
4807
4808 /* Check NOTRACK prefix. */
4809 if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
4810 as_bad (_("expecting indirect branch instruction after `notrack'"));
4811
4812 if (i.tm.cpu_flags.bitfield.cpumpx)
4813 {
4814 if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
4815 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4816 else if (flag_code != CODE_16BIT
4817 ? i.prefix[ADDR_PREFIX]
4818 : i.mem_operands && !i.prefix[ADDR_PREFIX])
4819 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4820 }
4821
4822 /* Insert BND prefix. */
4823 if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
4824 {
4825 if (!i.prefix[BND_PREFIX])
4826 add_prefix (BND_PREFIX_OPCODE);
4827 else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
4828 {
4829 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4830 i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;
4831 }
4832 }
4833
4834 /* Check string instruction segment overrides. */
4835 if (i.tm.opcode_modifier.isstring >= IS_STRING_ES_OP0)
4836 {
4837 gas_assert (i.mem_operands);
4838 if (!check_string ())
4839 return;
4840 i.disp_operands = 0;
4841 }
4842
4843 if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
4844 optimize_encoding ();
4845
4846 if (!process_suffix ())
4847 return;
4848
4849 /* Update operand types. */
4850 for (j = 0; j < i.operands; j++)
4851 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
4852
4853 /* Make still unresolved immediate matches conform to size of immediate
4854 given in i.suffix. */
4855 if (!finalize_imm ())
4856 return;
4857
4858 if (i.types[0].bitfield.imm1)
4859 i.imm_operands = 0; /* kludge for shift insns. */
4860
4861 /* We only need to check those implicit registers for instructions
4862 with 3 operands or less. */
4863 if (i.operands <= 3)
4864 for (j = 0; j < i.operands; j++)
4865 if (i.types[j].bitfield.instance != InstanceNone
4866 && !i.types[j].bitfield.xmmword)
4867 i.reg_operands--;
4868
4869 /* For insns with operands there are more diddles to do to the opcode. */
4870 if (i.operands)
4871 {
4872 if (!process_operands ())
4873 return;
4874 }
4875 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4876 {
4877 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4878 as_warn (_("translating to `%sp'"), i.tm.name);
4879 }
4880
4881 if (is_any_vex_encoding (&i.tm))
4882 {
4883 if (!cpu_arch_flags.bitfield.cpui286)
4884 {
4885 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4886 i.tm.name);
4887 return;
4888 }
4889
4890 /* Check for explicit REX prefix. */
4891 if (i.prefix[REX_PREFIX] || i.rex_encoding)
4892 {
4893 as_bad (_("REX prefix invalid with `%s'"), i.tm.name);
4894 return;
4895 }
4896
4897 if (i.tm.opcode_modifier.vex)
4898 build_vex_prefix (t);
4899 else
4900 build_evex_prefix ();
4901
4902 /* The individual REX.RXBW bits got consumed. */
4903 i.rex &= REX_OPCODE;
4904 }
4905
4906 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4907 instructions may define INT_OPCODE as well, so avoid this corner
4908 case for those instructions that use MODRM. */
4909 if (i.tm.base_opcode == INT_OPCODE
4910 && !i.tm.opcode_modifier.modrm
4911 && i.op[0].imms->X_add_number == 3)
4912 {
4913 i.tm.base_opcode = INT3_OPCODE;
4914 i.imm_operands = 0;
4915 }
4916
4917 if ((i.tm.opcode_modifier.jump == JUMP
4918 || i.tm.opcode_modifier.jump == JUMP_BYTE
4919 || i.tm.opcode_modifier.jump == JUMP_DWORD)
4920 && i.op[0].disps->X_op == O_constant)
4921 {
4922 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4923 the absolute address given by the constant. Since ix86 jumps and
4924 calls are pc relative, we need to generate a reloc. */
4925 i.op[0].disps->X_add_symbol = &abs_symbol;
4926 i.op[0].disps->X_op = O_symbol;
4927 }
4928
4929 /* For 8 bit registers we need an empty rex prefix. Also if the
4930 instruction already has a prefix, we need to convert old
4931 registers to new ones. */
4932
4933 if ((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte
4934 && (i.op[0].regs->reg_flags & RegRex64) != 0)
4935 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte
4936 && (i.op[1].regs->reg_flags & RegRex64) != 0)
4937 || (((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte)
4938 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte))
4939 && i.rex != 0))
4940 {
4941 int x;
4942
4943 i.rex |= REX_OPCODE;
4944 for (x = 0; x < 2; x++)
4945 {
4946 /* Look for 8 bit operand that uses old registers. */
4947 if (i.types[x].bitfield.class == Reg && i.types[x].bitfield.byte
4948 && (i.op[x].regs->reg_flags & RegRex64) == 0)
4949 {
4950 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4951 /* In case it is "hi" register, give up. */
4952 if (i.op[x].regs->reg_num > 3)
4953 as_bad (_("can't encode register '%s%s' in an "
4954 "instruction requiring REX prefix."),
4955 register_prefix, i.op[x].regs->reg_name);
4956
4957 /* Otherwise it is equivalent to the extended register.
4958 Since the encoding doesn't change this is merely
4959 cosmetic cleanup for debug output. */
4960
4961 i.op[x].regs = i.op[x].regs + 8;
4962 }
4963 }
4964 }
4965
4966 if (i.rex == 0 && i.rex_encoding)
4967 {
4968 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4969 that uses legacy register. If it is "hi" register, don't add
4970 the REX_OPCODE byte. */
4971 int x;
4972 for (x = 0; x < 2; x++)
4973 if (i.types[x].bitfield.class == Reg
4974 && i.types[x].bitfield.byte
4975 && (i.op[x].regs->reg_flags & RegRex64) == 0
4976 && i.op[x].regs->reg_num > 3)
4977 {
4978 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4979 i.rex_encoding = FALSE;
4980 break;
4981 }
4982
4983 if (i.rex_encoding)
4984 i.rex = REX_OPCODE;
4985 }
4986
4987 if (i.rex != 0)
4988 add_prefix (REX_OPCODE | i.rex);
4989
4990 insert_lfence_before ();
4991
4992 /* We are ready to output the insn. */
4993 output_insn ();
4994
4995 insert_lfence_after ();
4996
4997 last_insn.seg = now_seg;
4998
4999 if (i.tm.opcode_modifier.isprefix)
5000 {
5001 last_insn.kind = last_insn_prefix;
5002 last_insn.name = i.tm.name;
5003 last_insn.file = as_where (&last_insn.line);
5004 }
5005 else
5006 last_insn.kind = last_insn_other;
5007 }
5008
5009 static char *
5010 parse_insn (char *line, char *mnemonic)
5011 {
5012 char *l = line;
5013 char *token_start = l;
5014 char *mnem_p;
5015 int supported;
5016 const insn_template *t;
5017 char *dot_p = NULL;
5018
5019 while (1)
5020 {
5021 mnem_p = mnemonic;
5022 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
5023 {
5024 if (*mnem_p == '.')
5025 dot_p = mnem_p;
5026 mnem_p++;
5027 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
5028 {
5029 as_bad (_("no such instruction: `%s'"), token_start);
5030 return NULL;
5031 }
5032 l++;
5033 }
5034 if (!is_space_char (*l)
5035 && *l != END_OF_INSN
5036 && (intel_syntax
5037 || (*l != PREFIX_SEPARATOR
5038 && *l != ',')))
5039 {
5040 as_bad (_("invalid character %s in mnemonic"),
5041 output_invalid (*l));
5042 return NULL;
5043 }
5044 if (token_start == l)
5045 {
5046 if (!intel_syntax && *l == PREFIX_SEPARATOR)
5047 as_bad (_("expecting prefix; got nothing"));
5048 else
5049 as_bad (_("expecting mnemonic; got nothing"));
5050 return NULL;
5051 }
5052
5053 /* Look up instruction (or prefix) via hash table. */
5054 current_templates = (const templates *) hash_find (op_hash, mnemonic);
5055
5056 if (*l != END_OF_INSN
5057 && (!is_space_char (*l) || l[1] != END_OF_INSN)
5058 && current_templates
5059 && current_templates->start->opcode_modifier.isprefix)
5060 {
5061 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
5062 {
5063 as_bad ((flag_code != CODE_64BIT
5064 ? _("`%s' is only supported in 64-bit mode")
5065 : _("`%s' is not supported in 64-bit mode")),
5066 current_templates->start->name);
5067 return NULL;
5068 }
5069 /* If we are in 16-bit mode, do not allow addr16 or data16.
5070 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5071 if ((current_templates->start->opcode_modifier.size == SIZE16
5072 || current_templates->start->opcode_modifier.size == SIZE32)
5073 && flag_code != CODE_64BIT
5074 && ((current_templates->start->opcode_modifier.size == SIZE32)
5075 ^ (flag_code == CODE_16BIT)))
5076 {
5077 as_bad (_("redundant %s prefix"),
5078 current_templates->start->name);
5079 return NULL;
5080 }
5081 if (current_templates->start->opcode_length == 0)
5082 {
5083 /* Handle pseudo prefixes. */
5084 switch (current_templates->start->base_opcode)
5085 {
5086 case 0x0:
5087 /* {disp8} */
5088 i.disp_encoding = disp_encoding_8bit;
5089 break;
5090 case 0x1:
5091 /* {disp32} */
5092 i.disp_encoding = disp_encoding_32bit;
5093 break;
5094 case 0x2:
5095 /* {load} */
5096 i.dir_encoding = dir_encoding_load;
5097 break;
5098 case 0x3:
5099 /* {store} */
5100 i.dir_encoding = dir_encoding_store;
5101 break;
5102 case 0x4:
5103 /* {vex} */
5104 i.vec_encoding = vex_encoding_vex;
5105 break;
5106 case 0x5:
5107 /* {vex3} */
5108 i.vec_encoding = vex_encoding_vex3;
5109 break;
5110 case 0x6:
5111 /* {evex} */
5112 i.vec_encoding = vex_encoding_evex;
5113 break;
5114 case 0x7:
5115 /* {rex} */
5116 i.rex_encoding = TRUE;
5117 break;
5118 case 0x8:
5119 /* {nooptimize} */
5120 i.no_optimize = TRUE;
5121 break;
5122 default:
5123 abort ();
5124 }
5125 }
5126 else
5127 {
5128 /* Add prefix, checking for repeated prefixes. */
5129 switch (add_prefix (current_templates->start->base_opcode))
5130 {
5131 case PREFIX_EXIST:
5132 return NULL;
5133 case PREFIX_DS:
5134 if (current_templates->start->cpu_flags.bitfield.cpuibt)
5135 i.notrack_prefix = current_templates->start->name;
5136 break;
5137 case PREFIX_REP:
5138 if (current_templates->start->cpu_flags.bitfield.cpuhle)
5139 i.hle_prefix = current_templates->start->name;
5140 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
5141 i.bnd_prefix = current_templates->start->name;
5142 else
5143 i.rep_prefix = current_templates->start->name;
5144 break;
5145 default:
5146 break;
5147 }
5148 }
5149 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5150 token_start = ++l;
5151 }
5152 else
5153 break;
5154 }
5155
5156 if (!current_templates)
5157 {
5158 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5159 Check if we should swap operand or force 32bit displacement in
5160 encoding. */
5161 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
5162 i.dir_encoding = dir_encoding_swap;
5163 else if (mnem_p - 3 == dot_p
5164 && dot_p[1] == 'd'
5165 && dot_p[2] == '8')
5166 i.disp_encoding = disp_encoding_8bit;
5167 else if (mnem_p - 4 == dot_p
5168 && dot_p[1] == 'd'
5169 && dot_p[2] == '3'
5170 && dot_p[3] == '2')
5171 i.disp_encoding = disp_encoding_32bit;
5172 else
5173 goto check_suffix;
5174 mnem_p = dot_p;
5175 *dot_p = '\0';
5176 current_templates = (const templates *) hash_find (op_hash, mnemonic);
5177 }
5178
5179 if (!current_templates)
5180 {
5181 check_suffix:
5182 if (mnem_p > mnemonic)
5183 {
5184 /* See if we can get a match by trimming off a suffix. */
5185 switch (mnem_p[-1])
5186 {
5187 case WORD_MNEM_SUFFIX:
5188 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
5189 i.suffix = SHORT_MNEM_SUFFIX;
5190 else
5191 /* Fall through. */
5192 case BYTE_MNEM_SUFFIX:
5193 case QWORD_MNEM_SUFFIX:
5194 i.suffix = mnem_p[-1];
5195 mnem_p[-1] = '\0';
5196 current_templates = (const templates *) hash_find (op_hash,
5197 mnemonic);
5198 break;
5199 case SHORT_MNEM_SUFFIX:
5200 case LONG_MNEM_SUFFIX:
5201 if (!intel_syntax)
5202 {
5203 i.suffix = mnem_p[-1];
5204 mnem_p[-1] = '\0';
5205 current_templates = (const templates *) hash_find (op_hash,
5206 mnemonic);
5207 }
5208 break;
5209
5210 /* Intel Syntax. */
5211 case 'd':
5212 if (intel_syntax)
5213 {
5214 if (intel_float_operand (mnemonic) == 1)
5215 i.suffix = SHORT_MNEM_SUFFIX;
5216 else
5217 i.suffix = LONG_MNEM_SUFFIX;
5218 mnem_p[-1] = '\0';
5219 current_templates = (const templates *) hash_find (op_hash,
5220 mnemonic);
5221 }
5222 break;
5223 }
5224 }
5225
5226 if (!current_templates)
5227 {
5228 as_bad (_("no such instruction: `%s'"), token_start);
5229 return NULL;
5230 }
5231 }
5232
5233 if (current_templates->start->opcode_modifier.jump == JUMP
5234 || current_templates->start->opcode_modifier.jump == JUMP_BYTE)
5235 {
5236 /* Check for a branch hint. We allow ",pt" and ",pn" for
5237 predict taken and predict not taken respectively.
5238 I'm not sure that branch hints actually do anything on loop
5239 and jcxz insns (JumpByte) for current Pentium4 chips. They
5240 may work in the future and it doesn't hurt to accept them
5241 now. */
5242 if (l[0] == ',' && l[1] == 'p')
5243 {
5244 if (l[2] == 't')
5245 {
5246 if (!add_prefix (DS_PREFIX_OPCODE))
5247 return NULL;
5248 l += 3;
5249 }
5250 else if (l[2] == 'n')
5251 {
5252 if (!add_prefix (CS_PREFIX_OPCODE))
5253 return NULL;
5254 l += 3;
5255 }
5256 }
5257 }
5258 /* Any other comma loses. */
5259 if (*l == ',')
5260 {
5261 as_bad (_("invalid character %s in mnemonic"),
5262 output_invalid (*l));
5263 return NULL;
5264 }
5265
5266 /* Check if instruction is supported on specified architecture. */
5267 supported = 0;
5268 for (t = current_templates->start; t < current_templates->end; ++t)
5269 {
5270 supported |= cpu_flags_match (t);
5271 if (supported == CPU_FLAGS_PERFECT_MATCH)
5272 {
5273 if (!cpu_arch_flags.bitfield.cpui386 && (flag_code != CODE_16BIT))
5274 as_warn (_("use .code16 to ensure correct addressing mode"));
5275
5276 return l;
5277 }
5278 }
5279
5280 if (!(supported & CPU_FLAGS_64BIT_MATCH))
5281 as_bad (flag_code == CODE_64BIT
5282 ? _("`%s' is not supported in 64-bit mode")
5283 : _("`%s' is only supported in 64-bit mode"),
5284 current_templates->start->name);
5285 else
5286 as_bad (_("`%s' is not supported on `%s%s'"),
5287 current_templates->start->name,
5288 cpu_arch_name ? cpu_arch_name : default_arch,
5289 cpu_sub_arch_name ? cpu_sub_arch_name : "");
5290
5291 return NULL;
5292 }
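/* Pseudo prefixes handled above are written in braces before the
   mnemonic, e.g. "{vex3} vpaddd %xmm1, %xmm2, %xmm3" forces the 3-byte
   VEX form and "{disp32} movl 8(%eax), %ebx" forces a 32-bit
   displacement. */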
5293
5294 static char *
5295 parse_operands (char *l, const char *mnemonic)
5296 {
5297 char *token_start;
5298
5299 /* 1 if operand is pending after ','. */
5300 unsigned int expecting_operand = 0;
5301
5302 /* Non-zero if operand parens not balanced. */
5303 unsigned int paren_not_balanced;
5304
5305 while (*l != END_OF_INSN)
5306 {
5307 /* Skip optional white space before operand. */
5308 if (is_space_char (*l))
5309 ++l;
5310 if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
5311 {
5312 as_bad (_("invalid character %s before operand %d"),
5313 output_invalid (*l),
5314 i.operands + 1);
5315 return NULL;
5316 }
5317 token_start = l; /* After white space. */
5318 paren_not_balanced = 0;
5319 while (paren_not_balanced || *l != ',')
5320 {
5321 if (*l == END_OF_INSN)
5322 {
5323 if (paren_not_balanced)
5324 {
5325 if (!intel_syntax)
5326 as_bad (_("unbalanced parenthesis in operand %d."),
5327 i.operands + 1);
5328 else
5329 as_bad (_("unbalanced brackets in operand %d."),
5330 i.operands + 1);
5331 return NULL;
5332 }
5333 else
5334 break; /* we are done */
5335 }
5336 else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
5337 {
5338 as_bad (_("invalid character %s in operand %d"),
5339 output_invalid (*l),
5340 i.operands + 1);
5341 return NULL;
5342 }
5343 if (!intel_syntax)
5344 {
5345 if (*l == '(')
5346 ++paren_not_balanced;
5347 if (*l == ')')
5348 --paren_not_balanced;
5349 }
5350 else
5351 {
5352 if (*l == '[')
5353 ++paren_not_balanced;
5354 if (*l == ']')
5355 --paren_not_balanced;
5356 }
5357 l++;
5358 }
5359 if (l != token_start)
5360 { /* Yes, we've read in another operand. */
5361 unsigned int operand_ok;
5362 this_operand = i.operands++;
5363 if (i.operands > MAX_OPERANDS)
5364 {
5365 as_bad (_("spurious operands; (%d operands/instruction max)"),
5366 MAX_OPERANDS);
5367 return NULL;
5368 }
5369 i.types[this_operand].bitfield.unspecified = 1;
5370 /* Now parse operand adding info to 'i' as we go along. */
5371 END_STRING_AND_SAVE (l);
5372
5373 if (i.mem_operands > 1)
5374 {
5375 as_bad (_("too many memory references for `%s'"),
5376 mnemonic);
5377 return 0;
5378 }
5379
5380 if (intel_syntax)
5381 operand_ok =
5382 i386_intel_operand (token_start,
5383 intel_float_operand (mnemonic));
5384 else
5385 operand_ok = i386_att_operand (token_start);
5386
5387 RESTORE_END_STRING (l);
5388 if (!operand_ok)
5389 return NULL;
5390 }
5391 else
5392 {
5393 if (expecting_operand)
5394 {
5395 expecting_operand_after_comma:
5396 as_bad (_("expecting operand after ','; got nothing"));
5397 return NULL;
5398 }
5399 if (*l == ',')
5400 {
5401 as_bad (_("expecting operand before ','; got nothing"));
5402 return NULL;
5403 }
5404 }
5405
5406 /* Now *l must be either ',' or END_OF_INSN. */
5407 if (*l == ',')
5408 {
5409 if (*++l == END_OF_INSN)
5410 {
5411 /* Just skip it; if it's \n, complain. */
5412 goto expecting_operand_after_comma;
5413 }
5414 expecting_operand = 1;
5415 }
5416 }
5417 return l;
5418 }
5419
5420 static void
5421 swap_2_operands (int xchg1, int xchg2)
5422 {
5423 union i386_op temp_op;
5424 i386_operand_type temp_type;
5425 unsigned int temp_flags;
5426 enum bfd_reloc_code_real temp_reloc;
5427
5428 temp_type = i.types[xchg2];
5429 i.types[xchg2] = i.types[xchg1];
5430 i.types[xchg1] = temp_type;
5431
5432 temp_flags = i.flags[xchg2];
5433 i.flags[xchg2] = i.flags[xchg1];
5434 i.flags[xchg1] = temp_flags;
5435
5436 temp_op = i.op[xchg2];
5437 i.op[xchg2] = i.op[xchg1];
5438 i.op[xchg1] = temp_op;
5439
5440 temp_reloc = i.reloc[xchg2];
5441 i.reloc[xchg2] = i.reloc[xchg1];
5442 i.reloc[xchg1] = temp_reloc;
5443
5444 if (i.mask)
5445 {
5446 if (i.mask->operand == xchg1)
5447 i.mask->operand = xchg2;
5448 else if (i.mask->operand == xchg2)
5449 i.mask->operand = xchg1;
5450 }
5451 if (i.broadcast)
5452 {
5453 if (i.broadcast->operand == xchg1)
5454 i.broadcast->operand = xchg2;
5455 else if (i.broadcast->operand == xchg2)
5456 i.broadcast->operand = xchg1;
5457 }
5458 if (i.rounding)
5459 {
5460 if (i.rounding->operand == xchg1)
5461 i.rounding->operand = xchg2;
5462 else if (i.rounding->operand == xchg2)
5463 i.rounding->operand = xchg1;
5464 }
5465 }
5466
5467 static void
5468 swap_operands (void)
5469 {
5470 switch (i.operands)
5471 {
5472 case 5:
5473 case 4:
5474 swap_2_operands (1, i.operands - 2);
5475 /* Fall through. */
5476 case 3:
5477 case 2:
5478 swap_2_operands (0, i.operands - 1);
5479 break;
5480 default:
5481 abort ();
5482 }
5483
5484 if (i.mem_operands == 2)
5485 {
5486 const seg_entry *temp_seg;
5487 temp_seg = i.seg[0];
5488 i.seg[0] = i.seg[1];
5489 i.seg[1] = temp_seg;
5490 }
5491 }
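/* This runs for Intel syntax, where the destination is written first:
   "add eax, ebx" is reordered so that it matches the same templates as
   AT&T "addl %ebx, %eax". */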
5492
5493 /* Try to ensure constant immediates are represented in the smallest
5494 opcode possible. */
5495 static void
5496 optimize_imm (void)
5497 {
5498 char guess_suffix = 0;
5499 int op;
5500
5501 if (i.suffix)
5502 guess_suffix = i.suffix;
5503 else if (i.reg_operands)
5504 {
5505 /* Figure out a suffix from the last register operand specified.
5506 We can't do this properly yet, i.e. excluding special register
5507 instances, but the following works for instructions with
5508 immediates. In any case, we can't set i.suffix yet. */
5509 for (op = i.operands; --op >= 0;)
5510 if (i.types[op].bitfield.class != Reg)
5511 continue;
5512 else if (i.types[op].bitfield.byte)
5513 {
5514 guess_suffix = BYTE_MNEM_SUFFIX;
5515 break;
5516 }
5517 else if (i.types[op].bitfield.word)
5518 {
5519 guess_suffix = WORD_MNEM_SUFFIX;
5520 break;
5521 }
5522 else if (i.types[op].bitfield.dword)
5523 {
5524 guess_suffix = LONG_MNEM_SUFFIX;
5525 break;
5526 }
5527 else if (i.types[op].bitfield.qword)
5528 {
5529 guess_suffix = QWORD_MNEM_SUFFIX;
5530 break;
5531 }
5532 }
5533 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5534 guess_suffix = WORD_MNEM_SUFFIX;
5535
5536 for (op = i.operands; --op >= 0;)
5537 if (operand_type_check (i.types[op], imm))
5538 {
5539 switch (i.op[op].imms->X_op)
5540 {
5541 case O_constant:
5542 /* If a suffix is given, this operand may be shortened. */
5543 switch (guess_suffix)
5544 {
5545 case LONG_MNEM_SUFFIX:
5546 i.types[op].bitfield.imm32 = 1;
5547 i.types[op].bitfield.imm64 = 1;
5548 break;
5549 case WORD_MNEM_SUFFIX:
5550 i.types[op].bitfield.imm16 = 1;
5551 i.types[op].bitfield.imm32 = 1;
5552 i.types[op].bitfield.imm32s = 1;
5553 i.types[op].bitfield.imm64 = 1;
5554 break;
5555 case BYTE_MNEM_SUFFIX:
5556 i.types[op].bitfield.imm8 = 1;
5557 i.types[op].bitfield.imm8s = 1;
5558 i.types[op].bitfield.imm16 = 1;
5559 i.types[op].bitfield.imm32 = 1;
5560 i.types[op].bitfield.imm32s = 1;
5561 i.types[op].bitfield.imm64 = 1;
5562 break;
5563 }
5564
5565 /* If this operand is at most 16 bits, convert it
5566 to a signed 16 bit number before trying to see
5567 whether it will fit in an even smaller size.
5568 This allows a 16-bit operand such as $0xffe0 to
5569 be recognised as within Imm8S range. */
5570 if ((i.types[op].bitfield.imm16)
5571 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
5572 {
5573 i.op[op].imms->X_add_number =
5574 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
5575 }
5576 #ifdef BFD64
5577 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5578 if ((i.types[op].bitfield.imm32)
5579 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
5580 == 0))
5581 {
5582 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
5583 ^ ((offsetT) 1 << 31))
5584 - ((offsetT) 1 << 31));
5585 }
5586 #endif
5587 i.types[op]
5588 = operand_type_or (i.types[op],
5589 smallest_imm_type (i.op[op].imms->X_add_number));
5590
5591 /* We must avoid matching Imm32 templates when only a
5592 64-bit immediate is available. */
5593 if (guess_suffix == QWORD_MNEM_SUFFIX)
5594 i.types[op].bitfield.imm32 = 0;
5595 break;
5596
5597 case O_absent:
5598 case O_register:
5599 abort ();
5600
5601 /* Symbols and expressions. */
5602 default:
5603 /* Convert symbolic operand to proper sizes for matching, but don't
5604 prevent matching a set of insns that only supports sizes other
5605 than those matching the insn suffix. */
5606 {
5607 i386_operand_type mask, allowed;
5608 const insn_template *t;
5609
5610 operand_type_set (&mask, 0);
5611 operand_type_set (&allowed, 0);
5612
5613 for (t = current_templates->start;
5614 t < current_templates->end;
5615 ++t)
5616 {
5617 allowed = operand_type_or (allowed, t->operand_types[op]);
5618 allowed = operand_type_and (allowed, anyimm);
5619 }
5620 switch (guess_suffix)
5621 {
5622 case QWORD_MNEM_SUFFIX:
5623 mask.bitfield.imm64 = 1;
5624 mask.bitfield.imm32s = 1;
5625 break;
5626 case LONG_MNEM_SUFFIX:
5627 mask.bitfield.imm32 = 1;
5628 break;
5629 case WORD_MNEM_SUFFIX:
5630 mask.bitfield.imm16 = 1;
5631 break;
5632 case BYTE_MNEM_SUFFIX:
5633 mask.bitfield.imm8 = 1;
5634 break;
5635 default:
5636 break;
5637 }
5638 allowed = operand_type_and (mask, allowed);
5639 if (!operand_type_all_zero (&allowed))
5640 i.types[op] = operand_type_and (i.types[op], mask);
5641 }
5642 break;
5643 }
5644 }
5645 }
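/* The effect of the narrowing above: for "addq $0x7f, %rax" the
   constant gains Imm8S, letting template matching pick the
   sign-extended 83 /0 form instead of one with a 4-byte immediate. */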
5646
5647 /* Try to use the smallest displacement type too. */
5648 static void
5649 optimize_disp (void)
5650 {
5651 int op;
5652
5653 for (op = i.operands; --op >= 0;)
5654 if (operand_type_check (i.types[op], disp))
5655 {
5656 if (i.op[op].disps->X_op == O_constant)
5657 {
5658 offsetT op_disp = i.op[op].disps->X_add_number;
5659
5660 if (i.types[op].bitfield.disp16
5661 && (op_disp & ~(offsetT) 0xffff) == 0)
5662 {
5663 /* If this operand is at most 16 bits, convert
5664 to a signed 16 bit number and don't use 64bit
5665 displacement. */
5666 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
5667 i.types[op].bitfield.disp64 = 0;
5668 }
5669 #ifdef BFD64
5670 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5671 if (i.types[op].bitfield.disp32
5672 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
5673 {
5674 /* If this operand is at most 32 bits, convert
5675 to a signed 32 bit number and don't use 64bit
5676 displacement. */
5677 op_disp &= (((offsetT) 2 << 31) - 1);
5678 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
5679 i.types[op].bitfield.disp64 = 0;
5680 }
5681 #endif
5682 if (!op_disp && i.types[op].bitfield.baseindex)
5683 {
5684 i.types[op].bitfield.disp8 = 0;
5685 i.types[op].bitfield.disp16 = 0;
5686 i.types[op].bitfield.disp32 = 0;
5687 i.types[op].bitfield.disp32s = 0;
5688 i.types[op].bitfield.disp64 = 0;
5689 i.op[op].disps = 0;
5690 i.disp_operands--;
5691 }
5692 else if (flag_code == CODE_64BIT)
5693 {
5694 if (fits_in_signed_long (op_disp))
5695 {
5696 i.types[op].bitfield.disp64 = 0;
5697 i.types[op].bitfield.disp32s = 1;
5698 }
5699 if (i.prefix[ADDR_PREFIX]
5700 && fits_in_unsigned_long (op_disp))
5701 i.types[op].bitfield.disp32 = 1;
5702 }
5703 if ((i.types[op].bitfield.disp32
5704 || i.types[op].bitfield.disp32s
5705 || i.types[op].bitfield.disp16)
5706 && fits_in_disp8 (op_disp))
5707 i.types[op].bitfield.disp8 = 1;
5708 }
5709 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5710 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
5711 {
5712 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
5713 i.op[op].disps, 0, i.reloc[op]);
5714 i.types[op].bitfield.disp8 = 0;
5715 i.types[op].bitfield.disp16 = 0;
5716 i.types[op].bitfield.disp32 = 0;
5717 i.types[op].bitfield.disp32s = 0;
5718 i.types[op].bitfield.disp64 = 0;
5719 }
5720 else
5721 /* We only support 64bit displacement on constants. */
5722 i.types[op].bitfield.disp64 = 0;
5723 }
5724 }
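/* Likewise for displacements: "movl 0x40(%eax), %ebx" satisfies
   fits_in_disp8 and can use the one-byte displacement form, while a
   base/index operand with a zero displacement drops it entirely,
   turning "movl 0(%eax), %ebx" into plain "movl (%eax), %ebx". */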
5725
5726 /* Return 1 if there is a match in broadcast bytes between operand
5727 GIVEN and instruction template T. */
5728
5729 static INLINE int
5730 match_broadcast_size (const insn_template *t, unsigned int given)
5731 {
5732 return ((t->opcode_modifier.broadcast == BYTE_BROADCAST
5733 && i.types[given].bitfield.byte)
5734 || (t->opcode_modifier.broadcast == WORD_BROADCAST
5735 && i.types[given].bitfield.word)
5736 || (t->opcode_modifier.broadcast == DWORD_BROADCAST
5737 && i.types[given].bitfield.dword)
5738 || (t->opcode_modifier.broadcast == QWORD_BROADCAST
5739 && i.types[given].bitfield.qword));
5740 }
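/* E.g. for "vaddps (%rax){1to16}, %zmm1, %zmm2" (AT&T {1toN} syntax)
   the template carries DWORD_BROADCAST, so check_VecOperands below
   computes 4-byte elements replicated 16 times to fill the 64-byte
   source. */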
5741
5742 /* Check if operands are valid for the instruction. */
5743
5744 static int
5745 check_VecOperands (const insn_template *t)
5746 {
5747 unsigned int op;
5748 i386_cpu_flags cpu;
5749
5750 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5751 any one operand are implicitly requiring AVX512VL support if the actual
5752 operand size is YMMword or XMMword. Since this function runs after
5753 template matching, there's no need to check for YMMword/XMMword in
5754 the template. */
5755 cpu = cpu_flags_and (t->cpu_flags, avx512);
5756 if (!cpu_flags_all_zero (&cpu)
5757 && !t->cpu_flags.bitfield.cpuavx512vl
5758 && !cpu_arch_flags.bitfield.cpuavx512vl)
5759 {
5760 for (op = 0; op < t->operands; ++op)
5761 {
5762 if (t->operand_types[op].bitfield.zmmword
5763 && (i.types[op].bitfield.ymmword
5764 || i.types[op].bitfield.xmmword))
5765 {
5766 i.error = unsupported;
5767 return 1;
5768 }
5769 }
5770 }
5771
5772 /* Without a VSIB byte, we can't have a vector register as the index. */
5773 if (!t->opcode_modifier.sib
5774 && i.index_reg
5775 && (i.index_reg->reg_type.bitfield.xmmword
5776 || i.index_reg->reg_type.bitfield.ymmword
5777 || i.index_reg->reg_type.bitfield.zmmword))
5778 {
5779 i.error = unsupported_vector_index_register;
5780 return 1;
5781 }
5782
5783 /* Check if default mask is allowed. */
5784 if (t->opcode_modifier.nodefmask
5785 && (!i.mask || i.mask->mask->reg_num == 0))
5786 {
5787 i.error = no_default_mask;
5788 return 1;
5789 }
5790
5791 /* For VSIB byte, we need a vector register for index, and all vector
5792 registers must be distinct. */
5793 if (t->opcode_modifier.sib)
5794 {
5795 if (!i.index_reg
5796 || !((t->opcode_modifier.sib == VECSIB128
5797 && i.index_reg->reg_type.bitfield.xmmword)
5798 || (t->opcode_modifier.sib == VECSIB256
5799 && i.index_reg->reg_type.bitfield.ymmword)
5800 || (t->opcode_modifier.sib == VECSIB512
5801 && i.index_reg->reg_type.bitfield.zmmword)))
5802 {
5803 i.error = invalid_vsib_address;
5804 return 1;
5805 }
5806
5807 gas_assert (i.reg_operands == 2 || i.mask);
5808 if (i.reg_operands == 2 && !i.mask)
5809 {
5810 gas_assert (i.types[0].bitfield.class == RegSIMD);
5811 gas_assert (i.types[0].bitfield.xmmword
5812 || i.types[0].bitfield.ymmword);
5813 gas_assert (i.types[2].bitfield.class == RegSIMD);
5814 gas_assert (i.types[2].bitfield.xmmword
5815 || i.types[2].bitfield.ymmword);
5816 if (operand_check == check_none)
5817 return 0;
5818 if (register_number (i.op[0].regs)
5819 != register_number (i.index_reg)
5820 && register_number (i.op[2].regs)
5821 != register_number (i.index_reg)
5822 && register_number (i.op[0].regs)
5823 != register_number (i.op[2].regs))
5824 return 0;
5825 if (operand_check == check_error)
5826 {
5827 i.error = invalid_vector_register_set;
5828 return 1;
5829 }
5830 as_warn (_("mask, index, and destination registers should be distinct"));
5831 }
5832 else if (i.reg_operands == 1 && i.mask)
5833 {
5834 if (i.types[1].bitfield.class == RegSIMD
5835 && (i.types[1].bitfield.xmmword
5836 || i.types[1].bitfield.ymmword
5837 || i.types[1].bitfield.zmmword)
5838 && (register_number (i.op[1].regs)
5839 == register_number (i.index_reg)))
5840 {
5841 if (operand_check == check_error)
5842 {
5843 i.error = invalid_vector_register_set;
5844 return 1;
5845 }
5846 if (operand_check != check_none)
5847 as_warn (_("index and destination registers should be distinct"));
5848 }
5849 }
5850 }
5851
5852 /* Check if broadcast is supported by the instruction and is applied
5853 to the memory operand. */
5854 if (i.broadcast)
5855 {
5856 i386_operand_type type, overlap;
5857
5858 /* Check if specified broadcast is supported in this instruction,
5859 and its broadcast bytes match the memory operand. */
5860 op = i.broadcast->operand;
5861 if (!t->opcode_modifier.broadcast
5862 || !(i.flags[op] & Operand_Mem)
5863 || (!i.types[op].bitfield.unspecified
5864 && !match_broadcast_size (t, op)))
5865 {
5866 bad_broadcast:
5867 i.error = unsupported_broadcast;
5868 return 1;
5869 }
5870
5871 i.broadcast->bytes = ((1 << (t->opcode_modifier.broadcast - 1))
5872 * i.broadcast->type);
5873 operand_type_set (&type, 0);
5874 switch (i.broadcast->bytes)
5875 {
5876 case 2:
5877 type.bitfield.word = 1;
5878 break;
5879 case 4:
5880 type.bitfield.dword = 1;
5881 break;
5882 case 8:
5883 type.bitfield.qword = 1;
5884 break;
5885 case 16:
5886 type.bitfield.xmmword = 1;
5887 break;
5888 case 32:
5889 type.bitfield.ymmword = 1;
5890 break;
5891 case 64:
5892 type.bitfield.zmmword = 1;
5893 break;
5894 default:
5895 goto bad_broadcast;
5896 }
5897
5898 overlap = operand_type_and (type, t->operand_types[op]);
5899 if (t->operand_types[op].bitfield.class == RegSIMD
5900 && t->operand_types[op].bitfield.byte
5901 + t->operand_types[op].bitfield.word
5902 + t->operand_types[op].bitfield.dword
5903 + t->operand_types[op].bitfield.qword > 1)
5904 {
5905 overlap.bitfield.xmmword = 0;
5906 overlap.bitfield.ymmword = 0;
5907 overlap.bitfield.zmmword = 0;
5908 }
5909 if (operand_type_all_zero (&overlap))
5910 goto bad_broadcast;
5911
5912 if (t->opcode_modifier.checkregsize)
5913 {
5914 unsigned int j;
5915
5916 type.bitfield.baseindex = 1;
5917 for (j = 0; j < i.operands; ++j)
5918 {
5919 if (j != op
5920 && !operand_type_register_match(i.types[j],
5921 t->operand_types[j],
5922 type,
5923 t->operand_types[op]))
5924 goto bad_broadcast;
5925 }
5926 }
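
      /* Hedged worked example of the sizing above: for
	 "vaddps (%rax){1to16}, %zmm1, %zmm2" the element size is 4
	 bytes and the multiplier is 16, so i.broadcast->bytes == 64,
	 which must overlap a ZMMword template operand.  */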
5927 }
  /* If broadcast is supported in this instruction, we need to check that
     an operand of one-element size isn't specified without broadcast.  */
5930 else if (t->opcode_modifier.broadcast && i.mem_operands)
5931 {
5932 /* Find memory operand. */
5933 for (op = 0; op < i.operands; op++)
5934 if (i.flags[op] & Operand_Mem)
5935 break;
5936 gas_assert (op < i.operands);
5937 /* Check size of the memory operand. */
5938 if (match_broadcast_size (t, op))
5939 {
5940 i.error = broadcast_needed;
5941 return 1;
5942 }
5943 }
5944 else
5945 op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
5946
5947 /* Check if requested masking is supported. */
5948 if (i.mask)
5949 {
5950 switch (t->opcode_modifier.masking)
5951 {
5952 case BOTH_MASKING:
5953 break;
5954 case MERGING_MASKING:
5955 if (i.mask->zeroing)
5956 {
5957 case 0:
5958 i.error = unsupported_masking;
5959 return 1;
5960 }
5961 break;
5962 case DYNAMIC_MASKING:
5963 /* Memory destinations allow only merging masking. */
5964 if (i.mask->zeroing && i.mem_operands)
5965 {
5966 /* Find memory operand. */
5967 for (op = 0; op < i.operands; op++)
5968 if (i.flags[op] & Operand_Mem)
5969 break;
5970 gas_assert (op < i.operands);
5971 if (op == i.operands - 1)
5972 {
5973 i.error = unsupported_masking;
5974 return 1;
5975 }
5976 }
5977 break;
5978 default:
5979 abort ();
5980 }
5981 }
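
  /* Hedged illustration of the rules above: under DYNAMIC_MASKING a
     zeroing-masked store such as "vmovaps %zmm0, (%rax){%k1}{z}" is
     rejected, since a memory destination only permits merging
     masking.  */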
5982
5983 /* Check if masking is applied to dest operand. */
5984 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
5985 {
5986 i.error = mask_not_on_destination;
5987 return 1;
5988 }
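
  /* E.g. (illustrative): "vaddps %zmm1, %zmm2{%k1}, %zmm3" fails the
     check above because the {%k1} mask sits on a source instead of the
     destination operand.  */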
5989
5990 /* Check RC/SAE. */
5991 if (i.rounding)
5992 {
5993 if (!t->opcode_modifier.sae
5994 || (i.rounding->type != saeonly && !t->opcode_modifier.staticrounding))
5995 {
5996 i.error = unsupported_rc_sae;
5997 return 1;
5998 }
5999 /* If the instruction has several immediate operands and one of
6000 them is rounding, the rounding operand should be the last
6001 immediate operand. */
6002 if (i.imm_operands > 1
6003 && i.rounding->operand != (int) (i.imm_operands - 1))
6004 {
6005 i.error = rc_sae_operand_not_last_imm;
6006 return 1;
6007 }
6008 }
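
  /* Illustration (not from the original comments):
     "vaddps {rz-sae}, %zmm1, %zmm2, %zmm3" needs a StaticRounding
     template, while "vucomiss {sae}, %xmm1, %xmm2" only needs SAE
     support; anything else lands in unsupported_rc_sae.  */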
6009
6010 /* Check the special Imm4 cases; must be the first operand. */
6011 if (t->cpu_flags.bitfield.cpuxop && t->operands == 5)
6012 {
6013 if (i.op[0].imms->X_op != O_constant
6014 || !fits_in_imm4 (i.op[0].imms->X_add_number))
6015 {
6016 i.error = bad_imm4;
6017 return 1;
6018 }
6019
6020 /* Turn off Imm<N> so that update_imm won't complain. */
6021 operand_type_set (&i.types[0], 0);
6022 }
6023
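
  /* Illustrative example of the Imm4 check above: XOP insns such as
     "vpermil2ps $2, %xmm0, %xmm1, %xmm2, %xmm3" carry a 4-bit selector
     as their first (AT&T) operand; a constant outside 0-15 is
     diagnosed as bad_imm4.  */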
6024 /* Check vector Disp8 operand. */
6025 if (t->opcode_modifier.disp8memshift
6026 && i.disp_encoding != disp_encoding_32bit)
6027 {
6028 if (i.broadcast)
6029 i.memshift = t->opcode_modifier.broadcast - 1;
6030 else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
6031 i.memshift = t->opcode_modifier.disp8memshift;
6032 else
6033 {
6034 const i386_operand_type *type = NULL;
6035
6036 i.memshift = 0;
6037 for (op = 0; op < i.operands; op++)
6038 if (i.flags[op] & Operand_Mem)
6039 {
6040 if (t->opcode_modifier.evex == EVEXLIG)
6041 i.memshift = 2 + (i.suffix == QWORD_MNEM_SUFFIX);
6042 else if (t->operand_types[op].bitfield.xmmword
6043 + t->operand_types[op].bitfield.ymmword
6044 + t->operand_types[op].bitfield.zmmword <= 1)
6045 type = &t->operand_types[op];
6046 else if (!i.types[op].bitfield.unspecified)
6047 type = &i.types[op];
6048 }
6049 else if (i.types[op].bitfield.class == RegSIMD
6050 && t->opcode_modifier.evex != EVEXLIG)
6051 {
6052 if (i.types[op].bitfield.zmmword)
6053 i.memshift = 6;
6054 else if (i.types[op].bitfield.ymmword && i.memshift < 5)
6055 i.memshift = 5;
6056 else if (i.types[op].bitfield.xmmword && i.memshift < 4)
6057 i.memshift = 4;
6058 }
6059
6060 if (type)
6061 {
6062 if (type->bitfield.zmmword)
6063 i.memshift = 6;
6064 else if (type->bitfield.ymmword)
6065 i.memshift = 5;
6066 else if (type->bitfield.xmmword)
6067 i.memshift = 4;
6068 }
6069
6070 /* For the check in fits_in_disp8(). */
6071 if (i.memshift == 0)
6072 i.memshift = -1;
6073 }
6074
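      /* Hedged worked example of EVEX compressed displacement: with
	 i.memshift == 6 (ZMMword), the 0x100 displacement in
	 "vmovaps 0x100(%rax), %zmm0" scales to 0x100 >> 6 == 4, so the
	 loop below can represent it as Disp8.  */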
6075 for (op = 0; op < i.operands; op++)
6076 if (operand_type_check (i.types[op], disp)
6077 && i.op[op].disps->X_op == O_constant)
6078 {
6079 if (fits_in_disp8 (i.op[op].disps->X_add_number))
6080 {
6081 i.types[op].bitfield.disp8 = 1;
6082 return 0;
6083 }
6084 i.types[op].bitfield.disp8 = 0;
6085 }
6086 }
6087
6088 i.memshift = 0;
6089
6090 return 0;
6091 }
6092
6093 /* Check if encoding requirements are met by the instruction. */
6094
6095 static int
6096 VEX_check_encoding (const insn_template *t)
6097 {
6098 if (i.vec_encoding == vex_encoding_error)
6099 {
6100 i.error = unsupported;
6101 return 1;
6102 }
6103
6104 if (i.vec_encoding == vex_encoding_evex)
6105 {
6106 /* This instruction must be encoded with EVEX prefix. */
6107 if (!is_evex_encoding (t))
6108 {
6109 i.error = unsupported;
6110 return 1;
6111 }
6112 return 0;
6113 }
6114
6115 if (!t->opcode_modifier.vex)
6116 {
6117 /* This instruction template doesn't have VEX prefix. */
6118 if (i.vec_encoding != vex_encoding_default)
6119 {
6120 i.error = unsupported;
6121 return 1;
6122 }
6123 return 0;
6124 }
6125
6126 return 0;
6127 }
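
/* Illustrative note (assumption, not original text): i.vec_encoding is
   driven by the {vex}/{vex3}/{evex} pseudo prefixes.  E.g. "{evex}"
   makes the check above reject every template without an EVEX
   encoding, so an insn lacking an EVEX form fails to assemble.  */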
6128
6129 static const insn_template *
6130 match_template (char mnem_suffix)
6131 {
6132 /* Points to template once we've found it. */
6133 const insn_template *t;
6134 i386_operand_type overlap0, overlap1, overlap2, overlap3;
6135 i386_operand_type overlap4;
6136 unsigned int found_reverse_match;
6137 i386_opcode_modifier suffix_check;
6138 i386_operand_type operand_types [MAX_OPERANDS];
6139 int addr_prefix_disp;
6140 unsigned int j, size_match, check_register;
6141 enum i386_error specific_error = 0;
6142
6143 #if MAX_OPERANDS != 5
6144 # error "MAX_OPERANDS must be 5."
6145 #endif
6146
6147 found_reverse_match = 0;
6148 addr_prefix_disp = -1;
6149
6150 /* Prepare for mnemonic suffix check. */
6151 memset (&suffix_check, 0, sizeof (suffix_check));
6152 switch (mnem_suffix)
6153 {
6154 case BYTE_MNEM_SUFFIX:
6155 suffix_check.no_bsuf = 1;
6156 break;
6157 case WORD_MNEM_SUFFIX:
6158 suffix_check.no_wsuf = 1;
6159 break;
6160 case SHORT_MNEM_SUFFIX:
6161 suffix_check.no_ssuf = 1;
6162 break;
6163 case LONG_MNEM_SUFFIX:
6164 suffix_check.no_lsuf = 1;
6165 break;
6166 case QWORD_MNEM_SUFFIX:
6167 suffix_check.no_qsuf = 1;
6168 break;
6169 default:
6170 /* NB: In Intel syntax, normally we can check for memory operand
6171 size when there is no mnemonic suffix. But jmp and call have
6172 2 different encodings with Dword memory operand size, one with
6173 No_ldSuf and the other without. i.suffix is set to
6174 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6175 if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
6176 suffix_check.no_ldsuf = 1;
6177 }
6178
6179 /* Must have right number of operands. */
6180 i.error = number_of_operands_mismatch;
6181
6182 for (t = current_templates->start; t < current_templates->end; t++)
6183 {
6184 addr_prefix_disp = -1;
6185 found_reverse_match = 0;
6186
6187 if (i.operands != t->operands)
6188 continue;
6189
6190 /* Check processor support. */
6191 i.error = unsupported;
6192 if (cpu_flags_match (t) != CPU_FLAGS_PERFECT_MATCH)
6193 continue;
6194
6195 /* Check AT&T mnemonic. */
6196 i.error = unsupported_with_intel_mnemonic;
6197 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
6198 continue;
6199
6200 /* Check AT&T/Intel syntax. */
6201 i.error = unsupported_syntax;
6202 if ((intel_syntax && t->opcode_modifier.attsyntax)
6203 || (!intel_syntax && t->opcode_modifier.intelsyntax))
6204 continue;
6205
6206 /* Check Intel64/AMD64 ISA. */
6207 switch (isa64)
6208 {
6209 default:
6210 /* Default: Don't accept Intel64. */
6211 if (t->opcode_modifier.isa64 == INTEL64)
6212 continue;
6213 break;
6214 case amd64:
6215 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6216 if (t->opcode_modifier.isa64 >= INTEL64)
6217 continue;
6218 break;
6219 case intel64:
6220 /* -mintel64: Don't accept AMD64. */
6221 if (t->opcode_modifier.isa64 == AMD64 && flag_code == CODE_64BIT)
6222 continue;
6223 break;
6224 }
6225
6226 /* Check the suffix. */
6227 i.error = invalid_instruction_suffix;
6228 if ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
6229 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
6230 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
6231 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
6232 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
6233 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf))
6234 continue;
6235
6236 size_match = operand_size_match (t);
6237 if (!size_match)
6238 continue;
6239
6240 /* This is intentionally not
6241
6242 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6243
6244 as the case of a missing * on the operand is accepted (perhaps with
6245 a warning, issued further down). */
6246 if (i.jumpabsolute && t->opcode_modifier.jump != JUMP_ABSOLUTE)
6247 {
6248 i.error = operand_type_mismatch;
6249 continue;
6250 }
6251
6252 for (j = 0; j < MAX_OPERANDS; j++)
6253 operand_types[j] = t->operand_types[j];
6254
6255 /* In general, don't allow
6256 - 64-bit operands outside of 64-bit mode,
6257 - 32-bit operands on pre-386. */
6258 j = i.imm_operands + (t->operands > i.imm_operands + 1);
6259 if (((i.suffix == QWORD_MNEM_SUFFIX
6260 && flag_code != CODE_64BIT
6261 && (t->base_opcode != 0x0fc7
6262 || t->extension_opcode != 1 /* cmpxchg8b */))
6263 || (i.suffix == LONG_MNEM_SUFFIX
6264 && !cpu_arch_flags.bitfield.cpui386))
6265 && (intel_syntax
6266 ? (t->opcode_modifier.mnemonicsize != IGNORESIZE
6267 && !intel_float_operand (t->name))
6268 : intel_float_operand (t->name) != 2)
6269 && (t->operands == i.imm_operands
6270 || (operand_types[i.imm_operands].bitfield.class != RegMMX
6271 && operand_types[i.imm_operands].bitfield.class != RegSIMD
6272 && operand_types[i.imm_operands].bitfield.class != RegMask)
6273 || (operand_types[j].bitfield.class != RegMMX
6274 && operand_types[j].bitfield.class != RegSIMD
6275 && operand_types[j].bitfield.class != RegMask))
6276 && !t->opcode_modifier.sib)
6277 continue;
6278
6279 /* Do not verify operands when there are none. */
6280 if (!t->operands)
6281 {
6282 if (VEX_check_encoding (t))
6283 {
6284 specific_error = i.error;
6285 continue;
6286 }
6287
6288 /* We've found a match; break out of loop. */
6289 break;
6290 }
6291
6292 if (!t->opcode_modifier.jump
6293 || t->opcode_modifier.jump == JUMP_ABSOLUTE)
6294 {
6295 /* There should be only one Disp operand. */
6296 for (j = 0; j < MAX_OPERANDS; j++)
6297 if (operand_type_check (operand_types[j], disp))
6298 break;
6299 if (j < MAX_OPERANDS)
6300 {
6301 bfd_boolean override = (i.prefix[ADDR_PREFIX] != 0);
6302
6303 addr_prefix_disp = j;
6304
6305 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6306 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6307 switch (flag_code)
6308 {
6309 case CODE_16BIT:
6310 override = !override;
6311 /* Fall through. */
6312 case CODE_32BIT:
6313 if (operand_types[j].bitfield.disp32
6314 && operand_types[j].bitfield.disp16)
6315 {
6316 operand_types[j].bitfield.disp16 = override;
6317 operand_types[j].bitfield.disp32 = !override;
6318 }
6319 operand_types[j].bitfield.disp32s = 0;
6320 operand_types[j].bitfield.disp64 = 0;
6321 break;
6322
6323 case CODE_64BIT:
6324 if (operand_types[j].bitfield.disp32s
6325 || operand_types[j].bitfield.disp64)
6326 {
6327 operand_types[j].bitfield.disp64 &= !override;
6328 operand_types[j].bitfield.disp32s &= !override;
6329 operand_types[j].bitfield.disp32 = override;
6330 }
6331 operand_types[j].bitfield.disp16 = 0;
6332 break;
6333 }
6334 }
6335 }
6336
6337 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6338 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
6339 continue;
6340
6341 /* We check register size if needed. */
6342 if (t->opcode_modifier.checkregsize)
6343 {
6344 check_register = (1 << t->operands) - 1;
6345 if (i.broadcast)
6346 check_register &= ~(1 << i.broadcast->operand);
6347 }
6348 else
6349 check_register = 0;
6350
6351 overlap0 = operand_type_and (i.types[0], operand_types[0]);
6352 switch (t->operands)
6353 {
6354 case 1:
6355 if (!operand_type_match (overlap0, i.types[0]))
6356 continue;
6357 break;
6358 case 2:
6359 /* xchg %eax, %eax is a special case. It is an alias for nop
6360 only in 32bit mode and we can use opcode 0x90. In 64bit
6361 mode, we can't use 0x90 for xchg %eax, %eax since it should
6362 zero-extend %eax to %rax. */
6363 if (flag_code == CODE_64BIT
6364 && t->base_opcode == 0x90
6365 && i.types[0].bitfield.instance == Accum
6366 && i.types[0].bitfield.dword
6367 && i.types[1].bitfield.instance == Accum
6368 && i.types[1].bitfield.dword)
6369 continue;
6370 /* xrelease mov %eax, <disp> is another special case. It must not
6371 match the accumulator-only encoding of mov. */
6372 if (flag_code != CODE_64BIT
6373 && i.hle_prefix
6374 && t->base_opcode == 0xa0
6375 && i.types[0].bitfield.instance == Accum
6376 && (i.flags[1] & Operand_Mem))
6377 continue;
6378 /* Fall through. */
6379
6380 case 3:
6381 if (!(size_match & MATCH_STRAIGHT))
6382 goto check_reverse;
6383 /* Reverse direction of operands if swapping is possible in the first
6384 place (operands need to be symmetric) and
6385 - the load form is requested, and the template is a store form,
6386 - the store form is requested, and the template is a load form,
6387 - the non-default (swapped) form is requested. */
6388 overlap1 = operand_type_and (operand_types[0], operand_types[1]);
6389 if (t->opcode_modifier.d && i.reg_operands == i.operands
6390 && !operand_type_all_zero (&overlap1))
6391 switch (i.dir_encoding)
6392 {
6393 case dir_encoding_load:
6394 if (operand_type_check (operand_types[i.operands - 1], anymem)
6395 || t->opcode_modifier.regmem)
6396 goto check_reverse;
6397 break;
6398
6399 case dir_encoding_store:
6400 if (!operand_type_check (operand_types[i.operands - 1], anymem)
6401 && !t->opcode_modifier.regmem)
6402 goto check_reverse;
6403 break;
6404
6405 case dir_encoding_swap:
6406 goto check_reverse;
6407
6408 case dir_encoding_default:
6409 break;
6410 }
6411 /* If we want store form, we skip the current load. */
6412 if ((i.dir_encoding == dir_encoding_store
6413 || i.dir_encoding == dir_encoding_swap)
6414 && i.mem_operands == 0
6415 && t->opcode_modifier.load)
6416 continue;
6417 /* Fall through. */
6418 case 4:
6419 case 5:
6420 overlap1 = operand_type_and (i.types[1], operand_types[1]);
6421 if (!operand_type_match (overlap0, i.types[0])
6422 || !operand_type_match (overlap1, i.types[1])
6423 || ((check_register & 3) == 3
6424 && !operand_type_register_match (i.types[0],
6425 operand_types[0],
6426 i.types[1],
6427 operand_types[1])))
6428 {
6429 /* Check if other direction is valid ... */
6430 if (!t->opcode_modifier.d)
6431 continue;
6432
6433 check_reverse:
6434 if (!(size_match & MATCH_REVERSE))
6435 continue;
6436 /* Try reversing direction of operands. */
6437 overlap0 = operand_type_and (i.types[0], operand_types[i.operands - 1]);
6438 overlap1 = operand_type_and (i.types[i.operands - 1], operand_types[0]);
6439 if (!operand_type_match (overlap0, i.types[0])
6440 || !operand_type_match (overlap1, i.types[i.operands - 1])
6441 || (check_register
6442 && !operand_type_register_match (i.types[0],
6443 operand_types[i.operands - 1],
6444 i.types[i.operands - 1],
6445 operand_types[0])))
6446 {
6447 /* Does not match either direction. */
6448 continue;
6449 }
6450 /* found_reverse_match holds which of D or FloatR
6451 we've found. */
6452 if (!t->opcode_modifier.d)
6453 found_reverse_match = 0;
6454 else if (operand_types[0].bitfield.tbyte)
6455 found_reverse_match = Opcode_FloatD;
6456 else if (operand_types[0].bitfield.xmmword
6457 || operand_types[i.operands - 1].bitfield.xmmword
6458 || operand_types[0].bitfield.class == RegMMX
6459 || operand_types[i.operands - 1].bitfield.class == RegMMX
6460 || is_any_vex_encoding(t))
6461 found_reverse_match = (t->base_opcode & 0xee) != 0x6e
6462 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
6463 else
6464 found_reverse_match = Opcode_D;
6465 if (t->opcode_modifier.floatr)
6466 found_reverse_match |= Opcode_FloatR;
6467 }
6468 else
6469 {
6470 /* Found a forward 2 operand match here. */
6471 switch (t->operands)
6472 {
6473 case 5:
6474 overlap4 = operand_type_and (i.types[4],
6475 operand_types[4]);
6476 /* Fall through. */
6477 case 4:
6478 overlap3 = operand_type_and (i.types[3],
6479 operand_types[3]);
6480 /* Fall through. */
6481 case 3:
6482 overlap2 = operand_type_and (i.types[2],
6483 operand_types[2]);
6484 break;
6485 }
6486
6487 switch (t->operands)
6488 {
6489 case 5:
6490 if (!operand_type_match (overlap4, i.types[4])
6491 || !operand_type_register_match (i.types[3],
6492 operand_types[3],
6493 i.types[4],
6494 operand_types[4]))
6495 continue;
6496 /* Fall through. */
6497 case 4:
6498 if (!operand_type_match (overlap3, i.types[3])
6499 || ((check_register & 0xa) == 0xa
6500 && !operand_type_register_match (i.types[1],
6501 operand_types[1],
6502 i.types[3],
6503 operand_types[3]))
6504 || ((check_register & 0xc) == 0xc
6505 && !operand_type_register_match (i.types[2],
6506 operand_types[2],
6507 i.types[3],
6508 operand_types[3])))
6509 continue;
6510 /* Fall through. */
6511 case 3:
6512 /* Here we make use of the fact that there are no
6513 reverse match 3 operand instructions. */
6514 if (!operand_type_match (overlap2, i.types[2])
6515 || ((check_register & 5) == 5
6516 && !operand_type_register_match (i.types[0],
6517 operand_types[0],
6518 i.types[2],
6519 operand_types[2]))
6520 || ((check_register & 6) == 6
6521 && !operand_type_register_match (i.types[1],
6522 operand_types[1],
6523 i.types[2],
6524 operand_types[2])))
6525 continue;
6526 break;
6527 }
6528 }
6529 /* Found either forward/reverse 2, 3 or 4 operand match here:
6530 slip through to break. */
6531 }
6532
6533 /* Check if vector operands are valid. */
6534 if (check_VecOperands (t))
6535 {
6536 specific_error = i.error;
6537 continue;
6538 }
6539
6540 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6541 if (VEX_check_encoding (t))
6542 {
6543 specific_error = i.error;
6544 continue;
6545 }
6546
6547 /* We've found a match; break out of loop. */
6548 break;
6549 }
6550
6551 if (t == current_templates->end)
6552 {
6553 /* We found no match. */
6554 const char *err_msg;
6555 switch (specific_error ? specific_error : i.error)
6556 {
6557 default:
6558 abort ();
6559 case operand_size_mismatch:
6560 err_msg = _("operand size mismatch");
6561 break;
6562 case operand_type_mismatch:
6563 err_msg = _("operand type mismatch");
6564 break;
6565 case register_type_mismatch:
6566 err_msg = _("register type mismatch");
6567 break;
6568 case number_of_operands_mismatch:
6569 err_msg = _("number of operands mismatch");
6570 break;
6571 case invalid_instruction_suffix:
6572 err_msg = _("invalid instruction suffix");
6573 break;
6574 case bad_imm4:
6575 err_msg = _("constant doesn't fit in 4 bits");
6576 break;
6577 case unsupported_with_intel_mnemonic:
6578 err_msg = _("unsupported with Intel mnemonic");
6579 break;
6580 case unsupported_syntax:
6581 err_msg = _("unsupported syntax");
6582 break;
6583 case unsupported:
6584 as_bad (_("unsupported instruction `%s'"),
6585 current_templates->start->name);
6586 return NULL;
6587 case invalid_vsib_address:
6588 err_msg = _("invalid VSIB address");
6589 break;
6590 case invalid_vector_register_set:
6591 err_msg = _("mask, index, and destination registers must be distinct");
6592 break;
6593 case unsupported_vector_index_register:
6594 err_msg = _("unsupported vector index register");
6595 break;
6596 case unsupported_broadcast:
6597 err_msg = _("unsupported broadcast");
6598 break;
6599 case broadcast_needed:
6600 err_msg = _("broadcast is needed for operand of such type");
6601 break;
6602 case unsupported_masking:
6603 err_msg = _("unsupported masking");
6604 break;
6605 case mask_not_on_destination:
6606 err_msg = _("mask not on destination operand");
6607 break;
6608 case no_default_mask:
6609 err_msg = _("default mask isn't allowed");
6610 break;
6611 case unsupported_rc_sae:
6612 err_msg = _("unsupported static rounding/sae");
6613 break;
6614 case rc_sae_operand_not_last_imm:
6615 if (intel_syntax)
6616 err_msg = _("RC/SAE operand must precede immediate operands");
6617 else
6618 err_msg = _("RC/SAE operand must follow immediate operands");
6619 break;
6620 case invalid_register_operand:
6621 err_msg = _("invalid register operand");
6622 break;
6623 }
6624 as_bad (_("%s for `%s'"), err_msg,
6625 current_templates->start->name);
6626 return NULL;
6627 }
6628
6629 if (!quiet_warnings)
6630 {
6631 if (!intel_syntax
6632 && (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE)))
6633 as_warn (_("indirect %s without `*'"), t->name);
6634
6635 if (t->opcode_modifier.isprefix
6636 && t->opcode_modifier.mnemonicsize == IGNORESIZE)
6637 {
6638 /* Warn them that a data or address size prefix doesn't
6639 affect assembly of the next line of code. */
6640 as_warn (_("stand-alone `%s' prefix"), t->name);
6641 }
6642 }
6643
6644 /* Copy the template we found. */
6645 i.tm = *t;
6646
6647 if (addr_prefix_disp != -1)
6648 i.tm.operand_types[addr_prefix_disp]
6649 = operand_types[addr_prefix_disp];
6650
6651 if (found_reverse_match)
6652 {
6653 /* If we found a reverse match we must alter the opcode direction
6654 bit and clear/flip the regmem modifier one. found_reverse_match
6655 holds bits to change (different for int & float insns). */
6656
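      /* E.g. (illustrative): "mov %ebx, %eax" can come from the 0x89
	 (store) or the 0x8b (load) template; XORing in Opcode_D (0x2)
	 flips the direction bit to switch between the two.  */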
6657 i.tm.base_opcode ^= found_reverse_match;
6658
6659 i.tm.operand_types[0] = operand_types[i.operands - 1];
6660 i.tm.operand_types[i.operands - 1] = operand_types[0];
6661
6662 /* Certain SIMD insns have their load forms specified in the opcode
6663 table, and hence we need to _set_ RegMem instead of clearing it.
6664 We need to avoid setting the bit though on insns like KMOVW. */
6665 i.tm.opcode_modifier.regmem
6666 = i.tm.opcode_modifier.modrm && i.tm.opcode_modifier.d
6667 && i.tm.operands > 2U - i.tm.opcode_modifier.sse2avx
6668 && !i.tm.opcode_modifier.regmem;
6669 }
6670
6671 return t;
6672 }
6673
6674 static int
6675 check_string (void)
6676 {
6677 unsigned int es_op = i.tm.opcode_modifier.isstring - IS_STRING_ES_OP0;
6678 unsigned int op = i.tm.operand_types[0].bitfield.baseindex ? es_op : 0;
6679
6680 if (i.seg[op] != NULL && i.seg[op] != &es)
6681 {
6682 as_bad (_("`%s' operand %u must use `%ses' segment"),
6683 i.tm.name,
6684 intel_syntax ? i.tm.operands - es_op : es_op + 1,
6685 register_prefix);
6686 return 0;
6687 }
6688
6689 /* There's only ever one segment override allowed per instruction.
6690 This instruction possibly has a legal segment override on the
6691 second operand, so copy the segment to where non-string
6692 instructions store it, allowing common code. */
6693 i.seg[op] = i.seg[1];
6694
6695 return 1;
6696 }
6697
6698 static int
6699 process_suffix (void)
6700 {
6701 /* If matched instruction specifies an explicit instruction mnemonic
6702 suffix, use it. */
6703 if (i.tm.opcode_modifier.size == SIZE16)
6704 i.suffix = WORD_MNEM_SUFFIX;
6705 else if (i.tm.opcode_modifier.size == SIZE32)
6706 i.suffix = LONG_MNEM_SUFFIX;
6707 else if (i.tm.opcode_modifier.size == SIZE64)
6708 i.suffix = QWORD_MNEM_SUFFIX;
6709 else if (i.reg_operands
6710 && (i.operands > 1 || i.types[0].bitfield.class == Reg)
6711 && !i.tm.opcode_modifier.addrprefixopreg)
6712 {
6713 unsigned int numop = i.operands;
6714
6715 /* movsx/movzx want only their source operand considered here, for the
6716 ambiguity checking below. The suffix will be replaced afterwards
6717 to represent the destination (register). */
6718 if (((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w)
6719 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6720 --i.operands;
6721
6722 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6723 if (i.tm.base_opcode == 0xf20f38f0
6724 && i.tm.operand_types[1].bitfield.qword)
6725 i.rex |= REX_W;
6726
6727 /* If there's no instruction mnemonic suffix we try to invent one
6728 based on GPR operands. */
6729 if (!i.suffix)
6730 {
	  /* We take i.suffix from the last register operand specified.
	     The destination register type is more significant than the
	     source register type.  crc32 in SSE4.2 prefers the source
	     register type.  */
6735 unsigned int op = i.tm.base_opcode != 0xf20f38f0 ? i.operands : 1;
6736
6737 while (op--)
6738 if (i.tm.operand_types[op].bitfield.instance == InstanceNone
6739 || i.tm.operand_types[op].bitfield.instance == Accum)
6740 {
6741 if (i.types[op].bitfield.class != Reg)
6742 continue;
6743 if (i.types[op].bitfield.byte)
6744 i.suffix = BYTE_MNEM_SUFFIX;
6745 else if (i.types[op].bitfield.word)
6746 i.suffix = WORD_MNEM_SUFFIX;
6747 else if (i.types[op].bitfield.dword)
6748 i.suffix = LONG_MNEM_SUFFIX;
6749 else if (i.types[op].bitfield.qword)
6750 i.suffix = QWORD_MNEM_SUFFIX;
6751 else
6752 continue;
6753 break;
6754 }
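
	  /* E.g. (illustrative): "mov %eax, (%rdx)" carries no suffix,
	     so the loop above derives LONG_MNEM_SUFFIX from the 32-bit
	     register operand.  */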
6755
6756 /* As an exception, movsx/movzx silently default to a byte source
6757 in AT&T mode. */
6758 if ((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w
6759 && !i.suffix && !intel_syntax)
6760 i.suffix = BYTE_MNEM_SUFFIX;
6761 }
6762 else if (i.suffix == BYTE_MNEM_SUFFIX)
6763 {
6764 if (intel_syntax
6765 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6766 && i.tm.opcode_modifier.no_bsuf)
6767 i.suffix = 0;
6768 else if (!check_byte_reg ())
6769 return 0;
6770 }
6771 else if (i.suffix == LONG_MNEM_SUFFIX)
6772 {
6773 if (intel_syntax
6774 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6775 && i.tm.opcode_modifier.no_lsuf
6776 && !i.tm.opcode_modifier.todword
6777 && !i.tm.opcode_modifier.toqword)
6778 i.suffix = 0;
6779 else if (!check_long_reg ())
6780 return 0;
6781 }
6782 else if (i.suffix == QWORD_MNEM_SUFFIX)
6783 {
6784 if (intel_syntax
6785 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6786 && i.tm.opcode_modifier.no_qsuf
6787 && !i.tm.opcode_modifier.todword
6788 && !i.tm.opcode_modifier.toqword)
6789 i.suffix = 0;
6790 else if (!check_qword_reg ())
6791 return 0;
6792 }
6793 else if (i.suffix == WORD_MNEM_SUFFIX)
6794 {
6795 if (intel_syntax
6796 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6797 && i.tm.opcode_modifier.no_wsuf)
6798 i.suffix = 0;
6799 else if (!check_word_reg ())
6800 return 0;
6801 }
6802 else if (intel_syntax
6803 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE)
6804 /* Do nothing if the instruction is going to ignore the prefix. */
6805 ;
6806 else
6807 abort ();
6808
6809 /* Undo the movsx/movzx change done above. */
6810 i.operands = numop;
6811 }
6812 else if (i.tm.opcode_modifier.mnemonicsize == DEFAULTSIZE
6813 && !i.suffix)
6814 {
6815 i.suffix = stackop_size;
6816 if (stackop_size == LONG_MNEM_SUFFIX)
6817 {
6818 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6819 .code16gcc directive to support 16-bit mode with
6820 32-bit address. For IRET without a suffix, generate
6821 16-bit IRET (opcode 0xcf) to return from an interrupt
6822 handler. */
6823 if (i.tm.base_opcode == 0xcf)
6824 {
6825 i.suffix = WORD_MNEM_SUFFIX;
6826 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6827 }
6828 /* Warn about changed behavior for segment register push/pop. */
6829 else if ((i.tm.base_opcode | 1) == 0x07)
6830 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6831 i.tm.name);
6832 }
6833 }
6834 else if (!i.suffix
6835 && (i.tm.opcode_modifier.jump == JUMP_ABSOLUTE
6836 || i.tm.opcode_modifier.jump == JUMP_BYTE
6837 || i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT
6838 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
6839 && i.tm.extension_opcode <= 3)))
6840 {
6841 switch (flag_code)
6842 {
6843 case CODE_64BIT:
6844 if (!i.tm.opcode_modifier.no_qsuf)
6845 {
6846 if (i.tm.opcode_modifier.jump == JUMP_BYTE
6847 || i.tm.opcode_modifier.no_lsuf)
6848 i.suffix = QWORD_MNEM_SUFFIX;
6849 break;
6850 }
6851 /* Fall through. */
6852 case CODE_32BIT:
6853 if (!i.tm.opcode_modifier.no_lsuf)
6854 i.suffix = LONG_MNEM_SUFFIX;
6855 break;
6856 case CODE_16BIT:
6857 if (!i.tm.opcode_modifier.no_wsuf)
6858 i.suffix = WORD_MNEM_SUFFIX;
6859 break;
6860 }
6861 }
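
  /* Hedged illustration: an unsuffixed indirect "call *(%rax)" picks
     QWORD_MNEM_SUFFIX above in 64-bit mode, and WORD_MNEM_SUFFIX in
     16-bit mode.  */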
6862
6863 if (!i.suffix
6864 && (i.tm.opcode_modifier.mnemonicsize != DEFAULTSIZE
6865 /* Also cover lret/retf/iret in 64-bit mode. */
6866 || (flag_code == CODE_64BIT
6867 && !i.tm.opcode_modifier.no_lsuf
6868 && !i.tm.opcode_modifier.no_qsuf))
6869 && i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
6870 /* Explicit sizing prefixes are assumed to disambiguate insns. */
6871 && !i.prefix[DATA_PREFIX] && !(i.prefix[REX_PREFIX] & REX_W)
6872 /* Accept FLDENV et al without suffix. */
6873 && (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
6874 {
6875 unsigned int suffixes, evex = 0;
6876
6877 suffixes = !i.tm.opcode_modifier.no_bsuf;
6878 if (!i.tm.opcode_modifier.no_wsuf)
6879 suffixes |= 1 << 1;
6880 if (!i.tm.opcode_modifier.no_lsuf)
6881 suffixes |= 1 << 2;
6882 if (!i.tm.opcode_modifier.no_ldsuf)
6883 suffixes |= 1 << 3;
6884 if (!i.tm.opcode_modifier.no_ssuf)
6885 suffixes |= 1 << 4;
6886 if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
6887 suffixes |= 1 << 5;
6888
6889 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6890 also suitable for AT&T syntax mode, it was requested that this be
6891 restricted to just Intel syntax. */
6892 if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast)
6893 {
6894 unsigned int op;
6895
6896 for (op = 0; op < i.tm.operands; ++op)
6897 {
6898 if (is_evex_encoding (&i.tm)
6899 && !cpu_arch_flags.bitfield.cpuavx512vl)
6900 {
6901 if (i.tm.operand_types[op].bitfield.ymmword)
6902 i.tm.operand_types[op].bitfield.xmmword = 0;
6903 if (i.tm.operand_types[op].bitfield.zmmword)
6904 i.tm.operand_types[op].bitfield.ymmword = 0;
6905 if (!i.tm.opcode_modifier.evex
6906 || i.tm.opcode_modifier.evex == EVEXDYN)
6907 i.tm.opcode_modifier.evex = EVEX512;
6908 }
6909
6910 if (i.tm.operand_types[op].bitfield.xmmword
6911 + i.tm.operand_types[op].bitfield.ymmword
6912 + i.tm.operand_types[op].bitfield.zmmword < 2)
6913 continue;
6914
6915 /* Any properly sized operand disambiguates the insn. */
6916 if (i.types[op].bitfield.xmmword
6917 || i.types[op].bitfield.ymmword
6918 || i.types[op].bitfield.zmmword)
6919 {
6920 suffixes &= ~(7 << 6);
6921 evex = 0;
6922 break;
6923 }
6924
6925 if ((i.flags[op] & Operand_Mem)
6926 && i.tm.operand_types[op].bitfield.unspecified)
6927 {
6928 if (i.tm.operand_types[op].bitfield.xmmword)
6929 suffixes |= 1 << 6;
6930 if (i.tm.operand_types[op].bitfield.ymmword)
6931 suffixes |= 1 << 7;
6932 if (i.tm.operand_types[op].bitfield.zmmword)
6933 suffixes |= 1 << 8;
6934 if (is_evex_encoding (&i.tm))
6935 evex = EVEX512;
6936 }
6937 }
6938 }
6939
6940 /* Are multiple suffixes / operand sizes allowed? */
6941 if (suffixes & (suffixes - 1))
6942 {
6943 if (intel_syntax
6944 && (i.tm.opcode_modifier.mnemonicsize != DEFAULTSIZE
6945 || operand_check == check_error))
6946 {
6947 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
6948 return 0;
6949 }
6950 if (operand_check == check_error)
6951 {
6952 as_bad (_("no instruction mnemonic suffix given and "
6953 "no register operands; can't size `%s'"), i.tm.name);
6954 return 0;
6955 }
6956 if (operand_check == check_warning)
6957 as_warn (_("%s; using default for `%s'"),
6958 intel_syntax
6959 ? _("ambiguous operand size")
6960 : _("no instruction mnemonic suffix given and "
6961 "no register operands"),
6962 i.tm.name);
6963
6964 if (i.tm.opcode_modifier.floatmf)
6965 i.suffix = SHORT_MNEM_SUFFIX;
6966 else if ((i.tm.base_opcode | 8) == 0xfbe
6967 || (i.tm.base_opcode == 0x63
6968 && i.tm.cpu_flags.bitfield.cpu64))
6969 /* handled below */;
6970 else if (evex)
6971 i.tm.opcode_modifier.evex = evex;
6972 else if (flag_code == CODE_16BIT)
6973 i.suffix = WORD_MNEM_SUFFIX;
6974 else if (!i.tm.opcode_modifier.no_lsuf)
6975 i.suffix = LONG_MNEM_SUFFIX;
6976 else
6977 i.suffix = QWORD_MNEM_SUFFIX;
6978 }
6979 }
6980
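
  /* Hedged example for the ambiguity handling above: "add $1, (%rax)"
     names no register and has no suffix, so it is diagnosed (fatally
     when operand_check == check_error) and otherwise defaults to 'l'
     outside 16-bit mode.  */
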
6981 if ((i.tm.base_opcode | 8) == 0xfbe
6982 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6983 {
6984 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6985 In AT&T syntax, if there is no suffix (warned about above), the default
6986 will be byte extension. */
6987 if (i.tm.opcode_modifier.w && i.suffix && i.suffix != BYTE_MNEM_SUFFIX)
6988 i.tm.base_opcode |= 1;
6989
6990 /* For further processing, the suffix should represent the destination
6991 (register). This is already the case when one was used with
6992 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6993 no suffix to begin with. */
6994 if (i.tm.opcode_modifier.w || i.tm.base_opcode == 0x63 || !i.suffix)
6995 {
6996 if (i.types[1].bitfield.word)
6997 i.suffix = WORD_MNEM_SUFFIX;
6998 else if (i.types[1].bitfield.qword)
6999 i.suffix = QWORD_MNEM_SUFFIX;
7000 else
7001 i.suffix = LONG_MNEM_SUFFIX;
7002
7003 i.tm.opcode_modifier.w = 0;
7004 }
7005 }
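
  /* E.g. (illustrative): for "movzwl %ax, %ecx" the 'w' bit selects
     the 16-bit source form (0x0f b7), while the recorded suffix now
     describes the 32-bit destination register.  */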
7006
7007 if (!i.tm.opcode_modifier.modrm && i.reg_operands && i.tm.operands < 3)
7008 i.short_form = (i.tm.operand_types[0].bitfield.class == Reg)
7009 != (i.tm.operand_types[1].bitfield.class == Reg);
7010
7011 /* Change the opcode based on the operand size given by i.suffix. */
7012 switch (i.suffix)
7013 {
7014 /* Size floating point instruction. */
7015 case LONG_MNEM_SUFFIX:
7016 if (i.tm.opcode_modifier.floatmf)
7017 {
7018 i.tm.base_opcode ^= 4;
7019 break;
7020 }
7021 /* fall through */
7022 case WORD_MNEM_SUFFIX:
7023 case QWORD_MNEM_SUFFIX:
7024 /* It's not a byte, select word/dword operation. */
7025 if (i.tm.opcode_modifier.w)
7026 {
7027 if (i.short_form)
7028 i.tm.base_opcode |= 8;
7029 else
7030 i.tm.base_opcode |= 1;
7031 }
7032 /* fall through */
7033 case SHORT_MNEM_SUFFIX:
7034 /* Now select between word & dword operations via the operand
7035 size prefix, except for instructions that will ignore this
7036 prefix anyway. */
7037 if (i.suffix != QWORD_MNEM_SUFFIX
7038 && i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
7039 && !i.tm.opcode_modifier.floatmf
7040 && !is_any_vex_encoding (&i.tm)
7041 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
7042 || (flag_code == CODE_64BIT
7043 && i.tm.opcode_modifier.jump == JUMP_BYTE)))
7044 {
7045 unsigned int prefix = DATA_PREFIX_OPCODE;
7046
7047 if (i.tm.opcode_modifier.jump == JUMP_BYTE) /* jcxz, loop */
7048 prefix = ADDR_PREFIX_OPCODE;
7049
7050 if (!add_prefix (prefix))
7051 return 0;
7052 }
7053
7054 /* Set mode64 for an operand. */
7055 if (i.suffix == QWORD_MNEM_SUFFIX
7056 && flag_code == CODE_64BIT
7057 && !i.tm.opcode_modifier.norex64
7058 && !i.tm.opcode_modifier.vexw
	  /* Special case for xchg %rax,%rax.  It is a NOP and doesn't
	     need rex64.  */
7061 && ! (i.operands == 2
7062 && i.tm.base_opcode == 0x90
7063 && i.tm.extension_opcode == None
7064 && i.types[0].bitfield.instance == Accum
7065 && i.types[0].bitfield.qword
7066 && i.types[1].bitfield.instance == Accum
7067 && i.types[1].bitfield.qword))
7068 i.rex |= REX_W;
7069
7070 break;
7071
7072 case 0:
      /* Select word/dword/qword operation with an explicit data sizing
	 prefix when there are no suitable register operands.  */
7075 if (i.tm.opcode_modifier.w
7076 && (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
7077 && (!i.reg_operands
7078 || (i.reg_operands == 1
7079 /* ShiftCount */
7080 && (i.tm.operand_types[0].bitfield.instance == RegC
7081 /* InOutPortReg */
7082 || i.tm.operand_types[0].bitfield.instance == RegD
7083 || i.tm.operand_types[1].bitfield.instance == RegD
7084 /* CRC32 */
7085 || i.tm.base_opcode == 0xf20f38f0))))
7086 i.tm.base_opcode |= 1;
7087 break;
7088 }
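
  /* Illustration (hedged): assembling "movw %ax, %bx" in 32-bit mode
     takes the WORD_MNEM_SUFFIX path above and emits the 0x66 operand
     size prefix via add_prefix.  */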
7089
7090 if (i.tm.opcode_modifier.addrprefixopreg)
7091 {
7092 gas_assert (!i.suffix);
7093 gas_assert (i.reg_operands);
7094
7095 if (i.tm.operand_types[0].bitfield.instance == Accum
7096 || i.operands == 1)
7097 {
7098 /* The address size override prefix changes the size of the
7099 first operand. */
7100 if (flag_code == CODE_64BIT
7101 && i.op[0].regs->reg_type.bitfield.word)
7102 {
7103 as_bad (_("16-bit addressing unavailable for `%s'"),
7104 i.tm.name);
7105 return 0;
7106 }
7107
7108 if ((flag_code == CODE_32BIT
7109 ? i.op[0].regs->reg_type.bitfield.word
7110 : i.op[0].regs->reg_type.bitfield.dword)
7111 && !add_prefix (ADDR_PREFIX_OPCODE))
7112 return 0;
7113 }
7114 else
7115 {
7116 /* Check invalid register operand when the address size override
7117 prefix changes the size of register operands. */
7118 unsigned int op;
7119 enum { need_word, need_dword, need_qword } need;
7120
7121 if (flag_code == CODE_32BIT)
7122 need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
7123 else if (i.prefix[ADDR_PREFIX])
7124 need = need_dword;
7125 else
7126 need = flag_code == CODE_64BIT ? need_qword : need_word;
7127
7128 for (op = 0; op < i.operands; op++)
7129 {
7130 if (i.types[op].bitfield.class != Reg)
7131 continue;
7132
7133 switch (need)
7134 {
7135 case need_word:
7136 if (i.op[op].regs->reg_type.bitfield.word)
7137 continue;
7138 break;
7139 case need_dword:
7140 if (i.op[op].regs->reg_type.bitfield.dword)
7141 continue;
7142 break;
7143 case need_qword:
7144 if (i.op[op].regs->reg_type.bitfield.qword)
7145 continue;
7146 break;
7147 }
7148
7149 as_bad (_("invalid register operand size for `%s'"),
7150 i.tm.name);
7151 return 0;
7152 }
7153 }
7154 }
7155
7156 return 1;
7157 }
7158
7159 static int
7160 check_byte_reg (void)
7161 {
7162 int op;
7163
7164 for (op = i.operands; --op >= 0;)
7165 {
7166 /* Skip non-register operands. */
7167 if (i.types[op].bitfield.class != Reg)
7168 continue;
7169
7170 /* If this is an eight bit register, it's OK. If it's the 16 or
7171 32 bit version of an eight bit register, we will just use the
7172 low portion, and that's OK too. */
7173 if (i.types[op].bitfield.byte)
7174 continue;
7175
7176 /* I/O port address operands are OK too. */
7177 if (i.tm.operand_types[op].bitfield.instance == RegD
7178 && i.tm.operand_types[op].bitfield.word)
7179 continue;
7180
7181 /* crc32 only wants its source operand checked here. */
7182 if (i.tm.base_opcode == 0xf20f38f0 && op)
7183 continue;
7184
7185 /* Any other register is bad. */
7186 as_bad (_("`%s%s' not allowed with `%s%c'"),
7187 register_prefix, i.op[op].regs->reg_name,
7188 i.tm.name, i.suffix);
7189 return 0;
7190 }
7191 return 1;
7192 }
7193
7194 static int
7195 check_long_reg (void)
7196 {
7197 int op;
7198
7199 for (op = i.operands; --op >= 0;)
7200 /* Skip non-register operands. */
7201 if (i.types[op].bitfield.class != Reg)
7202 continue;
    /* Reject eight bit registers, except where the template requires
       them.  (e.g. movzb)  */
7205 else if (i.types[op].bitfield.byte
7206 && (i.tm.operand_types[op].bitfield.class == Reg
7207 || i.tm.operand_types[op].bitfield.instance == Accum)
7208 && (i.tm.operand_types[op].bitfield.word
7209 || i.tm.operand_types[op].bitfield.dword))
7210 {
7211 as_bad (_("`%s%s' not allowed with `%s%c'"),
7212 register_prefix,
7213 i.op[op].regs->reg_name,
7214 i.tm.name,
7215 i.suffix);
7216 return 0;
7217 }
7218 /* Error if the e prefix on a general reg is missing. */
7219 else if (i.types[op].bitfield.word
7220 && (i.tm.operand_types[op].bitfield.class == Reg
7221 || i.tm.operand_types[op].bitfield.instance == Accum)
7222 && i.tm.operand_types[op].bitfield.dword)
7223 {
7224 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7225 register_prefix, i.op[op].regs->reg_name,
7226 i.suffix);
7227 return 0;
7228 }
7229 /* Warn if the r prefix on a general reg is present. */
7230 else if (i.types[op].bitfield.qword
7231 && (i.tm.operand_types[op].bitfield.class == Reg
7232 || i.tm.operand_types[op].bitfield.instance == Accum)
7233 && i.tm.operand_types[op].bitfield.dword)
7234 {
7235 if (intel_syntax
7236 && i.tm.opcode_modifier.toqword
7237 && i.types[0].bitfield.class != RegSIMD)
7238 {
7239 /* Convert to QWORD. We want REX byte. */
7240 i.suffix = QWORD_MNEM_SUFFIX;
7241 }
7242 else
7243 {
7244 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7245 register_prefix, i.op[op].regs->reg_name,
7246 i.suffix);
7247 return 0;
7248 }
7249 }
7250 return 1;
7251 }
7252
7253 static int
7254 check_qword_reg (void)
7255 {
7256 int op;
7257
7258 for (op = i.operands; --op >= 0; )
7259 /* Skip non-register operands. */
7260 if (i.types[op].bitfield.class != Reg)
7261 continue;
    /* Reject eight bit registers, except where the template requires
       them.  (e.g. movzb)  */
7264 else if (i.types[op].bitfield.byte
7265 && (i.tm.operand_types[op].bitfield.class == Reg
7266 || i.tm.operand_types[op].bitfield.instance == Accum)
7267 && (i.tm.operand_types[op].bitfield.word
7268 || i.tm.operand_types[op].bitfield.dword))
7269 {
7270 as_bad (_("`%s%s' not allowed with `%s%c'"),
7271 register_prefix,
7272 i.op[op].regs->reg_name,
7273 i.tm.name,
7274 i.suffix);
7275 return 0;
7276 }
7277 /* Warn if the r prefix on a general reg is missing. */
7278 else if ((i.types[op].bitfield.word
7279 || i.types[op].bitfield.dword)
7280 && (i.tm.operand_types[op].bitfield.class == Reg
7281 || i.tm.operand_types[op].bitfield.instance == Accum)
7282 && i.tm.operand_types[op].bitfield.qword)
7283 {
7284 /* Prohibit these changes in the 64bit mode, since the
7285 lowering is more complicated. */
7286 if (intel_syntax
7287 && i.tm.opcode_modifier.todword
7288 && i.types[0].bitfield.class != RegSIMD)
7289 {
7290 /* Convert to DWORD. We don't want REX byte. */
7291 i.suffix = LONG_MNEM_SUFFIX;
7292 }
7293 else
7294 {
7295 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7296 register_prefix, i.op[op].regs->reg_name,
7297 i.suffix);
7298 return 0;
7299 }
7300 }
7301 return 1;
7302 }
7303
7304 static int
7305 check_word_reg (void)
7306 {
7307 int op;
7308 for (op = i.operands; --op >= 0;)
7309 /* Skip non-register operands. */
7310 if (i.types[op].bitfield.class != Reg)
7311 continue;
    /* Reject eight bit registers, except where the template requires
       them.  (e.g. movzb)  */
7314 else if (i.types[op].bitfield.byte
7315 && (i.tm.operand_types[op].bitfield.class == Reg
7316 || i.tm.operand_types[op].bitfield.instance == Accum)
7317 && (i.tm.operand_types[op].bitfield.word
7318 || i.tm.operand_types[op].bitfield.dword))
7319 {
7320 as_bad (_("`%s%s' not allowed with `%s%c'"),
7321 register_prefix,
7322 i.op[op].regs->reg_name,
7323 i.tm.name,
7324 i.suffix);
7325 return 0;
7326 }
7327 /* Error if the e or r prefix on a general reg is present. */
7328 else if ((i.types[op].bitfield.dword
7329 || i.types[op].bitfield.qword)
7330 && (i.tm.operand_types[op].bitfield.class == Reg
7331 || i.tm.operand_types[op].bitfield.instance == Accum)
7332 && i.tm.operand_types[op].bitfield.word)
7333 {
7334 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7335 register_prefix, i.op[op].regs->reg_name,
7336 i.suffix);
7337 return 0;
7338 }
7339 return 1;
7340 }
7341
7342 static int
7343 update_imm (unsigned int j)
7344 {
7345 i386_operand_type overlap = i.types[j];
7346 if ((overlap.bitfield.imm8
7347 || overlap.bitfield.imm8s
7348 || overlap.bitfield.imm16
7349 || overlap.bitfield.imm32
7350 || overlap.bitfield.imm32s
7351 || overlap.bitfield.imm64)
7352 && !operand_type_equal (&overlap, &imm8)
7353 && !operand_type_equal (&overlap, &imm8s)
7354 && !operand_type_equal (&overlap, &imm16)
7355 && !operand_type_equal (&overlap, &imm32)
7356 && !operand_type_equal (&overlap, &imm32s)
7357 && !operand_type_equal (&overlap, &imm64))
7358 {
7359 if (i.suffix)
7360 {
7361 i386_operand_type temp;
7362
7363 operand_type_set (&temp, 0);
7364 if (i.suffix == BYTE_MNEM_SUFFIX)
7365 {
7366 temp.bitfield.imm8 = overlap.bitfield.imm8;
7367 temp.bitfield.imm8s = overlap.bitfield.imm8s;
7368 }
7369 else if (i.suffix == WORD_MNEM_SUFFIX)
7370 temp.bitfield.imm16 = overlap.bitfield.imm16;
7371 else if (i.suffix == QWORD_MNEM_SUFFIX)
7372 {
7373 temp.bitfield.imm64 = overlap.bitfield.imm64;
7374 temp.bitfield.imm32s = overlap.bitfield.imm32s;
7375 }
7376 else
7377 temp.bitfield.imm32 = overlap.bitfield.imm32;
7378 overlap = temp;
7379 }
7380 else if (operand_type_equal (&overlap, &imm16_32_32s)
7381 || operand_type_equal (&overlap, &imm16_32)
7382 || operand_type_equal (&overlap, &imm16_32s))
7383 {
7384 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
7385 overlap = imm16;
7386 else
7387 overlap = imm32s;
7388 }
7389 else if (i.prefix[REX_PREFIX] & REX_W)
7390 overlap = operand_type_and (overlap, imm32s);
7391 else if (i.prefix[DATA_PREFIX])
7392 overlap = operand_type_and (overlap,
7393 flag_code != CODE_16BIT ? imm16 : imm32);
7394 if (!operand_type_equal (&overlap, &imm8)
7395 && !operand_type_equal (&overlap, &imm8s)
7396 && !operand_type_equal (&overlap, &imm16)
7397 && !operand_type_equal (&overlap, &imm32)
7398 && !operand_type_equal (&overlap, &imm32s)
7399 && !operand_type_equal (&overlap, &imm64))
7400 {
7401 as_bad (_("no instruction mnemonic suffix given; "
7402 "can't determine immediate size"));
7403 return 0;
7404 }
7405 }
7406 i.types[j] = overlap;
7407
7408 return 1;
7409 }
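
/* Hedged note: update_imm narrows a still-ambiguous immediate type set
   (e.g. Imm16|Imm32|Imm32S) down to the single width implied by the
   suffix, data size prefix, or REX.W, so that exactly one immediate
   size is emitted later.  */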
7410
7411 static int
7412 finalize_imm (void)
7413 {
7414 unsigned int j, n;
7415
7416 /* Update the first 2 immediate operands. */
7417 n = i.operands > 2 ? 2 : i.operands;
7418 if (n)
7419 {
7420 for (j = 0; j < n; j++)
7421 if (update_imm (j) == 0)
7422 return 0;
7423
      /* The 3rd operand can't be an immediate operand.  */
7425 gas_assert (operand_type_check (i.types[2], imm) == 0);
7426 }
7427
7428 return 1;
7429 }
7430
7431 static int
7432 process_operands (void)
7433 {
7434 /* Default segment register this instruction will use for memory
7435 accesses. 0 means unknown. This is only for optimizing out
7436 unnecessary segment overrides. */
7437 const seg_entry *default_seg = 0;
7438
7439 if (i.tm.opcode_modifier.sse2avx)
7440 {
7441 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7442 need converting. */
7443 i.rex |= i.prefix[REX_PREFIX] & (REX_W | REX_R | REX_X | REX_B);
7444 i.prefix[REX_PREFIX] = 0;
7445 i.rex_encoding = 0;
7446 }
7447 /* ImmExt should be processed after SSE2AVX. */
7448 else if (i.tm.opcode_modifier.immext)
7449 process_immext ();
7450
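  /* Hedged note on the block below: with -msse2avx, a legacy insn like
     "addps %xmm1, %xmm0" is re-encoded as the VEX form
     "vaddps %xmm1, %xmm0, %xmm0", i.e. the destination is duplicated
     (or an implicit %xmm0 inserted) to supply the extra VEX operand.  */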
7451 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
7452 {
7453 unsigned int dupl = i.operands;
7454 unsigned int dest = dupl - 1;
7455 unsigned int j;
7456
7457 /* The destination must be an xmm register. */
7458 gas_assert (i.reg_operands
7459 && MAX_OPERANDS > dupl
7460 && operand_type_equal (&i.types[dest], &regxmm));
7461
7462 if (i.tm.operand_types[0].bitfield.instance == Accum
7463 && i.tm.operand_types[0].bitfield.xmmword)
7464 {
7465 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
7466 {
7467 /* Keep xmm0 for instructions with VEX prefix and 3
7468 sources. */
7469 i.tm.operand_types[0].bitfield.instance = InstanceNone;
7470 i.tm.operand_types[0].bitfield.class = RegSIMD;
7471 goto duplicate;
7472 }
7473 else
7474 {
7475 /* We remove the first xmm0 and keep the number of
7476 operands unchanged, which in fact duplicates the
7477 destination. */
7478 for (j = 1; j < i.operands; j++)
7479 {
7480 i.op[j - 1] = i.op[j];
7481 i.types[j - 1] = i.types[j];
7482 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
7483 i.flags[j - 1] = i.flags[j];
7484 }
7485 }
7486 }
7487 else if (i.tm.opcode_modifier.implicit1stxmm0)
7488 {
7489 gas_assert ((MAX_OPERANDS - 1) > dupl
7490 && (i.tm.opcode_modifier.vexsources
7491 == VEX3SOURCES));
7492
7493 /* Add the implicit xmm0 for instructions with VEX prefix
7494 and 3 sources. */
7495 for (j = i.operands; j > 0; j--)
7496 {
7497 i.op[j] = i.op[j - 1];
7498 i.types[j] = i.types[j - 1];
7499 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
7500 i.flags[j] = i.flags[j - 1];
7501 }
7502 i.op[0].regs
7503 = (const reg_entry *) hash_find (reg_hash, "xmm0");
7504 i.types[0] = regxmm;
7505 i.tm.operand_types[0] = regxmm;
7506
7507 i.operands += 2;
7508 i.reg_operands += 2;
7509 i.tm.operands += 2;
7510
7511 dupl++;
7512 dest++;
7513 i.op[dupl] = i.op[dest];
7514 i.types[dupl] = i.types[dest];
7515 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7516 i.flags[dupl] = i.flags[dest];
7517 }
7518 else
7519 {
7520 duplicate:
7521 i.operands++;
7522 i.reg_operands++;
7523 i.tm.operands++;
7524
7525 i.op[dupl] = i.op[dest];
7526 i.types[dupl] = i.types[dest];
7527 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7528 i.flags[dupl] = i.flags[dest];
7529 }
7530
7531 if (i.tm.opcode_modifier.immext)
7532 process_immext ();
7533 }
7534 else if (i.tm.operand_types[0].bitfield.instance == Accum
7535 && i.tm.operand_types[0].bitfield.xmmword)
7536 {
7537 unsigned int j;
7538
7539 for (j = 1; j < i.operands; j++)
7540 {
7541 i.op[j - 1] = i.op[j];
7542 i.types[j - 1] = i.types[j];
7543
7544 /* We need to adjust fields in i.tm since they are used by
7545 build_modrm_byte. */
7546 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
7547
7548 i.flags[j - 1] = i.flags[j];
7549 }
7550
7551 i.operands--;
7552 i.reg_operands--;
7553 i.tm.operands--;
7554 }
7555 else if (i.tm.opcode_modifier.implicitquadgroup)
7556 {
7557 unsigned int regnum, first_reg_in_group, last_reg_in_group;
7558
7559 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7560 gas_assert (i.operands >= 2 && i.types[1].bitfield.class == RegSIMD);
7561 regnum = register_number (i.op[1].regs);
7562 first_reg_in_group = regnum & ~3;
7563 last_reg_in_group = first_reg_in_group + 3;
7564 if (regnum != first_reg_in_group)
7565 as_warn (_("source register `%s%s' implicitly denotes"
7566 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7567 register_prefix, i.op[1].regs->reg_name,
7568 register_prefix, i.op[1].regs->reg_name, first_reg_in_group,
7569 register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
7570 i.tm.name);
7571 }
7572 else if (i.tm.opcode_modifier.regkludge)
7573 {
7574 /* The imul $imm, %reg instruction is converted into
7575 imul $imm, %reg, %reg, and the clr %reg instruction
7576 is converted into xor %reg, %reg. */
7577
7578 unsigned int first_reg_op;
7579
7580 if (operand_type_check (i.types[0], reg))
7581 first_reg_op = 0;
7582 else
7583 first_reg_op = 1;
7584 /* Pretend we saw the extra register operand. */
7585 gas_assert (i.reg_operands == 1
7586 && i.op[first_reg_op + 1].regs == 0);
7587 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
7588 i.types[first_reg_op + 1] = i.types[first_reg_op];
7589 i.operands++;
7590 i.reg_operands++;
7591 }
7592
7593 if (i.tm.opcode_modifier.modrm)
7594 {
7595 /* The opcode is completed (modulo i.tm.extension_opcode which
7596 must be put into the modrm byte). Now, we make the modrm and
7597 index base bytes based on all the info we've collected. */
7598
7599 default_seg = build_modrm_byte ();
7600 }
7601 else if (i.types[0].bitfield.class == SReg)
7602 {
7603 if (flag_code != CODE_64BIT
7604 ? i.tm.base_opcode == POP_SEG_SHORT
7605 && i.op[0].regs->reg_num == 1
7606 : (i.tm.base_opcode | 1) == POP_SEG386_SHORT
7607 && i.op[0].regs->reg_num < 4)
7608 {
7609 as_bad (_("you can't `%s %s%s'"),
7610 i.tm.name, register_prefix, i.op[0].regs->reg_name);
7611 return 0;
7612 }
      if (i.op[0].regs->reg_num > 3 && i.tm.opcode_length == 1)
7614 {
7615 i.tm.base_opcode ^= POP_SEG_SHORT ^ POP_SEG386_SHORT;
7616 i.tm.opcode_length = 2;
7617 }
7618 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
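      /* Illustrative: the OR just above puts the segment register
	 number in opcode bits 3-5, e.g. "pop %ds" is
	 0x07 | (3 << 3) == 0x1f; %fs/%gs (reg_num > 3) first switch to
	 the two-byte 0x0f-prefixed form.  */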
7619 }
7620 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
7621 {
7622 default_seg = &ds;
7623 }
7624 else if (i.tm.opcode_modifier.isstring)
7625 {
7626 /* For the string instructions that allow a segment override
7627 on one of their operands, the default segment is ds. */
7628 default_seg = &ds;
7629 }
7630 else if (i.short_form)
7631 {
7632 /* The register or float register operand is in operand
7633 0 or 1. */
7634 unsigned int op = i.tm.operand_types[0].bitfield.class != Reg;
7635
7636 /* Register goes in low 3 bits of opcode. */
7637 i.tm.base_opcode |= i.op[op].regs->reg_num;
7638 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7639 i.rex |= REX_B;
7640 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
7641 {
7642 /* Warn about some common errors, but press on regardless.
7643 The first case can be generated by gcc (<= 2.8.1). */
7644 if (i.operands == 2)
7645 {
7646 /* Reversed arguments on faddp, fsubp, etc. */
7647 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
7648 register_prefix, i.op[!intel_syntax].regs->reg_name,
7649 register_prefix, i.op[intel_syntax].regs->reg_name);
7650 }
7651 else
7652 {
7653 /* Extraneous `l' suffix on fp insn. */
7654 as_warn (_("translating to `%s %s%s'"), i.tm.name,
7655 register_prefix, i.op[0].regs->reg_name);
7656 }
7657 }
7658 }
7659
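
  /* Illustrative short-form example: "inc %eax" in 32-bit mode puts
     the register number in the low 3 bits of the opcode (0x40 | 0);
     REX_B extends this to %r8-%r15 where applicable.  */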
7660 if ((i.seg[0] || i.prefix[SEG_PREFIX])
7661 && i.tm.base_opcode == 0x8d /* lea */
7662 && !is_any_vex_encoding(&i.tm))
7663 {
7664 if (!quiet_warnings)
7665 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
7666 if (optimize)
7667 {
7668 i.seg[0] = NULL;
7669 i.prefix[SEG_PREFIX] = 0;
7670 }
7671 }
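
  /* E.g. (illustrative): "lea %fs:4(%rax), %rbx" triggers the warning
     above, since lea performs no memory access; when optimizing, the
     redundant segment prefix is dropped entirely.  */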
7672
7673 /* If a segment was explicitly specified, and the specified segment
7674 is neither the default nor the one already recorded from a prefix,
7675 use an opcode prefix to select it. If we never figured out what
7676 the default segment is, then default_seg will be zero at this
7677 point, and the specified segment prefix will always be used. */
7678 if (i.seg[0]
7679 && i.seg[0] != default_seg
7680 && i.seg[0]->seg_prefix != i.prefix[SEG_PREFIX])
7681 {
7682 if (!add_prefix (i.seg[0]->seg_prefix))
7683 return 0;
7684 }
7685 return 1;
7686 }
7687
7688 static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
7689 bfd_boolean do_sse2avx)
7690 {
7691 if (r->reg_flags & RegRex)
7692 {
7693 if (i.rex & rex_bit)
7694 as_bad (_("same type of prefix used twice"));
7695 i.rex |= rex_bit;
7696 }
7697 else if (do_sse2avx && (i.rex & rex_bit) && i.vex.register_specifier)
7698 {
7699 gas_assert (i.vex.register_specifier == r);
7700 i.vex.register_specifier += 8;
7701 }
7702
7703 if (r->reg_flags & RegVRex)
7704 i.vrex |= rex_bit;
7705 }
7706
7707 static const seg_entry *
7708 build_modrm_byte (void)
7709 {
7710 const seg_entry *default_seg = 0;
7711 unsigned int source, dest;
7712 int vex_3_sources;
7713
7714 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
7715 if (vex_3_sources)
7716 {
7717 unsigned int nds, reg_slot;
7718 expressionS *exp;
7719
7720 dest = i.operands - 1;
7721 nds = dest - 1;
7722
7723 /* There are 2 kinds of instructions:
7724 1. 5 operands: 4 register operands or 3 register operands
7725 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7726 VexW0 or VexW1. The destination must be either XMM, YMM or
7727 ZMM register.
7728 2. 4 operands: 4 register operands or 3 register operands
7729 plus 1 memory operand, with VexXDS. */
7730 gas_assert ((i.reg_operands == 4
7731 || (i.reg_operands == 3 && i.mem_operands == 1))
7732 && i.tm.opcode_modifier.vexvvvv == VEXXDS
7733 && i.tm.opcode_modifier.vexw
7734 && i.tm.operand_types[dest].bitfield.class == RegSIMD);
7735
7736 /* If VexW1 is set, the first non-immediate operand is the source and
7737 the second non-immediate one is encoded in the immediate operand. */
7738 if (i.tm.opcode_modifier.vexw == VEXW1)
7739 {
7740 source = i.imm_operands;
7741 reg_slot = i.imm_operands + 1;
7742 }
7743 else
7744 {
7745 source = i.imm_operands + 1;
7746 reg_slot = i.imm_operands;
7747 }
7748
7749 if (i.imm_operands == 0)
7750 {
7751 /* When there is no immediate operand, generate an 8bit
7752 immediate operand to encode the first operand. */
7753 exp = &im_expressions[i.imm_operands++];
7754 i.op[i.operands].imms = exp;
7755 i.types[i.operands] = imm8;
7756 i.operands++;
7757
7758 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7759 exp->X_op = O_constant;
7760 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
7761 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7762 }
7763 else
7764 {
7765 gas_assert (i.imm_operands == 1);
7766 gas_assert (fits_in_imm4 (i.op[0].imms->X_add_number));
7767 gas_assert (!i.tm.opcode_modifier.immext);
7768
7769 /* Turn on Imm8 again so that output_imm will generate it. */
7770 i.types[0].bitfield.imm8 = 1;
7771
7772 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7773 i.op[0].imms->X_add_number
7774 |= register_number (i.op[reg_slot].regs) << 4;
7775 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7776 }
7777
7778 gas_assert (i.tm.operand_types[nds].bitfield.class == RegSIMD);
7779 i.vex.register_specifier = i.op[nds].regs;
7780 }
7781 else
7782 source = dest = 0;
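
/* Illustrative example (assuming an FMA4-style 3-source template, not
   taken from the opcode table): when one of the sources is, say, %xmm4
   (register_number 4) and no explicit immediate exists, the code above
   synthesizes an imm8 of 4 << 4 == 0x40, i.e. the register rides in
   the high nibble of the immediate byte.  */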
7783
7784 /* i.reg_operands MUST be the number of real register operands;
7785 implicit registers do not count. If there are 3 register
7786 operands, it must be an instruction with VexNDS. For an
7787 instruction with VexNDD, the destination register is encoded
7788 in the VEX prefix. If there are 4 register operands, it must
7789 be an instruction with a VEX prefix and 3 sources. */
7790 if (i.mem_operands == 0
7791 && ((i.reg_operands == 2
7792 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
7793 || (i.reg_operands == 3
7794 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
7795 || (i.reg_operands == 4 && vex_3_sources)))
7796 {
7797 switch (i.operands)
7798 {
7799 case 2:
7800 source = 0;
7801 break;
7802 case 3:
7803 /* When there are 3 operands, one of them may be immediate,
7804 which may be the first or the last operand. Otherwise,
7805 the first operand must be shift count register (cl) or it
7806 is an instruction with VexNDS. */
7807 gas_assert (i.imm_operands == 1
7808 || (i.imm_operands == 0
7809 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
7810 || (i.types[0].bitfield.instance == RegC
7811 && i.types[0].bitfield.byte))));
7812 if (operand_type_check (i.types[0], imm)
7813 || (i.types[0].bitfield.instance == RegC
7814 && i.types[0].bitfield.byte))
7815 source = 1;
7816 else
7817 source = 0;
7818 break;
7819 case 4:
7820 /* When there are 4 operands, the first two must be 8bit
7821 immediate operands. The source operand will be the 3rd
7822 one.
7823
7824 For instructions with VexNDS, if the first operand is
7825 an imm8, the source operand is the 2nd one. If the last
7826 operand is an imm8, the source operand is the first one. */
7827 gas_assert ((i.imm_operands == 2
7828 && i.types[0].bitfield.imm8
7829 && i.types[1].bitfield.imm8)
7830 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
7831 && i.imm_operands == 1
7832 && (i.types[0].bitfield.imm8
7833 || i.types[i.operands - 1].bitfield.imm8
7834 || i.rounding)));
7835 if (i.imm_operands == 2)
7836 source = 2;
7837 else
7838 {
7839 if (i.types[0].bitfield.imm8)
7840 source = 1;
7841 else
7842 source = 0;
7843 }
7844 break;
7845 case 5:
7846 if (is_evex_encoding (&i.tm))
7847 {
7848 /* For EVEX instructions, when there are 5 operands, the
7849 first one must be an immediate operand. If the second one
7850 is an immediate operand, the source operand is the 3rd
7851 one. If the last one is an immediate operand, the source
7852 operand is the 2nd one. */
7853 gas_assert (i.imm_operands == 2
7854 && i.tm.opcode_modifier.sae
7855 && operand_type_check (i.types[0], imm));
7856 if (operand_type_check (i.types[1], imm))
7857 source = 2;
7858 else if (operand_type_check (i.types[4], imm))
7859 source = 1;
7860 else
7861 abort ();
7862 }
7863 break;
7864 default:
7865 abort ();
7866 }
7867
7868 if (!vex_3_sources)
7869 {
7870 dest = source + 1;
7871
7872 /* The RC/SAE operand could be between DEST and SRC. That happens
7873 when one operand is a GPR and the other one is an XMM/YMM/ZMM
7874 register. */
7875 if (i.rounding && i.rounding->operand == (int) dest)
7876 dest++;
7877
7878 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
7879 {
7880 /* For instructions with VexNDS, the register-only source
7881 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7882 register. It is encoded in VEX prefix. */
7883
7884 i386_operand_type op;
7885 unsigned int vvvv;
7886
7887 /* Swap two source operands if needed. */
7888 if (i.tm.opcode_modifier.swapsources)
7889 {
7890 vvvv = source;
7891 source = dest;
7892 }
7893 else
7894 vvvv = dest;
7895
7896 op = i.tm.operand_types[vvvv];
7897 if ((dest + 1) >= i.operands
7898 || ((op.bitfield.class != Reg
7899 || (!op.bitfield.dword && !op.bitfield.qword))
7900 && op.bitfield.class != RegSIMD
7901 && !operand_type_equal (&op, &regmask)))
7902 abort ();
7903 i.vex.register_specifier = i.op[vvvv].regs;
7904 dest++;
7905 }
7906 }
7907
7908 i.rm.mode = 3;
7909 /* One of the register operands will be encoded in the i.rm.reg
7910 field, the other in the combined i.rm.mode and i.rm.regmem
7911 fields. If no form of this instruction supports a memory
7912 destination operand, then we assume the source operand may
7913 sometimes be a memory operand and so we need to store the
7914 destination in the i.rm.reg field. */
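/* A minimal worked example (illustrative only): if the destination
   register is %ebx (reg_num 3) and the source is %ecx (reg_num 1),
   then i.rm.reg = 3, i.rm.regmem = 1, and with i.rm.mode = 3 the
   ModRM byte emitted later is (3 << 6) | (3 << 3) | 1 == 0xd9.  */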
7915 if (!i.tm.opcode_modifier.regmem
7916 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
7917 {
7918 i.rm.reg = i.op[dest].regs->reg_num;
7919 i.rm.regmem = i.op[source].regs->reg_num;
7920 if (i.op[dest].regs->reg_type.bitfield.class == RegMMX
7921 || i.op[source].regs->reg_type.bitfield.class == RegMMX)
7922 i.has_regmmx = TRUE;
7923 else if (i.op[dest].regs->reg_type.bitfield.class == RegSIMD
7924 || i.op[source].regs->reg_type.bitfield.class == RegSIMD)
7925 {
7926 if (i.types[dest].bitfield.zmmword
7927 || i.types[source].bitfield.zmmword)
7928 i.has_regzmm = TRUE;
7929 else if (i.types[dest].bitfield.ymmword
7930 || i.types[source].bitfield.ymmword)
7931 i.has_regymm = TRUE;
7932 else
7933 i.has_regxmm = TRUE;
7934 }
7935 set_rex_vrex (i.op[dest].regs, REX_R, i.tm.opcode_modifier.sse2avx);
7936 set_rex_vrex (i.op[source].regs, REX_B, FALSE);
7937 }
7938 else
7939 {
7940 i.rm.reg = i.op[source].regs->reg_num;
7941 i.rm.regmem = i.op[dest].regs->reg_num;
7942 set_rex_vrex (i.op[dest].regs, REX_B, i.tm.opcode_modifier.sse2avx);
7943 set_rex_vrex (i.op[source].regs, REX_R, FALSE);
7944 }
7945 if (flag_code != CODE_64BIT && (i.rex & REX_R))
7946 {
7947 if (i.types[!i.tm.opcode_modifier.regmem].bitfield.class != RegCR)
7948 abort ();
7949 i.rex &= ~REX_R;
7950 add_prefix (LOCK_PREFIX_OPCODE);
7951 }
7952 }
7953 else
7954 { /* If it's not 2 reg operands... */
7955 unsigned int mem;
7956
7957 if (i.mem_operands)
7958 {
7959 unsigned int fake_zero_displacement = 0;
7960 unsigned int op;
7961
7962 for (op = 0; op < i.operands; op++)
7963 if (i.flags[op] & Operand_Mem)
7964 break;
7965 gas_assert (op < i.operands);
7966
7967 if (i.tm.opcode_modifier.sib)
7968 {
7969 if (i.index_reg->reg_num == RegIZ)
7970 abort ();
7971
7972 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7973 if (!i.base_reg)
7974 {
7975 i.sib.base = NO_BASE_REGISTER;
7976 i.sib.scale = i.log2_scale_factor;
7977 i.types[op].bitfield.disp8 = 0;
7978 i.types[op].bitfield.disp16 = 0;
7979 i.types[op].bitfield.disp64 = 0;
7980 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7981 {
7982 /* Must be 32 bit */
7983 i.types[op].bitfield.disp32 = 1;
7984 i.types[op].bitfield.disp32s = 0;
7985 }
7986 else
7987 {
7988 i.types[op].bitfield.disp32 = 0;
7989 i.types[op].bitfield.disp32s = 1;
7990 }
7991 }
7992 i.sib.index = i.index_reg->reg_num;
7993 set_rex_vrex (i.index_reg, REX_X, FALSE);
7994 }
7995
7996 default_seg = &ds;
7997
7998 if (i.base_reg == 0)
7999 {
8000 i.rm.mode = 0;
8001 if (!i.disp_operands)
8002 fake_zero_displacement = 1;
8003 if (i.index_reg == 0)
8004 {
8005 i386_operand_type newdisp;
8006
8007 gas_assert (!i.tm.opcode_modifier.sib);
8008 /* Operand is just <disp> */
8009 if (flag_code == CODE_64BIT)
8010 {
8011 /* In 64bit mode, 32bit absolute addressing is
8012 replaced by RIP-relative addressing, and absolute
8013 addressing is instead encoded by one of the
8014 redundant SIB forms. */
8015 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8016 i.sib.base = NO_BASE_REGISTER;
8017 i.sib.index = NO_INDEX_REGISTER;
8018 newdisp = (!i.prefix[ADDR_PREFIX] ? disp32s : disp32);
8019 }
8020 else if ((flag_code == CODE_16BIT)
8021 ^ (i.prefix[ADDR_PREFIX] != 0))
8022 {
8023 i.rm.regmem = NO_BASE_REGISTER_16;
8024 newdisp = disp16;
8025 }
8026 else
8027 {
8028 i.rm.regmem = NO_BASE_REGISTER;
8029 newdisp = disp32;
8030 }
8031 i.types[op] = operand_type_and_not (i.types[op], anydisp);
8032 i.types[op] = operand_type_or (i.types[op], newdisp);
8033 }
8034 else if (!i.tm.opcode_modifier.sib)
8035 {
8036 /* !i.base_reg && i.index_reg */
8037 if (i.index_reg->reg_num == RegIZ)
8038 i.sib.index = NO_INDEX_REGISTER;
8039 else
8040 i.sib.index = i.index_reg->reg_num;
8041 i.sib.base = NO_BASE_REGISTER;
8042 i.sib.scale = i.log2_scale_factor;
8043 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8044 i.types[op].bitfield.disp8 = 0;
8045 i.types[op].bitfield.disp16 = 0;
8046 i.types[op].bitfield.disp64 = 0;
8047 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
8048 {
8049 /* Must be 32 bit */
8050 i.types[op].bitfield.disp32 = 1;
8051 i.types[op].bitfield.disp32s = 0;
8052 }
8053 else
8054 {
8055 i.types[op].bitfield.disp32 = 0;
8056 i.types[op].bitfield.disp32s = 1;
8057 }
8058 if ((i.index_reg->reg_flags & RegRex) != 0)
8059 i.rex |= REX_X;
8060 }
8061 }
8062 /* RIP addressing for 64bit mode. */
8063 else if (i.base_reg->reg_num == RegIP)
8064 {
8065 gas_assert (!i.tm.opcode_modifier.sib);
8066 i.rm.regmem = NO_BASE_REGISTER;
8067 i.types[op].bitfield.disp8 = 0;
8068 i.types[op].bitfield.disp16 = 0;
8069 i.types[op].bitfield.disp32 = 0;
8070 i.types[op].bitfield.disp32s = 1;
8071 i.types[op].bitfield.disp64 = 0;
8072 i.flags[op] |= Operand_PCrel;
8073 if (! i.disp_operands)
8074 fake_zero_displacement = 1;
8075 }
8076 else if (i.base_reg->reg_type.bitfield.word)
8077 {
8078 gas_assert (!i.tm.opcode_modifier.sib);
8079 switch (i.base_reg->reg_num)
8080 {
8081 case 3: /* (%bx) */
8082 if (i.index_reg == 0)
8083 i.rm.regmem = 7;
8084 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8085 i.rm.regmem = i.index_reg->reg_num - 6;
8086 break;
8087 case 5: /* (%bp) */
8088 default_seg = &ss;
8089 if (i.index_reg == 0)
8090 {
8091 i.rm.regmem = 6;
8092 if (operand_type_check (i.types[op], disp) == 0)
8093 {
8094 /* fake (%bp) into 0(%bp) */
8095 i.types[op].bitfield.disp8 = 1;
8096 fake_zero_displacement = 1;
8097 }
8098 }
8099 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8100 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
8101 break;
8102 default: /* (%si) -> 4 or (%di) -> 5 */
8103 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
8104 }
8105 i.rm.mode = mode_from_disp_size (i.types[op]);
8106 }
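/* Worked example (illustrative): "movw %ax,(%bp)" has no displacement,
   but rm == 6 with mod == 0 would mean a bare disp16, so a fake 0(%bp)
   with a disp8 is materialized above and mode_from_disp_size then
   yields mod == 1.  */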
8107 else /* i.base_reg and 32/64 bit mode */
8108 {
8109 if (flag_code == CODE_64BIT
8110 && operand_type_check (i.types[op], disp))
8111 {
8112 i.types[op].bitfield.disp16 = 0;
8113 i.types[op].bitfield.disp64 = 0;
8114 if (i.prefix[ADDR_PREFIX] == 0)
8115 {
8116 i.types[op].bitfield.disp32 = 0;
8117 i.types[op].bitfield.disp32s = 1;
8118 }
8119 else
8120 {
8121 i.types[op].bitfield.disp32 = 1;
8122 i.types[op].bitfield.disp32s = 0;
8123 }
8124 }
8125
8126 if (!i.tm.opcode_modifier.sib)
8127 i.rm.regmem = i.base_reg->reg_num;
8128 if ((i.base_reg->reg_flags & RegRex) != 0)
8129 i.rex |= REX_B;
8130 i.sib.base = i.base_reg->reg_num;
8131 /* x86-64 ignores REX prefix bit here to avoid decoder
8132 complications. */
8133 if (!(i.base_reg->reg_flags & RegRex)
8134 && (i.base_reg->reg_num == EBP_REG_NUM
8135 || i.base_reg->reg_num == ESP_REG_NUM))
8136 default_seg = &ss;
8137 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
8138 {
8139 fake_zero_displacement = 1;
8140 i.types[op].bitfield.disp8 = 1;
8141 }
8142 i.sib.scale = i.log2_scale_factor;
8143 if (i.index_reg == 0)
8144 {
8145 gas_assert (!i.tm.opcode_modifier.sib);
8146 /* <disp>(%esp) becomes a two-byte modrm with no index
8147 register. We've already stored the code for esp
8148 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8149 Any base register besides %esp will not use the
8150 extra modrm byte. */
8151 i.sib.index = NO_INDEX_REGISTER;
8152 }
8153 else if (!i.tm.opcode_modifier.sib)
8154 {
8155 if (i.index_reg->reg_num == RegIZ)
8156 i.sib.index = NO_INDEX_REGISTER;
8157 else
8158 i.sib.index = i.index_reg->reg_num;
8159 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8160 if ((i.index_reg->reg_flags & RegRex) != 0)
8161 i.rex |= REX_X;
8162 }
8163
8164 if (i.disp_operands
8165 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
8166 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
8167 i.rm.mode = 0;
8168 else
8169 {
8170 if (!fake_zero_displacement
8171 && !i.disp_operands
8172 && i.disp_encoding)
8173 {
8174 fake_zero_displacement = 1;
8175 if (i.disp_encoding == disp_encoding_8bit)
8176 i.types[op].bitfield.disp8 = 1;
8177 else
8178 i.types[op].bitfield.disp32 = 1;
8179 }
8180 i.rm.mode = mode_from_disp_size (i.types[op]);
8181 }
8182 }
8183
8184 if (fake_zero_displacement)
8185 {
8186 /* Fakes a zero displacement assuming that i.types[op]
8187 holds the correct displacement size. */
8188 expressionS *exp;
8189
8190 gas_assert (i.op[op].disps == 0);
8191 exp = &disp_expressions[i.disp_operands++];
8192 i.op[op].disps = exp;
8193 exp->X_op = O_constant;
8194 exp->X_add_number = 0;
8195 exp->X_add_symbol = (symbolS *) 0;
8196 exp->X_op_symbol = (symbolS *) 0;
8197 }
8198
8199 mem = op;
8200 }
8201 else
8202 mem = ~0;
8203
8204 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
8205 {
8206 if (operand_type_check (i.types[0], imm))
8207 i.vex.register_specifier = NULL;
8208 else
8209 {
8210 /* VEX.vvvv encodes one of the sources when the first
8211 operand is not an immediate. */
8212 if (i.tm.opcode_modifier.vexw == VEXW0)
8213 i.vex.register_specifier = i.op[0].regs;
8214 else
8215 i.vex.register_specifier = i.op[1].regs;
8216 }
8217
8218 /* Destination is a XMM register encoded in the ModRM.reg
8219 and VEX.R bit. */
8220 i.rm.reg = i.op[2].regs->reg_num;
8221 if ((i.op[2].regs->reg_flags & RegRex) != 0)
8222 i.rex |= REX_R;
8223
8224 /* ModRM.rm and VEX.B encodes the other source. */
8225 if (!i.mem_operands)
8226 {
8227 i.rm.mode = 3;
8228
8229 if (i.tm.opcode_modifier.vexw == VEXW0)
8230 i.rm.regmem = i.op[1].regs->reg_num;
8231 else
8232 i.rm.regmem = i.op[0].regs->reg_num;
8233
8234 if ((i.op[1].regs->reg_flags & RegRex) != 0)
8235 i.rex |= REX_B;
8236 }
8237 }
8238 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
8239 {
8240 i.vex.register_specifier = i.op[2].regs;
8241 if (!i.mem_operands)
8242 {
8243 i.rm.mode = 3;
8244 i.rm.regmem = i.op[1].regs->reg_num;
8245 if ((i.op[1].regs->reg_flags & RegRex) != 0)
8246 i.rex |= REX_B;
8247 }
8248 }
8249 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8250 (if any) based on i.tm.extension_opcode. Again, we must be
8251 careful to make sure that segment/control/debug/test/MMX
8252 registers are coded into the i.rm.reg field. */
8253 else if (i.reg_operands)
8254 {
8255 unsigned int op;
8256 unsigned int vex_reg = ~0;
8257
8258 for (op = 0; op < i.operands; op++)
8259 {
8260 if (i.types[op].bitfield.class == Reg
8261 || i.types[op].bitfield.class == RegBND
8262 || i.types[op].bitfield.class == RegMask
8263 || i.types[op].bitfield.class == SReg
8264 || i.types[op].bitfield.class == RegCR
8265 || i.types[op].bitfield.class == RegDR
8266 || i.types[op].bitfield.class == RegTR)
8267 break;
8268 if (i.types[op].bitfield.class == RegSIMD)
8269 {
8270 if (i.types[op].bitfield.zmmword)
8271 i.has_regzmm = TRUE;
8272 else if (i.types[op].bitfield.ymmword)
8273 i.has_regymm = TRUE;
8274 else
8275 i.has_regxmm = TRUE;
8276 break;
8277 }
8278 if (i.types[op].bitfield.class == RegMMX)
8279 {
8280 i.has_regmmx = TRUE;
8281 break;
8282 }
8283 }
8284
8285 if (vex_3_sources)
8286 op = dest;
8287 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
8288 {
8289 /* For instructions with VexNDS, the register-only
8290 source operand is encoded in VEX prefix. */
8291 gas_assert (mem != (unsigned int) ~0);
8292
8293 if (op > mem)
8294 {
8295 vex_reg = op++;
8296 gas_assert (op < i.operands);
8297 }
8298 else
8299 {
8300 /* Check register-only source operand when two source
8301 operands are swapped. */
8302 if (!i.tm.operand_types[op].bitfield.baseindex
8303 && i.tm.operand_types[op + 1].bitfield.baseindex)
8304 {
8305 vex_reg = op;
8306 op += 2;
8307 gas_assert (mem == (vex_reg + 1)
8308 && op < i.operands);
8309 }
8310 else
8311 {
8312 vex_reg = op + 1;
8313 gas_assert (vex_reg < i.operands);
8314 }
8315 }
8316 }
8317 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
8318 {
8319 /* For instructions with VexNDD, the register destination
8320 is encoded in VEX prefix. */
8321 if (i.mem_operands == 0)
8322 {
8323 /* There is no memory operand. */
8324 gas_assert ((op + 2) == i.operands);
8325 vex_reg = op + 1;
8326 }
8327 else
8328 {
8329 /* There are only 2 non-immediate operands. */
8330 gas_assert (op < i.imm_operands + 2
8331 && i.operands == i.imm_operands + 2);
8332 vex_reg = i.imm_operands + 1;
8333 }
8334 }
8335 else
8336 gas_assert (op < i.operands);
8337
8338 if (vex_reg != (unsigned int) ~0)
8339 {
8340 i386_operand_type *type = &i.tm.operand_types[vex_reg];
8341
8342 if ((type->bitfield.class != Reg
8343 || (!type->bitfield.dword && !type->bitfield.qword))
8344 && type->bitfield.class != RegSIMD
8345 && !operand_type_equal (type, &regmask))
8346 abort ();
8347
8348 i.vex.register_specifier = i.op[vex_reg].regs;
8349 }
8350
8351 /* Don't set OP operand twice. */
8352 if (vex_reg != op)
8353 {
8354 /* If there is an extension opcode to put here, the
8355 register number must be put into the regmem field. */
8356 if (i.tm.extension_opcode != None)
8357 {
8358 i.rm.regmem = i.op[op].regs->reg_num;
8359 set_rex_vrex (i.op[op].regs, REX_B,
8360 i.tm.opcode_modifier.sse2avx);
8361 }
8362 else
8363 {
8364 i.rm.reg = i.op[op].regs->reg_num;
8365 set_rex_vrex (i.op[op].regs, REX_R,
8366 i.tm.opcode_modifier.sse2avx);
8367 }
8368 }
8369
8370 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8371 must set it to 3 to indicate this is a register operand
8372 in the regmem field. */
8373 if (!i.mem_operands)
8374 i.rm.mode = 3;
8375 }
8376
8377 /* Fill in i.rm.reg field with extension opcode (if any). */
8378 if (i.tm.extension_opcode != None)
8379 i.rm.reg = i.tm.extension_opcode;
8380 }
8381 return default_seg;
8382 }
8383
8384 static unsigned int
8385 flip_code16 (unsigned int code16)
8386 {
8387 gas_assert (i.tm.operands == 1);
8388
8389 return !(i.prefix[REX_PREFIX] & REX_W)
8390 && (code16 ? i.tm.operand_types[0].bitfield.disp32
8391 || i.tm.operand_types[0].bitfield.disp32s
8392 : i.tm.operand_types[0].bitfield.disp16)
8393 ? CODE16 : 0;
8394 }
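/* Example (illustrative): in 16-bit code, code16 starts as CODE16; a
   jump template carrying disp32/disp32s operand types together with a
   0x66 data prefix makes flip_code16 return CODE16, and the caller's
   XOR clears the flag, selecting the 32-bit displacement form.  */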
8395
8396 static void
8397 output_branch (void)
8398 {
8399 char *p;
8400 int size;
8401 int code16;
8402 int prefix;
8403 relax_substateT subtype;
8404 symbolS *sym;
8405 offsetT off;
8406
8407 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
8408 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
8409
8410 prefix = 0;
8411 if (i.prefix[DATA_PREFIX] != 0)
8412 {
8413 prefix = 1;
8414 i.prefixes -= 1;
8415 code16 ^= flip_code16(code16);
8416 }
8417 /* Pentium4 branch hints. */
8418 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
8419 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
8420 {
8421 prefix++;
8422 i.prefixes--;
8423 }
8424 if (i.prefix[REX_PREFIX] != 0)
8425 {
8426 prefix++;
8427 i.prefixes--;
8428 }
8429
8430 /* BND prefixed jump. */
8431 if (i.prefix[BND_PREFIX] != 0)
8432 {
8433 prefix++;
8434 i.prefixes--;
8435 }
8436
8437 if (i.prefixes != 0)
8438 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8439
8440 /* It's always a symbol; end the frag & set up for relaxation.
8441 Make sure there is enough room in this frag for the largest
8442 instruction we may generate in md_convert_frag. This is 2
8443 bytes for the opcode and room for the prefix and largest
8444 displacement. */
8445 frag_grow (prefix + 2 + 4);
8446 /* Prefix and 1 opcode byte go in fr_fix. */
8447 p = frag_more (prefix + 1);
8448 if (i.prefix[DATA_PREFIX] != 0)
8449 *p++ = DATA_PREFIX_OPCODE;
8450 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
8451 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
8452 *p++ = i.prefix[SEG_PREFIX];
8453 if (i.prefix[BND_PREFIX] != 0)
8454 *p++ = BND_PREFIX_OPCODE;
8455 if (i.prefix[REX_PREFIX] != 0)
8456 *p++ = i.prefix[REX_PREFIX];
8457 *p = i.tm.base_opcode;
8458
8459 if ((unsigned char) *p == JUMP_PC_RELATIVE)
8460 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
8461 else if (cpu_arch_flags.bitfield.cpui386)
8462 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
8463 else
8464 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
8465 subtype |= code16;
8466
8467 sym = i.op[0].disps->X_add_symbol;
8468 off = i.op[0].disps->X_add_number;
8469
8470 if (i.op[0].disps->X_op != O_constant
8471 && i.op[0].disps->X_op != O_symbol)
8472 {
8473 /* Handle complex expressions. */
8474 sym = make_expr_symbol (i.op[0].disps);
8475 off = 0;
8476 }
8477
8478 /* 1 possible extra opcode + 4 byte displacement go in var part.
8479 Pass reloc in fr_var. */
8480 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
8481 }
8482
8483 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8484 /* Return TRUE iff PLT32 relocation should be used for branching to
8485 symbol S. */
8486
8487 static bfd_boolean
8488 need_plt32_p (symbolS *s)
8489 {
8490 /* PLT32 relocation is ELF only. */
8491 if (!IS_ELF)
8492 return FALSE;
8493
8494 #ifdef TE_SOLARIS
8495 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8496 krtld support it. */
8497 return FALSE;
8498 #endif
8499
8500 /* Since there is no need to prepare for a PLT branch on x86-64, we
8501 can generate R_X86_64_PLT32 instead of R_X86_64_PC32; the former
8502 can then be used as a marker for 32-bit PC-relative branches. */
8503 if (!object_64bit)
8504 return FALSE;
8505
8506 /* Weak or undefined symbols need a PLT32 relocation. */
8507 if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
8508 return TRUE;
8509
8510 /* A non-global symbol doesn't need a PLT32 relocation. */
8511 if (! S_IS_EXTERNAL (s))
8512 return FALSE;
8513
8514 /* Other global symbols need a PLT32 relocation. NB: Symbols with
8515 non-default visibility are treated as normal global symbols so
8516 that the PLT32 relocation can be used as a marker for 32-bit
8517 PC-relative branches. This is useful for linker relaxation. */
8518 return TRUE;
8519 }
8520 #endif
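
/* Example (illustrative): for "call foo" where foo is undefined or
   weak, need_plt32_p returns TRUE and output_jump below emits
   BFD_RELOC_X86_64_PLT32 rather than a plain PC32, so the linker can
   later treat the call specially if foo turns out to be local.  */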
8521
8522 static void
8523 output_jump (void)
8524 {
8525 char *p;
8526 int size;
8527 fixS *fixP;
8528 bfd_reloc_code_real_type jump_reloc = i.reloc[0];
8529
8530 if (i.tm.opcode_modifier.jump == JUMP_BYTE)
8531 {
8532 /* This is a loop or jecxz type instruction. */
8533 size = 1;
8534 if (i.prefix[ADDR_PREFIX] != 0)
8535 {
8536 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
8537 i.prefixes -= 1;
8538 }
8539 /* Pentium4 branch hints. */
8540 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
8541 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
8542 {
8543 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
8544 i.prefixes--;
8545 }
8546 }
8547 else
8548 {
8549 int code16;
8550
8551 code16 = 0;
8552 if (flag_code == CODE_16BIT)
8553 code16 = CODE16;
8554
8555 if (i.prefix[DATA_PREFIX] != 0)
8556 {
8557 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
8558 i.prefixes -= 1;
8559 code16 ^= flip_code16(code16);
8560 }
8561
8562 size = 4;
8563 if (code16)
8564 size = 2;
8565 }
8566
8567 /* BND prefixed jump. */
8568 if (i.prefix[BND_PREFIX] != 0)
8569 {
8570 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
8571 i.prefixes -= 1;
8572 }
8573
8574 if (i.prefix[REX_PREFIX] != 0)
8575 {
8576 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
8577 i.prefixes -= 1;
8578 }
8579
8580 if (i.prefixes != 0)
8581 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8582
8583 p = frag_more (i.tm.opcode_length + size);
8584 switch (i.tm.opcode_length)
8585 {
8586 case 2:
8587 *p++ = i.tm.base_opcode >> 8;
8588 /* Fall through. */
8589 case 1:
8590 *p++ = i.tm.base_opcode;
8591 break;
8592 default:
8593 abort ();
8594 }
8595
8596 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8597 if (size == 4
8598 && jump_reloc == NO_RELOC
8599 && need_plt32_p (i.op[0].disps->X_add_symbol))
8600 jump_reloc = BFD_RELOC_X86_64_PLT32;
8601 #endif
8602
8603 jump_reloc = reloc (size, 1, 1, jump_reloc);
8604
8605 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8606 i.op[0].disps, 1, jump_reloc);
8607
8608 /* All jumps handled here are signed, but don't use a signed limit
8609 check for 32 and 16 bit jumps as we want to allow wrap around at
8610 4G and 64k respectively. */
8611 if (size == 1)
8612 fixP->fx_signed = 1;
8613 }
8614
8615 static void
8616 output_interseg_jump (void)
8617 {
8618 char *p;
8619 int size;
8620 int prefix;
8621 int code16;
8622
8623 code16 = 0;
8624 if (flag_code == CODE_16BIT)
8625 code16 = CODE16;
8626
8627 prefix = 0;
8628 if (i.prefix[DATA_PREFIX] != 0)
8629 {
8630 prefix = 1;
8631 i.prefixes -= 1;
8632 code16 ^= CODE16;
8633 }
8634
8635 gas_assert (!i.prefix[REX_PREFIX]);
8636
8637 size = 4;
8638 if (code16)
8639 size = 2;
8640
8641 if (i.prefixes != 0)
8642 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8643
8644 /* 1 opcode; 2 segment; offset */
8645 p = frag_more (prefix + 1 + 2 + size);
8646
8647 if (i.prefix[DATA_PREFIX] != 0)
8648 *p++ = DATA_PREFIX_OPCODE;
8649
8650 if (i.prefix[REX_PREFIX] != 0)
8651 *p++ = i.prefix[REX_PREFIX];
8652
8653 *p++ = i.tm.base_opcode;
8654 if (i.op[1].imms->X_op == O_constant)
8655 {
8656 offsetT n = i.op[1].imms->X_add_number;
8657
8658 if (size == 2
8659 && !fits_in_unsigned_word (n)
8660 && !fits_in_signed_word (n))
8661 {
8662 as_bad (_("16-bit jump out of range"));
8663 return;
8664 }
8665 md_number_to_chars (p, n, size);
8666 }
8667 else
8668 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8669 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
8670 if (i.op[0].imms->X_op != O_constant)
8671 as_bad (_("can't handle non absolute segment in `%s'"),
8672 i.tm.name);
8673 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
8674 }
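
/* Worked example (illustrative): "ljmp $0x08,$0x1000" in 32-bit code
   emits ea 00 10 00 00 08 00 -- one opcode byte, the 4-byte offset,
   then the 2-byte segment selector, matching the layout above.  */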
8675
8676 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8677 void
8678 x86_cleanup (void)
8679 {
8680 char *p;
8681 asection *seg = now_seg;
8682 subsegT subseg = now_subseg;
8683 asection *sec;
8684 unsigned int alignment, align_size_1;
8685 unsigned int isa_1_descsz, feature_2_descsz, descsz;
8686 unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
8687 unsigned int padding;
8688
8689 if (!IS_ELF || !x86_used_note)
8690 return;
8691
8692 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;
8693
8694 /* The .note.gnu.property section layout:
8695
8696 Field Length Contents
8697 ---- ---- ----
8698 n_namsz 4 4
8699 n_descsz 4 The note descriptor size
8700 n_type 4 NT_GNU_PROPERTY_TYPE_0
8701 n_name 4 "GNU"
8702 n_desc n_descsz The program property array
8703 .... .... ....
8704 */
8705
8706 /* Create the .note.gnu.property section. */
8707 sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
8708 bfd_set_section_flags (sec,
8709 (SEC_ALLOC
8710 | SEC_LOAD
8711 | SEC_DATA
8712 | SEC_HAS_CONTENTS
8713 | SEC_READONLY));
8714
8715 if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
8716 {
8717 align_size_1 = 7;
8718 alignment = 3;
8719 }
8720 else
8721 {
8722 align_size_1 = 3;
8723 alignment = 2;
8724 }
8725
8726 bfd_set_section_alignment (sec, alignment);
8727 elf_section_type (sec) = SHT_NOTE;
8728
8729 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8730 + 4-byte data */
8731 isa_1_descsz_raw = 4 + 4 + 4;
8732 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8733 isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;
8734
8735 feature_2_descsz_raw = isa_1_descsz;
8736 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8737 + 4-byte data */
8738 feature_2_descsz_raw += 4 + 4 + 4;
8739 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8740 feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
8741 & ~align_size_1);
8742
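/* Worked example (illustrative), for ELFCLASS64: align_size_1 == 7,
   so isa_1_descsz == (12 + 7) & ~7 == 16 and feature_2_descsz ==
   (16 + 12 + 7) & ~7 == 32; the frag below is then 16 + 32 == 48
   bytes.  */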
8743 descsz = feature_2_descsz;
8744 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8745 p = frag_more (4 + 4 + 4 + 4 + descsz);
8746
8747 /* Write n_namsz. */
8748 md_number_to_chars (p, (valueT) 4, 4);
8749
8750 /* Write n_descsz. */
8751 md_number_to_chars (p + 4, (valueT) descsz, 4);
8752
8753 /* Write n_type. */
8754 md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);
8755
8756 /* Write n_name. */
8757 memcpy (p + 4 * 3, "GNU", 4);
8758
8759 /* Write 4-byte type. */
8760 md_number_to_chars (p + 4 * 4,
8761 (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);
8762
8763 /* Write 4-byte data size. */
8764 md_number_to_chars (p + 4 * 5, (valueT) 4, 4);
8765
8766 /* Write 4-byte data. */
8767 md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);
8768
8769 /* Zero out paddings. */
8770 padding = isa_1_descsz - isa_1_descsz_raw;
8771 if (padding)
8772 memset (p + 4 * 7, 0, padding);
8773
8774 /* Write 4-byte type. */
8775 md_number_to_chars (p + isa_1_descsz + 4 * 4,
8776 (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);
8777
8778 /* Write 4-byte data size. */
8779 md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);
8780
8781 /* Write 4-byte data. */
8782 md_number_to_chars (p + isa_1_descsz + 4 * 6,
8783 (valueT) x86_feature_2_used, 4);
8784
8785 /* Zero out paddings. */
8786 padding = feature_2_descsz - feature_2_descsz_raw;
8787 if (padding)
8788 memset (p + isa_1_descsz + 4 * 7, 0, padding);
8789
8790 /* We probably can't restore the current segment, for there likely
8791 isn't one yet... */
8792 if (seg && subseg)
8793 subseg_set (seg, subseg);
8794 }
8795 #endif
8796
8797 static unsigned int
8798 encoding_length (const fragS *start_frag, offsetT start_off,
8799 const char *frag_now_ptr)
8800 {
8801 unsigned int len = 0;
8802
8803 if (start_frag != frag_now)
8804 {
8805 const fragS *fr = start_frag;
8806
8807 do {
8808 len += fr->fr_fix;
8809 fr = fr->fr_next;
8810 } while (fr && fr != frag_now);
8811 }
8812
8813 return len - start_off + (frag_now_ptr - frag_now->fr_literal);
8814 }
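
/* Example (illustrative): if the insn started in a previous frag with
   fr_fix == 10 at start_off == 7, and 3 bytes are already in the
   current frag, the encoding length is 10 - 7 + 3 == 6 bytes.  */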
8815
8816 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8817 be macro-fused with conditional jumps.
8818 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC uses a RIP-relative address,
8819 or has one of the following formats:
8820
8821 cmp m, imm
8822 add m, imm
8823 sub m, imm
8824 test m, imm
8825 and m, imm
8826 inc m
8827 dec m
8828
8829 it cannot be fused. */
8830
8831 static int
8832 maybe_fused_with_jcc_p (enum mf_cmp_kind* mf_cmp_p)
8833 {
8834 /* No RIP address. */
8835 if (i.base_reg && i.base_reg->reg_num == RegIP)
8836 return 0;
8837
8838 /* No VEX/EVEX encoding. */
8839 if (is_any_vex_encoding (&i.tm))
8840 return 0;
8841
8842 /* add, sub without add/sub m, imm. */
8843 if (i.tm.base_opcode <= 5
8844 || (i.tm.base_opcode >= 0x28 && i.tm.base_opcode <= 0x2d)
8845 || ((i.tm.base_opcode | 3) == 0x83
8846 && (i.tm.extension_opcode == 0x5
8847 || i.tm.extension_opcode == 0x0)))
8848 {
8849 *mf_cmp_p = mf_cmp_alu_cmp;
8850 return !(i.mem_operands && i.imm_operands);
8851 }
8852
8853 /* and without and m, imm. */
8854 if ((i.tm.base_opcode >= 0x20 && i.tm.base_opcode <= 0x25)
8855 || ((i.tm.base_opcode | 3) == 0x83
8856 && i.tm.extension_opcode == 0x4))
8857 {
8858 *mf_cmp_p = mf_cmp_test_and;
8859 return !(i.mem_operands && i.imm_operands);
8860 }
8861
8862 /* test without test m, imm. */
8863 if ((i.tm.base_opcode | 1) == 0x85
8864 || (i.tm.base_opcode | 1) == 0xa9
8865 || ((i.tm.base_opcode | 1) == 0xf7
8866 && i.tm.extension_opcode == 0))
8867 {
8868 *mf_cmp_p = mf_cmp_test_and;
8869 return !(i.mem_operands && i.imm_operands);
8870 }
8871
8872 /* cmp without cmp m, imm. */
8873 if ((i.tm.base_opcode >= 0x38 && i.tm.base_opcode <= 0x3d)
8874 || ((i.tm.base_opcode | 3) == 0x83
8875 && (i.tm.extension_opcode == 0x7)))
8876 {
8877 *mf_cmp_p = mf_cmp_alu_cmp;
8878 return !(i.mem_operands && i.imm_operands);
8879 }
8880
8881 /* inc, dec without inc/dec m. */
8882 if ((i.tm.cpu_flags.bitfield.cpuno64
8883 && (i.tm.base_opcode | 0xf) == 0x4f)
8884 || ((i.tm.base_opcode | 1) == 0xff
8885 && i.tm.extension_opcode <= 0x1))
8886 {
8887 *mf_cmp_p = mf_cmp_incdec;
8888 return !i.mem_operands;
8889 }
8890
8891 return 0;
8892 }
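
/* Examples (illustrative): "cmp %eax,%ebx; jne lab" may be macro-fused
   (mf_cmp_alu_cmp), while "cmpl $1,(%ebx); jne lab" has both a memory
   and an immediate operand and so is reported as unfusable.  */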
8893
8894 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8895
8896 static int
8897 add_fused_jcc_padding_frag_p (enum mf_cmp_kind* mf_cmp_p)
8898 {
8899 /* NB: Don't work with COND_JUMP86 without i386. */
8900 if (!align_branch_power
8901 || now_seg == absolute_section
8902 || !cpu_arch_flags.bitfield.cpui386
8903 || !(align_branch & align_branch_fused_bit))
8904 return 0;
8905
8906 if (maybe_fused_with_jcc_p (mf_cmp_p))
8907 {
8908 if (last_insn.kind == last_insn_other
8909 || last_insn.seg != now_seg)
8910 return 1;
8911 if (flag_debug)
8912 as_warn_where (last_insn.file, last_insn.line,
8913 _("`%s` skips -malign-branch-boundary on `%s`"),
8914 last_insn.name, i.tm.name);
8915 }
8916
8917 return 0;
8918 }
8919
8920 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8921
8922 static int
8923 add_branch_prefix_frag_p (void)
8924 {
8925 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
8926 to PadLock instructions since they include prefixes in opcode. */
8927 if (!align_branch_power
8928 || !align_branch_prefix_size
8929 || now_seg == absolute_section
8930 || i.tm.cpu_flags.bitfield.cpupadlock
8931 || !cpu_arch_flags.bitfield.cpui386)
8932 return 0;
8933
8934 /* Don't add a prefix if the insn is itself a prefix, or if there
8935 is no operand, in which case a segment prefix may be special. */
8936 if (!i.operands || i.tm.opcode_modifier.isprefix)
8937 return 0;
8938
8939 if (last_insn.kind == last_insn_other
8940 || last_insn.seg != now_seg)
8941 return 1;
8942
8943 if (flag_debug)
8944 as_warn_where (last_insn.file, last_insn.line,
8945 _("`%s` skips -malign-branch-boundary on `%s`"),
8946 last_insn.name, i.tm.name);
8947
8948 return 0;
8949 }
8950
8951 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8952
8953 static int
8954 add_branch_padding_frag_p (enum align_branch_kind *branch_p,
8955 enum mf_jcc_kind *mf_jcc_p)
8956 {
8957 int add_padding;
8958
8959 /* NB: Don't work with COND_JUMP86 without i386. */
8960 if (!align_branch_power
8961 || now_seg == absolute_section
8962 || !cpu_arch_flags.bitfield.cpui386)
8963 return 0;
8964
8965 add_padding = 0;
8966
8967 /* Check for jcc and direct jmp. */
8968 if (i.tm.opcode_modifier.jump == JUMP)
8969 {
8970 if (i.tm.base_opcode == JUMP_PC_RELATIVE)
8971 {
8972 *branch_p = align_branch_jmp;
8973 add_padding = align_branch & align_branch_jmp_bit;
8974 }
8975 else
8976 {
8977 /* Because J<cc> and JN<cc> share the same group in the macro-fusible
8978 table, ignore the lowest bit. */
8979 *mf_jcc_p = (i.tm.base_opcode & 0x0e) >> 1;
8980 *branch_p = align_branch_jcc;
8981 if ((align_branch & align_branch_jcc_bit))
8982 add_padding = 1;
8983 }
8984 }
8985 else if (is_any_vex_encoding (&i.tm))
8986 return 0;
8987 else if ((i.tm.base_opcode | 1) == 0xc3)
8988 {
8989 /* Near ret. */
8990 *branch_p = align_branch_ret;
8991 if ((align_branch & align_branch_ret_bit))
8992 add_padding = 1;
8993 }
8994 else
8995 {
8996 /* Check for indirect jmp, direct and indirect calls. */
8997 if (i.tm.base_opcode == 0xe8)
8998 {
8999 /* Direct call. */
9000 *branch_p = align_branch_call;
9001 if ((align_branch & align_branch_call_bit))
9002 add_padding = 1;
9003 }
9004 else if (i.tm.base_opcode == 0xff
9005 && (i.tm.extension_opcode == 2
9006 || i.tm.extension_opcode == 4))
9007 {
9008 /* Indirect call and jmp. */
9009 *branch_p = align_branch_indirect;
9010 if ((align_branch & align_branch_indirect_bit))
9011 add_padding = 1;
9012 }
9013
9014 if (add_padding
9015 && i.disp_operands
9016 && tls_get_addr
9017 && (i.op[0].disps->X_op == O_symbol
9018 || (i.op[0].disps->X_op == O_subtract
9019 && i.op[0].disps->X_op_symbol == GOT_symbol)))
9020 {
9021 symbolS *s = i.op[0].disps->X_add_symbol;
9022 /* No padding to call to global or undefined tls_get_addr. */
9023 if ((S_IS_EXTERNAL (s) || !S_IS_DEFINED (s))
9024 && strcmp (S_GET_NAME (s), tls_get_addr) == 0)
9025 return 0;
9026 }
9027 }
9028
9029 if (add_padding
9030 && last_insn.kind != last_insn_other
9031 && last_insn.seg == now_seg)
9032 {
9033 if (flag_debug)
9034 as_warn_where (last_insn.file, last_insn.line,
9035 _("`%s` skips -malign-branch-boundary on `%s`"),
9036 last_insn.name, i.tm.name);
9037 return 0;
9038 }
9039
9040 return add_padding;
9041 }
9042
9043 static void
9044 output_insn (void)
9045 {
9046 fragS *insn_start_frag;
9047 offsetT insn_start_off;
9048 fragS *fragP = NULL;
9049 enum align_branch_kind branch = align_branch_none;
9050 /* The initializer is arbitrary, just to avoid an uninitialized
9051 variable error. It is actually either assigned in
9052 add_branch_padding_frag_p or never used. */
9053 enum mf_jcc_kind mf_jcc = mf_jcc_jo;
9054
9055 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9056 if (IS_ELF && x86_used_note)
9057 {
9058 if (i.tm.cpu_flags.bitfield.cpucmov)
9059 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
9060 if (i.tm.cpu_flags.bitfield.cpusse)
9061 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE;
9062 if (i.tm.cpu_flags.bitfield.cpusse2)
9063 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE2;
9064 if (i.tm.cpu_flags.bitfield.cpusse3)
9065 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE3;
9066 if (i.tm.cpu_flags.bitfield.cpussse3)
9067 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSSE3;
9068 if (i.tm.cpu_flags.bitfield.cpusse4_1)
9069 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_1;
9070 if (i.tm.cpu_flags.bitfield.cpusse4_2)
9071 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_2;
9072 if (i.tm.cpu_flags.bitfield.cpuavx)
9073 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX;
9074 if (i.tm.cpu_flags.bitfield.cpuavx2)
9075 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX2;
9076 if (i.tm.cpu_flags.bitfield.cpufma)
9077 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_FMA;
9078 if (i.tm.cpu_flags.bitfield.cpuavx512f)
9079 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512F;
9080 if (i.tm.cpu_flags.bitfield.cpuavx512cd)
9081 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512CD;
9082 if (i.tm.cpu_flags.bitfield.cpuavx512er)
9083 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512ER;
9084 if (i.tm.cpu_flags.bitfield.cpuavx512pf)
9085 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512PF;
9086 if (i.tm.cpu_flags.bitfield.cpuavx512vl)
9087 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512VL;
9088 if (i.tm.cpu_flags.bitfield.cpuavx512dq)
9089 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512DQ;
9090 if (i.tm.cpu_flags.bitfield.cpuavx512bw)
9091 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512BW;
9092 if (i.tm.cpu_flags.bitfield.cpuavx512_4fmaps)
9093 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS;
9094 if (i.tm.cpu_flags.bitfield.cpuavx512_4vnniw)
9095 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW;
9096 if (i.tm.cpu_flags.bitfield.cpuavx512_bitalg)
9097 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG;
9098 if (i.tm.cpu_flags.bitfield.cpuavx512ifma)
9099 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA;
9100 if (i.tm.cpu_flags.bitfield.cpuavx512vbmi)
9101 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI;
9102 if (i.tm.cpu_flags.bitfield.cpuavx512_vbmi2)
9103 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2;
9104 if (i.tm.cpu_flags.bitfield.cpuavx512_vnni)
9105 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI;
9106 if (i.tm.cpu_flags.bitfield.cpuavx512_bf16)
9107 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BF16;
9108
9109 if (i.tm.cpu_flags.bitfield.cpu8087
9110 || i.tm.cpu_flags.bitfield.cpu287
9111 || i.tm.cpu_flags.bitfield.cpu387
9112 || i.tm.cpu_flags.bitfield.cpu687
9113 || i.tm.cpu_flags.bitfield.cpufisttp)
9114 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
9115 if (i.has_regmmx
9116 || i.tm.base_opcode == 0xf77 /* emms */
9117 || i.tm.base_opcode == 0xf0e /* femms */
9118 || i.tm.base_opcode == 0xf2a /* cvtpi2ps */
9119 || i.tm.base_opcode == 0x660f2a /* cvtpi2pd */)
9120 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
9121 if (i.has_regxmm)
9122 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
9123 if (i.has_regymm)
9124 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
9125 if (i.has_regzmm)
9126 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
9127 if (i.tm.cpu_flags.bitfield.cpufxsr)
9128 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
9129 if (i.tm.cpu_flags.bitfield.cpuxsave)
9130 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVE;
9131 if (i.tm.cpu_flags.bitfield.cpuxsaveopt)
9132 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
9133 if (i.tm.cpu_flags.bitfield.cpuxsavec)
9134 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
9135 }
9136 #endif
9137
9138 /* Tie dwarf2 debug info to the address at the start of the insn.
9139 We can't do this after the insn has been output as the current
9140 frag may have been closed off, e.g. by frag_var. */
9141 dwarf2_emit_insn (0);
9142
9143 insn_start_frag = frag_now;
9144 insn_start_off = frag_now_fix ();
9145
9146 if (add_branch_padding_frag_p (&branch, &mf_jcc))
9147 {
9148 char *p;
9149 /* Branch can be 8 bytes. Leave some room for prefixes. */
9150 unsigned int max_branch_padding_size = 14;
9151
9152 /* Align section to boundary. */
9153 record_alignment (now_seg, align_branch_power);
9154
9155 /* Make room for padding. */
9156 frag_grow (max_branch_padding_size);
9157
9158 /* Start of the padding. */
9159 p = frag_more (0);
9160
9161 fragP = frag_now;
9162
9163 frag_var (rs_machine_dependent, max_branch_padding_size, 0,
9164 ENCODE_RELAX_STATE (BRANCH_PADDING, 0),
9165 NULL, 0, p);
9166
9167 fragP->tc_frag_data.mf_type = mf_jcc;
9168 fragP->tc_frag_data.branch_type = branch;
9169 fragP->tc_frag_data.max_bytes = max_branch_padding_size;
9170 }
9171
9172 /* Output jumps. */
9173 if (i.tm.opcode_modifier.jump == JUMP)
9174 output_branch ();
9175 else if (i.tm.opcode_modifier.jump == JUMP_BYTE
9176 || i.tm.opcode_modifier.jump == JUMP_DWORD)
9177 output_jump ();
9178 else if (i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT)
9179 output_interseg_jump ();
9180 else
9181 {
9182 /* Output normal instructions here. */
9183 char *p;
9184 unsigned char *q;
9185 unsigned int j;
9186 unsigned int prefix;
9187 enum mf_cmp_kind mf_cmp;
9188
9189 if (avoid_fence
9190 && (i.tm.base_opcode == 0xfaee8
9191 || i.tm.base_opcode == 0xfaef0
9192 || i.tm.base_opcode == 0xfaef8))
9193 {
9194 /* Encode lfence, mfence, and sfence as
9195 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9196 offsetT val = 0x240483f0ULL;
9197 p = frag_more (5);
9198 md_number_to_chars (p, val, 5);
9199 return;
9200 }
9201
9202 /* Some processors fail on the LOCK prefix. This option makes the
9203 assembler ignore the LOCK prefix and serves as a workaround. */
9204 if (omit_lock_prefix)
9205 {
9206 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
9207 return;
9208 i.prefix[LOCK_PREFIX] = 0;
9209 }
9210
9211 if (branch)
9212 /* Skip if this is a branch. */
9213 ;
9214 else if (add_fused_jcc_padding_frag_p (&mf_cmp))
9215 {
9216 /* Make room for padding. */
9217 frag_grow (MAX_FUSED_JCC_PADDING_SIZE);
9218 p = frag_more (0);
9219
9220 fragP = frag_now;
9221
9222 frag_var (rs_machine_dependent, MAX_FUSED_JCC_PADDING_SIZE, 0,
9223 ENCODE_RELAX_STATE (FUSED_JCC_PADDING, 0),
9224 NULL, 0, p);
9225
9226 fragP->tc_frag_data.mf_type = mf_cmp;
9227 fragP->tc_frag_data.branch_type = align_branch_fused;
9228 fragP->tc_frag_data.max_bytes = MAX_FUSED_JCC_PADDING_SIZE;
9229 }
9230 else if (add_branch_prefix_frag_p ())
9231 {
9232 unsigned int max_prefix_size = align_branch_prefix_size;
9233
9234 /* Make room for padding. */
9235 frag_grow (max_prefix_size);
9236 p = frag_more (0);
9237
9238 fragP = frag_now;
9239
9240 frag_var (rs_machine_dependent, max_prefix_size, 0,
9241 ENCODE_RELAX_STATE (BRANCH_PREFIX, 0),
9242 NULL, 0, p);
9243
9244 fragP->tc_frag_data.max_bytes = max_prefix_size;
9245 }
9246
9247 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9248 don't need the explicit prefix. */
9249 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
9250 {
9251 switch (i.tm.opcode_length)
9252 {
9253 case 3:
9254 if (i.tm.base_opcode & 0xff000000)
9255 {
9256 prefix = (i.tm.base_opcode >> 24) & 0xff;
9257 if (!i.tm.cpu_flags.bitfield.cpupadlock
9258 || prefix != REPE_PREFIX_OPCODE
9259 || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
9260 add_prefix (prefix);
9261 }
9262 break;
9263 case 2:
9264 if ((i.tm.base_opcode & 0xff0000) != 0)
9265 {
9266 prefix = (i.tm.base_opcode >> 16) & 0xff;
9267 add_prefix (prefix);
9268 }
9269 break;
9270 case 1:
9271 break;
9272 case 0:
9273 /* Check for pseudo prefixes. */
9274 as_bad_where (insn_start_frag->fr_file,
9275 insn_start_frag->fr_line,
9276 _("pseudo prefix without instruction"));
9277 return;
9278 default:
9279 abort ();
9280 }
9281
9282 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9283 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9284 R_X86_64_GOTTPOFF relocation so that linker can safely
9285 perform IE->LE optimization. A dummy REX_OPCODE prefix
9286 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9287 relocation for GDesc -> IE/LE optimization. */
9288 if (x86_elf_abi == X86_64_X32_ABI
9289 && i.operands == 2
9290 && (i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
9291 || i.reloc[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC)
9292 && i.prefix[REX_PREFIX] == 0)
9293 add_prefix (REX_OPCODE);
9294 #endif
9295
9296 /* The prefix bytes. */
9297 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
9298 if (*q)
9299 FRAG_APPEND_1_CHAR (*q);
9300 }
9301 else
9302 {
9303 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
9304 if (*q)
9305 switch (j)
9306 {
9307 case SEG_PREFIX:
9308 case ADDR_PREFIX:
9309 FRAG_APPEND_1_CHAR (*q);
9310 break;
9311 default:
9312 /* There should be no other prefixes for instructions
9313 with VEX prefix. */
9314 abort ();
9315 }
9316
9317 /* For EVEX instructions i.vrex should become 0 after
9318 build_evex_prefix. For VEX instructions the upper 16 registers
9319 aren't available, so VREX should be 0. */
9320 if (i.vrex)
9321 abort ();
9322 /* Now the VEX prefix. */
9323 p = frag_more (i.vex.length);
9324 for (j = 0; j < i.vex.length; j++)
9325 p[j] = i.vex.bytes[j];
9326 }
9327
9328 /* Now the opcode; be careful about word order here! */
9329 if (i.tm.opcode_length == 1)
9330 {
9331 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
9332 }
9333 else
9334 {
9335 switch (i.tm.opcode_length)
9336 {
9337 case 4:
9338 p = frag_more (4);
9339 *p++ = (i.tm.base_opcode >> 24) & 0xff;
9340 *p++ = (i.tm.base_opcode >> 16) & 0xff;
9341 break;
9342 case 3:
9343 p = frag_more (3);
9344 *p++ = (i.tm.base_opcode >> 16) & 0xff;
9345 break;
9346 case 2:
9347 p = frag_more (2);
9348 break;
9349 default:
9350 abort ();
9351 break;
9352 }
9353
9354 /* Put out high byte first: can't use md_number_to_chars! */
9355 *p++ = (i.tm.base_opcode >> 8) & 0xff;
9356 *p = i.tm.base_opcode & 0xff;
9357 }
9358
9359 /* Now the modrm byte and sib byte (if present). */
9360 if (i.tm.opcode_modifier.modrm)
9361 {
9362 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
9363 | i.rm.reg << 3
9364 | i.rm.mode << 6));
9365 /* If i.rm.regmem == ESP (4)
9366 && i.rm.mode != (Register mode)
9367 && not 16 bit
9368 ==> need second modrm byte. */
9369 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
9370 && i.rm.mode != 3
9371 && !(i.base_reg && i.base_reg->reg_type.bitfield.word))
9372 FRAG_APPEND_1_CHAR ((i.sib.base << 0
9373 | i.sib.index << 3
9374 | i.sib.scale << 6));
9375 }
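
/* Worked example (illustrative): "movl 4(%esp),%eax" needs the SIB
   escape described above: ModRM 0x44 (mod=1, reg=0, rm=4), SIB 0x24
   (scale=0, index=4 meaning none, base=4 for %esp), then the disp8
   byte 0x04, i.e. 8b 44 24 04.  */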
9376
9377 if (i.disp_operands)
9378 output_disp (insn_start_frag, insn_start_off);
9379
9380 if (i.imm_operands)
9381 output_imm (insn_start_frag, insn_start_off);
9382
9383 /*
9384 * frag_now_fix () returning plain abs_section_offset when we're in the
9385 * absolute section, and abs_section_offset not getting updated as data
9386 * gets added to the frag breaks the logic below.
9387 */
9388 if (now_seg != absolute_section)
9389 {
9390 j = encoding_length (insn_start_frag, insn_start_off, frag_more (0));
9391 if (j > 15)
9392 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9393 j);
9394 else if (fragP)
9395 {
9396 /* NB: Don't add prefix with GOTPC relocation since
9397 output_disp() above depends on the fixed encoding
9398 length. Can't add prefix with TLS relocation since
9399 it breaks TLS linker optimization. */
9400 unsigned int max = i.has_gotpc_tls_reloc ? 0 : 15 - j;
9401 /* Prefix count on the current instruction. */
9402 unsigned int count = i.vex.length;
9403 unsigned int k;
9404 for (k = 0; k < ARRAY_SIZE (i.prefix); k++)
9405 /* REX byte is encoded in VEX/EVEX prefix. */
9406 if (i.prefix[k] && (k != REX_PREFIX || !i.vex.length))
9407 count++;
9408
9409 /* Count prefixes for extended opcode maps. */
9410 if (!i.vex.length)
9411 switch (i.tm.opcode_length)
9412 {
9413 case 3:
9414 if (((i.tm.base_opcode >> 16) & 0xff) == 0xf)
9415 {
9416 count++;
9417 switch ((i.tm.base_opcode >> 8) & 0xff)
9418 {
9419 case 0x38:
9420 case 0x3a:
9421 count++;
9422 break;
9423 default:
9424 break;
9425 }
9426 }
9427 break;
9428 case 2:
9429 if (((i.tm.base_opcode >> 8) & 0xff) == 0xf)
9430 count++;
9431 break;
9432 case 1:
9433 break;
9434 default:
9435 abort ();
9436 }
9437
9438 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
9439 == BRANCH_PREFIX)
9440 {
9441 /* Set the maximum prefix size in BRANCH_PREFIX
9442 frag. */
9443 if (fragP->tc_frag_data.max_bytes > max)
9444 fragP->tc_frag_data.max_bytes = max;
9445 if (fragP->tc_frag_data.max_bytes > count)
9446 fragP->tc_frag_data.max_bytes -= count;
9447 else
9448 fragP->tc_frag_data.max_bytes = 0;
9449 }
9450 else
9451 {
9452 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9453 frag. */
9454 unsigned int max_prefix_size;
9455 if (align_branch_prefix_size > max)
9456 max_prefix_size = max;
9457 else
9458 max_prefix_size = align_branch_prefix_size;
9459 if (max_prefix_size > count)
9460 fragP->tc_frag_data.max_prefix_length
9461 = max_prefix_size - count;
9462 }
9463
9464 /* Use existing segment prefix if possible. Use CS
9465 segment prefix in 64-bit mode. In 32-bit mode, use SS
9466 segment prefix with ESP/EBP base register and use DS
9467 segment prefix without ESP/EBP base register. */
9468 if (i.prefix[SEG_PREFIX])
9469 fragP->tc_frag_data.default_prefix = i.prefix[SEG_PREFIX];
9470 else if (flag_code == CODE_64BIT)
9471 fragP->tc_frag_data.default_prefix = CS_PREFIX_OPCODE;
9472 else if (i.base_reg
9473 && (i.base_reg->reg_num == 4
9474 || i.base_reg->reg_num == 5))
9475 fragP->tc_frag_data.default_prefix = SS_PREFIX_OPCODE;
9476 else
9477 fragP->tc_frag_data.default_prefix = DS_PREFIX_OPCODE;
9478 }
9479 }
9480 }
9481
9482 /* NB: Don't work with COND_JUMP86 without i386. */
9483 if (align_branch_power
9484 && now_seg != absolute_section
9485 && cpu_arch_flags.bitfield.cpui386)
9486 {
9487 /* Terminate each frag so that we can add prefix and check for
9488 fused jcc. */
9489 frag_wane (frag_now);
9490 frag_new (0);
9491 }
9492
9493 #ifdef DEBUG386
9494 if (flag_debug)
9495 {
9496 pi ("" /*line*/, &i);
9497 }
9498 #endif /* DEBUG386 */
9499 }
9500
9501 /* Return the size of the displacement operand N. */
9502
9503 static int
9504 disp_size (unsigned int n)
9505 {
9506 int size = 4;
9507
9508 if (i.types[n].bitfield.disp64)
9509 size = 8;
9510 else if (i.types[n].bitfield.disp8)
9511 size = 1;
9512 else if (i.types[n].bitfield.disp16)
9513 size = 2;
9514 return size;
9515 }
9516
9517 /* Return the size of the immediate operand N. */
9518
9519 static int
9520 imm_size (unsigned int n)
9521 {
9522 int size = 4;
9523 if (i.types[n].bitfield.imm64)
9524 size = 8;
9525 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
9526 size = 1;
9527 else if (i.types[n].bitfield.imm16)
9528 size = 2;
9529 return size;
9530 }
9531
9532 static void
9533 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
9534 {
9535 char *p;
9536 unsigned int n;
9537
9538 for (n = 0; n < i.operands; n++)
9539 {
9540 if (operand_type_check (i.types[n], disp))
9541 {
9542 if (i.op[n].disps->X_op == O_constant)
9543 {
9544 int size = disp_size (n);
9545 offsetT val = i.op[n].disps->X_add_number;
9546
9547 val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
9548 size);
9549 p = frag_more (size);
9550 md_number_to_chars (p, val, size);
9551 }
9552 else
9553 {
9554 enum bfd_reloc_code_real reloc_type;
9555 int size = disp_size (n);
9556 int sign = i.types[n].bitfield.disp32s;
9557 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
9558 fixS *fixP;
9559
9560 /* We can't have 8 bit displacement here. */
9561 gas_assert (!i.types[n].bitfield.disp8);
9562
9563 /* The PC relative address is computed relative
9564 to the instruction boundary, so in case immediate
9565 fields follow, we need to adjust the value. */
9566 if (pcrel && i.imm_operands)
9567 {
9568 unsigned int n1;
9569 int sz = 0;
9570
9571 for (n1 = 0; n1 < i.operands; n1++)
9572 if (operand_type_check (i.types[n1], imm))
9573 {
9574 /* Only one immediate is allowed for PC
9575 relative address. */
9576 gas_assert (sz == 0);
9577 sz = imm_size (n1);
9578 i.op[n].disps->X_add_number -= sz;
9579 }
9580 /* We should find the immediate. */
9581 gas_assert (sz != 0);
9582 }
9583
9584 p = frag_more (size);
9585 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
9586 if (GOT_symbol
9587 && GOT_symbol == i.op[n].disps->X_add_symbol
9588 && (((reloc_type == BFD_RELOC_32
9589 || reloc_type == BFD_RELOC_X86_64_32S
9590 || (reloc_type == BFD_RELOC_64
9591 && object_64bit))
9592 && (i.op[n].disps->X_op == O_symbol
9593 || (i.op[n].disps->X_op == O_add
9594 && ((symbol_get_value_expression
9595 (i.op[n].disps->X_op_symbol)->X_op)
9596 == O_subtract))))
9597 || reloc_type == BFD_RELOC_32_PCREL))
9598 {
9599 if (!object_64bit)
9600 {
9601 reloc_type = BFD_RELOC_386_GOTPC;
9602 i.has_gotpc_tls_reloc = TRUE;
9603 i.op[n].imms->X_add_number +=
9604 encoding_length (insn_start_frag, insn_start_off, p);
9605 }
9606 else if (reloc_type == BFD_RELOC_64)
9607 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9608 else
9609 /* Don't do the adjustment for x86-64, as there
9610 the pcrel addressing is relative to the _next_
9611 insn, and that is taken care of in other code. */
9612 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9613 }
9614 else if (align_branch_power)
9615 {
9616 switch (reloc_type)
9617 {
9618 case BFD_RELOC_386_TLS_GD:
9619 case BFD_RELOC_386_TLS_LDM:
9620 case BFD_RELOC_386_TLS_IE:
9621 case BFD_RELOC_386_TLS_IE_32:
9622 case BFD_RELOC_386_TLS_GOTIE:
9623 case BFD_RELOC_386_TLS_GOTDESC:
9624 case BFD_RELOC_386_TLS_DESC_CALL:
9625 case BFD_RELOC_X86_64_TLSGD:
9626 case BFD_RELOC_X86_64_TLSLD:
9627 case BFD_RELOC_X86_64_GOTTPOFF:
9628 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9629 case BFD_RELOC_X86_64_TLSDESC_CALL:
9630 i.has_gotpc_tls_reloc = TRUE;
9631 default:
9632 break;
9633 }
9634 }
9635 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
9636 size, i.op[n].disps, pcrel,
9637 reloc_type);
9638 /* Check for "call/jmp *mem", "mov mem, %reg",
9639 "test %reg, mem" and "binop mem, %reg" where binop
9640 is one of adc, add, and, cmp, or, sbb, sub, xor
9641 instructions without data prefix. Always generate
9642 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9643 if (i.prefix[DATA_PREFIX] == 0
9644 && (generate_relax_relocations
9645 || (!object_64bit
9646 && i.rm.mode == 0
9647 && i.rm.regmem == 5))
9648 && (i.rm.mode == 2
9649 || (i.rm.mode == 0 && i.rm.regmem == 5))
9650 && !is_any_vex_encoding(&i.tm)
9651 && ((i.operands == 1
9652 && i.tm.base_opcode == 0xff
9653 && (i.rm.reg == 2 || i.rm.reg == 4))
9654 || (i.operands == 2
9655 && (i.tm.base_opcode == 0x8b
9656 || i.tm.base_opcode == 0x85
9657 || (i.tm.base_opcode & ~0x38) == 0x03))))
9658 {
9659 if (object_64bit)
9660 {
9661 fixP->fx_tcbit = i.rex != 0;
9662 if (i.base_reg
9663 && (i.base_reg->reg_num == RegIP))
9664 fixP->fx_tcbit2 = 1;
9665 }
9666 else
9667 fixP->fx_tcbit2 = 1;
9668 }
9669 }
9670 }
9671 }
9672 }
9673
9674 static void
9675 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
9676 {
9677 char *p;
9678 unsigned int n;
9679
9680 for (n = 0; n < i.operands; n++)
9681 {
9682 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9683 if (i.rounding && (int) n == i.rounding->operand)
9684 continue;
9685
9686 if (operand_type_check (i.types[n], imm))
9687 {
9688 if (i.op[n].imms->X_op == O_constant)
9689 {
9690 int size = imm_size (n);
9691 offsetT val;
9692
9693 val = offset_in_range (i.op[n].imms->X_add_number,
9694 size);
9695 p = frag_more (size);
9696 md_number_to_chars (p, val, size);
9697 }
9698 else
9699 {
9700 /* Not absolute_section.
9701 Need a 32-bit fixup (don't support 8bit
9702 non-absolute imms). Try to support other
9703 sizes ... */
9704 enum bfd_reloc_code_real reloc_type;
9705 int size = imm_size (n);
9706 int sign;
9707
9708 if (i.types[n].bitfield.imm32s
9709 && (i.suffix == QWORD_MNEM_SUFFIX
9710 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
9711 sign = 1;
9712 else
9713 sign = 0;
9714
9715 p = frag_more (size);
9716 reloc_type = reloc (size, 0, sign, i.reloc[n]);
9717
9718 /* This is tough to explain. We end up with this one if we
9719 * have operands that look like
9720 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9721 * obtain the absolute address of the GOT, and it is strongly
9722 * preferable from a performance point of view to avoid using
9723 * a runtime relocation for this. The actual sequence of
	       * instructions often looks something like:
9725 *
9726 * call .L66
9727 * .L66:
9728 * popl %ebx
9729 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9730 *
9731 * The call and pop essentially return the absolute address
9732 * of the label .L66 and store it in %ebx. The linker itself
9733 * will ultimately change the first operand of the addl so
9734 * that %ebx points to the GOT, but to keep things simple, the
9735 * .o file must have this operand set so that it generates not
9736 * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker to simply treat a GOTPC
9738 * relocation as asking for a pcrel offset to the GOT to be
9739 * added in, and the addend of the relocation is stored in the
9740 * operand field for the instruction itself.
9741 *
9742 * Our job here is to fix the operand so that it would add
9743 * the correct offset so that %ebx would point to itself. The
9744 * thing that is tricky is that .-.L66 will point to the
9745 * beginning of the instruction, so we need to further modify
9746 * the operand so that it will point to itself. There are
9747 * other cases where you have something like:
9748 *
9749 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9750 *
9751 * and here no correction would be required. Internally in
9752 * the assembler we treat operands of this form as not being
9753 * pcrel since the '.' is explicitly mentioned, and I wonder
9754 * whether it would simplify matters to do it this way. Who
9755 * knows. In earlier versions of the PIC patches, the
9756 * pcrel_adjust field was used to store the correction, but
9757 * since the expression is not pcrel, I felt it would be
9758 * confusing to do it this way. */
9759
9760 if ((reloc_type == BFD_RELOC_32
9761 || reloc_type == BFD_RELOC_X86_64_32S
9762 || reloc_type == BFD_RELOC_64)
9763 && GOT_symbol
9764 && GOT_symbol == i.op[n].imms->X_add_symbol
9765 && (i.op[n].imms->X_op == O_symbol
9766 || (i.op[n].imms->X_op == O_add
9767 && ((symbol_get_value_expression
9768 (i.op[n].imms->X_op_symbol)->X_op)
9769 == O_subtract))))
9770 {
9771 if (!object_64bit)
9772 reloc_type = BFD_RELOC_386_GOTPC;
9773 else if (size == 4)
9774 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9775 else if (size == 8)
9776 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9777 i.has_gotpc_tls_reloc = TRUE;
9778 i.op[n].imms->X_add_number +=
9779 encoding_length (insn_start_frag, insn_start_off, p);
9780 }
9781 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
9782 i.op[n].imms, 0, reloc_type);
9783 }
9784 }
9785 }
9786 }
9787 \f
9788 /* x86_cons_fix_new is called via the expression parsing code when a
9789 reloc is needed. We use this hook to get the correct .got reloc. */
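/* For example, a directive such as ".long foo@GOTOFF" arrives here
   (after lex_got has stripped the @GOTOFF token in x86_cons) with R
   already holding the GOT reloc; reloc () then maps it to the variant
   matching LEN.  */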
9790 static int cons_sign = -1;
9791
9792 void
9793 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
9794 expressionS *exp, bfd_reloc_code_real_type r)
9795 {
9796 r = reloc (len, 0, cons_sign, r);
9797
9798 #ifdef TE_PE
9799 if (exp->X_op == O_secrel)
9800 {
9801 exp->X_op = O_symbol;
9802 r = BFD_RELOC_32_SECREL;
9803 }
9804 #endif
9805
9806 fix_new_exp (frag, off, len, exp, 0, r);
9807 }
9808
9809 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9810 purpose of the `.dc.a' internal pseudo-op. */
9811
9812 int
9813 x86_address_bytes (void)
9814 {
9815 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
9816 return 4;
9817 return stdoutput->arch_info->bits_per_address / 8;
9818 }
9819
9820 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9821 || defined (LEX_AT)
9822 # define lex_got(reloc, adjust, types) NULL
9823 #else
9824 /* Parse operands of the form
9825 <symbol>@GOTOFF+<nnn>
9826 and similar .plt or .got references.
9827
9828 If we find one, set up the correct relocation in RELOC and copy the
9829 input string, minus the `@GOTOFF' into a malloc'd buffer for
9830 parsing by the calling routine. Return this buffer, and if ADJUST
9831 is non-null set it to the length of the string we removed from the
9832 input line. Otherwise return NULL. */
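/* As a worked example (derived from the code below): for the input
   "foo@GOTOFF+4" this returns a buffer holding "foo +4" (the reloc
   token replaced by a blank so that junk like "foo@GOTOFF1" is still
   diagnosed), sets *REL to BFD_RELOC_386_GOTOFF (or
   BFD_RELOC_X86_64_GOTOFF64 for 64-bit output) and *ADJUST to the net
   number of characters removed.  */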
9833 static char *
9834 lex_got (enum bfd_reloc_code_real *rel,
9835 int *adjust,
9836 i386_operand_type *types)
9837 {
  /* Some of the relocations depend on the size of the field to be
     relocated.  But in our callers i386_immediate and i386_displacement
9840 we don't yet know the operand size (this will be set by insn
9841 matching). Hence we record the word32 relocation here,
9842 and adjust the reloc according to the real size in reloc(). */
9843 static const struct {
9844 const char *str;
9845 int len;
9846 const enum bfd_reloc_code_real rel[2];
9847 const i386_operand_type types64;
9848 } gotrel[] = {
9849 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9850 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
9851 BFD_RELOC_SIZE32 },
9852 OPERAND_TYPE_IMM32_64 },
9853 #endif
9854 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
9855 BFD_RELOC_X86_64_PLTOFF64 },
9856 OPERAND_TYPE_IMM64 },
9857 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
9858 BFD_RELOC_X86_64_PLT32 },
9859 OPERAND_TYPE_IMM32_32S_DISP32 },
9860 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
9861 BFD_RELOC_X86_64_GOTPLT64 },
9862 OPERAND_TYPE_IMM64_DISP64 },
9863 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
9864 BFD_RELOC_X86_64_GOTOFF64 },
9865 OPERAND_TYPE_IMM64_DISP64 },
9866 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
9867 BFD_RELOC_X86_64_GOTPCREL },
9868 OPERAND_TYPE_IMM32_32S_DISP32 },
9869 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
9870 BFD_RELOC_X86_64_TLSGD },
9871 OPERAND_TYPE_IMM32_32S_DISP32 },
9872 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
9873 _dummy_first_bfd_reloc_code_real },
9874 OPERAND_TYPE_NONE },
9875 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
9876 BFD_RELOC_X86_64_TLSLD },
9877 OPERAND_TYPE_IMM32_32S_DISP32 },
9878 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
9879 BFD_RELOC_X86_64_GOTTPOFF },
9880 OPERAND_TYPE_IMM32_32S_DISP32 },
9881 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
9882 BFD_RELOC_X86_64_TPOFF32 },
9883 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9884 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
9885 _dummy_first_bfd_reloc_code_real },
9886 OPERAND_TYPE_NONE },
9887 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
9888 BFD_RELOC_X86_64_DTPOFF32 },
9889 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9890 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
9891 _dummy_first_bfd_reloc_code_real },
9892 OPERAND_TYPE_NONE },
9893 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
9894 _dummy_first_bfd_reloc_code_real },
9895 OPERAND_TYPE_NONE },
9896 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
9897 BFD_RELOC_X86_64_GOT32 },
9898 OPERAND_TYPE_IMM32_32S_64_DISP32 },
9899 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
9900 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
9901 OPERAND_TYPE_IMM32_32S_DISP32 },
9902 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
9903 BFD_RELOC_X86_64_TLSDESC_CALL },
9904 OPERAND_TYPE_IMM32_32S_DISP32 },
9905 };
9906 char *cp;
9907 unsigned int j;
9908
9909 #if defined (OBJ_MAYBE_ELF)
9910 if (!IS_ELF)
9911 return NULL;
9912 #endif
9913
9914 for (cp = input_line_pointer; *cp != '@'; cp++)
9915 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
9916 return NULL;
9917
9918 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
9919 {
9920 int len = gotrel[j].len;
9921 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
9922 {
9923 if (gotrel[j].rel[object_64bit] != 0)
9924 {
9925 int first, second;
9926 char *tmpbuf, *past_reloc;
9927
9928 *rel = gotrel[j].rel[object_64bit];
9929
9930 if (types)
9931 {
9932 if (flag_code != CODE_64BIT)
9933 {
9934 types->bitfield.imm32 = 1;
9935 types->bitfield.disp32 = 1;
9936 }
9937 else
9938 *types = gotrel[j].types64;
9939 }
9940
9941 if (j != 0 && GOT_symbol == NULL)
9942 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
9943
9944 /* The length of the first part of our input line. */
9945 first = cp - input_line_pointer;
9946
9947 /* The second part goes from after the reloc token until
9948 (and including) an end_of_line char or comma. */
9949 past_reloc = cp + 1 + len;
9950 cp = past_reloc;
9951 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
9952 ++cp;
9953 second = cp + 1 - past_reloc;
9954
9955 /* Allocate and copy string. The trailing NUL shouldn't
9956 be necessary, but be safe. */
9957 tmpbuf = XNEWVEC (char, first + second + 2);
9958 memcpy (tmpbuf, input_line_pointer, first);
9959 if (second != 0 && *past_reloc != ' ')
9960 /* Replace the relocation token with ' ', so that
9961 errors like foo@GOTOFF1 will be detected. */
9962 tmpbuf[first++] = ' ';
9963 else
9964 /* Increment length by 1 if the relocation token is
9965 removed. */
9966 len++;
9967 if (adjust)
9968 *adjust = len;
9969 memcpy (tmpbuf + first, past_reloc, second);
9970 tmpbuf[first + second] = '\0';
9971 return tmpbuf;
9972 }
9973
9974 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9975 gotrel[j].str, 1 << (5 + object_64bit));
9976 return NULL;
9977 }
9978 }
9979
9980 /* Might be a symbol version string. Don't as_bad here. */
9981 return NULL;
9982 }
9983 #endif
9984
9985 #ifdef TE_PE
9986 #ifdef lex_got
9987 #undef lex_got
9988 #endif
9989 /* Parse operands of the form
9990 <symbol>@SECREL32+<nnn>
9991
9992 If we find one, set up the correct relocation in RELOC and copy the
9993 input string, minus the `@SECREL32' into a malloc'd buffer for
9994 parsing by the calling routine. Return this buffer, and if ADJUST
9995 is non-null set it to the length of the string we removed from the
9996 input line. Otherwise return NULL.
9997
   This function is copied from the ELF version above, adjusted for PE targets.  */
9999
10000 static char *
10001 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
10002 int *adjust ATTRIBUTE_UNUSED,
10003 i386_operand_type *types)
10004 {
10005 static const struct
10006 {
10007 const char *str;
10008 int len;
10009 const enum bfd_reloc_code_real rel[2];
10010 const i386_operand_type types64;
10011 }
10012 gotrel[] =
10013 {
10014 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
10015 BFD_RELOC_32_SECREL },
10016 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
10017 };
10018
10019 char *cp;
10020 unsigned j;
10021
10022 for (cp = input_line_pointer; *cp != '@'; cp++)
10023 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
10024 return NULL;
10025
10026 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
10027 {
10028 int len = gotrel[j].len;
10029
10030 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
10031 {
10032 if (gotrel[j].rel[object_64bit] != 0)
10033 {
10034 int first, second;
10035 char *tmpbuf, *past_reloc;
10036
10037 *rel = gotrel[j].rel[object_64bit];
10038 if (adjust)
10039 *adjust = len;
10040
10041 if (types)
10042 {
10043 if (flag_code != CODE_64BIT)
10044 {
10045 types->bitfield.imm32 = 1;
10046 types->bitfield.disp32 = 1;
10047 }
10048 else
10049 *types = gotrel[j].types64;
10050 }
10051
10052 /* The length of the first part of our input line. */
10053 first = cp - input_line_pointer;
10054
10055 /* The second part goes from after the reloc token until
10056 (and including) an end_of_line char or comma. */
10057 past_reloc = cp + 1 + len;
10058 cp = past_reloc;
10059 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
10060 ++cp;
10061 second = cp + 1 - past_reloc;
10062
10063 /* Allocate and copy string. The trailing NUL shouldn't
10064 be necessary, but be safe. */
10065 tmpbuf = XNEWVEC (char, first + second + 2);
10066 memcpy (tmpbuf, input_line_pointer, first);
10067 if (second != 0 && *past_reloc != ' ')
10068 /* Replace the relocation token with ' ', so that
		 errors like foo@SECREL321 will be detected.  */
10070 tmpbuf[first++] = ' ';
10071 memcpy (tmpbuf + first, past_reloc, second);
10072 tmpbuf[first + second] = '\0';
10073 return tmpbuf;
10074 }
10075
10076 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10077 gotrel[j].str, 1 << (5 + object_64bit));
10078 return NULL;
10079 }
10080 }
10081
10082 /* Might be a symbol version string. Don't as_bad here. */
10083 return NULL;
10084 }
10085
10086 #endif /* TE_PE */
10087
10088 bfd_reloc_code_real_type
10089 x86_cons (expressionS *exp, int size)
10090 {
10091 bfd_reloc_code_real_type got_reloc = NO_RELOC;
10092
10093 intel_syntax = -intel_syntax;
10094
10095 exp->X_md = 0;
10096 if (size == 4 || (object_64bit && size == 8))
10097 {
10098 /* Handle @GOTOFF and the like in an expression. */
10099 char *save;
10100 char *gotfree_input_line;
10101 int adjust = 0;
10102
10103 save = input_line_pointer;
10104 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
10105 if (gotfree_input_line)
10106 input_line_pointer = gotfree_input_line;
10107
10108 expression (exp);
10109
10110 if (gotfree_input_line)
10111 {
10112 /* expression () has merrily parsed up to the end of line,
10113 or a comma - in the wrong buffer. Transfer how far
10114 input_line_pointer has moved to the right buffer. */
10115 input_line_pointer = (save
10116 + (input_line_pointer - gotfree_input_line)
10117 + adjust);
10118 free (gotfree_input_line);
10119 if (exp->X_op == O_constant
10120 || exp->X_op == O_absent
10121 || exp->X_op == O_illegal
10122 || exp->X_op == O_register
10123 || exp->X_op == O_big)
10124 {
10125 char c = *input_line_pointer;
10126 *input_line_pointer = 0;
10127 as_bad (_("missing or invalid expression `%s'"), save);
10128 *input_line_pointer = c;
10129 }
10130 else if ((got_reloc == BFD_RELOC_386_PLT32
10131 || got_reloc == BFD_RELOC_X86_64_PLT32)
10132 && exp->X_op != O_symbol)
10133 {
10134 char c = *input_line_pointer;
10135 *input_line_pointer = 0;
10136 as_bad (_("invalid PLT expression `%s'"), save);
10137 *input_line_pointer = c;
10138 }
10139 }
10140 }
10141 else
10142 expression (exp);
10143
10144 intel_syntax = -intel_syntax;
10145
10146 if (intel_syntax)
10147 i386_intel_simplify (exp);
10148
10149 return got_reloc;
10150 }
10151
10152 static void
10153 signed_cons (int size)
10154 {
10155 if (flag_code == CODE_64BIT)
10156 cons_sign = 1;
10157 cons (size);
10158 cons_sign = -1;
10159 }
10160
10161 #ifdef TE_PE
10162 static void
10163 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
10164 {
10165 expressionS exp;
10166
10167 do
10168 {
10169 expression (&exp);
10170 if (exp.X_op == O_symbol)
10171 exp.X_op = O_secrel;
10172
10173 emit_expr (&exp, 4);
10174 }
10175 while (*input_line_pointer++ == ',');
10176
10177 input_line_pointer--;
10178 demand_empty_rest_of_line ();
10179 }
10180 #endif
10181
10182 /* Handle Vector operations. */
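/* These are the AVX512 operand decorations, e.g. the broadcast in
   "vaddps (%rax){1to16}, %zmm1, %zmm2" or the write mask plus zeroing
   in "vmovaps %zmm0, %zmm1{%k1}{z}".  */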
10183
10184 static char *
10185 check_VecOperations (char *op_string, char *op_end)
10186 {
10187 const reg_entry *mask;
10188 const char *saved;
10189 char *end_op;
10190
10191 while (*op_string
10192 && (op_end == NULL || op_string < op_end))
10193 {
10194 saved = op_string;
10195 if (*op_string == '{')
10196 {
10197 op_string++;
10198
10199 /* Check broadcasts. */
10200 if (strncmp (op_string, "1to", 3) == 0)
10201 {
10202 int bcst_type;
10203
10204 if (i.broadcast)
10205 goto duplicated_vec_op;
10206
10207 op_string += 3;
10208 if (*op_string == '8')
10209 bcst_type = 8;
10210 else if (*op_string == '4')
10211 bcst_type = 4;
10212 else if (*op_string == '2')
10213 bcst_type = 2;
10214 else if (*op_string == '1'
10215 && *(op_string+1) == '6')
10216 {
10217 bcst_type = 16;
10218 op_string++;
10219 }
10220 else
10221 {
10222 as_bad (_("Unsupported broadcast: `%s'"), saved);
10223 return NULL;
10224 }
10225 op_string++;
10226
10227 broadcast_op.type = bcst_type;
10228 broadcast_op.operand = this_operand;
10229 broadcast_op.bytes = 0;
10230 i.broadcast = &broadcast_op;
10231 }
10232 /* Check masking operation. */
10233 else if ((mask = parse_register (op_string, &end_op)) != NULL)
10234 {
10235 if (mask == &bad_reg)
10236 return NULL;
10237
10238 /* k0 can't be used for write mask. */
10239 if (mask->reg_type.bitfield.class != RegMask || !mask->reg_num)
10240 {
10241 as_bad (_("`%s%s' can't be used for write mask"),
10242 register_prefix, mask->reg_name);
10243 return NULL;
10244 }
10245
10246 if (!i.mask)
10247 {
10248 mask_op.mask = mask;
10249 mask_op.zeroing = 0;
10250 mask_op.operand = this_operand;
10251 i.mask = &mask_op;
10252 }
10253 else
10254 {
10255 if (i.mask->mask)
10256 goto duplicated_vec_op;
10257
10258 i.mask->mask = mask;
10259
10260 /* Only "{z}" is allowed here. No need to check
10261 zeroing mask explicitly. */
10262 if (i.mask->operand != this_operand)
10263 {
10264 as_bad (_("invalid write mask `%s'"), saved);
10265 return NULL;
10266 }
10267 }
10268
10269 op_string = end_op;
10270 }
10271 /* Check zeroing-flag for masking operation. */
10272 else if (*op_string == 'z')
10273 {
10274 if (!i.mask)
10275 {
10276 mask_op.mask = NULL;
10277 mask_op.zeroing = 1;
10278 mask_op.operand = this_operand;
10279 i.mask = &mask_op;
10280 }
10281 else
10282 {
10283 if (i.mask->zeroing)
10284 {
10285 duplicated_vec_op:
10286 as_bad (_("duplicated `%s'"), saved);
10287 return NULL;
10288 }
10289
10290 i.mask->zeroing = 1;
10291
10292 /* Only "{%k}" is allowed here. No need to check mask
10293 register explicitly. */
10294 if (i.mask->operand != this_operand)
10295 {
10296 as_bad (_("invalid zeroing-masking `%s'"),
10297 saved);
10298 return NULL;
10299 }
10300 }
10301
10302 op_string++;
10303 }
10304 else
10305 goto unknown_vec_op;
10306
10307 if (*op_string != '}')
10308 {
10309 as_bad (_("missing `}' in `%s'"), saved);
10310 return NULL;
10311 }
10312 op_string++;
10313
10314 /* Strip whitespace since the addition of pseudo prefixes
10315 changed how the scrubber treats '{'. */
10316 if (is_space_char (*op_string))
10317 ++op_string;
10318
10319 continue;
10320 }
10321 unknown_vec_op:
10322 /* We don't know this one. */
10323 as_bad (_("unknown vector operation: `%s'"), saved);
10324 return NULL;
10325 }
10326
10327 if (i.mask && i.mask->zeroing && !i.mask->mask)
10328 {
10329 as_bad (_("zeroing-masking only allowed with write mask"));
10330 return NULL;
10331 }
10332
10333 return op_string;
10334 }
10335
10336 static int
10337 i386_immediate (char *imm_start)
10338 {
10339 char *save_input_line_pointer;
10340 char *gotfree_input_line;
10341 segT exp_seg = 0;
10342 expressionS *exp;
10343 i386_operand_type types;
10344
10345 operand_type_set (&types, ~0);
10346
10347 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
10348 {
10349 as_bad (_("at most %d immediate operands are allowed"),
10350 MAX_IMMEDIATE_OPERANDS);
10351 return 0;
10352 }
10353
10354 exp = &im_expressions[i.imm_operands++];
10355 i.op[this_operand].imms = exp;
10356
10357 if (is_space_char (*imm_start))
10358 ++imm_start;
10359
10360 save_input_line_pointer = input_line_pointer;
10361 input_line_pointer = imm_start;
10362
10363 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
10364 if (gotfree_input_line)
10365 input_line_pointer = gotfree_input_line;
10366
10367 exp_seg = expression (exp);
10368
10369 SKIP_WHITESPACE ();
10370
10371 /* Handle vector operations. */
10372 if (*input_line_pointer == '{')
10373 {
10374 input_line_pointer = check_VecOperations (input_line_pointer,
10375 NULL);
10376 if (input_line_pointer == NULL)
10377 return 0;
10378 }
10379
10380 if (*input_line_pointer)
10381 as_bad (_("junk `%s' after expression"), input_line_pointer);
10382
10383 input_line_pointer = save_input_line_pointer;
10384 if (gotfree_input_line)
10385 {
10386 free (gotfree_input_line);
10387
10388 if (exp->X_op == O_constant || exp->X_op == O_register)
10389 exp->X_op = O_illegal;
10390 }
10391
10392 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
10393 }
10394
10395 static int
10396 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
10397 i386_operand_type types, const char *imm_start)
10398 {
10399 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
10400 {
10401 if (imm_start)
10402 as_bad (_("missing or invalid immediate expression `%s'"),
10403 imm_start);
10404 return 0;
10405 }
10406 else if (exp->X_op == O_constant)
10407 {
10408 /* Size it properly later. */
10409 i.types[this_operand].bitfield.imm64 = 1;
10410 /* If not 64bit, sign extend val. */
10411 if (flag_code != CODE_64BIT
10412 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
10413 exp->X_add_number
10414 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
10415 }
10416 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10417 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
10418 && exp_seg != absolute_section
10419 && exp_seg != text_section
10420 && exp_seg != data_section
10421 && exp_seg != bss_section
10422 && exp_seg != undefined_section
10423 && !bfd_is_com_section (exp_seg))
10424 {
10425 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
10426 return 0;
10427 }
10428 #endif
10429 else if (!intel_syntax && exp_seg == reg_section)
10430 {
10431 if (imm_start)
10432 as_bad (_("illegal immediate register operand %s"), imm_start);
10433 return 0;
10434 }
10435 else
10436 {
10437 /* This is an address. The size of the address will be
10438 determined later, depending on destination register,
10439 suffix, or the default for the section. */
10440 i.types[this_operand].bitfield.imm8 = 1;
10441 i.types[this_operand].bitfield.imm16 = 1;
10442 i.types[this_operand].bitfield.imm32 = 1;
10443 i.types[this_operand].bitfield.imm32s = 1;
10444 i.types[this_operand].bitfield.imm64 = 1;
10445 i.types[this_operand] = operand_type_and (i.types[this_operand],
10446 types);
10447 }
10448
10449 return 1;
10450 }
10451
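/* Parse the scale factor of a memory operand; for instance, for the
   "8" in "(%rbx,%rcx,8)" the code below stores its log2, i.e. 3, in
   i.log2_scale_factor.  */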
10452 static char *
10453 i386_scale (char *scale)
10454 {
10455 offsetT val;
10456 char *save = input_line_pointer;
10457
10458 input_line_pointer = scale;
10459 val = get_absolute_expression ();
10460
10461 switch (val)
10462 {
10463 case 1:
10464 i.log2_scale_factor = 0;
10465 break;
10466 case 2:
10467 i.log2_scale_factor = 1;
10468 break;
10469 case 4:
10470 i.log2_scale_factor = 2;
10471 break;
10472 case 8:
10473 i.log2_scale_factor = 3;
10474 break;
10475 default:
10476 {
10477 char sep = *input_line_pointer;
10478
10479 *input_line_pointer = '\0';
10480 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10481 scale);
10482 *input_line_pointer = sep;
10483 input_line_pointer = save;
10484 return NULL;
10485 }
10486 }
10487 if (i.log2_scale_factor != 0 && i.index_reg == 0)
10488 {
10489 as_warn (_("scale factor of %d without an index register"),
10490 1 << i.log2_scale_factor);
10491 i.log2_scale_factor = 0;
10492 }
10493 scale = input_line_pointer;
10494 input_line_pointer = save;
10495 return scale;
10496 }
10497
10498 static int
10499 i386_displacement (char *disp_start, char *disp_end)
10500 {
10501 expressionS *exp;
10502 segT exp_seg = 0;
10503 char *save_input_line_pointer;
10504 char *gotfree_input_line;
10505 int override;
10506 i386_operand_type bigdisp, types = anydisp;
10507 int ret;
10508
10509 if (i.disp_operands == MAX_MEMORY_OPERANDS)
10510 {
10511 as_bad (_("at most %d displacement operands are allowed"),
10512 MAX_MEMORY_OPERANDS);
10513 return 0;
10514 }
10515
10516 operand_type_set (&bigdisp, 0);
10517 if (i.jumpabsolute
10518 || i.types[this_operand].bitfield.baseindex
10519 || (current_templates->start->opcode_modifier.jump != JUMP
10520 && current_templates->start->opcode_modifier.jump != JUMP_DWORD))
10521 {
10522 i386_addressing_mode ();
10523 override = (i.prefix[ADDR_PREFIX] != 0);
10524 if (flag_code == CODE_64BIT)
10525 {
10526 if (!override)
10527 {
10528 bigdisp.bitfield.disp32s = 1;
10529 bigdisp.bitfield.disp64 = 1;
10530 }
10531 else
10532 bigdisp.bitfield.disp32 = 1;
10533 }
10534 else if ((flag_code == CODE_16BIT) ^ override)
10535 bigdisp.bitfield.disp16 = 1;
10536 else
10537 bigdisp.bitfield.disp32 = 1;
10538 }
10539 else
10540 {
10541 /* For PC-relative branches, the width of the displacement may be
10542 dependent upon data size, but is never dependent upon address size.
10543 Also make sure to not unintentionally match against a non-PC-relative
10544 branch template. */
10545 static templates aux_templates;
10546 const insn_template *t = current_templates->start;
10547 bfd_boolean has_intel64 = FALSE;
10548
10549 aux_templates.start = t;
10550 while (++t < current_templates->end)
10551 {
10552 if (t->opcode_modifier.jump
10553 != current_templates->start->opcode_modifier.jump)
10554 break;
10555 if ((t->opcode_modifier.isa64 >= INTEL64))
10556 has_intel64 = TRUE;
10557 }
10558 if (t < current_templates->end)
10559 {
10560 aux_templates.end = t;
10561 current_templates = &aux_templates;
10562 }
10563
10564 override = (i.prefix[DATA_PREFIX] != 0);
10565 if (flag_code == CODE_64BIT)
10566 {
10567 if ((override || i.suffix == WORD_MNEM_SUFFIX)
10568 && (!intel64 || !has_intel64))
10569 bigdisp.bitfield.disp16 = 1;
10570 else
10571 bigdisp.bitfield.disp32s = 1;
10572 }
10573 else
10574 {
10575 if (!override)
10576 override = (i.suffix == (flag_code != CODE_16BIT
10577 ? WORD_MNEM_SUFFIX
10578 : LONG_MNEM_SUFFIX));
10579 bigdisp.bitfield.disp32 = 1;
10580 if ((flag_code == CODE_16BIT) ^ override)
10581 {
10582 bigdisp.bitfield.disp32 = 0;
10583 bigdisp.bitfield.disp16 = 1;
10584 }
10585 }
10586 }
10587 i.types[this_operand] = operand_type_or (i.types[this_operand],
10588 bigdisp);
10589
10590 exp = &disp_expressions[i.disp_operands];
10591 i.op[this_operand].disps = exp;
10592 i.disp_operands++;
10593 save_input_line_pointer = input_line_pointer;
10594 input_line_pointer = disp_start;
10595 END_STRING_AND_SAVE (disp_end);
10596
10597 #ifndef GCC_ASM_O_HACK
10598 #define GCC_ASM_O_HACK 0
10599 #endif
10600 #if GCC_ASM_O_HACK
10601 END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseindex
      && disp_end[-1] == '+')
10604 {
10605 /* This hack is to avoid a warning when using the "o"
10606 constraint within gcc asm statements.
10607 For instance:
10608
10609 #define _set_tssldt_desc(n,addr,limit,type) \
10610 __asm__ __volatile__ ( \
10611 "movw %w2,%0\n\t" \
10612 "movw %w1,2+%0\n\t" \
10613 "rorl $16,%1\n\t" \
10614 "movb %b1,4+%0\n\t" \
10615 "movb %4,5+%0\n\t" \
10616 "movb $0,6+%0\n\t" \
10617 "movb %h1,7+%0\n\t" \
10618 "rorl $16,%1" \
10619 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10620
10621 This works great except that the output assembler ends
10622 up looking a bit weird if it turns out that there is
10623 no offset. You end up producing code that looks like:
10624
10625 #APP
10626 movw $235,(%eax)
10627 movw %dx,2+(%eax)
10628 rorl $16,%edx
10629 movb %dl,4+(%eax)
10630 movb $137,5+(%eax)
10631 movb $0,6+(%eax)
10632 movb %dh,7+(%eax)
10633 rorl $16,%edx
10634 #NO_APP
10635
10636 So here we provide the missing zero. */
10637
      *disp_end = '0';
10639 }
10640 #endif
10641 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
10642 if (gotfree_input_line)
10643 input_line_pointer = gotfree_input_line;
10644
10645 exp_seg = expression (exp);
10646
10647 SKIP_WHITESPACE ();
10648 if (*input_line_pointer)
10649 as_bad (_("junk `%s' after expression"), input_line_pointer);
10650 #if GCC_ASM_O_HACK
10651 RESTORE_END_STRING (disp_end + 1);
10652 #endif
10653 input_line_pointer = save_input_line_pointer;
10654 if (gotfree_input_line)
10655 {
10656 free (gotfree_input_line);
10657
10658 if (exp->X_op == O_constant || exp->X_op == O_register)
10659 exp->X_op = O_illegal;
10660 }
10661
10662 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
10663
10664 RESTORE_END_STRING (disp_end);
10665
10666 return ret;
10667 }
10668
10669 static int
10670 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
10671 i386_operand_type types, const char *disp_start)
10672 {
10673 i386_operand_type bigdisp;
10674 int ret = 1;
10675
10676 /* We do this to make sure that the section symbol is in
10677 the symbol table. We will ultimately change the relocation
10678 to be relative to the beginning of the section. */
10679 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
10680 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
10681 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10682 {
10683 if (exp->X_op != O_symbol)
10684 goto inv_disp;
10685
10686 if (S_IS_LOCAL (exp->X_add_symbol)
10687 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
10688 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
10689 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
10690 exp->X_op = O_subtract;
10691 exp->X_op_symbol = GOT_symbol;
10692 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
10693 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
10694 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10695 i.reloc[this_operand] = BFD_RELOC_64;
10696 else
10697 i.reloc[this_operand] = BFD_RELOC_32;
10698 }
10699
10700 else if (exp->X_op == O_absent
10701 || exp->X_op == O_illegal
10702 || exp->X_op == O_big)
10703 {
10704 inv_disp:
10705 as_bad (_("missing or invalid displacement expression `%s'"),
10706 disp_start);
10707 ret = 0;
10708 }
10709
10710 else if (flag_code == CODE_64BIT
10711 && !i.prefix[ADDR_PREFIX]
10712 && exp->X_op == O_constant)
10713 {
      /* Since the displacement is sign-extended to 64 bits, don't allow
	 disp32, and turn off disp32s if the value is out of range.  */
10716 i.types[this_operand].bitfield.disp32 = 0;
10717 if (!fits_in_signed_long (exp->X_add_number))
10718 {
10719 i.types[this_operand].bitfield.disp32s = 0;
10720 if (i.types[this_operand].bitfield.baseindex)
10721 {
	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
10723 (long) exp->X_add_number);
10724 ret = 0;
10725 }
10726 }
10727 }
10728
10729 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10730 else if (exp->X_op != O_constant
10731 && OUTPUT_FLAVOR == bfd_target_aout_flavour
10732 && exp_seg != absolute_section
10733 && exp_seg != text_section
10734 && exp_seg != data_section
10735 && exp_seg != bss_section
10736 && exp_seg != undefined_section
10737 && !bfd_is_com_section (exp_seg))
10738 {
10739 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
10740 ret = 0;
10741 }
10742 #endif
10743
10744 if (current_templates->start->opcode_modifier.jump == JUMP_BYTE
10745 /* Constants get taken care of by optimize_disp(). */
10746 && exp->X_op != O_constant)
10747 i.types[this_operand].bitfield.disp8 = 1;
10748
10749 /* Check if this is a displacement only operand. */
10750 bigdisp = i.types[this_operand];
10751 bigdisp.bitfield.disp8 = 0;
10752 bigdisp.bitfield.disp16 = 0;
10753 bigdisp.bitfield.disp32 = 0;
10754 bigdisp.bitfield.disp32s = 0;
10755 bigdisp.bitfield.disp64 = 0;
10756 if (operand_type_all_zero (&bigdisp))
10757 i.types[this_operand] = operand_type_and (i.types[this_operand],
10758 types);
10759
10760 return ret;
10761 }
10762
10763 /* Return the active addressing mode, taking address override and
10764 registers forming the address into consideration. Update the
10765 address override prefix if necessary. */
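/* For example, in 32-bit code a 16-bit address such as "(%bx,%si)"
   yields CODE_16BIT here and causes the 0x67 address-size prefix to
   be added.  */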
10766
10767 static enum flag_code
10768 i386_addressing_mode (void)
10769 {
10770 enum flag_code addr_mode;
10771
10772 if (i.prefix[ADDR_PREFIX])
10773 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
10774 else if (flag_code == CODE_16BIT
10775 && current_templates->start->cpu_flags.bitfield.cpumpx
10776 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10777 from md_assemble() by "is not a valid base/index expression"
10778 when there is a base and/or index. */
10779 && !i.types[this_operand].bitfield.baseindex)
10780 {
10781 /* MPX insn memory operands with neither base nor index must be forced
10782 to use 32-bit addressing in 16-bit mode. */
10783 addr_mode = CODE_32BIT;
10784 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
10785 ++i.prefixes;
10786 gas_assert (!i.types[this_operand].bitfield.disp16);
10787 gas_assert (!i.types[this_operand].bitfield.disp32);
10788 }
10789 else
10790 {
10791 addr_mode = flag_code;
10792
10793 #if INFER_ADDR_PREFIX
10794 if (i.mem_operands == 0)
10795 {
10796 /* Infer address prefix from the first memory operand. */
10797 const reg_entry *addr_reg = i.base_reg;
10798
10799 if (addr_reg == NULL)
10800 addr_reg = i.index_reg;
10801
10802 if (addr_reg)
10803 {
10804 if (addr_reg->reg_type.bitfield.dword)
10805 addr_mode = CODE_32BIT;
10806 else if (flag_code != CODE_64BIT
10807 && addr_reg->reg_type.bitfield.word)
10808 addr_mode = CODE_16BIT;
10809
10810 if (addr_mode != flag_code)
10811 {
10812 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
10813 i.prefixes += 1;
10814 /* Change the size of any displacement too. At most one
10815 of Disp16 or Disp32 is set.
10816 FIXME. There doesn't seem to be any real need for
10817 separate Disp16 and Disp32 flags. The same goes for
10818 Imm16 and Imm32. Removing them would probably clean
10819 up the code quite a lot. */
10820 if (flag_code != CODE_64BIT
10821 && (i.types[this_operand].bitfield.disp16
10822 || i.types[this_operand].bitfield.disp32))
10823 i.types[this_operand]
10824 = operand_type_xor (i.types[this_operand], disp16_32);
10825 }
10826 }
10827 }
10828 #endif
10829 }
10830
10831 return addr_mode;
10832 }
10833
10834 /* Make sure the memory operand we've been dealt is valid.
10835 Return 1 on success, 0 on a failure. */
10836
10837 static int
10838 i386_index_check (const char *operand_string)
10839 {
10840 const char *kind = "base/index";
10841 enum flag_code addr_mode = i386_addressing_mode ();
10842
10843 if (current_templates->start->opcode_modifier.isstring
10844 && !current_templates->start->cpu_flags.bitfield.cpupadlock
10845 && (current_templates->end[-1].opcode_modifier.isstring
10846 || i.mem_operands))
10847 {
10848 /* Memory operands of string insns are special in that they only allow
10849 a single register (rDI, rSI, or rBX) as their memory address. */
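      /* E.g. "stos" must address through rDI, "lods" through rSI, and
	 "xlat" through rBX.  */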
10850 const reg_entry *expected_reg;
10851 static const char *di_si[][2] =
10852 {
10853 { "esi", "edi" },
10854 { "si", "di" },
10855 { "rsi", "rdi" }
10856 };
10857 static const char *bx[] = { "ebx", "bx", "rbx" };
10858
10859 kind = "string address";
10860
10861 if (current_templates->start->opcode_modifier.repprefixok)
10862 {
10863 int es_op = current_templates->end[-1].opcode_modifier.isstring
10864 - IS_STRING_ES_OP0;
10865 int op = 0;
10866
10867 if (!current_templates->end[-1].operand_types[0].bitfield.baseindex
10868 || ((!i.mem_operands != !intel_syntax)
10869 && current_templates->end[-1].operand_types[1]
10870 .bitfield.baseindex))
10871 op = 1;
10872 expected_reg = hash_find (reg_hash, di_si[addr_mode][op == es_op]);
10873 }
10874 else
10875 expected_reg = hash_find (reg_hash, bx[addr_mode]);
10876
10877 if (i.base_reg != expected_reg
10878 || i.index_reg
10879 || operand_type_check (i.types[this_operand], disp))
10880 {
10881 /* The second memory operand must have the same size as
10882 the first one. */
10883 if (i.mem_operands
10884 && i.base_reg
10885 && !((addr_mode == CODE_64BIT
10886 && i.base_reg->reg_type.bitfield.qword)
10887 || (addr_mode == CODE_32BIT
10888 ? i.base_reg->reg_type.bitfield.dword
10889 : i.base_reg->reg_type.bitfield.word)))
10890 goto bad_address;
10891
10892 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10893 operand_string,
10894 intel_syntax ? '[' : '(',
10895 register_prefix,
10896 expected_reg->reg_name,
10897 intel_syntax ? ']' : ')');
10898 return 1;
10899 }
10900 else
10901 return 1;
10902
10903 bad_address:
10904 as_bad (_("`%s' is not a valid %s expression"),
10905 operand_string, kind);
10906 return 0;
10907 }
10908 else
10909 {
10910 if (addr_mode != CODE_16BIT)
10911 {
10912 /* 32-bit/64-bit checks. */
10913 if ((i.base_reg
10914 && ((addr_mode == CODE_64BIT
10915 ? !i.base_reg->reg_type.bitfield.qword
10916 : !i.base_reg->reg_type.bitfield.dword)
10917 || (i.index_reg && i.base_reg->reg_num == RegIP)
10918 || i.base_reg->reg_num == RegIZ))
10919 || (i.index_reg
10920 && !i.index_reg->reg_type.bitfield.xmmword
10921 && !i.index_reg->reg_type.bitfield.ymmword
10922 && !i.index_reg->reg_type.bitfield.zmmword
10923 && ((addr_mode == CODE_64BIT
10924 ? !i.index_reg->reg_type.bitfield.qword
10925 : !i.index_reg->reg_type.bitfield.dword)
10926 || !i.index_reg->reg_type.bitfield.baseindex)))
10927 goto bad_address;
10928
10929 /* bndmk, bndldx, and bndstx have special restrictions. */
10930 if (current_templates->start->base_opcode == 0xf30f1b
10931 || (current_templates->start->base_opcode & ~1) == 0x0f1a)
10932 {
10933 /* They cannot use RIP-relative addressing. */
10934 if (i.base_reg && i.base_reg->reg_num == RegIP)
10935 {
10936 as_bad (_("`%s' cannot be used here"), operand_string);
10937 return 0;
10938 }
10939
10940 /* bndldx and bndstx ignore their scale factor. */
10941 if (current_templates->start->base_opcode != 0xf30f1b
10942 && i.log2_scale_factor)
10943 as_warn (_("register scaling is being ignored here"));
10944 }
10945 }
10946 else
10947 {
10948 /* 16-bit checks. */
10949 if ((i.base_reg
10950 && (!i.base_reg->reg_type.bitfield.word
10951 || !i.base_reg->reg_type.bitfield.baseindex))
10952 || (i.index_reg
10953 && (!i.index_reg->reg_type.bitfield.word
10954 || !i.index_reg->reg_type.bitfield.baseindex
10955 || !(i.base_reg
10956 && i.base_reg->reg_num < 6
10957 && i.index_reg->reg_num >= 6
10958 && i.log2_scale_factor == 0))))
10959 goto bad_address;
10960 }
10961 }
10962 return 1;
10963 }
10964
10965 /* Handle vector immediates. */
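/* These are the EVEX rounding-control / suppress-all-exceptions
   pseudo-operands, e.g. the "{rn-sae}" in
   "vaddps {rn-sae}, %zmm0, %zmm1, %zmm2".  */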
10966
10967 static int
10968 RC_SAE_immediate (const char *imm_start)
10969 {
10970 unsigned int match_found, j;
10971 const char *pstr = imm_start;
10972 expressionS *exp;
10973
10974 if (*pstr != '{')
10975 return 0;
10976
10977 pstr++;
10978 match_found = 0;
10979 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
10980 {
10981 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
10982 {
10983 if (!i.rounding)
10984 {
10985 rc_op.type = RC_NamesTable[j].type;
10986 rc_op.operand = this_operand;
10987 i.rounding = &rc_op;
10988 }
10989 else
10990 {
10991 as_bad (_("duplicated `%s'"), imm_start);
10992 return 0;
10993 }
10994 pstr += RC_NamesTable[j].len;
10995 match_found = 1;
10996 break;
10997 }
10998 }
10999 if (!match_found)
11000 return 0;
11001
11002 if (*pstr++ != '}')
11003 {
11004 as_bad (_("Missing '}': '%s'"), imm_start);
11005 return 0;
11006 }
  /* RC/SAE immediate string should contain nothing more.  */
11008 if (*pstr != 0)
11009 {
11010 as_bad (_("Junk after '}': '%s'"), imm_start);
11011 return 0;
11012 }
11013
11014 exp = &im_expressions[i.imm_operands++];
11015 i.op[this_operand].imms = exp;
11016
11017 exp->X_op = O_constant;
11018 exp->X_add_number = 0;
11019 exp->X_add_symbol = (symbolS *) 0;
11020 exp->X_op_symbol = (symbolS *) 0;
11021
11022 i.types[this_operand].bitfield.imm8 = 1;
11023 return 1;
11024 }
11025
11026 /* Only string instructions can have a second memory operand, so
11027 reduce current_templates to just those if it contains any. */
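/* E.g. once the first memory operand of "movsl (%esi), (%edi)" has been
   parsed, only the string templates of "movs" remain, allowing the
   second memory operand to be accepted.  */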
11028 static int
11029 maybe_adjust_templates (void)
11030 {
11031 const insn_template *t;
11032
11033 gas_assert (i.mem_operands == 1);
11034
11035 for (t = current_templates->start; t < current_templates->end; ++t)
11036 if (t->opcode_modifier.isstring)
11037 break;
11038
11039 if (t < current_templates->end)
11040 {
11041 static templates aux_templates;
11042 bfd_boolean recheck;
11043
11044 aux_templates.start = t;
11045 for (; t < current_templates->end; ++t)
11046 if (!t->opcode_modifier.isstring)
11047 break;
11048 aux_templates.end = t;
11049
11050 /* Determine whether to re-check the first memory operand. */
11051 recheck = (aux_templates.start != current_templates->start
11052 || t != current_templates->end);
11053
11054 current_templates = &aux_templates;
11055
11056 if (recheck)
11057 {
11058 i.mem_operands = 0;
11059 if (i.memop1_string != NULL
11060 && i386_index_check (i.memop1_string) == 0)
11061 return 0;
11062 i.mem_operands = 1;
11063 }
11064 }
11065
11066 return 1;
11067 }
11068
11069 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11070 on error. */
11071
11072 static int
11073 i386_att_operand (char *operand_string)
11074 {
11075 const reg_entry *r;
11076 char *end_op;
11077 char *op_string = operand_string;
11078
11079 if (is_space_char (*op_string))
11080 ++op_string;
11081
  /* We check for an absolute prefix (differentiating, for example,
     'jmp pc_relative_label' from 'jmp *absolute_label').  */
11084 if (*op_string == ABSOLUTE_PREFIX)
11085 {
11086 ++op_string;
11087 if (is_space_char (*op_string))
11088 ++op_string;
11089 i.jumpabsolute = TRUE;
11090 }
11091
11092 /* Check if operand is a register. */
11093 if ((r = parse_register (op_string, &end_op)) != NULL)
11094 {
11095 i386_operand_type temp;
11096
11097 if (r == &bad_reg)
11098 return 0;
11099
11100 /* Check for a segment override by searching for ':' after a
11101 segment register. */
11102 op_string = end_op;
11103 if (is_space_char (*op_string))
11104 ++op_string;
11105 if (*op_string == ':' && r->reg_type.bitfield.class == SReg)
11106 {
11107 switch (r->reg_num)
11108 {
11109 case 0:
11110 i.seg[i.mem_operands] = &es;
11111 break;
11112 case 1:
11113 i.seg[i.mem_operands] = &cs;
11114 break;
11115 case 2:
11116 i.seg[i.mem_operands] = &ss;
11117 break;
11118 case 3:
11119 i.seg[i.mem_operands] = &ds;
11120 break;
11121 case 4:
11122 i.seg[i.mem_operands] = &fs;
11123 break;
11124 case 5:
11125 i.seg[i.mem_operands] = &gs;
11126 break;
11127 }
11128
11129 /* Skip the ':' and whitespace. */
11130 ++op_string;
11131 if (is_space_char (*op_string))
11132 ++op_string;
11133
11134 if (!is_digit_char (*op_string)
11135 && !is_identifier_char (*op_string)
11136 && *op_string != '('
11137 && *op_string != ABSOLUTE_PREFIX)
11138 {
11139 as_bad (_("bad memory operand `%s'"), op_string);
11140 return 0;
11141 }
11142 /* Handle case of %es:*foo. */
11143 if (*op_string == ABSOLUTE_PREFIX)
11144 {
11145 ++op_string;
11146 if (is_space_char (*op_string))
11147 ++op_string;
11148 i.jumpabsolute = TRUE;
11149 }
11150 goto do_memory_reference;
11151 }
11152
11153 /* Handle vector operations. */
11154 if (*op_string == '{')
11155 {
11156 op_string = check_VecOperations (op_string, NULL);
11157 if (op_string == NULL)
11158 return 0;
11159 }
11160
11161 if (*op_string)
11162 {
11163 as_bad (_("junk `%s' after register"), op_string);
11164 return 0;
11165 }
11166 temp = r->reg_type;
11167 temp.bitfield.baseindex = 0;
11168 i.types[this_operand] = operand_type_or (i.types[this_operand],
11169 temp);
11170 i.types[this_operand].bitfield.unspecified = 0;
11171 i.op[this_operand].regs = r;
11172 i.reg_operands++;
11173 }
11174 else if (*op_string == REGISTER_PREFIX)
11175 {
11176 as_bad (_("bad register name `%s'"), op_string);
11177 return 0;
11178 }
11179 else if (*op_string == IMMEDIATE_PREFIX)
11180 {
11181 ++op_string;
11182 if (i.jumpabsolute)
11183 {
11184 as_bad (_("immediate operand illegal with absolute jump"));
11185 return 0;
11186 }
11187 if (!i386_immediate (op_string))
11188 return 0;
11189 }
11190 else if (RC_SAE_immediate (operand_string))
11191 {
11192 /* If it is a RC or SAE immediate, do nothing. */
11193 ;
11194 }
11195 else if (is_digit_char (*op_string)
11196 || is_identifier_char (*op_string)
11197 || *op_string == '"'
11198 || *op_string == '(')
11199 {
11200 /* This is a memory reference of some sort. */
11201 char *base_string;
11202
11203 /* Start and end of displacement string expression (if found). */
11204 char *displacement_string_start;
11205 char *displacement_string_end;
11206 char *vop_start;
11207
11208 do_memory_reference:
11209 if (i.mem_operands == 1 && !maybe_adjust_templates ())
11210 return 0;
11211 if ((i.mem_operands == 1
11212 && !current_templates->start->opcode_modifier.isstring)
11213 || i.mem_operands == 2)
11214 {
11215 as_bad (_("too many memory references for `%s'"),
11216 current_templates->start->name);
11217 return 0;
11218 }
11219
11220 /* Check for base index form. We detect the base index form by
11221 looking for an ')' at the end of the operand, searching
11222 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11223 after the '('. */
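      /* E.g. in "-4(%ebp,%esi,2)" the base is %ebp, the index %esi,
	 the scale factor 2 and the displacement -4.  */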
11224 base_string = op_string + strlen (op_string);
11225
11226 /* Handle vector operations. */
11227 vop_start = strchr (op_string, '{');
11228 if (vop_start && vop_start < base_string)
11229 {
11230 if (check_VecOperations (vop_start, base_string) == NULL)
11231 return 0;
11232 base_string = vop_start;
11233 }
11234
11235 --base_string;
11236 if (is_space_char (*base_string))
11237 --base_string;
11238
      /* If we only have a displacement, set up for it to be parsed later.  */
11240 displacement_string_start = op_string;
11241 displacement_string_end = base_string + 1;
11242
11243 if (*base_string == ')')
11244 {
11245 char *temp_string;
11246 unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s is
	     equal, so this loop will not be infinite.  */
11249 do
11250 {
11251 base_string--;
11252 if (*base_string == ')')
11253 parens_balanced++;
11254 if (*base_string == '(')
11255 parens_balanced--;
11256 }
11257 while (parens_balanced);
11258
11259 temp_string = base_string;
11260
11261 /* Skip past '(' and whitespace. */
11262 ++base_string;
11263 if (is_space_char (*base_string))
11264 ++base_string;
11265
11266 if (*base_string == ','
11267 || ((i.base_reg = parse_register (base_string, &end_op))
11268 != NULL))
11269 {
11270 displacement_string_end = temp_string;
11271
11272 i.types[this_operand].bitfield.baseindex = 1;
11273
11274 if (i.base_reg)
11275 {
11276 if (i.base_reg == &bad_reg)
11277 return 0;
11278 base_string = end_op;
11279 if (is_space_char (*base_string))
11280 ++base_string;
11281 }
11282
11283 /* There may be an index reg or scale factor here. */
11284 if (*base_string == ',')
11285 {
11286 ++base_string;
11287 if (is_space_char (*base_string))
11288 ++base_string;
11289
11290 if ((i.index_reg = parse_register (base_string, &end_op))
11291 != NULL)
11292 {
11293 if (i.index_reg == &bad_reg)
11294 return 0;
11295 base_string = end_op;
11296 if (is_space_char (*base_string))
11297 ++base_string;
11298 if (*base_string == ',')
11299 {
11300 ++base_string;
11301 if (is_space_char (*base_string))
11302 ++base_string;
11303 }
11304 else if (*base_string != ')')
11305 {
11306 as_bad (_("expecting `,' or `)' "
11307 "after index register in `%s'"),
11308 operand_string);
11309 return 0;
11310 }
11311 }
11312 else if (*base_string == REGISTER_PREFIX)
11313 {
11314 end_op = strchr (base_string, ',');
11315 if (end_op)
11316 *end_op = '\0';
11317 as_bad (_("bad register name `%s'"), base_string);
11318 return 0;
11319 }
11320
11321 /* Check for scale factor. */
11322 if (*base_string != ')')
11323 {
11324 char *end_scale = i386_scale (base_string);
11325
11326 if (!end_scale)
11327 return 0;
11328
11329 base_string = end_scale;
11330 if (is_space_char (*base_string))
11331 ++base_string;
11332 if (*base_string != ')')
11333 {
11334 as_bad (_("expecting `)' "
11335 "after scale factor in `%s'"),
11336 operand_string);
11337 return 0;
11338 }
11339 }
11340 else if (!i.index_reg)
11341 {
11342 as_bad (_("expecting index register or scale factor "
11343 "after `,'; got '%c'"),
11344 *base_string);
11345 return 0;
11346 }
11347 }
11348 else if (*base_string != ')')
11349 {
11350 as_bad (_("expecting `,' or `)' "
11351 "after base register in `%s'"),
11352 operand_string);
11353 return 0;
11354 }
11355 }
11356 else if (*base_string == REGISTER_PREFIX)
11357 {
11358 end_op = strchr (base_string, ',');
11359 if (end_op)
11360 *end_op = '\0';
11361 as_bad (_("bad register name `%s'"), base_string);
11362 return 0;
11363 }
11364 }
11365
11366 /* If there's an expression beginning the operand, parse it,
11367 assuming displacement_string_start and
11368 displacement_string_end are meaningful. */
11369 if (displacement_string_start != displacement_string_end)
11370 {
11371 if (!i386_displacement (displacement_string_start,
11372 displacement_string_end))
11373 return 0;
11374 }
11375
11376 /* Special case for (%dx) while doing input/output op. */
11377 if (i.base_reg
11378 && i.base_reg->reg_type.bitfield.instance == RegD
11379 && i.base_reg->reg_type.bitfield.word
11380 && i.index_reg == 0
11381 && i.log2_scale_factor == 0
11382 && i.seg[i.mem_operands] == 0
11383 && !operand_type_check (i.types[this_operand], disp))
11384 {
11385 i.types[this_operand] = i.base_reg->reg_type;
11386 return 1;
11387 }
11388
11389 if (i386_index_check (operand_string) == 0)
11390 return 0;
11391 i.flags[this_operand] |= Operand_Mem;
11392 if (i.mem_operands == 0)
11393 i.memop1_string = xstrdup (operand_string);
11394 i.mem_operands++;
11395 }
11396 else
11397 {
11398 /* It's not a memory operand; argh! */
11399 as_bad (_("invalid char %s beginning operand %d `%s'"),
11400 output_invalid (*op_string),
11401 this_operand + 1,
11402 op_string);
11403 return 0;
11404 }
11405 return 1; /* Normal return. */
11406 }
11407 \f
11408 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11409 that an rs_machine_dependent frag may reach. */
11410
11411 unsigned int
11412 i386_frag_max_var (fragS *frag)
11413 {
11414 /* The only relaxable frags are for jumps.
11415 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11416 gas_assert (frag->fr_type == rs_machine_dependent);
11417 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
11418 }
11419
11420 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11421 static int
11422 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
11423 {
11424 /* STT_GNU_IFUNC symbol must go through PLT. */
11425 if ((symbol_get_bfdsym (fr_symbol)->flags
11426 & BSF_GNU_INDIRECT_FUNCTION) != 0)
11427 return 0;
11428
11429 if (!S_IS_EXTERNAL (fr_symbol))
11430 /* Symbol may be weak or local. */
11431 return !S_IS_WEAK (fr_symbol);
11432
11433 /* Global symbols with non-default visibility can't be preempted. */
11434 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
11435 return 1;
11436
11437 if (fr_var != NO_RELOC)
11438 switch ((enum bfd_reloc_code_real) fr_var)
11439 {
11440 case BFD_RELOC_386_PLT32:
11441 case BFD_RELOC_X86_64_PLT32:
11442 /* Symbol with PLT relocation may be preempted. */
11443 return 0;
11444 default:
11445 abort ();
11446 }
11447
11448 /* Global symbols with default visibility in a shared library may be
11449 preempted by another definition. */
11450 return !shared;
11451 }
11452 #endif
11453
/* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
   Note: this also works for Skylake and Cascade Lake.
11456 ---------------------------------------------------------------------
11457 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11458 | ------ | ----------- | ------- | -------- |
11459 | Jo | N | N | Y |
11460 | Jno | N | N | Y |
11461 | Jc/Jb | Y | N | Y |
11462 | Jae/Jnb | Y | N | Y |
11463 | Je/Jz | Y | Y | Y |
11464 | Jne/Jnz | Y | Y | Y |
11465 | Jna/Jbe | Y | N | Y |
11466 | Ja/Jnbe | Y | N | Y |
11467 | Js | N | N | Y |
11468 | Jns | N | N | Y |
11469 | Jp/Jpe | N | N | Y |
11470 | Jnp/Jpo | N | N | Y |
11471 | Jl/Jnge | Y | Y | Y |
11472 | Jge/Jnl | Y | Y | Y |
11473 | Jle/Jng | Y | Y | Y |
11474 | Jg/Jnle | Y | Y | Y |
11475 --------------------------------------------------------------------- */
11476 static int
11477 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp, enum mf_jcc_kind mf_jcc)
11478 {
11479 if (mf_cmp == mf_cmp_alu_cmp)
11480 return ((mf_jcc >= mf_jcc_jc && mf_jcc <= mf_jcc_jna)
11481 || mf_jcc == mf_jcc_jl || mf_jcc == mf_jcc_jle);
11482 if (mf_cmp == mf_cmp_incdec)
11483 return (mf_jcc == mf_jcc_je || mf_jcc == mf_jcc_jl
11484 || mf_jcc == mf_jcc_jle);
11485 if (mf_cmp == mf_cmp_test_and)
11486 return 1;
11487 return 0;
11488 }
11489
11490 /* Return the next non-empty frag. */
11491
11492 static fragS *
11493 i386_next_non_empty_frag (fragS *fragP)
11494 {
11495 /* There may be a frag with a ".fill 0" when there is no room in
11496 the current frag for frag_grow in output_insn. */
11497 for (fragP = fragP->fr_next;
11498 (fragP != NULL
11499 && fragP->fr_type == rs_fill
11500 && fragP->fr_fix == 0);
11501 fragP = fragP->fr_next)
11502 ;
11503 return fragP;
11504 }
11505
11506 /* Return the next jcc frag after BRANCH_PADDING. */
11507
11508 static fragS *
11509 i386_next_fusible_jcc_frag (fragS *maybe_cmp_fragP, fragS *pad_fragP)
11510 {
11511 fragS *branch_fragP;
11512 if (!pad_fragP)
11513 return NULL;
11514
11515 if (pad_fragP->fr_type == rs_machine_dependent
11516 && (TYPE_FROM_RELAX_STATE (pad_fragP->fr_subtype)
11517 == BRANCH_PADDING))
11518 {
11519 branch_fragP = i386_next_non_empty_frag (pad_fragP);
11520 if (branch_fragP->fr_type != rs_machine_dependent)
11521 return NULL;
11522 if (TYPE_FROM_RELAX_STATE (branch_fragP->fr_subtype) == COND_JUMP
11523 && i386_macro_fusible_p (maybe_cmp_fragP->tc_frag_data.mf_type,
11524 pad_fragP->tc_frag_data.mf_type))
11525 return branch_fragP;
11526 }
11527
11528 return NULL;
11529 }
11530
11531 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11532
11533 static void
11534 i386_classify_machine_dependent_frag (fragS *fragP)
11535 {
11536 fragS *cmp_fragP;
11537 fragS *pad_fragP;
11538 fragS *branch_fragP;
11539 fragS *next_fragP;
11540 unsigned int max_prefix_length;
11541
11542 if (fragP->tc_frag_data.classified)
11543 return;
11544
  /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING.  Convert
     unfused FUSED_JCC_PADDING to BRANCH_PREFIX and merge BRANCH_PADDING
     into a preceding FUSED_JCC_PADDING.  */
11547 for (next_fragP = fragP;
11548 next_fragP != NULL;
11549 next_fragP = next_fragP->fr_next)
11550 {
11551 next_fragP->tc_frag_data.classified = 1;
11552 if (next_fragP->fr_type == rs_machine_dependent)
11553 switch (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype))
11554 {
11555 case BRANCH_PADDING:
11556 /* The BRANCH_PADDING frag must be followed by a branch
11557 frag. */
11558 branch_fragP = i386_next_non_empty_frag (next_fragP);
11559 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
11560 break;
11561 case FUSED_JCC_PADDING:
11562 /* Check if this is a fused jcc:
11563 FUSED_JCC_PADDING
11564 CMP like instruction
11565 BRANCH_PADDING
11566 COND_JUMP
11567 */
11568 cmp_fragP = i386_next_non_empty_frag (next_fragP);
11569 pad_fragP = i386_next_non_empty_frag (cmp_fragP);
11570 branch_fragP = i386_next_fusible_jcc_frag (next_fragP, pad_fragP);
11571 if (branch_fragP)
11572 {
11573 /* The BRANCH_PADDING frag is merged with the
11574 FUSED_JCC_PADDING frag. */
11575 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
11576 /* CMP like instruction size. */
11577 next_fragP->tc_frag_data.cmp_size = cmp_fragP->fr_fix;
11578 frag_wane (pad_fragP);
11579 /* Skip to branch_fragP. */
11580 next_fragP = branch_fragP;
11581 }
11582 else if (next_fragP->tc_frag_data.max_prefix_length)
11583 {
11584 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11585 a fused jcc. */
11586 next_fragP->fr_subtype
11587 = ENCODE_RELAX_STATE (BRANCH_PREFIX, 0);
11588 next_fragP->tc_frag_data.max_bytes
11589 = next_fragP->tc_frag_data.max_prefix_length;
11590 /* This will be updated in the BRANCH_PREFIX scan. */
11591 next_fragP->tc_frag_data.max_prefix_length = 0;
11592 }
11593 else
11594 frag_wane (next_fragP);
11595 break;
11596 }
11597 }
11598
11599 /* Stop if there is no BRANCH_PREFIX. */
11600 if (!align_branch_prefix_size)
11601 return;
11602
11603 /* Scan for BRANCH_PREFIX. */
11604 for (; fragP != NULL; fragP = fragP->fr_next)
11605 {
11606 if (fragP->fr_type != rs_machine_dependent
11607 || (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
11608 != BRANCH_PREFIX))
11609 continue;
11610
11611 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11612 FUSED_JCC_PADDING. */
11613 max_prefix_length = 0;
11614 for (next_fragP = fragP;
11615 next_fragP != NULL;
11616 next_fragP = next_fragP->fr_next)
11617 {
11618 if (next_fragP->fr_type == rs_fill)
11619 /* Skip rs_fill frags. */
11620 continue;
11621 else if (next_fragP->fr_type != rs_machine_dependent)
11622 /* Stop for all other frags. */
11623 break;
11624
11625 /* rs_machine_dependent frags. */
11626 if (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11627 == BRANCH_PREFIX)
11628 {
11629 /* Count BRANCH_PREFIX frags. */
11630 if (max_prefix_length >= MAX_FUSED_JCC_PADDING_SIZE)
11631 {
11632 max_prefix_length = MAX_FUSED_JCC_PADDING_SIZE;
11633 frag_wane (next_fragP);
11634 }
11635 else
11636 max_prefix_length
11637 += next_fragP->tc_frag_data.max_bytes;
11638 }
11639 else if ((TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11640 == BRANCH_PADDING)
11641 || (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11642 == FUSED_JCC_PADDING))
11643 {
11644 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11645 fragP->tc_frag_data.u.padding_fragP = next_fragP;
11646 break;
11647 }
11648 else
11649 /* Stop for other rs_machine_dependent frags. */
11650 break;
11651 }
11652
11653 fragP->tc_frag_data.max_prefix_length = max_prefix_length;
11654
11655 /* Skip to the next frag. */
11656 fragP = next_fragP;
11657 }
11658 }
11659
11660 /* Compute padding size for
11661
11662 FUSED_JCC_PADDING
11663 CMP like instruction
11664 BRANCH_PADDING
11665 COND_JUMP/UNCOND_JUMP
11666
11667 or
11668
11669 BRANCH_PADDING
11670 COND_JUMP/UNCOND_JUMP
11671 */
11672
11673 static int
11674 i386_branch_padding_size (fragS *fragP, offsetT address)
11675 {
11676 unsigned int offset, size, padding_size;
11677 fragS *branch_fragP = fragP->tc_frag_data.u.branch_fragP;
11678
11679 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11680 if (!address)
11681 address = fragP->fr_address;
11682 address += fragP->fr_fix;
11683
11684 /* CMP like instruction size. */
11685 size = fragP->tc_frag_data.cmp_size;
11686
11687 /* The base size of the branch frag. */
11688 size += branch_fragP->fr_fix;
11689
11690 /* Add opcode and displacement bytes for the rs_machine_dependent
11691 branch frag. */
11692 if (branch_fragP->fr_type == rs_machine_dependent)
11693 size += md_relax_table[branch_fragP->fr_subtype].rlx_length;
11694
11695 /* Check if branch is within boundary and doesn't end at the last
11696 byte. */
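/* Worked example (illustrative numbers): with align_branch_power
   == 5 (a 32-byte boundary), a 6-byte branch at address 0x101c has
   offset == 0x1c; since 0x1c + 6 >= 0x20, padding_size
   == 0x20 - 0x1c == 4 bytes. */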
11697 offset = address & ((1U << align_branch_power) - 1);
11698 if ((offset + size) >= (1U << align_branch_power))
11699 /* Padding needed to avoid crossing boundary. */
11700 padding_size = (1U << align_branch_power) - offset;
11701 else
11702 /* No padding needed. */
11703 padding_size = 0;
11704
11705 /* The return value may be saved in tc_frag_data.length, which is
11706 an unsigned byte. */
11707 if (!fits_in_unsigned_byte (padding_size))
11708 abort ();
11709
11710 return padding_size;
11711 }
11712
11713 /* i386_generic_table_relax_frag()
11714
11715 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11716 grow/shrink padding to align branch frags. Hand others to
11717 relax_frag(). */
11718
11719 long
11720 i386_generic_table_relax_frag (segT segment, fragS *fragP, long stretch)
11721 {
11722 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11723 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11724 {
11725 long padding_size = i386_branch_padding_size (fragP, 0);
11726 long grow = padding_size - fragP->tc_frag_data.length;
11727
11728 /* When the BRANCH_PREFIX frag is used, the computed address
11729 must match the actual address and there should be no padding. */
11730 if (fragP->tc_frag_data.padding_address
11731 && (fragP->tc_frag_data.padding_address != fragP->fr_address
11732 || padding_size))
11733 abort ();
11734
11735 /* Update the padding size. */
11736 if (grow)
11737 fragP->tc_frag_data.length = padding_size;
11738
11739 return grow;
11740 }
11741 else if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
11742 {
11743 fragS *padding_fragP, *next_fragP;
11744 long padding_size, left_size, last_size;
11745
11746 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
11747 if (!padding_fragP)
11748 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11749 return (fragP->tc_frag_data.length
11750 - fragP->tc_frag_data.last_length);
11751
11752 /* Compute the relative address of the padding frag the very
11753 first time, when the BRANCH_PREFIX frag sizes are all zero. */
11754 if (!fragP->tc_frag_data.padding_address)
11755 fragP->tc_frag_data.padding_address
11756 = padding_fragP->fr_address - (fragP->fr_address - stretch);
11757
11758 /* First update the last length from the previous iteration. */
11759 left_size = fragP->tc_frag_data.prefix_length;
11760 for (next_fragP = fragP;
11761 next_fragP != padding_fragP;
11762 next_fragP = next_fragP->fr_next)
11763 if (next_fragP->fr_type == rs_machine_dependent
11764 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11765 == BRANCH_PREFIX))
11766 {
11767 if (left_size)
11768 {
11769 int max = next_fragP->tc_frag_data.max_bytes;
11770 if (max)
11771 {
11772 int size;
11773 if (max > left_size)
11774 size = left_size;
11775 else
11776 size = max;
11777 left_size -= size;
11778 next_fragP->tc_frag_data.last_length = size;
11779 }
11780 }
11781 else
11782 next_fragP->tc_frag_data.last_length = 0;
11783 }
11784
11785 /* Check the padding size for the padding frag. */
11786 padding_size = i386_branch_padding_size
11787 (padding_fragP, (fragP->fr_address
11788 + fragP->tc_frag_data.padding_address));
11789
11790 last_size = fragP->tc_frag_data.prefix_length;
11791 /* Check if there is any change from the last iteration. */
11792 if (padding_size == last_size)
11793 {
11794 /* Update the expected address of the padding frag. */
11795 padding_fragP->tc_frag_data.padding_address
11796 = (fragP->fr_address + padding_size
11797 + fragP->tc_frag_data.padding_address);
11798 return 0;
11799 }
11800
11801 if (padding_size > fragP->tc_frag_data.max_prefix_length)
11802 {
11803 /* No padding if there is insufficient room. Clear the
11804 expected address of the padding frag. */
11805 padding_fragP->tc_frag_data.padding_address = 0;
11806 padding_size = 0;
11807 }
11808 else
11809 /* Store the expected address of the padding frag. */
11810 padding_fragP->tc_frag_data.padding_address
11811 = (fragP->fr_address + padding_size
11812 + fragP->tc_frag_data.padding_address);
11813
11814 fragP->tc_frag_data.prefix_length = padding_size;
11815
11816 /* Update the length for the current iteration. */
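/* E.g. (illustrative sizes) 4 bytes of padding distributed over two
   BRANCH_PREFIX frags with max_bytes of 3 and 2 become runs of 3
   and 1 prefix bytes respectively. */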
11817 left_size = padding_size;
11818 for (next_fragP = fragP;
11819 next_fragP != padding_fragP;
11820 next_fragP = next_fragP->fr_next)
11821 if (next_fragP->fr_type == rs_machine_dependent
11822 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11823 == BRANCH_PREFIX))
11824 {
11825 if (left_size)
11826 {
11827 int max = next_fragP->tc_frag_data.max_bytes;
11828 if (max)
11829 {
11830 int size;
11831 if (max > left_size)
11832 size = left_size;
11833 else
11834 size = max;
11835 left_size -= size;
11836 next_fragP->tc_frag_data.length = size;
11837 }
11838 }
11839 else
11840 next_fragP->tc_frag_data.length = 0;
11841 }
11842
11843 return (fragP->tc_frag_data.length
11844 - fragP->tc_frag_data.last_length);
11845 }
11846 return relax_frag (segment, fragP, stretch);
11847 }
11848
11849 /* md_estimate_size_before_relax()
11850
11851 Called just before relax() for rs_machine_dependent frags. The x86
11852 assembler uses these frags to handle variable size jump
11853 instructions.
11854
11855 Any symbol that is now undefined will not become defined.
11856 Return the correct fr_subtype in the frag.
11857 Return the initial "guess for variable size of frag" to caller.
11858 The guess is actually the growth beyond the fixed part. Whatever
11859 we do to grow the fixed or variable part contributes to our
11860 returned value. */
11861
11862 int
11863 md_estimate_size_before_relax (fragS *fragP, segT segment)
11864 {
11865 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11866 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX
11867 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11868 {
11869 i386_classify_machine_dependent_frag (fragP);
11870 return fragP->tc_frag_data.length;
11871 }
11872
11873 /* We've already got fragP->fr_subtype right; all we have to do is
11874 check for un-relaxable symbols. On an ELF system, we can't relax
11875 an externally visible symbol, because it may be overridden by a
11876 shared library. */
11877 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
11878 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11879 || (IS_ELF
11880 && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
11881 fragP->fr_var))
11882 #endif
11883 #if defined (OBJ_COFF) && defined (TE_PE)
11884 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
11885 && S_IS_WEAK (fragP->fr_symbol))
11886 #endif
11887 )
11888 {
11889 /* Symbol is undefined in this segment, or we need to keep a
11890 reloc so that weak symbols can be overridden. */
11891 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
11892 enum bfd_reloc_code_real reloc_type;
11893 unsigned char *opcode;
11894 int old_fr_fix;
11895
11896 if (fragP->fr_var != NO_RELOC)
11897 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
11898 else if (size == 2)
11899 reloc_type = BFD_RELOC_16_PCREL;
11900 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11901 else if (need_plt32_p (fragP->fr_symbol))
11902 reloc_type = BFD_RELOC_X86_64_PLT32;
11903 #endif
11904 else
11905 reloc_type = BFD_RELOC_32_PCREL;
11906
11907 old_fr_fix = fragP->fr_fix;
11908 opcode = (unsigned char *) fragP->fr_opcode;
11909
11910 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
11911 {
11912 case UNCOND_JUMP:
11913 /* Make jmp (0xeb) a (d)word displacement jump. */
11914 opcode[0] = 0xe9;
11915 fragP->fr_fix += size;
11916 fix_new (fragP, old_fr_fix, size,
11917 fragP->fr_symbol,
11918 fragP->fr_offset, 1,
11919 reloc_type);
11920 break;
11921
11922 case COND_JUMP86:
11923 if (size == 2
11924 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
11925 {
11926 /* Negate the condition, and branch past an
11927 unconditional jump. */
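/* E.g. a 16-bit "je target" (0x74) becomes "jne" (0x75) with a
   displacement of 3, skipping the following 3-byte "jmp target"
   (0xe9 plus a 2-byte displacement). */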
11928 opcode[0] ^= 1;
11929 opcode[1] = 3;
11930 /* Insert an unconditional jump. */
11931 opcode[2] = 0xe9;
11932 /* We added two extra opcode bytes, and have a two byte
11933 offset. */
11934 fragP->fr_fix += 2 + 2;
11935 fix_new (fragP, old_fr_fix + 2, 2,
11936 fragP->fr_symbol,
11937 fragP->fr_offset, 1,
11938 reloc_type);
11939 break;
11940 }
11941 /* Fall through. */
11942
11943 case COND_JUMP:
11944 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
11945 {
11946 fixS *fixP;
11947
11948 fragP->fr_fix += 1;
11949 fixP = fix_new (fragP, old_fr_fix, 1,
11950 fragP->fr_symbol,
11951 fragP->fr_offset, 1,
11952 BFD_RELOC_8_PCREL);
11953 fixP->fx_signed = 1;
11954 break;
11955 }
11956
11957 /* This changes the byte-displacement jump 0x7N
11958 to the (d)word-displacement jump 0x0f,0x8N. */
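/* E.g. "je" (0x74) becomes the two-byte opcode 0x0f 0x84. */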
11959 opcode[1] = opcode[0] + 0x10;
11960 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
11961 /* We've added an opcode byte. */
11962 fragP->fr_fix += 1 + size;
11963 fix_new (fragP, old_fr_fix + 1, size,
11964 fragP->fr_symbol,
11965 fragP->fr_offset, 1,
11966 reloc_type);
11967 break;
11968
11969 default:
11970 BAD_CASE (fragP->fr_subtype);
11971 break;
11972 }
11973 frag_wane (fragP);
11974 return fragP->fr_fix - old_fr_fix;
11975 }
11976
11977 /* Guess size depending on current relax state. Initially the relax
11978 state will correspond to a short jump and we return 1, because
11979 the variable part of the frag (the branch offset) is one byte
11980 long. However, we can relax a section more than once and in that
11981 case we must either set fr_subtype back to the unrelaxed state,
11982 or return the value for the appropriate branch. */
11983 return md_relax_table[fragP->fr_subtype].rlx_length;
11984 }
11985
11986 /* Called after relax() is finished.
11987
11988 In: Address of frag.
11989 fr_type == rs_machine_dependent.
11990 fr_subtype is what the address relaxed to.
11991
11992 Out: Any fixSs and constants are set up.
11993 Caller will turn frag into a ".space 0". */
11994
11995 void
11996 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
11997 fragS *fragP)
11998 {
11999 unsigned char *opcode;
12000 unsigned char *where_to_put_displacement = NULL;
12001 offsetT target_address;
12002 offsetT opcode_address;
12003 unsigned int extension = 0;
12004 offsetT displacement_from_opcode_start;
12005
12006 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
12007 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING
12008 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
12009 {
12010 /* Generate nop padding. */
12011 unsigned int size = fragP->tc_frag_data.length;
12012 if (size)
12013 {
12014 if (size > fragP->tc_frag_data.max_bytes)
12015 abort ();
12016
12017 if (flag_debug)
12018 {
12019 const char *msg;
12020 const char *branch = "branch";
12021 const char *prefix = "";
12022 fragS *padding_fragP;
12023 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
12024 == BRANCH_PREFIX)
12025 {
12026 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
12027 switch (fragP->tc_frag_data.default_prefix)
12028 {
12029 default:
12030 abort ();
12031 break;
12032 case CS_PREFIX_OPCODE:
12033 prefix = " cs";
12034 break;
12035 case DS_PREFIX_OPCODE:
12036 prefix = " ds";
12037 break;
12038 case ES_PREFIX_OPCODE:
12039 prefix = " es";
12040 break;
12041 case FS_PREFIX_OPCODE:
12042 prefix = " fs";
12043 break;
12044 case GS_PREFIX_OPCODE:
12045 prefix = " gs";
12046 break;
12047 case SS_PREFIX_OPCODE:
12048 prefix = " ss";
12049 break;
12050 }
12051 if (padding_fragP)
12052 msg = _("%s:%u: add %d%s at 0x%llx to align "
12053 "%s within %d-byte boundary\n");
12054 else
12055 msg = _("%s:%u: add additional %d%s at 0x%llx to "
12056 "align %s within %d-byte boundary\n");
12057 }
12058 else
12059 {
12060 padding_fragP = fragP;
12061 msg = _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12062 "%s within %d-byte boundary\n");
12063 }
12064
12065 if (padding_fragP)
12066 switch (padding_fragP->tc_frag_data.branch_type)
12067 {
12068 case align_branch_jcc:
12069 branch = "jcc";
12070 break;
12071 case align_branch_fused:
12072 branch = "fused jcc";
12073 break;
12074 case align_branch_jmp:
12075 branch = "jmp";
12076 break;
12077 case align_branch_call:
12078 branch = "call";
12079 break;
12080 case align_branch_indirect:
12081 branch = "indirect branch";
12082 break;
12083 case align_branch_ret:
12084 branch = "ret";
12085 break;
12086 default:
12087 break;
12088 }
12089
12090 fprintf (stdout, msg,
12091 fragP->fr_file, fragP->fr_line, size, prefix,
12092 (long long) fragP->fr_address, branch,
12093 1 << align_branch_power);
12094 }
12095 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
12096 memset (fragP->fr_opcode,
12097 fragP->tc_frag_data.default_prefix, size);
12098 else
12099 i386_generate_nops (fragP, (char *) fragP->fr_opcode,
12100 size, 0);
12101 fragP->fr_fix += size;
12102 }
12103 return;
12104 }
12105
12106 opcode = (unsigned char *) fragP->fr_opcode;
12107
12108 /* Address we want to reach in file space. */
12109 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
12110
12111 /* Address opcode resides at in file space. */
12112 opcode_address = fragP->fr_address + fragP->fr_fix;
12113
12114 /* Displacement from opcode start to fill into instruction. */
12115 displacement_from_opcode_start = target_address - opcode_address;
12116
12117 if ((fragP->fr_subtype & BIG) == 0)
12118 {
12119 /* Don't have to change opcode. */
12120 extension = 1; /* 1 opcode + 1 displacement */
12121 where_to_put_displacement = &opcode[1];
12122 }
12123 else
12124 {
12125 if (no_cond_jump_promotion
12126 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
12127 as_warn_where (fragP->fr_file, fragP->fr_line,
12128 _("long jump required"));
12129
12130 switch (fragP->fr_subtype)
12131 {
12132 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
12133 extension = 4; /* 1 opcode + 4 displacement */
12134 opcode[0] = 0xe9;
12135 where_to_put_displacement = &opcode[1];
12136 break;
12137
12138 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
12139 extension = 2; /* 1 opcode + 2 displacement */
12140 opcode[0] = 0xe9;
12141 where_to_put_displacement = &opcode[1];
12142 break;
12143
12144 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
12145 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
12146 extension = 5; /* 2 opcode + 4 displacement */
12147 opcode[1] = opcode[0] + 0x10;
12148 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
12149 where_to_put_displacement = &opcode[2];
12150 break;
12151
12152 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
12153 extension = 3; /* 2 opcode + 2 displacement */
12154 opcode[1] = opcode[0] + 0x10;
12155 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
12156 where_to_put_displacement = &opcode[2];
12157 break;
12158
12159 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
12160 extension = 4;
12161 opcode[0] ^= 1;
12162 opcode[1] = 3;
12163 opcode[2] = 0xe9;
12164 where_to_put_displacement = &opcode[3];
12165 break;
12166
12167 default:
12168 BAD_CASE (fragP->fr_subtype);
12169 break;
12170 }
12171 }
12172
12173 /* If size is less than four we are sure that the operand fits,
12174 but if it's 4, then it could be that the displacement is larger
12175 than +/- 2GB. */
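/* Equivalently: report an error unless (displacement_from_opcode_start
   - extension) lies within [-2**31, 2**31 - 1]. */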
12176 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
12177 && object_64bit
12178 && ((addressT) (displacement_from_opcode_start - extension
12179 + ((addressT) 1 << 31))
12180 > (((addressT) 2 << 31) - 1)))
12181 {
12182 as_bad_where (fragP->fr_file, fragP->fr_line,
12183 _("jump target out of range"));
12184 /* Make us emit 0. */
12185 displacement_from_opcode_start = extension;
12186 }
12187 /* Now put displacement after opcode. */
12188 md_number_to_chars ((char *) where_to_put_displacement,
12189 (valueT) (displacement_from_opcode_start - extension),
12190 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
12191 fragP->fr_fix += extension;
12192 }
12193 \f
12194 /* Apply a fixup (fixP) to segment data, once it has been determined
12195 by our caller that we have all the info we need to fix it up.
12196
12197 Parameter valP is the pointer to the value of the bits.
12198
12199 On the 386, immediates, displacements, and data pointers are all in
12200 the same (little-endian) format, so we don't need to care about which
12201 we are handling. */
12202
12203 void
12204 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
12205 {
12206 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
12207 valueT value = *valP;
12208
12209 #if !defined (TE_Mach)
12210 if (fixP->fx_pcrel)
12211 {
12212 switch (fixP->fx_r_type)
12213 {
12214 default:
12215 break;
12216
12217 case BFD_RELOC_64:
12218 fixP->fx_r_type = BFD_RELOC_64_PCREL;
12219 break;
12220 case BFD_RELOC_32:
12221 case BFD_RELOC_X86_64_32S:
12222 fixP->fx_r_type = BFD_RELOC_32_PCREL;
12223 break;
12224 case BFD_RELOC_16:
12225 fixP->fx_r_type = BFD_RELOC_16_PCREL;
12226 break;
12227 case BFD_RELOC_8:
12228 fixP->fx_r_type = BFD_RELOC_8_PCREL;
12229 break;
12230 }
12231 }
12232
12233 if (fixP->fx_addsy != NULL
12234 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
12235 || fixP->fx_r_type == BFD_RELOC_64_PCREL
12236 || fixP->fx_r_type == BFD_RELOC_16_PCREL
12237 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
12238 && !use_rela_relocations)
12239 {
12240 /* This is a hack. There should be a better way to handle this.
12241 This covers for the fact that bfd_install_relocation will
12242 subtract the current location (for partial_inplace, PC relative
12243 relocations); see more below. */
12244 #ifndef OBJ_AOUT
12245 if (IS_ELF
12246 #ifdef TE_PE
12247 || OUTPUT_FLAVOR == bfd_target_coff_flavour
12248 #endif
12249 )
12250 value += fixP->fx_where + fixP->fx_frag->fr_address;
12251 #endif
12252 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12253 if (IS_ELF)
12254 {
12255 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
12256
12257 if ((sym_seg == seg
12258 || (symbol_section_p (fixP->fx_addsy)
12259 && sym_seg != absolute_section))
12260 && !generic_force_reloc (fixP))
12261 {
12262 /* Yes, we add the values in twice. This is because
12263 bfd_install_relocation subtracts them out again. I think
12264 bfd_install_relocation is broken, but I don't dare change
12265 it. FIXME. */
12266 value += fixP->fx_where + fixP->fx_frag->fr_address;
12267 }
12268 }
12269 #endif
12270 #if defined (OBJ_COFF) && defined (TE_PE)
12271 /* For some reason, the PE format does not store a
12272 section address offset for a PC relative symbol. */
12273 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
12274 || S_IS_WEAK (fixP->fx_addsy))
12275 value += md_pcrel_from (fixP);
12276 #endif
12277 }
12278 #if defined (OBJ_COFF) && defined (TE_PE)
12279 if (fixP->fx_addsy != NULL
12280 && S_IS_WEAK (fixP->fx_addsy)
12281 /* PR 16858: Do not modify weak function references. */
12282 && ! fixP->fx_pcrel)
12283 {
12284 #if !defined (TE_PEP)
12285 /* For x86 PE weak function symbols are neither PC-relative
12286 nor do they set S_IS_FUNCTION. So the only reliable way
12287 to detect them is to check the flags of their containing
12288 section. */
12289 if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
12290 && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
12291 ;
12292 else
12293 #endif
12294 value -= S_GET_VALUE (fixP->fx_addsy);
12295 }
12296 #endif
12297
12298 /* Fix a few things - the dynamic linker expects certain values here,
12299 and we must not disappoint it. */
12300 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12301 if (IS_ELF && fixP->fx_addsy)
12302 switch (fixP->fx_r_type)
12303 {
12304 case BFD_RELOC_386_PLT32:
12305 case BFD_RELOC_X86_64_PLT32:
12306 /* Make the jump instruction point to the address of the operand.
12307 At runtime we merely add the offset to the actual PLT entry.
12308 NB: Subtract the offset size only for jump instructions. */
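/* A PC-relative reloc resolves as S + A - P, where P is the address
   of the 4-byte field; an addend A of -4 therefore yields the
   displacement from the end of the instruction to the PLT entry. */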
12309 if (fixP->fx_pcrel)
12310 value = -4;
12311 break;
12312
12313 case BFD_RELOC_386_TLS_GD:
12314 case BFD_RELOC_386_TLS_LDM:
12315 case BFD_RELOC_386_TLS_IE_32:
12316 case BFD_RELOC_386_TLS_IE:
12317 case BFD_RELOC_386_TLS_GOTIE:
12318 case BFD_RELOC_386_TLS_GOTDESC:
12319 case BFD_RELOC_X86_64_TLSGD:
12320 case BFD_RELOC_X86_64_TLSLD:
12321 case BFD_RELOC_X86_64_GOTTPOFF:
12322 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
12323 value = 0; /* Fully resolved at runtime. No addend. */
12324 /* Fallthrough */
12325 case BFD_RELOC_386_TLS_LE:
12326 case BFD_RELOC_386_TLS_LDO_32:
12327 case BFD_RELOC_386_TLS_LE_32:
12328 case BFD_RELOC_X86_64_DTPOFF32:
12329 case BFD_RELOC_X86_64_DTPOFF64:
12330 case BFD_RELOC_X86_64_TPOFF32:
12331 case BFD_RELOC_X86_64_TPOFF64:
12332 S_SET_THREAD_LOCAL (fixP->fx_addsy);
12333 break;
12334
12335 case BFD_RELOC_386_TLS_DESC_CALL:
12336 case BFD_RELOC_X86_64_TLSDESC_CALL:
12337 value = 0; /* Fully resolved at runtime. No addend. */
12338 S_SET_THREAD_LOCAL (fixP->fx_addsy);
12339 fixP->fx_done = 0;
12340 return;
12341
12342 case BFD_RELOC_VTABLE_INHERIT:
12343 case BFD_RELOC_VTABLE_ENTRY:
12344 fixP->fx_done = 0;
12345 return;
12346
12347 default:
12348 break;
12349 }
12350 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12351 *valP = value;
12352 #endif /* !defined (TE_Mach) */
12353
12354 /* Are we finished with this relocation now? */
12355 if (fixP->fx_addsy == NULL)
12356 fixP->fx_done = 1;
12357 #if defined (OBJ_COFF) && defined (TE_PE)
12358 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
12359 {
12360 fixP->fx_done = 0;
12361 /* Remember value for tc_gen_reloc. */
12362 fixP->fx_addnumber = value;
12363 /* Clear out the frag for now. */
12364 value = 0;
12365 }
12366 #endif
12367 else if (use_rela_relocations)
12368 {
12369 fixP->fx_no_overflow = 1;
12370 /* Remember value for tc_gen_reloc. */
12371 fixP->fx_addnumber = value;
12372 value = 0;
12373 }
12374
12375 md_number_to_chars (p, value, fixP->fx_size);
12376 }
12377 \f
12378 const char *
12379 md_atof (int type, char *litP, int *sizeP)
12380 {
12381 /* This outputs the LITTLENUMs in REVERSE order,
12382 in accord with the little-endian 386. */
12383 return ieee_md_atof (type, litP, sizeP, FALSE);
12384 }
12385 \f
12386 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
12387
12388 static char *
12389 output_invalid (int c)
12390 {
12391 if (ISPRINT (c))
12392 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
12393 "'%c'", c);
12394 else
12395 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
12396 "(0x%x)", (unsigned char) c);
12397 return output_invalid_buf;
12398 }
12399
12400 /* Verify that @r can be used in the current context. */
12401
12402 static bfd_boolean
check_register (const reg_entry *r)
12403 {
12404 if (allow_pseudo_reg)
12405 return TRUE;
12406
12407 if (operand_type_all_zero (&r->reg_type))
12408 return FALSE;
12409
12410 if ((r->reg_type.bitfield.dword
12411 || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
12412 || r->reg_type.bitfield.class == RegCR
12413 || r->reg_type.bitfield.class == RegDR)
12414 && !cpu_arch_flags.bitfield.cpui386)
12415 return FALSE;
12416
12417 if (r->reg_type.bitfield.class == RegTR
12418 && (flag_code == CODE_64BIT
12419 || !cpu_arch_flags.bitfield.cpui386
12420 || cpu_arch_isa_flags.bitfield.cpui586
12421 || cpu_arch_isa_flags.bitfield.cpui686))
12422 return FALSE;
12423
12424 if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
12425 return FALSE;
12426
12427 if (!cpu_arch_flags.bitfield.cpuavx512f)
12428 {
12429 if (r->reg_type.bitfield.zmmword
12430 || r->reg_type.bitfield.class == RegMask)
12431 return FALSE;
12432
12433 if (!cpu_arch_flags.bitfield.cpuavx)
12434 {
12435 if (r->reg_type.bitfield.ymmword)
12436 return FALSE;
12437
12438 if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
12439 return FALSE;
12440 }
12441 }
12442
12443 if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
12444 return FALSE;
12445
12446 /* Don't allow the fake index register unless allow_index_reg is non-zero. */
12447 if (!allow_index_reg && r->reg_num == RegIZ)
12448 return FALSE;
12449
12450 /* Upper 16 vector registers are only available with VREX in 64bit
12451 mode, and require EVEX encoding. */
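/* E.g. %xmm16 silently selects EVEX encoding here; combined with an
   explicit request for a different encoding it becomes an error. */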
12452 if (r->reg_flags & RegVRex)
12453 {
12454 if (!cpu_arch_flags.bitfield.cpuavx512f
12455 || flag_code != CODE_64BIT)
12456 return FALSE;
12457
12458 if (i.vec_encoding == vex_encoding_default)
12459 i.vec_encoding = vex_encoding_evex;
12460 else if (i.vec_encoding != vex_encoding_evex)
12461 i.vec_encoding = vex_encoding_error;
12462 }
12463
12464 if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
12465 && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
12466 && flag_code != CODE_64BIT)
12467 return FALSE;
12468
12469 if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
12470 && !intel_syntax)
12471 return FALSE;
12472
12473 return TRUE;
12474 }
12475
12476 /* REG_STRING starts *before* REGISTER_PREFIX. */
12477
12478 static const reg_entry *
12479 parse_real_register (char *reg_string, char **end_op)
12480 {
12481 char *s = reg_string;
12482 char *p;
12483 char reg_name_given[MAX_REG_NAME_SIZE + 1];
12484 const reg_entry *r;
12485
12486 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12487 if (*s == REGISTER_PREFIX)
12488 ++s;
12489
12490 if (is_space_char (*s))
12491 ++s;
12492
12493 p = reg_name_given;
12494 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
12495 {
12496 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
12497 return (const reg_entry *) NULL;
12498 s++;
12499 }
12500
12501 /* For naked regs, make sure that we are not dealing with an identifier.
12502 This prevents confusing an identifier like `eax_var' with register
12503 `eax'. */
12504 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
12505 return (const reg_entry *) NULL;
12506
12507 *end_op = s;
12508
12509 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
12510
12511 /* Handle floating point regs, allowing spaces in the (i) part. */
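/* E.g. "%st ( 2 )" is accepted below and resolves to st(2). */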
12512 if (r == i386_regtab /* %st is first entry of table */)
12513 {
12514 if (!cpu_arch_flags.bitfield.cpu8087
12515 && !cpu_arch_flags.bitfield.cpu287
12516 && !cpu_arch_flags.bitfield.cpu387
12517 && !allow_pseudo_reg)
12518 return (const reg_entry *) NULL;
12519
12520 if (is_space_char (*s))
12521 ++s;
12522 if (*s == '(')
12523 {
12524 ++s;
12525 if (is_space_char (*s))
12526 ++s;
12527 if (*s >= '0' && *s <= '7')
12528 {
12529 int fpr = *s - '0';
12530 ++s;
12531 if (is_space_char (*s))
12532 ++s;
12533 if (*s == ')')
12534 {
12535 *end_op = s + 1;
12536 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
12537 know (r);
12538 return r + fpr;
12539 }
12540 }
12541 /* We have "%st(" then garbage. */
12542 return (const reg_entry *) NULL;
12543 }
12544 }
12545
12546 return r && check_register (r) ? r : NULL;
12547 }
12548
12549 /* REG_STRING starts *before* REGISTER_PREFIX. */
12550
12551 static const reg_entry *
12552 parse_register (char *reg_string, char **end_op)
12553 {
12554 const reg_entry *r;
12555
12556 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
12557 r = parse_real_register (reg_string, end_op);
12558 else
12559 r = NULL;
12560 if (!r)
12561 {
12562 char *save = input_line_pointer;
12563 char c;
12564 symbolS *symbolP;
12565
12566 input_line_pointer = reg_string;
12567 c = get_symbol_name (&reg_string);
12568 symbolP = symbol_find (reg_string);
12569 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
12570 {
12571 const expressionS *e = symbol_get_value_expression (symbolP);
12572
12573 know (e->X_op == O_register);
12574 know (e->X_add_number >= 0
12575 && (valueT) e->X_add_number < i386_regtab_size);
12576 r = i386_regtab + e->X_add_number;
12577 if (!check_register (r))
12578 {
12579 as_bad (_("register '%s%s' cannot be used here"),
12580 register_prefix, r->reg_name);
12581 r = &bad_reg;
12582 }
12583 *end_op = input_line_pointer;
12584 }
12585 *input_line_pointer = c;
12586 input_line_pointer = save;
12587 }
12588 return r;
12589 }
12590
12591 int
12592 i386_parse_name (char *name, expressionS *e, char *nextcharP)
12593 {
12594 const reg_entry *r;
12595 char *end = input_line_pointer;
12596
12597 *end = *nextcharP;
12598 r = parse_register (name, &input_line_pointer);
12599 if (r && end <= input_line_pointer)
12600 {
12601 *nextcharP = *input_line_pointer;
12602 *input_line_pointer = 0;
12603 if (r != &bad_reg)
12604 {
12605 e->X_op = O_register;
12606 e->X_add_number = r - i386_regtab;
12607 }
12608 else
12609 e->X_op = O_illegal;
12610 return 1;
12611 }
12612 input_line_pointer = end;
12613 *end = 0;
12614 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
12615 }
12616
12617 void
12618 md_operand (expressionS *e)
12619 {
12620 char *end;
12621 const reg_entry *r;
12622
12623 switch (*input_line_pointer)
12624 {
12625 case REGISTER_PREFIX:
12626 r = parse_real_register (input_line_pointer, &end);
12627 if (r)
12628 {
12629 e->X_op = O_register;
12630 e->X_add_number = r - i386_regtab;
12631 input_line_pointer = end;
12632 }
12633 break;
12634
12635 case '[':
12636 gas_assert (intel_syntax);
12637 end = input_line_pointer++;
12638 expression (e);
12639 if (*input_line_pointer == ']')
12640 {
12641 ++input_line_pointer;
12642 e->X_op_symbol = make_expr_symbol (e);
12643 e->X_add_symbol = NULL;
12644 e->X_add_number = 0;
12645 e->X_op = O_index;
12646 }
12647 else
12648 {
12649 e->X_op = O_absent;
12650 input_line_pointer = end;
12651 }
12652 break;
12653 }
12654 }
12655
12656 \f
12657 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12658 const char *md_shortopts = "kVQ:sqnO::";
12659 #else
12660 const char *md_shortopts = "qnO::";
12661 #endif
12662
12663 #define OPTION_32 (OPTION_MD_BASE + 0)
12664 #define OPTION_64 (OPTION_MD_BASE + 1)
12665 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12666 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12667 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12668 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12669 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12670 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12671 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12672 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12673 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12674 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12675 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12676 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12677 #define OPTION_X32 (OPTION_MD_BASE + 14)
12678 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12679 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12680 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12681 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12682 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12683 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12684 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12685 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12686 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12687 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12688 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12689 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12690 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12691 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12692 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12693 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12694 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12695 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12696 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12697
12698 struct option md_longopts[] =
12699 {
12700 {"32", no_argument, NULL, OPTION_32},
12701 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12702 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12703 {"64", no_argument, NULL, OPTION_64},
12704 #endif
12705 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12706 {"x32", no_argument, NULL, OPTION_X32},
12707 {"mshared", no_argument, NULL, OPTION_MSHARED},
12708 {"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
12709 #endif
12710 {"divide", no_argument, NULL, OPTION_DIVIDE},
12711 {"march", required_argument, NULL, OPTION_MARCH},
12712 {"mtune", required_argument, NULL, OPTION_MTUNE},
12713 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
12714 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
12715 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
12716 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
12717 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
12718 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
12719 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
12720 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
12721 {"mvexwig", required_argument, NULL, OPTION_MVEXWIG},
12722 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
12723 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
12724 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
12725 # if defined (TE_PE) || defined (TE_PEP)
12726 {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
12727 #endif
12728 {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
12729 {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
12730 {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
12731 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
12732 {"malign-branch-boundary", required_argument, NULL, OPTION_MALIGN_BRANCH_BOUNDARY},
12733 {"malign-branch-prefix-size", required_argument, NULL, OPTION_MALIGN_BRANCH_PREFIX_SIZE},
12734 {"malign-branch", required_argument, NULL, OPTION_MALIGN_BRANCH},
12735 {"mbranches-within-32B-boundaries", no_argument, NULL, OPTION_MBRANCHES_WITH_32B_BOUNDARIES},
12736 {"mlfence-after-load", required_argument, NULL, OPTION_MLFENCE_AFTER_LOAD},
12737 {"mlfence-before-indirect-branch", required_argument, NULL,
12738 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH},
12739 {"mlfence-before-ret", required_argument, NULL, OPTION_MLFENCE_BEFORE_RET},
12740 {"mamd64", no_argument, NULL, OPTION_MAMD64},
12741 {"mintel64", no_argument, NULL, OPTION_MINTEL64},
12742 {NULL, no_argument, NULL, 0}
12743 };
12744 size_t md_longopts_size = sizeof (md_longopts);
12745
12746 int
12747 md_parse_option (int c, const char *arg)
12748 {
12749 unsigned int j;
12750 char *arch, *next, *saved, *type;
12751
12752 switch (c)
12753 {
12754 case 'n':
12755 optimize_align_code = 0;
12756 break;
12757
12758 case 'q':
12759 quiet_warnings = 1;
12760 break;
12761
12762 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12763 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12764 should be emitted or not. FIXME: Not implemented. */
12765 case 'Q':
12766 if ((arg[0] != 'y' && arg[0] != 'n') || arg[1])
12767 return 0;
12768 break;
12769
12770 /* -V: SVR4 argument to print version ID. */
12771 case 'V':
12772 print_version_id ();
12773 break;
12774
12775 /* -k: Ignore for FreeBSD compatibility. */
12776 case 'k':
12777 break;
12778
12779 case 's':
12780 /* -s: On i386 Solaris, this tells the native assembler to use
12781 .stab instead of .stab.excl. We always use .stab anyhow. */
12782 break;
12783
12784 case OPTION_MSHARED:
12785 shared = 1;
12786 break;
12787
12788 case OPTION_X86_USED_NOTE:
12789 if (strcasecmp (arg, "yes") == 0)
12790 x86_used_note = 1;
12791 else if (strcasecmp (arg, "no") == 0)
12792 x86_used_note = 0;
12793 else
12794 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
12795 break;
12796
12798 #endif
12799 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12800 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12801 case OPTION_64:
12802 {
12803 const char **list, **l;
12804
12805 list = bfd_target_list ();
12806 for (l = list; *l != NULL; l++)
12807 if (CONST_STRNEQ (*l, "elf64-x86-64")
12808 || strcmp (*l, "coff-x86-64") == 0
12809 || strcmp (*l, "pe-x86-64") == 0
12810 || strcmp (*l, "pei-x86-64") == 0
12811 || strcmp (*l, "mach-o-x86-64") == 0)
12812 {
12813 default_arch = "x86_64";
12814 break;
12815 }
12816 if (*l == NULL)
12817 as_fatal (_("no compiled in support for x86_64"));
12818 free (list);
12819 }
12820 break;
12821 #endif
12822
12823 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12824 case OPTION_X32:
12825 if (IS_ELF)
12826 {
12827 const char **list, **l;
12828
12829 list = bfd_target_list ();
12830 for (l = list; *l != NULL; l++)
12831 if (CONST_STRNEQ (*l, "elf32-x86-64"))
12832 {
12833 default_arch = "x86_64:32";
12834 break;
12835 }
12836 if (*l == NULL)
12837 as_fatal (_("no compiled in support for 32bit x86_64"));
12838 free (list);
12839 }
12840 else
12841 as_fatal (_("32bit x86_64 is only supported for ELF"));
12842 break;
12843 #endif
12844
12845 case OPTION_32:
12846 default_arch = "i386";
12847 break;
12848
12849 case OPTION_DIVIDE:
12850 #ifdef SVR4_COMMENT_CHARS
12851 {
12852 char *n, *t;
12853 const char *s;
12854
12855 n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
12856 t = n;
12857 for (s = i386_comment_chars; *s != '\0'; s++)
12858 if (*s != '/')
12859 *t++ = *s;
12860 *t = '\0';
12861 i386_comment_chars = n;
12862 }
12863 #endif
12864 break;
12865
12866 case OPTION_MARCH:
12867 saved = xstrdup (arg);
12868 arch = saved;
12869 /* Allow -march=+nosse. */
12870 if (*arch == '+')
12871 arch++;
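/* E.g. "-march=i686+sse2" (illustrative combination) selects the
   i686 processor, then enables the SSE2 extension; each
   '+'-separated token is matched against cpu_arch[] or, for "no"
   prefixed names, cpu_noarch[]. */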
12872 do
12873 {
12874 if (*arch == '.')
12875 as_fatal (_("invalid -march= option: `%s'"), arg);
12876 next = strchr (arch, '+');
12877 if (next)
12878 *next++ = '\0';
12879 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12880 {
12881 if (strcmp (arch, cpu_arch [j].name) == 0)
12882 {
12883 /* Processor. */
12884 if (! cpu_arch[j].flags.bitfield.cpui386)
12885 continue;
12886
12887 cpu_arch_name = cpu_arch[j].name;
12888 cpu_sub_arch_name = NULL;
12889 cpu_arch_flags = cpu_arch[j].flags;
12890 cpu_arch_isa = cpu_arch[j].type;
12891 cpu_arch_isa_flags = cpu_arch[j].flags;
12892 if (!cpu_arch_tune_set)
12893 {
12894 cpu_arch_tune = cpu_arch_isa;
12895 cpu_arch_tune_flags = cpu_arch_isa_flags;
12896 }
12897 break;
12898 }
12899 else if (*cpu_arch [j].name == '.'
12900 && strcmp (arch, cpu_arch [j].name + 1) == 0)
12901 {
12902 /* ISA extension. */
12903 i386_cpu_flags flags;
12904
12905 flags = cpu_flags_or (cpu_arch_flags,
12906 cpu_arch[j].flags);
12907
12908 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12909 {
12910 if (cpu_sub_arch_name)
12911 {
12912 char *name = cpu_sub_arch_name;
12913 cpu_sub_arch_name = concat (name,
12914 cpu_arch[j].name,
12915 (const char *) NULL);
12916 free (name);
12917 }
12918 else
12919 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
12920 cpu_arch_flags = flags;
12921 cpu_arch_isa_flags = flags;
12922 }
12923 else
12924 cpu_arch_isa_flags
12925 = cpu_flags_or (cpu_arch_isa_flags,
12926 cpu_arch[j].flags);
12927 break;
12928 }
12929 }
12930
12931 if (j >= ARRAY_SIZE (cpu_arch))
12932 {
12933 /* Disable an ISA extension. */
12934 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
12935 if (strcmp (arch, cpu_noarch [j].name) == 0)
12936 {
12937 i386_cpu_flags flags;
12938
12939 flags = cpu_flags_and_not (cpu_arch_flags,
12940 cpu_noarch[j].flags);
12941 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12942 {
12943 if (cpu_sub_arch_name)
12944 {
12945 char *name = cpu_sub_arch_name;
12946 cpu_sub_arch_name = concat (arch,
12947 (const char *) NULL);
12948 free (name);
12949 }
12950 else
12951 cpu_sub_arch_name = xstrdup (arch);
12952 cpu_arch_flags = flags;
12953 cpu_arch_isa_flags = flags;
12954 }
12955 break;
12956 }
12957
12958 if (j >= ARRAY_SIZE (cpu_noarch))
12959 j = ARRAY_SIZE (cpu_arch);
12960 }
12961
12962 if (j >= ARRAY_SIZE (cpu_arch))
12963 as_fatal (_("invalid -march= option: `%s'"), arg);
12964
12965 arch = next;
12966 }
12967 while (next != NULL);
12968 free (saved);
12969 break;
12970
12971 case OPTION_MTUNE:
12972 if (*arg == '.')
12973 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12974 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12975 {
12976 if (strcmp (arg, cpu_arch [j].name) == 0)
12977 {
12978 cpu_arch_tune_set = 1;
12979 cpu_arch_tune = cpu_arch [j].type;
12980 cpu_arch_tune_flags = cpu_arch[j].flags;
12981 break;
12982 }
12983 }
12984 if (j >= ARRAY_SIZE (cpu_arch))
12985 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12986 break;
12987
12988 case OPTION_MMNEMONIC:
12989 if (strcasecmp (arg, "att") == 0)
12990 intel_mnemonic = 0;
12991 else if (strcasecmp (arg, "intel") == 0)
12992 intel_mnemonic = 1;
12993 else
12994 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
12995 break;
12996
12997 case OPTION_MSYNTAX:
12998 if (strcasecmp (arg, "att") == 0)
12999 intel_syntax = 0;
13000 else if (strcasecmp (arg, "intel") == 0)
13001 intel_syntax = 1;
13002 else
13003 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
13004 break;
13005
13006 case OPTION_MINDEX_REG:
13007 allow_index_reg = 1;
13008 break;
13009
13010 case OPTION_MNAKED_REG:
13011 allow_naked_reg = 1;
13012 break;
13013
13014 case OPTION_MSSE2AVX:
13015 sse2avx = 1;
13016 break;
13017
13018 case OPTION_MSSE_CHECK:
13019 if (strcasecmp (arg, "error") == 0)
13020 sse_check = check_error;
13021 else if (strcasecmp (arg, "warning") == 0)
13022 sse_check = check_warning;
13023 else if (strcasecmp (arg, "none") == 0)
13024 sse_check = check_none;
13025 else
13026 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
13027 break;
13028
13029 case OPTION_MOPERAND_CHECK:
13030 if (strcasecmp (arg, "error") == 0)
13031 operand_check = check_error;
13032 else if (strcasecmp (arg, "warning") == 0)
13033 operand_check = check_warning;
13034 else if (strcasecmp (arg, "none") == 0)
13035 operand_check = check_none;
13036 else
13037 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
13038 break;
13039
13040 case OPTION_MAVXSCALAR:
13041 if (strcasecmp (arg, "128") == 0)
13042 avxscalar = vex128;
13043 else if (strcasecmp (arg, "256") == 0)
13044 avxscalar = vex256;
13045 else
13046 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
13047 break;
13048
13049 case OPTION_MVEXWIG:
13050 if (strcmp (arg, "0") == 0)
13051 vexwig = vexw0;
13052 else if (strcmp (arg, "1") == 0)
13053 vexwig = vexw1;
13054 else
13055 as_fatal (_("invalid -mvexwig= option: `%s'"), arg);
13056 break;
13057
13058 case OPTION_MADD_BND_PREFIX:
13059 add_bnd_prefix = 1;
13060 break;
13061
13062 case OPTION_MEVEXLIG:
13063 if (strcmp (arg, "128") == 0)
13064 evexlig = evexl128;
13065 else if (strcmp (arg, "256") == 0)
13066 evexlig = evexl256;
13067 else if (strcmp (arg, "512") == 0)
13068 evexlig = evexl512;
13069 else
13070 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
13071 break;
13072
13073 case OPTION_MEVEXRCIG:
13074 if (strcmp (arg, "rne") == 0)
13075 evexrcig = rne;
13076 else if (strcmp (arg, "rd") == 0)
13077 evexrcig = rd;
13078 else if (strcmp (arg, "ru") == 0)
13079 evexrcig = ru;
13080 else if (strcmp (arg, "rz") == 0)
13081 evexrcig = rz;
13082 else
13083 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
13084 break;
13085
13086 case OPTION_MEVEXWIG:
13087 if (strcmp (arg, "0") == 0)
13088 evexwig = evexw0;
13089 else if (strcmp (arg, "1") == 0)
13090 evexwig = evexw1;
13091 else
13092 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
13093 break;
13094
13095 # if defined (TE_PE) || defined (TE_PEP)
13096 case OPTION_MBIG_OBJ:
13097 use_big_obj = 1;
13098 break;
13099 #endif
13100
13101 case OPTION_MOMIT_LOCK_PREFIX:
13102 if (strcasecmp (arg, "yes") == 0)
13103 omit_lock_prefix = 1;
13104 else if (strcasecmp (arg, "no") == 0)
13105 omit_lock_prefix = 0;
13106 else
13107 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
13108 break;
13109
13110 case OPTION_MFENCE_AS_LOCK_ADD:
13111 if (strcasecmp (arg, "yes") == 0)
13112 avoid_fence = 1;
13113 else if (strcasecmp (arg, "no") == 0)
13114 avoid_fence = 0;
13115 else
13116 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
13117 break;
13118
13119 case OPTION_MLFENCE_AFTER_LOAD:
13120 if (strcasecmp (arg, "yes") == 0)
13121 lfence_after_load = 1;
13122 else if (strcasecmp (arg, "no") == 0)
13123 lfence_after_load = 0;
13124 else
13125 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg);
13126 break;
13127
13128 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH:
13129 if (strcasecmp (arg, "all") == 0)
13130 {
13131 lfence_before_indirect_branch = lfence_branch_all;
13132 if (lfence_before_ret == lfence_before_ret_none)
13133 lfence_before_ret = lfence_before_ret_shl;
13134 }
13135 else if (strcasecmp (arg, "memory") == 0)
13136 lfence_before_indirect_branch = lfence_branch_memory;
13137 else if (strcasecmp (arg, "register") == 0)
13138 lfence_before_indirect_branch = lfence_branch_register;
13139 else if (strcasecmp (arg, "none") == 0)
13140 lfence_before_indirect_branch = lfence_branch_none;
13141 else
13142 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13143 arg);
13144 break;
13145
13146 case OPTION_MLFENCE_BEFORE_RET:
13147 if (strcasecmp (arg, "or") == 0)
13148 lfence_before_ret = lfence_before_ret_or;
13149 else if (strcasecmp (arg, "not") == 0)
13150 lfence_before_ret = lfence_before_ret_not;
13151 else if (strcasecmp (arg, "shl") == 0 || strcasecmp (arg, "yes") == 0)
13152 lfence_before_ret = lfence_before_ret_shl;
13153 else if (strcasecmp (arg, "none") == 0)
13154 lfence_before_ret = lfence_before_ret_none;
13155 else
13156 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13157 arg);
13158 break;
13159
13160 case OPTION_MRELAX_RELOCATIONS:
13161 if (strcasecmp (arg, "yes") == 0)
13162 generate_relax_relocations = 1;
13163 else if (strcasecmp (arg, "no") == 0)
13164 generate_relax_relocations = 0;
13165 else
13166 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
13167 break;
13168
13169 case OPTION_MALIGN_BRANCH_BOUNDARY:
13170 {
13171 char *end;
13172 long int align = strtoul (arg, &end, 0);
13173 if (*end == '\0')
13174 {
13175 if (align == 0)
13176 {
13177 align_branch_power = 0;
13178 break;
13179 }
13180 else if (align >= 16)
13181 {
13182 int align_power;
13183 for (align_power = 0;
13184 (align & 1) == 0;
13185 align >>= 1, align_power++)
13186 continue;
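/* E.g. "-malign-branch-boundary=32" yields align_branch_power == 5;
   a value that is not a power of two leaves align != 1 here and is
   rejected below. */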
13187 /* Limit alignment power to 31. */
13188 if (align == 1 && align_power < 32)
13189 {
13190 align_branch_power = align_power;
13191 break;
13192 }
13193 }
13194 }
13195 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg);
13196 }
13197 break;
13198
13199 case OPTION_MALIGN_BRANCH_PREFIX_SIZE:
13200 {
13201 char *end;
13202 int align = strtoul (arg, &end, 0);
13203 /* Some processors only support 5 prefixes. */
13204 if (*end == '\0' && align >= 0 && align < 6)
13205 {
13206 align_branch_prefix_size = align;
13207 break;
13208 }
13209 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13210 arg);
13211 }
13212 break;
13213
13214 case OPTION_MALIGN_BRANCH:
13215 align_branch = 0;
13216 saved = xstrdup (arg);
13217 type = saved;
13218 do
13219 {
13220 next = strchr (type, '+');
13221 if (next)
13222 *next++ = '\0';
13223 if (strcasecmp (type, "jcc") == 0)
13224 align_branch |= align_branch_jcc_bit;
13225 else if (strcasecmp (type, "fused") == 0)
13226 align_branch |= align_branch_fused_bit;
13227 else if (strcasecmp (type, "jmp") == 0)
13228 align_branch |= align_branch_jmp_bit;
13229 else if (strcasecmp (type, "call") == 0)
13230 align_branch |= align_branch_call_bit;
13231 else if (strcasecmp (type, "ret") == 0)
13232 align_branch |= align_branch_ret_bit;
13233 else if (strcasecmp (type, "indirect") == 0)
13234 align_branch |= align_branch_indirect_bit;
13235 else
13236 as_fatal (_("invalid -malign-branch= option: `%s'"), arg);
13237 type = next;
13238 }
13239 while (next != NULL);
13240 free (saved);
13241 break;
13242
13243 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES:
13244 align_branch_power = 5;
13245 align_branch_prefix_size = 5;
13246 align_branch = (align_branch_jcc_bit
13247 | align_branch_fused_bit
13248 | align_branch_jmp_bit);
13249 break;
13250
13251 case OPTION_MAMD64:
13252 isa64 = amd64;
13253 break;
13254
13255 case OPTION_MINTEL64:
13256 isa64 = intel64;
13257 break;
13258
13259 case 'O':
13260 if (arg == NULL)
13261 {
13262 optimize = 1;
13263 /* Turn off -Os. */
13264 optimize_for_space = 0;
13265 }
13266 else if (*arg == 's')
13267 {
13268 optimize_for_space = 1;
13269 /* Turn on all encoding optimizations. */
13270 optimize = INT_MAX;
13271 }
13272 else
13273 {
13274 optimize = atoi (arg);
13275 /* Turn off -Os. */
13276 optimize_for_space = 0;
13277 }
13278 break;
13279
13280 default:
13281 return 0;
13282 }
13283 return 1;
13284 }
13285
13286 #define MESSAGE_TEMPLATE \
13287 " "
13288
13289 static char *
13290 output_message (FILE *stream, char *p, char *message, char *start,
13291 int *left_p, const char *name, int len)
13292 {
13293 int size = sizeof (MESSAGE_TEMPLATE);
13294 int left = *left_p;
13295
13296 /* Reserve 2 spaces for ", " or ",\0" */
13297 left -= len + 2;
13298
13299 /* Check if there is any room. */
13300 if (left >= 0)
13301 {
13302 if (p != start)
13303 {
13304 *p++ = ',';
13305 *p++ = ' ';
13306 }
13307 p = mempcpy (p, name, len);
13308 }
13309 else
13310 {
13311 /* Output the current message now and start a new one. */
13312 *p++ = ',';
13313 *p = '\0';
13314 fprintf (stream, "%s\n", message);
13315 p = start;
13316 left = size - (start - message) - len - 2;
13317
13318 gas_assert (left >= 0);
13319
13320 p = mempcpy (p, name, len);
13321 }
13322
13323 *left_p = left;
13324 return p;
13325 }
13326
13327 static void
13328 show_arch (FILE *stream, int ext, int check)
13329 {
13330 static char message[] = MESSAGE_TEMPLATE;
13331 char *start = message + 27;
13332 char *p;
13333 int size = sizeof (MESSAGE_TEMPLATE);
13334 int left;
13335 const char *name;
13336 int len;
13337 unsigned int j;
13338
13339 p = start;
13340 left = size - (start - message);
13341 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
13342 {
13343 /* Should it be skipped? */
13344 if (cpu_arch [j].skip)
13345 continue;
13346
13347 name = cpu_arch [j].name;
13348 len = cpu_arch [j].len;
13349 if (*name == '.')
13350 {
13351 /* It is an extension. Skip if we aren't asked to show it. */
13352 if (ext)
13353 {
13354 name++;
13355 len--;
13356 }
13357 else
13358 continue;
13359 }
13360 else if (ext)
13361 {
13362 /* It is an processor. Skip if we show only extension. */
13363 continue;
13364 }
13365 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
13366 {
13367 /* It is an impossible processor - skip. */
13368 continue;
13369 }
13370
13371 p = output_message (stream, p, message, start, &left, name, len);
13372 }
13373
13374 /* Display disabled extensions. */
13375 if (ext)
13376 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
13377 {
13378 name = cpu_noarch [j].name;
13379 len = cpu_noarch [j].len;
13380 p = output_message (stream, p, message, start, &left, name,
13381 len);
13382 }
13383
13384 *p = '\0';
13385 fprintf (stream, "%s\n", message);
13386 }
13387
13388 void
13389 md_show_usage (FILE *stream)
13390 {
13391 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13392 fprintf (stream, _("\
13393 -Qy, -Qn ignored\n\
13394 -V print assembler version number\n\
13395 -k ignored\n"));
13396 #endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
                      || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION; CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is a combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU; CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning] (default: warning)\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning] (default: warning)\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256] (default: 128)\n\
                          encode scalar AVX instructions with specific vector\n\
                          length\n"));
  fprintf (stream, _("\
  -mvexwig=[0|1] (default: 0)\n\
                          encode VEX instructions with specific VEX.W value\n\
                          for VEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] (default: 128)\n\
                          encode scalar EVEX instructions with specific vector\n\
                          length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1] (default: 0)\n\
                          encode EVEX instructions with specific EVEX.W value\n\
                          for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                          for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel] "));
  if (SYSV386_COMPAT)
    fprintf (stream, _("(default: att)\n"));
  else
    fprintf (stream, _("(default: intel)\n"));
  fprintf (stream, _("\
                          use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel] (default: att)\n\
                          use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
  fprintf (stream, _("\
  -mx86-used-note=[no|yes] "));
  if (DEFAULT_X86_USED_NOTE)
    fprintf (stream, _("(default: yes)\n"));
  else
    fprintf (stream, _("(default: no)\n"));
  fprintf (stream, _("\
                          generate x86 used ISA and feature properties\n"));
#endif
#if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes] (default: no)\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes] (default: no)\n\
                          encode lfence, mfence and sfence as\n\
                          lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes] "));
  if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS)
    fprintf (stream, _("(default: yes)\n"));
  else
    fprintf (stream, _("(default: no)\n"));
  fprintf (stream, _("\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -malign-branch-boundary=NUM (default: 0)\n\
                          align branches within NUM byte boundary\n"));
  fprintf (stream, _("\
  -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
                          TYPE is a combination of jcc, fused, jmp, call, ret,\n\
                          indirect\n\
                          specify types of branches to align\n"));
  fprintf (stream, _("\
  -malign-branch-prefix-size=NUM (default: 5)\n\
                          align branches with NUM prefixes per instruction\n"));
  fprintf (stream, _("\
  -mbranches-within-32B-boundaries\n\
                          align branches within 32 byte boundary\n"));
  fprintf (stream, _("\
  -mlfence-after-load=[no|yes] (default: no)\n\
                          generate lfence after load\n"));
  fprintf (stream, _("\
  -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
                          generate lfence before indirect near branch\n"));
  fprintf (stream, _("\
  -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
                          generate lfence before ret\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA [default]\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}

#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
        x86_elf_abi = X86_64_ABI;
      else
        x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
        {
          static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
          cpu_arch_name = "iamcu";
          cpu_sub_arch_name = NULL;
          cpu_arch_flags = iamcu_flags;
          cpu_arch_isa = PROCESSOR_IAMCU;
          cpu_arch_isa_flags = iamcu_flags;
          if (!cpu_arch_tune_set)
            {
              cpu_arch_tune = cpu_arch_isa;
              cpu_arch_tune_flags = cpu_arch_isa_flags;
            }
        }
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
        as_fatal (_("Intel MCU doesn't support `%s' architecture"),
                  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

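  /* Map the configured output flavour and code size onto a BFD target
     name.  */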
  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
        return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
        return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
        const char *format;

        switch (x86_elf_abi)
          {
          default:
            format = ELF_TARGET_FORMAT;
#ifndef TE_SOLARIS
            tls_get_addr = "___tls_get_addr";
#endif
            break;
          case X86_64_ABI:
            use_rela_relocations = 1;
            object_64bit = 1;
#ifndef TE_SOLARIS
            tls_get_addr = "__tls_get_addr";
#endif
            format = ELF_TARGET_FORMAT64;
            break;
          case X86_64_X32_ABI:
            use_rela_relocations = 1;
            object_64bit = 1;
#ifndef TE_SOLARIS
            tls_get_addr = "__tls_get_addr";
#endif
            disallow_64bit_reloc = 1;
            format = ELF_TARGET_FORMAT32;
            break;
          }
        if (cpu_arch_isa == PROCESSOR_L1OM)
          {
            if (x86_elf_abi != X86_64_ABI)
              as_fatal (_("Intel L1OM is 64bit only"));
            return ELF_TARGET_L1OM_FORMAT;
          }
        else if (cpu_arch_isa == PROCESSOR_K1OM)
          {
            if (x86_elf_abi != X86_64_ABI)
              as_fatal (_("Intel K1OM is 64bit only"));
            return ELF_TARGET_K1OM_FORMAT;
          }
        else if (cpu_arch_isa == PROCESSOR_IAMCU)
          {
            if (x86_elf_abi != I386_ABI)
              as_fatal (_("Intel MCU is 32bit only"));
            return ELF_TARGET_IAMCU_FORMAT;
          }
        else
          return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
        {
          use_rela_relocations = 1;
          object_64bit = 1;
          return "mach-o-x86-64";
        }
      else
        return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
\f
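/* Create the GOT symbol the first time GLOBAL_OFFSET_TABLE_NAME is
   referenced; subsequent references reuse the same symbol.  */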
symbolS *
md_undefined_symbol (char *name)
{
  if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
      && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
      && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
      && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in symbol table"));
          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, &zero_address_frag);
        }
      return GOT_symbol;
    }
  return 0;
}

/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      int align;

      align = bfd_section_alignment (segment);
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}

/* On the i386, PC-relative offsets are relative to the start of the
   next instruction.  That is, the address of the offset, plus its
   size, since the offset is always the last part of the insn.  */

long
md_pcrel_from (fixS *fixP)
{
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
}

#ifndef I386COFF

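/* Handle the .bss directive: switch output to the .bss section (or
   the given subsection of it).  */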
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  int temp;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    obj_elf_section_change_hook ();
#endif
  temp = get_absolute_expression ();
  subseg_set (bss_section, (subsegT) temp);
  demand_empty_rest_of_line ();
}

#endif

/* Remember a constant directive seen in a code section, and warn when
   it defeats -mlfence-before-ret / -mlfence-before-indirect-branch
   processing.  */

void
i386_cons_align (int ignore ATTRIBUTE_UNUSED)
{
  if (last_insn.kind != last_insn_directive
      && (bfd_section_flags (now_seg) & SEC_CODE))
    {
      last_insn.seg = now_seg;
      last_insn.kind = last_insn_directive;
      last_insn.name = "constant directive";
      last_insn.file = as_where (&last_insn.line);
      if (lfence_before_ret != lfence_before_ret_none)
        {
          if (lfence_before_indirect_branch != lfence_branch_none)
            as_warn (_("constant directive skips -mlfence-before-ret "
                       "and -mlfence-before-indirect-branch"));
          else
            as_warn (_("constant directive skips -mlfence-before-ret"));
        }
      else if (lfence_before_indirect_branch != lfence_branch_none)
        as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
    }
}

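/* Massage a fixup before a relocation is generated: references
   computed against the GOT symbol are turned into the corresponding
   GOT-relative relocation types.  */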
void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
        {
          if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
            {
              if (!object_64bit)
                abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
              if (fixp->fx_tcbit2)
                fixp->fx_r_type = (fixp->fx_tcbit
                                   ? BFD_RELOC_X86_64_REX_GOTPCRELX
                                   : BFD_RELOC_X86_64_GOTPCRELX);
              else
#endif
                fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
            }
          else
            {
              if (!object_64bit)
                fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
              else
                fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
            }
          fixp->fx_subsy = 0;
        }
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
    {
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
          && fixp->fx_tcbit2)
        fixp->fx_r_type = BFD_RELOC_386_GOT32X;
    }
#endif
}

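/* Translate an internal fixup into a BFD relocation entry for the
   output file.  */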
arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
          && !S_IS_EXTERNAL (fixp->fx_addsy))
        {
          /* Resolve size relocation against local symbol to size of
             the symbol plus addend.  */
          valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
          if (fixp->fx_r_type == BFD_RELOC_SIZE32
              && !fits_in_unsigned_long (value))
            as_bad_where (fixp->fx_file, fixp->fx_line,
                          _("symbol size computation overflow"));
          fixp->fx_addsy = NULL;
          fixp->fx_subsy = NULL;
          md_apply_fix (fixp, (valueT *) &value, NULL);
          return NULL;
        }
#endif
      /* Fall through.  */

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
        {
          /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
          code = fixp->fx_r_type;
          break;
        }
      /* Fall through.  */
    default:
      if (fixp->fx_pcrel)
        {
          switch (fixp->fx_size)
            {
            default:
              as_bad_where (fixp->fx_file, fixp->fx_line,
                            _("cannot do %d byte pc-relative relocation"),
                            fixp->fx_size);
              code = BFD_RELOC_32_PCREL;
              break;
            case 1: code = BFD_RELOC_8_PCREL;  break;
            case 2: code = BFD_RELOC_16_PCREL; break;
            case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
            case 8: code = BFD_RELOC_64_PCREL; break;
#endif
            }
        }
      else
        {
          switch (fixp->fx_size)
            {
            default:
              as_bad_where (fixp->fx_file, fixp->fx_line,
                            _("cannot do %d byte relocation"),
                            fixp->fx_size);
              code = BFD_RELOC_32;
              break;
            case 1: code = BFD_RELOC_8;  break;
            case 2: code = BFD_RELOC_16; break;
            case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
            case 8: code = BFD_RELOC_64; break;
#endif
            }
        }
      break;
    }

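  /* A 32- or 64-bit reference to the GOT symbol itself is converted
     into the matching GOTPC relocation.  */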
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
        code = BFD_RELOC_386_GOTPC;
      else
        code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
         vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
        rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
        rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
        rel->addend = 0;
    }
  /* Use rela relocations in 64-bit mode.  */
  else
    {
      if (disallow_64bit_reloc)
        switch (code)
          {
          case BFD_RELOC_X86_64_DTPOFF64:
          case BFD_RELOC_X86_64_TPOFF64:
          case BFD_RELOC_64_PCREL:
          case BFD_RELOC_X86_64_GOTOFF64:
          case BFD_RELOC_X86_64_GOT64:
          case BFD_RELOC_X86_64_GOTPCREL64:
          case BFD_RELOC_X86_64_GOTPC64:
          case BFD_RELOC_X86_64_GOTPLT64:
          case BFD_RELOC_X86_64_PLTOFF64:
            as_bad_where (fixp->fx_file, fixp->fx_line,
                          _("cannot represent relocation type %s in x32 mode"),
                          bfd_get_reloc_code_name (code));
            break;
          default:
            break;
          }

      if (!fixp->fx_pcrel)
        rel->addend = fixp->fx_offset;
      else
        switch (code)
          {
          case BFD_RELOC_X86_64_PLT32:
          case BFD_RELOC_X86_64_GOT32:
          case BFD_RELOC_X86_64_GOTPCREL:
          case BFD_RELOC_X86_64_GOTPCRELX:
          case BFD_RELOC_X86_64_REX_GOTPCRELX:
          case BFD_RELOC_X86_64_TLSGD:
          case BFD_RELOC_X86_64_TLSLD:
          case BFD_RELOC_X86_64_GOTTPOFF:
          case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
          case BFD_RELOC_X86_64_TLSDESC_CALL:
            rel->addend = fixp->fx_offset - fixp->fx_size;
            break;
          default:
            rel->addend = (section->vma
                           - fixp->fx_size
                           + fixp->fx_addnumber
                           + md_pcrel_from (fixp));
            break;
          }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("cannot represent relocation type %s"),
                    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}

#include "tc-i386-intel.c"

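/* Parse a register name, as it appears in CFI directives, into its
   DWARF register number.  */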
void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
        {
          exp->X_op = O_constant;
          exp->X_add_number = i386_regtab[exp->X_add_number]
                              .dw2_regnum[flag_code >> 1];
        }
      else
        exp->X_op = O_illegal;
    }
}

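/* Emit the initial CFI state of a function: the CFA is the stack
   pointer plus the address size, with the return address stored just
   below it, at the top of the stack.  */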
void
tc_x86_frame_initial_instructions (void)
{
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}

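/* Return the size in bytes of a DWARF address: 4 for the x32 ABI,
   otherwise whatever the target architecture uses.  */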
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}

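/* Recognize the "unwind" section type name, mapping it to
   SHT_X86_64_UNWIND in 64-bit mode.  */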
int
i386_elf_section_type (const char *str, size_t len)
{
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && strncmp (str, "unwind", 6) == 0)
    return SHT_X86_64_UNWIND;

  return -1;
}

#ifdef TE_SOLARIS
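/* On Solaris, give 64-bit .eh_frame sections the SHT_X86_64_UNWIND
   section type.  */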
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif

#ifdef TE_PE
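/* On PE targets, emit DWARF offsets as section-relative (secrel)
   expressions.  */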
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
        return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}

bfd_vma
x86_64_section_word (char *str, size_t len)
{
  if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
    return SHF_X86_64_LARGE;

  return -1;
}

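/* Handle the .largecomm directive: like .comm, but in 64-bit mode the
   symbol goes into the large common section, with local symbols placed
   in .lbss.  */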
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
        {
          flagword applicable;
          segT seg = now_seg;
          subsegT subseg = now_subseg;

          /* The .lbss section is for local .largecomm symbols.  */
          lbss_section = subseg_new (".lbss", 0);
          applicable = bfd_applicable_section_flags (stdoutput);
          bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
          seg_info (lbss_section)->bss = 1;

          subseg_set (seg, subseg);
        }

      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */