/* Scrape residue from the binutils-gdb web view, preserved as a note:
   commit subject "Add -mno-shared to x86 assembler",
   file gas/config/tc-i386.c.  */
/* tc-i386.c -- Assemble code for the Intel 80386
   Copyright (C) 1989-2015 Free Software Foundation, Inc.

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* Intel 80386 machine specific gas.
   Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
   x86_64 support by Jan Hubicka (jh@suse.cz)
   VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
   Bugs & suggestions are completely welcome.  This is free software.
   Please help us make it better.  */
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifdef TE_LINUX
37 /* Default to compress debug sections for Linux. */
38 enum compressed_debug_section_type flag_compress_debug
39 = COMPRESS_DEBUG_ZLIB;
40 #endif
41
42 #ifndef REGISTER_WARNINGS
43 #define REGISTER_WARNINGS 1
44 #endif
45
46 #ifndef INFER_ADDR_PREFIX
47 #define INFER_ADDR_PREFIX 1
48 #endif
49
50 #ifndef DEFAULT_ARCH
51 #define DEFAULT_ARCH "i386"
52 #endif
53
54 #ifndef INLINE
55 #if __GNUC__ >= 2
56 #define INLINE __inline__
57 #else
58 #define INLINE
59 #endif
60 #endif
61
/* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix, since FWAIT really is an
   instruction and so must come before any prefixes.
   The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
   REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
#define WAIT_PREFIX	0
#define SEG_PREFIX	1
#define ADDR_PREFIX	2
#define DATA_PREFIX	3
#define REP_PREFIX	4
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX
#define LOCK_PREFIX	5
#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */

/* We define the syntax here (modulo base,index,scale syntax).  */
#define REGISTER_PREFIX  '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX  '*'

/* These are the instruction mnemonic suffixes in AT&T syntax, or the
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX    'w'
#define BYTE_MNEM_SUFFIX    'b'
#define SHORT_MNEM_SUFFIX   's'
#define LONG_MNEM_SUFFIX    'l'
#define QWORD_MNEM_SUFFIX   'q'
#define XMMWORD_MNEM_SUFFIX 'x'
#define YMMWORD_MNEM_SUFFIX 'y'
#define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'

99 /*
100 'templates' is for grouping together 'template' structures for opcodes
101 of the same name. This is only used for storing the insns in the grand
102 ole hash table of insns.
103 The templates themselves start at START and range up to (but not including)
104 END.
105 */
106 typedef struct
107 {
108 const insn_template *start;
109 const insn_template *end;
110 }
111 templates;
112
113 /* 386 operand encoding bytes: see 386 book for details of this. */
114 typedef struct
115 {
116 unsigned int regmem; /* codes register or memory operand */
117 unsigned int reg; /* codes register operand (or extended opcode) */
118 unsigned int mode; /* how to interpret regmem & reg */
119 }
120 modrm_byte;
121
122 /* x86-64 extension prefix. */
123 typedef int rex_byte;
124
125 /* 386 opcode byte to code indirect addressing. */
126 typedef struct
127 {
128 unsigned base;
129 unsigned index;
130 unsigned scale;
131 }
132 sib_byte;
133
134 /* x86 arch names, types and features */
135 typedef struct
136 {
137 const char *name; /* arch name */
138 unsigned int len; /* arch string length */
139 enum processor_type type; /* arch type */
140 i386_cpu_flags flags; /* cpu feature flags */
141 unsigned int skip; /* show_arch should skip this. */
142 unsigned int negated; /* turn off indicated flags. */
143 }
144 arch_entry;
145
146 static void update_code_flag (int, int);
147 static void set_code_flag (int);
148 static void set_16bit_gcc_code_flag (int);
149 static void set_intel_syntax (int);
150 static void set_intel_mnemonic (int);
151 static void set_allow_index_reg (int);
152 static void set_check (int);
153 static void set_cpu_arch (int);
154 #ifdef TE_PE
155 static void pe_directive_secrel (int);
156 #endif
157 static void signed_cons (int);
158 static char *output_invalid (int c);
159 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
160 const char *);
161 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
162 const char *);
163 static int i386_att_operand (char *);
164 static int i386_intel_operand (char *, int);
165 static int i386_intel_simplify (expressionS *);
166 static int i386_intel_parse_name (const char *, expressionS *);
167 static const reg_entry *parse_register (char *, char **);
168 static char *parse_insn (char *, char *);
169 static char *parse_operands (char *, const char *);
170 static void swap_operands (void);
171 static void swap_2_operands (int, int);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template *match_template (void);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const seg_entry *build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS *, offsetT);
186 static void output_disp (fragS *, offsetT);
187 #ifndef I386COFF
188 static void s_bss (int);
189 #endif
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED);
192 #endif
193
194 static const char *default_arch = DEFAULT_ARCH;
195
196 /* This struct describes rounding control and SAE in the instruction. */
197 struct RC_Operation
198 {
199 enum rc_type
200 {
201 rne = 0,
202 rd,
203 ru,
204 rz,
205 saeonly
206 } type;
207 int operand;
208 };
209
210 static struct RC_Operation rc_op;
211
212 /* The struct describes masking, applied to OPERAND in the instruction.
213 MASK is a pointer to the corresponding mask register. ZEROING tells
214 whether merging or zeroing mask is used. */
215 struct Mask_Operation
216 {
217 const reg_entry *mask;
218 unsigned int zeroing;
219 /* The operand where this operation is associated. */
220 int operand;
221 };
222
223 static struct Mask_Operation mask_op;
224
225 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
226 broadcast factor. */
227 struct Broadcast_Operation
228 {
229 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
230 int type;
231
232 /* Index of broadcasted operand. */
233 int operand;
234 };
235
236 static struct Broadcast_Operation broadcast_op;
237
238 /* VEX prefix. */
239 typedef struct
240 {
241 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
242 unsigned char bytes[4];
243 unsigned int length;
244 /* Destination or source register specifier. */
245 const reg_entry *register_specifier;
246 } vex_prefix;
247
248 /* 'md_assemble ()' gathers together information and puts it into a
249 i386_insn. */
250
251 union i386_op
252 {
253 expressionS *disps;
254 expressionS *imms;
255 const reg_entry *regs;
256 };
257
258 enum i386_error
259 {
260 operand_size_mismatch,
261 operand_type_mismatch,
262 register_type_mismatch,
263 number_of_operands_mismatch,
264 invalid_instruction_suffix,
265 bad_imm4,
266 old_gcc_only,
267 unsupported_with_intel_mnemonic,
268 unsupported_syntax,
269 unsupported,
270 invalid_vsib_address,
271 invalid_vector_register_set,
272 unsupported_vector_index_register,
273 unsupported_broadcast,
274 broadcast_not_on_src_operand,
275 broadcast_needed,
276 unsupported_masking,
277 mask_not_on_destination,
278 no_default_mask,
279 unsupported_rc_sae,
280 rc_sae_operand_not_last_imm,
281 invalid_register_operand,
282 try_vector_disp8
283 };
284
285 struct _i386_insn
286 {
287 /* TM holds the template for the insn were currently assembling. */
288 insn_template tm;
289
290 /* SUFFIX holds the instruction size suffix for byte, word, dword
291 or qword, if given. */
292 char suffix;
293
294 /* OPERANDS gives the number of given operands. */
295 unsigned int operands;
296
297 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
298 of given register, displacement, memory operands and immediate
299 operands. */
300 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
301
302 /* TYPES [i] is the type (see above #defines) which tells us how to
303 use OP[i] for the corresponding operand. */
304 i386_operand_type types[MAX_OPERANDS];
305
306 /* Displacement expression, immediate expression, or register for each
307 operand. */
308 union i386_op op[MAX_OPERANDS];
309
310 /* Flags for operands. */
311 unsigned int flags[MAX_OPERANDS];
312 #define Operand_PCrel 1
313
314 /* Relocation type for operand */
315 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
316
317 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
318 the base index byte below. */
319 const reg_entry *base_reg;
320 const reg_entry *index_reg;
321 unsigned int log2_scale_factor;
322
323 /* SEG gives the seg_entries of this insn. They are zero unless
324 explicit segment overrides are given. */
325 const seg_entry *seg[2];
326
327 /* PREFIX holds all the given prefix opcodes (usually null).
328 PREFIXES is the number of prefix opcodes. */
329 unsigned int prefixes;
330 unsigned char prefix[MAX_PREFIXES];
331
332 /* RM and SIB are the modrm byte and the sib byte where the
333 addressing modes of this insn are encoded. */
334 modrm_byte rm;
335 rex_byte rex;
336 rex_byte vrex;
337 sib_byte sib;
338 vex_prefix vex;
339
340 /* Masking attributes. */
341 struct Mask_Operation *mask;
342
343 /* Rounding control and SAE attributes. */
344 struct RC_Operation *rounding;
345
346 /* Broadcasting attributes. */
347 struct Broadcast_Operation *broadcast;
348
349 /* Compressed disp8*N attribute. */
350 unsigned int memshift;
351
352 /* Swap operand in encoding. */
353 unsigned int swap_operand;
354
355 /* Prefer 8bit or 32bit displacement in encoding. */
356 enum
357 {
358 disp_encoding_default = 0,
359 disp_encoding_8bit,
360 disp_encoding_32bit
361 } disp_encoding;
362
363 /* REP prefix. */
364 const char *rep_prefix;
365
366 /* HLE prefix. */
367 const char *hle_prefix;
368
369 /* Have BND prefix. */
370 const char *bnd_prefix;
371
372 /* Need VREX to support upper 16 registers. */
373 int need_vrex;
374
375 /* Error message. */
376 enum i386_error error;
377 };
378
379 typedef struct _i386_insn i386_insn;
380
381 /* Link RC type with corresponding string, that'll be looked for in
382 asm. */
383 struct RC_name
384 {
385 enum rc_type type;
386 const char *name;
387 unsigned int len;
388 };
389
390 static const struct RC_name RC_NamesTable[] =
391 {
392 { rne, STRING_COMMA_LEN ("rn-sae") },
393 { rd, STRING_COMMA_LEN ("rd-sae") },
394 { ru, STRING_COMMA_LEN ("ru-sae") },
395 { rz, STRING_COMMA_LEN ("rz-sae") },
396 { saeonly, STRING_COMMA_LEN ("sae") },
397 };
398
399 /* List of chars besides those in app.c:symbol_chars that can start an
400 operand. Used to prevent the scrubber eating vital white-space. */
401 const char extra_symbol_chars[] = "*%-([{"
402 #ifdef LEX_AT
403 "@"
404 #endif
405 #ifdef LEX_QM
406 "?"
407 #endif
408 ;
409
#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NACL)				\
	 && !defined (TE_NETWARE)			\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant,
   as in 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";

/* Tables for lexical analysis.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)

481 /* The instruction we're assembling. */
482 static i386_insn i;
483
484 /* Possible templates for current insn. */
485 static const templates *current_templates;
486
487 /* Per instruction expressionS buffers: max displacements & immediates. */
488 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
489 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
490
491 /* Current operand we are working on. */
492 static int this_operand = -1;
493
494 /* We support four different modes. FLAG_CODE variable is used to distinguish
495 these. */
496
497 enum flag_code {
498 CODE_32BIT,
499 CODE_16BIT,
500 CODE_64BIT };
501
502 static enum flag_code flag_code;
503 static unsigned int object_64bit;
504 static unsigned int disallow_64bit_reloc;
505 static int use_rela_relocations = 0;
506
507 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
508 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
509 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
510
511 /* The ELF ABI to use. */
512 enum x86_elf_abi
513 {
514 I386_ABI,
515 X86_64_ABI,
516 X86_64_X32_ABI
517 };
518
519 static enum x86_elf_abi x86_elf_abi = I386_ABI;
520 #endif
521
522 #if defined (TE_PE) || defined (TE_PEP)
523 /* Use big object file format. */
524 static int use_big_obj = 0;
525 #endif
526
527 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
528 /* 1 if not generating code for a shared library. */
529 static int no_shared = 0;
530 #endif
531
532 /* 1 for intel syntax,
533 0 if att syntax. */
534 static int intel_syntax = 0;
535
536 /* 1 for intel mnemonic,
537 0 if att mnemonic. */
538 static int intel_mnemonic = !SYSV386_COMPAT;
539
540 /* 1 if support old (<= 2.8.1) versions of gcc. */
541 static int old_gcc = OLDGCC_COMPAT;
542
543 /* 1 if pseudo registers are permitted. */
544 static int allow_pseudo_reg = 0;
545
546 /* 1 if register prefix % not required. */
547 static int allow_naked_reg = 0;
548
549 /* 1 if the assembler should add BND prefix for all control-tranferring
550 instructions supporting it, even if this prefix wasn't specified
551 explicitly. */
552 static int add_bnd_prefix = 0;
553
554 /* 1 if pseudo index register, eiz/riz, is allowed . */
555 static int allow_index_reg = 0;
556
557 /* 1 if the assembler should ignore LOCK prefix, even if it was
558 specified explicitly. */
559 static int omit_lock_prefix = 0;
560
561 static enum check_kind
562 {
563 check_none = 0,
564 check_warning,
565 check_error
566 }
567 sse_check, operand_check = check_warning;
568
569 /* Register prefix used for error message. */
570 static const char *register_prefix = "%";
571
572 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
573 leave, push, and pop instructions so that gcc has the same stack
574 frame as in 32 bit mode. */
575 static char stackop_size = '\0';
576
577 /* Non-zero to optimize code alignment. */
578 int optimize_align_code = 1;
579
580 /* Non-zero to quieten some warnings. */
581 static int quiet_warnings = 0;
582
583 /* CPU name. */
584 static const char *cpu_arch_name = NULL;
585 static char *cpu_sub_arch_name = NULL;
586
587 /* CPU feature flags. */
588 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
589
590 /* If we have selected a cpu we are generating instructions for. */
591 static int cpu_arch_tune_set = 0;
592
593 /* Cpu we are generating instructions for. */
594 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
595
596 /* CPU feature flags of cpu we are generating instructions for. */
597 static i386_cpu_flags cpu_arch_tune_flags;
598
599 /* CPU instruction set architecture used. */
600 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
601
602 /* CPU feature flags of instruction set architecture used. */
603 i386_cpu_flags cpu_arch_isa_flags;
604
605 /* If set, conditional jumps are not automatically promoted to handle
606 larger than a byte offset. */
607 static unsigned int no_cond_jump_promotion = 0;
608
609 /* Encode SSE instructions with VEX prefix. */
610 static unsigned int sse2avx;
611
612 /* Encode scalar AVX instructions with specific vector length. */
613 static enum
614 {
615 vex128 = 0,
616 vex256
617 } avxscalar;
618
619 /* Encode scalar EVEX LIG instructions with specific vector length. */
620 static enum
621 {
622 evexl128 = 0,
623 evexl256,
624 evexl512
625 } evexlig;
626
627 /* Encode EVEX WIG instructions with specific evex.w. */
628 static enum
629 {
630 evexw0 = 0,
631 evexw1
632 } evexwig;
633
634 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
635 static enum rc_type evexrcig = rne;
636
637 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
638 static symbolS *GOT_symbol;
639
640 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
641 unsigned int x86_dwarf2_return_column;
642
643 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
644 int x86_cie_data_alignment;
645
646 /* Interface to relax_segment.
647 There are 3 major relax states for 386 jump insns because the
648 different types of jumps add different sizes to frags when we're
649 figuring out what sort of jump to choose to reach a given label. */
650
651 /* Types. */
652 #define UNCOND_JUMP 0
653 #define COND_JUMP 1
654 #define COND_JUMP86 2
655
656 /* Sizes. */
657 #define CODE16 1
658 #define SMALL 0
659 #define SMALL16 (SMALL | CODE16)
660 #define BIG 2
661 #define BIG16 (BIG | CODE16)
662
663 #ifndef INLINE
664 #ifdef __GNUC__
665 #define INLINE __inline__
666 #else
667 #define INLINE
668 #endif
669 #endif
670
671 #define ENCODE_RELAX_STATE(type, size) \
672 ((relax_substateT) (((type) << 2) | (size)))
673 #define TYPE_FROM_RELAX_STATE(s) \
674 ((s) >> 2)
675 #define DISP_SIZE_FROM_RELAX_STATE(s) \
676 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
677
678 /* This table is used by relax_frag to promote short jumps to long
679 ones where necessary. SMALL (short) jumps may be promoted to BIG
680 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
681 don't allow a short jump in a 32 bit code segment to be promoted to
682 a 16 bit offset jump because it's slower (requires data size
683 prefix), and doesn't work, unless the destination is in the bottom
684 64k of the code segment (The top 16 bits of eip are zeroed). */
685
686 const relax_typeS md_relax_table[] =
687 {
688 /* The fields are:
689 1) most positive reach of this state,
690 2) most negative reach of this state,
691 3) how many bytes this mode will have in the variable part of the frag
692 4) which index into the table to try if we can't fit into this one. */
693
694 /* UNCOND_JUMP states. */
695 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
696 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
697 /* dword jmp adds 4 bytes to frag:
698 0 extra opcode bytes, 4 displacement bytes. */
699 {0, 0, 4, 0},
700 /* word jmp adds 2 byte2 to frag:
701 0 extra opcode bytes, 2 displacement bytes. */
702 {0, 0, 2, 0},
703
704 /* COND_JUMP states. */
705 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
706 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
707 /* dword conditionals adds 5 bytes to frag:
708 1 extra opcode byte, 4 displacement bytes. */
709 {0, 0, 5, 0},
710 /* word conditionals add 3 bytes to frag:
711 1 extra opcode byte, 2 displacement bytes. */
712 {0, 0, 3, 0},
713
714 /* COND_JUMP86 states. */
715 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
716 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
717 /* dword conditionals adds 5 bytes to frag:
718 1 extra opcode byte, 4 displacement bytes. */
719 {0, 0, 5, 0},
720 /* word conditionals add 4 bytes to frag:
721 1 displacement byte and a 3 byte long branch insn. */
722 {0, 0, 4, 0}
723 };
724
725 static const arch_entry cpu_arch[] =
726 {
727 /* Do not replace the first two entries - i386_target_format()
728 relies on them being there in this order. */
729 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
730 CPU_GENERIC32_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
732 CPU_GENERIC64_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
734 CPU_NONE_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
736 CPU_I186_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
738 CPU_I286_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
740 CPU_I386_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
742 CPU_I486_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
744 CPU_I586_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
746 CPU_I686_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
748 CPU_I586_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
750 CPU_PENTIUMPRO_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
752 CPU_P2_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
754 CPU_P3_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
756 CPU_P4_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
758 CPU_CORE_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
760 CPU_NOCONA_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
762 CPU_CORE_FLAGS, 1, 0 },
763 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
764 CPU_CORE_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
766 CPU_CORE2_FLAGS, 1, 0 },
767 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
768 CPU_CORE2_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
770 CPU_COREI7_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
772 CPU_L1OM_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
774 CPU_K1OM_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
776 CPU_K6_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
778 CPU_K6_2_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
780 CPU_ATHLON_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
782 CPU_K8_FLAGS, 1, 0 },
783 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
784 CPU_K8_FLAGS, 0, 0 },
785 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
786 CPU_K8_FLAGS, 0, 0 },
787 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
788 CPU_AMDFAM10_FLAGS, 0, 0 },
789 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
790 CPU_BDVER1_FLAGS, 0, 0 },
791 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
792 CPU_BDVER2_FLAGS, 0, 0 },
793 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
794 CPU_BDVER3_FLAGS, 0, 0 },
795 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
796 CPU_BDVER4_FLAGS, 0, 0 },
797 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
798 CPU_ZNVER1_FLAGS, 0, 0 },
799 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
800 CPU_BTVER1_FLAGS, 0, 0 },
801 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
802 CPU_BTVER2_FLAGS, 0, 0 },
803 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
804 CPU_8087_FLAGS, 0, 0 },
805 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
806 CPU_287_FLAGS, 0, 0 },
807 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
808 CPU_387_FLAGS, 0, 0 },
809 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
810 CPU_ANY87_FLAGS, 0, 1 },
811 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
812 CPU_MMX_FLAGS, 0, 0 },
813 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
814 CPU_3DNOWA_FLAGS, 0, 1 },
815 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
816 CPU_SSE_FLAGS, 0, 0 },
817 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
818 CPU_SSE2_FLAGS, 0, 0 },
819 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
820 CPU_SSE3_FLAGS, 0, 0 },
821 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
822 CPU_SSSE3_FLAGS, 0, 0 },
823 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
824 CPU_SSE4_1_FLAGS, 0, 0 },
825 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
826 CPU_SSE4_2_FLAGS, 0, 0 },
827 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
828 CPU_SSE4_2_FLAGS, 0, 0 },
829 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
830 CPU_ANY_SSE_FLAGS, 0, 1 },
831 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
832 CPU_AVX_FLAGS, 0, 0 },
833 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
834 CPU_AVX2_FLAGS, 0, 0 },
835 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
836 CPU_AVX512F_FLAGS, 0, 0 },
837 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
838 CPU_AVX512CD_FLAGS, 0, 0 },
839 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
840 CPU_AVX512ER_FLAGS, 0, 0 },
841 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
842 CPU_AVX512PF_FLAGS, 0, 0 },
843 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
844 CPU_AVX512DQ_FLAGS, 0, 0 },
845 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
846 CPU_AVX512BW_FLAGS, 0, 0 },
847 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
848 CPU_AVX512VL_FLAGS, 0, 0 },
849 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
850 CPU_ANY_AVX_FLAGS, 0, 1 },
851 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
852 CPU_VMX_FLAGS, 0, 0 },
853 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
854 CPU_VMFUNC_FLAGS, 0, 0 },
855 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
856 CPU_SMX_FLAGS, 0, 0 },
857 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
858 CPU_XSAVE_FLAGS, 0, 0 },
859 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
860 CPU_XSAVEOPT_FLAGS, 0, 0 },
861 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
862 CPU_XSAVEC_FLAGS, 0, 0 },
863 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
864 CPU_XSAVES_FLAGS, 0, 0 },
865 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
866 CPU_AES_FLAGS, 0, 0 },
867 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
868 CPU_PCLMUL_FLAGS, 0, 0 },
869 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
870 CPU_PCLMUL_FLAGS, 1, 0 },
871 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
872 CPU_FSGSBASE_FLAGS, 0, 0 },
873 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
874 CPU_RDRND_FLAGS, 0, 0 },
875 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
876 CPU_F16C_FLAGS, 0, 0 },
877 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
878 CPU_BMI2_FLAGS, 0, 0 },
879 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
880 CPU_FMA_FLAGS, 0, 0 },
881 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
882 CPU_FMA4_FLAGS, 0, 0 },
883 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
884 CPU_XOP_FLAGS, 0, 0 },
885 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
886 CPU_LWP_FLAGS, 0, 0 },
887 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
888 CPU_MOVBE_FLAGS, 0, 0 },
889 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
890 CPU_CX16_FLAGS, 0, 0 },
891 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
892 CPU_EPT_FLAGS, 0, 0 },
893 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
894 CPU_LZCNT_FLAGS, 0, 0 },
895 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
896 CPU_HLE_FLAGS, 0, 0 },
897 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
898 CPU_RTM_FLAGS, 0, 0 },
899 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
900 CPU_INVPCID_FLAGS, 0, 0 },
901 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
902 CPU_CLFLUSH_FLAGS, 0, 0 },
903 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
904 CPU_NOP_FLAGS, 0, 0 },
905 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
906 CPU_SYSCALL_FLAGS, 0, 0 },
907 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
908 CPU_RDTSCP_FLAGS, 0, 0 },
909 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
910 CPU_3DNOW_FLAGS, 0, 0 },
911 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
912 CPU_3DNOWA_FLAGS, 0, 0 },
913 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
914 CPU_PADLOCK_FLAGS, 0, 0 },
915 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
916 CPU_SVME_FLAGS, 1, 0 },
917 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
918 CPU_SVME_FLAGS, 0, 0 },
919 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
920 CPU_SSE4A_FLAGS, 0, 0 },
921 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
922 CPU_ABM_FLAGS, 0, 0 },
923 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
924 CPU_BMI_FLAGS, 0, 0 },
925 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
926 CPU_TBM_FLAGS, 0, 0 },
927 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
928 CPU_ADX_FLAGS, 0, 0 },
929 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
930 CPU_RDSEED_FLAGS, 0, 0 },
931 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
932 CPU_PRFCHW_FLAGS, 0, 0 },
933 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
934 CPU_SMAP_FLAGS, 0, 0 },
935 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
936 CPU_MPX_FLAGS, 0, 0 },
937 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
938 CPU_SHA_FLAGS, 0, 0 },
939 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
940 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
941 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
942 CPU_PREFETCHWT1_FLAGS, 0, 0 },
943 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
944 CPU_SE1_FLAGS, 0, 0 },
945 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
946 CPU_CLWB_FLAGS, 0, 0 },
947 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
948 CPU_PCOMMIT_FLAGS, 0, 0 },
949 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
950 CPU_AVX512IFMA_FLAGS, 0, 0 },
951 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
952 CPU_AVX512VBMI_FLAGS, 0, 0 },
953 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
954 CPU_CLZERO_FLAGS, 0, 0 },
955 };
956
957 #ifdef I386COFF
958 /* Like s_lcomm_internal in gas/read.c but the alignment string
959 is allowed to be optional. */
960
961 static symbolS *
962 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
963 {
964 addressT align = 0;
965
966 SKIP_WHITESPACE ();
967
968 if (needs_align
969 && *input_line_pointer == ',')
970 {
971 align = parse_align (needs_align - 1);
972
973 if (align == (addressT) -1)
974 return NULL;
975 }
976 else
977 {
978 if (size >= 8)
979 align = 3;
980 else if (size >= 4)
981 align = 2;
982 else if (size >= 2)
983 align = 1;
984 else
985 align = 0;
986 }
987
988 bss_alloc (symbolP, size, align);
989 return symbolP;
990 }
991
/* Handler for the COFF .lcomm pseudo-op.  NEEDS_ALIGN is doubled to
   match the encoding expected by s_comm_internal (see gas/read.c).  */
static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
997 #endif
998
/* x86-specific pseudo-op (directive) table.  Entries here extend or
   override the target-independent defaults in gas/read.c.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  /* On these targets the .align argument is a power of two, not a
     byte count.  */
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  /* .noopt/.optim are accepted for compatibility but ignored.  */
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
1043
/* For interface with expression () (defined in gas/expr.c).  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup (mnemonic string ->
   insn_template list).  */
static struct hash_control *op_hash;

/* Hash table for register lookup (register name -> reg_entry).  */
static struct hash_control *reg_hash;
1052 \f
/* Fill COUNT bytes in FRAGP with efficient no-op filler so that the
   following code is aligned.  The pattern chosen depends on the code
   size (16/32/64-bit) and on the processor selected via -march=,
   -mtune= or .arch; long paddings may instead be covered by a short
   jump over plain NOPs.  */
void
i386_align_code (fragS *fragP, int count)
{
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const char f32_1[] =
    {0x90};					/* nop			*/
  static const char f32_2[] =
    {0x66,0x90};				/* xchg %ax,%ax		*/
  static const char f32_3[] =
    {0x8d,0x76,0x00};				/* leal 0(%esi),%esi	*/
  static const char f32_4[] =
    {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const char f32_5[] =
    {0x90,					/* nop			*/
     0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi	*/
  static const char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_8[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_9[] =
    {0x89,0xf6,					/* movl %esi,%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_10[] =
    {0x8d,0x76,0x00,				/* leal 0(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_11[] =
    {0x8d,0x74,0x26,0x00,			/* leal 0(%esi,1),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbf,0x00,0x00,0x00,0x00};		/* leal 0L(%edi),%edi	*/
  static const char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f16_3[] =
    {0x8d,0x74,0x00};				/* lea 0(%esi),%esi	*/
  static const char f16_4[] =
    {0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const char f16_5[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const char f16_6[] =
    {0x89,0xf6,					/* mov %si,%si		*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char f16_7[] =
    {0x8d,0x74,0x00,				/* lea 0(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char f16_8[] =
    {0x8d,0xb4,0x00,0x00,			/* lea 0w(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  /* Pattern tables indexed by fill length minus one.  */
  static const char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  };
  static const char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  };
  /* Multi-byte 0F 1F NOP forms, for processors that support them.  */
  /* nopl (%[re]ax) */
  static const char alt_3[] =
    {0x0f,0x1f,0x00};
  /* nopl 0(%[re]ax) */
  static const char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  static const char *const alt_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10
  };

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
    return;

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
	PROCESSOR_GENERIC32, f32_patt will be used.
     2. For the rest, alt_patt will be used.

     When -mtune= isn't used, alt_patt will be used if
     cpu_arch_isa_flags has CpuNop.  Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.   */

  if (flag_code == CODE_16BIT)
    {
      if (count > 8)
	{
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  jump_31, count);
	  /* Adjust jump offset.  */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	}
      else
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
    }
  else
    {
      const char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	      patt = alt_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	     its offset.   */
	  int limit;

	  /* For 64bit, the limit is 3 bytes.  */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	    limit = 3;
	  else
	    limit = 15;
	  if (count < limit)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt[count - 1], count);
	  else
	    {
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      jump_31, count);
	      /* Adjust jump offset.  */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	    }
	}
      else
	{
	  /* Maximum length of an instruction is 10 byte.  If the
	     padding is greater than 10 bytes and we don't use jump,
	     we have to break it into smaller pieces.  */
	  int padding = count;
	  while (padding > 10)
	    {
	      padding -= 10;
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
		      patt [9], 10);
	    }

	  if (padding)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt [padding - 1], padding);
	}
    }
  fragP->fr_var = count;
}
1317
1318 static INLINE int
1319 operand_type_all_zero (const union i386_operand_type *x)
1320 {
1321 switch (ARRAY_SIZE(x->array))
1322 {
1323 case 3:
1324 if (x->array[2])
1325 return 0;
1326 case 2:
1327 if (x->array[1])
1328 return 0;
1329 case 1:
1330 return !x->array[0];
1331 default:
1332 abort ();
1333 }
1334 }
1335
1336 static INLINE void
1337 operand_type_set (union i386_operand_type *x, unsigned int v)
1338 {
1339 switch (ARRAY_SIZE(x->array))
1340 {
1341 case 3:
1342 x->array[2] = v;
1343 case 2:
1344 x->array[1] = v;
1345 case 1:
1346 x->array[0] = v;
1347 break;
1348 default:
1349 abort ();
1350 }
1351 }
1352
1353 static INLINE int
1354 operand_type_equal (const union i386_operand_type *x,
1355 const union i386_operand_type *y)
1356 {
1357 switch (ARRAY_SIZE(x->array))
1358 {
1359 case 3:
1360 if (x->array[2] != y->array[2])
1361 return 0;
1362 case 2:
1363 if (x->array[1] != y->array[1])
1364 return 0;
1365 case 1:
1366 return x->array[0] == y->array[0];
1367 break;
1368 default:
1369 abort ();
1370 }
1371 }
1372
1373 static INLINE int
1374 cpu_flags_all_zero (const union i386_cpu_flags *x)
1375 {
1376 switch (ARRAY_SIZE(x->array))
1377 {
1378 case 3:
1379 if (x->array[2])
1380 return 0;
1381 case 2:
1382 if (x->array[1])
1383 return 0;
1384 case 1:
1385 return !x->array[0];
1386 default:
1387 abort ();
1388 }
1389 }
1390
1391 static INLINE int
1392 cpu_flags_equal (const union i386_cpu_flags *x,
1393 const union i386_cpu_flags *y)
1394 {
1395 switch (ARRAY_SIZE(x->array))
1396 {
1397 case 3:
1398 if (x->array[2] != y->array[2])
1399 return 0;
1400 case 2:
1401 if (x->array[1] != y->array[1])
1402 return 0;
1403 case 1:
1404 return x->array[0] == y->array[0];
1405 break;
1406 default:
1407 abort ();
1408 }
1409 }
1410
1411 static INLINE int
1412 cpu_flags_check_cpu64 (i386_cpu_flags f)
1413 {
1414 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1415 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1416 }
1417
1418 static INLINE i386_cpu_flags
1419 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1420 {
1421 switch (ARRAY_SIZE (x.array))
1422 {
1423 case 3:
1424 x.array [2] &= y.array [2];
1425 case 2:
1426 x.array [1] &= y.array [1];
1427 case 1:
1428 x.array [0] &= y.array [0];
1429 break;
1430 default:
1431 abort ();
1432 }
1433 return x;
1434 }
1435
1436 static INLINE i386_cpu_flags
1437 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1438 {
1439 switch (ARRAY_SIZE (x.array))
1440 {
1441 case 3:
1442 x.array [2] |= y.array [2];
1443 case 2:
1444 x.array [1] |= y.array [1];
1445 case 1:
1446 x.array [0] |= y.array [0];
1447 break;
1448 default:
1449 abort ();
1450 }
1451 return x;
1452 }
1453
1454 static INLINE i386_cpu_flags
1455 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1456 {
1457 switch (ARRAY_SIZE (x.array))
1458 {
1459 case 3:
1460 x.array [2] &= ~y.array [2];
1461 case 2:
1462 x.array [1] &= ~y.array [1];
1463 case 1:
1464 x.array [0] &= ~y.array [0];
1465 break;
1466 default:
1467 abort ();
1468 }
1469 return x;
1470 }
1471
/* Bits returned by cpu_flags_match, describing which aspects of an
   instruction template's CPU requirements are satisfied by the
   currently selected architecture.  */
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
#define CPU_FLAGS_AES_MATCH 0x4
#define CPU_FLAGS_PCLMUL_MATCH 0x8
#define CPU_FLAGS_AVX_MATCH 0x10

/* Everything matches except for 64-bit compatibility.  */
#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
/* Everything matches, including 64-bit compatibility.  */
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1483
1484 /* Return CPU flags match bits. */
1485
/* Return the CPU_FLAGS_* match bits for template T against the
   current cpu_arch_flags.  AES, PCLMUL and SSE2AVX are only checked
   for AVX-dependent templates.  */
static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* 64-bit-ness was accounted for above; drop it from the flags.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1538
1539 static INLINE i386_operand_type
1540 operand_type_and (i386_operand_type x, i386_operand_type y)
1541 {
1542 switch (ARRAY_SIZE (x.array))
1543 {
1544 case 3:
1545 x.array [2] &= y.array [2];
1546 case 2:
1547 x.array [1] &= y.array [1];
1548 case 1:
1549 x.array [0] &= y.array [0];
1550 break;
1551 default:
1552 abort ();
1553 }
1554 return x;
1555 }
1556
1557 static INLINE i386_operand_type
1558 operand_type_or (i386_operand_type x, i386_operand_type y)
1559 {
1560 switch (ARRAY_SIZE (x.array))
1561 {
1562 case 3:
1563 x.array [2] |= y.array [2];
1564 case 2:
1565 x.array [1] |= y.array [1];
1566 case 1:
1567 x.array [0] |= y.array [0];
1568 break;
1569 default:
1570 abort ();
1571 }
1572 return x;
1573 }
1574
1575 static INLINE i386_operand_type
1576 operand_type_xor (i386_operand_type x, i386_operand_type y)
1577 {
1578 switch (ARRAY_SIZE (x.array))
1579 {
1580 case 3:
1581 x.array [2] ^= y.array [2];
1582 case 2:
1583 x.array [1] ^= y.array [1];
1584 case 1:
1585 x.array [0] ^= y.array [0];
1586 break;
1587 default:
1588 abort ();
1589 }
1590 return x;
1591 }
1592
/* Frequently used operand type masks, initialized from the generated
   OPERAND_TYPE_* initializers in opcodes/i386-init.h.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;

/* Broad operand classes tested by operand_type_check.  */
enum operand_type
{
  reg,
  imm,
  disp,
  anymem
};
1628
/* Return non-zero if operand type T has any bit of class C set.
   Note that anymem also covers plain displacement operands, since a
   memory operand always has some displacement or base/index bits.  */
static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  /* Not reached; all cases return or abort above.  */
  return 0;
}
1669
1670 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1671 operand J for instruction template T. */
1672
1673 static INLINE int
1674 match_reg_size (const insn_template *t, unsigned int j)
1675 {
1676 return !((i.types[j].bitfield.byte
1677 && !t->operand_types[j].bitfield.byte)
1678 || (i.types[j].bitfield.word
1679 && !t->operand_types[j].bitfield.word)
1680 || (i.types[j].bitfield.dword
1681 && !t->operand_types[j].bitfield.dword)
1682 || (i.types[j].bitfield.qword
1683 && !t->operand_types[j].bitfield.qword));
1684 }
1685
1686 /* Return 1 if there is no conflict in any size on operand J for
1687 instruction template T. */
1688
1689 static INLINE int
1690 match_mem_size (const insn_template *t, unsigned int j)
1691 {
1692 return (match_reg_size (t, j)
1693 && !((i.types[j].bitfield.unspecified
1694 && !i.broadcast
1695 && !t->operand_types[j].bitfield.unspecified)
1696 || (i.types[j].bitfield.fword
1697 && !t->operand_types[j].bitfield.fword)
1698 || (i.types[j].bitfield.tbyte
1699 && !t->operand_types[j].bitfield.tbyte)
1700 || (i.types[j].bitfield.xmmword
1701 && !t->operand_types[j].bitfield.xmmword)
1702 || (i.types[j].bitfield.ymmword
1703 && !t->operand_types[j].bitfield.ymmword)
1704 || (i.types[j].bitfield.zmmword
1705 && !t->operand_types[j].bitfield.zmmword)));
1706 }
1707
1708 /* Return 1 if there is no size conflict on any operands for
1709 instruction template T. */
1710
static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* Template operands marked AnySize accept any operand size.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* Template has no reversed (D/FloatD) form: the direct
	 mismatch is final.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      /* Re-check with the operand order swapped (j against 1-j).  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1769
/* Return 1 if the OVERLAP of a template operand type and the GIVEN
   operand type is a usable match: the overlap must retain some bit
   other than the pure size/unspecified bits, and must agree with the
   given operand on BaseIndex and JumpAbsolute.  On failure i.error is
   set.  */
static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Mask off size-only bits; an overlap consisting solely of these
     is not a real match.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1798
1799 /* If given types g0 and g1 are registers they must be of the same type
1800 unless the expected operand type register overlap is null.
1801 Note that Acc in a template matches every size of reg. */
1802
static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Nothing to check unless both given operands are registers.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register size on both given operands: trivially fine.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* Acc in a template matches registers of every size; widen the
     template types accordingly before checking for overlap.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* Differing sizes are only acceptable when the two template operand
     types have no register size in common.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1849
1850 static INLINE unsigned int
1851 register_number (const reg_entry *r)
1852 {
1853 unsigned int nr = r->reg_num;
1854
1855 if (r->reg_flags & RegRex)
1856 nr += 8;
1857
1858 return nr;
1859 }
1860
1861 static INLINE unsigned int
1862 mode_from_disp_size (i386_operand_type t)
1863 {
1864 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1865 return 1;
1866 else if (t.bitfield.disp16
1867 || t.bitfield.disp32
1868 || t.bitfield.disp32s)
1869 return 2;
1870 else
1871 return 0;
1872 }
1873
1874 static INLINE int
1875 fits_in_signed_byte (addressT num)
1876 {
1877 return num + 0x80 <= 0xff;
1878 }
1879
1880 static INLINE int
1881 fits_in_unsigned_byte (addressT num)
1882 {
1883 return num <= 0xff;
1884 }
1885
1886 static INLINE int
1887 fits_in_unsigned_word (addressT num)
1888 {
1889 return num <= 0xffff;
1890 }
1891
1892 static INLINE int
1893 fits_in_signed_word (addressT num)
1894 {
1895 return num + 0x8000 <= 0xffff;
1896 }
1897
1898 static INLINE int
1899 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
1900 {
1901 #ifndef BFD64
1902 return 1;
1903 #else
1904 return num + 0x80000000 <= 0xffffffff;
1905 #endif
1906 } /* fits_in_signed_long() */
1907
1908 static INLINE int
1909 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
1910 {
1911 #ifndef BFD64
1912 return 1;
1913 #else
1914 return num <= 0xffffffff;
1915 #endif
1916 } /* fits_in_unsigned_long() */
1917
1918 static INLINE int
1919 fits_in_vec_disp8 (offsetT num)
1920 {
1921 int shift = i.memshift;
1922 unsigned int mask;
1923
1924 if (shift == -1)
1925 abort ();
1926
1927 mask = (1 << shift) - 1;
1928
1929 /* Return 0 if NUM isn't properly aligned. */
1930 if ((num & mask))
1931 return 0;
1932
1933 /* Check if NUM will fit in 8bit after shift. */
1934 return fits_in_signed_byte (num >> shift);
1935 }
1936
1937 static INLINE int
1938 fits_in_imm4 (offsetT num)
1939 {
1940 return (num & 0xf) == num;
1941 }
1942
/* Return the set of immediate operand types (Imm1/Imm8/Imm8S/Imm16/
   Imm32/Imm32S/Imm64) that NUM can be encoded as.  Imm64 is always
   included; narrower forms are added as NUM fits them.  */
static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1996
/* Truncate VAL to SIZE bytes, warning when significant bits would be
   lost.  With BFD64, a 32-bit value is first sign-extended when not
   assembling 64-bit code (or when an address size prefix is in
   effect).  */
static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless all discarded high bits are identical (plain zero- or
     sign-extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2031
/* Result classification for add_prefix, below.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* A prefix of the same class already exists.  */
  PREFIX_LOCK,		/* A lock prefix was added.  */
  PREFIX_REP,		/* A rep/repne prefix was added.  */
  PREFIX_OTHER		/* Some other prefix was added.  */
};
2039
2040 /* Returns
2041 a. PREFIX_EXIST if attempting to add a prefix where one from the
2042 same class already exists.
2043 b. PREFIX_LOCK if lock prefix is added.
2044 c. PREFIX_REP if rep/repne prefix is added.
2045 d. PREFIX_OTHER if other prefix is added.
2046 */
2047
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes (64-bit mode only) may be merged together, but
     REX.W and the register-extension bits must not be given twice.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the prefix byte to its slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2117
/* Switch code generation to the mode given by VALUE (one of the
   CODE_* enumerators).  If CHECK is non-zero, selecting a mode the
   chosen CPU does not support is a fatal error; otherwise it is an
   ordinary error.  */
static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode needs long-mode (LM) support.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* Reset any .code16gcc stack operand size override.  */
  stackop_size = '\0';
}
2154
/* Handler for the .code16/.code32/.code64 directives: switch modes
   without treating an unsupported CPU as a fatal error.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2160
/* Handler for the .code16gcc directive: 16-bit code, but with 32-bit
   default stack operand size, as emitted by gcc for real-mode code.  */
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  /* Make push/pop default to 32-bit operands.  */
  stackop_size = LONG_MNEM_SUFFIX;
}
2171
/* Handle a syntax-selection directive.  SYNTAX_FLAG is non-zero for
   Intel syntax, zero for AT&T.  An optional "prefix"/"noprefix"
   argument controls whether registers must carry a '%' prefix; with
   no argument the default depends on the target's symbol leading
   character.  Updates the lexer tables and register_prefix
   accordingly.  */
static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      /* get_symbol_end NUL-terminated the word; restore the byte it
	 overwrote before continuing to scan the line.  */
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    /* No explicit request: allow naked registers in Intel mode only
       when symbols have a leading char to disambiguate them.  */
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  /* Adjust which characters may appear in identifiers: '%' only when
     naked registers make it ambiguous, '$' only in Intel mode.  */
  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2208
/* Record whether Intel (non-zero) or AT&T (zero) mnemonics are in
   effect; directive handler, argument comes from the pseudo-op
   table.  */
static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2214
/* Record whether an index register is permitted where one would
   normally be rejected; directive handler, FLAG comes from the
   pseudo-op table.  */
static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2220
2221 static void
2222 set_check (int what)
2223 {
2224 enum check_kind *kind;
2225 const char *str;
2226
2227 if (what)
2228 {
2229 kind = &operand_check;
2230 str = "operand";
2231 }
2232 else
2233 {
2234 kind = &sse_check;
2235 str = "sse";
2236 }
2237
2238 SKIP_WHITESPACE ();
2239
2240 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2241 {
2242 char *string = input_line_pointer;
2243 int e = get_symbol_end ();
2244
2245 if (strcmp (string, "none") == 0)
2246 *kind = check_none;
2247 else if (strcmp (string, "warning") == 0)
2248 *kind = check_warning;
2249 else if (strcmp (string, "error") == 0)
2250 *kind = check_error;
2251 else
2252 as_bad (_("bad argument to %s_check directive."), str);
2253 *input_line_pointer = e;
2254 }
2255 else
2256 as_bad (_("missing argument for %s_check directive"), str);
2257
2258 demand_empty_rest_of_line ();
2259 }
2260
/* Verify that the feature set NEW_FLAG (about to be enabled via a
   .arch directive) is compatible with the object's machine type.
   When targeting Intel L1OM or K1OM the feature must explicitly
   include the corresponding cpul1om/cpuk1om bit, otherwise an error
   naming NAME is emitted.  A no-op for non-ELF output.  */
static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel L1OM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel L1OM, we must enable it.
     (i.e. only fall through to the error when the machine is L1OM
     and NEW_FLAG does not carry the L1OM feature bit.)  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2294
/* Handle the .arch directive.  The argument is either a processor
   name (e.g. "i686"), which replaces the whole architecture, or a
   feature extension starting with '.' (e.g. ".sse4.1" / ".nosse"),
   which is OR-ed into (or, for negated entries, masked out of) the
   current feature flags.  An optional ",nojumps"/",jumps" modifier
   controls conditional-jump promotion.  */
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      /* A name not starting with '.' selects a whole new
		 base architecture.  */
	      if (*string != '.')
		{
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  /* Preserve the current code size in the new flag
		     set.  */
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* A '.'-prefixed extension: add or remove the feature
		 bits relative to the current flags.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Something actually changed: append the extension
		     name to cpu_sub_arch_name for later reporting.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  /* Only reached for a base-architecture change (extensions return
     above): reset jump promotion, then parse the optional modifier.  */
  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2391
2392 enum bfd_architecture
2393 i386_arch (void)
2394 {
2395 if (cpu_arch_isa == PROCESSOR_L1OM)
2396 {
2397 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2398 || flag_code != CODE_64BIT)
2399 as_fatal (_("Intel L1OM is 64bit ELF only"));
2400 return bfd_arch_l1om;
2401 }
2402 else if (cpu_arch_isa == PROCESSOR_K1OM)
2403 {
2404 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2405 || flag_code != CODE_64BIT)
2406 as_fatal (_("Intel K1OM is 64bit ELF only"));
2407 return bfd_arch_k1om;
2408 }
2409 else
2410 return bfd_arch_i386;
2411 }
2412
/* Return the BFD machine number for the configured default
   architecture string.  "x86_64" yields bfd_mach_x86_64 (or the
   L1OM/K1OM variants when that ISA is selected), "x86_64" with a
   suffix (i.e. default_arch[6] != '\0', e.g. the x32 configuration)
   yields bfd_mach_x64_32, and "i386" yields bfd_mach_i386_i386.
   Anything else is a fatal configuration error.  */
unsigned long
i386_mach (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      if (cpu_arch_isa == PROCESSOR_L1OM)
	{
	  /* L1OM only exists as 64-bit ELF; a suffixed default_arch
	     (x32) is rejected too.  */
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel L1OM is 64bit ELF only"));
	  return bfd_mach_l1om;
	}
      else if (cpu_arch_isa == PROCESSOR_K1OM)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel K1OM is 64bit ELF only"));
	  return bfd_mach_k1om;
	}
      else if (default_arch[6] == '\0')
	return bfd_mach_x86_64;
      else
	return bfd_mach_x64_32;
    }
  else if (!strcmp (default_arch, "i386"))
    return bfd_mach_i386_i386;
  else
    as_fatal (_("unknown architecture"));
}
2442 \f
/* One-time target initialization, called by gas before assembly
   starts: build the opcode and register hash tables, fill in the
   per-character lexer classification tables, and set the DWARF CFI
   parameters for the selected code size.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* i386_optab is sorted so that all templates for one mnemonic
       are adjacent; group each run into a templates record keyed by
       the mnemonic.  The table is terminated by a NULL name.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables: mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and registers are matched case-insensitively
	       by folding to lower case here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow non-ASCII bytes in identifiers and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* DWARF CFI parameters depend on the code size: return-address
     column and the (negative = fixed) CIE data alignment factor.  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2574
/* Print usage statistics for the opcode and register hash tables to
   FILE.  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2581 \f
2582 #ifdef DEBUG386
2583
2584 /* Debugging routines for md_assemble. */
2585 static void pte (insn_template *);
2586 static void pt (i386_operand_type);
2587 static void pe (expressionS *);
2588 static void ps (symbolS *);
2589
/* DEBUG386: dump the parsed instruction X (template, addressing
   info, ModRM/SIB/REX fields and each operand) to stdout, prefixed
   by LINE.  */
static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s index %s scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib: base %x index %x scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d: ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* For register-class operands also print the register name.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      /* For immediates and displacements dump the expression.  */
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2635
/* DEBUG386: dump the insn template T (operand count, opcode,
   extension opcode, D/W modifier bits and the operand types) to
   stdout.  */
static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2656
/* DEBUG386: dump the expression E (operation, constant part and any
   symbols involved) to stdout.  */
static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2676
/* DEBUG386: dump symbol S (name, external flag and owning segment)
   to stdout, without a trailing newline.  */
static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2685
2686 static struct type_name
2687 {
2688 i386_operand_type mask;
2689 const char *name;
2690 }
2691 const type_names[] =
2692 {
2693 { OPERAND_TYPE_REG8, "r8" },
2694 { OPERAND_TYPE_REG16, "r16" },
2695 { OPERAND_TYPE_REG32, "r32" },
2696 { OPERAND_TYPE_REG64, "r64" },
2697 { OPERAND_TYPE_IMM8, "i8" },
2698 { OPERAND_TYPE_IMM8, "i8s" },
2699 { OPERAND_TYPE_IMM16, "i16" },
2700 { OPERAND_TYPE_IMM32, "i32" },
2701 { OPERAND_TYPE_IMM32S, "i32s" },
2702 { OPERAND_TYPE_IMM64, "i64" },
2703 { OPERAND_TYPE_IMM1, "i1" },
2704 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2705 { OPERAND_TYPE_DISP8, "d8" },
2706 { OPERAND_TYPE_DISP16, "d16" },
2707 { OPERAND_TYPE_DISP32, "d32" },
2708 { OPERAND_TYPE_DISP32S, "d32s" },
2709 { OPERAND_TYPE_DISP64, "d64" },
2710 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2711 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2712 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2713 { OPERAND_TYPE_CONTROL, "control reg" },
2714 { OPERAND_TYPE_TEST, "test reg" },
2715 { OPERAND_TYPE_DEBUG, "debug reg" },
2716 { OPERAND_TYPE_FLOATREG, "FReg" },
2717 { OPERAND_TYPE_FLOATACC, "FAcc" },
2718 { OPERAND_TYPE_SREG2, "SReg2" },
2719 { OPERAND_TYPE_SREG3, "SReg3" },
2720 { OPERAND_TYPE_ACC, "Acc" },
2721 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2722 { OPERAND_TYPE_REGMMX, "rMMX" },
2723 { OPERAND_TYPE_REGXMM, "rXMM" },
2724 { OPERAND_TYPE_REGYMM, "rYMM" },
2725 { OPERAND_TYPE_REGZMM, "rZMM" },
2726 { OPERAND_TYPE_REGMASK, "Mask reg" },
2727 { OPERAND_TYPE_ESSEG, "es" },
2728 };
2729
/* DEBUG386: print the name of every operand-type bit set in T (see
   type_names above), comma-separated, to stdout.  */
static void
pt (i386_operand_type t)
{
  unsigned int j;
  i386_operand_type a;

  for (j = 0; j < ARRAY_SIZE (type_names); j++)
    {
      a = operand_type_and (t, type_names[j].mask);
      if (!operand_type_all_zero (&a))
	fprintf (stdout, "%s, ", type_names[j].name);
    }
  fflush (stdout);
}
2744
2745 #endif /* DEBUG386 */
2746 \f
/* Translate a fixup into the BFD relocation to emit.  SIZE is the
   field width in bytes, PCREL non-zero for pc-relative fixups, SIGN
   is >0 for signed, 0 for unsigned, <0 for "don't care".  OTHER, when
   not NO_RELOC, is an explicitly requested relocation (e.g. from a
   @got-style operator) which is validated -- and for SIZE == 8
   widened to its 64-bit counterpart -- before being returned.
   Returns NO_RELOC after reporting an error when no suitable
   relocation exists.  */
static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      /* Widen 32-bit GOT/TLS relocations to the 64-bit forms when an
	 8-byte field was requested.  */
      if (size == 8)
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_GOTPLT64:
	    return BFD_RELOC_X86_64_GOTPLT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    other = BFD_RELOC_SIZE64;
	  if (pcrel)
	    {
	      as_bad (_("there are no pc-relative size relocations"));
	      return NO_RELOC;
	    }
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      /* Cross-check the requested relocation against what BFD knows
	 about it: size, pc-relativity and signedness must agree.  */
      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No explicit relocation: pick the generic one for SIZE/PCREL.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2855
2856 /* Here we decide which fixups can be adjusted to make them relative to
2857 the beginning of the section instead of the symbol. Basically we need
2858 to make sure that the dynamic relocations are done correctly, so in
2859 some cases we force the original symbol to be used. */
2860
/* Return non-zero if FIXP may be adjusted to be section-relative
   instead of symbol-relative.  GOT/PLT/TLS/size/vtable relocations
   must keep the original symbol so dynamic relocations come out
   right; everything is adjustable for non-ELF output.  */
int
tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return 1;

  /* Don't adjust pc-relative references to merge sections in 64-bit
     mode.  */
  if (use_rela_relocations
      && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
      && fixP->fx_pcrel)
    return 0;

  /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
     and changed later by validate_fix.  */
  if (GOT_symbol && fixP->fx_subsy == GOT_symbol
      && fixP->fx_r_type == BFD_RELOC_32_PCREL)
    return 0;

  /* Adjust_reloc_syms doesn't know about the GOT.  Need to keep symbol
     for size relocations.  */
  if (fixP->fx_r_type == BFD_RELOC_SIZE32
      || fixP->fx_r_type == BFD_RELOC_SIZE64
      || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_386_PLT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
      || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
      || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;
#endif
  return 1;
}
2917
/* Classify MNEMONIC for Intel-syntax memory operand sizing.
   Returns 0 for non-FPU mnemonics (and fxsave/fxrstor, which are not
   really math ops), 2 for integer FPU ops (fi*), 3 for FPU control
   ops (fldcw/fldenv, fnsave etc., frstor, fsave, fst{c,d,e,s}*), and
   1 for everything else starting with 'f'.  The result is only
   meaningful for opcodes that take (memory) operands, so opcodes
   without operands are deliberately not handled precisely.  */

static int
intel_float_operand (const char *mnemonic)
{
  char c;

  /* Anything not starting with 'f' is not a math op.  */
  if (mnemonic[0] != 'f')
    return 0;

  c = mnemonic[1];

  if (c == 'i')
    return 2;			/* integer op */

  if (c == 'l')
    {
      /* fldcw / fldenv are control ops; other fl* are not.  */
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3;
      return 1;
    }

  if (c == 'n')
    {
      /* All no-wait control ops except fnop.  */
      if (mnemonic[2] != 'o')
	return 3;
      return 1;
    }

  if (c == 'r')
    {
      /* frstor / frstpm.  */
      if (mnemonic[2] == 's')
	return 3;
      return 1;
    }

  if (c == 's')
    {
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't'
	  && (mnemonic[3] == 'c'	/* fstcw */
	      || mnemonic[3] == 'd'	/* fstdw */
	      || mnemonic[3] == 'e'	/* fstenv */
	      || mnemonic[3] == 's'))	/* fsts[gw] */
	return 3;
      return 1;
    }

  if (c == 'x')
    {
      /* fxsave / fxrstor are not really math ops.  */
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;
      return 1;
    }

  return 1;
}
2970
2971 /* Build the VEX prefix. */
2972
2973 static void
2974 build_vex_prefix (const insn_template *t)
2975 {
2976 unsigned int register_specifier;
2977 unsigned int implied_prefix;
2978 unsigned int vector_length;
2979
2980 /* Check register specifier. */
2981 if (i.vex.register_specifier)
2982 {
2983 register_specifier =
2984 ~register_number (i.vex.register_specifier) & 0xf;
2985 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
2986 }
2987 else
2988 register_specifier = 0xf;
2989
2990 /* Use 2-byte VEX prefix by swappping destination and source
2991 operand. */
2992 if (!i.swap_operand
2993 && i.operands == i.reg_operands
2994 && i.tm.opcode_modifier.vexopcode == VEX0F
2995 && i.tm.opcode_modifier.s
2996 && i.rex == REX_B)
2997 {
2998 unsigned int xchg = i.operands - 1;
2999 union i386_op temp_op;
3000 i386_operand_type temp_type;
3001
3002 temp_type = i.types[xchg];
3003 i.types[xchg] = i.types[0];
3004 i.types[0] = temp_type;
3005 temp_op = i.op[xchg];
3006 i.op[xchg] = i.op[0];
3007 i.op[0] = temp_op;
3008
3009 gas_assert (i.rm.mode == 3);
3010
3011 i.rex = REX_R;
3012 xchg = i.rm.regmem;
3013 i.rm.regmem = i.rm.reg;
3014 i.rm.reg = xchg;
3015
3016 /* Use the next insn. */
3017 i.tm = t[1];
3018 }
3019
3020 if (i.tm.opcode_modifier.vex == VEXScalar)
3021 vector_length = avxscalar;
3022 else
3023 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
3024
3025 switch ((i.tm.base_opcode >> 8) & 0xff)
3026 {
3027 case 0:
3028 implied_prefix = 0;
3029 break;
3030 case DATA_PREFIX_OPCODE:
3031 implied_prefix = 1;
3032 break;
3033 case REPE_PREFIX_OPCODE:
3034 implied_prefix = 2;
3035 break;
3036 case REPNE_PREFIX_OPCODE:
3037 implied_prefix = 3;
3038 break;
3039 default:
3040 abort ();
3041 }
3042
3043 /* Use 2-byte VEX prefix if possible. */
3044 if (i.tm.opcode_modifier.vexopcode == VEX0F
3045 && i.tm.opcode_modifier.vexw != VEXW1
3046 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3047 {
3048 /* 2-byte VEX prefix. */
3049 unsigned int r;
3050
3051 i.vex.length = 2;
3052 i.vex.bytes[0] = 0xc5;
3053
3054 /* Check the REX.R bit. */
3055 r = (i.rex & REX_R) ? 0 : 1;
3056 i.vex.bytes[1] = (r << 7
3057 | register_specifier << 3
3058 | vector_length << 2
3059 | implied_prefix);
3060 }
3061 else
3062 {
3063 /* 3-byte VEX prefix. */
3064 unsigned int m, w;
3065
3066 i.vex.length = 3;
3067
3068 switch (i.tm.opcode_modifier.vexopcode)
3069 {
3070 case VEX0F:
3071 m = 0x1;
3072 i.vex.bytes[0] = 0xc4;
3073 break;
3074 case VEX0F38:
3075 m = 0x2;
3076 i.vex.bytes[0] = 0xc4;
3077 break;
3078 case VEX0F3A:
3079 m = 0x3;
3080 i.vex.bytes[0] = 0xc4;
3081 break;
3082 case XOP08:
3083 m = 0x8;
3084 i.vex.bytes[0] = 0x8f;
3085 break;
3086 case XOP09:
3087 m = 0x9;
3088 i.vex.bytes[0] = 0x8f;
3089 break;
3090 case XOP0A:
3091 m = 0xa;
3092 i.vex.bytes[0] = 0x8f;
3093 break;
3094 default:
3095 abort ();
3096 }
3097
3098 /* The high 3 bits of the second VEX byte are 1's compliment
3099 of RXB bits from REX. */
3100 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3101
3102 /* Check the REX.W bit. */
3103 w = (i.rex & REX_W) ? 1 : 0;
3104 if (i.tm.opcode_modifier.vexw == VEXW1)
3105 w = 1;
3106
3107 i.vex.bytes[2] = (w << 7
3108 | register_specifier << 3
3109 | vector_length << 2
3110 | implied_prefix);
3111 }
3112 }
3113
3114 /* Build the EVEX prefix. */
3115
/* Build the 4-byte EVEX prefix (0x62 P0 P1 P2) for the current
   instruction into i.vex, encoding the opcode map, legacy prefix,
   vvvv register specifier, upper-16 register bits, masking,
   broadcast/rounding and vector length.  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* The 66/F3/F2 legacy prefix is folded into the pp field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* Embedded rounding control: LL' doubles as the RC field.  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  /* The aaa (opmask register) field.  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3282
/* Convert the template's extension opcode into a fake 8-bit
   immediate operand, for instructions that encode an opcode suffix
   in the immediate field.  For SSE3/SVME instructions with fixed
   register operands, validate and drop those operands first.  */
static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* Operand X must be register X (eax/ecx/edx...), in order.  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3328
3329
/* Check that the current instruction may legally carry the pending
   HLE (xacquire/xrelease) prefix, according to the template's
   hleprefixok classification.  Returns 1 if OK, 0 (with an error
   issued) otherwise.  */
static int
check_hle (void)
{
  switch (i.tm.opcode_modifier.hleprefixok)
    {
    default:
      abort ();
    case HLEPrefixNone:
      /* Instruction takes no HLE prefix at all.  */
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.hle_prefix);
      return 0;
    case HLEPrefixLock:
      /* HLE prefix only valid together with an explicit lock.  */
      if (i.prefix[LOCK_PREFIX])
	return 1;
      as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
      return 0;
    case HLEPrefixAny:
      return 1;
    case HLEPrefixRelease:
      /* Instruction allows xrelease only, and requires a memory
	 destination as the last operand.  */
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	{
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
		  i.tm.name);
	  return 0;
	}
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	{
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
	  return 0;
	}
      return 1;
    }
}
3365
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   The pipeline is: reset per-insn state, parse mnemonic and operands,
   normalize operand order/immediates/displacements, match an insn
   template, validate prefixes, then encode and emit.  The order of
   these steps matters; later checks rely on fields set earlier.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  `i' carries all per-instruction state.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Diagnose plain SSE insns when -msse-check= asked for it, unless the
     template is explicitly marked NoAVX.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.
     (0x0fb6/b7 = movzx, 0x0fbe/bf = movsx; & ~9 folds all four.)  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  Unlike the errors above, this one does not stop
     assembly of the insn.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  if (i.tm.cpu_flags.bitfield.cpumpx
      && flag_code == CODE_64BIT
      && i.prefix[ADDR_PREFIX])
    as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
    {
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3644
/* Parse the instruction mnemonic (and any leading instruction prefixes)
   at LINE into MNEMONIC, leaving current_templates pointing at the set
   of candidate templates.  Returns a pointer just past the mnemonic
   (i.e. at the operands), or NULL after emitting a diagnostic.

   Side effects: prefixes found before the mnemonic are added via
   add_prefix; ".s"/".d8"/".d32" pseudo-suffixes set i.swap_operand /
   i.disp_encoding; a trailing size suffix may be stripped into
   i.suffix (the copy in MNEMONIC is truncated in place).  */

static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop: each iteration consumes one token; prefixes keep us looping,
     the real mnemonic breaks out.  */
  while (1)
    {
      /* Copy mnemonic characters (folded by mnemonic_chars[]) into
	 MNEMONIC, remembering the last '.' for pseudo-suffixes.  */
      mnem_p = mnemonic;
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* A prefix only counts as a prefix when something follows it on
	 the line; otherwise treat it as a standalone mnemonic.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* 0xf2/0xf3: distinguish HLE (xacquire/xrelease), MPX
		 (bnd) and plain rep prefixes by their cpu flags.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.hle_prefix = current_templates->start->name;
	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		i.bnd_prefix = current_templates->start->name;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* NB: deliberate fall into the next cases — the `else'
	       governs the shared statements below.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
3879
/* Parse the comma-separated operand list at L for MNEMONIC, filling in
   the global `i' (operand count, types, values) via i386_intel_operand
   or i386_att_operand.  Returns a pointer past the parsed operands, or
   NULL after emitting a diagnostic.  Operand text is temporarily
   NUL-terminated in place (END_STRING_AND_SAVE / RESTORE_END_STRING),
   so L must be writable.  */

static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan up to the ',' that ends this operand; commas inside
	 (AT&T) parentheses or (Intel) brackets don't count.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty operand text: diagnose stray or doubled commas.  */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3998
3999 static void
4000 swap_2_operands (int xchg1, int xchg2)
4001 {
4002 union i386_op temp_op;
4003 i386_operand_type temp_type;
4004 enum bfd_reloc_code_real temp_reloc;
4005
4006 temp_type = i.types[xchg2];
4007 i.types[xchg2] = i.types[xchg1];
4008 i.types[xchg1] = temp_type;
4009 temp_op = i.op[xchg2];
4010 i.op[xchg2] = i.op[xchg1];
4011 i.op[xchg1] = temp_op;
4012 temp_reloc = i.reloc[xchg2];
4013 i.reloc[xchg2] = i.reloc[xchg1];
4014 i.reloc[xchg1] = temp_reloc;
4015
4016 if (i.mask)
4017 {
4018 if (i.mask->operand == xchg1)
4019 i.mask->operand = xchg2;
4020 else if (i.mask->operand == xchg2)
4021 i.mask->operand = xchg1;
4022 }
4023 if (i.broadcast)
4024 {
4025 if (i.broadcast->operand == xchg1)
4026 i.broadcast->operand = xchg2;
4027 else if (i.broadcast->operand == xchg2)
4028 i.broadcast->operand = xchg1;
4029 }
4030 if (i.rounding)
4031 {
4032 if (i.rounding->operand == xchg1)
4033 i.rounding->operand = xchg2;
4034 else if (i.rounding->operand == xchg2)
4035 i.rounding->operand = xchg1;
4036 }
4037 }
4038
4039 static void
4040 swap_operands (void)
4041 {
4042 switch (i.operands)
4043 {
4044 case 5:
4045 case 4:
4046 swap_2_operands (1, i.operands - 2);
4047 case 3:
4048 case 2:
4049 swap_2_operands (0, i.operands - 1);
4050 break;
4051 default:
4052 abort ();
4053 }
4054
4055 if (i.mem_operands == 2)
4056 {
4057 const seg_entry *temp_seg;
4058 temp_seg = i.seg[0];
4059 i.seg[0] = i.seg[1];
4060 i.seg[1] = temp_seg;
4061 }
4062 }
4063
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens/narrows the acceptable immediate types in
   i.types[] (before template matching) based on the insn suffix, or on
   a suffix guessed from the register operands when none was given.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No registers either: operand size follows the code size, flipped
       by an operand-size (data) prefix.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Likewise sign-extend a 32-bit value so smallest_imm_type
	       can classify it correctly.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      allowed = operand_type_and (mask, allowed);
	      /* Only narrow the type when some template still accepts
		 an immediate of the suffix's size.  */
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4210
/* Try to use the smallest displacement type too.  For constant
   displacements this narrows the disp bits in i.types[] (and may drop a
   zero displacement entirely when a base/index is present); TLS-desc
   relocs are emitted here and their disp bits cleared.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* Zero displacement with a base/index register: drop
		   the displacement operand entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With a 32-bit address-size override, an unsigned
		   32-bit displacement is also usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor calls carry no encoded displacement; emit
	       the marker reloc here and clear all disp bits.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4286
/* Check if operands are valid for the instruction.  Validates AVX/AVX-512
   specific constraints against template T: VSIB index registers and
   register distinctness, broadcast, masking, rounding/SAE, and
   compressed (vector) Disp8.  Returns 0 if the operands are acceptable,
   non-zero (with i.error set for match_template's diagnostics)
   otherwise.  May update i.types[], i.memshift and i.error.  */

static int
check_VecOperands (const insn_template *t)
{
  unsigned int op;

  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.regxmm
	  || i.index_reg->reg_type.bitfield.regymm
	  || i.index_reg->reg_type.bitfield.regzmm))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* Check if default mask is allowed.  */
  if (t->opcode_modifier.nodefmask
      && (!i.mask || i.mask->mask->reg_num == 0))
    {
      i.error = no_default_mask;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.regxmm)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.regymm)
	       || (t->opcode_modifier.vecsib == VecSIB512
		   && i.index_reg->reg_type.bitfield.regzmm)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2 || i.mask);
      if (i.reg_operands == 2 && !i.mask)
	{
	  /* AVX2 gather: mask, index and destination registers must all
	     differ (operand 1 is the memory operand with the index).  */
	  gas_assert (i.types[0].bitfield.regxmm
		      || i.types[0].bitfield.regymm);
	  gas_assert (i.types[2].bitfield.regxmm
		      || i.types[2].bitfield.regymm);
	  if (operand_check == check_none)
	    return 0;
	  if (register_number (i.op[0].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[2].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[0].regs)
	      != register_number (i.op[2].regs))
	    return 0;
	  if (operand_check == check_error)
	    {
	      i.error = invalid_vector_register_set;
	      return 1;
	    }
	  as_warn (_("mask, index, and destination registers should be distinct"));
	}
      else if (i.reg_operands == 1 && i.mask)
	{
	  /* AVX-512 gather/scatter with an opmask: index and destination
	     must differ when the destination is ymm/zmm.  */
	  if ((i.types[1].bitfield.regymm
	       || i.types[1].bitfield.regzmm)
	      && (register_number (i.op[1].regs)
		  == register_number (i.index_reg)))
	    {
	      if (operand_check == check_error)
		{
		  i.error = invalid_vector_register_set;
		  return 1;
		}
	      if (operand_check != check_none)
		as_warn (_("index and destination registers should be distinct"));
	    }
	}
    }

  /* Check if broadcast is supported by the instruction and is applied
     to the memory operand.  */
  if (i.broadcast)
    {
      int broadcasted_opnd_size;

      /* Check if specified broadcast is supported in this instruction,
	 and it's applied to memory operand of DWORD or QWORD type,
	 depending on VecESize.  */
      if (i.broadcast->type != t->opcode_modifier.broadcast
	  || !i.types[i.broadcast->operand].bitfield.mem
	  || (t->opcode_modifier.vecesize == 0
	      && !i.types[i.broadcast->operand].bitfield.dword
	      && !i.types[i.broadcast->operand].bitfield.unspecified)
	  || (t->opcode_modifier.vecesize == 1
	      && !i.types[i.broadcast->operand].bitfield.qword
	      && !i.types[i.broadcast->operand].bitfield.unspecified))
	goto bad_broadcast;

      /* Element size times broadcast factor must match the template's
	 vector width.  */
      broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
      if (i.broadcast->type == BROADCAST_1TO16)
	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
      else if (i.broadcast->type == BROADCAST_1TO8)
	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
      else if (i.broadcast->type == BROADCAST_1TO4)
	broadcasted_opnd_size <<= 2; /* Broadcast 1to4.  */
      else if (i.broadcast->type == BROADCAST_1TO2)
	broadcasted_opnd_size <<= 1; /* Broadcast 1to2.  */
      else
	goto bad_broadcast;

      if ((broadcasted_opnd_size == 256
	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
	  || (broadcasted_opnd_size == 512
	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
	{
	bad_broadcast:
	  i.error = unsupported_broadcast;
	  return 1;
	}
    }
  /* If broadcast is supported in this instruction, we need to check if
     operand of one-element size isn't specified without broadcast.  */
  else if (t->opcode_modifier.broadcast && i.mem_operands)
    {
      /* Find memory operand.  */
      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], anymem))
	  break;
      gas_assert (op < i.operands);
      /* Check size of the memory operand.  */
      if ((t->opcode_modifier.vecesize == 0
	   && i.types[op].bitfield.dword)
	  || (t->opcode_modifier.vecesize == 1
	      && i.types[op].bitfield.qword))
	{
	  i.error = broadcast_needed;
	  return 1;
	}
    }

  /* Check if requested masking is supported.  */
  if (i.mask
      && (!t->opcode_modifier.masking
	  || (i.mask->zeroing
	      && t->opcode_modifier.masking == MERGING_MASKING)))
    {
      i.error = unsupported_masking;
      return 1;
    }

  /* Check if masking is applied to dest operand.  */
  if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
    {
      i.error = mask_not_on_destination;
      return 1;
    }

  /* Check RC/SAE.  */
  if (i.rounding)
    {
      if ((i.rounding->type != saeonly
	   && !t->opcode_modifier.staticrounding)
	  || (i.rounding->type == saeonly
	      && (t->opcode_modifier.staticrounding
		  || !t->opcode_modifier.sae)))
	{
	  i.error = unsupported_rc_sae;
	  return 1;
	}
      /* If the instruction has several immediate operands and one of
	 them is rounding, the rounding operand should be the last
	 immediate operand.  */
      if (i.imm_operands > 1
	  && i.rounding->operand != (int) (i.imm_operands - 1))
	{
	  i.error = rc_sae_operand_not_last_imm;
	  return 1;
	}
    }

  /* Check vector Disp8 operand.  */
  if (t->opcode_modifier.disp8memshift)
    {
      if (i.broadcast)
	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
      else
	i.memshift = t->opcode_modifier.disp8memshift;

      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], disp)
	    && i.op[op].disps->X_op == O_constant)
	  {
	    offsetT value = i.op[op].disps->X_add_number;
	    int vec_disp8_ok = fits_in_vec_disp8 (value);
	    if (t->operand_types [op].bitfield.vec_disp8)
	      {
		if (vec_disp8_ok)
		  i.types[op].bitfield.vec_disp8 = 1;
		else
		  {
		    /* Vector insn can only have Vec_Disp8/Disp32 in
		       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
		       mode.  */
		    i.types[op].bitfield.disp8 = 0;
		    if (flag_code != CODE_16BIT)
		      i.types[op].bitfield.disp16 = 0;
		  }
	      }
	    else if (flag_code != CODE_16BIT)
	      {
		/* One form of this instruction supports vector Disp8.
		   Try vector Disp8 if we need to use Disp32.  */
		if (vec_disp8_ok && !fits_in_signed_byte (value))
		  {
		    i.error = try_vector_disp8;
		    return 1;
		  }
	      }
	  }
    }
  else
    i.memshift = -1;

  return 0;
}
4516
4517 /* Check if operands are valid for the instruction. Update VEX
4518 operand types. */
4519
4520 static int
4521 VEX_check_operands (const insn_template *t)
4522 {
4523 /* VREX is only valid with EVEX prefix. */
4524 if (i.need_vrex && !t->opcode_modifier.evex)
4525 {
4526 i.error = invalid_register_operand;
4527 return 1;
4528 }
4529
4530 if (!t->opcode_modifier.vex)
4531 return 0;
4532
4533 /* Only check VEX_Imm4, which must be the first operand. */
4534 if (t->operand_types[0].bitfield.vec_imm4)
4535 {
4536 if (i.op[0].imms->X_op != O_constant
4537 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4538 {
4539 i.error = bad_imm4;
4540 return 1;
4541 }
4542
4543 /* Turn off Imm8 so that update_imm won't complain. */
4544 i.types[0] = vec_imm4;
4545 }
4546
4547 return 0;
4548 }
4549
/* Scan current_templates for a template matching the operands, suffix
   and prefixes collected in `i'.  On success copy the winning template
   into i.tm (adjusting for address size prefix and reversed operand
   direction) and return it; on failure emit a diagnostic for the most
   specific rejection reason seen and return NULL.  */

static const insn_template *
match_template (void)
{
  /* Points to template once we've found it. */
  const insn_template *t;
  i386_operand_type overlap0, overlap1, overlap2, overlap3;
  i386_operand_type overlap4;
  unsigned int found_reverse_match;
  i386_opcode_modifier suffix_check;
  i386_operand_type operand_types [MAX_OPERANDS];
  int addr_prefix_disp;
  unsigned int j;
  unsigned int found_cpu_match;
  unsigned int check_register;
  enum i386_error specific_error = 0;

#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
#endif

  found_reverse_match = 0;
  addr_prefix_disp = -1;

  /* Record which mnemonic suffix (if any) the user gave, so templates
     that forbid that suffix can be rejected below.  */
  memset (&suffix_check, 0, sizeof (suffix_check));
  if (i.suffix == BYTE_MNEM_SUFFIX)
    suffix_check.no_bsuf = 1;
  else if (i.suffix == WORD_MNEM_SUFFIX)
    suffix_check.no_wsuf = 1;
  else if (i.suffix == SHORT_MNEM_SUFFIX)
    suffix_check.no_ssuf = 1;
  else if (i.suffix == LONG_MNEM_SUFFIX)
    suffix_check.no_lsuf = 1;
  else if (i.suffix == QWORD_MNEM_SUFFIX)
    suffix_check.no_qsuf = 1;
  else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
    suffix_check.no_ldsuf = 1;

  /* Must have right number of operands. */
  i.error = number_of_operands_mismatch;

  for (t = current_templates->start; t < current_templates->end; t++)
    {
      addr_prefix_disp = -1;

      if (i.operands != t->operands)
	continue;

      /* Check processor support. */
      i.error = unsupported;
      found_cpu_match = (cpu_flags_match (t)
			 == CPU_FLAGS_PERFECT_MATCH);
      if (!found_cpu_match)
	continue;

      /* Check old gcc support. */
      i.error = old_gcc_only;
      if (!old_gcc && t->opcode_modifier.oldgcc)
	continue;

      /* Check AT&T mnemonic.   */
      i.error = unsupported_with_intel_mnemonic;
      if (intel_mnemonic && t->opcode_modifier.attmnemonic)
	continue;

      /* Check AT&T/Intel syntax.   */
      i.error = unsupported_syntax;
      if ((intel_syntax && t->opcode_modifier.attsyntax)
	  || (!intel_syntax && t->opcode_modifier.intelsyntax))
	continue;

      /* Check the suffix, except for some instructions in intel mode. */
      i.error = invalid_instruction_suffix;
      if ((!intel_syntax || !t->opcode_modifier.ignoresize)
	  && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
	      || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
	      || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
	      || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
	      || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
	      || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
	continue;

      if (!operand_size_match (t))
	continue;

      /* Work on a local copy so address-size-prefix adjustments below
	 don't modify the template itself.  */
      for (j = 0; j < MAX_OPERANDS; j++)
	operand_types[j] = t->operand_types[j];

      /* In general, don't allow 64-bit operands in 32-bit mode.  */
      /* NOTE(review): the second alternative below requires regxmm,
	 regymm and regzmm to all be set on the same operand type, which
	 looks unsatisfiable; the parallel pre-386 check below negates
	 only regmmx.  Possibly missing `!' negations -- confirm against
	 upstream before changing.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code != CODE_64BIT
	  && (intel_syntax
	      ? (!t->opcode_modifier.ignoresize
		 && !intel_float_operand (t->name))
	      : intel_float_operand (t->name) != 2)
	  && ((!operand_types[0].bitfield.regmmx
	       && !operand_types[0].bitfield.regxmm
	       && !operand_types[0].bitfield.regymm
	       && !operand_types[0].bitfield.regzmm)
	      || (!operand_types[t->operands > 1].bitfield.regmmx
		  && operand_types[t->operands > 1].bitfield.regxmm
		  && operand_types[t->operands > 1].bitfield.regymm
		  && operand_types[t->operands > 1].bitfield.regzmm))
	  && (t->base_opcode != 0x0fc7
	      || t->extension_opcode != 1 /* cmpxchg8b */))
	continue;

      /* In general, don't allow 32-bit operands on pre-386.  */
      else if (i.suffix == LONG_MNEM_SUFFIX
	       && !cpu_arch_flags.bitfield.cpui386
	       && (intel_syntax
		   ? (!t->opcode_modifier.ignoresize
		      && !intel_float_operand (t->name))
		   : intel_float_operand (t->name) != 2)
	       && ((!operand_types[0].bitfield.regmmx
		    && !operand_types[0].bitfield.regxmm)
		   || (!operand_types[t->operands > 1].bitfield.regmmx
		       && operand_types[t->operands > 1].bitfield.regxmm)))
	continue;

      /* Do not verify operands when there are none.  */
      else
	{
	  if (!t->operands)
	    /* We've found a match; break out of loop.  */
	    break;
	}

      /* Address size prefix will turn Disp64/Disp32/Disp16 operand
	 into Disp32/Disp16/Disp32 operand.  */
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  /* There should be only one Disp operand.  */
	  switch (flag_code)
	    {
	    case CODE_16BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp16)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 1;
		      operand_types[j].bitfield.disp16 = 0;
		      break;
		    }
		}
	      break;
	    case CODE_32BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp32)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 0;
		      operand_types[j].bitfield.disp16 = 1;
		      break;
		    }
		}
	      break;
	    case CODE_64BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp64)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp64 = 0;
		      operand_types[j].bitfield.disp32 = 1;
		      break;
		    }
		}
	      break;
	    }
	}

      /* We check register size if needed.  */
      check_register = t->opcode_modifier.checkregsize;
      overlap0 = operand_type_and (i.types[0], operand_types[0]);
      switch (t->operands)
	{
	case 1:
	  if (!operand_type_match (overlap0, i.types[0]))
	    continue;
	  break;
	case 2:
	  /* xchg %eax, %eax is a special case. It is an aliase for nop
	     only in 32bit mode and we can use opcode 0x90.  In 64bit
	     mode, we can't use 0x90 for xchg %eax, %eax since it should
	     zero-extend %eax to %rax.  */
	  if (flag_code == CODE_64BIT
	      && t->base_opcode == 0x90
	      && operand_type_equal (&i.types [0], &acc32)
	      && operand_type_equal (&i.types [1], &acc32))
	    continue;
	  if (i.swap_operand)
	    {
	      /* If we swap operand in encoding, we either match
		 the next one or reverse direction of operands.  */
	      if (t->opcode_modifier.s)
		continue;
	      else if (t->opcode_modifier.d)
		goto check_reverse;
	    }
	  /* Fall through.  */

	case 3:
	  /* If we swap operand in encoding, we match the next one.  */
	  if (i.swap_operand && t->opcode_modifier.s)
	    continue;
	  /* Fall through.  */
	case 4:
	case 5:
	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
	  if (!operand_type_match (overlap0, i.types[0])
	      || !operand_type_match (overlap1, i.types[1])
	      || (check_register
		  && !operand_type_register_match (overlap0, i.types[0],
						   operand_types[0],
						   overlap1, i.types[1],
						   operand_types[1])))
	    {
	      /* Check if other direction is valid ...  */
	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
		continue;

	    check_reverse:
	      /* Try reversing direction of operands.  */
	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
	      if (!operand_type_match (overlap0, i.types[0])
		  || !operand_type_match (overlap1, i.types[1])
		  || (check_register
		      && !operand_type_register_match (overlap0,
						       i.types[0],
						       operand_types[1],
						       overlap1,
						       i.types[1],
						       operand_types[0])))
		{
		  /* Does not match either direction.  */
		  continue;
		}
	      /* found_reverse_match holds which of D or FloatDR
		 we've found.  */
	      if (t->opcode_modifier.d)
		found_reverse_match = Opcode_D;
	      else if (t->opcode_modifier.floatd)
		found_reverse_match = Opcode_FloatD;
	      else
		found_reverse_match = 0;
	      if (t->opcode_modifier.floatr)
		found_reverse_match |= Opcode_FloatR;
	    }
	  else
	    {
	      /* Found a forward 2 operand match here.  Compute the
		 remaining overlaps (cases deliberately fall through
		 from most to fewest operands).  */
	      switch (t->operands)
		{
		case 5:
		  overlap4 = operand_type_and (i.types[4],
					       operand_types[4]);
		  /* Fall through.  */
		case 4:
		  overlap3 = operand_type_and (i.types[3],
					       operand_types[3]);
		  /* Fall through.  */
		case 3:
		  overlap2 = operand_type_and (i.types[2],
					       operand_types[2]);
		  break;
		}

	      switch (t->operands)
		{
		case 5:
		  if (!operand_type_match (overlap4, i.types[4])
		      || !operand_type_register_match (overlap3,
						       i.types[3],
						       operand_types[3],
						       overlap4,
						       i.types[4],
						       operand_types[4]))
		    continue;
		  /* Fall through.  */
		case 4:
		  if (!operand_type_match (overlap3, i.types[3])
		      || (check_register
			  && !operand_type_register_match (overlap2,
							   i.types[2],
							   operand_types[2],
							   overlap3,
							   i.types[3],
							   operand_types[3])))
		    continue;
		  /* Fall through.  */
		case 3:
		  /* Here we make use of the fact that there are no
		     reverse match 3 operand instructions, and all 3
		     operand instructions only need to be checked for
		     register consistency between operands 2 and 3.  */
		  if (!operand_type_match (overlap2, i.types[2])
		      || (check_register
			  && !operand_type_register_match (overlap1,
							   i.types[1],
							   operand_types[1],
							   overlap2,
							   i.types[2],
							   operand_types[2])))
		    continue;
		  break;
		}
	    }
	  /* Found either forward/reverse 2, 3 or 4 operand match here:
	     slip through to break.  */
	}
      /* NOTE(review): found_cpu_match was already checked (and
	 `continue'd on) earlier in this iteration, so this looks always
	 true here; kept as-is -- confirm before removing.  */
      if (!found_cpu_match)
	{
	  found_reverse_match = 0;
	  continue;
	}

      /* Check if vector and VEX operands are valid.  */
      if (check_VecOperands (t) || VEX_check_operands (t))
	{
	  /* Remember the more specific rejection reason for the final
	     diagnostic, in case no later template matches.  */
	  specific_error = i.error;
	  continue;
	}

      /* We've found a match; break out of loop.  */
      break;
    }

  if (t == current_templates->end)
    {
      /* We found no match.  Report the most specific reason seen.  */
      const char *err_msg;
      switch (specific_error ? specific_error : i.error)
	{
	default:
	  abort ();
	case operand_size_mismatch:
	  err_msg = _("operand size mismatch");
	  break;
	case operand_type_mismatch:
	  err_msg = _("operand type mismatch");
	  break;
	case register_type_mismatch:
	  err_msg = _("register type mismatch");
	  break;
	case number_of_operands_mismatch:
	  err_msg = _("number of operands mismatch");
	  break;
	case invalid_instruction_suffix:
	  err_msg = _("invalid instruction suffix");
	  break;
	case bad_imm4:
	  err_msg = _("constant doesn't fit in 4 bits");
	  break;
	case old_gcc_only:
	  err_msg = _("only supported with old gcc");
	  break;
	case unsupported_with_intel_mnemonic:
	  err_msg = _("unsupported with Intel mnemonic");
	  break;
	case unsupported_syntax:
	  err_msg = _("unsupported syntax");
	  break;
	case unsupported:
	  as_bad (_("unsupported instruction `%s'"),
		  current_templates->start->name);
	  return NULL;
	case invalid_vsib_address:
	  err_msg = _("invalid VSIB address");
	  break;
	case invalid_vector_register_set:
	  err_msg = _("mask, index, and destination registers must be distinct");
	  break;
	case unsupported_vector_index_register:
	  err_msg = _("unsupported vector index register");
	  break;
	case unsupported_broadcast:
	  err_msg = _("unsupported broadcast");
	  break;
	case broadcast_not_on_src_operand:
	  err_msg = _("broadcast not on source memory operand");
	  break;
	case broadcast_needed:
	  err_msg = _("broadcast is needed for operand of such type");
	  break;
	case unsupported_masking:
	  err_msg = _("unsupported masking");
	  break;
	case mask_not_on_destination:
	  err_msg = _("mask not on destination operand");
	  break;
	case no_default_mask:
	  err_msg = _("default mask isn't allowed");
	  break;
	case unsupported_rc_sae:
	  err_msg = _("unsupported static rounding/sae");
	  break;
	case rc_sae_operand_not_last_imm:
	  if (intel_syntax)
	    err_msg = _("RC/SAE operand must precede immediate operands");
	  else
	    err_msg = _("RC/SAE operand must follow immediate operands");
	  break;
	case invalid_register_operand:
	  err_msg = _("invalid register operand");
	  break;
	}
      as_bad (_("%s for `%s'"), err_msg,
	      current_templates->start->name);
      return NULL;
    }

  if (!quiet_warnings)
    {
      if (!intel_syntax
	  && (i.types[0].bitfield.jumpabsolute
	      != operand_types[0].bitfield.jumpabsolute))
	{
	  as_warn (_("indirect %s without `*'"), t->name);
	}

      if (t->opcode_modifier.isprefix
	  && t->opcode_modifier.ignoresize)
	{
	  /* Warn them that a data or address size prefix doesn't
	     affect assembly of the next line of code.  */
	  as_warn (_("stand-alone `%s' prefix"), t->name);
	}
    }

  /* Copy the template we found.  */
  i.tm = *t;

  if (addr_prefix_disp != -1)
    i.tm.operand_types[addr_prefix_disp]
      = operand_types[addr_prefix_disp];

  if (found_reverse_match)
    {
      /* If we found a reverse match we must alter the opcode
	 direction bit.  found_reverse_match holds bits to change
	 (different for int & float insns).  */

      i.tm.base_opcode ^= found_reverse_match;

      i.tm.operand_types[0] = operand_types[1];
      i.tm.operand_types[1] = operand_types[0];
    }

  return t;
}
4997
4998 static int
4999 check_string (void)
5000 {
5001 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5002 if (i.tm.operand_types[mem_op].bitfield.esseg)
5003 {
5004 if (i.seg[0] != NULL && i.seg[0] != &es)
5005 {
5006 as_bad (_("`%s' operand %d must use `%ses' segment"),
5007 i.tm.name,
5008 mem_op + 1,
5009 register_prefix);
5010 return 0;
5011 }
5012 /* There's only ever one segment override allowed per instruction.
5013 This instruction possibly has a legal segment override on the
5014 second operand, so copy the segment to where non-string
5015 instructions store it, allowing common code. */
5016 i.seg[0] = i.seg[1];
5017 }
5018 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5019 {
5020 if (i.seg[1] != NULL && i.seg[1] != &es)
5021 {
5022 as_bad (_("`%s' operand %d must use `%ses' segment"),
5023 i.tm.name,
5024 mem_op + 2,
5025 register_prefix);
5026 return 0;
5027 }
5028 }
5029 return 1;
5030 }
5031
/* Determine the operand-size suffix for the matched template (either
   taking an explicit one, or inventing one from register operands),
   validate register operands against it, and adjust the opcode,
   prefixes and REX bits accordingly.  Returns 1 on success, 0 after
   reporting an error.  */

static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type. */
	  if (i.tm.base_opcode == 0xf20f38f1)
	    {
	      /* crc32 with 16/32/64-bit source: size from operand 0.  */
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)
	    {
	      /* crc32 with 8-bit source.  */
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan operands from last to first for a general register
		 whose size determines the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
	       || i.suffix == YMMWORD_MNEM_SUFFIX
	       || i.suffix == ZMMWORD_MNEM_SUFFIX)
	{
	  /* Skip if the instruction has x/y/z suffix.  match_template
	     should check if it is a valid suffix.  */
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Intel syntax branches/system insns: default the suffix from
	 the current code size.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through to the 32-bit default when a quad suffix
	     isn't allowed.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  /* Bitmask of the suffixes this template still permits; one
	     bit per possible suffix (b/w/l/ld/s/q).  */
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* Error out if more than one suffix would still match and the
	     template neither defaults nor ignores the operand size.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX
      && i.suffix != ZMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  cmpxchg8b is also a special case. */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
5306
5307 static int
5308 check_byte_reg (void)
5309 {
5310 int op;
5311
5312 for (op = i.operands; --op >= 0;)
5313 {
5314 /* If this is an eight bit register, it's OK. If it's the 16 or
5315 32 bit version of an eight bit register, we will just use the
5316 low portion, and that's OK too. */
5317 if (i.types[op].bitfield.reg8)
5318 continue;
5319
5320 /* I/O port address operands are OK too. */
5321 if (i.tm.operand_types[op].bitfield.inoutportreg)
5322 continue;
5323
5324 /* crc32 doesn't generate this warning. */
5325 if (i.tm.base_opcode == 0xf20f38f0)
5326 continue;
5327
5328 if ((i.types[op].bitfield.reg16
5329 || i.types[op].bitfield.reg32
5330 || i.types[op].bitfield.reg64)
5331 && i.op[op].regs->reg_num < 4
5332 /* Prohibit these changes in 64bit mode, since the lowering
5333 would be more complicated. */
5334 && flag_code != CODE_64BIT)
5335 {
5336 #if REGISTER_WARNINGS
5337 if (!quiet_warnings)
5338 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5339 register_prefix,
5340 (i.op[op].regs + (i.types[op].bitfield.reg16
5341 ? REGNAM_AL - REGNAM_AX
5342 : REGNAM_AL - REGNAM_EAX))->reg_name,
5343 register_prefix,
5344 i.op[op].regs->reg_name,
5345 i.suffix);
5346 #endif
5347 continue;
5348 }
5349 /* Any other register is bad. */
5350 if (i.types[op].bitfield.reg16
5351 || i.types[op].bitfield.reg32
5352 || i.types[op].bitfield.reg64
5353 || i.types[op].bitfield.regmmx
5354 || i.types[op].bitfield.regxmm
5355 || i.types[op].bitfield.regymm
5356 || i.types[op].bitfield.regzmm
5357 || i.types[op].bitfield.sreg2
5358 || i.types[op].bitfield.sreg3
5359 || i.types[op].bitfield.control
5360 || i.types[op].bitfield.debug
5361 || i.types[op].bitfield.test
5362 || i.types[op].bitfield.floatreg
5363 || i.types[op].bitfield.floatacc)
5364 {
5365 as_bad (_("`%s%s' not allowed with `%s%c'"),
5366 register_prefix,
5367 i.op[op].regs->reg_name,
5368 i.tm.name,
5369 i.suffix);
5370 return 0;
5371 }
5372 }
5373 return 1;
5374 }
5375
5376 static int
5377 check_long_reg (void)
5378 {
5379 int op;
5380
5381 for (op = i.operands; --op >= 0;)
5382 /* Reject eight bit registers, except where the template requires
5383 them. (eg. movzb) */
5384 if (i.types[op].bitfield.reg8
5385 && (i.tm.operand_types[op].bitfield.reg16
5386 || i.tm.operand_types[op].bitfield.reg32
5387 || i.tm.operand_types[op].bitfield.acc))
5388 {
5389 as_bad (_("`%s%s' not allowed with `%s%c'"),
5390 register_prefix,
5391 i.op[op].regs->reg_name,
5392 i.tm.name,
5393 i.suffix);
5394 return 0;
5395 }
5396 /* Warn if the e prefix on a general reg is missing. */
5397 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5398 && i.types[op].bitfield.reg16
5399 && (i.tm.operand_types[op].bitfield.reg32
5400 || i.tm.operand_types[op].bitfield.acc))
5401 {
5402 /* Prohibit these changes in the 64bit mode, since the
5403 lowering is more complicated. */
5404 if (flag_code == CODE_64BIT)
5405 {
5406 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5407 register_prefix, i.op[op].regs->reg_name,
5408 i.suffix);
5409 return 0;
5410 }
5411 #if REGISTER_WARNINGS
5412 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5413 register_prefix,
5414 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5415 register_prefix, i.op[op].regs->reg_name, i.suffix);
5416 #endif
5417 }
5418 /* Warn if the r prefix on a general reg is present. */
5419 else if (i.types[op].bitfield.reg64
5420 && (i.tm.operand_types[op].bitfield.reg32
5421 || i.tm.operand_types[op].bitfield.acc))
5422 {
5423 if (intel_syntax
5424 && i.tm.opcode_modifier.toqword
5425 && !i.types[0].bitfield.regxmm)
5426 {
5427 /* Convert to QWORD. We want REX byte. */
5428 i.suffix = QWORD_MNEM_SUFFIX;
5429 }
5430 else
5431 {
5432 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5433 register_prefix, i.op[op].regs->reg_name,
5434 i.suffix);
5435 return 0;
5436 }
5437 }
5438 return 1;
5439 }
5440
5441 static int
5442 check_qword_reg (void)
5443 {
5444 int op;
5445
5446 for (op = i.operands; --op >= 0; )
5447 /* Reject eight bit registers, except where the template requires
5448 them. (eg. movzb) */
5449 if (i.types[op].bitfield.reg8
5450 && (i.tm.operand_types[op].bitfield.reg16
5451 || i.tm.operand_types[op].bitfield.reg32
5452 || i.tm.operand_types[op].bitfield.acc))
5453 {
5454 as_bad (_("`%s%s' not allowed with `%s%c'"),
5455 register_prefix,
5456 i.op[op].regs->reg_name,
5457 i.tm.name,
5458 i.suffix);
5459 return 0;
5460 }
5461 /* Warn if the r prefix on a general reg is missing. */
5462 else if ((i.types[op].bitfield.reg16
5463 || i.types[op].bitfield.reg32)
5464 && (i.tm.operand_types[op].bitfield.reg32
5465 || i.tm.operand_types[op].bitfield.acc))
5466 {
5467 /* Prohibit these changes in the 64bit mode, since the
5468 lowering is more complicated. */
5469 if (intel_syntax
5470 && i.tm.opcode_modifier.todword
5471 && !i.types[0].bitfield.regxmm)
5472 {
5473 /* Convert to DWORD. We don't want REX byte. */
5474 i.suffix = LONG_MNEM_SUFFIX;
5475 }
5476 else
5477 {
5478 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5479 register_prefix, i.op[op].regs->reg_name,
5480 i.suffix);
5481 return 0;
5482 }
5483 }
5484 return 1;
5485 }
5486
5487 static int
5488 check_word_reg (void)
5489 {
5490 int op;
5491 for (op = i.operands; --op >= 0;)
5492 /* Reject eight bit registers, except where the template requires
5493 them. (eg. movzb) */
5494 if (i.types[op].bitfield.reg8
5495 && (i.tm.operand_types[op].bitfield.reg16
5496 || i.tm.operand_types[op].bitfield.reg32
5497 || i.tm.operand_types[op].bitfield.acc))
5498 {
5499 as_bad (_("`%s%s' not allowed with `%s%c'"),
5500 register_prefix,
5501 i.op[op].regs->reg_name,
5502 i.tm.name,
5503 i.suffix);
5504 return 0;
5505 }
5506 /* Warn if the e or r prefix on a general reg is present. */
5507 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5508 && (i.types[op].bitfield.reg32
5509 || i.types[op].bitfield.reg64)
5510 && (i.tm.operand_types[op].bitfield.reg16
5511 || i.tm.operand_types[op].bitfield.acc))
5512 {
5513 /* Prohibit these changes in the 64bit mode, since the
5514 lowering is more complicated. */
5515 if (flag_code == CODE_64BIT)
5516 {
5517 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5518 register_prefix, i.op[op].regs->reg_name,
5519 i.suffix);
5520 return 0;
5521 }
5522 #if REGISTER_WARNINGS
5523 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5524 register_prefix,
5525 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5526 register_prefix, i.op[op].regs->reg_name, i.suffix);
5527 #endif
5528 }
5529 return 1;
5530 }
5531
5532 static int
5533 update_imm (unsigned int j)
5534 {
5535 i386_operand_type overlap = i.types[j];
5536 if ((overlap.bitfield.imm8
5537 || overlap.bitfield.imm8s
5538 || overlap.bitfield.imm16
5539 || overlap.bitfield.imm32
5540 || overlap.bitfield.imm32s
5541 || overlap.bitfield.imm64)
5542 && !operand_type_equal (&overlap, &imm8)
5543 && !operand_type_equal (&overlap, &imm8s)
5544 && !operand_type_equal (&overlap, &imm16)
5545 && !operand_type_equal (&overlap, &imm32)
5546 && !operand_type_equal (&overlap, &imm32s)
5547 && !operand_type_equal (&overlap, &imm64))
5548 {
5549 if (i.suffix)
5550 {
5551 i386_operand_type temp;
5552
5553 operand_type_set (&temp, 0);
5554 if (i.suffix == BYTE_MNEM_SUFFIX)
5555 {
5556 temp.bitfield.imm8 = overlap.bitfield.imm8;
5557 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5558 }
5559 else if (i.suffix == WORD_MNEM_SUFFIX)
5560 temp.bitfield.imm16 = overlap.bitfield.imm16;
5561 else if (i.suffix == QWORD_MNEM_SUFFIX)
5562 {
5563 temp.bitfield.imm64 = overlap.bitfield.imm64;
5564 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5565 }
5566 else
5567 temp.bitfield.imm32 = overlap.bitfield.imm32;
5568 overlap = temp;
5569 }
5570 else if (operand_type_equal (&overlap, &imm16_32_32s)
5571 || operand_type_equal (&overlap, &imm16_32)
5572 || operand_type_equal (&overlap, &imm16_32s))
5573 {
5574 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5575 overlap = imm16;
5576 else
5577 overlap = imm32s;
5578 }
5579 if (!operand_type_equal (&overlap, &imm8)
5580 && !operand_type_equal (&overlap, &imm8s)
5581 && !operand_type_equal (&overlap, &imm16)
5582 && !operand_type_equal (&overlap, &imm32)
5583 && !operand_type_equal (&overlap, &imm32s)
5584 && !operand_type_equal (&overlap, &imm64))
5585 {
5586 as_bad (_("no instruction mnemonic suffix given; "
5587 "can't determine immediate size"));
5588 return 0;
5589 }
5590 }
5591 i.types[j] = overlap;
5592
5593 return 1;
5594 }
5595
5596 static int
5597 finalize_imm (void)
5598 {
5599 unsigned int j, n;
5600
5601 /* Update the first 2 immediate operands. */
5602 n = i.operands > 2 ? 2 : i.operands;
5603 if (n)
5604 {
5605 for (j = 0; j < n; j++)
5606 if (update_imm (j) == 0)
5607 return 0;
5608
5609 /* The 3rd operand can't be immediate operand. */
5610 gas_assert (operand_type_check (i.types[2], imm) == 0);
5611 }
5612
5613 return 1;
5614 }
5615
5616 static int
5617 bad_implicit_operand (int xmm)
5618 {
5619 const char *ireg = xmm ? "xmm0" : "ymm0";
5620
5621 if (intel_syntax)
5622 as_bad (_("the last operand of `%s' must be `%s%s'"),
5623 i.tm.name, register_prefix, ireg);
5624 else
5625 as_bad (_("the first operand of `%s' must be `%s%s'"),
5626 i.tm.name, register_prefix, ireg);
5627 return 0;
5628 }
5629
/* Post-process the parsed operand list before encoding: materialize or
   remove implicit operands (xmm0 for SSE2AVX templates, the duplicated
   register for RegKludge templates), encode short-form register opcodes
   into the opcode byte, build the ModRM byte when the template needs
   one, and finally emit a segment-override prefix if the user's segment
   differs from the addressing mode's default.  Returns 1 on success,
   0 after reporting an error with as_bad.  Mutates both `i' and the
   template copy `i.tm' in place.  */
5630 static int
5631 process_operands (void)
5632 {
5633 /* Default segment register this instruction will use for memory
5634 accesses. 0 means unknown. This is only for optimizing out
5635 unnecessary segment overrides. */
5636 const seg_entry *default_seg = 0;
5637
/* SSE2AVX with VEX.vvvv: rewrite the legacy SSE operand layout into the
   AVX one, duplicating the destination so it also serves as a source.  */
5638 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5639 {
5640 unsigned int dupl = i.operands;
5641 unsigned int dest = dupl - 1;
5642 unsigned int j;
5643
5644 /* The destination must be an xmm register. */
5645 gas_assert (i.reg_operands
5646 && MAX_OPERANDS > dupl
5647 && operand_type_equal (&i.types[dest], &regxmm));
5648
5649 if (i.tm.opcode_modifier.firstxmm0)
5650 {
5651 /* The first operand is implicit and must be xmm0. */
5652 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5653 if (register_number (i.op[0].regs) != 0)
5654 return bad_implicit_operand (1);
5655
5656 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5657 {
5658 /* Keep xmm0 for instructions with VEX prefix and 3
5659 sources. */
5660 goto duplicate;
5661 }
5662 else
5663 {
5664 /* We remove the first xmm0 and keep the number of
5665 operands unchanged, which in fact duplicates the
5666 destination. */
5667 for (j = 1; j < i.operands; j++)
5668 {
5669 i.op[j - 1] = i.op[j];
5670 i.types[j - 1] = i.types[j];
5671 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5672 }
5673 }
5674 }
5675 else if (i.tm.opcode_modifier.implicit1stxmm0)
5676 {
5677 gas_assert ((MAX_OPERANDS - 1) > dupl
5678 && (i.tm.opcode_modifier.vexsources
5679 == VEX3SOURCES));
5680
5681 /* Add the implicit xmm0 for instructions with VEX prefix
5682 and 3 sources. */
5683 for (j = i.operands; j > 0; j--)
5684 {
5685 i.op[j] = i.op[j - 1];
5686 i.types[j] = i.types[j - 1];
5687 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5688 }
5689 i.op[0].regs
5690 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5691 i.types[0] = regxmm;
5692 i.tm.operand_types[0] = regxmm;
5693
/* One slot for the inserted xmm0, one for the duplicate made below.  */
5694 i.operands += 2;
5695 i.reg_operands += 2;
5696 i.tm.operands += 2;
5697
5698 dupl++;
5699 dest++;
5700 i.op[dupl] = i.op[dest];
5701 i.types[dupl] = i.types[dest];
5702 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5703 }
5704 else
5705 {
5706 duplicate:
5707 i.operands++;
5708 i.reg_operands++;
5709 i.tm.operands++;
5710
5711 i.op[dupl] = i.op[dest];
5712 i.types[dupl] = i.types[dest];
5713 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5714 }
5715
5716 if (i.tm.opcode_modifier.immext)
5717 process_immext ();
5718 }
5719 else if (i.tm.opcode_modifier.firstxmm0)
5720 {
5721 unsigned int j;
5722
5723 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5724 gas_assert (i.reg_operands
5725 && (operand_type_equal (&i.types[0], &regxmm)
5726 || operand_type_equal (&i.types[0], &regymm)
5727 || operand_type_equal (&i.types[0], &regzmm)));
5728 if (register_number (i.op[0].regs) != 0)
5729 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5730
/* Drop the implicit first operand by shifting the rest down.  */
5731 for (j = 1; j < i.operands; j++)
5732 {
5733 i.op[j - 1] = i.op[j];
5734 i.types[j - 1] = i.types[j];
5735
5736 /* We need to adjust fields in i.tm since they are used by
5737 build_modrm_byte. */
5738 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5739 }
5740
5741 i.operands--;
5742 i.reg_operands--;
5743 i.tm.operands--;
5744 }
5745 else if (i.tm.opcode_modifier.regkludge)
5746 {
5747 /* The imul $imm, %reg instruction is converted into
5748 imul $imm, %reg, %reg, and the clr %reg instruction
5749 is converted into xor %reg, %reg. */
5750
5751 unsigned int first_reg_op;
5752
5753 if (operand_type_check (i.types[0], reg))
5754 first_reg_op = 0;
5755 else
5756 first_reg_op = 1;
5757 /* Pretend we saw the extra register operand. */
5758 gas_assert (i.reg_operands == 1
5759 && i.op[first_reg_op + 1].regs == 0);
5760 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5761 i.types[first_reg_op + 1] = i.types[first_reg_op];
5762 i.operands++;
5763 i.reg_operands++;
5764 }
5765
/* Short-form encoding: the register is folded into the opcode byte
   itself rather than into a ModRM byte.  */
5766 if (i.tm.opcode_modifier.shortform)
5767 {
5768 if (i.types[0].bitfield.sreg2
5769 || i.types[0].bitfield.sreg3)
5770 {
5771 if (i.tm.base_opcode == POP_SEG_SHORT
5772 && i.op[0].regs->reg_num == 1)
5773 {
5774 as_bad (_("you can't `pop %scs'"), register_prefix);
5775 return 0;
5776 }
5777 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5778 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5779 i.rex |= REX_B;
5780 }
5781 else
5782 {
5783 /* The register or float register operand is in operand
5784 0 or 1. */
5785 unsigned int op;
5786
5787 if (i.types[0].bitfield.floatreg
5788 || operand_type_check (i.types[0], reg))
5789 op = 0;
5790 else
5791 op = 1;
5792 /* Register goes in low 3 bits of opcode. */
5793 i.tm.base_opcode |= i.op[op].regs->reg_num;
5794 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5795 i.rex |= REX_B;
5796 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5797 {
5798 /* Warn about some common errors, but press on regardless.
5799 The first case can be generated by gcc (<= 2.8.1). */
5800 if (i.operands == 2)
5801 {
5802 /* Reversed arguments on faddp, fsubp, etc. */
5803 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5804 register_prefix, i.op[!intel_syntax].regs->reg_name,
5805 register_prefix, i.op[intel_syntax].regs->reg_name);
5806 }
5807 else
5808 {
5809 /* Extraneous `l' suffix on fp insn. */
5810 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5811 register_prefix, i.op[0].regs->reg_name);
5812 }
5813 }
5814 }
5815 }
5816 else if (i.tm.opcode_modifier.modrm)
5817 {
5818 /* The opcode is completed (modulo i.tm.extension_opcode which
5819 must be put into the modrm byte). Now, we make the modrm and
5820 index base bytes based on all the info we've collected. */
5821
5822 default_seg = build_modrm_byte ();
5823 }
5824 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5825 {
5826 default_seg = &ds;
5827 }
5828 else if (i.tm.opcode_modifier.isstring)
5829 {
5830 /* For the string instructions that allow a segment override
5831 on one of their operands, the default segment is ds. */
5832 default_seg = &ds;
5833 }
5834
5835 if (i.tm.base_opcode == 0x8d /* lea */
5836 && i.seg[0]
5837 && !quiet_warnings)
5838 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5839
5840 /* If a segment was explicitly specified, and the specified segment
5841 is not the default, use an opcode prefix to select it. If we
5842 never figured out what the default segment is, then default_seg
5843 will be zero at this point, and the specified segment prefix will
5844 always be used. */
5845 if ((i.seg[0]) && (i.seg[0] != default_seg))
5846 {
5847 if (!add_prefix (i.seg[0]->seg_prefix))
5848 return 0;
5849 }
5850 return 1;
5851 }
5852
/* Build the ModRM byte -- and, where the addressing mode requires it,
   the SIB byte, REX/VEX register-extension bits and the VEX.vvvv
   register specifier -- from the operands collected in `i'.  Also
   rewrites i.types[] displacement bits to the width the chosen mode
   needs, faking a zero displacement where the encoding demands one.
   Returns the default segment register implied by the addressing mode
   (0 if none) so the caller can elide redundant segment prefixes.  */
5853 static const seg_entry *
5854 build_modrm_byte (void)
5855 {
5856 const seg_entry *default_seg = 0;
5857 unsigned int source, dest;
5858 int vex_3_sources;
5859
5860 /* The first operand of instructions with VEX prefix and 3 sources
5861 must be VEX_Imm4. */
5862 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5863 if (vex_3_sources)
5864 {
5865 unsigned int nds, reg_slot;
5866 expressionS *exp;
5867
5868 if (i.tm.opcode_modifier.veximmext
5869 && i.tm.opcode_modifier.immext)
5870 {
5871 dest = i.operands - 2;
5872 gas_assert (dest == 3);
5873 }
5874 else
5875 dest = i.operands - 1;
5876 nds = dest - 1;
5877
5878 /* There are 2 kinds of instructions:
5879 1. 5 operands: 4 register operands or 3 register operands
5880 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5881 VexW0 or VexW1. The destination must be either XMM, YMM or
5882 ZMM register.
5883 2. 4 operands: 4 register operands or 3 register operands
5884 plus 1 memory operand, VexXDS, and VexImmExt */
5885 gas_assert ((i.reg_operands == 4
5886 || (i.reg_operands == 3 && i.mem_operands == 1))
5887 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5888 && (i.tm.opcode_modifier.veximmext
5889 || (i.imm_operands == 1
5890 && i.types[0].bitfield.vec_imm4
5891 && (i.tm.opcode_modifier.vexw == VEXW0
5892 || i.tm.opcode_modifier.vexw == VEXW1)
5893 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5894 || operand_type_equal (&i.tm.operand_types[dest], &regymm)
5895 || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
5896
5897 if (i.imm_operands == 0)
5898 {
5899 /* When there is no immediate operand, generate an 8bit
5900 immediate operand to encode the first operand. */
5901 exp = &im_expressions[i.imm_operands++];
5902 i.op[i.operands].imms = exp;
5903 i.types[i.operands] = imm8;
5904 i.operands++;
5905 /* If VexW1 is set, the first operand is the source and
5906 the second operand is encoded in the immediate operand. */
5907 if (i.tm.opcode_modifier.vexw == VEXW1)
5908 {
5909 source = 0;
5910 reg_slot = 1;
5911 }
5912 else
5913 {
5914 source = 1;
5915 reg_slot = 0;
5916 }
5917
5918 /* FMA swaps REG and NDS. */
5919 if (i.tm.cpu_flags.bitfield.cpufma)
5920 {
5921 unsigned int tmp;
5922 tmp = reg_slot;
5923 reg_slot = nds;
5924 nds = tmp;
5925 }
5926
5927 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5928 &regxmm)
5929 || operand_type_equal (&i.tm.operand_types[reg_slot],
5930 &regymm)
5931 || operand_type_equal (&i.tm.operand_types[reg_slot],
5932 &regzmm));
/* The register encoded in the immediate lives in imm8[7:4].  */
5933 exp->X_op = O_constant;
5934 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5935 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
5936 }
5937 else
5938 {
5939 unsigned int imm_slot;
5940
5941 if (i.tm.opcode_modifier.vexw == VEXW0)
5942 {
5943 /* If VexW0 is set, the third operand is the source and
5944 the second operand is encoded in the immediate
5945 operand. */
5946 source = 2;
5947 reg_slot = 1;
5948 }
5949 else
5950 {
5951 /* VexW1 is set, the second operand is the source and
5952 the third operand is encoded in the immediate
5953 operand. */
5954 source = 1;
5955 reg_slot = 2;
5956 }
5957
5958 if (i.tm.opcode_modifier.immext)
5959 {
5960 /* When ImmExt is set, the immdiate byte is the last
5961 operand. */
5962 imm_slot = i.operands - 1;
5963 source--;
5964 reg_slot--;
5965 }
5966 else
5967 {
5968 imm_slot = 0;
5969
5970 /* Turn on Imm8 so that output_imm will generate it. */
5971 i.types[imm_slot].bitfield.imm8 = 1;
5972 }
5973
5974 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5975 &regxmm)
5976 || operand_type_equal (&i.tm.operand_types[reg_slot],
5977 &regymm)
5978 || operand_type_equal (&i.tm.operand_types[reg_slot],
5979 &regzmm));
5980 i.op[imm_slot].imms->X_add_number
5981 |= register_number (i.op[reg_slot].regs) << 4;
5982 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
5983 }
5984
5985 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5986 || operand_type_equal (&i.tm.operand_types[nds],
5987 &regymm)
5988 || operand_type_equal (&i.tm.operand_types[nds],
5989 &regzmm));
5990 i.vex.register_specifier = i.op[nds].regs;
5991 }
5992 else
5993 source = dest = 0;
5994
5995 /* i.reg_operands MUST be the number of real register operands;
5996 implicit registers do not count. If there are 3 register
5997 operands, it must be a instruction with VexNDS. For a
5998 instruction with VexNDD, the destination register is encoded
5999 in VEX prefix. If there are 4 register operands, it must be
6000 a instruction with VEX prefix and 3 sources. */
6001 if (i.mem_operands == 0
6002 && ((i.reg_operands == 2
6003 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6004 || (i.reg_operands == 3
6005 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6006 || (i.reg_operands == 4 && vex_3_sources)))
6007 {
/* Register-only forms: locate the source operand by operand count.  */
6008 switch (i.operands)
6009 {
6010 case 2:
6011 source = 0;
6012 break;
6013 case 3:
6014 /* When there are 3 operands, one of them may be immediate,
6015 which may be the first or the last operand. Otherwise,
6016 the first operand must be shift count register (cl) or it
6017 is an instruction with VexNDS. */
6018 gas_assert (i.imm_operands == 1
6019 || (i.imm_operands == 0
6020 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6021 || i.types[0].bitfield.shiftcount)));
6022 if (operand_type_check (i.types[0], imm)
6023 || i.types[0].bitfield.shiftcount)
6024 source = 1;
6025 else
6026 source = 0;
6027 break;
6028 case 4:
6029 /* When there are 4 operands, the first two must be 8bit
6030 immediate operands. The source operand will be the 3rd
6031 one.
6032
6033 For instructions with VexNDS, if the first operand
6034 an imm8, the source operand is the 2nd one. If the last
6035 operand is imm8, the source operand is the first one. */
6036 gas_assert ((i.imm_operands == 2
6037 && i.types[0].bitfield.imm8
6038 && i.types[1].bitfield.imm8)
6039 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6040 && i.imm_operands == 1
6041 && (i.types[0].bitfield.imm8
6042 || i.types[i.operands - 1].bitfield.imm8
6043 || i.rounding)));
6044 if (i.imm_operands == 2)
6045 source = 2;
6046 else
6047 {
6048 if (i.types[0].bitfield.imm8)
6049 source = 1;
6050 else
6051 source = 0;
6052 }
6053 break;
6054 case 5:
6055 if (i.tm.opcode_modifier.evex)
6056 {
6057 /* For EVEX instructions, when there are 5 operands, the
6058 first one must be immediate operand. If the second one
6059 is immediate operand, the source operand is the 3th
6060 one. If the last one is immediate operand, the source
6061 operand is the 2nd one. */
6062 gas_assert (i.imm_operands == 2
6063 && i.tm.opcode_modifier.sae
6064 && operand_type_check (i.types[0], imm));
6065 if (operand_type_check (i.types[1], imm))
6066 source = 2;
6067 else if (operand_type_check (i.types[4], imm))
6068 source = 1;
6069 else
6070 abort ();
6071 }
6072 break;
6073 default:
6074 abort ();
6075 }
6076
6077 if (!vex_3_sources)
6078 {
6079 dest = source + 1;
6080
6081 /* RC/SAE operand could be between DEST and SRC. That happens
6082 when one operand is GPR and the other one is XMM/YMM/ZMM
6083 register. */
6084 if (i.rounding && i.rounding->operand == (int) dest)
6085 dest++;
6086
6087 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6088 {
6089 /* For instructions with VexNDS, the register-only source
6090 operand must be 32/64bit integer, XMM, YMM or ZMM
6091 register. It is encoded in VEX prefix. We need to
6092 clear RegMem bit before calling operand_type_equal. */
6093
6094 i386_operand_type op;
6095 unsigned int vvvv;
6096
6097 /* Check register-only source operand when two source
6098 operands are swapped. */
6099 if (!i.tm.operand_types[source].bitfield.baseindex
6100 && i.tm.operand_types[dest].bitfield.baseindex)
6101 {
6102 vvvv = source;
6103 source = dest;
6104 }
6105 else
6106 vvvv = dest;
6107
6108 op = i.tm.operand_types[vvvv];
6109 op.bitfield.regmem = 0;
6110 if ((dest + 1) >= i.operands
6111 || (!op.bitfield.reg32
6112 && op.bitfield.reg64
6113 && !operand_type_equal (&op, &regxmm)
6114 && !operand_type_equal (&op, &regymm)
6115 && !operand_type_equal (&op, &regzmm)
6116 && !operand_type_equal (&op, &regmask)))
6117 abort ();
6118 i.vex.register_specifier = i.op[vvvv].regs;
6119 dest++;
6120 }
6121 }
6122
6123 i.rm.mode = 3;
6124 /* One of the register operands will be encoded in the i.tm.reg
6125 field, the other in the combined i.tm.mode and i.tm.regmem
6126 fields. If no form of this instruction supports a memory
6127 destination operand, then we assume the source operand may
6128 sometimes be a memory operand and so we need to store the
6129 destination in the i.rm.reg field. */
6130 if (!i.tm.operand_types[dest].bitfield.regmem
6131 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6132 {
6133 i.rm.reg = i.op[dest].regs->reg_num;
6134 i.rm.regmem = i.op[source].regs->reg_num;
6135 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6136 i.rex |= REX_R;
6137 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6138 i.vrex |= REX_R;
6139 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6140 i.rex |= REX_B;
6141 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6142 i.vrex |= REX_B;
6143 }
6144 else
6145 {
6146 i.rm.reg = i.op[source].regs->reg_num;
6147 i.rm.regmem = i.op[dest].regs->reg_num;
6148 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6149 i.rex |= REX_B;
6150 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6151 i.vrex |= REX_B;
6152 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6153 i.rex |= REX_R;
6154 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6155 i.vrex |= REX_R;
6156 }
6157 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6158 {
6159 if (!i.types[0].bitfield.control
6160 && !i.types[1].bitfield.control)
6161 abort ();
/* Outside 64-bit mode the REX bits are repurposed: drop them and use
   the LOCK prefix instead (control-register move encoding).  */
6162 i.rex &= ~(REX_R | REX_B);
6163 add_prefix (LOCK_PREFIX_OPCODE);
6164 }
6165 }
6166 else
6167 { /* If it's not 2 reg operands... */
6168 unsigned int mem;
6169
6170 if (i.mem_operands)
6171 {
6172 unsigned int fake_zero_displacement = 0;
6173 unsigned int op;
6174
/* Find the (single) memory operand.  */
6175 for (op = 0; op < i.operands; op++)
6176 if (operand_type_check (i.types[op], anymem))
6177 break;
6178 gas_assert (op < i.operands);
6179
6180 if (i.tm.opcode_modifier.vecsib)
6181 {
6182 if (i.index_reg->reg_num == RegEiz
6183 || i.index_reg->reg_num == RegRiz)
6184 abort ();
6185
6186 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6187 if (!i.base_reg)
6188 {
6189 i.sib.base = NO_BASE_REGISTER;
6190 i.sib.scale = i.log2_scale_factor;
6191 /* No Vec_Disp8 if there is no base. */
6192 i.types[op].bitfield.vec_disp8 = 0;
6193 i.types[op].bitfield.disp8 = 0;
6194 i.types[op].bitfield.disp16 = 0;
6195 i.types[op].bitfield.disp64 = 0;
6196 if (flag_code != CODE_64BIT)
6197 {
6198 /* Must be 32 bit */
6199 i.types[op].bitfield.disp32 = 1;
6200 i.types[op].bitfield.disp32s = 0;
6201 }
6202 else
6203 {
6204 i.types[op].bitfield.disp32 = 0;
6205 i.types[op].bitfield.disp32s = 1;
6206 }
6207 }
6208 i.sib.index = i.index_reg->reg_num;
6209 if ((i.index_reg->reg_flags & RegRex) != 0)
6210 i.rex |= REX_X;
6211 if ((i.index_reg->reg_flags & RegVRex) != 0)
6212 i.vrex |= REX_X;
6213 }
6214
6215 default_seg = &ds;
6216
6217 if (i.base_reg == 0)
6218 {
6219 i.rm.mode = 0;
6220 if (!i.disp_operands)
6221 {
6222 fake_zero_displacement = 1;
6223 /* Instructions with VSIB byte need 32bit displacement
6224 if there is no base register. */
6225 if (i.tm.opcode_modifier.vecsib)
6226 i.types[op].bitfield.disp32 = 1;
6227 }
6228 if (i.index_reg == 0)
6229 {
6230 gas_assert (!i.tm.opcode_modifier.vecsib);
6231 /* Operand is just <disp> */
6232 if (flag_code == CODE_64BIT)
6233 {
6234 /* 64bit mode overwrites the 32bit absolute
6235 addressing by RIP relative addressing and
6236 absolute addressing is encoded by one of the
6237 redundant SIB forms. */
6238 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6239 i.sib.base = NO_BASE_REGISTER;
6240 i.sib.index = NO_INDEX_REGISTER;
6241 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6242 ? disp32s : disp32);
6243 }
6244 else if ((flag_code == CODE_16BIT)
6245 ^ (i.prefix[ADDR_PREFIX] != 0))
6246 {
6247 i.rm.regmem = NO_BASE_REGISTER_16;
6248 i.types[op] = disp16;
6249 }
6250 else
6251 {
6252 i.rm.regmem = NO_BASE_REGISTER;
6253 i.types[op] = disp32;
6254 }
6255 }
6256 else if (!i.tm.opcode_modifier.vecsib)
6257 {
6258 /* !i.base_reg && i.index_reg */
6259 if (i.index_reg->reg_num == RegEiz
6260 || i.index_reg->reg_num == RegRiz)
6261 i.sib.index = NO_INDEX_REGISTER;
6262 else
6263 i.sib.index = i.index_reg->reg_num;
6264 i.sib.base = NO_BASE_REGISTER;
6265 i.sib.scale = i.log2_scale_factor;
6266 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6267 /* No Vec_Disp8 if there is no base. */
6268 i.types[op].bitfield.vec_disp8 = 0;
6269 i.types[op].bitfield.disp8 = 0;
6270 i.types[op].bitfield.disp16 = 0;
6271 i.types[op].bitfield.disp64 = 0;
6272 if (flag_code != CODE_64BIT)
6273 {
6274 /* Must be 32 bit */
6275 i.types[op].bitfield.disp32 = 1;
6276 i.types[op].bitfield.disp32s = 0;
6277 }
6278 else
6279 {
6280 i.types[op].bitfield.disp32 = 0;
6281 i.types[op].bitfield.disp32s = 1;
6282 }
6283 if ((i.index_reg->reg_flags & RegRex) != 0)
6284 i.rex |= REX_X;
6285 }
6286 }
6287 /* RIP addressing for 64bit mode. */
6288 else if (i.base_reg->reg_num == RegRip ||
6289 i.base_reg->reg_num == RegEip)
6290 {
6291 gas_assert (!i.tm.opcode_modifier.vecsib);
6292 i.rm.regmem = NO_BASE_REGISTER;
6293 i.types[op].bitfield.disp8 = 0;
6294 i.types[op].bitfield.disp16 = 0;
6295 i.types[op].bitfield.disp32 = 0;
6296 i.types[op].bitfield.disp32s = 1;
6297 i.types[op].bitfield.disp64 = 0;
6298 i.types[op].bitfield.vec_disp8 = 0;
6299 i.flags[op] |= Operand_PCrel;
6300 if (! i.disp_operands)
6301 fake_zero_displacement = 1;
6302 }
6303 else if (i.base_reg->reg_type.bitfield.reg16)
6304 {
/* 16-bit addressing: the base/index pair maps directly onto the
   eight 16-bit ModRM r/m encodings.  */
6305 gas_assert (!i.tm.opcode_modifier.vecsib);
6306 switch (i.base_reg->reg_num)
6307 {
6308 case 3: /* (%bx) */
6309 if (i.index_reg == 0)
6310 i.rm.regmem = 7;
6311 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6312 i.rm.regmem = i.index_reg->reg_num - 6;
6313 break;
6314 case 5: /* (%bp) */
6315 default_seg = &ss;
6316 if (i.index_reg == 0)
6317 {
6318 i.rm.regmem = 6;
6319 if (operand_type_check (i.types[op], disp) == 0)
6320 {
6321 /* fake (%bp) into 0(%bp) */
6322 if (i.tm.operand_types[op].bitfield.vec_disp8)
6323 i.types[op].bitfield.vec_disp8 = 1;
6324 else
6325 i.types[op].bitfield.disp8 = 1;
6326 fake_zero_displacement = 1;
6327 }
6328 }
6329 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6330 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6331 break;
6332 default: /* (%si) -> 4 or (%di) -> 5 */
6333 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6334 }
6335 i.rm.mode = mode_from_disp_size (i.types[op]);
6336 }
6337 else /* i.base_reg and 32/64 bit mode */
6338 {
6339 if (flag_code == CODE_64BIT
6340 && operand_type_check (i.types[op], disp))
6341 {
6342 i386_operand_type temp;
6343 operand_type_set (&temp, 0);
6344 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6345 temp.bitfield.vec_disp8
6346 = i.types[op].bitfield.vec_disp8;
6347 i.types[op] = temp;
6348 if (i.prefix[ADDR_PREFIX] == 0)
6349 i.types[op].bitfield.disp32s = 1;
6350 else
6351 i.types[op].bitfield.disp32 = 1;
6352 }
6353
6354 if (!i.tm.opcode_modifier.vecsib)
6355 i.rm.regmem = i.base_reg->reg_num;
6356 if ((i.base_reg->reg_flags & RegRex) != 0)
6357 i.rex |= REX_B;
6358 i.sib.base = i.base_reg->reg_num;
6359 /* x86-64 ignores REX prefix bit here to avoid decoder
6360 complications. */
6361 if (!(i.base_reg->reg_flags & RegRex)
6362 && (i.base_reg->reg_num == EBP_REG_NUM
6363 || i.base_reg->reg_num == ESP_REG_NUM))
6364 default_seg = &ss;
6365 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6366 {
6367 fake_zero_displacement = 1;
6368 if (i.tm.operand_types [op].bitfield.vec_disp8)
6369 i.types[op].bitfield.vec_disp8 = 1;
6370 else
6371 i.types[op].bitfield.disp8 = 1;
6372 }
6373 i.sib.scale = i.log2_scale_factor;
6374 if (i.index_reg == 0)
6375 {
6376 gas_assert (!i.tm.opcode_modifier.vecsib);
6377 /* <disp>(%esp) becomes two byte modrm with no index
6378 register. We've already stored the code for esp
6379 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6380 Any base register besides %esp will not use the
6381 extra modrm byte. */
6382 i.sib.index = NO_INDEX_REGISTER;
6383 }
6384 else if (!i.tm.opcode_modifier.vecsib)
6385 {
6386 if (i.index_reg->reg_num == RegEiz
6387 || i.index_reg->reg_num == RegRiz)
6388 i.sib.index = NO_INDEX_REGISTER;
6389 else
6390 i.sib.index = i.index_reg->reg_num;
6391 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6392 if ((i.index_reg->reg_flags & RegRex) != 0)
6393 i.rex |= REX_X;
6394 }
6395
6396 if (i.disp_operands
6397 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6398 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6399 i.rm.mode = 0;
6400 else
6401 {
6402 if (!fake_zero_displacement
6403 && !i.disp_operands
6404 && i.disp_encoding)
6405 {
6406 fake_zero_displacement = 1;
6407 if (i.disp_encoding == disp_encoding_8bit)
6408 i.types[op].bitfield.disp8 = 1;
6409 else
6410 i.types[op].bitfield.disp32 = 1;
6411 }
6412 i.rm.mode = mode_from_disp_size (i.types[op]);
6413 }
6414 }
6415
6416 if (fake_zero_displacement)
6417 {
6418 /* Fakes a zero displacement assuming that i.types[op]
6419 holds the correct displacement size. */
6420 expressionS *exp;
6421
6422 gas_assert (i.op[op].disps == 0);
6423 exp = &disp_expressions[i.disp_operands++];
6424 i.op[op].disps = exp;
6425 exp->X_op = O_constant;
6426 exp->X_add_number = 0;
6427 exp->X_add_symbol = (symbolS *) 0;
6428 exp->X_op_symbol = (symbolS *) 0;
6429 }
6430
6431 mem = op;
6432 }
6433 else
6434 mem = ~0;
6435
6436 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6437 {
6438 if (operand_type_check (i.types[0], imm))
6439 i.vex.register_specifier = NULL;
6440 else
6441 {
6442 /* VEX.vvvv encodes one of the sources when the first
6443 operand is not an immediate. */
6444 if (i.tm.opcode_modifier.vexw == VEXW0)
6445 i.vex.register_specifier = i.op[0].regs;
6446 else
6447 i.vex.register_specifier = i.op[1].regs;
6448 }
6449
6450 /* Destination is a XMM register encoded in the ModRM.reg
6451 and VEX.R bit. */
6452 i.rm.reg = i.op[2].regs->reg_num;
6453 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6454 i.rex |= REX_R;
6455
6456 /* ModRM.rm and VEX.B encodes the other source. */
6457 if (!i.mem_operands)
6458 {
6459 i.rm.mode = 3;
6460
6461 if (i.tm.opcode_modifier.vexw == VEXW0)
6462 i.rm.regmem = i.op[1].regs->reg_num;
6463 else
6464 i.rm.regmem = i.op[0].regs->reg_num;
6465
6466 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6467 i.rex |= REX_B;
6468 }
6469 }
6470 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6471 {
6472 i.vex.register_specifier = i.op[2].regs;
6473 if (!i.mem_operands)
6474 {
6475 i.rm.mode = 3;
6476 i.rm.regmem = i.op[1].regs->reg_num;
6477 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6478 i.rex |= REX_B;
6479 }
6480 }
6481 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6482 (if any) based on i.tm.extension_opcode. Again, we must be
6483 careful to make sure that segment/control/debug/test/MMX
6484 registers are coded into the i.rm.reg field. */
6485 else if (i.reg_operands)
6486 {
6487 unsigned int op;
6488 unsigned int vex_reg = ~0;
6489
6490 for (op = 0; op < i.operands; op++)
6491 if (i.types[op].bitfield.reg8
6492 || i.types[op].bitfield.reg16
6493 || i.types[op].bitfield.reg32
6494 || i.types[op].bitfield.reg64
6495 || i.types[op].bitfield.regmmx
6496 || i.types[op].bitfield.regxmm
6497 || i.types[op].bitfield.regymm
6498 || i.types[op].bitfield.regbnd
6499 || i.types[op].bitfield.regzmm
6500 || i.types[op].bitfield.regmask
6501 || i.types[op].bitfield.sreg2
6502 || i.types[op].bitfield.sreg3
6503 || i.types[op].bitfield.control
6504 || i.types[op].bitfield.debug
6505 || i.types[op].bitfield.test)
6506 break;
6507
6508 if (vex_3_sources)
6509 op = dest;
6510 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6511 {
6512 /* For instructions with VexNDS, the register-only
6513 source operand is encoded in VEX prefix. */
6514 gas_assert (mem != (unsigned int) ~0);
6515
6516 if (op > mem)
6517 {
6518 vex_reg = op++;
6519 gas_assert (op < i.operands);
6520 }
6521 else
6522 {
6523 /* Check register-only source operand when two source
6524 operands are swapped. */
6525 if (!i.tm.operand_types[op].bitfield.baseindex
6526 && i.tm.operand_types[op + 1].bitfield.baseindex)
6527 {
6528 vex_reg = op;
6529 op += 2;
6530 gas_assert (mem == (vex_reg + 1)
6531 && op < i.operands);
6532 }
6533 else
6534 {
6535 vex_reg = op + 1;
6536 gas_assert (vex_reg < i.operands);
6537 }
6538 }
6539 }
6540 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6541 {
6542 /* For instructions with VexNDD, the register destination
6543 is encoded in VEX prefix. */
6544 if (i.mem_operands == 0)
6545 {
6546 /* There is no memory operand. */
6547 gas_assert ((op + 2) == i.operands);
6548 vex_reg = op + 1;
6549 }
6550 else
6551 {
6552 /* There are only 2 operands. */
6553 gas_assert (op < 2 && i.operands == 2);
6554 vex_reg = 1;
6555 }
6556 }
6557 else
6558 gas_assert (op < i.operands);
6559
6560 if (vex_reg != (unsigned int) ~0)
6561 {
6562 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6563
6564 if (type->bitfield.reg32 != 1
6565 && type->bitfield.reg64 != 1
6566 && !operand_type_equal (type, &regxmm)
6567 && !operand_type_equal (type, &regymm)
6568 && !operand_type_equal (type, &regzmm)
6569 && !operand_type_equal (type, &regmask))
6570 abort ();
6571
6572 i.vex.register_specifier = i.op[vex_reg].regs;
6573 }
6574
6575 /* Don't set OP operand twice. */
6576 if (vex_reg != op)
6577 {
6578 /* If there is an extension opcode to put here, the
6579 register number must be put into the regmem field. */
6580 if (i.tm.extension_opcode != None)
6581 {
6582 i.rm.regmem = i.op[op].regs->reg_num;
6583 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6584 i.rex |= REX_B;
6585 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6586 i.vrex |= REX_B;
6587 }
6588 else
6589 {
6590 i.rm.reg = i.op[op].regs->reg_num;
6591 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6592 i.rex |= REX_R;
6593 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6594 i.vrex |= REX_R;
6595 }
6596 }
6597
6598 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6599 must set it to 3 to indicate this is a register operand
6600 in the regmem field. */
6601 if (!i.mem_operands)
6602 i.rm.mode = 3;
6603 }
6604
6605 /* Fill in i.rm.reg field with extension opcode (if any). */
6606 if (i.tm.extension_opcode != None)
6607 i.rm.reg = i.tm.extension_opcode;
6608 }
6609 return default_seg;
6610 }
6611
/* Emit a relaxable branch (jmp/jcc) as a machine-dependent frag: the
   prefixes and first opcode byte go into the fixed part, while the
   possible extra opcode byte and up-to-4-byte displacement are left to
   the relax pass (md_convert_frag) to size via the subtype encoded
   with ENCODE_RELAX_STATE.  Operand 0 must be the displacement
   expression.  */
6612 static void
6613 output_branch (void)
6614 {
6615 char *p;
6616 int size;
6617 int code16;
6618 int prefix;
6619 relax_substateT subtype;
6620 symbolS *sym;
6621 offsetT off;
6622
6623 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
/* An explicit {disp32} encoding hint forces the large form at once.  */
6624 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6625
/* Count the prefix bytes we will emit ourselves, consuming them from
   i.prefixes so the leftover-prefix warning below stays accurate.  */
6626 prefix = 0;
6627 if (i.prefix[DATA_PREFIX] != 0)
6628 {
6629 prefix = 1;
6630 i.prefixes -= 1;
6631 code16 ^= CODE16;
6632 }
6633 /* Pentium4 branch hints. */
6634 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6635 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6636 {
6637 prefix++;
6638 i.prefixes--;
6639 }
6640 if (i.prefix[REX_PREFIX] != 0)
6641 {
6642 prefix++;
6643 i.prefixes--;
6644 }
6645
6646 /* BND prefixed jump. */
6647 if (i.prefix[BND_PREFIX] != 0)
6648 {
6649 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6650 i.prefixes -= 1;
6651 }
6652
6653 if (i.prefixes != 0 && !intel_syntax)
6654 as_warn (_("skipping prefixes on this instruction"));
6655
6656 /* It's always a symbol; End frag & setup for relax.
6657 Make sure there is enough room in this frag for the largest
6658 instruction we may generate in md_convert_frag. This is 2
6659 bytes for the opcode and room for the prefix and largest
6660 displacement. */
6661 frag_grow (prefix + 2 + 4);
6662 /* Prefix and 1 opcode byte go in fr_fix. */
6663 p = frag_more (prefix + 1);
6664 if (i.prefix[DATA_PREFIX] != 0)
6665 *p++ = DATA_PREFIX_OPCODE;
6666 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6667 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6668 *p++ = i.prefix[SEG_PREFIX];
6669 if (i.prefix[REX_PREFIX] != 0)
6670 *p++ = i.prefix[REX_PREFIX];
6671 *p = i.tm.base_opcode;
6672
/* Pick the relax state: unconditional jumps relax differently from
   conditional ones, and pre-i386 CPUs lack the 16-bit jcc forms.  */
6673 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6674 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6675 else if (cpu_arch_flags.bitfield.cpui386)
6676 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6677 else
6678 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6679 subtype |= code16;
6680
6681 sym = i.op[0].disps->X_add_symbol;
6682 off = i.op[0].disps->X_add_number;
6683
6684 if (i.op[0].disps->X_op != O_constant
6685 && i.op[0].disps->X_op != O_symbol)
6686 {
6687 /* Handle complex expressions. */
6688 sym = make_expr_symbol (i.op[0].disps);
6689 off = 0;
6690 }
6691
6692 /* 1 possible extra opcode + 4 byte displacement go in var part.
6693 Pass reloc in fr_var. */
6694 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6695 }
6696
/* Output a non-relaxed jump with a fixed-size displacement: either a
   1-byte loop/jecxz style branch (JumpByte) or a jump with a 16- or
   32-bit displacement (JumpDword).  A fixup of the chosen size is
   emitted for the target expression.  */

static void
output_jump (void)
{
  char *p;
  int size;			/* Displacement size in bytes.  */
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  /* Operand-size prefix flips the displacement width.  */
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through: emit the low opcode byte as well.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6778
/* Output a direct inter-segment jump/call (ptr16:16 or ptr16:32):
   prefixes, opcode, an offset of SIZE bytes taken from operand 1,
   then the 16-bit segment selector taken from operand 0.  */

static void
output_interseg_jump (void)
{
  char *p;
  int size;			/* Offset field size: 2 or 4 bytes.  */
  int prefix;			/* Count of prefix bytes.  */
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* Operand-size prefix flips between 16- and 32-bit offsets.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* A constant 16-bit offset must fit either interpretation
	 (signed or unsigned word).  */
      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  /* The segment selector must be an absolute constant; there is no
     relocation for it here.  */
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6842
/* Emit the fully-matched instruction in `i' into the current frag:
   dispatches jumps to the specialised emitters above, otherwise writes
   prefixes (legacy or VEX/EVEX), opcode bytes, ModRM/SIB, and finally
   any displacement and immediate fields.  */

static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Some processors fail on LOCK prefix. This options makes
	 assembler ignore LOCK prefix and serves as a workaround.  */
      if (omit_lock_prefix)
	{
	  /* A bare "lock" prefix line is dropped entirely.  */
	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
	    return;
	  i.prefix[LOCK_PREFIX] = 0;
	}

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* A mandatory prefix may be folded into the high byte of
	     base_opcode; peel it off and emit it as a real prefix.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
		      /* PadLock insns already carry a REP prefix;
			 don't add a duplicate.  */
check_prefix:
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with
	     R_X86_64_GOTTPOFF relocation so that linker can safely
	     perform IE->LE optimization.  */
	  if (x86_elf_abi == X86_64_X32_ABI
	      && i.operands == 2
	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
	      && i.prefix[REX_PREFIX] == 0)
	    add_prefix (REX_OPCODE);
#endif

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  /* VEX/EVEX encoded: only segment and address-size prefixes
	     may precede the VEX prefix; everything else is folded in.  */
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  /* Multi-byte opcodes: emit the leading bytes, then fall out
	     to the common code that writes the final two bytes.  */
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
7026
7027 /* Return the size of the displacement operand N. */
7028
7029 static int
7030 disp_size (unsigned int n)
7031 {
7032 int size = 4;
7033
7034 /* Vec_Disp8 has to be 8bit. */
7035 if (i.types[n].bitfield.vec_disp8)
7036 size = 1;
7037 else if (i.types[n].bitfield.disp64)
7038 size = 8;
7039 else if (i.types[n].bitfield.disp8)
7040 size = 1;
7041 else if (i.types[n].bitfield.disp16)
7042 size = 2;
7043 return size;
7044 }
7045
7046 /* Return the size of the immediate operand N. */
7047
7048 static int
7049 imm_size (unsigned int n)
7050 {
7051 int size = 4;
7052 if (i.types[n].bitfield.imm64)
7053 size = 8;
7054 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7055 size = 1;
7056 else if (i.types[n].bitfield.imm16)
7057 size = 2;
7058 return size;
7059 }
7060
/* Emit displacement bytes or fixups for every displacement operand of
   the current instruction.  INSN_START_FRAG/INSN_START_OFF locate the
   first byte of the insn so _GLOBAL_OFFSET_TABLE_ (GOTPC) references
   can be rebased from the insn start to the relocated field.  */

static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (i.types[n].bitfield.vec_disp8
	  || operand_type_check (i.types[n], disp))
	{
	  if (i.op[n].disps->X_op == O_constant)
	    {
	      int size = disp_size (n);
	      offsetT val = i.op[n].disps->X_add_number;

	      /* EVEX compressed disp8 stores the value scaled down by
		 the memory operand size (i.memshift).  */
	      if (i.types[n].bitfield.vec_disp8)
		val >>= i.memshift;
	      val = offset_in_range (val, size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      enum bfd_reloc_code_real reloc_type;
	      int size = disp_size (n);
	      int sign = i.types[n].bitfield.disp32s;
	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;

	      /* We can't have 8 bit displacement here.  */
	      gas_assert (!i.types[n].bitfield.disp8);

	      /* The PC relative address is computed relative
		 to the instruction boundary, so in case immediate
		 fields follows, we need to adjust the value.  */
	      if (pcrel && i.imm_operands)
		{
		  unsigned int n1;
		  int sz = 0;

		  for (n1 = 0; n1 < i.operands; n1++)
		    if (operand_type_check (i.types[n1], imm))
		      {
			/* Only one immediate is allowed for PC
			   relative address.  */
			gas_assert (sz == 0);
			sz = imm_size (n1);
			i.op[n].disps->X_add_number -= sz;
		      }
		  /* We should find the immediate.  */
		  gas_assert (sz != 0);
		}

	      p = frag_more (size);
	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
	      /* Recognise a reference to _GLOBAL_OFFSET_TABLE_ and turn
		 the generic reloc into the matching GOTPC flavour.  */
	      if (GOT_symbol
		  && GOT_symbol == i.op[n].disps->X_add_symbol
		  && (((reloc_type == BFD_RELOC_32
			|| reloc_type == BFD_RELOC_X86_64_32S
			|| (reloc_type == BFD_RELOC_64
			    && object_64bit))
		       && (i.op[n].disps->X_op == O_symbol
			   || (i.op[n].disps->X_op == O_add
			       && ((symbol_get_value_expression
				    (i.op[n].disps->X_op_symbol)->X_op)
				   == O_subtract))))
		      || reloc_type == BFD_RELOC_32_PCREL))
		{
		  offsetT add;

		  /* ADD = distance from insn start to the field,
		     accumulated across intervening frags if needed.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    {
		      reloc_type = BFD_RELOC_386_GOTPC;
		      /* NOTE(review): this spells the operand as
			 ->imms although we are in the displacement
			 path; presumably i.op[n] is a union so imms
			 aliases disps — confirm before changing.  */
		      i.op[n].imms->X_add_number += add;
		    }
		  else if (reloc_type == BFD_RELOC_64)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  else
		    /* Don't do the adjustment for x86-64, as there
		       the pcrel addressing is relative to the _next_
		       insn, and that is taken care of in other code.  */
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].disps, pcrel, reloc_type);
	    }
	}
    }
}
7163
/* Emit immediate bytes or fixups for every immediate operand of the
   current instruction.  INSN_START_FRAG/INSN_START_OFF locate the
   first byte of the insn for the GOTPC adjustment described in the
   long comment below.  */

static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* Imm32S with a quad-word context means the value will
		 be sign-extended at run time, so the fixup must use a
		 signed range check.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /*   This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *   The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       *   Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* ADD = distance from insn start to this immediate
		     field, accumulated across frags if necessary.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
7290 \f
7291 /* x86_cons_fix_new is called via the expression parsing code when a
7292 reloc is needed. We use this hook to get the correct .got reloc. */
7293 static int cons_sign = -1;
7294
/* Create a fixup for a cons-style data directive at FRAG+OFF, LEN
   bytes wide, adjusting the generic reloc R to the x86 reloc matching
   that size (and the signedness set up by signed_cons).  */

void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
		  expressionS *exp, bfd_reloc_code_real_type r)
{
  r = reloc (len, 0, cons_sign, r);

#ifdef TE_PE
  /* A <symbol>@SECREL32 expression was parsed: rewrite it as a plain
     symbol reference carrying the section-relative reloc.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      r = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, off, len, exp, 0, r);
}
7311
7312 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7313 purpose of the `.dc.a' internal pseudo-op. */
7314
7315 int
7316 x86_address_bytes (void)
7317 {
7318 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7319 return 4;
7320 return stdoutput->arch_info->bits_per_address / 8;
7321 }
7322
7323 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7324 || defined (LEX_AT)
7325 # define lex_got(reloc, adjust, types) NULL
7326 #else
7327 /* Parse operands of the form
7328 <symbol>@GOTOFF+<nnn>
7329 and similar .plt or .got references.
7330
7331 If we find one, set up the correct relocation in RELOC and copy the
7332 input string, minus the `@GOTOFF' into a malloc'd buffer for
7333 parsing by the calling routine. Return this buffer, and if ADJUST
7334 is non-null set it to the length of the string we removed from the
7335 input line. Otherwise return NULL. */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;			/* Reloc token after the '@'.  */
    int len;				/* Length of STR.  */
    const enum bfd_reloc_code_real rel[2]; /* [0] 32-bit, [1] 64-bit.  */
    const i386_operand_type types64;	/* Operand types in 64-bit mode.  */
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Quick exit when there is no '@' before the end of the operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Any token but @SIZE implies the GOT exists; make sure
		 its symbol does too.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7486 #endif
7487
7488 #ifdef TE_PE
7489 #ifdef lex_got
7490 #undef lex_got
7491 #endif
7492 /* Parse operands of the form
7493 <symbol>@SECREL32+<nnn>
7494
7495 If we find one, set up the correct relocation in RELOC and copy the
7496 input string, minus the `@SECREL32' into a malloc'd buffer for
7497 parsing by the calling routine. Return this buffer, and if ADJUST
7498 is non-null set it to the length of the string we removed from the
7499 input line. Otherwise return NULL.
7500
7501 This function is copied from the ELF version above adjusted for PE targets. */
7502
static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
	 int *adjust ATTRIBUTE_UNUSED,
	 i386_operand_type *types)
{
  static const struct
  {
    const char *str;			/* Reloc token after the '@'.  */
    int len;				/* Length of STR.  */
    const enum bfd_reloc_code_real rel[2]; /* [0] 32-bit, [1] 64-bit.  */
    const i386_operand_type types64;	/* Operand types in 64-bit mode.  */
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"),    { BFD_RELOC_32_SECREL,
					  BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Quick exit when there is no '@' before the end of the operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@SECLREL321 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7588
7589 #endif /* TE_PE */
7590
/* Parse the expression of a data directive of SIZE bytes into EXP,
   handling @GOTOFF-style suffixes when the size can carry such a
   reloc.  Returns the reloc selected by the suffix, or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* Negate intel_syntax while the generic expression code runs; the
     original sign is restored by the second negation below.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  /* A reloc suffix needs a relocatable symbol expression;
	     anything else here is an error.  */
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
7645
7646 static void
7647 signed_cons (int size)
7648 {
7649 if (flag_code == CODE_64BIT)
7650 cons_sign = 1;
7651 cons (size);
7652 cons_sign = -1;
7653 }
7654
7655 #ifdef TE_PE
/* Handler for the PE-specific `.secrel32' directive: emit one or more
   comma-separated 32-bit section-relative values.  Symbolic operands
   are tagged O_secrel so x86_cons_fix_new turns them into
   BFD_RELOC_32_SECREL fixups.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Back up over the character that ended the list.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
7674 #endif
7675
7676 /* Handle Vector operations. */
7677
/* Parse AVX-512 vector operation decorators following an operand:
   broadcast ({1to8} etc.), write mask ({%k1}) and zeroing ({z}).
   OP_STRING points at the first '{'; parsing stops at OP_END (or NUL
   when OP_END is NULL).  Results are recorded in i.broadcast/i.mask.
   Returns the updated scan position, or NULL after as_bad on error.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '4')
		bcst_type = BROADCAST_1TO4;
	      else if (*op_string == '2')
		bcst_type = BROADCAST_1TO2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  /* {z} was seen first; attach the mask register.  */
		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  /* Every decorator must be closed by '}'.  */
	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;
	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  return op_string;
}
7813
/* Parse the immediate operand text at IMM_START into the current
   operand slot (this_operand).  Handles @GOT-style relocation
   operators via lex_got and a trailing vector-operation suffix
   ("{...}").  Returns 1 on success, 0 on failure (an error has
   already been reported via as_bad).  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start out permitting every immediate type; lex_got may narrow
     TYPES according to the relocation operator it finds.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Temporarily point the generic expression parser at the operand
     text; restored below.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* lex_got records any relocation operator in i.reloc[] and, if one
     was present, returns a freshly allocated copy of the line with
     the operator stripped.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A plain constant or register makes no sense together with a
	 @GOT-style operator; force the finalize step to error out.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
7872
/* Validate the parsed immediate expression EXP and record the set of
   immediate operand types it may match in i.types[this_operand],
   intersected with TYPES (the set allowed by any relocation operator).
   IMM_START is the original operand text for diagnostics; it may be
   NULL to suppress error messages.  Returns 1 on success, 0 on
   failure.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      /* A register name where an immediate was expected, e.g.
	 "mov $%eax, ..." in AT&T syntax.  */
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
7929
7930 static char *
7931 i386_scale (char *scale)
7932 {
7933 offsetT val;
7934 char *save = input_line_pointer;
7935
7936 input_line_pointer = scale;
7937 val = get_absolute_expression ();
7938
7939 switch (val)
7940 {
7941 case 1:
7942 i.log2_scale_factor = 0;
7943 break;
7944 case 2:
7945 i.log2_scale_factor = 1;
7946 break;
7947 case 4:
7948 i.log2_scale_factor = 2;
7949 break;
7950 case 8:
7951 i.log2_scale_factor = 3;
7952 break;
7953 default:
7954 {
7955 char sep = *input_line_pointer;
7956
7957 *input_line_pointer = '\0';
7958 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7959 scale);
7960 *input_line_pointer = sep;
7961 input_line_pointer = save;
7962 return NULL;
7963 }
7964 }
7965 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7966 {
7967 as_warn (_("scale factor of %d without an index register"),
7968 1 << i.log2_scale_factor);
7969 i.log2_scale_factor = 0;
7970 }
7971 scale = input_line_pointer;
7972 input_line_pointer = save;
7973 return scale;
7974 }
7975
/* Parse the displacement part of a memory or branch operand, the text
   between DISP_START and DISP_END.  First computes which displacement
   widths are permissible (depending on operand kind, address/data size
   prefixes and the current code mode), records them in
   i.types[this_operand], then parses the expression and delegates
   validation to i386_finalize_displacement.  Returns 1 on success,
   0 on failure.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Memory operand (or absolute jump target): the displacement
	 width follows the address size.  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  /* A non-default mnemonic suffix acts like a data size
	     override prefix here.  */
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* lex_got records any relocation operator and may hand back a
     rewritten copy of the line with the operator stripped.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A plain constant or register is meaningless with a @GOT-style
	 operator; force the finalize step to error out.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
8125
8126 static int
8127 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8128 i386_operand_type types, const char *disp_start)
8129 {
8130 i386_operand_type bigdisp;
8131 int ret = 1;
8132
8133 /* We do this to make sure that the section symbol is in
8134 the symbol table. We will ultimately change the relocation
8135 to be relative to the beginning of the section. */
8136 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8137 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8138 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8139 {
8140 if (exp->X_op != O_symbol)
8141 goto inv_disp;
8142
8143 if (S_IS_LOCAL (exp->X_add_symbol)
8144 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8145 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8146 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8147 exp->X_op = O_subtract;
8148 exp->X_op_symbol = GOT_symbol;
8149 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8150 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8151 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8152 i.reloc[this_operand] = BFD_RELOC_64;
8153 else
8154 i.reloc[this_operand] = BFD_RELOC_32;
8155 }
8156
8157 else if (exp->X_op == O_absent
8158 || exp->X_op == O_illegal
8159 || exp->X_op == O_big)
8160 {
8161 inv_disp:
8162 as_bad (_("missing or invalid displacement expression `%s'"),
8163 disp_start);
8164 ret = 0;
8165 }
8166
8167 else if (flag_code == CODE_64BIT
8168 && !i.prefix[ADDR_PREFIX]
8169 && exp->X_op == O_constant)
8170 {
8171 /* Since displacement is signed extended to 64bit, don't allow
8172 disp32 and turn off disp32s if they are out of range. */
8173 i.types[this_operand].bitfield.disp32 = 0;
8174 if (!fits_in_signed_long (exp->X_add_number))
8175 {
8176 i.types[this_operand].bitfield.disp32s = 0;
8177 if (i.types[this_operand].bitfield.baseindex)
8178 {
8179 as_bad (_("0x%lx out range of signed 32bit displacement"),
8180 (long) exp->X_add_number);
8181 ret = 0;
8182 }
8183 }
8184 }
8185
8186 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8187 else if (exp->X_op != O_constant
8188 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8189 && exp_seg != absolute_section
8190 && exp_seg != text_section
8191 && exp_seg != data_section
8192 && exp_seg != bss_section
8193 && exp_seg != undefined_section
8194 && !bfd_is_com_section (exp_seg))
8195 {
8196 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8197 ret = 0;
8198 }
8199 #endif
8200
8201 /* Check if this is a displacement only operand. */
8202 bigdisp = i.types[this_operand];
8203 bigdisp.bitfield.disp8 = 0;
8204 bigdisp.bitfield.disp16 = 0;
8205 bigdisp.bitfield.disp32 = 0;
8206 bigdisp.bitfield.disp32s = 0;
8207 bigdisp.bitfield.disp64 = 0;
8208 if (operand_type_all_zero (&bigdisp))
8209 i.types[this_operand] = operand_type_and (i.types[this_operand],
8210 types);
8211
8212 return ret;
8213 }
8214
/* Make sure the memory operand we've been dealt is valid.
   Return 1 on success, 0 on a failure.

   Also infers an address-size prefix from the first memory operand's
   register width when needed (INFER_ADDR_PREFIX), and enforces the
   fixed-register addressing rules of string instructions.  */

static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode;

  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  /* Pick the operand type that actually carries the memory
	     reference; the esseg bit selects rDI over rSI.  */
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong but usable register: warn rather than reject, for
	     backward compatibility.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;
	}
      else
	{
	  /* 16-bit checks.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
8377
8378 /* Handle vector immediates. */
8379
8380 static int
8381 RC_SAE_immediate (const char *imm_start)
8382 {
8383 unsigned int match_found, j;
8384 const char *pstr = imm_start;
8385 expressionS *exp;
8386
8387 if (*pstr != '{')
8388 return 0;
8389
8390 pstr++;
8391 match_found = 0;
8392 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8393 {
8394 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8395 {
8396 if (!i.rounding)
8397 {
8398 rc_op.type = RC_NamesTable[j].type;
8399 rc_op.operand = this_operand;
8400 i.rounding = &rc_op;
8401 }
8402 else
8403 {
8404 as_bad (_("duplicated `%s'"), imm_start);
8405 return 0;
8406 }
8407 pstr += RC_NamesTable[j].len;
8408 match_found = 1;
8409 break;
8410 }
8411 }
8412 if (!match_found)
8413 return 0;
8414
8415 if (*pstr++ != '}')
8416 {
8417 as_bad (_("Missing '}': '%s'"), imm_start);
8418 return 0;
8419 }
8420 /* RC/SAE immediate string should contain nothing more. */;
8421 if (*pstr != 0)
8422 {
8423 as_bad (_("Junk after '}': '%s'"), imm_start);
8424 return 0;
8425 }
8426
8427 exp = &im_expressions[i.imm_operands++];
8428 i.op[this_operand].imms = exp;
8429
8430 exp->X_op = O_constant;
8431 exp->X_add_number = 0;
8432 exp->X_add_symbol = (symbolS *) 0;
8433 exp->X_op_symbol = (symbolS *) 0;
8434
8435 i.types[this_operand].bitfield.imm8 = 1;
8436 return 1;
8437 }
8438
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.

   Dispatches on the operand's leading character: '*' (absolute jump
   prefix), a register (possibly a segment override followed by a
   memory reference), '$' (immediate), an RC/SAE vector immediate, or
   a memory reference (digit, identifier or '(').  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}

      /* Handle vector operations.  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Plain register operand: record its type (minus baseindex,
	 which only applies when the register addresses memory).  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
8767 \f
8768 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8769 that an rs_machine_dependent frag may reach. */
8770
8771 unsigned int
8772 i386_frag_max_var (fragS *frag)
8773 {
8774 /* The only relaxable frags are for jumps.
8775 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8776 gas_assert (frag->fr_type == rs_machine_dependent);
8777 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8778 }
8779
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return non-zero if a reference to FR_SYMBOL can be resolved at
   assembly time, i.e. the symbol cannot be preempted by another
   definition at link or run time.  */

static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol)
{
  /* An STT_GNU_IFUNC symbol must always go through the PLT.  */
  if (symbol_get_bfdsym (fr_symbol)->flags & BSF_GNU_INDIRECT_FUNCTION)
    return 0;

  /* A non-external symbol resolves locally unless it is weak.  */
  if (!S_IS_EXTERNAL (fr_symbol))
    return !S_IS_WEAK (fr_symbol);

  /* With -mno-shared, non-weak symbols won't be preempted.  */
  if (no_shared)
    return 1;

  /* Global symbols with default visibility in a shared library may be
     preempted by another definition.  */
  return ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT;
}
#endif
8802
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var carries an explicit reloc type when one was requested
	 (e.g. via a branch to a symbol with a reloc operator).  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  /* 8086: no 16-bit conditional jump with wide displacement
	     exists, so synthesize one from an inverted short jump plus
	     an unconditional near jump.  */
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Keep the byte displacement and emit an 8-bit PC-relative
		 reloc rather than widening the jump.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
8926
/* Called after relax() is finished.

   In:	Address of frag.
	fr_type == rs_machine_dependent.
	fr_subtype is what the address relaxed to.

   Out:	Any fixSs and constants are set up.
	Caller will turn frag into a ".space 0".  */

void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode bytes for the relaxed (wider) form and note
	 where the displacement goes.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Inverted short conditional jump over an unconditional
	     near jump (no wide conditional jump on the 8086).  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the displacement field is less than four bytes wide we are sure
     that the operand fits, but if it is 4 bytes then the displacement
     may still be larger than +/- 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
9034 \f
9035 /* Apply a fixup (fixP) to segment data, once it has been determined
9036 by our caller that we have all the info we need to fix it up.
9037
9038 Parameter valP is the pointer to the value of the bits.
9039
9040 On the 386, immediates, displacements, and data pointers are all in
9041 the same (little-endian) format, so we don't need to care about which
9042 we are handling. */
9043
void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location inside the frag's literal pool that receives the value.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Convert absolute relocation types to their PC-relative
     counterparts when the fixup itself is PC-relative.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
	value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* RELA targets keep the addend in the relocation, not in the
	 section contents, so emit zero into the frag.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
9221 \f
char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Convert an ASCII floating point literal of TYPE into LITP,
     setting *SIZEP.  The final FALSE requests little-endian
     LITTLENUM ordering, matching the 386.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
9229 \f
/* Scratch buffer for output_invalid: large enough for "(0xNN)" plus
   the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in a diagnostic: printable characters
   appear quoted, everything else as a hex escape.  Returns a pointer
   to a static buffer overwritten by each call.  */
static char *
output_invalid (int c)
{
  const size_t bufsz = sizeof (output_invalid_buf);

  if (!ISPRINT (c))
    snprintf (output_invalid_buf, bufsz, "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, bufsz, "'%c'", c);
  return output_invalid_buf;
}
9243
9244 /* REG_STRING starts *before* REGISTER_PREFIX. */
9245
/* Parse a register name out of REG_STRING, returning its reg_entry or
   NULL if the text is not a register valid for the current arch/mode.
   On success *END_OP is set to the first character past the name.
   May set i.need_vrex as a side effect for upper-16 vector regs.  */
static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, translating through register_chars so
     that characters illegal in register names terminate the copy.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  /* st(0) entries are laid out consecutively, so index
		     by the parsed FP register number.  */
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* The remaining checks reject registers the selected CPU does not
     have: i386-only register classes first.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
      && !cpu_arch_flags.bitfield.cpuavx512f)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.need_vrex = 1;
    }

  /* 64-bit-only registers are rejected outside 64-bit mode, except
     that control registers are allowed on long-mode-capable CPUs.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The pseudo segment register "flat" is Intel-syntax only.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
9369
9370 /* REG_STRING starts *before* REGISTER_PREFIX. */
9371
/* Like parse_real_register, but additionally resolves symbols that
   live in reg_section (registers introduced via .equ/.set).  Saves and
   restores input_line_pointer around the symbol lookup.  */
static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      /* Temporarily point input_line_pointer at REG_STRING so
	 get_symbol_end can delimit the name.  */
      input_line_pointer = reg_string;
      c = get_symbol_end ();
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  if ((r->reg_flags & RegVRex))
	    i.need_vrex = 1;
	  *end_op = input_line_pointer;
	}
      /* Restore the character get_symbol_end overwrote with NUL, then
	 the saved scan position.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
9407
/* Target hook: try to parse NAME as a register (or, for Intel syntax,
   an Intel-specific name).  *NEXTCHARP is the character that followed
   NAME and was replaced by NUL; it is restored/updated to keep the
   scanner consistent.  Returns nonzero and fills in E on success.  */
int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Re-insert the terminator so the register parser can see past the
     end of NAME if needed.  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
9428
/* Target hook invoked for operands the generic expression parser
   cannot handle: a register prefix, or (Intel syntax) a bracketed
   index expression.  Leaves E as O_absent on failure.  */
void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      /* Remember where '[' was so we can back out on a missing ']'.  */
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
9467
9468 \f
/* Single-letter options recognised by md_parse_option.  The extra
   SVR4-compatibility letters (k, V, Q:, s) are ELF-only.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Option codes for the long options in md_longopts below; getopt
   hands these back as the 'c' argument of md_parse_option.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MNO_SHARED (OPTION_MD_BASE + 21)
9497
/* Long options accepted by this backend; dispatched on the OPTION_*
   codes above in md_parse_option.  Entries guarded by #if are only
   meaningful for the corresponding object formats.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
  {"mno-shared", no_argument, NULL, OPTION_MNO_SHARED},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_OMIT_LOCK_PREFIX},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
9532
/* Target hook: process one command line option C (a short option
   letter or an OPTION_* code) with argument ARG.  Returns 1 if the
   option was recognised, 0 otherwise.  Fatal errors abort via
   as_fatal.  */
int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;

    case OPTION_MNO_SHARED:
      /* -mno-shared: allow branch optimizations valid only for code
	 that is not loaded at arbitrary (shared library) addresses.  */
      no_shared = 1;
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	const char **list, **l;

	/* Only accept --64 if some 64-bit target is compiled in.  */
	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      /* Strip '/' from the comment character set so it can be used
	 as the division operator.  */
      {
	char *n, *t;
	const char *s;

	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* ARG is CPU optionally followed by "+ext" clauses; walk the
	 '+'-separated pieces, matching each against cpu_arch.  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Accumulate the extension names in
			 cpu_sub_arch_name for error reporting.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL );
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      if (strcmp (arg, "128") == 0)
	evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
	evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
	evexlig = evexl512;
      else
	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXRCIG:
      if (strcmp (arg, "rne") == 0)
	evexrcig = rne;
      else if (strcmp (arg, "rd") == 0)
	evexrcig = rd;
      else if (strcmp (arg, "ru") == 0)
	evexrcig = ru;
      else if (strcmp (arg, "rz") == 0)
	evexrcig = rz;
      else
	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      if (strcmp (arg, "0") == 0)
	evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
	evexwig = evexw1;
      else
	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

# if defined (TE_PE) || defined (TE_PEP)
    case OPTION_MBIG_OBJ:
      use_big_obj = 1;
      break;
#endif

    case OPTION_OMIT_LOCK_PREFIX:
      if (strcasecmp (arg, "yes") == 0)
	omit_lock_prefix = 1;
      else if (strcasecmp (arg, "no") == 0)
	omit_lock_prefix = 0;
      else
	as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
9847
9848 #define MESSAGE_TEMPLATE \
9849 " "
9850
/* Print to STREAM the names in cpu_arch, word-wrapped to the width of
   MESSAGE_TEMPLATE.  If EXT is nonzero list ISA extensions (names
   starting with '.'), otherwise processors; if CHECK is nonzero skip
   processors not usable with this assembler.  Continuation lines are
   indented 27 columns — presumably to line up with the option
   descriptions in md_show_usage (TODO confirm).  */
static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      /* Drop the leading '.' when displaying.  */
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0"  */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  *p = '\0';
  fprintf (stream, "%s\n", message);
}
9926
/* Target hook: print this backend's command line option summary to
   STREAM, in the same #if groupings used by md_shortopts and
   md_longopts above.  */
void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mno-shared             enable branch optimization for non shared code\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
}
10008
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  */

/* Returns the BFD target name matching default_arch and the selected
   object flavour.  Also settles flag_code, x86_elf_abi, the default
   ISA/tune flags, and the RELA/64-bit object globals as side
   effects.  */
const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" selects the LP64 ABI; "x86_64:32" (any suffix) the
	 x32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* Fill in defaults when no -march=/-mtune= was given.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects are 32-bit containers; 64-bit relocations
	       do not fit.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one  */
10113 \f
10114 symbolS *
10115 md_undefined_symbol (char *name)
10116 {
10117 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10118 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10119 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10120 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10121 {
10122 if (!GOT_symbol)
10123 {
10124 if (symbol_find (name))
10125 as_bad (_("GOT already in symbol table"));
10126 GOT_symbol = symbol_new (name, undefined_section,
10127 (valueT) 0, &zero_address_frag);
10128 };
10129 return GOT_symbol;
10130 }
10131 return 0;
10132 }
10133
10134 /* Round up a section size to the appropriate boundary. */
10135
10136 valueT
10137 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10138 {
10139 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10140 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10141 {
10142 /* For a.out, force the section size to be aligned. If we don't do
10143 this, BFD will align it for us, but it will not write out the
10144 final bytes of the section. This may be a bug in BFD, but it is
10145 easier to fix it here since that is how the other a.out targets
10146 work. */
10147 int align;
10148
10149 align = bfd_get_section_alignment (stdoutput, segment);
10150 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10151 }
10152 #endif
10153
10154 return size;
10155 }
10156
10157 /* On the i386, PC-relative offsets are relative to the start of the
10158 next instruction. That is, the address of the offset, plus its
10159 size, since the offset is always the last part of the insn. */
10160
10161 long
10162 md_pcrel_from (fixS *fixP)
10163 {
10164 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10165 }
10166
10167 #ifndef I386COFF
10168
10169 static void
10170 s_bss (int ignore ATTRIBUTE_UNUSED)
10171 {
10172 int temp;
10173
10174 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10175 if (IS_ELF)
10176 obj_elf_section_change_hook ();
10177 #endif
10178 temp = get_absolute_expression ();
10179 subseg_set (bss_section, (subsegT) temp);
10180 demand_empty_rest_of_line ();
10181 }
10182
10183 #endif
10184
10185 void
10186 i386_validate_fix (fixS *fixp)
10187 {
10188 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10189 {
10190 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10191 {
10192 if (!object_64bit)
10193 abort ();
10194 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10195 }
10196 else
10197 {
10198 if (!object_64bit)
10199 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10200 else
10201 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10202 }
10203 fixp->fx_subsy = 0;
10204 }
10205 }
10206
/* Translate the internal fixup FIXP into a BFD arelent for the
   object writer: pick the BFD relocation code, build the arelent,
   and compute its addend under the Rel (i386) or Rela (x86-64)
   conventions.  Returns NULL when the fixup was fully resolved here
   (local SIZE relocations); diagnoses relocations that cannot be
   represented, e.g. 64-bit-only relocs in x32 mode.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
      /* Fall through: sizes of global/undefined symbols become real
	 relocations, emitted unchanged like the types below.  */
#endif

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These relocation types map 1:1 onto BFD codes.  */
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through: a pc-relative 32S is sized like any other
	 pc-relative fixup.  */
    default:
      /* Anything else is picked purely by fixup size and
	 pc-relativity.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A direct reference to the GOT symbol itself turns into a GOTPC
     relocation of the matching width.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  /* xmalloc does not return on failure, so no NULL checks here.  */
  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry 64-bit-wide relocations; diagnose
	 but keep going so further errors are still reported.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
10420
10421 #include "tc-i386-intel.c"
10422
10423 void
10424 tc_x86_parse_to_dw2regnum (expressionS *exp)
10425 {
10426 int saved_naked_reg;
10427 char saved_register_dot;
10428
10429 saved_naked_reg = allow_naked_reg;
10430 allow_naked_reg = 1;
10431 saved_register_dot = register_chars['.'];
10432 register_chars['.'] = '.';
10433 allow_pseudo_reg = 1;
10434 expression_and_evaluate (exp);
10435 allow_pseudo_reg = 0;
10436 register_chars['.'] = saved_register_dot;
10437 allow_naked_reg = saved_naked_reg;
10438
10439 if (exp->X_op == O_register && exp->X_add_number >= 0)
10440 {
10441 if ((addressT) exp->X_add_number < i386_regtab_size)
10442 {
10443 exp->X_op = O_constant;
10444 exp->X_add_number = i386_regtab[exp->X_add_number]
10445 .dw2_regnum[flag_code >> 1];
10446 }
10447 else
10448 exp->X_op = O_illegal;
10449 }
10450 }
10451
10452 void
10453 tc_x86_frame_initial_instructions (void)
10454 {
10455 static unsigned int sp_regno[2];
10456
10457 if (!sp_regno[flag_code >> 1])
10458 {
10459 char *saved_input = input_line_pointer;
10460 char sp[][4] = {"esp", "rsp"};
10461 expressionS exp;
10462
10463 input_line_pointer = sp[flag_code >> 1];
10464 tc_x86_parse_to_dw2regnum (&exp);
10465 gas_assert (exp.X_op == O_constant);
10466 sp_regno[flag_code >> 1] = exp.X_add_number;
10467 input_line_pointer = saved_input;
10468 }
10469
10470 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10471 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
10472 }
10473
/* Return the address size in bytes used for DWARF2 debug info:
   4 for the x32 ABI (32-bit addresses on a 64-bit arch), otherwise
   whatever BFD reports for the output architecture.  */

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
10483
10484 int
10485 i386_elf_section_type (const char *str, size_t len)
10486 {
10487 if (flag_code == CODE_64BIT
10488 && len == sizeof ("unwind") - 1
10489 && strncmp (str, "unwind", 6) == 0)
10490 return SHT_X86_64_UNWIND;
10491
10492 return -1;
10493 }
10494
10495 #ifdef TE_SOLARIS
10496 void
10497 i386_solaris_fix_up_eh_frame (segT sec)
10498 {
10499 if (flag_code == CODE_64BIT)
10500 elf_section_type (sec) = SHT_X86_64_UNWIND;
10501 }
10502 #endif
10503
10504 #ifdef TE_PE
/* Emit a SIZE-byte section-relative (O_secrel) reference to SYMBOL,
   used for DWARF2 offsets in PE object files.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* NOTE(review): only these three fields are set; the rest of EXP
     is left uninitialized — presumably emit_expr reads nothing else
     for an O_secrel expression, but confirm.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
10515 #endif
10516
10517 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10518 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10519
10520 bfd_vma
10521 x86_64_section_letter (int letter, char **ptr_msg)
10522 {
10523 if (flag_code == CODE_64BIT)
10524 {
10525 if (letter == 'l')
10526 return SHF_X86_64_LARGE;
10527
10528 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10529 }
10530 else
10531 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10532 return -1;
10533 }
10534
10535 bfd_vma
10536 x86_64_section_word (char *str, size_t len)
10537 {
10538 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10539 return SHF_X86_64_LARGE;
10540
10541 return -1;
10542 }
10543
/* Handle the .largecomm pseudo-op: like .comm, but the symbol is
   placed in the large common section (local symbols go to .lbss).
   In 32-bit mode it degrades to a plain .comm with a warning.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* lbss_section is created on first use and reused after.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched the current section; restore it.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily redirect common/bss to the large sections while
	 s_comm_internal parses the directive, then restore them.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
10583 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.305674 seconds and 4 git commands to generate.