Add Intel AVX-512 support
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
/* Non-zero to warn when a register is used with an operand size it does
   not match (e.g. "addl %ax").  Targets may pre-define this to 0.  */
#ifndef REGISTER_WARNINGS
#define REGISTER_WARNINGS 1
#endif

/* Non-zero to infer an address-size prefix from the operands when it is
   not given explicitly.  */
#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif

#ifndef INLINE
#if __GNUC__ >= 2
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT is really an
   instruction, and so must come before any prefixes.
   The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
   REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
#define WAIT_PREFIX	0
#define SEG_PREFIX	1
#define ADDR_PREFIX	2
#define DATA_PREFIX	3
#define REP_PREFIX	4
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX
#define LOCK_PREFIX	5
#define REX_PREFIX	6       /* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */

/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* these are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX  'q'
#define XMMWORD_MNEM_SUFFIX  'x'
#define YMMWORD_MNEM_SUFFIX 'y'
#define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'
95
/*
  'templates' is for grouping together 'template' structures for opcodes
  of the same name.  This is only used for storing the insns in the grand
  ole hash table of insns.
  The templates themselves start at START and range up to (but not including)
  END.
  */
typedef struct
{
  const insn_template *start;
  const insn_template *end;
}
templates;

/* 386 operand encoding bytes:  see 386 book for details of this.  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;

/* x86-64 extension prefix.  */
typedef int rex_byte;

/* 386 opcode byte to code indirect addressing.  */
typedef struct
{
  unsigned base;
  unsigned index;
  unsigned scale;
}
sib_byte;

/* x86 arch names, types and features.  One entry per entry of the
   cpu_arch[] table below.  */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  enum processor_type type;	/* arch type */
  i386_cpu_flags flags;		/* cpu feature flags */
  unsigned int skip;		/* show_arch should skip this. */
  unsigned int negated;		/* turn off indicated flags.  */
}
arch_entry;
142
/* Forward declarations for this file's static helpers; the int argument
   of the directive handlers is the pseudo-op table's auxiliary value.  */
static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
#endif
static void signed_cons (int);
static char *output_invalid (int c);
static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
				    const char *);
static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
				       const char *);
static int i386_att_operand (char *);
static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (char *, char **);
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
static void swap_2_operands (int, int);
static void optimize_imm (void);
static void optimize_disp (void);
static const insn_template *match_template (void);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
static const seg_entry *build_modrm_byte (void);
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
#ifndef I386COFF
static void s_bss (int);
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);
#endif

static const char *default_arch = DEFAULT_ARCH;
192
/* This struct describes rounding control and SAE in the instruction.  */
struct RC_Operation
{
  enum rc_type
    {
      rne = 0,
      rd,
      ru,
      rz,
      saeonly
    } type;
  /* Index of the operand the {rn-sae}/{sae}/... annotation was parsed on.  */
  int operand;
};

static struct RC_Operation rc_op;

/* The struct describes masking, applied to OPERAND in the instruction.
   MASK is a pointer to the corresponding mask register.  ZEROING tells
   whether merging or zeroing mask is used.  */
struct Mask_Operation
{
  const reg_entry *mask;
  unsigned int zeroing;
  /* The operand where this operation is associated.  */
  int operand;
};

static struct Mask_Operation mask_op;

/* The struct describes broadcasting, applied to OPERAND.  FACTOR is
   broadcast factor.  */
struct Broadcast_Operation
{
  /* Type of broadcast: no broadcast, {1to8}, or {1to16}.  */
  int type;

  /* Index of broadcasted operand.  */
  int operand;
};

static struct Broadcast_Operation broadcast_op;

/* VEX prefix.  */
typedef struct
{
  /* VEX prefix is either 2 byte or 3 byte.  EVEX is 4 byte.  */
  unsigned char bytes[4];
  unsigned int length;
  /* Destination or source register specifier.  */
  const reg_entry *register_specifier;
} vex_prefix;
244
/* 'md_assemble ()' gathers together information and puts it into a
   i386_insn.  */

/* One operand: a displacement or immediate expression, or a register.
   Which member is valid is determined by the matching types[] entry in
   struct _i386_insn.  */
union i386_op
  {
    expressionS *disps;
    expressionS *imms;
    const reg_entry *regs;
  };

/* Reasons why template matching / operand checking failed; stored in
   i.error and used to pick the diagnostic message.  */
enum i386_error
  {
    operand_size_mismatch,
    operand_type_mismatch,
    register_type_mismatch,
    number_of_operands_mismatch,
    invalid_instruction_suffix,
    bad_imm4,
    old_gcc_only,
    unsupported_with_intel_mnemonic,
    unsupported_syntax,
    unsupported,
    invalid_vsib_address,
    invalid_vector_register_set,
    unsupported_vector_index_register,
    unsupported_broadcast,
    broadcast_not_on_src_operand,
    broadcast_needed,
    unsupported_masking,
    mask_not_on_destination,
    no_default_mask,
    unsupported_rc_sae,
    rc_sae_operand_not_last_imm,
    invalid_register_operand,
    try_vector_disp8
  };
281
struct _i386_insn
  {
    /* TM holds the template for the insn we're currently assembling.  */
    insn_template tm;

    /* SUFFIX holds the instruction size suffix for byte, word, dword
       or qword, if given.  */
    char suffix;

    /* OPERANDS gives the number of given operands.  */
    unsigned int operands;

    /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
       of given register, displacement, memory operands and immediate
       operands.  */
    unsigned int reg_operands, disp_operands, mem_operands, imm_operands;

    /* TYPES [i] is the type (see above #defines) which tells us how to
       use OP[i] for the corresponding operand.  */
    i386_operand_type types[MAX_OPERANDS];

    /* Displacement expression, immediate expression, or register for each
       operand.  */
    union i386_op op[MAX_OPERANDS];

    /* Flags for operands.  */
    unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1

    /* Relocation type for operand */
    enum bfd_reloc_code_real reloc[MAX_OPERANDS];

    /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
       the base index byte below.  */
    const reg_entry *base_reg;
    const reg_entry *index_reg;
    unsigned int log2_scale_factor;

    /* SEG gives the seg_entries of this insn.  They are zero unless
       explicit segment overrides are given.  */
    const seg_entry *seg[2];

    /* PREFIX holds all the given prefix opcodes (usually null).
       PREFIXES is the number of prefix opcodes.  */
    unsigned int prefixes;
    unsigned char prefix[MAX_PREFIXES];

    /* RM and SIB are the modrm byte and the sib byte where the
       addressing modes of this insn are encoded.  */
    modrm_byte rm;
    rex_byte rex;
    rex_byte vrex;
    sib_byte sib;
    vex_prefix vex;

    /* Masking attributes.  */
    struct Mask_Operation *mask;

    /* Rounding control and SAE attributes.  */
    struct RC_Operation *rounding;

    /* Broadcasting attributes.  */
    struct Broadcast_Operation *broadcast;

    /* Compressed disp8*N attribute.  */
    unsigned int memshift;

    /* Swap operand in encoding.  */
    unsigned int swap_operand;

    /* Prefer 8bit or 32bit displacement in encoding.  */
    enum
      {
	disp_encoding_default = 0,
	disp_encoding_8bit,
	disp_encoding_32bit
      } disp_encoding;

    /* REP prefix.  */
    const char *rep_prefix;

    /* HLE prefix.  */
    const char *hle_prefix;

    /* Have BND prefix.  */
    const char *bnd_prefix;

    /* Need VREX to support upper 16 registers.  */
    int need_vrex;

    /* Error message.  */
    enum i386_error error;
  };

typedef struct _i386_insn i386_insn;
377
/* Link RC type with corresponding string, that'll be looked for in
   asm.  */
struct RC_name
{
  enum rc_type type;
  const char *name;
  unsigned int len;
};

/* Rounding-control / SAE annotation spellings, indexed in the same
   order as enum rc_type.  */
static const struct RC_name RC_NamesTable[] =
{
  {	rne,	STRING_COMMA_LEN ("rn-sae") },
  {	rd,	STRING_COMMA_LEN ("rd-sae") },
  {	ru,	STRING_COMMA_LEN ("ru-sae") },
  {	rz,	STRING_COMMA_LEN ("rz-sae") },
  {	saeonly,	STRING_COMMA_LEN ("sae") },
};

/* List of chars besides those in app.c:symbol_chars that can start an
   operand.  Used to prevent the scrubber eating vital white-space.  */
const char extra_symbol_chars[] = "*%-([{"
#ifdef LEX_AT
	"@"
#endif
#ifdef LEX_QM
	"?"
#endif
	;
406
#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NACL)				\
	 && !defined (TE_NETWARE)			\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant
   As in 0f12.456
   or    0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";

/* Tables for lexical analysis.  Filled in at startup; a non-zero entry
   means the character may appear in the corresponding token class.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
477
/* The instruction we're assembling.  */
static i386_insn i;

/* Possible templates for current insn.  */
static const templates *current_templates;

/* Per instruction expressionS buffers: max displacements & immediates.  */
static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];

/* Current operand we are working on.  */
static int this_operand = -1;

/* We support four different modes.  FLAG_CODE variable is used to distinguish
   these.  */

enum flag_code {
	CODE_32BIT,
	CODE_16BIT,
	CODE_64BIT };

static enum flag_code flag_code;
static unsigned int object_64bit;
static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;

#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* The ELF ABI to use.  */
enum x86_elf_abi
{
  I386_ABI,
  X86_64_ABI,
  X86_64_X32_ABI
};

static enum x86_elf_abi x86_elf_abi = I386_ABI;
#endif

/* 1 for intel syntax,
   0 if att syntax.  */
static int intel_syntax = 0;

/* 1 for intel mnemonic,
   0 if att mnemonic.  */
static int intel_mnemonic = !SYSV386_COMPAT;

/* 1 if support old (<= 2.8.1) versions of gcc.  */
static int old_gcc = OLDGCC_COMPAT;

/* 1 if pseudo registers are permitted.  */
static int allow_pseudo_reg = 0;

/* 1 if register prefix % not required.  */
static int allow_naked_reg = 0;

/* 1 if the assembler should add BND prefix for all control-transferring
   instructions supporting it, even if this prefix wasn't specified
   explicitly.  */
static int add_bnd_prefix = 0;

/* 1 if pseudo index register, eiz/riz, is allowed.  */
static int allow_index_reg = 0;

/* How strictly to diagnose questionable SSE instructions and operand
   mismatches: ignore, warn, or error.  */
static enum check_kind
  {
    check_none = 0,
    check_warning,
    check_error
  }
sse_check, operand_check = check_warning;

/* Register prefix used for error message.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;

/* CPU name.  */
static const char *cpu_arch_name = NULL;
static char *cpu_sub_arch_name = NULL;

/* CPU feature flags.  */
static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;

/* If we have selected a cpu we are generating instructions for.  */
static int cpu_arch_tune_set = 0;

/* Cpu we are generating instructions for.  */
enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;

/* CPU feature flags of cpu we are generating instructions for.  */
static i386_cpu_flags cpu_arch_tune_flags;

/* CPU instruction set architecture used.  */
enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;

/* CPU feature flags of instruction set architecture used.  */
i386_cpu_flags cpu_arch_isa_flags;

/* If set, conditional jumps are not automatically promoted to handle
   larger than a byte offset.  */
static unsigned int no_cond_jump_promotion = 0;

/* Encode SSE instructions with VEX prefix.  */
static unsigned int sse2avx;

/* Encode scalar AVX instructions with specific vector length.  */
static enum
  {
    vex128 = 0,
    vex256
  } avxscalar;

/* Encode scalar EVEX LIG instructions with specific vector length.  */
static enum
  {
    evexl128 = 0,
    evexl256,
    evexl512
  } evexlig;

/* Encode EVEX WIG instructions with specific evex.w.  */
static enum
  {
    evexw0 = 0,
    evexw1
  } evexwig;

/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;

/* The dwarf2 return column, adjusted for 32 or 64 bit.  */
unsigned int x86_dwarf2_return_column;

/* The dwarf2 data alignment, adjusted for 32 or 64 bit.  */
int x86_cie_data_alignment;
625
626 /* Interface to relax_segment.
627 There are 3 major relax states for 386 jump insns because the
628 different types of jumps add different sizes to frags when we're
629 figuring out what sort of jump to choose to reach a given label. */
630
631 /* Types. */
632 #define UNCOND_JUMP 0
633 #define COND_JUMP 1
634 #define COND_JUMP86 2
635
636 /* Sizes. */
637 #define CODE16 1
638 #define SMALL 0
639 #define SMALL16 (SMALL | CODE16)
640 #define BIG 2
641 #define BIG16 (BIG | CODE16)
642
643 #ifndef INLINE
644 #ifdef __GNUC__
645 #define INLINE __inline__
646 #else
647 #define INLINE
648 #endif
649 #endif
650
651 #define ENCODE_RELAX_STATE(type, size) \
652 ((relax_substateT) (((type) << 2) | (size)))
653 #define TYPE_FROM_RELAX_STATE(s) \
654 ((s) >> 2)
655 #define DISP_SIZE_FROM_RELAX_STATE(s) \
656 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
657
658 /* This table is used by relax_frag to promote short jumps to long
659 ones where necessary. SMALL (short) jumps may be promoted to BIG
660 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
661 don't allow a short jump in a 32 bit code segment to be promoted to
662 a 16 bit offset jump because it's slower (requires data size
663 prefix), and doesn't work, unless the destination is in the bottom
664 64k of the code segment (The top 16 bits of eip are zeroed). */
665
666 const relax_typeS md_relax_table[] =
667 {
668 /* The fields are:
669 1) most positive reach of this state,
670 2) most negative reach of this state,
671 3) how many bytes this mode will have in the variable part of the frag
672 4) which index into the table to try if we can't fit into this one. */
673
674 /* UNCOND_JUMP states. */
675 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
676 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
677 /* dword jmp adds 4 bytes to frag:
678 0 extra opcode bytes, 4 displacement bytes. */
679 {0, 0, 4, 0},
680 /* word jmp adds 2 byte2 to frag:
681 0 extra opcode bytes, 2 displacement bytes. */
682 {0, 0, 2, 0},
683
684 /* COND_JUMP states. */
685 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
686 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
687 /* dword conditionals adds 5 bytes to frag:
688 1 extra opcode byte, 4 displacement bytes. */
689 {0, 0, 5, 0},
690 /* word conditionals add 3 bytes to frag:
691 1 extra opcode byte, 2 displacement bytes. */
692 {0, 0, 3, 0},
693
694 /* COND_JUMP86 states. */
695 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
696 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
697 /* dword conditionals adds 5 bytes to frag:
698 1 extra opcode byte, 4 displacement bytes. */
699 {0, 0, 5, 0},
700 /* word conditionals add 4 bytes to frag:
701 1 displacement byte and a 3 byte long branch insn. */
702 {0, 0, 4, 0}
703 };
704
/* Table of recognized ".arch" / "-march=" names.  Entries whose name
   begins with '.' are sub-architecture feature switches; SKIP marks
   aliases hidden from show_arch, NEGATED marks entries that turn the
   indicated feature flags off (the ".noXXX" forms).  */
static const arch_entry cpu_arch[] =
{
  /* Do not replace the first two entries - i386_target_format()
     relies on them being there in this order.  */
  { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
    CPU_GENERIC32_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
    CPU_GENERIC64_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
    CPU_NONE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
    CPU_I186_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
    CPU_I286_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
    CPU_I386_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
    CPU_I486_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
    CPU_I686_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
    CPU_PENTIUMPRO_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
    CPU_P2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
    CPU_P3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
    CPU_P4_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
    CPU_CORE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
    CPU_NOCONA_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
    CPU_COREI7_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
    CPU_L1OM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
    CPU_K1OM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
    CPU_K6_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
    CPU_K6_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
    CPU_ATHLON_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
    CPU_K8_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
    CPU_AMDFAM10_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
    CPU_BDVER1_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
    CPU_BDVER2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
    CPU_BDVER3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
    CPU_BTVER1_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
    CPU_BTVER2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
    CPU_8087_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
    CPU_287_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
    CPU_387_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
    CPU_ANY87_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
    CPU_MMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
    CPU_SSE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
    CPU_SSE2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
    CPU_SSE3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
    CPU_SSSE3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
    CPU_SSE4_1_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
    CPU_ANY_SSE_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
    CPU_AVX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
    CPU_AVX2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
    CPU_AVX512F_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
    CPU_AVX512CD_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
    CPU_AVX512ER_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
    CPU_AVX512PF_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
    CPU_ANY_AVX_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
    CPU_VMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
    CPU_VMFUNC_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
    CPU_SMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
    CPU_XSAVE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
    CPU_XSAVEOPT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
    CPU_AES_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 1, 0 },
  { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
    CPU_FSGSBASE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
    CPU_RDRND_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
    CPU_F16C_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
    CPU_BMI2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
    CPU_FMA_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
    CPU_FMA4_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
    CPU_XOP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
    CPU_LWP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
    CPU_MOVBE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
    CPU_CX16_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
    CPU_EPT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
    CPU_LZCNT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
    CPU_HLE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
    CPU_RTM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
    CPU_INVPCID_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSH_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
    CPU_NOP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
    CPU_SYSCALL_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
    CPU_RDTSCP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
    CPU_3DNOW_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
    CPU_PADLOCK_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 1, 0 },
  { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
    CPU_ABM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
    CPU_BMI_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
    CPU_TBM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
    CPU_ADX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
    CPU_RDSEED_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
    CPU_PRFCHW_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
    CPU_SMAP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
    CPU_MPX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
    CPU_SHA_FLAGS, 0, 0 },
};
906
907 #ifdef I386COFF
908 /* Like s_lcomm_internal in gas/read.c but the alignment string
909 is allowed to be optional. */
910
911 static symbolS *
912 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
913 {
914 addressT align = 0;
915
916 SKIP_WHITESPACE ();
917
918 if (needs_align
919 && *input_line_pointer == ',')
920 {
921 align = parse_align (needs_align - 1);
922
923 if (align == (addressT) -1)
924 return NULL;
925 }
926 else
927 {
928 if (size >= 8)
929 align = 3;
930 else if (size >= 4)
931 align = 2;
932 else if (size >= 2)
933 align = 1;
934 else
935 align = 0;
936 }
937
938 bss_alloc (symbolP, size, align);
939 return symbolP;
940 }
941
942 static void
943 pe_lcomm (int needs_align)
944 {
945 s_comm_internal (needs_align * 2, pe_lcomm_internal);
946 }
947 #endif
948
/* Target-specific pseudo-op table: maps directive names to their
   handler and an auxiliary int argument.  Terminated by a null entry.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
993
/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
1002 \f
/* Fill the end of FRAGP with COUNT bytes of padding, choosing the most
   efficient NOP encodings the selected (or default) processor executes
   quickly.  Called from the relax machinery for rs_align_code frags.  */
void
i386_align_code (fragS *fragP, int count)
{
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const char f32_1[] =
    {0x90};				/* nop			*/
  static const char f32_2[] =
    {0x66,0x90};			/* xchg %ax,%ax */
  static const char f32_3[] =
    {0x8d,0x76,0x00};			/* leal 0(%esi),%esi	*/
  static const char f32_4[] =
    {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
  static const char f32_5[] =
    {0x90,				/* nop			*/
     0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
  static const char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi	*/
  static const char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_8[] =
    {0x90,				/* nop			*/
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_9[] =
    {0x89,0xf6,				/* movl %esi,%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_10[] =
    {0x8d,0x76,0x00,			/* leal 0(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_11[] =
    {0x8d,0x74,0x26,0x00,		/* leal 0(%esi,1),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,	/* leal 0L(%esi),%esi	*/
     0x8d,0xbf,0x00,0x00,0x00,0x00};	/* leal 0L(%edi),%edi	*/
  static const char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,	/* leal 0L(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f16_3[] =
    {0x8d,0x74,0x00};			/* lea 0(%esi),%esi	*/
  static const char f16_4[] =
    {0x8d,0xb4,0x00,0x00};		/* lea 0w(%si),%si	*/
  static const char f16_5[] =
    {0x90,				/* nop			*/
     0x8d,0xb4,0x00,0x00};		/* lea 0w(%si),%si	*/
  static const char f16_6[] =
    {0x89,0xf6,				/* mov %si,%si		*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char f16_7[] =
    {0x8d,0x74,0x00,			/* lea 0(%si),%si	*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char f16_8[] =
    {0x8d,0xb4,0x00,0x00,		/* lea 0w(%si),%si	*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  /* Pattern tables indexed by (padding length - 1).  */
  static const char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  };
  static const char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  };
  /* nopl (%[re]ax) */
  static const char alt_3[] =
    {0x0f,0x1f,0x00};
  /* nopl 0(%[re]ax) */
  static const char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_11[] =
    {0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_12[] =
    {0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_13[] =
    {0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_14[] =
    {0x66,
     0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_15[] =
    {0x66,
     0x66,
     0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_11[] =
    {0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_12[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
     nopl 0L(%[re]ax) */
  static const char alt_short_13[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax)
     nopl 0L(%[re]ax) */
  static const char alt_short_14[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax)
     nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_short_15[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  static const char *const alt_short_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
    alt_short_14, alt_short_15
  };
  static const char *const alt_long_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
    alt_long_14, alt_long_15
  };

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
    return;

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
     PROCESSOR_GENERIC32, f32_patt will be used.
     2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
     PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
     PROCESSOR_GENERIC64, alt_long_patt will be used.
     3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
     PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
     will be used.

     When -mtune= isn't used, alt_long_patt will be used if
     cpu_arch_isa_flags has CpuNop.  Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.   */

  if (flag_code == CODE_16BIT)
    {
      /* 16-bit mode: the largest dedicated pattern is 8 bytes, so use
	 a short jump over plain nops for anything bigger.  */
      if (count > 8)
	{
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  jump_31, count);
	  /* Adjust jump offset.  */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	}
      else
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
    }
  else
    {
      const char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;
	      break;
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_BT:
	      patt = alt_short_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_short_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	     its offset.   */
	  int limit;

	  /* For 64bit, the limit is 3 bytes.  */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	    limit = 3;
	  else
	    limit = 15;
	  if (count < limit)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt[count - 1], count);
	  else
	    {
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      jump_31, count);
	      /* Adjust jump offset.  */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	    }
	}
      else
	{
	  /* Maximum length of an instruction is 15 byte.  If the
	     padding is greater than 15 bytes and we don't use jump,
	     we have to break it into smaller pieces.  */
	  int padding = count;
	  while (padding > 15)
	    {
	      padding -= 15;
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
		      patt [14], 15);
	    }

	  if (padding)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt [padding - 1], padding);
	}
    }
  fragP->fr_var = count;
}
1348
/* Return 1 iff every word of operand-type X is zero.  The switch on
   the compile-time array size deliberately falls through so only the
   words that exist are examined.  */
static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1366
/* Set every word of operand-type X to V (normally 0 to clear it).
   Intentional fallthrough covers all configured array sizes.  */
static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1383
/* Return 1 iff operand types X and Y are word-for-word identical.
   Intentional fallthrough compares only the words that exist.  */
static INLINE int
operand_type_equal (const union i386_operand_type *x,
		    const union i386_operand_type *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1403
/* Return 1 iff every word of CPU-flags X is zero.  Mirrors
   operand_type_all_zero; fallthrough is intentional.  */
static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1421
/* Set every word of CPU-flags X to V.  Fallthrough is intentional.  */
static INLINE void
cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1438
/* Return 1 iff CPU-flag sets X and Y are word-for-word identical.
   Fallthrough is intentional.  */
static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
		 const union i386_cpu_flags *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1458
1459 static INLINE int
1460 cpu_flags_check_cpu64 (i386_cpu_flags f)
1461 {
1462 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1463 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1464 }
1465
/* Return the word-wise AND of CPU-flag sets X and Y.  Fallthrough is
   intentional.  */
static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1483
/* Return the word-wise OR of CPU-flag sets X and Y.  Fallthrough is
   intentional.  */
static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1501
/* Return X with every flag present in Y cleared (X & ~Y).  Fallthrough
   is intentional.  */
static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= ~y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= ~y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= ~y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1519
/* Bit flags returned by cpu_flags_match describing how well a template's
   CPU requirements match the enabled architecture.  */
#define CPU_FLAGS_ARCH_MATCH		0x1
#define CPU_FLAGS_64BIT_MATCH		0x2
#define CPU_FLAGS_AES_MATCH		0x4
#define CPU_FLAGS_PCLMUL_MATCH		0x8
#define CPU_FLAGS_AVX_MATCH	       0x10

/* Everything except the 64-bit check.  */
#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
/* All checks satisfied.  */
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1531
1532 /* Return CPU flags match bits. */
1533
/* Return a CPU_FLAGS_* bitmask describing how template T's CPU
   requirements match the currently enabled architecture flags.
   AVX templates get extra AES/PCLMUL/SSE2AVX qualification so that
   e.g. VAES forms are only matched when both AVX and AES are on.  */
static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* Cpu64/CpuNo64 were handled above; ignore them from here on.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* CPU now holds the requirements that are actually enabled.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1586
/* Return the word-wise AND of operand types X and Y.  Fallthrough is
   intentional.  */
static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1604
/* Return the word-wise OR of operand types X and Y.  Fallthrough is
   intentional.  */
static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1622
/* Return the word-wise XOR of operand types X and Y.  Fallthrough is
   intentional.  */
static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] ^= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] ^= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] ^= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1640
/* Commonly used operand-type masks, pre-built from the generated
   OPERAND_TYPE_* initializers so they can be passed by reference.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
static const i386_operand_type regbnd = OPERAND_TYPE_REGBND;
static const i386_operand_type vec_disp8 = OPERAND_TYPE_VEC_DISP8;
1670
/* Operand categories tested by operand_type_check.  */
enum operand_type
{
  reg,		/* any general register */
  imm,		/* any immediate */
  disp,		/* any displacement */
  anymem	/* any memory operand (displacement or base/index) */
};
1678
/* Return nonzero iff operand type T belongs to category C.  */
static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      /* A memory operand is anything with a displacement or a
	 base/index register.  */
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  return 0;
}
1719
1720 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1721 operand J for instruction template T. */
1722
static INLINE int
match_reg_size (const insn_template *t, unsigned int j)
{
  /* A size conflict exists when the parsed operand (i.types) claims a
     size the template's operand J does not accept.  */
  return !((i.types[j].bitfield.byte
	    && !t->operand_types[j].bitfield.byte)
	   || (i.types[j].bitfield.word
	       && !t->operand_types[j].bitfield.word)
	   || (i.types[j].bitfield.dword
	       && !t->operand_types[j].bitfield.dword)
	   || (i.types[j].bitfield.qword
	       && !t->operand_types[j].bitfield.qword));
}
1735
1736 /* Return 1 if there is no conflict in any size on operand J for
1737 instruction template T. */
1738
static INLINE int
match_mem_size (const insn_template *t, unsigned int j)
{
  /* Memory operands additionally check the wider/unusual sizes
     (fword, tbyte, xmm/ymm/zmm word) and "unspecified".  */
  return (match_reg_size (t, j)
	  && !((i.types[j].bitfield.unspecified
		&& !t->operand_types[j].bitfield.unspecified)
	       || (i.types[j].bitfield.fword
		   && !t->operand_types[j].bitfield.fword)
	       || (i.types[j].bitfield.tbyte
		   && !t->operand_types[j].bitfield.tbyte)
	       || (i.types[j].bitfield.xmmword
		   && !t->operand_types[j].bitfield.xmmword)
	       || (i.types[j].bitfield.ymmword
		   && !t->operand_types[j].bitfield.ymmword)
	       || (i.types[j].bitfield.zmmword
		   && !t->operand_types[j].bitfield.zmmword)));
}
1756
1757 /* Return 1 if there is no size conflict on any operands for
1758 instruction template T. */
1759
static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* AnySize operands accept any size by definition.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* Template has no reversed (D/FloatD) form, so the direct
	 mismatch above is final.  NOTE: this label is also the target
	 of the backward gotos in the reversed-operand check below.  */
    mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      /* Compare operand J against the template's OTHER operand.  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1818
/* Return 1 iff the overlap of the template type and the given operand
   type (OVERLAP) still describes a real operand once size bits are
   stripped, and the baseindex/jumpabsolute properties agree.  Sets
   i.error and returns 0 otherwise.  */
static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Sizes and modifiers alone don't make a match; clear them so only
     the operand-kind bits remain.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

 mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1847
1848 /* If given types g0 and g1 are registers they must be of the same type
1849 unless the expected operand type register overlap is null.
1850 Note that Acc in a template matches every size of reg. */
1851
static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Only register-register pairs can conflict.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register class on both sides: no conflict.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* Acc in a template matches every size of reg, so widen the
     corresponding template type to all register classes.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* A conflict only matters if the template types overlap in some
     register class; if they don't, mixed sizes are expected.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1898
1899 static INLINE unsigned int
1900 register_number (const reg_entry *r)
1901 {
1902 unsigned int nr = r->reg_num;
1903
1904 if (r->reg_flags & RegRex)
1905 nr += 8;
1906
1907 return nr;
1908 }
1909
1910 static INLINE unsigned int
1911 mode_from_disp_size (i386_operand_type t)
1912 {
1913 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1914 return 1;
1915 else if (t.bitfield.disp16
1916 || t.bitfield.disp32
1917 || t.bitfield.disp32s)
1918 return 2;
1919 else
1920 return 0;
1921 }
1922
1923 static INLINE int
1924 fits_in_signed_byte (offsetT num)
1925 {
1926 return (num >= -128) && (num <= 127);
1927 }
1928
1929 static INLINE int
1930 fits_in_unsigned_byte (offsetT num)
1931 {
1932 return (num & 0xff) == num;
1933 }
1934
1935 static INLINE int
1936 fits_in_unsigned_word (offsetT num)
1937 {
1938 return (num & 0xffff) == num;
1939 }
1940
1941 static INLINE int
1942 fits_in_signed_word (offsetT num)
1943 {
1944 return (-32768 <= num) && (num <= 32767);
1945 }
1946
1947 static INLINE int
1948 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1949 {
1950 #ifndef BFD64
1951 return 1;
1952 #else
1953 return (!(((offsetT) -1 << 31) & num)
1954 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1955 #endif
1956 } /* fits_in_signed_long() */
1957
1958 static INLINE int
1959 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1960 {
1961 #ifndef BFD64
1962 return 1;
1963 #else
1964 return (num & (((offsetT) 2 << 31) - 1)) == num;
1965 #endif
1966 } /* fits_in_unsigned_long() */
1967
/* Return nonzero iff NUM can be encoded as an AVX-512 compressed
   (scaled) 8-bit displacement with the current i.memshift scale.  */
static INLINE int
fits_in_vec_disp8 (offsetT num)
{
  int shift = i.memshift;
  unsigned int mask;

  /* memshift == -1 means no scale was established; caller bug.  */
  if (shift == -1)
    abort ();

  mask = (1 << shift) - 1;

  /* Return 0 if NUM isn't properly aligned.  */
  if ((num & mask))
    return 0;

  /* Check if NUM will fit in 8bit after shift.  */
  return fits_in_signed_byte (num >> shift);
}
1986
1987 static INLINE int
1988 fits_in_imm4 (offsetT num)
1989 {
1990 return (num & 0xf) == num;
1991 }
1992
/* Return the set of immediate operand types NUM can be encoded as,
   from the narrowest usable encoding up.  Imm64 is always set since
   any value fits in 64 bits.  */
static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
2046
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when significant
   bits are lost.  Returns the truncated value.  */
static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 <<  8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the dropped bits are all zero or all ones (plain
     sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2081
/* Result classification for add_prefix.  PREFIX_EXIST must stay 0: the
   caller and add_prefix itself test it with a plain truth check.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,
  PREFIX_LOCK,
  PREFIX_REP,
  PREFIX_OTHER
};
2089
2090 /* Returns
2091 a. PREFIX_EXIST if attempting to add a prefix where one from the
2092 same class already exists.
2093 b. PREFIX_LOCK if lock prefix is added.
2094 c. PREFIX_REP if rep/repne prefix is added.
2095 d. PREFIX_OTHER if other prefix is added.
2096 */
2097
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes (0x40-0x4f) are only meaningful in 64-bit mode; their
     bits are OR-ed together, but W and the R/X/B group may each be
     contributed only once.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* Nonzero RET means no same-class clash (PREFIX_EXIST == 0).  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2167
/* Switch the assembler to 16/32/64-bit code generation (VALUE is a
   CODE_* constant).  Diagnoses a mode the selected CPU doesn't
   support; with CHECK nonzero the diagnostic is fatal.  */
static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode requires a CPU with long mode (LM).  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* 32-bit mode requires at least an i386.  */
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
2204
/* Directive wrapper: change code size via update_code_flag with
   non-fatal (check == 0) diagnostics.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2210
2211 static void
2212 set_16bit_gcc_code_flag (int new_code_flag)
2213 {
2214 flag_code = (enum flag_code) new_code_flag;
2215 if (flag_code != CODE_16BIT)
2216 abort ();
2217 cpu_arch_flags.bitfield.cpu64 = 0;
2218 cpu_arch_flags.bitfield.cpuno64 = 1;
2219 stackop_size = LONG_MNEM_SUFFIX;
2220 }
2221
2222 static void
2223 set_intel_syntax (int syntax_flag)
2224 {
2225 /* Find out if register prefixing is specified. */
2226 int ask_naked_reg = 0;
2227
2228 SKIP_WHITESPACE ();
2229 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2230 {
2231 char *string = input_line_pointer;
2232 int e = get_symbol_end ();
2233
2234 if (strcmp (string, "prefix") == 0)
2235 ask_naked_reg = 1;
2236 else if (strcmp (string, "noprefix") == 0)
2237 ask_naked_reg = -1;
2238 else
2239 as_bad (_("bad argument to syntax directive."));
2240 *input_line_pointer = e;
2241 }
2242 demand_empty_rest_of_line ();
2243
2244 intel_syntax = syntax_flag;
2245
2246 if (ask_naked_reg == 0)
2247 allow_naked_reg = (intel_syntax
2248 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2249 else
2250 allow_naked_reg = (ask_naked_reg < 0);
2251
2252 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2253
2254 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2255 identifier_chars['$'] = intel_syntax ? '$' : 0;
2256 register_prefix = allow_naked_reg ? "" : "%";
2257 }
2258
/* Handler for .intel_mnemonic / .att_mnemonic: select which mnemonic
   spelling set is accepted.  */

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2264
/* Handler for .allow_index_reg / .disallow_index_reg: control whether
   an index register is permitted where normally rejected.  */

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2270
2271 static void
2272 set_check (int what)
2273 {
2274 enum check_kind *kind;
2275 const char *str;
2276
2277 if (what)
2278 {
2279 kind = &operand_check;
2280 str = "operand";
2281 }
2282 else
2283 {
2284 kind = &sse_check;
2285 str = "sse";
2286 }
2287
2288 SKIP_WHITESPACE ();
2289
2290 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2291 {
2292 char *string = input_line_pointer;
2293 int e = get_symbol_end ();
2294
2295 if (strcmp (string, "none") == 0)
2296 *kind = check_none;
2297 else if (strcmp (string, "warning") == 0)
2298 *kind = check_warning;
2299 else if (strcmp (string, "error") == 0)
2300 *kind = check_error;
2301 else
2302 as_bad (_("bad argument to %s_check directive."), str);
2303 *input_line_pointer = e;
2304 }
2305 else
2306 as_bad (_("missing argument for %s_check directive"), str);
2307
2308 demand_empty_rest_of_line ();
2309 }
2310
2311 static void
2312 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2313 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2314 {
2315 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2316 static const char *arch;
2317
2318 /* Intel LIOM is only supported on ELF. */
2319 if (!IS_ELF)
2320 return;
2321
2322 if (!arch)
2323 {
2324 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2325 use default_arch. */
2326 arch = cpu_arch_name;
2327 if (!arch)
2328 arch = default_arch;
2329 }
2330
2331 /* If we are targeting Intel L1OM, we must enable it. */
2332 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2333 || new_flag.bitfield.cpul1om)
2334 return;
2335
2336 /* If we are targeting Intel K1OM, we must enable it. */
2337 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2338 || new_flag.bitfield.cpuk1om)
2339 return;
2340
2341 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2342 #endif
2343 }
2344
/* Handler for the .arch directive: ".arch name[,jumps|nojumps]".
   NAME is either a base architecture (e.g. "i686"), which replaces the
   whole feature set, or a sub-architecture extension beginning with
   '.' (e.g. ".avx512f" or a ".no..." form), which adds to or removes
   from the current feature set.  */

static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* Base architecture: replace the feature set
		     wholesale, preserving the 64-bit mode bits.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  /* Break out to the ",jumps|nojumps" handling.  */
		  break;
		}

	      /* Sub-architecture: ".feature" ORs the feature bits in,
		 a negated (".no...") entry clears them.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Something actually changed; append the extension
		     name to cpu_sub_arch_name for later reporting.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      /* Loop ran off the table: unknown architecture name
	 (sub-architecture matches returned above).  */
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  /* Optional ",jumps" / ",nojumps" modifier controls promotion of
     out-of-range conditional jumps.  */
  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2441
2442 enum bfd_architecture
2443 i386_arch (void)
2444 {
2445 if (cpu_arch_isa == PROCESSOR_L1OM)
2446 {
2447 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2448 || flag_code != CODE_64BIT)
2449 as_fatal (_("Intel L1OM is 64bit ELF only"));
2450 return bfd_arch_l1om;
2451 }
2452 else if (cpu_arch_isa == PROCESSOR_K1OM)
2453 {
2454 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2455 || flag_code != CODE_64BIT)
2456 as_fatal (_("Intel K1OM is 64bit ELF only"));
2457 return bfd_arch_k1om;
2458 }
2459 else
2460 return bfd_arch_i386;
2461 }
2462
2463 unsigned long
2464 i386_mach (void)
2465 {
2466 if (!strncmp (default_arch, "x86_64", 6))
2467 {
2468 if (cpu_arch_isa == PROCESSOR_L1OM)
2469 {
2470 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2471 || default_arch[6] != '\0')
2472 as_fatal (_("Intel L1OM is 64bit ELF only"));
2473 return bfd_mach_l1om;
2474 }
2475 else if (cpu_arch_isa == PROCESSOR_K1OM)
2476 {
2477 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2478 || default_arch[6] != '\0')
2479 as_fatal (_("Intel K1OM is 64bit ELF only"));
2480 return bfd_mach_k1om;
2481 }
2482 else if (default_arch[6] == '\0')
2483 return bfd_mach_x86_64;
2484 else
2485 return bfd_mach_x64_32;
2486 }
2487 else if (!strcmp (default_arch, "i386"))
2488 return bfd_mach_i386_i386;
2489 else
2490 as_fatal (_("unknown architecture"));
2491 }
2492 \f
/* Target hook called once before assembly starts: build the opcode
   and register hash tables, fill the lexical character-class tables,
   and set the DWARF CFI parameters for the selected code size.  */

void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  i386_optab groups templates with the same
       mnemonic contiguously, so each run of equal names becomes one
       hash entry covering [start, end).  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and registers are matched case-insensitively
	       by folding to lower case here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  /* Braces appear in AVX-512 operand decorations such as
	     masking and broadcast specifiers.  */
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow high-bit (non-ASCII) bytes in identifiers and
	       operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* DWARF CFI parameters depend on the address size.  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2624
/* Target hook for --statistics: dump hash-table usage to FILE.  */

void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2631 \f
2632 #ifdef DEBUG386
2633
2634 /* Debugging routines for md_assemble. */
2635 static void pte (insn_template *);
2636 static void pt (i386_operand_type);
2637 static void pe (expressionS *);
2638 static void ps (symbolS *);
2639
/* Debug helper (DEBUG386 builds only): dump the parsed instruction X
   to stdout, prefixed with LINE — template, addressing, ModRM/SIB and
   REX state, then each operand's type and value.  */

static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Print the register name for any register-class operand.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2685
/* Debug helper: dump opcode template T — operand count, base and
   extension opcode, D/W modifier bits, and each operand type.  */

static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2706
/* Debug helper: dump expression E — operation, constant part, and any
   attached symbols.  */

static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2726
/* Debug helper: dump symbol S — name, external flag, and segment.  */

static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2735
/* Mapping from operand-type bitmasks to printable names, used by pt()
   below.  Each entry whose mask intersects the operand type gets its
   name printed.  */

static struct type_name
  {
    i386_operand_type mask;
    const char *name;
  }
const type_names[] =
{
  { OPERAND_TYPE_REG8, "r8" },
  { OPERAND_TYPE_REG16, "r16" },
  { OPERAND_TYPE_REG32, "r32" },
  { OPERAND_TYPE_REG64, "r64" },
  { OPERAND_TYPE_IMM8, "i8" },
  /* NOTE(review): "i8s" reuses OPERAND_TYPE_IMM8; OPERAND_TYPE_IMM8S
     may have been intended — confirm against i386-init.h.  */
  { OPERAND_TYPE_IMM8, "i8s" },
  { OPERAND_TYPE_IMM16, "i16" },
  { OPERAND_TYPE_IMM32, "i32" },
  { OPERAND_TYPE_IMM32S, "i32s" },
  { OPERAND_TYPE_IMM64, "i64" },
  { OPERAND_TYPE_IMM1, "i1" },
  { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
  { OPERAND_TYPE_DISP8, "d8" },
  { OPERAND_TYPE_DISP16, "d16" },
  { OPERAND_TYPE_DISP32, "d32" },
  { OPERAND_TYPE_DISP32S, "d32s" },
  { OPERAND_TYPE_DISP64, "d64" },
  { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
  { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
  { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
  { OPERAND_TYPE_CONTROL, "control reg" },
  { OPERAND_TYPE_TEST, "test reg" },
  { OPERAND_TYPE_DEBUG, "debug reg" },
  { OPERAND_TYPE_FLOATREG, "FReg" },
  { OPERAND_TYPE_FLOATACC, "FAcc" },
  { OPERAND_TYPE_SREG2, "SReg2" },
  { OPERAND_TYPE_SREG3, "SReg3" },
  { OPERAND_TYPE_ACC, "Acc" },
  { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
  { OPERAND_TYPE_REGMMX, "rMMX" },
  { OPERAND_TYPE_REGXMM, "rXMM" },
  { OPERAND_TYPE_REGYMM, "rYMM" },
  { OPERAND_TYPE_REGZMM, "rZMM" },
  { OPERAND_TYPE_REGMASK, "Mask reg" },
  { OPERAND_TYPE_ESSEG, "es" },
};
2779
2780 static void
2781 pt (i386_operand_type t)
2782 {
2783 unsigned int j;
2784 i386_operand_type a;
2785
2786 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2787 {
2788 a = operand_type_and (t, type_names[j].mask);
2789 if (!operand_type_all_zero (&a))
2790 fprintf (stdout, "%s, ", type_names[j].name);
2791 }
2792 fflush (stdout);
2793 }
2794
2795 #endif /* DEBUG386 */
2796 \f
/* Map a fixup of SIZE bytes, pc-relative if PCREL is non-zero, with
   signedness SIGN (>0 signed, 0 unsigned, <0 "don't check"), onto a
   BFD relocation code.  OTHER, when not NO_RELOC, is a relocation
   already chosen during operand parsing (e.g. via @got); it is
   validated against SIZE/PCREL/SIGN and possibly widened to its
   64-bit variant.  Emits a diagnostic and returns NO_RELOC on
   failure.  */

static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      if (size == 8)
	switch (other)
	  {
	    /* Widen 32-bit relocations to their 64-bit counterparts.
	       NOTE(review): the first two cases return directly and so
	       skip the howto size/signedness validation below, unlike
	       the cases that assign to OTHER — confirm this asymmetry
	       is intentional.  */
	  case BFD_RELOC_X86_64_GOT32:
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* @size relocations: only 32- and 64-bit absolute forms exist.  */
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    return BFD_RELOC_SIZE64;
	  if (pcrel)
	    as_bad (_("there are no pc-relative size relocations"));
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      /* Validate the requested relocation against its BFD howto:
	 it must exist, match the field size, match pc-relativity,
	 and agree in signedness.  */
      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No relocation was pre-selected: pick a generic one by size.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2899
/* Here we decide which fixups can be adjusted to make them relative to
   the beginning of the section instead of the symbol.  Basically we need
   to make sure that the dynamic relocations are done correctly, so in
   some cases we force the original symbol to be used.
   Returns 1 if the fixup may be section-relative, 0 if the original
   symbol must be kept.  */

int
tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return 1;

  /* Don't adjust pc-relative references to merge sections in 64-bit
     mode.  */
  if (use_rela_relocations
      && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
      && fixP->fx_pcrel)
    return 0;

  /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
     and changed later by validate_fix.  */
  if (GOT_symbol && fixP->fx_subsy == GOT_symbol
      && fixP->fx_r_type == BFD_RELOC_32_PCREL)
    return 0;

  /* Adjust_reloc_syms doesn't know about the GOT.  Need to keep symbol
     for size relocations.  Every GOT/PLT/TLS-related relocation type
     below names a specific symbol and must not be redirected to a
     section symbol.  */
  if (fixP->fx_r_type == BFD_RELOC_SIZE32
      || fixP->fx_r_type == BFD_RELOC_SIZE64
      || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_386_PLT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
      || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
      || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;
#endif
  return 1;
}
2961
/* Classify MNEMONIC for Intel-syntax memory-operand sizing:
   0 = not an x87 math op (incl. fxsave/fxrstor), 1 = ordinary
   floating-point op, 2 = integer op (fi...), 3 = control/state op
   (fldcw, fnstsw, frstor, fsave, fstenv, ...).

   Note that the value returned is meaningful only for opcodes with
   (memory) operands, hence the code here is free to improperly handle
   opcodes that have no operands (for better performance and smaller
   code).  */

static int
intel_float_operand (const char *mnemonic)
{
  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
     the fs segment override prefix not currently handled because no
     call path can make opcodes without operands get here.  */
  switch (mnemonic[1])
    {
    case 'i':
      /* fiadd, fild, fist, ... operate on integers.  */
      return 2;

    case 'l':
      /* Only fldcw/fldenv are control ops; fld etc. are ordinary.  */
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3;
      break;

    case 'n':
      /* Non-waiting control ops (fnstcw, fnsave, ...) — except fnop.  */
      if (mnemonic[2] != 'o')
	return 3;
      break;

    case 'r':
      /* frstor / frstpm.  */
      if (mnemonic[2] == 's')
	return 3;
      break;

    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	switch (mnemonic[3])
	  {
	  case 'c': /* fstcw */
	  case 'd': /* fstdw */
	  case 'e': /* fstenv */
	  case 's': /* fsts[gw] */
	    return 3;
	  }
      break;

    case 'x':
      /* fxsave / fxrstor are not really math ops.  */
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;
      break;
    }

  /* Anything else is a plain floating-point operation.  */
  return 1;
}
3014
/* Build the VEX prefix (2-byte 0xC5 or 3-byte 0xC4/0x8F form) for the
   current instruction into i.vex.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  VEX.vvvv is stored 1's-complement;
     0xf means "no vvvv operand".  */
  if (i.vex.register_specifier)
    {
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Swap first and last operand...  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* ...turning REX_B into REX_R (which the 2-byte form can
	 encode) and swapping the ModRM reg/regmem fields.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* VEX.pp encodes the legacy 66/F3/F2 prefix implied by the second
     opcode byte.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible (only map 0F, W0, and no
     REX.W/X/B to express).  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit (stored inverted).  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the escape byte and the mmmmm opcode-map field.  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	{
	  if (w)
	    abort ();

	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    w = 1;
	}

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3163
/* Build the 4-byte AVX-512 EVEX prefix (0x62, P0, P1, P2) for the
   current instruction into i.vex, consuming the extended-register
   bits collected in i.vrex.  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier (EVEX.vvvv, stored 1's-complement).  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix (EVEX.V', inverted: bit set means low 16).  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* EVEX.pp encodes the legacy 66/F3/F2 prefix implied by the second
     opcode byte.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits (opcode map: 0F, 0F38, 0F3A).  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte (EVEX.R') is 1's complement
     of the REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction;
     the -mevexwig option then chooses the encoding.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length (L'L bits).  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* Static rounding control reuses the L'L bits; SAE-only sets
	 just the b bit.  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10;
    }

  /* The aaa field: opmask register for masking.  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3332
/* Convert an extension-opcode suffix into a fake 8-bit immediate
   operand, for instructions that encode an opcode extension in the
   immediate byte position.  */

static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  /* Append the synthetic imm8 and clear the extension opcode so it is
     not emitted twice.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3378
3379
3380 static int
3381 check_hle (void)
3382 {
3383 switch (i.tm.opcode_modifier.hleprefixok)
3384 {
3385 default:
3386 abort ();
3387 case HLEPrefixNone:
3388 as_bad (_("invalid instruction `%s' after `%s'"),
3389 i.tm.name, i.hle_prefix);
3390 return 0;
3391 case HLEPrefixLock:
3392 if (i.prefix[LOCK_PREFIX])
3393 return 1;
3394 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3395 return 0;
3396 case HLEPrefixAny:
3397 return 1;
3398 case HLEPrefixRelease:
3399 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3400 {
3401 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3402 i.tm.name);
3403 return 0;
3404 }
3405 if (i.mem_operands == 0
3406 || !operand_type_check (i.types[i.operands - 1], anymem))
3407 {
3408 as_bad (_("memory destination needed for instruction `%s'"
3409 " after `xrelease'"), i.tm.name);
3410 return 0;
3411 }
3412 return 1;
3413 }
3414 }
3415
3416 /* This is the guts of the machine-dependent assembler. LINE points to a
3417 machine dependent instruction. This function is supposed to emit
3418 the frags/bytes it assembles to. */
3419
void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  `i' is the per-instruction scratch state that
     the rest of the pass fills in and consumes.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  /* No operand is being parsed any more.  */
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions.  */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Optional diagnostic for SSE use (-msse-check=); skipped for
     templates marked NoAVX.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  Note: this only warns; assembly continues.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  if (i.tm.cpu_flags.bitfield.cpumpx
      && flag_code == CODE_64BIT
      && i.prefix[ADDR_PREFIX])
    as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  Narrow each parsed operand type to its
     intersection with the matched template's type.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex)
    build_vex_prefix (t);

  if (i.tm.opcode_modifier.evex)
    build_evex_prefix ();

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  AH/BH/CH/DH
		 (reg_num > 3 without RegRex64) cannot be encoded
		 together with a REX prefix.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3685
/* Parse the (possibly prefixed) mnemonic at LINE into MNEMONIC, set
   current_templates to the matching template group, and record any
   prefixes / trimmed suffix in `i'.  Returns a pointer just past the
   mnemonic (at the operands), or NULL after reporting an error.  */

static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop so that each leading prefix mnemonic (e.g. "rep", "lock") is
     consumed and added, then the next token is tried as the insn.  */
  while (1)
    {
      /* Copy the candidate mnemonic, remembering the last '.' seen for
	 the ".s"/".d8"/".d32" pseudo-suffix handling below.  */
      mnem_p = mnemonic;
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* A prefix only counts as such when something follows it.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* The 0xf2/0xf3 byte doubles as the HLE and BND prefixes;
		 classify by the prefix template's CPU flags.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.hle_prefix = current_templates->start->name;
	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		i.bnd_prefix = current_templates->start->name;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding (".s", ".d8" and ".d32" pseudo-suffixes).  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	/* NOTE: deliberate fall-into — for 'b' and 'q' (and for 'w'
	   when the if above is false) the suffix is taken verbatim.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	    i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
3920
/* Parse the comma-separated operands following the mnemonic.  L points
   just past the mnemonic; MNEMONIC is only used to classify Intel
   floating-point operands.  Each operand is handed to
   i386_intel_operand or i386_att_operand, which fill in the global `i'.
   Returns a pointer past the last operand, or NULL after an error.  */

static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan up to the next operand-separating ',' at paren depth 0,
	 or the end of the statement.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T uses (), Intel uses [] for memory operand grouping.  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.
	     Temporarily NUL-terminate the operand text for the
	     operand parsers, then restore the byte.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
4039
4040 static void
4041 swap_2_operands (int xchg1, int xchg2)
4042 {
4043 union i386_op temp_op;
4044 i386_operand_type temp_type;
4045 enum bfd_reloc_code_real temp_reloc;
4046
4047 temp_type = i.types[xchg2];
4048 i.types[xchg2] = i.types[xchg1];
4049 i.types[xchg1] = temp_type;
4050 temp_op = i.op[xchg2];
4051 i.op[xchg2] = i.op[xchg1];
4052 i.op[xchg1] = temp_op;
4053 temp_reloc = i.reloc[xchg2];
4054 i.reloc[xchg2] = i.reloc[xchg1];
4055 i.reloc[xchg1] = temp_reloc;
4056
4057 if (i.mask)
4058 {
4059 if (i.mask->operand == xchg1)
4060 i.mask->operand = xchg2;
4061 else if (i.mask->operand == xchg2)
4062 i.mask->operand = xchg1;
4063 }
4064 if (i.broadcast)
4065 {
4066 if (i.broadcast->operand == xchg1)
4067 i.broadcast->operand = xchg2;
4068 else if (i.broadcast->operand == xchg2)
4069 i.broadcast->operand = xchg1;
4070 }
4071 if (i.rounding)
4072 {
4073 if (i.rounding->operand == xchg1)
4074 i.rounding->operand = xchg2;
4075 else if (i.rounding->operand == xchg2)
4076 i.rounding->operand = xchg1;
4077 }
4078 }
4079
4080 static void
4081 swap_operands (void)
4082 {
4083 switch (i.operands)
4084 {
4085 case 5:
4086 case 4:
4087 swap_2_operands (1, i.operands - 2);
4088 case 3:
4089 case 2:
4090 swap_2_operands (0, i.operands - 1);
4091 break;
4092 default:
4093 abort ();
4094 }
4095
4096 if (i.mem_operands == 2)
4097 {
4098 const seg_entry *temp_seg;
4099 temp_seg = i.seg[0];
4100 i.seg[0] = i.seg[1];
4101 i.seg[1] = temp_seg;
4102 }
4103 }
4104
4105 /* Try to ensure constant immediates are represented in the smallest
4106 opcode possible. */
static void
optimize_imm (void)
{
  /* Operand-size suffix used to bound immediate widths; 0 if none can
     be determined.  */
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No registers: the effective operand size (mode XOR data-size
       prefix) decides between word and the default.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.
	       Widen the set of permissible immediate types first.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Likewise sign-extend a value that fits in 32 bits.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of immediate types any template accepts here.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied sizes when at least
		 one template would still match.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4251
4252 /* Try to use the smallest displacement type too. */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can
		   be dropped entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With a 32-bit address override an unsigned 32-bit
		   displacement is also usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLSDESC_CALL displacements are emitted as a zero-size
	       fixup at the current frag position and removed from the
	       operand type entirely.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4327
4328 /* Check if operands are valid for the instruction. */
4329
/* Validate vector-specific operand constraints (VSIB, broadcast,
   masking, RC/SAE, compressed Disp8) against template T.  Returns 0 on
   success; on failure sets i.error and returns 1.  */

static int
check_VecOperands (const insn_template *t)
{
  unsigned int op;

  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.regxmm
	  || i.index_reg->reg_type.bitfield.regymm
	  || i.index_reg->reg_type.bitfield.regzmm))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      /* The index register width must match the VSIB kind declared by
	 the template (xmm/ymm/zmm).  */
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.regxmm)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.regymm)
	       || (t->opcode_modifier.vecsib == VecSIB512
		   && i.index_reg->reg_type.bitfield.regzmm)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2 || i.mask);
      if (i.reg_operands == 2 && !i.mask)
	{
	  gas_assert (i.types[0].bitfield.regxmm
		      || i.types[0].bitfield.regymm
		      || i.types[0].bitfield.regzmm);
	  gas_assert (i.types[2].bitfield.regxmm
		      || i.types[2].bitfield.regymm
		      || i.types[2].bitfield.regzmm);
	  if (operand_check == check_none)
	    return 0;
	  /* All three of mask, index and destination distinct -> OK.  */
	  if (register_number (i.op[0].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[2].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[0].regs)
	      != register_number (i.op[2].regs))
	    return 0;
	  if (operand_check == check_error)
	    {
	      i.error = invalid_vector_register_set;
	      return 1;
	    }
	  as_warn (_("mask, index, and destination registers should be distinct"));
	}
    }

  /* Check if broadcast is supported by the instruction and is applied
     to the memory operand.  */
  if (i.broadcast)
    {
      int broadcasted_opnd_size;

      /* Check if specified broadcast is supported in this instruction,
	 and it's applied to memory operand of DWORD or QWORD type,
	 depending on VecESize.  */
      if (i.broadcast->type != t->opcode_modifier.broadcast
	  || !i.types[i.broadcast->operand].bitfield.mem
	  || (t->opcode_modifier.vecesize == 0
	      && !i.types[i.broadcast->operand].bitfield.dword
	      && !i.types[i.broadcast->operand].bitfield.unspecified)
	  || (t->opcode_modifier.vecesize == 1
	      && !i.types[i.broadcast->operand].bitfield.qword
	      && !i.types[i.broadcast->operand].bitfield.unspecified))
	goto bad_broadcast;

      /* Element size (32 or 64 bits) scaled by the broadcast factor
	 must equal the destination vector width.  */
      broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
      if (i.broadcast->type == BROADCAST_1TO16)
	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
      else if (i.broadcast->type == BROADCAST_1TO8)
	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
      else
	goto bad_broadcast;

      if ((broadcasted_opnd_size == 256
	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
	  || (broadcasted_opnd_size == 512
	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
	{
	bad_broadcast:
	  i.error = unsupported_broadcast;
	  return 1;
	}
    }
  /* If broadcast is supported in this instruction, we need to check if
     operand of one-element size isn't specified without broadcast.  */
  else if (t->opcode_modifier.broadcast && i.mem_operands)
    {
      /* Find memory operand.  */
      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], anymem))
	  break;
      gas_assert (op < i.operands);
      /* Check size of the memory operand.  */
      if ((t->opcode_modifier.vecesize == 0
	   && i.types[op].bitfield.dword)
	  || (t->opcode_modifier.vecesize == 1
	      && i.types[op].bitfield.qword))
	{
	  i.error = broadcast_needed;
	  return 1;
	}
    }

  /* Check if requested masking is supported.  */
  if (i.mask
      && (!t->opcode_modifier.masking
	  || (i.mask->zeroing
	      && t->opcode_modifier.masking == MERGING_MASKING)))
    {
      i.error = unsupported_masking;
      return 1;
    }

  /* Check if masking is applied to dest operand.  */
  if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
    {
      i.error = mask_not_on_destination;
      return 1;
    }

  /* Check if default mask is allowed.  NoDefMask templates require an
     explicit non-k0 mask register.  */
  if (t->opcode_modifier.nodefmask
      && (!i.mask || i.mask->mask->reg_num == 0))
    {
      i.error = no_default_mask;
      return 1;
    }

  /* Check RC/SAE.  */
  if (i.rounding)
    {
      if ((i.rounding->type != saeonly
	   && !t->opcode_modifier.staticrounding)
	  || (i.rounding->type == saeonly
	      && (t->opcode_modifier.staticrounding
		  || !t->opcode_modifier.sae)))
	{
	  i.error = unsupported_rc_sae;
	  return 1;
	}
      /* If the instruction has several immediate operands and one of
	 them is rounding, the rounding operand should be the last
	 immediate operand.  */
      if (i.imm_operands > 1
	  && i.rounding->operand != (int) (i.imm_operands - 1))
	{
	  i.error = rc_sae_operand_not_last_imm;
	  return 1;
	}
    }

  /* Check vector Disp8 operand (EVEX compressed displacement).  */
  if (t->opcode_modifier.disp8memshift)
    {
      if (i.broadcast)
	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
      else
	i.memshift = t->opcode_modifier.disp8memshift;

      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], disp)
	    && i.op[op].disps->X_op == O_constant)
	  {
	    offsetT value = i.op[op].disps->X_add_number;
	    int vec_disp8_ok = fits_in_vec_disp8 (value);
	    if (t->operand_types [op].bitfield.vec_disp8)
	      {
		if (vec_disp8_ok)
		  i.types[op].bitfield.vec_disp8 = 1;
		else
		  {
		    /* Vector insn can only have Vec_Disp8/Disp32 in
		       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
		       mode.  */
		    i.types[op].bitfield.disp8 = 0;
		    if (flag_code != CODE_16BIT)
		      i.types[op].bitfield.disp16 = 0;
		  }
	      }
	    else if (flag_code != CODE_16BIT)
	      {
		/* One form of this instruction supports vector Disp8.
		   Try vector Disp8 if we need to use Disp32.  */
		if (vec_disp8_ok && !fits_in_signed_byte (value))
		  {
		    i.error = try_vector_disp8;
		    return 1;
		  }
	      }
	  }
    }
  else
    /* No compressed displacement for this template.  */
    i.memshift = -1;

  return 0;
}
4539
4540 /* Check if operands are valid for the instruction. Update VEX
4541 operand types. */
4542
4543 static int
4544 VEX_check_operands (const insn_template *t)
4545 {
4546 /* VREX is only valid with EVEX prefix. */
4547 if (i.need_vrex && !t->opcode_modifier.evex)
4548 {
4549 i.error = invalid_register_operand;
4550 return 1;
4551 }
4552
4553 if (!t->opcode_modifier.vex)
4554 return 0;
4555
4556 /* Only check VEX_Imm4, which must be the first operand. */
4557 if (t->operand_types[0].bitfield.vec_imm4)
4558 {
4559 if (i.op[0].imms->X_op != O_constant
4560 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4561 {
4562 i.error = bad_imm4;
4563 return 1;
4564 }
4565
4566 /* Turn off Imm8 so that update_imm won't complain. */
4567 i.types[0] = vec_imm4;
4568 }
4569
4570 return 0;
4571 }
4572
4573 static const insn_template *
4574 match_template (void)
4575 {
4576 /* Points to template once we've found it. */
4577 const insn_template *t;
4578 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4579 i386_operand_type overlap4;
4580 unsigned int found_reverse_match;
4581 i386_opcode_modifier suffix_check;
4582 i386_operand_type operand_types [MAX_OPERANDS];
4583 int addr_prefix_disp;
4584 unsigned int j;
4585 unsigned int found_cpu_match;
4586 unsigned int check_register;
4587 enum i386_error specific_error = 0;
4588
4589 #if MAX_OPERANDS != 5
4590 # error "MAX_OPERANDS must be 5."
4591 #endif
4592
4593 found_reverse_match = 0;
4594 addr_prefix_disp = -1;
4595
4596 memset (&suffix_check, 0, sizeof (suffix_check));
4597 if (i.suffix == BYTE_MNEM_SUFFIX)
4598 suffix_check.no_bsuf = 1;
4599 else if (i.suffix == WORD_MNEM_SUFFIX)
4600 suffix_check.no_wsuf = 1;
4601 else if (i.suffix == SHORT_MNEM_SUFFIX)
4602 suffix_check.no_ssuf = 1;
4603 else if (i.suffix == LONG_MNEM_SUFFIX)
4604 suffix_check.no_lsuf = 1;
4605 else if (i.suffix == QWORD_MNEM_SUFFIX)
4606 suffix_check.no_qsuf = 1;
4607 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4608 suffix_check.no_ldsuf = 1;
4609
4610 /* Must have right number of operands. */
4611 i.error = number_of_operands_mismatch;
4612
4613 for (t = current_templates->start; t < current_templates->end; t++)
4614 {
4615 addr_prefix_disp = -1;
4616
4617 if (i.operands != t->operands)
4618 continue;
4619
4620 /* Check processor support. */
4621 i.error = unsupported;
4622 found_cpu_match = (cpu_flags_match (t)
4623 == CPU_FLAGS_PERFECT_MATCH);
4624 if (!found_cpu_match)
4625 continue;
4626
4627 /* Check old gcc support. */
4628 i.error = old_gcc_only;
4629 if (!old_gcc && t->opcode_modifier.oldgcc)
4630 continue;
4631
4632 /* Check AT&T mnemonic. */
4633 i.error = unsupported_with_intel_mnemonic;
4634 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4635 continue;
4636
4637 /* Check AT&T/Intel syntax. */
4638 i.error = unsupported_syntax;
4639 if ((intel_syntax && t->opcode_modifier.attsyntax)
4640 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4641 continue;
4642
4643 /* Check the suffix, except for some instructions in intel mode. */
4644 i.error = invalid_instruction_suffix;
4645 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4646 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4647 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4648 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4649 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4650 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4651 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4652 continue;
4653
4654 if (!operand_size_match (t))
4655 continue;
4656
4657 for (j = 0; j < MAX_OPERANDS; j++)
4658 operand_types[j] = t->operand_types[j];
4659
4660 /* In general, don't allow 64-bit operands in 32-bit mode. */
4661 if (i.suffix == QWORD_MNEM_SUFFIX
4662 && flag_code != CODE_64BIT
4663 && (intel_syntax
4664 ? (!t->opcode_modifier.ignoresize
4665 && !intel_float_operand (t->name))
4666 : intel_float_operand (t->name) != 2)
4667 && ((!operand_types[0].bitfield.regmmx
4668 && !operand_types[0].bitfield.regxmm
4669 && !operand_types[0].bitfield.regymm
4670 && !operand_types[0].bitfield.regzmm)
4671 || (!operand_types[t->operands > 1].bitfield.regmmx
4672 && !!operand_types[t->operands > 1].bitfield.regxmm
4673 && !!operand_types[t->operands > 1].bitfield.regymm
4674 && !!operand_types[t->operands > 1].bitfield.regzmm))
4675 && (t->base_opcode != 0x0fc7
4676 || t->extension_opcode != 1 /* cmpxchg8b */))
4677 continue;
4678
4679 /* In general, don't allow 32-bit operands on pre-386. */
4680 else if (i.suffix == LONG_MNEM_SUFFIX
4681 && !cpu_arch_flags.bitfield.cpui386
4682 && (intel_syntax
4683 ? (!t->opcode_modifier.ignoresize
4684 && !intel_float_operand (t->name))
4685 : intel_float_operand (t->name) != 2)
4686 && ((!operand_types[0].bitfield.regmmx
4687 && !operand_types[0].bitfield.regxmm)
4688 || (!operand_types[t->operands > 1].bitfield.regmmx
4689 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4690 continue;
4691
4692 /* Do not verify operands when there are none. */
4693 else
4694 {
4695 if (!t->operands)
4696 /* We've found a match; break out of loop. */
4697 break;
4698 }
4699
4700 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4701 into Disp32/Disp16/Disp32 operand. */
4702 if (i.prefix[ADDR_PREFIX] != 0)
4703 {
4704 /* There should be only one Disp operand. */
4705 switch (flag_code)
4706 {
4707 case CODE_16BIT:
4708 for (j = 0; j < MAX_OPERANDS; j++)
4709 {
4710 if (operand_types[j].bitfield.disp16)
4711 {
4712 addr_prefix_disp = j;
4713 operand_types[j].bitfield.disp32 = 1;
4714 operand_types[j].bitfield.disp16 = 0;
4715 break;
4716 }
4717 }
4718 break;
4719 case CODE_32BIT:
4720 for (j = 0; j < MAX_OPERANDS; j++)
4721 {
4722 if (operand_types[j].bitfield.disp32)
4723 {
4724 addr_prefix_disp = j;
4725 operand_types[j].bitfield.disp32 = 0;
4726 operand_types[j].bitfield.disp16 = 1;
4727 break;
4728 }
4729 }
4730 break;
4731 case CODE_64BIT:
4732 for (j = 0; j < MAX_OPERANDS; j++)
4733 {
4734 if (operand_types[j].bitfield.disp64)
4735 {
4736 addr_prefix_disp = j;
4737 operand_types[j].bitfield.disp64 = 0;
4738 operand_types[j].bitfield.disp32 = 1;
4739 break;
4740 }
4741 }
4742 break;
4743 }
4744 }
4745
4746 /* We check register size if needed. */
4747 check_register = t->opcode_modifier.checkregsize;
4748 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4749 switch (t->operands)
4750 {
4751 case 1:
4752 if (!operand_type_match (overlap0, i.types[0]))
4753 continue;
4754 break;
4755 case 2:
4756 /* xchg %eax, %eax is a special case. It is an aliase for nop
4757 only in 32bit mode and we can use opcode 0x90. In 64bit
4758 mode, we can't use 0x90 for xchg %eax, %eax since it should
4759 zero-extend %eax to %rax. */
4760 if (flag_code == CODE_64BIT
4761 && t->base_opcode == 0x90
4762 && operand_type_equal (&i.types [0], &acc32)
4763 && operand_type_equal (&i.types [1], &acc32))
4764 continue;
4765 if (i.swap_operand)
4766 {
4767 /* If we swap operand in encoding, we either match
4768 the next one or reverse direction of operands. */
4769 if (t->opcode_modifier.s)
4770 continue;
4771 else if (t->opcode_modifier.d)
4772 goto check_reverse;
4773 }
4774
4775 case 3:
4776 /* If we swap operand in encoding, we match the next one. */
4777 if (i.swap_operand && t->opcode_modifier.s)
4778 continue;
4779 case 4:
4780 case 5:
4781 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4782 if (!operand_type_match (overlap0, i.types[0])
4783 || !operand_type_match (overlap1, i.types[1])
4784 || (check_register
4785 && !operand_type_register_match (overlap0, i.types[0],
4786 operand_types[0],
4787 overlap1, i.types[1],
4788 operand_types[1])))
4789 {
4790 /* Check if other direction is valid ... */
4791 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4792 continue;
4793
4794 check_reverse:
4795 /* Try reversing direction of operands. */
4796 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4797 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4798 if (!operand_type_match (overlap0, i.types[0])
4799 || !operand_type_match (overlap1, i.types[1])
4800 || (check_register
4801 && !operand_type_register_match (overlap0,
4802 i.types[0],
4803 operand_types[1],
4804 overlap1,
4805 i.types[1],
4806 operand_types[0])))
4807 {
4808 /* Does not match either direction. */
4809 continue;
4810 }
4811 /* found_reverse_match holds which of D or FloatDR
4812 we've found. */
4813 if (t->opcode_modifier.d)
4814 found_reverse_match = Opcode_D;
4815 else if (t->opcode_modifier.floatd)
4816 found_reverse_match = Opcode_FloatD;
4817 else
4818 found_reverse_match = 0;
4819 if (t->opcode_modifier.floatr)
4820 found_reverse_match |= Opcode_FloatR;
4821 }
4822 else
4823 {
4824 /* Found a forward 2 operand match here. */
4825 switch (t->operands)
4826 {
4827 case 5:
4828 overlap4 = operand_type_and (i.types[4],
4829 operand_types[4]);
4830 case 4:
4831 overlap3 = operand_type_and (i.types[3],
4832 operand_types[3]);
4833 case 3:
4834 overlap2 = operand_type_and (i.types[2],
4835 operand_types[2]);
4836 break;
4837 }
4838
4839 switch (t->operands)
4840 {
4841 case 5:
4842 if (!operand_type_match (overlap4, i.types[4])
4843 || !operand_type_register_match (overlap3,
4844 i.types[3],
4845 operand_types[3],
4846 overlap4,
4847 i.types[4],
4848 operand_types[4]))
4849 continue;
4850 case 4:
4851 if (!operand_type_match (overlap3, i.types[3])
4852 || (check_register
4853 && !operand_type_register_match (overlap2,
4854 i.types[2],
4855 operand_types[2],
4856 overlap3,
4857 i.types[3],
4858 operand_types[3])))
4859 continue;
4860 case 3:
4861 /* Here we make use of the fact that there are no
4862 reverse match 3 operand instructions, and all 3
4863 operand instructions only need to be checked for
4864 register consistency between operands 2 and 3. */
4865 if (!operand_type_match (overlap2, i.types[2])
4866 || (check_register
4867 && !operand_type_register_match (overlap1,
4868 i.types[1],
4869 operand_types[1],
4870 overlap2,
4871 i.types[2],
4872 operand_types[2])))
4873 continue;
4874 break;
4875 }
4876 }
4877 /* Found either forward/reverse 2, 3 or 4 operand match here:
4878 slip through to break. */
4879 }
4880 if (!found_cpu_match)
4881 {
4882 found_reverse_match = 0;
4883 continue;
4884 }
4885
4886 /* Check if vector and VEX operands are valid. */
4887 if (check_VecOperands (t) || VEX_check_operands (t))
4888 {
4889 specific_error = i.error;
4890 continue;
4891 }
4892
4893 /* We've found a match; break out of loop. */
4894 break;
4895 }
4896
4897 if (t == current_templates->end)
4898 {
4899 /* We found no match. */
4900 const char *err_msg;
4901 switch (specific_error ? specific_error : i.error)
4902 {
4903 default:
4904 abort ();
4905 case operand_size_mismatch:
4906 err_msg = _("operand size mismatch");
4907 break;
4908 case operand_type_mismatch:
4909 err_msg = _("operand type mismatch");
4910 break;
4911 case register_type_mismatch:
4912 err_msg = _("register type mismatch");
4913 break;
4914 case number_of_operands_mismatch:
4915 err_msg = _("number of operands mismatch");
4916 break;
4917 case invalid_instruction_suffix:
4918 err_msg = _("invalid instruction suffix");
4919 break;
4920 case bad_imm4:
4921 err_msg = _("constant doesn't fit in 4 bits");
4922 break;
4923 case old_gcc_only:
4924 err_msg = _("only supported with old gcc");
4925 break;
4926 case unsupported_with_intel_mnemonic:
4927 err_msg = _("unsupported with Intel mnemonic");
4928 break;
4929 case unsupported_syntax:
4930 err_msg = _("unsupported syntax");
4931 break;
4932 case unsupported:
4933 as_bad (_("unsupported instruction `%s'"),
4934 current_templates->start->name);
4935 return NULL;
4936 case invalid_vsib_address:
4937 err_msg = _("invalid VSIB address");
4938 break;
4939 case invalid_vector_register_set:
4940 err_msg = _("mask, index, and destination registers must be distinct");
4941 break;
4942 case unsupported_vector_index_register:
4943 err_msg = _("unsupported vector index register");
4944 break;
4945 case unsupported_broadcast:
4946 err_msg = _("unsupported broadcast");
4947 break;
4948 case broadcast_not_on_src_operand:
4949 err_msg = _("broadcast not on source memory operand");
4950 break;
4951 case broadcast_needed:
4952 err_msg = _("broadcast is needed for operand of such type");
4953 break;
4954 case unsupported_masking:
4955 err_msg = _("unsupported masking");
4956 break;
4957 case mask_not_on_destination:
4958 err_msg = _("mask not on destination operand");
4959 break;
4960 case no_default_mask:
4961 err_msg = _("default mask isn't allowed");
4962 break;
4963 case unsupported_rc_sae:
4964 err_msg = _("unsupported static rounding/sae");
4965 break;
4966 case rc_sae_operand_not_last_imm:
4967 if (intel_syntax)
4968 err_msg = _("RC/SAE operand must precede immediate operands");
4969 else
4970 err_msg = _("RC/SAE operand must follow immediate operands");
4971 break;
4972 case invalid_register_operand:
4973 err_msg = _("invalid register operand");
4974 break;
4975 }
4976 as_bad (_("%s for `%s'"), err_msg,
4977 current_templates->start->name);
4978 return NULL;
4979 }
4980
4981 if (!quiet_warnings)
4982 {
4983 if (!intel_syntax
4984 && (i.types[0].bitfield.jumpabsolute
4985 != operand_types[0].bitfield.jumpabsolute))
4986 {
4987 as_warn (_("indirect %s without `*'"), t->name);
4988 }
4989
4990 if (t->opcode_modifier.isprefix
4991 && t->opcode_modifier.ignoresize)
4992 {
4993 /* Warn them that a data or address size prefix doesn't
4994 affect assembly of the next line of code. */
4995 as_warn (_("stand-alone `%s' prefix"), t->name);
4996 }
4997 }
4998
4999 /* Copy the template we found. */
5000 i.tm = *t;
5001
5002 if (addr_prefix_disp != -1)
5003 i.tm.operand_types[addr_prefix_disp]
5004 = operand_types[addr_prefix_disp];
5005
5006 if (found_reverse_match)
5007 {
5008 /* If we found a reverse match we must alter the opcode
5009 direction bit. found_reverse_match holds bits to change
5010 (different for int & float insns). */
5011
5012 i.tm.base_opcode ^= found_reverse_match;
5013
5014 i.tm.operand_types[0] = operand_types[1];
5015 i.tm.operand_types[1] = operand_types[0];
5016 }
5017
5018 return t;
5019 }
5020
5021 static int
5022 check_string (void)
5023 {
5024 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5025 if (i.tm.operand_types[mem_op].bitfield.esseg)
5026 {
5027 if (i.seg[0] != NULL && i.seg[0] != &es)
5028 {
5029 as_bad (_("`%s' operand %d must use `%ses' segment"),
5030 i.tm.name,
5031 mem_op + 1,
5032 register_prefix);
5033 return 0;
5034 }
5035 /* There's only ever one segment override allowed per instruction.
5036 This instruction possibly has a legal segment override on the
5037 second operand, so copy the segment to where non-string
5038 instructions store it, allowing common code. */
5039 i.seg[0] = i.seg[1];
5040 }
5041 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5042 {
5043 if (i.seg[1] != NULL && i.seg[1] != &es)
5044 {
5045 as_bad (_("`%s' operand %d must use `%ses' segment"),
5046 i.tm.name,
5047 mem_op + 2,
5048 register_prefix);
5049 return 0;
5050 }
5051 }
5052 return 1;
5053 }
5054
/* Finalize the instruction's operand size suffix: take an explicit
   template-mandated size, else infer one from register operands, else
   (Intel mode / default-size insns) from the current mode.  Validate
   register operands against the chosen suffix, then adjust the opcode
   and emit operand/address size prefixes and REX.W as needed.
   Returns 1 on success, 0 after a diagnostic.  */

static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)	/* crc32, 16/32/64-bit source.  */
	    {
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)	/* crc32, byte source.  */
	    {
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan operands from last to first; the first sized
		 general register found determines the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
	       || i.suffix == YMMWORD_MNEM_SUFFIX
	       || i.suffix == ZMMWORD_MNEM_SUFFIX)
	{
	  /* Skip if the instruction has x/y/z suffix.  match_template
	     should check if it is a valid suffix.  */
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Intel-syntax branch without an explicit operand size: default
	 to the current code size.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through -- an insn that can't take a `q' suffix in
	     64-bit mode gets the 32-bit default instead.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  unsigned int suffixes;

	  /* Bitmask of suffixes the template accepts:
	     bit 0 = b, 1 = w, 2 = l, 3 = ld, 4 = s, 5 = q.  */
	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* More than one suffix would match -- the operand size is
	     ambiguous.  (suffixes & (suffixes - 1)) is nonzero iff at
	     least two bits are set.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX
      && i.suffix != ZMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  cmpxchg8b is also a special case.  */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
5329
/* Validate register operands against an explicit `b' suffix.  Wider
   general registers with an addressable low byte are accepted (with a
   warning outside quiet mode); anything else is rejected.  Returns 1
   if OK, 0 after a diagnostic.  */

static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      /* A wider general register numbered below 4 has an addressable
	 low byte (%al..%bl), so we can silently lower it -- but only
	 outside 64-bit mode.  */
      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated.  */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  if (!quiet_warnings)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     /* Offset from the named 16/32-bit register to the
			corresponding 8-bit register entry.  */
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.regzmm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
5398
/* Validate register operands against an explicit `l' suffix.  Rejects
   byte registers (unless the template wants them), warns about or
   rejects 16-bit registers, and handles 64-bit registers via the
   Intel-mode ToQword conversion.  Returns 1 if OK, 0 after a
   diagnostic.  */

static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  (In 64-bit
       mode this is an error, so check even when warnings are quiet.)  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	else
	  as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		   register_prefix,
		   /* Offset from the 16-bit register to the matching
		      32-bit register entry.  */
		   (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		   register_prefix,
		   i.op[op].regs->reg_name,
		   i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5466
5467 static int
5468 check_qword_reg (void)
5469 {
5470 int op;
5471
5472 for (op = i.operands; --op >= 0; )
5473 /* Reject eight bit registers, except where the template requires
5474 them. (eg. movzb) */
5475 if (i.types[op].bitfield.reg8
5476 && (i.tm.operand_types[op].bitfield.reg16
5477 || i.tm.operand_types[op].bitfield.reg32
5478 || i.tm.operand_types[op].bitfield.acc))
5479 {
5480 as_bad (_("`%s%s' not allowed with `%s%c'"),
5481 register_prefix,
5482 i.op[op].regs->reg_name,
5483 i.tm.name,
5484 i.suffix);
5485 return 0;
5486 }
5487 /* Warn if the e prefix on a general reg is missing. */
5488 else if ((i.types[op].bitfield.reg16
5489 || i.types[op].bitfield.reg32)
5490 && (i.tm.operand_types[op].bitfield.reg32
5491 || i.tm.operand_types[op].bitfield.acc))
5492 {
5493 /* Prohibit these changes in the 64bit mode, since the
5494 lowering is more complicated. */
5495 if (intel_syntax
5496 && i.tm.opcode_modifier.todword
5497 && !i.types[0].bitfield.regxmm)
5498 {
5499 /* Convert to DWORD. We don't want REX byte. */
5500 i.suffix = LONG_MNEM_SUFFIX;
5501 }
5502 else
5503 {
5504 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5505 register_prefix, i.op[op].regs->reg_name,
5506 i.suffix);
5507 return 0;
5508 }
5509 }
5510 return 1;
5511 }
5512
5513 static int
5514 check_word_reg (void)
5515 {
5516 int op;
5517 for (op = i.operands; --op >= 0;)
5518 /* Reject eight bit registers, except where the template requires
5519 them. (eg. movzb) */
5520 if (i.types[op].bitfield.reg8
5521 && (i.tm.operand_types[op].bitfield.reg16
5522 || i.tm.operand_types[op].bitfield.reg32
5523 || i.tm.operand_types[op].bitfield.acc))
5524 {
5525 as_bad (_("`%s%s' not allowed with `%s%c'"),
5526 register_prefix,
5527 i.op[op].regs->reg_name,
5528 i.tm.name,
5529 i.suffix);
5530 return 0;
5531 }
5532 /* Warn if the e prefix on a general reg is present. */
5533 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5534 && i.types[op].bitfield.reg32
5535 && (i.tm.operand_types[op].bitfield.reg16
5536 || i.tm.operand_types[op].bitfield.acc))
5537 {
5538 /* Prohibit these changes in the 64bit mode, since the
5539 lowering is more complicated. */
5540 if (flag_code == CODE_64BIT)
5541 {
5542 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5543 register_prefix, i.op[op].regs->reg_name,
5544 i.suffix);
5545 return 0;
5546 }
5547 else
5548 #if REGISTER_WARNINGS
5549 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5550 register_prefix,
5551 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5552 register_prefix,
5553 i.op[op].regs->reg_name,
5554 i.suffix);
5555 #endif
5556 }
5557 return 1;
5558 }
5559
5560 static int
5561 update_imm (unsigned int j)
5562 {
5563 i386_operand_type overlap = i.types[j];
5564 if ((overlap.bitfield.imm8
5565 || overlap.bitfield.imm8s
5566 || overlap.bitfield.imm16
5567 || overlap.bitfield.imm32
5568 || overlap.bitfield.imm32s
5569 || overlap.bitfield.imm64)
5570 && !operand_type_equal (&overlap, &imm8)
5571 && !operand_type_equal (&overlap, &imm8s)
5572 && !operand_type_equal (&overlap, &imm16)
5573 && !operand_type_equal (&overlap, &imm32)
5574 && !operand_type_equal (&overlap, &imm32s)
5575 && !operand_type_equal (&overlap, &imm64))
5576 {
5577 if (i.suffix)
5578 {
5579 i386_operand_type temp;
5580
5581 operand_type_set (&temp, 0);
5582 if (i.suffix == BYTE_MNEM_SUFFIX)
5583 {
5584 temp.bitfield.imm8 = overlap.bitfield.imm8;
5585 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5586 }
5587 else if (i.suffix == WORD_MNEM_SUFFIX)
5588 temp.bitfield.imm16 = overlap.bitfield.imm16;
5589 else if (i.suffix == QWORD_MNEM_SUFFIX)
5590 {
5591 temp.bitfield.imm64 = overlap.bitfield.imm64;
5592 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5593 }
5594 else
5595 temp.bitfield.imm32 = overlap.bitfield.imm32;
5596 overlap = temp;
5597 }
5598 else if (operand_type_equal (&overlap, &imm16_32_32s)
5599 || operand_type_equal (&overlap, &imm16_32)
5600 || operand_type_equal (&overlap, &imm16_32s))
5601 {
5602 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5603 overlap = imm16;
5604 else
5605 overlap = imm32s;
5606 }
5607 if (!operand_type_equal (&overlap, &imm8)
5608 && !operand_type_equal (&overlap, &imm8s)
5609 && !operand_type_equal (&overlap, &imm16)
5610 && !operand_type_equal (&overlap, &imm32)
5611 && !operand_type_equal (&overlap, &imm32s)
5612 && !operand_type_equal (&overlap, &imm64))
5613 {
5614 as_bad (_("no instruction mnemonic suffix given; "
5615 "can't determine immediate size"));
5616 return 0;
5617 }
5618 }
5619 i.types[j] = overlap;
5620
5621 return 1;
5622 }
5623
5624 static int
5625 finalize_imm (void)
5626 {
5627 unsigned int j, n;
5628
5629 /* Update the first 2 immediate operands. */
5630 n = i.operands > 2 ? 2 : i.operands;
5631 if (n)
5632 {
5633 for (j = 0; j < n; j++)
5634 if (update_imm (j) == 0)
5635 return 0;
5636
5637 /* The 3rd operand can't be immediate operand. */
5638 gas_assert (operand_type_check (i.types[2], imm) == 0);
5639 }
5640
5641 return 1;
5642 }
5643
5644 static int
5645 bad_implicit_operand (int xmm)
5646 {
5647 const char *ireg = xmm ? "xmm0" : "ymm0";
5648
5649 if (intel_syntax)
5650 as_bad (_("the last operand of `%s' must be `%s%s'"),
5651 i.tm.name, register_prefix, ireg);
5652 else
5653 as_bad (_("the first operand of `%s' must be `%s%s'"),
5654 i.tm.name, register_prefix, ireg);
5655 return 0;
5656 }
5657
/* Finish operand processing after template matching: materialise
   implicit operands (the SSE2AVX implicit xmm0, RegKludge register
   duplication), fold register numbers into short-form opcodes, build
   the ModR/M byte where needed, and decide which segment prefix (if
   any) must be emitted.  Mutates the global instruction `i' and its
   copied template `i.tm' in place.  Returns 0 on error (after issuing
   a diagnostic), 1 on success.  */

static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE-encoded template assembled as AVX: the destination must
	 additionally be encoded in VEX.vvvv, so it gets duplicated
	 into an extra operand slot.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* +1 for the inserted xmm0, +1 for the duplicated
	     destination added below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
	duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)
		      || operand_type_equal (&i.types[0], &regzmm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand; everything shifts down one
	 slot.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* Short-form encodings fold the register number into the opcode
	 byte itself; no ModR/M byte is emitted.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  /* Segment register number goes in bits 3-5 of the opcode.  */
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      /* mov moffs, %al/%ax/%eax and vice versa default to %ds.  */
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5880
5881 static const seg_entry *
5882 build_modrm_byte (void)
5883 {
5884 const seg_entry *default_seg = 0;
5885 unsigned int source, dest;
5886 int vex_3_sources;
5887
5888 /* The first operand of instructions with VEX prefix and 3 sources
5889 must be VEX_Imm4. */
5890 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5891 if (vex_3_sources)
5892 {
5893 unsigned int nds, reg_slot;
5894 expressionS *exp;
5895
5896 if (i.tm.opcode_modifier.veximmext
5897 && i.tm.opcode_modifier.immext)
5898 {
5899 dest = i.operands - 2;
5900 gas_assert (dest == 3);
5901 }
5902 else
5903 dest = i.operands - 1;
5904 nds = dest - 1;
5905
5906 /* There are 2 kinds of instructions:
5907 1. 5 operands: 4 register operands or 3 register operands
5908 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5909 VexW0 or VexW1. The destination must be either XMM, YMM or
5910 ZMM register.
5911 2. 4 operands: 4 register operands or 3 register operands
5912 plus 1 memory operand, VexXDS, and VexImmExt */
5913 gas_assert ((i.reg_operands == 4
5914 || (i.reg_operands == 3 && i.mem_operands == 1))
5915 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5916 && (i.tm.opcode_modifier.veximmext
5917 || (i.imm_operands == 1
5918 && i.types[0].bitfield.vec_imm4
5919 && (i.tm.opcode_modifier.vexw == VEXW0
5920 || i.tm.opcode_modifier.vexw == VEXW1)
5921 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5922 || operand_type_equal (&i.tm.operand_types[dest], &regymm)
5923 || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
5924
5925 if (i.imm_operands == 0)
5926 {
5927 /* When there is no immediate operand, generate an 8bit
5928 immediate operand to encode the first operand. */
5929 exp = &im_expressions[i.imm_operands++];
5930 i.op[i.operands].imms = exp;
5931 i.types[i.operands] = imm8;
5932 i.operands++;
5933 /* If VexW1 is set, the first operand is the source and
5934 the second operand is encoded in the immediate operand. */
5935 if (i.tm.opcode_modifier.vexw == VEXW1)
5936 {
5937 source = 0;
5938 reg_slot = 1;
5939 }
5940 else
5941 {
5942 source = 1;
5943 reg_slot = 0;
5944 }
5945
5946 /* FMA swaps REG and NDS. */
5947 if (i.tm.cpu_flags.bitfield.cpufma)
5948 {
5949 unsigned int tmp;
5950 tmp = reg_slot;
5951 reg_slot = nds;
5952 nds = tmp;
5953 }
5954
5955 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5956 &regxmm)
5957 || operand_type_equal (&i.tm.operand_types[reg_slot],
5958 &regymm)
5959 || operand_type_equal (&i.tm.operand_types[reg_slot],
5960 &regzmm));
5961 exp->X_op = O_constant;
5962 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5963 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
5964 }
5965 else
5966 {
5967 unsigned int imm_slot;
5968
5969 if (i.tm.opcode_modifier.vexw == VEXW0)
5970 {
5971 /* If VexW0 is set, the third operand is the source and
5972 the second operand is encoded in the immediate
5973 operand. */
5974 source = 2;
5975 reg_slot = 1;
5976 }
5977 else
5978 {
5979 /* VexW1 is set, the second operand is the source and
5980 the third operand is encoded in the immediate
5981 operand. */
5982 source = 1;
5983 reg_slot = 2;
5984 }
5985
5986 if (i.tm.opcode_modifier.immext)
5987 {
5988 /* When ImmExt is set, the immdiate byte is the last
5989 operand. */
5990 imm_slot = i.operands - 1;
5991 source--;
5992 reg_slot--;
5993 }
5994 else
5995 {
5996 imm_slot = 0;
5997
5998 /* Turn on Imm8 so that output_imm will generate it. */
5999 i.types[imm_slot].bitfield.imm8 = 1;
6000 }
6001
6002 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6003 &regxmm)
6004 || operand_type_equal (&i.tm.operand_types[reg_slot],
6005 &regymm)
6006 || operand_type_equal (&i.tm.operand_types[reg_slot],
6007 &regzmm));
6008 i.op[imm_slot].imms->X_add_number
6009 |= register_number (i.op[reg_slot].regs) << 4;
6010 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6011 }
6012
6013 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
6014 || operand_type_equal (&i.tm.operand_types[nds],
6015 &regymm)
6016 || operand_type_equal (&i.tm.operand_types[nds],
6017 &regzmm));
6018 i.vex.register_specifier = i.op[nds].regs;
6019 }
6020 else
6021 source = dest = 0;
6022
6023 /* i.reg_operands MUST be the number of real register operands;
6024 implicit registers do not count. If there are 3 register
6025 operands, it must be a instruction with VexNDS. For a
6026 instruction with VexNDD, the destination register is encoded
6027 in VEX prefix. If there are 4 register operands, it must be
6028 a instruction with VEX prefix and 3 sources. */
6029 if (i.mem_operands == 0
6030 && ((i.reg_operands == 2
6031 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6032 || (i.reg_operands == 3
6033 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6034 || (i.reg_operands == 4 && vex_3_sources)))
6035 {
6036 switch (i.operands)
6037 {
6038 case 2:
6039 source = 0;
6040 break;
6041 case 3:
6042 /* When there are 3 operands, one of them may be immediate,
6043 which may be the first or the last operand. Otherwise,
6044 the first operand must be shift count register (cl) or it
6045 is an instruction with VexNDS. */
6046 gas_assert (i.imm_operands == 1
6047 || (i.imm_operands == 0
6048 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6049 || i.types[0].bitfield.shiftcount)));
6050 if (operand_type_check (i.types[0], imm)
6051 || i.types[0].bitfield.shiftcount)
6052 source = 1;
6053 else
6054 source = 0;
6055 break;
6056 case 4:
6057 /* When there are 4 operands, the first two must be 8bit
6058 immediate operands. The source operand will be the 3rd
6059 one.
6060
6061 For instructions with VexNDS, if the first operand
6062 an imm8, the source operand is the 2nd one. If the last
6063 operand is imm8, the source operand is the first one. */
6064 gas_assert ((i.imm_operands == 2
6065 && i.types[0].bitfield.imm8
6066 && i.types[1].bitfield.imm8)
6067 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6068 && i.imm_operands == 1
6069 && (i.types[0].bitfield.imm8
6070 || i.types[i.operands - 1].bitfield.imm8
6071 || i.rounding)));
6072 if (i.imm_operands == 2)
6073 source = 2;
6074 else
6075 {
6076 if (i.types[0].bitfield.imm8)
6077 source = 1;
6078 else
6079 source = 0;
6080 }
6081 break;
6082 case 5:
6083 if (i.tm.opcode_modifier.evex)
6084 {
6085 /* For EVEX instructions, when there are 5 operands, the
6086 first one must be immediate operand. If the second one
6087 is immediate operand, the source operand is the 3th
6088 one. If the last one is immediate operand, the source
6089 operand is the 2nd one. */
6090 gas_assert (i.imm_operands == 2
6091 && i.tm.opcode_modifier.sae
6092 && operand_type_check (i.types[0], imm));
6093 if (operand_type_check (i.types[1], imm))
6094 source = 2;
6095 else if (operand_type_check (i.types[4], imm))
6096 source = 1;
6097 else
6098 abort ();
6099 }
6100 break;
6101 default:
6102 abort ();
6103 }
6104
6105 if (!vex_3_sources)
6106 {
6107 dest = source + 1;
6108
6109 /* RC/SAE operand could be between DEST and SRC. That happens
6110 when one operand is GPR and the other one is XMM/YMM/ZMM
6111 register. */
6112 if (i.rounding && i.rounding->operand == (int) dest)
6113 dest++;
6114
6115 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6116 {
6117 /* For instructions with VexNDS, the register-only source
6118 operand must be 32/64bit integer, XMM, YMM or ZMM
6119 register. It is encoded in VEX prefix. We need to
6120 clear RegMem bit before calling operand_type_equal. */
6121
6122 i386_operand_type op;
6123 unsigned int vvvv;
6124
6125 /* Check register-only source operand when two source
6126 operands are swapped. */
6127 if (!i.tm.operand_types[source].bitfield.baseindex
6128 && i.tm.operand_types[dest].bitfield.baseindex)
6129 {
6130 vvvv = source;
6131 source = dest;
6132 }
6133 else
6134 vvvv = dest;
6135
6136 op = i.tm.operand_types[vvvv];
6137 op.bitfield.regmem = 0;
6138 if ((dest + 1) >= i.operands
6139 || (op.bitfield.reg32 != 1
6140 && !op.bitfield.reg64 != 1
6141 && !operand_type_equal (&op, &regxmm)
6142 && !operand_type_equal (&op, &regymm)
6143 && !operand_type_equal (&op, &regzmm)
6144 && !operand_type_equal (&op, &regmask)))
6145 abort ();
6146 i.vex.register_specifier = i.op[vvvv].regs;
6147 dest++;
6148 }
6149 }
6150
6151 i.rm.mode = 3;
6152 /* One of the register operands will be encoded in the i.tm.reg
6153 field, the other in the combined i.tm.mode and i.tm.regmem
6154 fields. If no form of this instruction supports a memory
6155 destination operand, then we assume the source operand may
6156 sometimes be a memory operand and so we need to store the
6157 destination in the i.rm.reg field. */
6158 if (!i.tm.operand_types[dest].bitfield.regmem
6159 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6160 {
6161 i.rm.reg = i.op[dest].regs->reg_num;
6162 i.rm.regmem = i.op[source].regs->reg_num;
6163 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6164 i.rex |= REX_R;
6165 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6166 i.vrex |= REX_R;
6167 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6168 i.rex |= REX_B;
6169 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6170 i.vrex |= REX_B;
6171 }
6172 else
6173 {
6174 i.rm.reg = i.op[source].regs->reg_num;
6175 i.rm.regmem = i.op[dest].regs->reg_num;
6176 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6177 i.rex |= REX_B;
6178 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6179 i.vrex |= REX_B;
6180 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6181 i.rex |= REX_R;
6182 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6183 i.vrex |= REX_R;
6184 }
6185 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6186 {
6187 if (!i.types[0].bitfield.control
6188 && !i.types[1].bitfield.control)
6189 abort ();
6190 i.rex &= ~(REX_R | REX_B);
6191 add_prefix (LOCK_PREFIX_OPCODE);
6192 }
6193 }
6194 else
6195 { /* If it's not 2 reg operands... */
6196 unsigned int mem;
6197
6198 if (i.mem_operands)
6199 {
6200 unsigned int fake_zero_displacement = 0;
6201 unsigned int op;
6202
6203 for (op = 0; op < i.operands; op++)
6204 if (operand_type_check (i.types[op], anymem))
6205 break;
6206 gas_assert (op < i.operands);
6207
6208 if (i.tm.opcode_modifier.vecsib)
6209 {
6210 if (i.index_reg->reg_num == RegEiz
6211 || i.index_reg->reg_num == RegRiz)
6212 abort ();
6213
6214 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6215 if (!i.base_reg)
6216 {
6217 i.sib.base = NO_BASE_REGISTER;
6218 i.sib.scale = i.log2_scale_factor;
6219 /* No Vec_Disp8 if there is no base. */
6220 i.types[op].bitfield.vec_disp8 = 0;
6221 i.types[op].bitfield.disp8 = 0;
6222 i.types[op].bitfield.disp16 = 0;
6223 i.types[op].bitfield.disp64 = 0;
6224 if (flag_code != CODE_64BIT)
6225 {
6226 /* Must be 32 bit */
6227 i.types[op].bitfield.disp32 = 1;
6228 i.types[op].bitfield.disp32s = 0;
6229 }
6230 else
6231 {
6232 i.types[op].bitfield.disp32 = 0;
6233 i.types[op].bitfield.disp32s = 1;
6234 }
6235 }
6236 i.sib.index = i.index_reg->reg_num;
6237 if ((i.index_reg->reg_flags & RegRex) != 0)
6238 i.rex |= REX_X;
6239 if ((i.index_reg->reg_flags & RegVRex) != 0)
6240 i.vrex |= REX_X;
6241 }
6242
6243 default_seg = &ds;
6244
6245 if (i.base_reg == 0)
6246 {
6247 i.rm.mode = 0;
6248 if (!i.disp_operands)
6249 {
6250 fake_zero_displacement = 1;
6251 /* Instructions with VSIB byte need 32bit displacement
6252 if there is no base register. */
6253 if (i.tm.opcode_modifier.vecsib)
6254 i.types[op].bitfield.disp32 = 1;
6255 }
6256 if (i.index_reg == 0)
6257 {
6258 gas_assert (!i.tm.opcode_modifier.vecsib);
6259 /* Operand is just <disp> */
6260 if (flag_code == CODE_64BIT)
6261 {
6262 /* 64bit mode overwrites the 32bit absolute
6263 addressing by RIP relative addressing and
6264 absolute addressing is encoded by one of the
6265 redundant SIB forms. */
6266 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6267 i.sib.base = NO_BASE_REGISTER;
6268 i.sib.index = NO_INDEX_REGISTER;
6269 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6270 ? disp32s : disp32);
6271 }
6272 else if ((flag_code == CODE_16BIT)
6273 ^ (i.prefix[ADDR_PREFIX] != 0))
6274 {
6275 i.rm.regmem = NO_BASE_REGISTER_16;
6276 i.types[op] = disp16;
6277 }
6278 else
6279 {
6280 i.rm.regmem = NO_BASE_REGISTER;
6281 i.types[op] = disp32;
6282 }
6283 }
6284 else if (!i.tm.opcode_modifier.vecsib)
6285 {
6286 /* !i.base_reg && i.index_reg */
6287 if (i.index_reg->reg_num == RegEiz
6288 || i.index_reg->reg_num == RegRiz)
6289 i.sib.index = NO_INDEX_REGISTER;
6290 else
6291 i.sib.index = i.index_reg->reg_num;
6292 i.sib.base = NO_BASE_REGISTER;
6293 i.sib.scale = i.log2_scale_factor;
6294 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6295 /* No Vec_Disp8 if there is no base. */
6296 i.types[op].bitfield.vec_disp8 = 0;
6297 i.types[op].bitfield.disp8 = 0;
6298 i.types[op].bitfield.disp16 = 0;
6299 i.types[op].bitfield.disp64 = 0;
6300 if (flag_code != CODE_64BIT)
6301 {
6302 /* Must be 32 bit */
6303 i.types[op].bitfield.disp32 = 1;
6304 i.types[op].bitfield.disp32s = 0;
6305 }
6306 else
6307 {
6308 i.types[op].bitfield.disp32 = 0;
6309 i.types[op].bitfield.disp32s = 1;
6310 }
6311 if ((i.index_reg->reg_flags & RegRex) != 0)
6312 i.rex |= REX_X;
6313 }
6314 }
6315 /* RIP addressing for 64bit mode. */
6316 else if (i.base_reg->reg_num == RegRip ||
6317 i.base_reg->reg_num == RegEip)
6318 {
6319 gas_assert (!i.tm.opcode_modifier.vecsib);
6320 i.rm.regmem = NO_BASE_REGISTER;
6321 i.types[op].bitfield.disp8 = 0;
6322 i.types[op].bitfield.disp16 = 0;
6323 i.types[op].bitfield.disp32 = 0;
6324 i.types[op].bitfield.disp32s = 1;
6325 i.types[op].bitfield.disp64 = 0;
6326 i.types[op].bitfield.vec_disp8 = 0;
6327 i.flags[op] |= Operand_PCrel;
6328 if (! i.disp_operands)
6329 fake_zero_displacement = 1;
6330 }
6331 else if (i.base_reg->reg_type.bitfield.reg16)
6332 {
6333 gas_assert (!i.tm.opcode_modifier.vecsib);
6334 switch (i.base_reg->reg_num)
6335 {
6336 case 3: /* (%bx) */
6337 if (i.index_reg == 0)
6338 i.rm.regmem = 7;
6339 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6340 i.rm.regmem = i.index_reg->reg_num - 6;
6341 break;
6342 case 5: /* (%bp) */
6343 default_seg = &ss;
6344 if (i.index_reg == 0)
6345 {
6346 i.rm.regmem = 6;
6347 if (operand_type_check (i.types[op], disp) == 0)
6348 {
6349 /* fake (%bp) into 0(%bp) */
6350 if (i.tm.operand_types[op].bitfield.vec_disp8)
6351 i.types[op].bitfield.vec_disp8 = 1;
6352 else
6353 i.types[op].bitfield.disp8 = 1;
6354 fake_zero_displacement = 1;
6355 }
6356 }
6357 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6358 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6359 break;
6360 default: /* (%si) -> 4 or (%di) -> 5 */
6361 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6362 }
6363 i.rm.mode = mode_from_disp_size (i.types[op]);
6364 }
6365 else /* i.base_reg and 32/64 bit mode */
6366 {
6367 if (flag_code == CODE_64BIT
6368 && operand_type_check (i.types[op], disp))
6369 {
6370 i386_operand_type temp;
6371 operand_type_set (&temp, 0);
6372 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6373 temp.bitfield.vec_disp8
6374 = i.types[op].bitfield.vec_disp8;
6375 i.types[op] = temp;
6376 if (i.prefix[ADDR_PREFIX] == 0)
6377 i.types[op].bitfield.disp32s = 1;
6378 else
6379 i.types[op].bitfield.disp32 = 1;
6380 }
6381
6382 if (!i.tm.opcode_modifier.vecsib)
6383 i.rm.regmem = i.base_reg->reg_num;
6384 if ((i.base_reg->reg_flags & RegRex) != 0)
6385 i.rex |= REX_B;
6386 i.sib.base = i.base_reg->reg_num;
6387 /* x86-64 ignores REX prefix bit here to avoid decoder
6388 complications. */
6389 if (!(i.base_reg->reg_flags & RegRex)
6390 && (i.base_reg->reg_num == EBP_REG_NUM
6391 || i.base_reg->reg_num == ESP_REG_NUM))
6392 default_seg = &ss;
6393 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6394 {
6395 fake_zero_displacement = 1;
6396 if (i.tm.operand_types [op].bitfield.vec_disp8)
6397 i.types[op].bitfield.vec_disp8 = 1;
6398 else
6399 i.types[op].bitfield.disp8 = 1;
6400 }
6401 i.sib.scale = i.log2_scale_factor;
6402 if (i.index_reg == 0)
6403 {
6404 gas_assert (!i.tm.opcode_modifier.vecsib);
6405 /* <disp>(%esp) becomes two byte modrm with no index
6406 register. We've already stored the code for esp
6407 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6408 Any base register besides %esp will not use the
6409 extra modrm byte. */
6410 i.sib.index = NO_INDEX_REGISTER;
6411 }
6412 else if (!i.tm.opcode_modifier.vecsib)
6413 {
6414 if (i.index_reg->reg_num == RegEiz
6415 || i.index_reg->reg_num == RegRiz)
6416 i.sib.index = NO_INDEX_REGISTER;
6417 else
6418 i.sib.index = i.index_reg->reg_num;
6419 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6420 if ((i.index_reg->reg_flags & RegRex) != 0)
6421 i.rex |= REX_X;
6422 }
6423
6424 if (i.disp_operands
6425 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6426 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6427 i.rm.mode = 0;
6428 else
6429 {
6430 if (!fake_zero_displacement
6431 && !i.disp_operands
6432 && i.disp_encoding)
6433 {
6434 fake_zero_displacement = 1;
6435 if (i.disp_encoding == disp_encoding_8bit)
6436 i.types[op].bitfield.disp8 = 1;
6437 else
6438 i.types[op].bitfield.disp32 = 1;
6439 }
6440 i.rm.mode = mode_from_disp_size (i.types[op]);
6441 }
6442 }
6443
6444 if (fake_zero_displacement)
6445 {
6446 /* Fakes a zero displacement assuming that i.types[op]
6447 holds the correct displacement size. */
6448 expressionS *exp;
6449
6450 gas_assert (i.op[op].disps == 0);
6451 exp = &disp_expressions[i.disp_operands++];
6452 i.op[op].disps = exp;
6453 exp->X_op = O_constant;
6454 exp->X_add_number = 0;
6455 exp->X_add_symbol = (symbolS *) 0;
6456 exp->X_op_symbol = (symbolS *) 0;
6457 }
6458
6459 mem = op;
6460 }
6461 else
6462 mem = ~0;
6463
6464 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6465 {
6466 if (operand_type_check (i.types[0], imm))
6467 i.vex.register_specifier = NULL;
6468 else
6469 {
6470 /* VEX.vvvv encodes one of the sources when the first
6471 operand is not an immediate. */
6472 if (i.tm.opcode_modifier.vexw == VEXW0)
6473 i.vex.register_specifier = i.op[0].regs;
6474 else
6475 i.vex.register_specifier = i.op[1].regs;
6476 }
6477
6478 /* Destination is a XMM register encoded in the ModRM.reg
6479 and VEX.R bit. */
6480 i.rm.reg = i.op[2].regs->reg_num;
6481 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6482 i.rex |= REX_R;
6483
6484 /* ModRM.rm and VEX.B encodes the other source. */
6485 if (!i.mem_operands)
6486 {
6487 i.rm.mode = 3;
6488
6489 if (i.tm.opcode_modifier.vexw == VEXW0)
6490 i.rm.regmem = i.op[1].regs->reg_num;
6491 else
6492 i.rm.regmem = i.op[0].regs->reg_num;
6493
6494 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6495 i.rex |= REX_B;
6496 }
6497 }
6498 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6499 {
6500 i.vex.register_specifier = i.op[2].regs;
6501 if (!i.mem_operands)
6502 {
6503 i.rm.mode = 3;
6504 i.rm.regmem = i.op[1].regs->reg_num;
6505 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6506 i.rex |= REX_B;
6507 }
6508 }
6509 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6510 (if any) based on i.tm.extension_opcode. Again, we must be
6511 careful to make sure that segment/control/debug/test/MMX
6512 registers are coded into the i.rm.reg field. */
6513 else if (i.reg_operands)
6514 {
6515 unsigned int op;
6516 unsigned int vex_reg = ~0;
6517
6518 for (op = 0; op < i.operands; op++)
6519 if (i.types[op].bitfield.reg8
6520 || i.types[op].bitfield.reg16
6521 || i.types[op].bitfield.reg32
6522 || i.types[op].bitfield.reg64
6523 || i.types[op].bitfield.regmmx
6524 || i.types[op].bitfield.regxmm
6525 || i.types[op].bitfield.regymm
6526 || i.types[op].bitfield.regbnd
6527 || i.types[op].bitfield.regzmm
6528 || i.types[op].bitfield.regmask
6529 || i.types[op].bitfield.sreg2
6530 || i.types[op].bitfield.sreg3
6531 || i.types[op].bitfield.control
6532 || i.types[op].bitfield.debug
6533 || i.types[op].bitfield.test)
6534 break;
6535
6536 if (vex_3_sources)
6537 op = dest;
6538 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6539 {
6540 /* For instructions with VexNDS, the register-only
6541 source operand is encoded in VEX prefix. */
6542 gas_assert (mem != (unsigned int) ~0);
6543
6544 if (op > mem)
6545 {
6546 vex_reg = op++;
6547 gas_assert (op < i.operands);
6548 }
6549 else
6550 {
6551 /* Check register-only source operand when two source
6552 operands are swapped. */
6553 if (!i.tm.operand_types[op].bitfield.baseindex
6554 && i.tm.operand_types[op + 1].bitfield.baseindex)
6555 {
6556 vex_reg = op;
6557 op += 2;
6558 gas_assert (mem == (vex_reg + 1)
6559 && op < i.operands);
6560 }
6561 else
6562 {
6563 vex_reg = op + 1;
6564 gas_assert (vex_reg < i.operands);
6565 }
6566 }
6567 }
6568 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6569 {
6570 /* For instructions with VexNDD, the register destination
6571 is encoded in VEX prefix. */
6572 if (i.mem_operands == 0)
6573 {
6574 /* There is no memory operand. */
6575 gas_assert ((op + 2) == i.operands);
6576 vex_reg = op + 1;
6577 }
6578 else
6579 {
6580 /* There are only 2 operands. */
6581 gas_assert (op < 2 && i.operands == 2);
6582 vex_reg = 1;
6583 }
6584 }
6585 else
6586 gas_assert (op < i.operands);
6587
6588 if (vex_reg != (unsigned int) ~0)
6589 {
6590 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6591
6592 if (type->bitfield.reg32 != 1
6593 && type->bitfield.reg64 != 1
6594 && !operand_type_equal (type, &regxmm)
6595 && !operand_type_equal (type, &regymm)
6596 && !operand_type_equal (type, &regzmm)
6597 && !operand_type_equal (type, &regmask))
6598 abort ();
6599
6600 i.vex.register_specifier = i.op[vex_reg].regs;
6601 }
6602
6603 /* Don't set OP operand twice. */
6604 if (vex_reg != op)
6605 {
6606 /* If there is an extension opcode to put here, the
6607 register number must be put into the regmem field. */
6608 if (i.tm.extension_opcode != None)
6609 {
6610 i.rm.regmem = i.op[op].regs->reg_num;
6611 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6612 i.rex |= REX_B;
6613 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6614 i.vrex |= REX_B;
6615 }
6616 else
6617 {
6618 i.rm.reg = i.op[op].regs->reg_num;
6619 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6620 i.rex |= REX_R;
6621 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6622 i.vrex |= REX_R;
6623 }
6624 }
6625
6626 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6627 must set it to 3 to indicate this is a register operand
6628 in the regmem field. */
6629 if (!i.mem_operands)
6630 i.rm.mode = 3;
6631 }
6632
6633 /* Fill in i.rm.reg field with extension opcode (if any). */
6634 if (i.tm.extension_opcode != None)
6635 i.rm.reg = i.tm.extension_opcode;
6636 }
6637 return default_seg;
6638 }
6639
/* Emit a relaxable (conditional or unconditional) branch: write the
   prefixes and opcode byte into the fixed part of a machine-dependent
   frag and leave room in the variable part for md_convert_frag to
   grow the displacement during relaxation.  The branch target is
   i.op[0].disps.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit .d32 displacement encoding starts relaxation in the
     large form; otherwise start small and let relaxation grow it.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  /* Count the prefix bytes we will emit ourselves; each one consumed
     here is subtracted from i.prefixes so the leftover check below
     only warns about prefixes we drop.  */
  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol; End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Pick the relax state: unconditional jumps relax differently from
     conditional ones, and pre-386 CPUs need the jump-around form.  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
6724
/* Output a fixed-size jump: a byte jump (loop/jecxz) or a direct
   jump with 16/32-bit displacement.  No relaxation; a fixup is
   emitted for the displacement instead.  */
static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      /* Operand-size prefix flips between 16- and 32-bit
	 displacement.  */
      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through: the low opcode byte is always emitted.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6806
/* Output an inter-segment (far) jump/call: prefixes, opcode, a 16- or
   32-bit offset, then the 2-byte segment selector.  Operand 0 is the
   segment, operand 1 the offset.  */
static void
output_interseg_jump (void)
{
  char *p;
  int size;
  int prefix;
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* Operand-size prefix flips the offset size.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* A constant 16-bit offset must fit either signed or unsigned
	 interpretation of a word.  */
      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  /* The segment selector must be an absolute constant; no reloc is
     emitted for it.  */
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6870
/* Emit the assembled instruction described by the global `i' into the
   current frag: dispatch jumps to their specialized emitters, otherwise
   write prefixes (or the VEX/EVEX prefix), opcode bytes, ModRM/SIB,
   displacement and immediates in encoding order.  */
static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  /* Remember where the insn starts for GOTPC relocation adjustment
     in output_disp/output_imm.  */
  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* Opcode bytes above the low two encode a mandatory
	     prefix; peel it off and register it as a prefix.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* PadLock insns keep an explicit REP prefix;
			 don't add the implicit one twice.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  /* Emit high opcode bytes first, then fall out to the common
	     code that writes the low two bytes.  */
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
7034
7035 /* Return the size of the displacement operand N. */
7036
7037 static int
7038 disp_size (unsigned int n)
7039 {
7040 int size = 4;
7041
7042 /* Vec_Disp8 has to be 8bit. */
7043 if (i.types[n].bitfield.vec_disp8)
7044 size = 1;
7045 else if (i.types[n].bitfield.disp64)
7046 size = 8;
7047 else if (i.types[n].bitfield.disp8)
7048 size = 1;
7049 else if (i.types[n].bitfield.disp16)
7050 size = 2;
7051 return size;
7052 }
7053
7054 /* Return the size of the immediate operand N. */
7055
7056 static int
7057 imm_size (unsigned int n)
7058 {
7059 int size = 4;
7060 if (i.types[n].bitfield.imm64)
7061 size = 8;
7062 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7063 size = 1;
7064 else if (i.types[n].bitfield.imm16)
7065 size = 2;
7066 return size;
7067 }
7068
/* Emit the displacement bytes (or fixups) for every displacement
   operand of the current insn.  INSN_START_FRAG/INSN_START_OFF mark
   where the insn began, needed to bias GOTPC-style relocations which
   are relative to the start of the displacement field rather than the
   insn.  */
static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (i.types[n].bitfield.vec_disp8
	  || operand_type_check (i.types[n], disp))
	{
	  if (i.op[n].disps->X_op == O_constant)
	    {
	      int size = disp_size (n);
	      offsetT val = i.op[n].disps->X_add_number;

	      /* EVEX compressed disp8: the stored byte is the
		 displacement scaled down by the memory element
		 size (i.memshift).  */
	      if (i.types[n].bitfield.vec_disp8)
		val >>= i.memshift;
	      val = offset_in_range (val, size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      enum bfd_reloc_code_real reloc_type;
	      int size = disp_size (n);
	      int sign = i.types[n].bitfield.disp32s;
	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;

	      /* We can't have 8 bit displacement here.  */
	      gas_assert (!i.types[n].bitfield.disp8);

	      /* The PC relative address is computed relative
		 to the instruction boundary, so in case immediate
		 fields follows, we need to adjust the value.  */
	      if (pcrel && i.imm_operands)
		{
		  unsigned int n1;
		  int sz = 0;

		  for (n1 = 0; n1 < i.operands; n1++)
		    if (operand_type_check (i.types[n1], imm))
		      {
			/* Only one immediate is allowed for PC
			   relative address.  */
			gas_assert (sz == 0);
			sz = imm_size (n1);
			i.op[n].disps->X_add_number -= sz;
		      }
		  /* We should find the immediate.  */
		  gas_assert (sz != 0);
		}

	      p = frag_more (size);
	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
	      /* References to _GLOBAL_OFFSET_TABLE_ (possibly inside
		 a `sym+[.-.L]' construct) get converted to GOTPC
		 relocations; the addend must then account for the
		 offset of the displacement within the insn.  */
	      if (GOT_symbol
		  && GOT_symbol == i.op[n].disps->X_add_symbol
		  && (((reloc_type == BFD_RELOC_32
			|| reloc_type == BFD_RELOC_X86_64_32S
			|| (reloc_type == BFD_RELOC_64
			    && object_64bit))
		       && (i.op[n].disps->X_op == O_symbol
			   || (i.op[n].disps->X_op == O_add
			       && ((symbol_get_value_expression
				    (i.op[n].disps->X_op_symbol)->X_op)
				   == O_subtract))))
		      || reloc_type == BFD_RELOC_32_PCREL))
		{
		  offsetT add;

		  /* Distance from the start of the insn to the
		     displacement field, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    {
		      reloc_type = BFD_RELOC_386_GOTPC;
		      i.op[n].imms->X_add_number += add;
		    }
		  else if (reloc_type == BFD_RELOC_64)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  else
		    /* Don't do the adjustment for x86-64, as there
		       the pcrel addressing is relative to the _next_
		       insn, and that is taken care of in other code.  */
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].disps, pcrel, reloc_type);
	    }
	}
    }
}
7171
/* Emit the immediate bytes (or fixups) for every immediate operand of
   the current insn.  INSN_START_FRAG/INSN_START_OFF mark where the
   insn began, needed to bias GOTPC relocations (see the long comment
   below).  */
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* 64-bit insns with a 32-bit immediate sign-extend it.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /* This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       * The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       * Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* Distance from the start of the insn to this
		     immediate field, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
7298 \f
7299 /* x86_cons_fix_new is called via the expression parsing code when a
7300 reloc is needed. We use this hook to get the correct .got reloc. */
7301 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
7302 static int cons_sign = -1;
7303
7304 void
7305 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7306 expressionS *exp)
7307 {
7308 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
7309
7310 got_reloc = NO_RELOC;
7311
7312 #ifdef TE_PE
7313 if (exp->X_op == O_secrel)
7314 {
7315 exp->X_op = O_symbol;
7316 r = BFD_RELOC_32_SECREL;
7317 }
7318 #endif
7319
7320 fix_new_exp (frag, off, len, exp, 0, r);
7321 }
7322
7323 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7324 purpose of the `.dc.a' internal pseudo-op. */
7325
7326 int
7327 x86_address_bytes (void)
7328 {
7329 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7330 return 4;
7331 return stdoutput->arch_info->bits_per_address / 8;
7332 }
7333
7334 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7335 || defined (LEX_AT)
7336 # define lex_got(reloc, adjust, types) NULL
7337 #else
7338 /* Parse operands of the form
7339 <symbol>@GOTOFF+<nnn>
7340 and similar .plt or .got references.
7341
7342 If we find one, set up the correct relocation in RELOC and copy the
7343 input string, minus the `@GOTOFF' into a malloc'd buffer for
7344 parsing by the calling routine. Return this buffer, and if ADJUST
7345 is non-null set it to the length of the string we removed from the
7346 input line. Otherwise return NULL. */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];	/* [0]=32-bit, [1]=64-bit.  */
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Find the '@' introducing a reloc operator; give up at end of
     operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Entry 0 (SIZE) doesn't reference the GOT; all others
		 need the GOT symbol to exist.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7497 #endif
7498
7499 #ifdef TE_PE
7500 #ifdef lex_got
7501 #undef lex_got
7502 #endif
7503 /* Parse operands of the form
7504 <symbol>@SECREL32+<nnn>
7505
7506 If we find one, set up the correct relocation in RELOC and copy the
7507 input string, minus the `@SECREL32' into a malloc'd buffer for
7508 parsing by the calling routine. Return this buffer, and if ADJUST
7509 is non-null set it to the length of the string we removed from the
7510 input line. Otherwise return NULL.
7511
7512 This function is copied from the ELF version above adjusted for PE targets. */
7513
7514 static char *
7515 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7516 int *adjust ATTRIBUTE_UNUSED,
7517 i386_operand_type *types ATTRIBUTE_UNUSED)
7518 {
7519 static const struct
7520 {
7521 const char *str;
7522 int len;
7523 const enum bfd_reloc_code_real rel[2];
7524 const i386_operand_type types64;
7525 }
7526 gotrel[] =
7527 {
7528 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
7529 BFD_RELOC_32_SECREL },
7530 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7531 };
7532
7533 char *cp;
7534 unsigned j;
7535
7536 for (cp = input_line_pointer; *cp != '@'; cp++)
7537 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7538 return NULL;
7539
7540 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7541 {
7542 int len = gotrel[j].len;
7543
7544 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7545 {
7546 if (gotrel[j].rel[object_64bit] != 0)
7547 {
7548 int first, second;
7549 char *tmpbuf, *past_reloc;
7550
7551 *rel = gotrel[j].rel[object_64bit];
7552 if (adjust)
7553 *adjust = len;
7554
7555 if (types)
7556 {
7557 if (flag_code != CODE_64BIT)
7558 {
7559 types->bitfield.imm32 = 1;
7560 types->bitfield.disp32 = 1;
7561 }
7562 else
7563 *types = gotrel[j].types64;
7564 }
7565
7566 /* The length of the first part of our input line. */
7567 first = cp - input_line_pointer;
7568
7569 /* The second part goes from after the reloc token until
7570 (and including) an end_of_line char or comma. */
7571 past_reloc = cp + 1 + len;
7572 cp = past_reloc;
7573 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7574 ++cp;
7575 second = cp + 1 - past_reloc;
7576
7577 /* Allocate and copy string. The trailing NUL shouldn't
7578 be necessary, but be safe. */
7579 tmpbuf = (char *) xmalloc (first + second + 2);
7580 memcpy (tmpbuf, input_line_pointer, first);
7581 if (second != 0 && *past_reloc != ' ')
7582 /* Replace the relocation token with ' ', so that
7583 errors like foo@SECLREL321 will be detected. */
7584 tmpbuf[first++] = ' ';
7585 memcpy (tmpbuf + first, past_reloc, second);
7586 tmpbuf[first + second] = '\0';
7587 return tmpbuf;
7588 }
7589
7590 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7591 gotrel[j].str, 1 << (5 + object_64bit));
7592 return NULL;
7593 }
7594 }
7595
7596 /* Might be a symbol version string. Don't as_bad here. */
7597 return NULL;
7598 }
7599
7600 #endif /* TE_PE */
7601
/* Target hook for cons-style directives (.long etc.): parse EXP of
   SIZE bytes, handling @GOTOFF-style reloc operators via lex_got.  */
void
x86_cons (expressionS *exp, int size)
{
  /* Negate (rather than clear) intel_syntax so it can be restored
     below while still recording that we are inside x86_cons; the
     i386_intel_simplify call at the end only runs for Intel mode.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      /* Temporarily NUL-terminate for the error message.  */
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);
}
7652
7653 static void
7654 signed_cons (int size)
7655 {
7656 if (flag_code == CODE_64BIT)
7657 cons_sign = 1;
7658 cons (size);
7659 cons_sign = -1;
7660 }
7661
7662 #ifdef TE_PE
7663 static void
7664 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7665 {
7666 expressionS exp;
7667
7668 do
7669 {
7670 expression (&exp);
7671 if (exp.X_op == O_symbol)
7672 exp.X_op = O_secrel;
7673
7674 emit_expr (&exp, 4);
7675 }
7676 while (*input_line_pointer++ == ',');
7677
7678 input_line_pointer--;
7679 demand_empty_rest_of_line ();
7680 }
7681 #endif
7682
7683 /* Handle Vector operations. */
7684
/* Parse AVX-512 vector operation decorators following an operand:
   broadcast ({1to8}/{1to16}), write mask ({%kN}) and zeroing ({z}).
   OP_STRING points at the first '{'; parsing stops at OP_END (or NUL
   if OP_END is NULL).  Results are recorded in i.broadcast / i.mask.
   Returns the updated parse position, or NULL after issuing an
   error.  */
static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      /* Remember the start of this decorator for error messages.  */
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  /* i.mask already exists: either {z} came first and
		     we now supply the register, or this is a
		     duplicate mask.  */
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  /* Every decorator must be closed with '}'.  */
	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;
	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  return op_string;
}
7816
/* Parse the immediate operand starting at IMM_START into the current
   operand slot.  Handles @GOT-style reloc operators and trailing
   AVX-512 vector decorators.  Returns 1 on success, 0 on error.  */
static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Point the expression parser at the operand text.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* Strip any @GOTOFF-like reloc operator, recording the reloc type
     and the operand types it permits.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc operator makes no sense on a bare constant or
	 register; force the error path in finalize.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
7875
/* Validate and classify the parsed immediate expression EXP for the
   current operand.  TYPES restricts the operand-type bits when the
   immediate is a link-time address; IMM_START (may be NULL) is the
   original text, used only for diagnostics.  Returns 1 on success,
   0 after issuing an error.  */
static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  The mask keeps only values
	 already representable in 32 bits; the xor/subtract pair then
	 sign-extends bit 31 through the full addressT width.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      /* a.out cannot represent relocations against arbitrary
	 sections.  */
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp->X_op == O_register)
    {
      /* In AT&T syntax a register is never a valid immediate; Intel
	 syntax handles registers elsewhere, so only reject here for
	 AT&T.  */
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
7932
7933 static char *
7934 i386_scale (char *scale)
7935 {
7936 offsetT val;
7937 char *save = input_line_pointer;
7938
7939 input_line_pointer = scale;
7940 val = get_absolute_expression ();
7941
7942 switch (val)
7943 {
7944 case 1:
7945 i.log2_scale_factor = 0;
7946 break;
7947 case 2:
7948 i.log2_scale_factor = 1;
7949 break;
7950 case 4:
7951 i.log2_scale_factor = 2;
7952 break;
7953 case 8:
7954 i.log2_scale_factor = 3;
7955 break;
7956 default:
7957 {
7958 char sep = *input_line_pointer;
7959
7960 *input_line_pointer = '\0';
7961 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7962 scale);
7963 *input_line_pointer = sep;
7964 input_line_pointer = save;
7965 return NULL;
7966 }
7967 }
7968 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7969 {
7970 as_warn (_("scale factor of %d without an index register"),
7971 1 << i.log2_scale_factor);
7972 i.log2_scale_factor = 0;
7973 }
7974 scale = input_line_pointer;
7975 input_line_pointer = save;
7976 return scale;
7977 }
7978
/* Parse the displacement text between DISP_START and DISP_END for the
   current operand.  First computes the set of permissible displacement
   sizes (disp16/disp32/disp32s/disp64) from the instruction kind,
   address/data-size prefixes and code mode, then parses the expression
   and hands it to i386_finalize_displacement.  Returns 1 on success,
   0 after an error.  */
static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  /* Non-branch instructions (and absolute jumps) use address-size
     rules; PC-relative branches below use data-size rules.  */
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      /* XOR: 16-bit mode without override, or 32-bit mode with
	 override, yields a 16-bit displacement.  */
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  /* A suffix naming the non-default operand size acts like an
	     explicit data-size override.  */
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  /* NUL-terminate at DISP_END so the expression parser stops there;
     restored before returning.  */
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant or register cannot carry the recognized
	 GOT-style relocation; force an error in finalize.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
8128
8129 static int
8130 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8131 i386_operand_type types, const char *disp_start)
8132 {
8133 i386_operand_type bigdisp;
8134 int ret = 1;
8135
8136 /* We do this to make sure that the section symbol is in
8137 the symbol table. We will ultimately change the relocation
8138 to be relative to the beginning of the section. */
8139 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8140 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8141 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8142 {
8143 if (exp->X_op != O_symbol)
8144 goto inv_disp;
8145
8146 if (S_IS_LOCAL (exp->X_add_symbol)
8147 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8148 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8149 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8150 exp->X_op = O_subtract;
8151 exp->X_op_symbol = GOT_symbol;
8152 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8153 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8154 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8155 i.reloc[this_operand] = BFD_RELOC_64;
8156 else
8157 i.reloc[this_operand] = BFD_RELOC_32;
8158 }
8159
8160 else if (exp->X_op == O_absent
8161 || exp->X_op == O_illegal
8162 || exp->X_op == O_big)
8163 {
8164 inv_disp:
8165 as_bad (_("missing or invalid displacement expression `%s'"),
8166 disp_start);
8167 ret = 0;
8168 }
8169
8170 else if (flag_code == CODE_64BIT
8171 && !i.prefix[ADDR_PREFIX]
8172 && exp->X_op == O_constant)
8173 {
8174 /* Since displacement is signed extended to 64bit, don't allow
8175 disp32 and turn off disp32s if they are out of range. */
8176 i.types[this_operand].bitfield.disp32 = 0;
8177 if (!fits_in_signed_long (exp->X_add_number))
8178 {
8179 i.types[this_operand].bitfield.disp32s = 0;
8180 if (i.types[this_operand].bitfield.baseindex)
8181 {
8182 as_bad (_("0x%lx out range of signed 32bit displacement"),
8183 (long) exp->X_add_number);
8184 ret = 0;
8185 }
8186 }
8187 }
8188
8189 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8190 else if (exp->X_op != O_constant
8191 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8192 && exp_seg != absolute_section
8193 && exp_seg != text_section
8194 && exp_seg != data_section
8195 && exp_seg != bss_section
8196 && exp_seg != undefined_section
8197 && !bfd_is_com_section (exp_seg))
8198 {
8199 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8200 ret = 0;
8201 }
8202 #endif
8203
8204 /* Check if this is a displacement only operand. */
8205 bigdisp = i.types[this_operand];
8206 bigdisp.bitfield.disp8 = 0;
8207 bigdisp.bitfield.disp16 = 0;
8208 bigdisp.bitfield.disp32 = 0;
8209 bigdisp.bitfield.disp32s = 0;
8210 bigdisp.bitfield.disp64 = 0;
8211 if (operand_type_all_zero (&bigdisp))
8212 i.types[this_operand] = operand_type_and (i.types[this_operand],
8213 types);
8214
8215 return ret;
8216 }
8217
/* Make sure the memory operand we've been dealt is valid.
   Validates base/index register combinations for the effective address
   mode, infers a missing address-size prefix from the first memory
   operand, and enforces the fixed rSI/rDI/rBX addressing of string
   instructions.  OPERAND_STRING is used for diagnostics only.
   Return 1 on success, 0 on a failure.  */

static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode;

  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  /* Pick whichever operand of the template carries the
	     baseindex bit; the esseg bit then selects rDI vs rSI.  */
	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong-but-usable register: warn and carry on, the
	     hardware uses the implied register regardless.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      /* Vector registers (xmm/ymm/zmm) are legal index
		 registers here for gather/scatter addressing.  */
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;
	}
      else
	{
	  /* 16-bit checks.  Only BX/BP base with SI/DI index and no
	     scaling is encodable (reg_num < 6 selects the base group,
	     >= 6 the index group).  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
8380
8381 /* Handle vector immediates. */
8382
8383 static int
8384 RC_SAE_immediate (const char *imm_start)
8385 {
8386 unsigned int match_found, j;
8387 const char *pstr = imm_start;
8388 expressionS *exp;
8389
8390 if (*pstr != '{')
8391 return 0;
8392
8393 pstr++;
8394 match_found = 0;
8395 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8396 {
8397 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8398 {
8399 if (!i.rounding)
8400 {
8401 rc_op.type = RC_NamesTable[j].type;
8402 rc_op.operand = this_operand;
8403 i.rounding = &rc_op;
8404 }
8405 else
8406 {
8407 as_bad (_("duplicated `%s'"), imm_start);
8408 return 0;
8409 }
8410 pstr += RC_NamesTable[j].len;
8411 match_found = 1;
8412 break;
8413 }
8414 }
8415 if (!match_found)
8416 return 0;
8417
8418 if (*pstr++ != '}')
8419 {
8420 as_bad (_("Missing '}': '%s'"), imm_start);
8421 return 0;
8422 }
8423 /* RC/SAE immediate string should contain nothing more. */;
8424 if (*pstr != 0)
8425 {
8426 as_bad (_("Junk after '}': '%s'"), imm_start);
8427 return 0;
8428 }
8429
8430 exp = &im_expressions[i.imm_operands++];
8431 i.op[this_operand].imms = exp;
8432
8433 exp->X_op = O_constant;
8434 exp->X_add_number = 0;
8435 exp->X_add_symbol = (symbolS *) 0;
8436 exp->X_op_symbol = (symbolS *) 0;
8437
8438 i.types[this_operand].bitfield.imm8 = 1;
8439 return 1;
8440 }
8441
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.  Handles, in order: an optional absolute prefix ('*'),
   a register (possibly a segment override "%seg:"), an immediate
   ('$'), an RC/SAE pseudo immediate ("{rn-sae}" etc.), and finally a
   memory reference "disp(base, index, scale)" with optional AVX-512
   vector operations in braces.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* reg_num of a segment register is its encoding:
	     0=%es 1=%cs 2=%ss 3=%ds 4=%fs 5=%gs.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}

      /* Handle vector operations.  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Plain register operand: record its type, clearing baseindex
	 since it is not being used as an address component.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  /* Exclude the vector-operation suffix from the address
	     text we are about to parse.  */
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
8770 \f
8771 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8772 that an rs_machine_dependent frag may reach. */
8773
8774 unsigned int
8775 i386_frag_max_var (fragS *frag)
8776 {
8777 /* The only relaxable frags are for jumps.
8778 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8779 gas_assert (frag->fr_type == rs_machine_dependent);
8780 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8781 }
8782
8783 /* md_estimate_size_before_relax()
8784
8785 Called just before relax() for rs_machine_dependent frags. The x86
8786 assembler uses these frags to handle variable size jump
8787 instructions.
8788
8789 Any symbol that is now undefined will not become defined.
8790 Return the correct fr_subtype in the frag.
8791 Return the initial "guess for variable size of frag" to caller.
8792 The guess is actually the growth beyond the fixed part. Whatever
8793 we do to grow the fixed or variable part contributes to our
8794 returned value. */
8795
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right; all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Convert the
	 frag to its largest form now and emit a fixup instead of
	 relaxing.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var, when not NO_RELOC, carries an explicit reloc chosen
	 at frag creation time.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the one-byte form and emit a
		 signed 8-bit pc-relative fixup.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
8909
8910 /* Called after relax() is finished.
8911
8912 In: Address of frag.
8913 fr_type == rs_machine_dependent.
8914 fr_subtype is what the address relaxed to.
8915
8916 Out: Any fixSs and constants are set up.
8917 Caller will turn frag into a ".space 0". */
8918
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode bytes to the relaxed (BIG) form chosen by
	 the relaxation pass; see md_estimate_size_before_relax for
	 the same transformations.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negated condition over an inserted unconditional jump;
	     see the COND_JUMP86 case in md_estimate_size_before_relax.  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
9017 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.

   Besides patching the bytes this also converts plain relocations to
   their PC-relative forms, compensates for bfd_install_relocation's
   treatment of partial_inplace PC-relative relocations, and forces
   the addend values the dynamic linker expects for PLT/GOT/TLS
   relocations.  SEG is only read by some configurations, hence
   ATTRIBUTE_UNUSED.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* The bytes to patch, inside the frag's literal buffer.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* A PC-relative fixup that still carries a plain data relocation is
     rewritten to the PC-relative relocation of the same width.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	/* Mark the referenced symbol as thread-local.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	/* Keep the relocation; do not patch the frag.  */
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* Always kept as relocations; nothing is patched here.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
  *valP = value;
#endif /* !defined (TE_Mach) */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* RELA targets carry the addend in the relocation, not in the
	 section contents.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
9191 \f
/* Turn a floating point constant of type TYPE into its target
   representation at LITP, setting *SIZEP to the number of chars
   emitted, and return whatever ieee_md_atof returns (an error message
   or NULL).  */

char *
md_atof (int type, char *litP, int *sizeP)
{
  /* The FALSE argument selects little-endian LITTLENUM order, as the
     (little-endian) i386 requires — see ieee_md_atof.  The previous
     comment here claimed the 386 was big-endian, which it is not.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
9199 \f
/* Static buffer for output_invalid: big enough for "(0x" + two hex
   digits + ")" + NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in a diagnostic: printable characters
   come back quoted as 'c', anything else as (0xNN).  Returns a static
   buffer that is overwritten on every call.  */

static char *
output_invalid (int c)
{
  int printable = ISPRINT (c);

  snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	    printable ? "'%c'" : "(0x%x)",
	    printable ? c : (int) (unsigned char) c);
  return output_invalid_buf;
}
9213
/* REG_STRING starts *before* REGISTER_PREFIX.  Parse a register name
   there; on success set *END_OP just past the consumed text and
   return its reg_entry, otherwise return NULL.  A register is also
   rejected when the selected CPU architecture or code mode does not
   provide it.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, translating each character through
     register_chars (see its initialization elsewhere in this file);
     a zero translation terminates the copy.  Names longer than
     MAX_REG_NAME_SIZE cannot be registers.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  /* %st(i): return the i'th entry after st(0).  */
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  /* With pseudo registers allowed, skip all of the availability
     filtering below.  */
  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit, segment, control, debug and test registers need at least
     an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FP stack registers need some x87 variant.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  /* %zmmN and the %kN mask registers require AVX-512F.  */
  if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
      && !cpu_arch_flags.bitfield.cpuavx512f)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.need_vrex = 1;
    }

  /* Outside 64-bit mode, REX-extended and 64-bit registers are
     rejected, except control registers on CPUs that have long mode.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The flat pseudo segment register is only accepted in Intel
     syntax.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
9339
9340 /* REG_STRING starts *before* REGISTER_PREFIX. */
9341
9342 static const reg_entry *
9343 parse_register (char *reg_string, char **end_op)
9344 {
9345 const reg_entry *r;
9346
9347 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9348 r = parse_real_register (reg_string, end_op);
9349 else
9350 r = NULL;
9351 if (!r)
9352 {
9353 char *save = input_line_pointer;
9354 char c;
9355 symbolS *symbolP;
9356
9357 input_line_pointer = reg_string;
9358 c = get_symbol_end ();
9359 symbolP = symbol_find (reg_string);
9360 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9361 {
9362 const expressionS *e = symbol_get_value_expression (symbolP);
9363
9364 know (e->X_op == O_register);
9365 know (e->X_add_number >= 0
9366 && (valueT) e->X_add_number < i386_regtab_size);
9367 r = i386_regtab + e->X_add_number;
9368 *end_op = input_line_pointer;
9369 }
9370 *input_line_pointer = c;
9371 input_line_pointer = save;
9372 }
9373 return r;
9374 }
9375
/* Hook called by the expression parser for each symbol NAME: if NAME
   is a register, fill in E as an O_register expression and return 1;
   otherwise give the Intel-specific parser a chance (when in Intel
   syntax) or return 0.  *NEXTCHARP holds the character that followed
   NAME in the input, which the caller replaced with a NUL.  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Temporarily restore the delimiter so the register parser can look
     past the end of NAME (needed for forms like `%st(1)').  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      /* The register parse consumed at least up to the old delimiter:
	 save the new delimiter and NUL-terminate the consumed text.  */
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  /* Not a register: put the NUL back and restore the scan position.  */
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
9396
9397 void
9398 md_operand (expressionS *e)
9399 {
9400 char *end;
9401 const reg_entry *r;
9402
9403 switch (*input_line_pointer)
9404 {
9405 case REGISTER_PREFIX:
9406 r = parse_real_register (input_line_pointer, &end);
9407 if (r)
9408 {
9409 e->X_op = O_register;
9410 e->X_add_number = r - i386_regtab;
9411 input_line_pointer = end;
9412 }
9413 break;
9414
9415 case '[':
9416 gas_assert (intel_syntax);
9417 end = input_line_pointer++;
9418 expression (e);
9419 if (*input_line_pointer == ']')
9420 {
9421 ++input_line_pointer;
9422 e->X_op_symbol = make_expr_symbol (e);
9423 e->X_add_symbol = NULL;
9424 e->X_add_number = 0;
9425 e->X_op = O_index;
9426 }
9427 else
9428 {
9429 e->X_op = O_absent;
9430 input_line_pointer = end;
9431 }
9432 break;
9433 }
9434 }
9435
9436 \f
/* Machine dependent single-letter command line options.  The SVR4
   compatibility letters (-k, -V, -Q:, -s) are only accepted when an
   ELF-capable object format is configured.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Codes identifying the long options in md_longopts below; based at
   OPTION_MD_BASE so they stay clear of the single-character option
   values.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9461
/* Machine dependent long command line options, dispatched on the
   OPTION_* codes above in md_parse_option.  --64 and --x32 are only
   offered when the configured object formats can represent them.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
9490
/* Handle a machine dependent command line option.  C is the value
   returned by getopt (a short option letter or one of the OPTION_*
   codes above) and ARG its argument, if any.  Returns 1 when the
   option was recognized, 0 otherwise; an invalid argument to a
   recognized option is a fatal error.  */

int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	const char **list, **l;

	/* Only accept --64 when this build has a 64-bit x86 target
	   compiled in.  */
	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  /* x32 (ILP32 on x86-64) needs the elf32-x86-64 target.  */
	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      /* --divide: stop treating `/' as a comment character by removing
	 it from the comment character set.  */
      {
	char *n, *t;
	const char *s;

	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* -march=CPU[,+EXTENSION...]: walk the '+'-separated tokens,
	 each naming either a processor or an ISA extension in
	 cpu_arch.  (The xstrdup'd copy is not freed; options are
	 parsed once at startup.)  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  Extension entries are stored with
		     a leading '.'; negated ones clear flags instead
		     of setting them.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Record the extension name as part of the
			 sub-architecture name.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL );
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      /* Vector length to encode for length-ignored scalar EVEX
	 instructions.  */
      if (strcmp (arg, "128") == 0)
	evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
	evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
	evexlig = evexl512;
      else
	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      /* EVEX.W value to encode for W-ignored EVEX instructions.  */
      if (strcmp (arg, "0") == 0)
	evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
	evexwig = evexw1;
      else
	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
9773
9774 #define MESSAGE_TEMPLATE \
9775 " "
9776
/* Print to STREAM the comma separated names from cpu_arch that are
   processors (EXT == 0) or ISA extensions (EXT != 0), wrapped so that
   each output line fits within MESSAGE_TEMPLATE.  With CHECK
   non-zero, processors lacking the i386 feature bit are omitted.  */

static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  /* NOTE(review): column 27 appears chosen to line up with the option
     descriptions printed by md_show_usage — confirm before changing.  */
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      /* Drop the leading '.' that marks extension entries.  */
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extensions.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0".  */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush the final (partial) line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
9852
/* Print the machine dependent command line options accepted by this
   target (the counterpart of md_parse_option) as part of the --help
   output.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
}
9921
9922 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
9923 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9924 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9925
/* Pick the target format to use, based on default_arch (set by
   --32/--64/--x32) and the configured output flavour.  Also finalizes
   flag_code, the x86 ELF ABI, and the default ISA/tune flags.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* Plain "x86_64" is the LP64 ABI; any suffix ("x86_64:32")
	 selects the x32 ILP32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* If no -march/-mtune was given, fall back to the per-mode default
     entry of cpu_arch (index 0 for 32-bit, 1 for 64-bit — confirm
     against the cpu_arch table, not visible here).  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects are 64-bit but may not carry 64-bit
	       relocations.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* The Intel L1OM and K1OM coprocessors use their own ELF
	   formats and exist only as 64-bit targets.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
10021
10022 #endif /* OBJ_MAYBE_ more than one */
10023
10024 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a ".note" section recording cpu_arch_name with note type
   NT_ARCH, so consumers can tell which architecture the object was
   assembled for.  Does nothing unless the output is ELF and a
   specific architecture was selected.  */
void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      /* Remember the current (sub)section so it can be restored once
	 the note has been written.  */
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      /* Write the note header (namesz, descsz, type) followed by the
	 NUL-terminated architecture name.  */
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad to a 4-byte boundary (frag_align takes a log2 value).  */
      frag_align (2, 0, 0);

      subseg_set (seg, subseg);
    }
}
10064 #endif
10065 \f
10066 symbolS *
10067 md_undefined_symbol (char *name)
10068 {
10069 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10070 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10071 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10072 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10073 {
10074 if (!GOT_symbol)
10075 {
10076 if (symbol_find (name))
10077 as_bad (_("GOT already in symbol table"));
10078 GOT_symbol = symbol_new (name, undefined_section,
10079 (valueT) 0, &zero_address_frag);
10080 };
10081 return GOT_symbol;
10082 }
10083 return 0;
10084 }
10085
10086 /* Round up a section size to the appropriate boundary. */
10087
10088 valueT
10089 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10090 {
10091 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10092 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10093 {
10094 /* For a.out, force the section size to be aligned. If we don't do
10095 this, BFD will align it for us, but it will not write out the
10096 final bytes of the section. This may be a bug in BFD, but it is
10097 easier to fix it here since that is how the other a.out targets
10098 work. */
10099 int align;
10100
10101 align = bfd_get_section_alignment (stdoutput, segment);
10102 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10103 }
10104 #endif
10105
10106 return size;
10107 }
10108
10109 /* On the i386, PC-relative offsets are relative to the start of the
10110 next instruction. That is, the address of the offset, plus its
10111 size, since the offset is always the last part of the insn. */
10112
10113 long
10114 md_pcrel_from (fixS *fixP)
10115 {
10116 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10117 }
10118
10119 #ifndef I386COFF
10120
10121 static void
10122 s_bss (int ignore ATTRIBUTE_UNUSED)
10123 {
10124 int temp;
10125
10126 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10127 if (IS_ELF)
10128 obj_elf_section_change_hook ();
10129 #endif
10130 temp = get_absolute_expression ();
10131 subseg_set (bss_section, (subsegT) temp);
10132 demand_empty_rest_of_line ();
10133 }
10134
10135 #endif
10136
10137 void
10138 i386_validate_fix (fixS *fixp)
10139 {
10140 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10141 {
10142 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10143 {
10144 if (!object_64bit)
10145 abort ();
10146 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10147 }
10148 else
10149 {
10150 if (!object_64bit)
10151 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10152 else
10153 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10154 }
10155 fixp->fx_subsy = 0;
10156 }
10157 }
10158
/* Translate the internal fixup FIXP into a BFD relocation entry for
   SECTION.  Returns a freshly allocated arelent, or NULL when the
   fixup was fully resolved here (size relocations against local
   symbols).  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through: a size relocation against a global symbol is
	 emitted unchanged, like the relocation types below.  */

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These pass through to BFD unchanged.  */
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through: a PC-relative 32S fixup is sized like any
	 other PC-relative fixup in the default case.  */
    default:
      /* Pick a generic relocation based solely on the fixup's size
	 and whether it is PC-relative.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL;  break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8;  break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ itself becomes a
     GOTPC relocation.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  /* Likewise for a 64-bit reference to _GLOBAL_OFFSET_TABLE_.  */
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 cannot represent 64-bit-wide relocations; diagnose but
	 keep going so more errors can be reported.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these PC-relative GOT/PLT/TLS relocations the addend
	     is biased by the fixup size rather than the PC.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
10372
10373 #include "tc-i386-intel.c"
10374
10375 void
10376 tc_x86_parse_to_dw2regnum (expressionS *exp)
10377 {
10378 int saved_naked_reg;
10379 char saved_register_dot;
10380
10381 saved_naked_reg = allow_naked_reg;
10382 allow_naked_reg = 1;
10383 saved_register_dot = register_chars['.'];
10384 register_chars['.'] = '.';
10385 allow_pseudo_reg = 1;
10386 expression_and_evaluate (exp);
10387 allow_pseudo_reg = 0;
10388 register_chars['.'] = saved_register_dot;
10389 allow_naked_reg = saved_naked_reg;
10390
10391 if (exp->X_op == O_register && exp->X_add_number >= 0)
10392 {
10393 if ((addressT) exp->X_add_number < i386_regtab_size)
10394 {
10395 exp->X_op = O_constant;
10396 exp->X_add_number = i386_regtab[exp->X_add_number]
10397 .dw2_regnum[flag_code >> 1];
10398 }
10399 else
10400 exp->X_op = O_illegal;
10401 }
10402 }
10403
10404 void
10405 tc_x86_frame_initial_instructions (void)
10406 {
10407 static unsigned int sp_regno[2];
10408
10409 if (!sp_regno[flag_code >> 1])
10410 {
10411 char *saved_input = input_line_pointer;
10412 char sp[][4] = {"esp", "rsp"};
10413 expressionS exp;
10414
10415 input_line_pointer = sp[flag_code >> 1];
10416 tc_x86_parse_to_dw2regnum (&exp);
10417 gas_assert (exp.X_op == O_constant);
10418 sp_regno[flag_code >> 1] = exp.X_add_number;
10419 input_line_pointer = saved_input;
10420 }
10421
10422 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10423 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
10424 }
10425
10426 int
10427 x86_dwarf2_addr_size (void)
10428 {
10429 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10430 if (x86_elf_abi == X86_64_X32_ABI)
10431 return 4;
10432 #endif
10433 return bfd_arch_bits_per_address (stdoutput) / 8;
10434 }
10435
10436 int
10437 i386_elf_section_type (const char *str, size_t len)
10438 {
10439 if (flag_code == CODE_64BIT
10440 && len == sizeof ("unwind") - 1
10441 && strncmp (str, "unwind", 6) == 0)
10442 return SHT_X86_64_UNWIND;
10443
10444 return -1;
10445 }
10446
#ifdef TE_SOLARIS
/* On Solaris, give the 64-bit .eh_frame section the x86-64 unwind
   section type so the native tools recognize it.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif
10455
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL,
   as used in PE DWARF2 debug information.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS secrel;

  secrel.X_op = O_secrel;
  secrel.X_add_number = 0;
  secrel.X_add_symbol = symbol;
  emit_expr (&secrel, size);
}
#endif
10468
10469 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10470 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10471
10472 bfd_vma
10473 x86_64_section_letter (int letter, char **ptr_msg)
10474 {
10475 if (flag_code == CODE_64BIT)
10476 {
10477 if (letter == 'l')
10478 return SHF_X86_64_LARGE;
10479
10480 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10481 }
10482 else
10483 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10484 return -1;
10485 }
10486
10487 bfd_vma
10488 x86_64_section_word (char *str, size_t len)
10489 {
10490 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10491 return SHF_X86_64_LARGE;
10492
10493 return -1;
10494 }
10495
/* Implement the .largecomm pseudo-op.  In 64-bit mode the common
   symbol is placed in the large common section (backed locally by
   .lbss); otherwise fall back to an ordinary .comm with a warning.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Create the .lbss section once, and remember the globals we
	 must restore after redirecting s_comm_internal.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new changed the current section; switch back.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily point the common/bss machinery at the large
	 sections so s_comm_internal allocates the symbol there.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
10535 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.2424 seconds and 5 git commands to generate.