Add SHF_COMPRESSED support to gas and objcopy
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2015 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifdef TE_LINUX
37 /* Default to compress debug sections for Linux. */
38 enum compressed_debug_section_type flag_compress_debug
39 = COMPRESS_DEBUG_ZLIB;
40 #endif
41
42 #ifndef REGISTER_WARNINGS
43 #define REGISTER_WARNINGS 1
44 #endif
45
46 #ifndef INFER_ADDR_PREFIX
47 #define INFER_ADDR_PREFIX 1
48 #endif
49
50 #ifndef DEFAULT_ARCH
51 #define DEFAULT_ARCH "i386"
52 #endif
53
54 #ifndef INLINE
55 #if __GNUC__ >= 2
56 #define INLINE __inline__
57 #else
58 #define INLINE
59 #endif
60 #endif
61
62 /* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT really is an
64 instruction, and so must come before any prefixes.
65 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
66 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
67 #define WAIT_PREFIX 0
68 #define SEG_PREFIX 1
69 #define ADDR_PREFIX 2
70 #define DATA_PREFIX 3
71 #define REP_PREFIX 4
72 #define HLE_PREFIX REP_PREFIX
73 #define BND_PREFIX REP_PREFIX
74 #define LOCK_PREFIX 5
75 #define REX_PREFIX 6 /* must come last. */
76 #define MAX_PREFIXES 7 /* max prefixes per opcode */
77
78 /* we define the syntax here (modulo base,index,scale syntax) */
79 #define REGISTER_PREFIX '%'
80 #define IMMEDIATE_PREFIX '$'
81 #define ABSOLUTE_PREFIX '*'
82
83 /* these are the instruction mnemonic suffixes in AT&T syntax or
84 memory operand size in Intel syntax. */
85 #define WORD_MNEM_SUFFIX 'w'
86 #define BYTE_MNEM_SUFFIX 'b'
87 #define SHORT_MNEM_SUFFIX 's'
88 #define LONG_MNEM_SUFFIX 'l'
89 #define QWORD_MNEM_SUFFIX 'q'
90 #define XMMWORD_MNEM_SUFFIX 'x'
91 #define YMMWORD_MNEM_SUFFIX 'y'
92 #define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
95 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
96
97 #define END_OF_INSN '\0'
98
99 /*
100 'templates' is for grouping together 'template' structures for opcodes
101 of the same name. This is only used for storing the insns in the grand
102 ole hash table of insns.
103 The templates themselves start at START and range up to (but not including)
104 END.
105 */
106 typedef struct
107 {
108 const insn_template *start;
109 const insn_template *end;
110 }
111 templates;
112
113 /* 386 operand encoding bytes: see 386 book for details of this. */
114 typedef struct
115 {
116 unsigned int regmem; /* codes register or memory operand */
117 unsigned int reg; /* codes register operand (or extended opcode) */
118 unsigned int mode; /* how to interpret regmem & reg */
119 }
120 modrm_byte;
121
122 /* x86-64 extension prefix. */
123 typedef int rex_byte;
124
125 /* 386 opcode byte to code indirect addressing. */
126 typedef struct
127 {
128 unsigned base;
129 unsigned index;
130 unsigned scale;
131 }
132 sib_byte;
133
134 /* x86 arch names, types and features */
135 typedef struct
136 {
137 const char *name; /* arch name */
138 unsigned int len; /* arch string length */
139 enum processor_type type; /* arch type */
140 i386_cpu_flags flags; /* cpu feature flags */
141 unsigned int skip; /* show_arch should skip this. */
142 unsigned int negated; /* turn off indicated flags. */
143 }
144 arch_entry;
145
146 static void update_code_flag (int, int);
147 static void set_code_flag (int);
148 static void set_16bit_gcc_code_flag (int);
149 static void set_intel_syntax (int);
150 static void set_intel_mnemonic (int);
151 static void set_allow_index_reg (int);
152 static void set_check (int);
153 static void set_cpu_arch (int);
154 #ifdef TE_PE
155 static void pe_directive_secrel (int);
156 #endif
157 static void signed_cons (int);
158 static char *output_invalid (int c);
159 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
160 const char *);
161 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
162 const char *);
163 static int i386_att_operand (char *);
164 static int i386_intel_operand (char *, int);
165 static int i386_intel_simplify (expressionS *);
166 static int i386_intel_parse_name (const char *, expressionS *);
167 static const reg_entry *parse_register (char *, char **);
168 static char *parse_insn (char *, char *);
169 static char *parse_operands (char *, const char *);
170 static void swap_operands (void);
171 static void swap_2_operands (int, int);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template *match_template (void);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const seg_entry *build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS *, offsetT);
186 static void output_disp (fragS *, offsetT);
187 #ifndef I386COFF
188 static void s_bss (int);
189 #endif
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED);
192 #endif
193
194 static const char *default_arch = DEFAULT_ARCH;
195
196 /* This struct describes rounding control and SAE in the instruction. */
197 struct RC_Operation
198 {
199 enum rc_type
200 {
201 rne = 0,
202 rd,
203 ru,
204 rz,
205 saeonly
206 } type;
207 int operand;
208 };
209
210 static struct RC_Operation rc_op;
211
212 /* The struct describes masking, applied to OPERAND in the instruction.
213 MASK is a pointer to the corresponding mask register. ZEROING tells
214 whether merging or zeroing mask is used. */
215 struct Mask_Operation
216 {
217 const reg_entry *mask;
218 unsigned int zeroing;
219 /* The operand where this operation is associated. */
220 int operand;
221 };
222
223 static struct Mask_Operation mask_op;
224
225 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
226 broadcast factor. */
227 struct Broadcast_Operation
228 {
229 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
230 int type;
231
232 /* Index of broadcasted operand. */
233 int operand;
234 };
235
236 static struct Broadcast_Operation broadcast_op;
237
238 /* VEX prefix. */
239 typedef struct
240 {
241 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
242 unsigned char bytes[4];
243 unsigned int length;
244 /* Destination or source register specifier. */
245 const reg_entry *register_specifier;
246 } vex_prefix;
247
248 /* 'md_assemble ()' gathers together information and puts it into a
249 i386_insn. */
250
251 union i386_op
252 {
253 expressionS *disps;
254 expressionS *imms;
255 const reg_entry *regs;
256 };
257
258 enum i386_error
259 {
260 operand_size_mismatch,
261 operand_type_mismatch,
262 register_type_mismatch,
263 number_of_operands_mismatch,
264 invalid_instruction_suffix,
265 bad_imm4,
266 old_gcc_only,
267 unsupported_with_intel_mnemonic,
268 unsupported_syntax,
269 unsupported,
270 invalid_vsib_address,
271 invalid_vector_register_set,
272 unsupported_vector_index_register,
273 unsupported_broadcast,
274 broadcast_not_on_src_operand,
275 broadcast_needed,
276 unsupported_masking,
277 mask_not_on_destination,
278 no_default_mask,
279 unsupported_rc_sae,
280 rc_sae_operand_not_last_imm,
281 invalid_register_operand,
282 try_vector_disp8
283 };
284
285 struct _i386_insn
286 {
  /* TM holds the template for the insn we're currently assembling.  */
288 insn_template tm;
289
290 /* SUFFIX holds the instruction size suffix for byte, word, dword
291 or qword, if given. */
292 char suffix;
293
294 /* OPERANDS gives the number of given operands. */
295 unsigned int operands;
296
297 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
298 of given register, displacement, memory operands and immediate
299 operands. */
300 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
301
302 /* TYPES [i] is the type (see above #defines) which tells us how to
303 use OP[i] for the corresponding operand. */
304 i386_operand_type types[MAX_OPERANDS];
305
306 /* Displacement expression, immediate expression, or register for each
307 operand. */
308 union i386_op op[MAX_OPERANDS];
309
310 /* Flags for operands. */
311 unsigned int flags[MAX_OPERANDS];
312 #define Operand_PCrel 1
313
314 /* Relocation type for operand */
315 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
316
317 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
318 the base index byte below. */
319 const reg_entry *base_reg;
320 const reg_entry *index_reg;
321 unsigned int log2_scale_factor;
322
323 /* SEG gives the seg_entries of this insn. They are zero unless
324 explicit segment overrides are given. */
325 const seg_entry *seg[2];
326
327 /* PREFIX holds all the given prefix opcodes (usually null).
328 PREFIXES is the number of prefix opcodes. */
329 unsigned int prefixes;
330 unsigned char prefix[MAX_PREFIXES];
331
332 /* RM and SIB are the modrm byte and the sib byte where the
333 addressing modes of this insn are encoded. */
334 modrm_byte rm;
335 rex_byte rex;
336 rex_byte vrex;
337 sib_byte sib;
338 vex_prefix vex;
339
340 /* Masking attributes. */
341 struct Mask_Operation *mask;
342
343 /* Rounding control and SAE attributes. */
344 struct RC_Operation *rounding;
345
346 /* Broadcasting attributes. */
347 struct Broadcast_Operation *broadcast;
348
349 /* Compressed disp8*N attribute. */
350 unsigned int memshift;
351
352 /* Swap operand in encoding. */
353 unsigned int swap_operand;
354
355 /* Prefer 8bit or 32bit displacement in encoding. */
356 enum
357 {
358 disp_encoding_default = 0,
359 disp_encoding_8bit,
360 disp_encoding_32bit
361 } disp_encoding;
362
363 /* REP prefix. */
364 const char *rep_prefix;
365
366 /* HLE prefix. */
367 const char *hle_prefix;
368
369 /* Have BND prefix. */
370 const char *bnd_prefix;
371
372 /* Need VREX to support upper 16 registers. */
373 int need_vrex;
374
375 /* Error message. */
376 enum i386_error error;
377 };
378
379 typedef struct _i386_insn i386_insn;
380
381 /* Link RC type with corresponding string, that'll be looked for in
382 asm. */
383 struct RC_name
384 {
385 enum rc_type type;
386 const char *name;
387 unsigned int len;
388 };
389
390 static const struct RC_name RC_NamesTable[] =
391 {
392 { rne, STRING_COMMA_LEN ("rn-sae") },
393 { rd, STRING_COMMA_LEN ("rd-sae") },
394 { ru, STRING_COMMA_LEN ("ru-sae") },
395 { rz, STRING_COMMA_LEN ("rz-sae") },
396 { saeonly, STRING_COMMA_LEN ("sae") },
397 };
398
399 /* List of chars besides those in app.c:symbol_chars that can start an
400 operand. Used to prevent the scrubber eating vital white-space. */
401 const char extra_symbol_chars[] = "*%-([{"
402 #ifdef LEX_AT
403 "@"
404 #endif
405 #ifdef LEX_QM
406 "?"
407 #endif
408 ;
409
410 #if (defined (TE_I386AIX) \
411 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
412 && !defined (TE_GNU) \
413 && !defined (TE_LINUX) \
414 && !defined (TE_NACL) \
415 && !defined (TE_NETWARE) \
416 && !defined (TE_FreeBSD) \
417 && !defined (TE_DragonFly) \
418 && !defined (TE_NetBSD)))
419 /* This array holds the chars that always start a comment. If the
420 pre-processor is disabled, these aren't very useful. The option
421 --divide will remove '/' from this list. */
422 const char *i386_comment_chars = "#/";
423 #define SVR4_COMMENT_CHARS 1
424 #define PREFIX_SEPARATOR '\\'
425
426 #else
427 const char *i386_comment_chars = "#";
428 #define PREFIX_SEPARATOR '/'
429 #endif
430
431 /* This array holds the chars that only start a comment at the beginning of
432 a line. If the line seems to have the form '# 123 filename'
433 .line and .file directives will appear in the pre-processed output.
434 Note that input_file.c hand checks for '#' at the beginning of the
435 first line of the input file. This is because the compiler outputs
436 #NO_APP at the beginning of its output.
437 Also note that comments started like this one will always work if
438 '/' isn't otherwise defined. */
439 const char line_comment_chars[] = "#/";
440
441 const char line_separator_chars[] = ";";
442
443 /* Chars that can be used to separate mant from exp in floating point
444 nums. */
445 const char EXP_CHARS[] = "eE";
446
447 /* Chars that mean this number is a floating point constant
448 As in 0f12.456
449 or 0d1.2345e12. */
450 const char FLT_CHARS[] = "fFdDxX";
451
452 /* Tables for lexical analysis. */
453 static char mnemonic_chars[256];
454 static char register_chars[256];
455 static char operand_chars[256];
456 static char identifier_chars[256];
457 static char digit_chars[256];
458
459 /* Lexical macros. */
460 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
461 #define is_operand_char(x) (operand_chars[(unsigned char) x])
462 #define is_register_char(x) (register_chars[(unsigned char) x])
463 #define is_space_char(x) ((x) == ' ')
464 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
465 #define is_digit_char(x) (digit_chars[(unsigned char) x])
466
467 /* All non-digit non-letter characters that may occur in an operand. */
468 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
469
470 /* md_assemble() always leaves the strings it's passed unaltered. To
471 effect this we maintain a stack of saved characters that we've smashed
472 with '\0's (indicating end of strings for various sub-fields of the
473 assembler instruction). */
474 static char save_stack[32];
475 static char *save_stack_p;
476 #define END_STRING_AND_SAVE(s) \
477 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
478 #define RESTORE_END_STRING(s) \
479 do { *(s) = *--save_stack_p; } while (0)
480
481 /* The instruction we're assembling. */
482 static i386_insn i;
483
484 /* Possible templates for current insn. */
485 static const templates *current_templates;
486
487 /* Per instruction expressionS buffers: max displacements & immediates. */
488 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
489 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
490
491 /* Current operand we are working on. */
492 static int this_operand = -1;
493
494 /* We support four different modes. FLAG_CODE variable is used to distinguish
495 these. */
496
497 enum flag_code {
498 CODE_32BIT,
499 CODE_16BIT,
500 CODE_64BIT };
501
502 static enum flag_code flag_code;
503 static unsigned int object_64bit;
504 static unsigned int disallow_64bit_reloc;
505 static int use_rela_relocations = 0;
506
507 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
508 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
509 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
510
511 /* The ELF ABI to use. */
512 enum x86_elf_abi
513 {
514 I386_ABI,
515 X86_64_ABI,
516 X86_64_X32_ABI
517 };
518
519 static enum x86_elf_abi x86_elf_abi = I386_ABI;
520 #endif
521
522 #if defined (TE_PE) || defined (TE_PEP)
523 /* Use big object file format. */
524 static int use_big_obj = 0;
525 #endif
526
527 /* 1 for intel syntax,
528 0 if att syntax. */
529 static int intel_syntax = 0;
530
531 /* 1 for intel mnemonic,
532 0 if att mnemonic. */
533 static int intel_mnemonic = !SYSV386_COMPAT;
534
535 /* 1 if support old (<= 2.8.1) versions of gcc. */
536 static int old_gcc = OLDGCC_COMPAT;
537
538 /* 1 if pseudo registers are permitted. */
539 static int allow_pseudo_reg = 0;
540
541 /* 1 if register prefix % not required. */
542 static int allow_naked_reg = 0;
543
/* 1 if the assembler should add BND prefix for all control-transferring
545 instructions supporting it, even if this prefix wasn't specified
546 explicitly. */
547 static int add_bnd_prefix = 0;
548
/* 1 if pseudo index register, eiz/riz, is allowed.  */
550 static int allow_index_reg = 0;
551
552 /* 1 if the assembler should ignore LOCK prefix, even if it was
553 specified explicitly. */
554 static int omit_lock_prefix = 0;
555
556 static enum check_kind
557 {
558 check_none = 0,
559 check_warning,
560 check_error
561 }
562 sse_check, operand_check = check_warning;
563
564 /* Register prefix used for error message. */
565 static const char *register_prefix = "%";
566
567 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
568 leave, push, and pop instructions so that gcc has the same stack
569 frame as in 32 bit mode. */
570 static char stackop_size = '\0';
571
572 /* Non-zero to optimize code alignment. */
573 int optimize_align_code = 1;
574
575 /* Non-zero to quieten some warnings. */
576 static int quiet_warnings = 0;
577
578 /* CPU name. */
579 static const char *cpu_arch_name = NULL;
580 static char *cpu_sub_arch_name = NULL;
581
582 /* CPU feature flags. */
583 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
584
585 /* If we have selected a cpu we are generating instructions for. */
586 static int cpu_arch_tune_set = 0;
587
588 /* Cpu we are generating instructions for. */
589 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
590
591 /* CPU feature flags of cpu we are generating instructions for. */
592 static i386_cpu_flags cpu_arch_tune_flags;
593
594 /* CPU instruction set architecture used. */
595 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
596
597 /* CPU feature flags of instruction set architecture used. */
598 i386_cpu_flags cpu_arch_isa_flags;
599
600 /* If set, conditional jumps are not automatically promoted to handle
601 larger than a byte offset. */
602 static unsigned int no_cond_jump_promotion = 0;
603
604 /* Encode SSE instructions with VEX prefix. */
605 static unsigned int sse2avx;
606
607 /* Encode scalar AVX instructions with specific vector length. */
608 static enum
609 {
610 vex128 = 0,
611 vex256
612 } avxscalar;
613
614 /* Encode scalar EVEX LIG instructions with specific vector length. */
615 static enum
616 {
617 evexl128 = 0,
618 evexl256,
619 evexl512
620 } evexlig;
621
622 /* Encode EVEX WIG instructions with specific evex.w. */
623 static enum
624 {
625 evexw0 = 0,
626 evexw1
627 } evexwig;
628
629 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
630 static enum rc_type evexrcig = rne;
631
632 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
633 static symbolS *GOT_symbol;
634
635 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
636 unsigned int x86_dwarf2_return_column;
637
638 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
639 int x86_cie_data_alignment;
640
641 /* Interface to relax_segment.
642 There are 3 major relax states for 386 jump insns because the
643 different types of jumps add different sizes to frags when we're
644 figuring out what sort of jump to choose to reach a given label. */
645
646 /* Types. */
647 #define UNCOND_JUMP 0
648 #define COND_JUMP 1
649 #define COND_JUMP86 2
650
651 /* Sizes. */
652 #define CODE16 1
653 #define SMALL 0
654 #define SMALL16 (SMALL | CODE16)
655 #define BIG 2
656 #define BIG16 (BIG | CODE16)
657
658 #ifndef INLINE
659 #ifdef __GNUC__
660 #define INLINE __inline__
661 #else
662 #define INLINE
663 #endif
664 #endif
665
666 #define ENCODE_RELAX_STATE(type, size) \
667 ((relax_substateT) (((type) << 2) | (size)))
668 #define TYPE_FROM_RELAX_STATE(s) \
669 ((s) >> 2)
670 #define DISP_SIZE_FROM_RELAX_STATE(s) \
671 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
672
673 /* This table is used by relax_frag to promote short jumps to long
674 ones where necessary. SMALL (short) jumps may be promoted to BIG
675 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
676 don't allow a short jump in a 32 bit code segment to be promoted to
677 a 16 bit offset jump because it's slower (requires data size
678 prefix), and doesn't work, unless the destination is in the bottom
679 64k of the code segment (The top 16 bits of eip are zeroed). */
680
const relax_typeS md_relax_table[] =
{
  /* The fields are:
     1) most positive reach of this state,
     2) most negative reach of this state,
     3) how many bytes this mode will have in the variable part of the frag
     4) which index into the table to try if we can't fit into this one.  */

  /* UNCOND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
  /* dword jmp adds 4 bytes to frag:
     0 extra opcode bytes, 4 displacement bytes.  */
  {0, 0, 4, 0},
  /* word jmp adds 2 bytes to frag:
     0 extra opcode bytes, 2 displacement bytes.  */
  {0, 0, 2, 0},

  /* COND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
  /* dword conditionals adds 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 3 bytes to frag:
     1 extra opcode byte, 2 displacement bytes.  */
  {0, 0, 3, 0},

  /* COND_JUMP86 states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
  /* dword conditionals adds 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 4 bytes to frag:
     1 displacement byte and a 3 byte long branch insn.  */
  {0, 0, 4, 0}
};
719
720 static const arch_entry cpu_arch[] =
721 {
722 /* Do not replace the first two entries - i386_target_format()
723 relies on them being there in this order. */
724 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
725 CPU_GENERIC32_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
727 CPU_GENERIC64_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
729 CPU_NONE_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
731 CPU_I186_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
733 CPU_I286_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
735 CPU_I386_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
737 CPU_I486_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
739 CPU_I586_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
741 CPU_I686_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
743 CPU_I586_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
745 CPU_PENTIUMPRO_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
747 CPU_P2_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
749 CPU_P3_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
751 CPU_P4_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
753 CPU_CORE_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
755 CPU_NOCONA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
757 CPU_CORE_FLAGS, 1, 0 },
758 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
759 CPU_CORE_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
761 CPU_CORE2_FLAGS, 1, 0 },
762 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
763 CPU_CORE2_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
765 CPU_COREI7_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
767 CPU_L1OM_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
769 CPU_K1OM_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
771 CPU_K6_FLAGS, 0, 0 },
772 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
773 CPU_K6_2_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
775 CPU_ATHLON_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
777 CPU_K8_FLAGS, 1, 0 },
778 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
779 CPU_K8_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
781 CPU_K8_FLAGS, 0, 0 },
782 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
783 CPU_AMDFAM10_FLAGS, 0, 0 },
784 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
785 CPU_BDVER1_FLAGS, 0, 0 },
786 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
787 CPU_BDVER2_FLAGS, 0, 0 },
788 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
789 CPU_BDVER3_FLAGS, 0, 0 },
790 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
791 CPU_BDVER4_FLAGS, 0, 0 },
792 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
793 CPU_ZNVER1_FLAGS, 0, 0 },
794 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
795 CPU_BTVER1_FLAGS, 0, 0 },
796 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
797 CPU_BTVER2_FLAGS, 0, 0 },
798 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
799 CPU_8087_FLAGS, 0, 0 },
800 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
801 CPU_287_FLAGS, 0, 0 },
802 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
803 CPU_387_FLAGS, 0, 0 },
804 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
805 CPU_ANY87_FLAGS, 0, 1 },
806 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
807 CPU_MMX_FLAGS, 0, 0 },
808 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
809 CPU_3DNOWA_FLAGS, 0, 1 },
810 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
811 CPU_SSE_FLAGS, 0, 0 },
812 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
813 CPU_SSE2_FLAGS, 0, 0 },
814 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
815 CPU_SSE3_FLAGS, 0, 0 },
816 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
817 CPU_SSSE3_FLAGS, 0, 0 },
818 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
819 CPU_SSE4_1_FLAGS, 0, 0 },
820 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
821 CPU_SSE4_2_FLAGS, 0, 0 },
822 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
823 CPU_SSE4_2_FLAGS, 0, 0 },
824 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
825 CPU_ANY_SSE_FLAGS, 0, 1 },
826 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
827 CPU_AVX_FLAGS, 0, 0 },
828 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
829 CPU_AVX2_FLAGS, 0, 0 },
830 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
831 CPU_AVX512F_FLAGS, 0, 0 },
832 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
833 CPU_AVX512CD_FLAGS, 0, 0 },
834 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
835 CPU_AVX512ER_FLAGS, 0, 0 },
836 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
837 CPU_AVX512PF_FLAGS, 0, 0 },
838 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
839 CPU_AVX512DQ_FLAGS, 0, 0 },
840 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
841 CPU_AVX512BW_FLAGS, 0, 0 },
842 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
843 CPU_AVX512VL_FLAGS, 0, 0 },
844 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
845 CPU_ANY_AVX_FLAGS, 0, 1 },
846 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
847 CPU_VMX_FLAGS, 0, 0 },
848 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
849 CPU_VMFUNC_FLAGS, 0, 0 },
850 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
851 CPU_SMX_FLAGS, 0, 0 },
852 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
853 CPU_XSAVE_FLAGS, 0, 0 },
854 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
855 CPU_XSAVEOPT_FLAGS, 0, 0 },
856 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
857 CPU_XSAVEC_FLAGS, 0, 0 },
858 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
859 CPU_XSAVES_FLAGS, 0, 0 },
860 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
861 CPU_AES_FLAGS, 0, 0 },
862 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
863 CPU_PCLMUL_FLAGS, 0, 0 },
864 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
865 CPU_PCLMUL_FLAGS, 1, 0 },
866 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
867 CPU_FSGSBASE_FLAGS, 0, 0 },
868 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
869 CPU_RDRND_FLAGS, 0, 0 },
870 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
871 CPU_F16C_FLAGS, 0, 0 },
872 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
873 CPU_BMI2_FLAGS, 0, 0 },
874 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
875 CPU_FMA_FLAGS, 0, 0 },
876 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
877 CPU_FMA4_FLAGS, 0, 0 },
878 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
879 CPU_XOP_FLAGS, 0, 0 },
880 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
881 CPU_LWP_FLAGS, 0, 0 },
882 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
883 CPU_MOVBE_FLAGS, 0, 0 },
884 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
885 CPU_CX16_FLAGS, 0, 0 },
886 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
887 CPU_EPT_FLAGS, 0, 0 },
888 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
889 CPU_LZCNT_FLAGS, 0, 0 },
890 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
891 CPU_HLE_FLAGS, 0, 0 },
892 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
893 CPU_RTM_FLAGS, 0, 0 },
894 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
895 CPU_INVPCID_FLAGS, 0, 0 },
896 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
897 CPU_CLFLUSH_FLAGS, 0, 0 },
898 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
899 CPU_NOP_FLAGS, 0, 0 },
900 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
901 CPU_SYSCALL_FLAGS, 0, 0 },
902 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
903 CPU_RDTSCP_FLAGS, 0, 0 },
904 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
905 CPU_3DNOW_FLAGS, 0, 0 },
906 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
907 CPU_3DNOWA_FLAGS, 0, 0 },
908 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
909 CPU_PADLOCK_FLAGS, 0, 0 },
910 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
911 CPU_SVME_FLAGS, 1, 0 },
912 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
913 CPU_SVME_FLAGS, 0, 0 },
914 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
915 CPU_SSE4A_FLAGS, 0, 0 },
916 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
917 CPU_ABM_FLAGS, 0, 0 },
918 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
919 CPU_BMI_FLAGS, 0, 0 },
920 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
921 CPU_TBM_FLAGS, 0, 0 },
922 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
923 CPU_ADX_FLAGS, 0, 0 },
924 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
925 CPU_RDSEED_FLAGS, 0, 0 },
926 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
927 CPU_PRFCHW_FLAGS, 0, 0 },
928 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
929 CPU_SMAP_FLAGS, 0, 0 },
930 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
931 CPU_MPX_FLAGS, 0, 0 },
932 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
933 CPU_SHA_FLAGS, 0, 0 },
934 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
935 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
936 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
937 CPU_PREFETCHWT1_FLAGS, 0, 0 },
938 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
939 CPU_SE1_FLAGS, 0, 0 },
940 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
941 CPU_CLWB_FLAGS, 0, 0 },
942 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
943 CPU_PCOMMIT_FLAGS, 0, 0 },
944 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
945 CPU_AVX512IFMA_FLAGS, 0, 0 },
946 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
947 CPU_AVX512VBMI_FLAGS, 0, 0 },
948 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
949 CPU_CLZERO_FLAGS, 0, 0 },
950 };
951
952 #ifdef I386COFF
953 /* Like s_lcomm_internal in gas/read.c but the alignment string
954 is allowed to be optional. */
955
static symbolS *
pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
{
  addressT align = 0;

  SKIP_WHITESPACE ();

  /* Consume an explicit alignment argument only when the caller asked
     for one and the next token is the separating comma.  */
  if (needs_align
      && *input_line_pointer == ',')
    {
      align = parse_align (needs_align - 1);

      /* parse_align reports its own error; propagate the failure.  */
      if (align == (addressT) -1)
	return NULL;
    }
  else
    {
      /* No alignment given: pick a natural log2 alignment from SIZE.  */
      if (size >= 8)
	align = 3;
      else if (size >= 4)
	align = 2;
      else if (size >= 2)
	align = 1;
      else
	align = 0;
    }

  bss_alloc (symbolP, size, align);
  return symbolP;
}
986
/* Handler for the .lcomm pseudo-op on COFF targets.  NEEDS_ALIGN is
   doubled before forwarding — pe_lcomm_internal later undoes this via
   "needs_align - 1" when parsing the alignment argument.  */
static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
992 #endif
993
/* Target-specific pseudo-op table; gas merges this with the generic
   pseudo-op set at startup.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  /* On these targets .align takes a power-of-two argument.  */
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  /* Accepted for compatibility; both are no-ops.  */
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
1038
/* For interface with expression (); defined in expr.c.  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup (mnemonic -> templates).  */
static struct hash_control *op_hash;

/* Hash table for register lookup (name -> reg_entry).  */
static struct hash_control *reg_hash;
1047 \f
/* Fill the variable part of frag FRAGP with COUNT bytes of padding,
   choosing the most efficient no-op encodings the selected CPU
   supports.  Called from the generic alignment machinery.  */

void
i386_align_code (fragS *fragP, int count)
{
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const char f32_1[] =
    {0x90};				/* nop			*/
  static const char f32_2[] =
    {0x66,0x90};			/* xchg %ax,%ax */
  static const char f32_3[] =
    {0x8d,0x76,0x00};			/* leal 0(%esi),%esi	*/
  static const char f32_4[] =
    {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
  static const char f32_5[] =
    {0x90,				/* nop			*/
     0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
  static const char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi	*/
  static const char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_8[] =
    {0x90,				/* nop			*/
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_9[] =
    {0x89,0xf6,				/* movl %esi,%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_10[] =
    {0x8d,0x76,0x00,			/* leal 0(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_11[] =
    {0x8d,0x74,0x26,0x00,		/* leal 0(%esi,1),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,	/* leal 0L(%esi),%esi	*/
     0x8d,0xbf,0x00,0x00,0x00,0x00};	/* leal 0L(%edi),%edi	*/
  static const char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,	/* leal 0L(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f16_3[] =
    {0x8d,0x74,0x00};			/* lea 0(%esi),%esi	*/
  static const char f16_4[] =
    {0x8d,0xb4,0x00,0x00};		/* lea 0w(%si),%si	*/
  static const char f16_5[] =
    {0x90,				/* nop			*/
     0x8d,0xb4,0x00,0x00};		/* lea 0w(%si),%si	*/
  static const char f16_6[] =
    {0x89,0xf6,				/* mov %si,%si		*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char f16_7[] =
    {0x8d,0x74,0x00,			/* lea 0(%si),%si	*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char f16_8[] =
    {0x8d,0xb4,0x00,0x00,		/* lea 0w(%si),%si	*/
     0x8d,0xbd,0x00,0x00};		/* lea 0w(%di),%di	*/
  static const char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  /* Pattern tables indexed by (pad length - 1).  */
  static const char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  };
  static const char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  };
  /* nopl (%[re]ax) */
  static const char alt_3[] =
    {0x0f,0x1f,0x00};
  /* nopl 0(%[re]ax) */
  static const char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* Multi-byte 0F 1F NOP patterns for CPUs with CpuNop.  */
  static const char *const alt_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10
  };

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
    return;

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
     PROCESSOR_GENERIC32, f32_patt will be used.
     2. For the rest, alt_patt will be used.

     When -mtune= isn't used, alt_patt will be used if
     cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags. */

  if (flag_code == CODE_16BIT)
    {
      if (count > 8)
	{
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  jump_31, count);
	  /* Adjust jump offset. */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	}
      else
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
    }
  else
    {
      const char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops. */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	      patt = alt_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN. */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops. */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	     its offset.   */
	  int limit;

	  /* For 64bit, the limit is 3 bytes. */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	    limit = 3;
	  else
	    limit = 15;
	  if (count < limit)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt[count - 1], count);
	  else
	    {
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      jump_31, count);
	      /* Adjust jump offset. */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	    }
	}
      else
	{
	  /* Maximum length of an instruction is 10 byte.  If the
	     padding is greater than 10 bytes and we don't use jump,
	     we have to break it into smaller pieces. */
	  int padding = count;
	  while (padding > 10)
	    {
	      padding -= 10;
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
		      patt [9], 10);
	    }

	  if (padding)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt [padding - 1], padding);
	}
    }
  fragP->fr_var = count;
}
1312
/* Return 1 if every bit in operand type *X is clear.  The switch
   cases deliberately fall through, checking one array word each.  */

static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1330
/* Set every array word of operand type *X to V.  The switch cases
   deliberately fall through.  */

static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1347
/* Return 1 if operand types *X and *Y are identical.  The switch
   cases deliberately fall through.  */

static INLINE int
operand_type_equal (const union i386_operand_type *x,
		    const union i386_operand_type *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1367
/* Return 1 if every bit in CPU flag set *X is clear.  The switch
   cases deliberately fall through.  */

static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1385
/* Return 1 if CPU flag sets *X and *Y are identical.  The switch
   cases deliberately fall through.  */

static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
		 const union i386_cpu_flags *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1405
1406 static INLINE int
1407 cpu_flags_check_cpu64 (i386_cpu_flags f)
1408 {
1409 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1410 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1411 }
1412
/* Return X & Y, computed word-by-word over the flag array.  The
   switch cases deliberately fall through.  */

static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1430
/* Return X | Y, computed word-by-word over the flag array.  The
   switch cases deliberately fall through.  */

static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1448
/* Return X & ~Y (clear in X every flag set in Y), computed
   word-by-word.  The switch cases deliberately fall through.  */

static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= ~y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= ~y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= ~y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1466
1467 #define CPU_FLAGS_ARCH_MATCH 0x1
1468 #define CPU_FLAGS_64BIT_MATCH 0x2
1469 #define CPU_FLAGS_AES_MATCH 0x4
1470 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1471 #define CPU_FLAGS_AVX_MATCH 0x10
1472
1473 #define CPU_FLAGS_32BIT_MATCH \
1474 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1475 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1476 #define CPU_FLAGS_PERFECT_MATCH \
1477 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1478
1479 /* Return CPU flags match bits. */
1480
/* Return the CPU_FLAGS_* match bits for template T against the active
   architecture (cpu_arch_flags).  A result of CPU_FLAGS_PERFECT_MATCH
   means T is usable; partial results let callers report which feature
   (AES, PCLMUL, AVX) is missing.  */
static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* Mode bits were handled above; drop them before comparing
     feature flags.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1533
/* Return X & Y, computed word-by-word over the operand type array.
   The switch cases deliberately fall through.  */

static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1551
/* Return X | Y, computed word-by-word over the operand type array.
   The switch cases deliberately fall through.  */

static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1569
/* Return X ^ Y, computed word-by-word over the operand type array.
   The switch cases deliberately fall through.  */

static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] ^= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] ^= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] ^= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1587
/* Frequently used operand type constants, built from the generated
   OPERAND_TYPE_* initializers (opcodes/i386-init.h).  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1615
/* Operand classes tested by operand_type_check.  */
enum operand_type
{
  reg,		/* Any general register.  */
  imm,		/* Any immediate.  */
  disp,		/* Any displacement.  */
  anymem	/* Any memory operand: displacement or base/index.  */
};
1623
/* Return nonzero if operand type T belongs to class C.  */

static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      /* Memory means either a displacement or a base/index form.  */
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  return 0;
}
1664
1665 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1666 operand J for instruction template T. */
1667
1668 static INLINE int
1669 match_reg_size (const insn_template *t, unsigned int j)
1670 {
1671 return !((i.types[j].bitfield.byte
1672 && !t->operand_types[j].bitfield.byte)
1673 || (i.types[j].bitfield.word
1674 && !t->operand_types[j].bitfield.word)
1675 || (i.types[j].bitfield.dword
1676 && !t->operand_types[j].bitfield.dword)
1677 || (i.types[j].bitfield.qword
1678 && !t->operand_types[j].bitfield.qword));
1679 }
1680
1681 /* Return 1 if there is no conflict in any size on operand J for
1682 instruction template T. */
1683
1684 static INLINE int
1685 match_mem_size (const insn_template *t, unsigned int j)
1686 {
1687 return (match_reg_size (t, j)
1688 && !((i.types[j].bitfield.unspecified
1689 && !t->operand_types[j].bitfield.unspecified)
1690 || (i.types[j].bitfield.fword
1691 && !t->operand_types[j].bitfield.fword)
1692 || (i.types[j].bitfield.tbyte
1693 && !t->operand_types[j].bitfield.tbyte)
1694 || (i.types[j].bitfield.xmmword
1695 && !t->operand_types[j].bitfield.xmmword)
1696 || (i.types[j].bitfield.ymmword
1697 && !t->operand_types[j].bitfield.ymmword)
1698 || (i.types[j].bitfield.zmmword
1699 && !t->operand_types[j].bitfield.zmmword)));
1700 }
1701
1702 /* Return 1 if there is no size conflict on any operands for
1703 instruction template T. */
1704
static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* AnySize operands accept every width.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* The template cannot have its operands swapped; record the
	 failure reason for match_template's diagnostics.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse: D/FloatD templates may match with the two operands
     exchanged.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1763
/* Return 1 if the OVERLAP of a given operand type with a template
   operand type is a genuine match for the GIVEN operand.  On failure
   i.error is set for match_template's diagnostics.  */

static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Width and bookkeeping bits alone do not constitute a match; strip
     them before testing that some real operand kind remains.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  /* BaseIndex and JumpAbsolute must survive the intersection.  */
  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1792
1793 /* If given types g0 and g1 are registers they must be of the same type
1794 unless the expected operand type register overlap is null.
1795 Note that Acc in a template matches every size of reg. */
1796
static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Non-register operands trivially match.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register width on both sides: fine.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An Acc template operand matches a register of any size.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* Differing widths are acceptable when the two template operand
     types have no register size in common.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1843
1844 static INLINE unsigned int
1845 register_number (const reg_entry *r)
1846 {
1847 unsigned int nr = r->reg_num;
1848
1849 if (r->reg_flags & RegRex)
1850 nr += 8;
1851
1852 return nr;
1853 }
1854
1855 static INLINE unsigned int
1856 mode_from_disp_size (i386_operand_type t)
1857 {
1858 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1859 return 1;
1860 else if (t.bitfield.disp16
1861 || t.bitfield.disp32
1862 || t.bitfield.disp32s)
1863 return 2;
1864 else
1865 return 0;
1866 }
1867
1868 static INLINE int
1869 fits_in_signed_byte (addressT num)
1870 {
1871 return num + 0x80 <= 0xff;
1872 }
1873
1874 static INLINE int
1875 fits_in_unsigned_byte (addressT num)
1876 {
1877 return num <= 0xff;
1878 }
1879
1880 static INLINE int
1881 fits_in_unsigned_word (addressT num)
1882 {
1883 return num <= 0xffff;
1884 }
1885
1886 static INLINE int
1887 fits_in_signed_word (addressT num)
1888 {
1889 return num + 0x8000 <= 0xffff;
1890 }
1891
/* Return 1 iff NUM fits in a signed 32-bit value.  Without BFD64
   addressT is itself 32 bits, so everything fits.  */

static INLINE int
fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
  return 1;
#else
  return num + 0x80000000 <= 0xffffffff;
#endif
}			/* fits_in_signed_long() */
1901
/* Return 1 iff NUM fits in an unsigned 32-bit value.  Without BFD64
   addressT is itself 32 bits, so everything fits.  */

static INLINE int
fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
  return 1;
#else
  return num <= 0xffffffff;
#endif
}			/* fits_in_unsigned_long() */
1911
/* Return 1 iff NUM can be encoded as an EVEX compressed (vector)
   disp8: it must be a multiple of the current memory element size
   (1 << i.memshift) and fit in a signed byte after scaling.  */

static INLINE int
fits_in_vec_disp8 (offsetT num)
{
  int shift = i.memshift;
  unsigned int mask;

  /* i.memshift must have been computed by this point.  */
  if (shift == -1)
    abort ();

  mask = (1 << shift) - 1;

  /* Return 0 if NUM isn't properly aligned.  */
  if ((num & mask))
    return 0;

  /* Check if NUM will fit in 8bit after shift.  */
  return fits_in_signed_byte (num >> shift);
}
1930
1931 static INLINE int
1932 fits_in_imm4 (offsetT num)
1933 {
1934 return (num & 0xf) == num;
1935 }
1936
/* Return the set of immediate operand types NUM can be encoded as.
   Imm64 is always included; narrower encodings are added as NUM's
   range allows.  */

static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1990
/* Truncate VAL to SIZE bytes, warning when significant bits are
   lost.  Returns the masked value.  */

static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the dropped bits are a pure sign extension.  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2025
/* Result classes for add_prefix.  PREFIX_EXIST must be 0: add_prefix
   tests "if (ret)" to distinguish success from the duplicate case.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* A prefix of the same class was already present.  */
  PREFIX_LOCK,		/* LOCK prefix added.  */
  PREFIX_REP,		/* REP/REPNE prefix added.  */
  PREFIX_OTHER		/* Any other prefix added.  */
};
2033
2034 /* Returns
2035 a. PREFIX_EXIST if attempting to add a prefix where one from the
2036 same class already exists.
2037 b. PREFIX_LOCK if lock prefix is added.
2038 c. PREFIX_REP if rep/repne prefix is added.
2039 d. PREFIX_OTHER if other prefix is added.
2040 */
2041
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes are only merged in 64-bit mode; W conflicts with W,
     and any of R/X/B conflicts with any already-set R/X/B.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the opcode byte to its prefix slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* ret != 0 means no duplicate (PREFIX_EXIST is 0).  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2111
/* Switch to the 16/32/64-bit code size VALUE, updating cpu_arch_flags
   and diagnosing archs that do not support the requested mode.  With
   CHECK nonzero an unsupported mode is fatal, otherwise an error.  */

static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode requires long-mode support in the selected arch.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
2148
/* .code16/.code32/.code64 handler: non-fatal variant of
   update_code_flag.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2154
/* .code16gcc handler: 16-bit code, but stack operations default to a
   32-bit ('l') suffix, matching gcc's 16-bit output.  */
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2165
/* .intel_syntax/.att_syntax handler; SYNTAX_FLAG is 1 for Intel.
   An optional "prefix"/"noprefix" argument controls whether registers
   must carry the '%' prefix.  */
static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  /* No explicit argument: allow naked registers in Intel mode unless
     the object format reserves bare names for symbols.  */
  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2202
/* Pseudo-op handler for .intel_mnemonic / .att_mnemonic.  */

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2208
/* Pseudo-op handler for .allow_index_reg / .disallow_index_reg.  */

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2214
/* Pseudo-op handler for .operand_check (WHAT non-zero) and .sse_check
   (WHAT zero).  Parses a mandatory "none"/"warning"/"error" argument
   and stores the corresponding check_kind.  */

static void
set_check (int what)
{
  enum check_kind *kind;
  const char *str;

  if (what)
    {
      kind = &operand_check;
      str = "operand";
    }
  else
    {
      kind = &sse_check;
      str = "sse";
    }

  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
	*kind = check_none;
      else if (strcmp (string, "warning") == 0)
	*kind = check_warning;
      else if (strcmp (string, "error") == 0)
	*kind = check_error;
      else
	as_bad (_("bad argument to %s_check directive."), str);
      /* Restore the byte clobbered by get_symbol_end.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing argument for %s_check directive"), str);

  demand_empty_rest_of_line ();
}
2254
2255 static void
2256 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2257 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2258 {
2259 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2260 static const char *arch;
2261
2262 /* Intel LIOM is only supported on ELF. */
2263 if (!IS_ELF)
2264 return;
2265
2266 if (!arch)
2267 {
2268 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2269 use default_arch. */
2270 arch = cpu_arch_name;
2271 if (!arch)
2272 arch = default_arch;
2273 }
2274
2275 /* If we are targeting Intel L1OM, we must enable it. */
2276 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2277 || new_flag.bitfield.cpul1om)
2278 return;
2279
2280 /* If we are targeting Intel K1OM, we must enable it. */
2281 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2282 || new_flag.bitfield.cpuk1om)
2283 return;
2284
2285 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2286 #endif
2287 }
2288
/* Pseudo-op handler for .arch.  Accepts either a full architecture
   name (e.g. "i686"), which replaces cpu_arch_flags wholesale, or a
   feature extension starting with '.' (e.g. ".sse4.1"/".nosse4.1"),
   which ORs in (or masks out, for negated entries) the extension's
   flag bits.  An optional ",nojumps"/",jumps" modifier controls
   conditional-jump promotion.  */

static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* Full architecture name: replace flags and record
		     the new ISA, then fall out of the loop so the
		     optional modifier below can still be parsed.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* Feature extension (".xxx"): OR in, or for ".noxxx"
		 entries mask out, the extension's bits.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* The flags actually changed: append the extension
		     name to the accumulated sub-arch string.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      /* j < ARRAY_SIZE means we hit `break' above for a full arch
	 name; j >= ARRAY_SIZE means no table entry matched.  */
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2385
/* Return the BFD architecture for the current target: l1om or k1om
   when the corresponding ISA was selected (both require 64-bit ELF),
   otherwise plain i386.  */

enum bfd_architecture
i386_arch (void)
{
  if (cpu_arch_isa == PROCESSOR_L1OM)
    {
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel L1OM is 64bit ELF only"));
      return bfd_arch_l1om;
    }
  else if (cpu_arch_isa == PROCESSOR_K1OM)
    {
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel K1OM is 64bit ELF only"));
      return bfd_arch_k1om;
    }
  else
    return bfd_arch_i386;
}
2406
/* Return the BFD machine number implied by default_arch ("x86_64",
   "x86_64_32" i.e. x32, or "i386") and any L1OM/K1OM ISA selection.
   L1OM/K1OM additionally require a plain "x86_64" default on ELF.  */

unsigned long
i386_mach (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      if (cpu_arch_isa == PROCESSOR_L1OM)
	{
	  /* default_arch[6] != '\0' means "x86_64_32" (x32), which
	     L1OM does not support.  */
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel L1OM is 64bit ELF only"));
	  return bfd_mach_l1om;
	}
      else if (cpu_arch_isa == PROCESSOR_K1OM)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel K1OM is 64bit ELF only"));
	  return bfd_mach_k1om;
	}
      else if (default_arch[6] == '\0')
	return bfd_mach_x86_64;
      else
	return bfd_mach_x64_32;
    }
  else if (!strcmp (default_arch, "i386"))
    return bfd_mach_i386_i386;
  else
    as_fatal (_("unknown architecture"));
}
2436 \f
/* Target-specific one-time initialization, called by gas before
   assembly starts: build the opcode and register hash tables, set up
   the lexical classification tables, and configure DWARF CFI
   parameters for the selected code mode.  */

void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  i386_optab is sorted so that all templates
       sharing a mnemonic are adjacent; each hash entry maps a
       mnemonic to the [start, end) range of its templates.  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  Each
     table maps a character to itself if it is legal in that lexical
     role, or to 0 if not; uppercase letters fold to lowercase for
     mnemonics/registers.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  /* Braces delimit AVX-512 masking/rounding/broadcast specs.  */
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow non-ASCII bytes in identifiers and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* DWARF CFI setup: the return-address column and CIE data alignment
     depend on the address size (and on PE, the output flavour).  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2568
/* Dump hash-table usage statistics for the opcode and register
   tables (used by gas's --statistics option).  */

void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2575 \f
2576 #ifdef DEBUG386
2577
2578 /* Debugging routines for md_assemble. */
2579 static void pte (insn_template *);
2580 static void pt (i386_operand_type);
2581 static void pe (expressionS *);
2582 static void ps (symbolS *);
2583
/* Debug dump of a parsed instruction X, prefixed by LINE: template,
   addressing info, ModRM/SIB/REX bytes, and each operand's type plus
   its register/immediate/displacement value.  */

static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Only register-class operands have a reg_name to print.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2629
/* Debug dump of an opcode template T: operand count, base/extension
   opcodes, D/W modifier flags, and each operand's type.  */

static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2650
/* Debug dump of an expression E: operation, constant part, and any
   attached symbols.  */

static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2670
/* Debug dump of a symbol S: name, external flag, and owning segment.  */

static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2679
/* Operand-type mask -> printable name table used by pt() below; an
   operand may match several entries, all of which are printed.  */

static struct type_name
  {
    i386_operand_type mask;
    const char *name;
  }
const type_names[] =
{
  { OPERAND_TYPE_REG8, "r8" },
  { OPERAND_TYPE_REG16, "r16" },
  { OPERAND_TYPE_REG32, "r32" },
  { OPERAND_TYPE_REG64, "r64" },
  { OPERAND_TYPE_IMM8, "i8" },
  /* NOTE(review): this entry repeats the OPERAND_TYPE_IMM8 mask, so
     "i8s" prints whenever "i8" does; presumably OPERAND_TYPE_IMM8S
     was intended — confirm against i386-init.h.  */
  { OPERAND_TYPE_IMM8, "i8s" },
  { OPERAND_TYPE_IMM16, "i16" },
  { OPERAND_TYPE_IMM32, "i32" },
  { OPERAND_TYPE_IMM32S, "i32s" },
  { OPERAND_TYPE_IMM64, "i64" },
  { OPERAND_TYPE_IMM1, "i1" },
  { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
  { OPERAND_TYPE_DISP8, "d8" },
  { OPERAND_TYPE_DISP16, "d16" },
  { OPERAND_TYPE_DISP32, "d32" },
  { OPERAND_TYPE_DISP32S, "d32s" },
  { OPERAND_TYPE_DISP64, "d64" },
  { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
  { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
  { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
  { OPERAND_TYPE_CONTROL, "control reg" },
  { OPERAND_TYPE_TEST, "test reg" },
  { OPERAND_TYPE_DEBUG, "debug reg" },
  { OPERAND_TYPE_FLOATREG, "FReg" },
  { OPERAND_TYPE_FLOATACC, "FAcc" },
  { OPERAND_TYPE_SREG2, "SReg2" },
  { OPERAND_TYPE_SREG3, "SReg3" },
  { OPERAND_TYPE_ACC, "Acc" },
  { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
  { OPERAND_TYPE_REGMMX, "rMMX" },
  { OPERAND_TYPE_REGXMM, "rXMM" },
  { OPERAND_TYPE_REGYMM, "rYMM" },
  { OPERAND_TYPE_REGZMM, "rZMM" },
  { OPERAND_TYPE_REGMASK, "Mask reg" },
  { OPERAND_TYPE_ESSEG, "es" },
};
2723
/* Debug dump of an operand type T: print the name of every entry in
   type_names whose mask intersects T.  */

static void
pt (i386_operand_type t)
{
  unsigned int j;
  i386_operand_type a;

  for (j = 0; j < ARRAY_SIZE (type_names); j++)
    {
      a = operand_type_and (t, type_names[j].mask);
      if (!operand_type_all_zero (&a))
	fprintf (stdout, "%s, ", type_names[j].name);
    }
  fflush (stdout);
}
2738
2739 #endif /* DEBUG386 */
2740 \f
/* Select the BFD relocation type for a fixup of SIZE bytes.  PCREL is
   non-zero for pc-relative fixups; SIGN is >0 for signed, 0 for
   unsigned, <0 for "don't care".  OTHER, when not NO_RELOC, is a
   specific relocation requested by an operand suffix (e.g. @GOT); it
   is validated (and for SIZE == 8 mapped to its 64-bit counterpart)
   before being returned.  Returns NO_RELOC on error.  */

static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      if (size == 8)
	/* Map 32-bit x86-64 relocations to their 64-bit forms.  The
	   first three return directly, bypassing the checks below.  */
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_GOTPLT64:
	    return BFD_RELOC_X86_64_GOTPLT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* @SIZE relocations: widen for 8-byte fields and reject
	 pc-relative use.  */
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    other = BFD_RELOC_SIZE64;
	  if (pcrel)
	    {
	      as_bad (_("there are no pc-relative size relocations"));
	      return NO_RELOC;
	    }
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      /* Only a 4-byte signed absolute relocation exists (R_X86_64_32S);
	 other signed sizes fall through to the error below.  */
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2849
2850 /* Here we decide which fixups can be adjusted to make them relative to
2851 the beginning of the section instead of the symbol. Basically we need
2852 to make sure that the dynamic relocations are done correctly, so in
2853 some cases we force the original symbol to be used. */
2854
2855 int
2856 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2857 {
2858 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2859 if (!IS_ELF)
2860 return 1;
2861
2862 /* Don't adjust pc-relative references to merge sections in 64-bit
2863 mode. */
2864 if (use_rela_relocations
2865 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2866 && fixP->fx_pcrel)
2867 return 0;
2868
2869 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2870 and changed later by validate_fix. */
2871 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2872 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2873 return 0;
2874
2875 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2876 for size relocations. */
2877 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2878 || fixP->fx_r_type == BFD_RELOC_SIZE64
2879 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2880 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2881 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2882 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2883 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2884 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2885 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2886 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2887 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2888 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2889 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2890 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2891 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2892 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2893 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2894 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2895 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2896 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2897 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2898 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2899 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2900 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2901 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2902 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2903 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2904 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2905 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2906 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2907 return 0;
2908 #endif
2909 return 1;
2910 }
2911
/* Classify MNEMONIC for Intel-mode x87 operand handling.  Returns
   0 for non-math opcodes (and fxsave/fxrstor), 2 for integer ops
   (fi...), 3 for control/state ops (fldcw/fldenv, fn* except fnop,
   frstor, fsave, fstcw/fstdw/fstenv/fsts[gw]), and 1 for everything
   else (ordinary floating-point math).

   Note that the value returned is meaningful only for opcodes with
   (memory) operands, hence the code here is free to improperly handle
   opcodes that have no operands (for better performance and smaller
   code).  */

static int
intel_float_operand (const char *mnemonic)
{
  char second;

  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  second = mnemonic[1];

  /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
     the fs segment override prefix not currently handled because no
     call path can make opcodes without operands get here.  */
  if (second == 'i')
    return 2;			/* integer op */

  if (second == 'l')
    /* fldcw / fldenv are control ops; other fl* are plain math.  */
    return (mnemonic[2] == 'd'
	    && (mnemonic[3] == 'c' || mnemonic[3] == 'e')) ? 3 : 1;

  if (second == 'n')
    /* All non-waiting control ops except fnop.  */
    return mnemonic[2] != 'o' ? 3 : 1;

  if (second == 'r')
    /* frstor / frstpm.  */
    return mnemonic[2] == 's' ? 3 : 1;

  if (second == 's')
    {
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't'
	  && (mnemonic[3] == 'c'	/* fstcw */
	      || mnemonic[3] == 'd'	/* fstdw */
	      || mnemonic[3] == 'e'	/* fstenv */
	      || mnemonic[3] == 's'))	/* fsts[gw] */
	return 3;
      return 1;
    }

  if (second == 'x')
    /* fxsave / fxrstor are not really math ops.  */
    return (mnemonic[2] == 'r' || mnemonic[2] == 's') ? 0 : 1;

  return 1;
}
2964
2965 /* Build the VEX prefix. */
2966
/* Build the VEX prefix for the current instruction (global `i') into
   i.vex, choosing the 2-byte 0xC5 form when possible and the 3-byte
   0xC4/0x8F form otherwise.  T points at the matched template; T[1]
   may be used when operands are swapped to enable the short form.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier: vvvv encodes the register in 1's
     complement.  */
  if (i.vex.register_specifier)
    {
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  Only REX.B can be set in the 2-byte form, so if only
     REX.B is needed and the template has a commutative form (S),
     swapping lets us drop to the short prefix.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn (the direction-swapped template).  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The legacy prefix implied by the opcode's second byte selects the
     VEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible: only for the 0F map, W0 (or
     WIG), and when no REX.W/X/B bits are needed.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit (stored inverted in VEX).  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix: escape byte depends on the opcode map
	 (0xC4 for VEX maps, 0x8F for AMD XOP maps).  */
      unsigned int m, w;

      i.vex.length = 3;

      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3107
3108 /* Build the EVEX prefix. */
3109
/* Build the 4-byte EVEX prefix (0x62 escape) for the current
   instruction (global `i') into i.vex: opcode map, inverted R/X/B
   and R'/V' bits, vvvv register specifier, W, vector length or
   rounding control, broadcast, and masking fields.  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier: vvvv plus the V' bit (byte 3) encode up
     to 32 registers, both stored inverted.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* The legacy prefix implied by the opcode's second byte selects the
     EVEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction;
     the -mevexwig option picks the value then.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC: the L'L
     bits encode the vector length only when no rounding control is
     present, otherwise they hold the RC/SAE encoding.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  /* Encode the opmask register in aaa.  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3276
/* Handle instructions whose extension opcode is encoded where an
   8-bit immediate would go: validate/strip the fixed register
   operands of MONITOR/MWAIT/SVME forms, then synthesize an imm8
   operand holding the extension opcode.  */

static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* Operand x must be register number x (e.g. %eax,%ecx,%edx).  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  /* Consumed: prevent the extension opcode from also being emitted in
     ModRM.reg.  */
  i.tm.extension_opcode = None;
}
3322
3323
/* Check whether the current instruction may legally carry the pending
   HLE (xacquire/xrelease) prefix, per the template's hleprefixok
   policy.  Returns 1 if OK, 0 (with a diagnostic) otherwise.  */

static int
check_hle (void)
{
  switch (i.tm.opcode_modifier.hleprefixok)
    {
    default:
      abort ();
    case HLEPrefixNone:
      /* Instruction never takes an HLE prefix.  */
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.hle_prefix);
      return 0;
    case HLEPrefixLock:
      /* HLE prefix only valid together with an explicit lock prefix.  */
      if (i.prefix[LOCK_PREFIX])
	return 1;
      as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
      return 0;
    case HLEPrefixAny:
      return 1;
    case HLEPrefixRelease:
      /* xrelease-only instructions (e.g. plain mov to memory).  */
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	{
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
		  i.tm.name);
	  return 0;
	}
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	{
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
	  return 0;
	}
      return 1;
    }
}
3359
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   The pipeline is: parse mnemonic and operands, pre-shrink immediates
   and displacements, match an opcode template, run per-template legality
   checks (prefixes, suffixes, REX), then emit via output_insn.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  The per-insn state `i' and the expression
     pools are reused for every instruction assembled.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Optionally diagnose use of plain SSE instructions (templates that
     are SSE* but not AVX-only), per the -msse-check= setting.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.
     Masking out bits 0 and 3 makes opcodes 0x0fb6/b7/be/bf (the
     movzx/movsx family) all compare equal to 0x0fb6.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  /* Some FP insns require a preceding fwait opcode.  */
  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  /* MPX insns reject the 0x67 address-size override in 64-bit mode.  */
  if (i.tm.cpu_flags.bitfield.cpumpx
      && flag_code == CODE_64BIT
      && i.prefix[ADDR_PREFIX])
    as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  Narrow each parsed operand type to its
     overlap with the matched template's operand type.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  Implicit operands (port regs, shift
     counts, accumulators) are not encoded, so drop them from the
     register-operand count.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  /* Build the VEX or EVEX prefix for templates that need one; neither
     encoding exists in 16-bit mode.  */
  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
    {
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  AH/BH/CH/DH
		 (reg_num > 3) cannot be encoded once a REX prefix
		 is present.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3638
/* Parse the (possibly prefix-laden) instruction at LINE into MNEMONIC,
   leaving current_templates pointing at the candidate template set.
   Standalone prefixes (lock, rep, xacquire, addr32, ...) are consumed
   here via add_prefix; the .s/.d8/.d32 pseudo suffixes and AT&T size
   suffixes are also recognised.  Returns a pointer just past the
   mnemonic (and any branch hint), or NULL after a diagnostic.  */

static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop once per prefix token plus once for the mnemonic proper.  */
  while (1)
    {
      mnem_p = mnemonic;
      /* Copy valid mnemonic characters, remembering the last '.' so
	 the .s/.d8/.d32 pseudo suffixes can be stripped below.  */
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      /* In AT&T syntax a prefix separator or a comma (for branch
	 hints) may legitimately follow; Intel syntax allows neither.  */
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* If this token is a prefix and something follows it, record the
	 prefix and go round again for the real mnemonic.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  The 0xf3
	     (PREFIX_REP) byte encodes rep, xacquire/xrelease (HLE) and
	     bnd (MPX); disambiguate by the prefix's CPU flags.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.hle_prefix = current_templates->start->name;
	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		i.bnd_prefix = current_templates->start->name;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding: a trailing ".s", ".d8" or ".d32" pseudo suffix.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      /* Strip the pseudo suffix and retry the lookup.  */
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* NB: the following case labels sit inside the else branch,
	       so a 'w' handled above skips only the i.suffix assignment
	       and still gets the suffix stripped below.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
3873
/* Parse the comma-separated operand list at L for MNEMONIC, calling
   i386_intel_operand or i386_att_operand on each operand in turn (which
   fill in the global `i').  Returns a pointer to the end of the insn,
   or NULL after a diagnostic.  */

static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan to the end of this operand: the next top-level ',' or the
	 end of the insn.  Commas inside (...) / [...] groups don't
	 terminate the operand.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T groups with (), Intel with [].  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.
	     Temporarily NUL-terminate the operand text for the
	     operand parser.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3992
3993 static void
3994 swap_2_operands (int xchg1, int xchg2)
3995 {
3996 union i386_op temp_op;
3997 i386_operand_type temp_type;
3998 enum bfd_reloc_code_real temp_reloc;
3999
4000 temp_type = i.types[xchg2];
4001 i.types[xchg2] = i.types[xchg1];
4002 i.types[xchg1] = temp_type;
4003 temp_op = i.op[xchg2];
4004 i.op[xchg2] = i.op[xchg1];
4005 i.op[xchg1] = temp_op;
4006 temp_reloc = i.reloc[xchg2];
4007 i.reloc[xchg2] = i.reloc[xchg1];
4008 i.reloc[xchg1] = temp_reloc;
4009
4010 if (i.mask)
4011 {
4012 if (i.mask->operand == xchg1)
4013 i.mask->operand = xchg2;
4014 else if (i.mask->operand == xchg2)
4015 i.mask->operand = xchg1;
4016 }
4017 if (i.broadcast)
4018 {
4019 if (i.broadcast->operand == xchg1)
4020 i.broadcast->operand = xchg2;
4021 else if (i.broadcast->operand == xchg2)
4022 i.broadcast->operand = xchg1;
4023 }
4024 if (i.rounding)
4025 {
4026 if (i.rounding->operand == xchg1)
4027 i.rounding->operand = xchg2;
4028 else if (i.rounding->operand == xchg2)
4029 i.rounding->operand = xchg1;
4030 }
4031 }
4032
4033 static void
4034 swap_operands (void)
4035 {
4036 switch (i.operands)
4037 {
4038 case 5:
4039 case 4:
4040 swap_2_operands (1, i.operands - 2);
4041 case 3:
4042 case 2:
4043 swap_2_operands (0, i.operands - 1);
4044 break;
4045 default:
4046 abort ();
4047 }
4048
4049 if (i.mem_operands == 2)
4050 {
4051 const seg_entry *temp_seg;
4052 temp_seg = i.seg[0];
4053 i.seg[0] = i.seg[1];
4054 i.seg[1] = temp_seg;
4055 }
4056 }
4057
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens each immediate operand's set of allowed
   Imm* type bits (guided by an operand-size suffix, explicit or
   guessed from register operands) so template matching can pick the
   shortest encoding.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.
	       Allow all immediate widths that the suffix's operand
	       size can accommodate.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Likewise sign-extend a value that fits in 32 bits; the
	       ^/- pair is a portable sign extension from bit 31.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of immediate types accepted by any candidate
		 template for this operand slot.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix's sizes if some template
		 would still match afterwards.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4204
/* Try to use the smallest displacement type too.  For constant
   displacements, sign-reduce the value and set the narrowest Disp*
   type bits it fits in; TLS-descriptor relocs are emitted here and
   their displacement bits cleared.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can
		   be dropped entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With the 0x67 prefix the effective address is 32-bit,
		   so an unsigned 32-bit displacement is usable too.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor call relocs carry no displacement of
	       their own; emit the fixup and clear all disp bits.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4280
/* Check if operands are valid for the instruction.  Covers the
   vector-specific constraints of template T against the parsed insn:
   VSIB addressing, EVEX broadcast, masking, rounding/SAE and the
   compressed (shifted) Disp8 form.  Returns 0 if OK, 1 with i.error
   set otherwise.  */

static int
check_VecOperands (const insn_template *t)
{
  unsigned int op;

  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.regxmm
	  || i.index_reg->reg_type.bitfield.regymm
	  || i.index_reg->reg_type.bitfield.regzmm))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* Check if default mask is allowed.  Templates with NoDefMask
     require an explicit, non-k0 mask register.  */
  if (t->opcode_modifier.nodefmask
      && (!i.mask || i.mask->mask->reg_num == 0))
    {
      i.error = no_default_mask;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      /* The index register width must match the template's VSIB kind
	 (xmm for VecSIB128, ymm for VecSIB256, zmm for VecSIB512).  */
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.regxmm)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.regymm)
	       || (t->opcode_modifier.vecsib == VecSIB512
		   && i.index_reg->reg_type.bitfield.regzmm)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2 || i.mask);
      if (i.reg_operands == 2 && !i.mask)
	{
	  /* Two register operands, no mask: mask, index and
	     destination registers must be pairwise distinct.  */
	  gas_assert (i.types[0].bitfield.regxmm
		      || i.types[0].bitfield.regymm);
	  gas_assert (i.types[2].bitfield.regxmm
		      || i.types[2].bitfield.regymm);
	  if (operand_check == check_none)
	    return 0;
	  if (register_number (i.op[0].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[2].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[0].regs)
	      != register_number (i.op[2].regs))
	    return 0;
	  if (operand_check == check_error)
	    {
	      i.error = invalid_vector_register_set;
	      return 1;
	    }
	  as_warn (_("mask, index, and destination registers should be distinct"));
	}
      else if (i.reg_operands == 1 && i.mask)
	{
	  /* Masked form: a ymm/zmm destination must differ from the
	     index register.  */
	  if ((i.types[1].bitfield.regymm
	       || i.types[1].bitfield.regzmm)
	      && (register_number (i.op[1].regs)
		  == register_number (i.index_reg)))
	    {
	      if (operand_check == check_error)
		{
		  i.error = invalid_vector_register_set;
		  return 1;
		}
	      if (operand_check != check_none)
		as_warn (_("index and destination registers should be distinct"));
	    }
	}
    }

  /* Check if broadcast is supported by the instruction and is applied
     to the memory operand.  */
  if (i.broadcast)
    {
      int broadcasted_opnd_size;

      /* Check if specified broadcast is supported in this instruction,
	 and it's applied to memory operand of DWORD or QWORD type,
	 depending on VecESize.  */
      if (i.broadcast->type != t->opcode_modifier.broadcast
	  || !i.types[i.broadcast->operand].bitfield.mem
	  || (t->opcode_modifier.vecesize == 0
	      && !i.types[i.broadcast->operand].bitfield.dword
	      && !i.types[i.broadcast->operand].bitfield.unspecified)
	  || (t->opcode_modifier.vecesize == 1
	      && !i.types[i.broadcast->operand].bitfield.qword
	      && !i.types[i.broadcast->operand].bitfield.unspecified))
	goto bad_broadcast;

      /* Element size (32 or 64 bits) times the broadcast factor must
	 match the template's vector operand width.  */
      broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
      if (i.broadcast->type == BROADCAST_1TO16)
	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
      else if (i.broadcast->type == BROADCAST_1TO8)
	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
      else if (i.broadcast->type == BROADCAST_1TO4)
	broadcasted_opnd_size <<= 2; /* Broadcast 1to4.  */
      else if (i.broadcast->type == BROADCAST_1TO2)
	broadcasted_opnd_size <<= 1; /* Broadcast 1to2.  */
      else
	goto bad_broadcast;

      if ((broadcasted_opnd_size == 256
	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
	  || (broadcasted_opnd_size == 512
	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
	{
	bad_broadcast:
	  i.error = unsupported_broadcast;
	  return 1;
	}
    }
  /* If broadcast is supported in this instruction, we need to check if
     operand of one-element size isn't specified without broadcast.  */
  else if (t->opcode_modifier.broadcast && i.mem_operands)
    {
      /* Find memory operand.  */
      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], anymem))
	  break;
      gas_assert (op < i.operands);
      /* Check size of the memory operand.  A single-element-sized
	 memory operand only makes sense with a broadcast specifier.  */
      if ((t->opcode_modifier.vecesize == 0
	   && i.types[op].bitfield.dword)
	  || (t->opcode_modifier.vecesize == 1
	      && i.types[op].bitfield.qword))
	{
	  i.error = broadcast_needed;
	  return 1;
	}
    }

  /* Check if requested masking is supported.  */
  if (i.mask
      && (!t->opcode_modifier.masking
	  || (i.mask->zeroing
	      && t->opcode_modifier.masking == MERGING_MASKING)))
    {
      i.error = unsupported_masking;
      return 1;
    }

  /* Check if masking is applied to dest operand.  */
  if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
    {
      i.error = mask_not_on_destination;
      return 1;
    }

  /* Check RC/SAE.  */
  if (i.rounding)
    {
      if ((i.rounding->type != saeonly
	   && !t->opcode_modifier.staticrounding)
	  || (i.rounding->type == saeonly
	      && (t->opcode_modifier.staticrounding
		  || !t->opcode_modifier.sae)))
	{
	  i.error = unsupported_rc_sae;
	  return 1;
	}
      /* If the instruction has several immediate operands and one of
	 them is rounding, the rounding operand should be the last
	 immediate operand.  */
      if (i.imm_operands > 1
	  && i.rounding->operand != (int) (i.imm_operands - 1))
	{
	  i.error = rc_sae_operand_not_last_imm;
	  return 1;
	}
    }

  /* Check vector Disp8 operand: EVEX compressed displacement scales
     the 8-bit displacement by 2^memshift.  */
  if (t->opcode_modifier.disp8memshift)
    {
      if (i.broadcast)
	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
      else
	i.memshift = t->opcode_modifier.disp8memshift;

      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], disp)
	    && i.op[op].disps->X_op == O_constant)
	  {
	    offsetT value = i.op[op].disps->X_add_number;
	    int vec_disp8_ok = fits_in_vec_disp8 (value);
	    if (t->operand_types [op].bitfield.vec_disp8)
	      {
		if (vec_disp8_ok)
		  i.types[op].bitfield.vec_disp8 = 1;
		else
		  {
		    /* Vector insn can only have Vec_Disp8/Disp32 in
		       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
		       mode.  */
		    i.types[op].bitfield.disp8 = 0;
		    if (flag_code != CODE_16BIT)
		      i.types[op].bitfield.disp16 = 0;
		  }
	      }
	    else if (flag_code != CODE_16BIT)
	      {
		/* One form of this instruction supports vector Disp8.
		   Try vector Disp8 if we need to use Disp32.  */
		if (vec_disp8_ok && !fits_in_signed_byte (value))
		  {
		    i.error = try_vector_disp8;
		    return 1;
		  }
	      }
	  }
    }
  else
    i.memshift = -1;

  return 0;
}
4510
4511 /* Check if operands are valid for the instruction. Update VEX
4512 operand types. */
4513
4514 static int
4515 VEX_check_operands (const insn_template *t)
4516 {
4517 /* VREX is only valid with EVEX prefix. */
4518 if (i.need_vrex && !t->opcode_modifier.evex)
4519 {
4520 i.error = invalid_register_operand;
4521 return 1;
4522 }
4523
4524 if (!t->opcode_modifier.vex)
4525 return 0;
4526
4527 /* Only check VEX_Imm4, which must be the first operand. */
4528 if (t->operand_types[0].bitfield.vec_imm4)
4529 {
4530 if (i.op[0].imms->X_op != O_constant
4531 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4532 {
4533 i.error = bad_imm4;
4534 return 1;
4535 }
4536
4537 /* Turn off Imm8 so that update_imm won't complain. */
4538 i.types[0] = vec_imm4;
4539 }
4540
4541 return 0;
4542 }
4543
/* Scan current_templates for the template matching the parsed
   instruction in `i' (operand count, CPU support, syntax/mnemonic
   flavor, suffix, operand types, vector/VEX constraints).  On success
   copies the template into i.tm (flipping the direction bit and
   swapping operand types for reverse matches) and returns it; on
   failure issues a diagnostic for the most specific recorded error and
   returns NULL.  */

static const insn_template *
match_template (void)
{
  /* Points to template once we've found it.  */
  const insn_template *t;
  i386_operand_type overlap0, overlap1, overlap2, overlap3;
  i386_operand_type overlap4;
  unsigned int found_reverse_match;
  i386_opcode_modifier suffix_check;
  i386_operand_type operand_types [MAX_OPERANDS];
  int addr_prefix_disp;
  unsigned int j;
  unsigned int found_cpu_match;
  unsigned int check_register;
  enum i386_error specific_error = 0;

#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
#endif

  found_reverse_match = 0;
  addr_prefix_disp = -1;

  /* Record which suffix was given so templates that forbid it can be
     rejected quickly below.  */
  memset (&suffix_check, 0, sizeof (suffix_check));
  if (i.suffix == BYTE_MNEM_SUFFIX)
    suffix_check.no_bsuf = 1;
  else if (i.suffix == WORD_MNEM_SUFFIX)
    suffix_check.no_wsuf = 1;
  else if (i.suffix == SHORT_MNEM_SUFFIX)
    suffix_check.no_ssuf = 1;
  else if (i.suffix == LONG_MNEM_SUFFIX)
    suffix_check.no_lsuf = 1;
  else if (i.suffix == QWORD_MNEM_SUFFIX)
    suffix_check.no_qsuf = 1;
  else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
    suffix_check.no_ldsuf = 1;

  /* Must have right number of operands.  */
  i.error = number_of_operands_mismatch;

  for (t = current_templates->start; t < current_templates->end; t++)
    {
      addr_prefix_disp = -1;

      if (i.operands != t->operands)
	continue;

      /* Check processor support.  */
      i.error = unsupported;
      found_cpu_match = (cpu_flags_match (t)
			 == CPU_FLAGS_PERFECT_MATCH);
      if (!found_cpu_match)
	continue;

      /* Check old gcc support.  */
      i.error = old_gcc_only;
      if (!old_gcc && t->opcode_modifier.oldgcc)
	continue;

      /* Check AT&T mnemonic.  */
      i.error = unsupported_with_intel_mnemonic;
      if (intel_mnemonic && t->opcode_modifier.attmnemonic)
	continue;

      /* Check AT&T/Intel syntax.  */
      i.error = unsupported_syntax;
      if ((intel_syntax && t->opcode_modifier.attsyntax)
	  || (!intel_syntax && t->opcode_modifier.intelsyntax))
	continue;

      /* Check the suffix, except for some instructions in intel mode.  */
      i.error = invalid_instruction_suffix;
      if ((!intel_syntax || !t->opcode_modifier.ignoresize)
	  && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
	      || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
	      || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
	      || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
	      || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
	      || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
	continue;

      if (!operand_size_match (t))
	continue;

      /* Work on a local copy of the operand types, since the address
	 size prefix handling below may adjust them.  */
      for (j = 0; j < MAX_OPERANDS; j++)
	operand_types[j] = t->operand_types[j];

      /* In general, don't allow 64-bit operands in 32-bit mode.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code != CODE_64BIT
	  && (intel_syntax
	      ? (!t->opcode_modifier.ignoresize
		 && !intel_float_operand (t->name))
	      : intel_float_operand (t->name) != 2)
	  && ((!operand_types[0].bitfield.regmmx
	       && !operand_types[0].bitfield.regxmm
	       && !operand_types[0].bitfield.regymm
	       && !operand_types[0].bitfield.regzmm)
	      || (!operand_types[t->operands > 1].bitfield.regmmx
		  && operand_types[t->operands > 1].bitfield.regxmm
		  && operand_types[t->operands > 1].bitfield.regymm
		  && operand_types[t->operands > 1].bitfield.regzmm))
	  && (t->base_opcode != 0x0fc7
	      || t->extension_opcode != 1 /* cmpxchg8b */))
	continue;

      /* In general, don't allow 32-bit operands on pre-386.  */
      else if (i.suffix == LONG_MNEM_SUFFIX
	       && !cpu_arch_flags.bitfield.cpui386
	       && (intel_syntax
		   ? (!t->opcode_modifier.ignoresize
		      && !intel_float_operand (t->name))
		   : intel_float_operand (t->name) != 2)
	       && ((!operand_types[0].bitfield.regmmx
		    && !operand_types[0].bitfield.regxmm)
		   || (!operand_types[t->operands > 1].bitfield.regmmx
		       && operand_types[t->operands > 1].bitfield.regxmm)))
	continue;

      /* Do not verify operands when there are none.  */
      else
	{
	  if (!t->operands)
	    /* We've found a match; break out of loop.  */
	    break;
	}

      /* Address size prefix will turn Disp64/Disp32/Disp16 operand
	 into Disp32/Disp16/Disp32 operand.  */
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  /* There should be only one Disp operand.  */
	  switch (flag_code)
	    {
	    case CODE_16BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp16)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 1;
		      operand_types[j].bitfield.disp16 = 0;
		      break;
		    }
		}
	      break;
	    case CODE_32BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp32)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 0;
		      operand_types[j].bitfield.disp16 = 1;
		      break;
		    }
		}
	      break;
	    case CODE_64BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp64)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp64 = 0;
		      operand_types[j].bitfield.disp32 = 1;
		      break;
		    }
		}
	      break;
	    }
	}

      /* We check register size if needed.  */
      check_register = t->opcode_modifier.checkregsize;
      overlap0 = operand_type_and (i.types[0], operand_types[0]);
      switch (t->operands)
	{
	case 1:
	  if (!operand_type_match (overlap0, i.types[0]))
	    continue;
	  break;
	case 2:
	  /* xchg %eax, %eax is a special case.  It is an alias for nop
	     only in 32bit mode and we can use opcode 0x90.  In 64bit
	     mode, we can't use 0x90 for xchg %eax, %eax since it should
	     zero-extend %eax to %rax.  */
	  if (flag_code == CODE_64BIT
	      && t->base_opcode == 0x90
	      && operand_type_equal (&i.types [0], &acc32)
	      && operand_type_equal (&i.types [1], &acc32))
	    continue;
	  if (i.swap_operand)
	    {
	      /* If we swap operand in encoding, we either match
		 the next one or reverse direction of operands.  */
	      if (t->opcode_modifier.s)
		continue;
	      else if (t->opcode_modifier.d)
		goto check_reverse;
	    }
	  /* Fall through.  */

	case 3:
	  /* If we swap operand in encoding, we match the next one.  */
	  if (i.swap_operand && t->opcode_modifier.s)
	    continue;
	  /* Fall through.  */
	case 4:
	case 5:
	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
	  if (!operand_type_match (overlap0, i.types[0])
	      || !operand_type_match (overlap1, i.types[1])
	      || (check_register
		  && !operand_type_register_match (overlap0, i.types[0],
						   operand_types[0],
						   overlap1, i.types[1],
						   operand_types[1])))
	    {
	      /* Check if other direction is valid ...  */
	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
		continue;

	    check_reverse:
	      /* Try reversing direction of operands.  */
	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
	      if (!operand_type_match (overlap0, i.types[0])
		  || !operand_type_match (overlap1, i.types[1])
		  || (check_register
		      && !operand_type_register_match (overlap0,
						       i.types[0],
						       operand_types[1],
						       overlap1,
						       i.types[1],
						       operand_types[0])))
		{
		  /* Does not match either direction.  */
		  continue;
		}
	      /* found_reverse_match holds which of D or FloatDR
		 we've found.  */
	      if (t->opcode_modifier.d)
		found_reverse_match = Opcode_D;
	      else if (t->opcode_modifier.floatd)
		found_reverse_match = Opcode_FloatD;
	      else
		found_reverse_match = 0;
	      if (t->opcode_modifier.floatr)
		found_reverse_match |= Opcode_FloatR;
	    }
	  else
	    {
	      /* Found a forward 2 operand match here.  Compute the
		 remaining overlaps; each case deliberately falls
		 through to the smaller operand counts.  */
	      switch (t->operands)
		{
		case 5:
		  overlap4 = operand_type_and (i.types[4],
					       operand_types[4]);
		  /* Fall through.  */
		case 4:
		  overlap3 = operand_type_and (i.types[3],
					       operand_types[3]);
		  /* Fall through.  */
		case 3:
		  overlap2 = operand_type_and (i.types[2],
					       operand_types[2]);
		  break;
		}

	      switch (t->operands)
		{
		case 5:
		  if (!operand_type_match (overlap4, i.types[4])
		      || !operand_type_register_match (overlap3,
						       i.types[3],
						       operand_types[3],
						       overlap4,
						       i.types[4],
						       operand_types[4]))
		    continue;
		  /* Fall through.  */
		case 4:
		  if (!operand_type_match (overlap3, i.types[3])
		      || (check_register
			  && !operand_type_register_match (overlap2,
							   i.types[2],
							   operand_types[2],
							   overlap3,
							   i.types[3],
							   operand_types[3])))
		    continue;
		  /* Fall through.  */
		case 3:
		  /* Here we make use of the fact that there are no
		     reverse match 3 operand instructions, and all 3
		     operand instructions only need to be checked for
		     register consistency between operands 2 and 3.  */
		  if (!operand_type_match (overlap2, i.types[2])
		      || (check_register
			  && !operand_type_register_match (overlap1,
							   i.types[1],
							   operand_types[1],
							   overlap2,
							   i.types[2],
							   operand_types[2])))
		    continue;
		  break;
		}
	    }
	  /* Found either forward/reverse 2, 3 or 4 operand match here:
	     slip through to break.  */
	}
      if (!found_cpu_match)
	{
	  found_reverse_match = 0;
	  continue;
	}

      /* Check if vector and VEX operands are valid.  */
      if (check_VecOperands (t) || VEX_check_operands (t))
	{
	  /* Remember the more specific diagnostic; later templates may
	     overwrite i.error with a generic mismatch.  */
	  specific_error = i.error;
	  continue;
	}

      /* We've found a match; break out of loop.  */
      break;
    }

  if (t == current_templates->end)
    {
      /* We found no match.  Report the most specific error recorded.  */
      const char *err_msg;
      switch (specific_error ? specific_error : i.error)
	{
	default:
	  abort ();
	case operand_size_mismatch:
	  err_msg = _("operand size mismatch");
	  break;
	case operand_type_mismatch:
	  err_msg = _("operand type mismatch");
	  break;
	case register_type_mismatch:
	  err_msg = _("register type mismatch");
	  break;
	case number_of_operands_mismatch:
	  err_msg = _("number of operands mismatch");
	  break;
	case invalid_instruction_suffix:
	  err_msg = _("invalid instruction suffix");
	  break;
	case bad_imm4:
	  err_msg = _("constant doesn't fit in 4 bits");
	  break;
	case old_gcc_only:
	  err_msg = _("only supported with old gcc");
	  break;
	case unsupported_with_intel_mnemonic:
	  err_msg = _("unsupported with Intel mnemonic");
	  break;
	case unsupported_syntax:
	  err_msg = _("unsupported syntax");
	  break;
	case unsupported:
	  as_bad (_("unsupported instruction `%s'"),
		  current_templates->start->name);
	  return NULL;
	case invalid_vsib_address:
	  err_msg = _("invalid VSIB address");
	  break;
	case invalid_vector_register_set:
	  err_msg = _("mask, index, and destination registers must be distinct");
	  break;
	case unsupported_vector_index_register:
	  err_msg = _("unsupported vector index register");
	  break;
	case unsupported_broadcast:
	  err_msg = _("unsupported broadcast");
	  break;
	case broadcast_not_on_src_operand:
	  err_msg = _("broadcast not on source memory operand");
	  break;
	case broadcast_needed:
	  err_msg = _("broadcast is needed for operand of such type");
	  break;
	case unsupported_masking:
	  err_msg = _("unsupported masking");
	  break;
	case mask_not_on_destination:
	  err_msg = _("mask not on destination operand");
	  break;
	case no_default_mask:
	  err_msg = _("default mask isn't allowed");
	  break;
	case unsupported_rc_sae:
	  err_msg = _("unsupported static rounding/sae");
	  break;
	case rc_sae_operand_not_last_imm:
	  if (intel_syntax)
	    err_msg = _("RC/SAE operand must precede immediate operands");
	  else
	    err_msg = _("RC/SAE operand must follow immediate operands");
	  break;
	case invalid_register_operand:
	  err_msg = _("invalid register operand");
	  break;
	}
      as_bad (_("%s for `%s'"), err_msg,
	      current_templates->start->name);
      return NULL;
    }

  if (!quiet_warnings)
    {
      if (!intel_syntax
	  && (i.types[0].bitfield.jumpabsolute
	      != operand_types[0].bitfield.jumpabsolute))
	{
	  as_warn (_("indirect %s without `*'"), t->name);
	}

      if (t->opcode_modifier.isprefix
	  && t->opcode_modifier.ignoresize)
	{
	  /* Warn them that a data or address size prefix doesn't
	     affect assembly of the next line of code.  */
	  as_warn (_("stand-alone `%s' prefix"), t->name);
	}
    }

  /* Copy the template we found.  */
  i.tm = *t;

  if (addr_prefix_disp != -1)
    i.tm.operand_types[addr_prefix_disp]
      = operand_types[addr_prefix_disp];

  if (found_reverse_match)
    {
      /* If we found a reverse match we must alter the opcode
	 direction bit.  found_reverse_match holds bits to change
	 (different for int & float insns).  */

      i.tm.base_opcode ^= found_reverse_match;

      i.tm.operand_types[0] = operand_types[1];
      i.tm.operand_types[1] = operand_types[0];
    }

  return t;
}
4991
4992 static int
4993 check_string (void)
4994 {
4995 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4996 if (i.tm.operand_types[mem_op].bitfield.esseg)
4997 {
4998 if (i.seg[0] != NULL && i.seg[0] != &es)
4999 {
5000 as_bad (_("`%s' operand %d must use `%ses' segment"),
5001 i.tm.name,
5002 mem_op + 1,
5003 register_prefix);
5004 return 0;
5005 }
5006 /* There's only ever one segment override allowed per instruction.
5007 This instruction possibly has a legal segment override on the
5008 second operand, so copy the segment to where non-string
5009 instructions store it, allowing common code. */
5010 i.seg[0] = i.seg[1];
5011 }
5012 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5013 {
5014 if (i.seg[1] != NULL && i.seg[1] != &es)
5015 {
5016 as_bad (_("`%s' operand %d must use `%ses' segment"),
5017 i.tm.name,
5018 mem_op + 2,
5019 register_prefix);
5020 return 0;
5021 }
5022 }
5023 return 1;
5024 }
5025
/* Determine the operand-size suffix for the matched instruction
   (explicit from the template, inferred from register operands, or
   derived from the mode for jumps in Intel syntax), validate register
   operands against it, and adjust the opcode / emit size prefixes /
   set REX.W accordingly.  Returns 0 after a diagnostic on error,
   1 on success.  */

static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)		/* crc32, word/dword/qword form */
	    {
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)	/* crc32, byte form */
	    {
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan operands from last to first; the first sizable
		 register found (skipping I/O port registers) decides
		 the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
	       || i.suffix == YMMWORD_MNEM_SUFFIX
	       || i.suffix == ZMMWORD_MNEM_SUFFIX)
	{
	  /* Skip if the instruction has x/y/z suffix.  match_template
	     should check if it is a valid suffix.  */
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Intel syntax branch/descriptor-table insns default to the
	 mode's natural operand size.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through: no 'q' suffix allowed, try 'l'.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  /* Build a bitmask of the suffixes the template accepts; more
	     than one bit set means the size is ambiguous.  */
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* There are more than suffix matches.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX
      && i.suffix != ZMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  cmpxchg8b is also a special case.  */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
5300
5301 static int
5302 check_byte_reg (void)
5303 {
5304 int op;
5305
5306 for (op = i.operands; --op >= 0;)
5307 {
5308 /* If this is an eight bit register, it's OK. If it's the 16 or
5309 32 bit version of an eight bit register, we will just use the
5310 low portion, and that's OK too. */
5311 if (i.types[op].bitfield.reg8)
5312 continue;
5313
5314 /* I/O port address operands are OK too. */
5315 if (i.tm.operand_types[op].bitfield.inoutportreg)
5316 continue;
5317
5318 /* crc32 doesn't generate this warning. */
5319 if (i.tm.base_opcode == 0xf20f38f0)
5320 continue;
5321
5322 if ((i.types[op].bitfield.reg16
5323 || i.types[op].bitfield.reg32
5324 || i.types[op].bitfield.reg64)
5325 && i.op[op].regs->reg_num < 4
5326 /* Prohibit these changes in 64bit mode, since the lowering
5327 would be more complicated. */
5328 && flag_code != CODE_64BIT)
5329 {
5330 #if REGISTER_WARNINGS
5331 if (!quiet_warnings)
5332 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5333 register_prefix,
5334 (i.op[op].regs + (i.types[op].bitfield.reg16
5335 ? REGNAM_AL - REGNAM_AX
5336 : REGNAM_AL - REGNAM_EAX))->reg_name,
5337 register_prefix,
5338 i.op[op].regs->reg_name,
5339 i.suffix);
5340 #endif
5341 continue;
5342 }
5343 /* Any other register is bad. */
5344 if (i.types[op].bitfield.reg16
5345 || i.types[op].bitfield.reg32
5346 || i.types[op].bitfield.reg64
5347 || i.types[op].bitfield.regmmx
5348 || i.types[op].bitfield.regxmm
5349 || i.types[op].bitfield.regymm
5350 || i.types[op].bitfield.regzmm
5351 || i.types[op].bitfield.sreg2
5352 || i.types[op].bitfield.sreg3
5353 || i.types[op].bitfield.control
5354 || i.types[op].bitfield.debug
5355 || i.types[op].bitfield.test
5356 || i.types[op].bitfield.floatreg
5357 || i.types[op].bitfield.floatacc)
5358 {
5359 as_bad (_("`%s%s' not allowed with `%s%c'"),
5360 register_prefix,
5361 i.op[op].regs->reg_name,
5362 i.tm.name,
5363 i.suffix);
5364 return 0;
5365 }
5366 }
5367 return 1;
5368 }
5369
5370 static int
5371 check_long_reg (void)
5372 {
5373 int op;
5374
5375 for (op = i.operands; --op >= 0;)
5376 /* Reject eight bit registers, except where the template requires
5377 them. (eg. movzb) */
5378 if (i.types[op].bitfield.reg8
5379 && (i.tm.operand_types[op].bitfield.reg16
5380 || i.tm.operand_types[op].bitfield.reg32
5381 || i.tm.operand_types[op].bitfield.acc))
5382 {
5383 as_bad (_("`%s%s' not allowed with `%s%c'"),
5384 register_prefix,
5385 i.op[op].regs->reg_name,
5386 i.tm.name,
5387 i.suffix);
5388 return 0;
5389 }
5390 /* Warn if the e prefix on a general reg is missing. */
5391 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5392 && i.types[op].bitfield.reg16
5393 && (i.tm.operand_types[op].bitfield.reg32
5394 || i.tm.operand_types[op].bitfield.acc))
5395 {
5396 /* Prohibit these changes in the 64bit mode, since the
5397 lowering is more complicated. */
5398 if (flag_code == CODE_64BIT)
5399 {
5400 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5401 register_prefix, i.op[op].regs->reg_name,
5402 i.suffix);
5403 return 0;
5404 }
5405 #if REGISTER_WARNINGS
5406 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5407 register_prefix,
5408 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5409 register_prefix, i.op[op].regs->reg_name, i.suffix);
5410 #endif
5411 }
5412 /* Warn if the r prefix on a general reg is present. */
5413 else if (i.types[op].bitfield.reg64
5414 && (i.tm.operand_types[op].bitfield.reg32
5415 || i.tm.operand_types[op].bitfield.acc))
5416 {
5417 if (intel_syntax
5418 && i.tm.opcode_modifier.toqword
5419 && !i.types[0].bitfield.regxmm)
5420 {
5421 /* Convert to QWORD. We want REX byte. */
5422 i.suffix = QWORD_MNEM_SUFFIX;
5423 }
5424 else
5425 {
5426 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5427 register_prefix, i.op[op].regs->reg_name,
5428 i.suffix);
5429 return 0;
5430 }
5431 }
5432 return 1;
5433 }
5434
5435 static int
5436 check_qword_reg (void)
5437 {
5438 int op;
5439
5440 for (op = i.operands; --op >= 0; )
5441 /* Reject eight bit registers, except where the template requires
5442 them. (eg. movzb) */
5443 if (i.types[op].bitfield.reg8
5444 && (i.tm.operand_types[op].bitfield.reg16
5445 || i.tm.operand_types[op].bitfield.reg32
5446 || i.tm.operand_types[op].bitfield.acc))
5447 {
5448 as_bad (_("`%s%s' not allowed with `%s%c'"),
5449 register_prefix,
5450 i.op[op].regs->reg_name,
5451 i.tm.name,
5452 i.suffix);
5453 return 0;
5454 }
5455 /* Warn if the r prefix on a general reg is missing. */
5456 else if ((i.types[op].bitfield.reg16
5457 || i.types[op].bitfield.reg32)
5458 && (i.tm.operand_types[op].bitfield.reg32
5459 || i.tm.operand_types[op].bitfield.acc))
5460 {
5461 /* Prohibit these changes in the 64bit mode, since the
5462 lowering is more complicated. */
5463 if (intel_syntax
5464 && i.tm.opcode_modifier.todword
5465 && !i.types[0].bitfield.regxmm)
5466 {
5467 /* Convert to DWORD. We don't want REX byte. */
5468 i.suffix = LONG_MNEM_SUFFIX;
5469 }
5470 else
5471 {
5472 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5473 register_prefix, i.op[op].regs->reg_name,
5474 i.suffix);
5475 return 0;
5476 }
5477 }
5478 return 1;
5479 }
5480
5481 static int
5482 check_word_reg (void)
5483 {
5484 int op;
5485 for (op = i.operands; --op >= 0;)
5486 /* Reject eight bit registers, except where the template requires
5487 them. (eg. movzb) */
5488 if (i.types[op].bitfield.reg8
5489 && (i.tm.operand_types[op].bitfield.reg16
5490 || i.tm.operand_types[op].bitfield.reg32
5491 || i.tm.operand_types[op].bitfield.acc))
5492 {
5493 as_bad (_("`%s%s' not allowed with `%s%c'"),
5494 register_prefix,
5495 i.op[op].regs->reg_name,
5496 i.tm.name,
5497 i.suffix);
5498 return 0;
5499 }
5500 /* Warn if the e or r prefix on a general reg is present. */
5501 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5502 && (i.types[op].bitfield.reg32
5503 || i.types[op].bitfield.reg64)
5504 && (i.tm.operand_types[op].bitfield.reg16
5505 || i.tm.operand_types[op].bitfield.acc))
5506 {
5507 /* Prohibit these changes in the 64bit mode, since the
5508 lowering is more complicated. */
5509 if (flag_code == CODE_64BIT)
5510 {
5511 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5512 register_prefix, i.op[op].regs->reg_name,
5513 i.suffix);
5514 return 0;
5515 }
5516 #if REGISTER_WARNINGS
5517 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5518 register_prefix,
5519 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5520 register_prefix, i.op[op].regs->reg_name, i.suffix);
5521 #endif
5522 }
5523 return 1;
5524 }
5525
5526 static int
5527 update_imm (unsigned int j)
5528 {
5529 i386_operand_type overlap = i.types[j];
5530 if ((overlap.bitfield.imm8
5531 || overlap.bitfield.imm8s
5532 || overlap.bitfield.imm16
5533 || overlap.bitfield.imm32
5534 || overlap.bitfield.imm32s
5535 || overlap.bitfield.imm64)
5536 && !operand_type_equal (&overlap, &imm8)
5537 && !operand_type_equal (&overlap, &imm8s)
5538 && !operand_type_equal (&overlap, &imm16)
5539 && !operand_type_equal (&overlap, &imm32)
5540 && !operand_type_equal (&overlap, &imm32s)
5541 && !operand_type_equal (&overlap, &imm64))
5542 {
5543 if (i.suffix)
5544 {
5545 i386_operand_type temp;
5546
5547 operand_type_set (&temp, 0);
5548 if (i.suffix == BYTE_MNEM_SUFFIX)
5549 {
5550 temp.bitfield.imm8 = overlap.bitfield.imm8;
5551 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5552 }
5553 else if (i.suffix == WORD_MNEM_SUFFIX)
5554 temp.bitfield.imm16 = overlap.bitfield.imm16;
5555 else if (i.suffix == QWORD_MNEM_SUFFIX)
5556 {
5557 temp.bitfield.imm64 = overlap.bitfield.imm64;
5558 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5559 }
5560 else
5561 temp.bitfield.imm32 = overlap.bitfield.imm32;
5562 overlap = temp;
5563 }
5564 else if (operand_type_equal (&overlap, &imm16_32_32s)
5565 || operand_type_equal (&overlap, &imm16_32)
5566 || operand_type_equal (&overlap, &imm16_32s))
5567 {
5568 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5569 overlap = imm16;
5570 else
5571 overlap = imm32s;
5572 }
5573 if (!operand_type_equal (&overlap, &imm8)
5574 && !operand_type_equal (&overlap, &imm8s)
5575 && !operand_type_equal (&overlap, &imm16)
5576 && !operand_type_equal (&overlap, &imm32)
5577 && !operand_type_equal (&overlap, &imm32s)
5578 && !operand_type_equal (&overlap, &imm64))
5579 {
5580 as_bad (_("no instruction mnemonic suffix given; "
5581 "can't determine immediate size"));
5582 return 0;
5583 }
5584 }
5585 i.types[j] = overlap;
5586
5587 return 1;
5588 }
5589
5590 static int
5591 finalize_imm (void)
5592 {
5593 unsigned int j, n;
5594
5595 /* Update the first 2 immediate operands. */
5596 n = i.operands > 2 ? 2 : i.operands;
5597 if (n)
5598 {
5599 for (j = 0; j < n; j++)
5600 if (update_imm (j) == 0)
5601 return 0;
5602
5603 /* The 3rd operand can't be immediate operand. */
5604 gas_assert (operand_type_check (i.types[2], imm) == 0);
5605 }
5606
5607 return 1;
5608 }
5609
5610 static int
5611 bad_implicit_operand (int xmm)
5612 {
5613 const char *ireg = xmm ? "xmm0" : "ymm0";
5614
5615 if (intel_syntax)
5616 as_bad (_("the last operand of `%s' must be `%s%s'"),
5617 i.tm.name, register_prefix, ireg);
5618 else
5619 as_bad (_("the first operand of `%s' must be `%s%s'"),
5620 i.tm.name, register_prefix, ireg);
5621 return 0;
5622 }
5623
/* Massage the parsed operands into their final encodable form:
   insert/remove implicit operands (SSE2AVX xmm0 handling), apply the
   RegKludge and ShortForm opcode tricks, build the ModRM byte when
   needed, and decide whether an explicit segment prefix must be
   emitted.  Mutates the global instruction template `i' (including
   i.tm) in place.  Returns 1 on success, 0 after a diagnostic.  */

static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE insn assembled with a VEX prefix: the destination has to
	 be duplicated (or xmm0 made implicit) to match the AVX form.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  /* Shift every operand up one slot to make room at index 0.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* One slot for the inserted xmm0, one for the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)
		      || operand_type_equal (&i.types[0], &regzmm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand; shift the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* ShortForm: the register number is folded into the opcode
	 itself rather than going through ModRM.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      /* `pop %cs' is not a valid instruction.  */
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      /* moffs-form mov to/from the accumulator defaults to %ds.  */
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5846
/* Fill in i.rm (ModRM byte), i.sib (SIB byte), i.rex/i.vrex bits and
   i.vex.register_specifier from the operands collected in `i'.  Also
   fabricates zero displacements where the addressing mode requires a
   displacement that the source didn't supply.  Returns the default
   segment implied by the addressing mode (&ds or &ss), or 0 if none
   could be determined; the caller uses this to elide redundant
   segment prefixes.  */

static const seg_entry *
build_modrm_byte (void)
{
  const seg_entry *default_seg = 0;
  unsigned int source, dest;
  int vex_3_sources;

  /* The first operand of instructions with VEX prefix and 3 sources
     must be VEX_Imm4.  */
  vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
  if (vex_3_sources)
    {
      unsigned int nds, reg_slot;
      expressionS *exp;

      if (i.tm.opcode_modifier.veximmext
	  && i.tm.opcode_modifier.immext)
	{
	  dest = i.operands - 2;
	  gas_assert (dest == 3);
	}
      else
	dest = i.operands - 1;
      nds = dest - 1;

      /* There are 2 kinds of instructions:
	 1. 5 operands: 4 register operands or 3 register operands
	 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
	 VexW0 or VexW1.  The destination must be either XMM, YMM or
	 ZMM register.
	 2. 4 operands: 4 register operands or 3 register operands
	 plus 1 memory operand, VexXDS, and VexImmExt  */
      gas_assert ((i.reg_operands == 4
		   || (i.reg_operands == 3 && i.mem_operands == 1))
		  && i.tm.opcode_modifier.vexvvvv == VEXXDS
		  && (i.tm.opcode_modifier.veximmext
		      || (i.imm_operands == 1
			  && i.types[0].bitfield.vec_imm4
			  && (i.tm.opcode_modifier.vexw == VEXW0
			      || i.tm.opcode_modifier.vexw == VEXW1)
			  && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regymm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));

      if (i.imm_operands == 0)
	{
	  /* When there is no immediate operand, generate an 8bit
	     immediate operand to encode the first operand.  */
	  exp = &im_expressions[i.imm_operands++];
	  i.op[i.operands].imms = exp;
	  i.types[i.operands] = imm8;
	  i.operands++;
	  /* If VexW1 is set, the first operand is the source and
	     the second operand is encoded in the immediate operand.  */
	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    {
	      source = 0;
	      reg_slot = 1;
	    }
	  else
	    {
	      source = 1;
	      reg_slot = 0;
	    }

	  /* FMA swaps REG and NDS.  */
	  if (i.tm.cpu_flags.bitfield.cpufma)
	    {
	      unsigned int tmp;
	      tmp = reg_slot;
	      reg_slot = nds;
	      nds = tmp;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* The register goes in bits 7:4 of the is4 immediate byte.  */
	  exp->X_op = O_constant;
	  exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}
      else
	{
	  unsigned int imm_slot;

	  if (i.tm.opcode_modifier.vexw == VEXW0)
	    {
	      /* If VexW0 is set, the third operand is the source and
		 the second operand is encoded in the immediate
		 operand.  */
	      source = 2;
	      reg_slot = 1;
	    }
	  else
	    {
	      /* VexW1 is set, the second operand is the source and
		 the third operand is encoded in the immediate
		 operand.  */
	      source = 1;
	      reg_slot = 2;
	    }

	  if (i.tm.opcode_modifier.immext)
	    {
	      /* When ImmExt is set, the immdiate byte is the last
		 operand.  */
	      imm_slot = i.operands - 1;
	      source--;
	      reg_slot--;
	    }
	  else
	    {
	      imm_slot = 0;

	      /* Turn on Imm8 so that output_imm will generate it.  */
	      i.types[imm_slot].bitfield.imm8 = 1;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* Merge the register number into bits 7:4 of the existing
	     immediate.  */
	  i.op[imm_slot].imms->X_add_number
	    |= register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}

      gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regymm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regzmm));
      i.vex.register_specifier = i.op[nds].regs;
    }
  else
    source = dest = 0;

  /* i.reg_operands MUST be the number of real register operands;
     implicit registers do not count.  If there are 3 register
     operands, it must be a instruction with VexNDS.  For a
     instruction with VexNDD, the destination register is encoded
     in VEX prefix.  If there are 4 register operands, it must be
     a instruction with VEX prefix and 3 sources.  */
  if (i.mem_operands == 0
      && ((i.reg_operands == 2
	   && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
	  || (i.reg_operands == 3
	      && i.tm.opcode_modifier.vexvvvv == VEXXDS)
	  || (i.reg_operands == 4 && vex_3_sources)))
    {
      /* Register-only forms: pick which operand index is the source
	 based on the total operand count and immediate placement.  */
      switch (i.operands)
	{
	case 2:
	  source = 0;
	  break;
	case 3:
	  /* When there are 3 operands, one of them may be immediate,
	     which may be the first or the last operand.  Otherwise,
	     the first operand must be shift count register (cl) or it
	     is an instruction with VexNDS.  */
	  gas_assert (i.imm_operands == 1
		      || (i.imm_operands == 0
			  && (i.tm.opcode_modifier.vexvvvv == VEXXDS
			      || i.types[0].bitfield.shiftcount)));
	  if (operand_type_check (i.types[0], imm)
	      || i.types[0].bitfield.shiftcount)
	    source = 1;
	  else
	    source = 0;
	  break;
	case 4:
	  /* When there are 4 operands, the first two must be 8bit
	     immediate operands. The source operand will be the 3rd
	     one.

	     For instructions with VexNDS, if the first operand
	     an imm8, the source operand is the 2nd one.  If the last
	     operand is imm8, the source operand is the first one.  */
	  gas_assert ((i.imm_operands == 2
		       && i.types[0].bitfield.imm8
		       && i.types[1].bitfield.imm8)
		      || (i.tm.opcode_modifier.vexvvvv == VEXXDS
			  && i.imm_operands == 1
			  && (i.types[0].bitfield.imm8
			      || i.types[i.operands - 1].bitfield.imm8
			      || i.rounding)));
	  if (i.imm_operands == 2)
	    source = 2;
	  else
	    {
	      if (i.types[0].bitfield.imm8)
		source = 1;
	      else
		source = 0;
	    }
	  break;
	case 5:
	  if (i.tm.opcode_modifier.evex)
	    {
	      /* For EVEX instructions, when there are 5 operands, the
		 first one must be immediate operand.  If the second one
		 is immediate operand, the source operand is the 3th
		 one.  If the last one is immediate operand, the source
		 operand is the 2nd one.  */
	      gas_assert (i.imm_operands == 2
			  && i.tm.opcode_modifier.sae
			  && operand_type_check (i.types[0], imm));
	      if (operand_type_check (i.types[1], imm))
		source = 2;
	      else if (operand_type_check (i.types[4], imm))
		source = 1;
	      else
		abort ();
	    }
	  break;
	default:
	  abort ();
	}

      if (!vex_3_sources)
	{
	  dest = source + 1;

	  /* RC/SAE operand could be between DEST and SRC.  That happens
	     when one operand is GPR and the other one is XMM/YMM/ZMM
	     register.  */
	  if (i.rounding && i.rounding->operand == (int) dest)
	    dest++;

	  if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only source
		 operand must be 32/64bit integer, XMM, YMM or ZMM
		 register.  It is encoded in VEX prefix.  We need to
		 clear RegMem bit before calling operand_type_equal.  */

	      i386_operand_type op;
	      unsigned int vvvv;

	      /* Check register-only source operand when two source
		 operands are swapped.  */
	      if (!i.tm.operand_types[source].bitfield.baseindex
		  && i.tm.operand_types[dest].bitfield.baseindex)
		{
		  vvvv = source;
		  source = dest;
		}
	      else
		vvvv = dest;

	      op = i.tm.operand_types[vvvv];
	      op.bitfield.regmem = 0;
	      /* Sanity-check that the VEX.vvvv operand really is an
		 encodable register class.  */
	      if ((dest + 1) >= i.operands
		  || (!op.bitfield.reg32
		      && op.bitfield.reg64
		      && !operand_type_equal (&op, &regxmm)
		      && !operand_type_equal (&op, &regymm)
		      && !operand_type_equal (&op, &regzmm)
		      && !operand_type_equal (&op, &regmask)))
		abort ();
	      i.vex.register_specifier = i.op[vvvv].regs;
	      dest++;
	    }
	}

      i.rm.mode = 3;
      /* One of the register operands will be encoded in the i.tm.reg
	 field, the other in the combined i.tm.mode and i.tm.regmem
	 fields.  If no form of this instruction supports a memory
	 destination operand, then we assume the source operand may
	 sometimes be a memory operand and so we need to store the
	 destination in the i.rm.reg field.  */
      if (!i.tm.operand_types[dest].bitfield.regmem
	  && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
	{
	  i.rm.reg = i.op[dest].regs->reg_num;
	  i.rm.regmem = i.op[source].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	}
      else
	{
	  i.rm.reg = i.op[source].regs->reg_num;
	  i.rm.regmem = i.op[dest].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	}
      if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
	{
	  /* Outside 64-bit mode a REX bit can only have come from a
	     high control register; re-encode it with a LOCK prefix
	     instead (NOTE(review): this looks like AMD's alternative
	     %cr8 encoding — confirm).  */
	  if (!i.types[0].bitfield.control
	      && !i.types[1].bitfield.control)
	    abort ();
	  i.rex &= ~(REX_R | REX_B);
	  add_prefix (LOCK_PREFIX_OPCODE);
	}
    }
  else
    {			/* If it's not 2 reg operands...  */
      unsigned int mem;

      if (i.mem_operands)
	{
	  unsigned int fake_zero_displacement = 0;
	  unsigned int op;

	  /* Find the (unique) memory operand.  */
	  for (op = 0; op < i.operands; op++)
	    if (operand_type_check (i.types[op], anymem))
	      break;
	  gas_assert (op < i.operands);

	  if (i.tm.opcode_modifier.vecsib)
	    {
	      /* VSIB addressing: the index is a vector register and a
		 SIB byte is mandatory.  */
	      if (i.index_reg->reg_num == RegEiz
		  || i.index_reg->reg_num == RegRiz)
		abort ();

	      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
	      if (!i.base_reg)
		{
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		}
	      i.sib.index = i.index_reg->reg_num;
	      if ((i.index_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_X;
	      if ((i.index_reg->reg_flags & RegVRex) != 0)
		i.vrex |= REX_X;
	    }

	  default_seg = &ds;

	  if (i.base_reg == 0)
	    {
	      i.rm.mode = 0;
	      if (!i.disp_operands)
		{
		  fake_zero_displacement = 1;
		  /* Instructions with VSIB byte need 32bit displacement
		     if there is no base register.  */
		  if (i.tm.opcode_modifier.vecsib)
		    i.types[op].bitfield.disp32 = 1;
		}
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* Operand is just <disp>  */
		  if (flag_code == CODE_64BIT)
		    {
		      /* 64bit mode overwrites the 32bit absolute
			 addressing by RIP relative addressing and
			 absolute addressing is encoded by one of the
			 redundant SIB forms.  */
		      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		      i.sib.base = NO_BASE_REGISTER;
		      i.sib.index = NO_INDEX_REGISTER;
		      i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
				     ? disp32s : disp32);
		    }
		  else if ((flag_code == CODE_16BIT)
			   ^ (i.prefix[ADDR_PREFIX] != 0))
		    {
		      i.rm.regmem = NO_BASE_REGISTER_16;
		      i.types[op] = disp16;
		    }
		  else
		    {
		      i.rm.regmem = NO_BASE_REGISTER;
		      i.types[op] = disp32;
		    }
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  /* !i.base_reg && i.index_reg  */
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}
	    }
	  /* RIP addressing for 64bit mode.  */
	  else if (i.base_reg->reg_num == RegRip ||
		   i.base_reg->reg_num == RegEip)
	    {
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      i.rm.regmem = NO_BASE_REGISTER;
	      i.types[op].bitfield.disp8 = 0;
	      i.types[op].bitfield.disp16 = 0;
	      i.types[op].bitfield.disp32 = 0;
	      i.types[op].bitfield.disp32s = 1;
	      i.types[op].bitfield.disp64 = 0;
	      i.types[op].bitfield.vec_disp8 = 0;
	      i.flags[op] |= Operand_PCrel;
	      if (! i.disp_operands)
		fake_zero_displacement = 1;
	    }
	  else if (i.base_reg->reg_type.bitfield.reg16)
	    {
	      /* 16-bit addressing: the base/index combination maps
		 directly onto a ModRM r/m value.  */
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      switch (i.base_reg->reg_num)
		{
		case 3: /* (%bx)  */
		  if (i.index_reg == 0)
		    i.rm.regmem = 7;
		  else /* (%bx,%si) -> 0, or (%bx,%di) -> 1  */
		    i.rm.regmem = i.index_reg->reg_num - 6;
		  break;
		case 5: /* (%bp)  */
		  default_seg = &ss;
		  if (i.index_reg == 0)
		    {
		      i.rm.regmem = 6;
		      if (operand_type_check (i.types[op], disp) == 0)
			{
			  /* fake (%bp) into 0(%bp)  */
			  if (i.tm.operand_types[op].bitfield.vec_disp8)
			    i.types[op].bitfield.vec_disp8 = 1;
			  else
			    i.types[op].bitfield.disp8 = 1;
			  fake_zero_displacement = 1;
			}
		    }
		  else /* (%bp,%si) -> 2, or (%bp,%di) -> 3  */
		    i.rm.regmem = i.index_reg->reg_num - 6 + 2;
		  break;
		default: /* (%si) -> 4 or (%di) -> 5  */
		  i.rm.regmem = i.base_reg->reg_num - 6 + 4;
		}
	      i.rm.mode = mode_from_disp_size (i.types[op]);
	    }
	  else /* i.base_reg and 32/64 bit mode  */
	    {
	      if (flag_code == CODE_64BIT
		  && operand_type_check (i.types[op], disp))
		{
		  /* In 64-bit mode keep only the disp8/vec_disp8 bits
		     and force a 32-bit displacement form.  */
		  i386_operand_type temp;
		  operand_type_set (&temp, 0);
		  temp.bitfield.disp8 = i.types[op].bitfield.disp8;
		  temp.bitfield.vec_disp8
		    = i.types[op].bitfield.vec_disp8;
		  i.types[op] = temp;
		  if (i.prefix[ADDR_PREFIX] == 0)
		    i.types[op].bitfield.disp32s = 1;
		  else
		    i.types[op].bitfield.disp32 = 1;
		}

	      if (!i.tm.opcode_modifier.vecsib)
		i.rm.regmem = i.base_reg->reg_num;
	      if ((i.base_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	      i.sib.base = i.base_reg->reg_num;
	      /* x86-64 ignores REX prefix bit here to avoid decoder
		 complications.  */
	      if (!(i.base_reg->reg_flags & RegRex)
		  && (i.base_reg->reg_num == EBP_REG_NUM
		      || i.base_reg->reg_num == ESP_REG_NUM))
		default_seg = &ss;
	      if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
		{
		  /* (%ebp)/(%rbp) cannot be encoded with mode 0;
		     fake a zero disp8.  */
		  fake_zero_displacement = 1;
		  if (i.tm.operand_types [op].bitfield.vec_disp8)
		    i.types[op].bitfield.vec_disp8 = 1;
		  else
		    i.types[op].bitfield.disp8 = 1;
		}
	      i.sib.scale = i.log2_scale_factor;
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* <disp>(%esp) becomes two byte modrm with no index
		     register.  We've already stored the code for esp
		     in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
		     Any base register besides %esp will not use the
		     extra modrm byte.  */
		  i.sib.index = NO_INDEX_REGISTER;
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}

	      if (i.disp_operands
		  && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		      || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
		i.rm.mode = 0;
	      else
		{
		  if (!fake_zero_displacement
		      && !i.disp_operands
		      && i.disp_encoding)
		    {
		      /* An explicit {disp8}/{disp32} pseudo prefix
			 forces a displacement even when none is
			 needed.  */
		      fake_zero_displacement = 1;
		      if (i.disp_encoding == disp_encoding_8bit)
			i.types[op].bitfield.disp8 = 1;
		      else
			i.types[op].bitfield.disp32 = 1;
		    }
		  i.rm.mode = mode_from_disp_size (i.types[op]);
		}
	    }

	  if (fake_zero_displacement)
	    {
	      /* Fakes a zero displacement assuming that i.types[op]
		 holds the correct displacement size.  */
	      expressionS *exp;

	      gas_assert (i.op[op].disps == 0);
	      exp = &disp_expressions[i.disp_operands++];
	      i.op[op].disps = exp;
	      exp->X_op = O_constant;
	      exp->X_add_number = 0;
	      exp->X_add_symbol = (symbolS *) 0;
	      exp->X_op_symbol = (symbolS *) 0;
	    }

	  mem = op;
	}
      else
	mem = ~0;

      if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
	{
	  if (operand_type_check (i.types[0], imm))
	    i.vex.register_specifier = NULL;
	  else
	    {
	      /* VEX.vvvv encodes one of the sources when the first
		 operand is not an immediate.  */
	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.vex.register_specifier = i.op[0].regs;
	      else
		i.vex.register_specifier = i.op[1].regs;
	    }

	  /* Destination is a XMM register encoded in the ModRM.reg
	     and VEX.R bit.  */
	  i.rm.reg = i.op[2].regs->reg_num;
	  if ((i.op[2].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;

	  /* ModRM.rm and VEX.B encodes the other source.  */
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;

	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.rm.regmem = i.op[1].regs->reg_num;
	      else
		i.rm.regmem = i.op[0].regs->reg_num;

	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
	{
	  i.vex.register_specifier = i.op[2].regs;
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;
	      i.rm.regmem = i.op[1].regs->reg_num;
	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      /* Fill in i.rm.reg or i.rm.regmem field with register operand
	 (if any) based on i.tm.extension_opcode.  Again, we must be
	 careful to make sure that segment/control/debug/test/MMX
	 registers are coded into the i.rm.reg field.  */
      else if (i.reg_operands)
	{
	  unsigned int op;
	  unsigned int vex_reg = ~0;

	  /* Locate the first register operand of any class.  */
	  for (op = 0; op < i.operands; op++)
	    if (i.types[op].bitfield.reg8
		|| i.types[op].bitfield.reg16
		|| i.types[op].bitfield.reg32
		|| i.types[op].bitfield.reg64
		|| i.types[op].bitfield.regmmx
		|| i.types[op].bitfield.regxmm
		|| i.types[op].bitfield.regymm
		|| i.types[op].bitfield.regbnd
		|| i.types[op].bitfield.regzmm
		|| i.types[op].bitfield.regmask
		|| i.types[op].bitfield.sreg2
		|| i.types[op].bitfield.sreg3
		|| i.types[op].bitfield.control
		|| i.types[op].bitfield.debug
		|| i.types[op].bitfield.test)
	      break;

	  if (vex_3_sources)
	    op = dest;
	  else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only
		 source operand is encoded in VEX prefix.  */
	      gas_assert (mem != (unsigned int) ~0);

	      if (op > mem)
		{
		  vex_reg = op++;
		  gas_assert (op < i.operands);
		}
	      else
		{
		  /* Check register-only source operand when two source
		     operands are swapped.  */
		  if (!i.tm.operand_types[op].bitfield.baseindex
		      && i.tm.operand_types[op + 1].bitfield.baseindex)
		    {
		      vex_reg = op;
		      op += 2;
		      gas_assert (mem == (vex_reg + 1)
				  && op < i.operands);
		    }
		  else
		    {
		      vex_reg = op + 1;
		      gas_assert (vex_reg < i.operands);
		    }
		}
	    }
	  else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
	    {
	      /* For instructions with VexNDD, the register destination
		 is encoded in VEX prefix.  */
	      if (i.mem_operands == 0)
		{
		  /* There is no memory operand.  */
		  gas_assert ((op + 2) == i.operands);
		  vex_reg = op + 1;
		}
	      else
		{
		  /* There are only 2 operands.  */
		  gas_assert (op < 2 && i.operands == 2);
		  vex_reg = 1;
		}
	    }
	  else
	    gas_assert (op < i.operands);

	  if (vex_reg != (unsigned int) ~0)
	    {
	      i386_operand_type *type = &i.tm.operand_types[vex_reg];

	      /* The VEX.vvvv operand must be a GPR or vector/mask
		 register.  */
	      if (type->bitfield.reg32 != 1
		  && type->bitfield.reg64 != 1
		  && !operand_type_equal (type, &regxmm)
		  && !operand_type_equal (type, &regymm)
		  && !operand_type_equal (type, &regzmm)
		  && !operand_type_equal (type, &regmask))
		abort ();

	      i.vex.register_specifier = i.op[vex_reg].regs;
	    }

	  /* Don't set OP operand twice.  */
	  if (vex_reg != op)
	    {
	      /* If there is an extension opcode to put here, the
		 register number must be put into the regmem field.  */
	      if (i.tm.extension_opcode != None)
		{
		  i.rm.regmem = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_B;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_B;
		}
	      else
		{
		  i.rm.reg = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_R;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_R;
		}
	    }

	  /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
	     must set it to 3 to indicate this is a register operand
	     in the regmem field.  */
	  if (!i.mem_operands)
	    i.rm.mode = 3;
	}

      /* Fill in i.rm.reg field with extension opcode (if any).  */
      if (i.tm.extension_opcode != None)
	i.rm.reg = i.tm.extension_opcode;
    }
  return default_seg;
}
6605
/* Emit a relaxable branch (conditional or unconditional jump to a
   symbol).  Writes the prefixes and first opcode byte into the fixed
   part of the frag, then ends the frag with frag_var so md_convert_frag
   can later widen the branch to the displacement size the target
   distance requires.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* {disp32} pseudo prefix forces the large form from the start.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  /* Count the prefix bytes we will emit ourselves and take them out
     of i.prefixes so the leftover-prefix warning below is accurate.  */
  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol;  End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Pick the relax state: unconditional jmp, or a conditional jump
     (386+ can widen it; pre-386 conditional jumps need the jump-around
     sequence, hence COND_JUMP86).  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
6690
/* Emit a non-relaxable jump with an explicit displacement size:
   either a 1-byte-displacement insn (loop/jecxz family) or a
   jump with a 2- or 4-byte displacement.  A fixup is created for
   the displacement expression.  */
static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      /* An operand-size prefix toggles the displacement width.  */
      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* Emit the opcode (high byte first for 2-byte opcodes), then
     reserve `size' bytes for the displacement.  */
  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6772
/* Emit a direct inter-segment (far) jump/call: opcode, then the
   2- or 4-byte offset (operand 1), then the 2-byte segment selector
   (operand 0).  The segment must be an absolute expression.  */
static void
output_interseg_jump (void)
{
  char *p;
  int size;
  int prefix;
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* An operand-size prefix toggles the offset width.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* A 16-bit offset must fit either interpretation (signed or
	 unsigned) of a 16-bit word.  */
      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6836
/* Emit the current instruction (global `i') into the output frag:
   dispatches jumps to the specialized emitters above, otherwise
   writes prefixes, VEX/EVEX prefix bytes, opcode, ModRM/SIB, and
   finally any displacement and immediate operands.  */
static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  /* Remember where the instruction begins; output_disp/output_imm
     need this to compute GOTPC self-reference adjustments.  */
  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Some processors fail on LOCK prefix. This options makes
	 assembler ignore LOCK prefix and serves as a workaround.  */
      if (omit_lock_prefix)
	{
	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
	    return;
	  i.prefix[LOCK_PREFIX] = 0;
	}

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* Opcodes longer than one byte may encode a mandatory
	     prefix in their top byte; pull it out and emit it as a
	     real prefix.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* For PadLock insns, don't emit a duplicate
			 REPE prefix if the user already supplied
			 one.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with
	     R_X86_64_GOTTPOFF relocation so that linker can safely
	     perform IE->LE optimization.  */
	  if (x86_elf_abi == X86_64_X32_ABI
	      && i.operands == 2
	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
	      && i.prefix[REX_PREFIX] == 0)
	    add_prefix (REX_OPCODE);
#endif

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  /* VEX/EVEX encoded: only segment and address-size prefixes
	     may appear as raw bytes; everything else is folded into
	     the VEX/EVEX prefix itself.  */
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  /* Emit the high opcode bytes first, then fall out to the
	     shared code that writes the final two bytes.  */
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386  */
}
7020
7021 /* Return the size of the displacement operand N. */
7022
7023 static int
7024 disp_size (unsigned int n)
7025 {
7026 int size = 4;
7027
7028 /* Vec_Disp8 has to be 8bit. */
7029 if (i.types[n].bitfield.vec_disp8)
7030 size = 1;
7031 else if (i.types[n].bitfield.disp64)
7032 size = 8;
7033 else if (i.types[n].bitfield.disp8)
7034 size = 1;
7035 else if (i.types[n].bitfield.disp16)
7036 size = 2;
7037 return size;
7038 }
7039
7040 /* Return the size of the immediate operand N. */
7041
7042 static int
7043 imm_size (unsigned int n)
7044 {
7045 int size = 4;
7046 if (i.types[n].bitfield.imm64)
7047 size = 8;
7048 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7049 size = 1;
7050 else if (i.types[n].bitfield.imm16)
7051 size = 2;
7052 return size;
7053 }
7054
7055 static void
7056 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7057 {
7058 char *p;
7059 unsigned int n;
7060
7061 for (n = 0; n < i.operands; n++)
7062 {
7063 if (i.types[n].bitfield.vec_disp8
7064 || operand_type_check (i.types[n], disp))
7065 {
7066 if (i.op[n].disps->X_op == O_constant)
7067 {
7068 int size = disp_size (n);
7069 offsetT val = i.op[n].disps->X_add_number;
7070
7071 if (i.types[n].bitfield.vec_disp8)
7072 val >>= i.memshift;
7073 val = offset_in_range (val, size);
7074 p = frag_more (size);
7075 md_number_to_chars (p, val, size);
7076 }
7077 else
7078 {
7079 enum bfd_reloc_code_real reloc_type;
7080 int size = disp_size (n);
7081 int sign = i.types[n].bitfield.disp32s;
7082 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7083
7084 /* We can't have 8 bit displacement here. */
7085 gas_assert (!i.types[n].bitfield.disp8);
7086
7087 /* The PC relative address is computed relative
7088 to the instruction boundary, so in case immediate
7089 fields follows, we need to adjust the value. */
7090 if (pcrel && i.imm_operands)
7091 {
7092 unsigned int n1;
7093 int sz = 0;
7094
7095 for (n1 = 0; n1 < i.operands; n1++)
7096 if (operand_type_check (i.types[n1], imm))
7097 {
7098 /* Only one immediate is allowed for PC
7099 relative address. */
7100 gas_assert (sz == 0);
7101 sz = imm_size (n1);
7102 i.op[n].disps->X_add_number -= sz;
7103 }
7104 /* We should find the immediate. */
7105 gas_assert (sz != 0);
7106 }
7107
7108 p = frag_more (size);
7109 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
7110 if (GOT_symbol
7111 && GOT_symbol == i.op[n].disps->X_add_symbol
7112 && (((reloc_type == BFD_RELOC_32
7113 || reloc_type == BFD_RELOC_X86_64_32S
7114 || (reloc_type == BFD_RELOC_64
7115 && object_64bit))
7116 && (i.op[n].disps->X_op == O_symbol
7117 || (i.op[n].disps->X_op == O_add
7118 && ((symbol_get_value_expression
7119 (i.op[n].disps->X_op_symbol)->X_op)
7120 == O_subtract))))
7121 || reloc_type == BFD_RELOC_32_PCREL))
7122 {
7123 offsetT add;
7124
7125 if (insn_start_frag == frag_now)
7126 add = (p - frag_now->fr_literal) - insn_start_off;
7127 else
7128 {
7129 fragS *fr;
7130
7131 add = insn_start_frag->fr_fix - insn_start_off;
7132 for (fr = insn_start_frag->fr_next;
7133 fr && fr != frag_now; fr = fr->fr_next)
7134 add += fr->fr_fix;
7135 add += p - frag_now->fr_literal;
7136 }
7137
7138 if (!object_64bit)
7139 {
7140 reloc_type = BFD_RELOC_386_GOTPC;
7141 i.op[n].imms->X_add_number += add;
7142 }
7143 else if (reloc_type == BFD_RELOC_64)
7144 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7145 else
7146 /* Don't do the adjustment for x86-64, as there
7147 the pcrel addressing is relative to the _next_
7148 insn, and that is taken care of in other code. */
7149 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7150 }
7151 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7152 i.op[n].disps, pcrel, reloc_type);
7153 }
7154 }
7155 }
7156 }
7157
/* Emit the immediate bytes (or fixups, for non-constant immediates)
   for every immediate operand of the current instruction.
   INSN_START_FRAG/INSN_START_OFF mark where the insn began, needed
   for the _GLOBAL_OFFSET_TABLE_ self-reference (GOTPC) adjustment
   explained in the long comment below.  */
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* A 32-bit immediate in a 64-bit operation is
		 sign-extended by the CPU, so ask for a signed
		 reloc.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /*   This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *   The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       *   Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* Number of bytes from the start of the insn to this
		     immediate field, possibly spanning several frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
7284 \f
/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.  */

/* Signedness hint passed to reloc (): -1 means unspecified; set to 1
   temporarily by signed_cons () below.  */
static int cons_sign = -1;

/* Create a fixup of LEN bytes at offset OFF in FRAG for expression
   EXP, converting the generic reloc R to the size-appropriate x86
   reloc.  On PE targets, O_secrel expressions become section-relative
   relocations.  */
void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
		  expressionS *exp, bfd_reloc_code_real_type r)
{
  r = reloc (len, 0, cons_sign, r);

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      r = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, off, len, exp, 0, r);
}
7305
7306 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7307 purpose of the `.dc.a' internal pseudo-op. */
7308
7309 int
7310 x86_address_bytes (void)
7311 {
7312 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7313 return 4;
7314 return stdoutput->arch_info->bits_per_address / 8;
7315 }
7316
7317 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7318 || defined (LEX_AT)
7319 # define lex_got(reloc, adjust, types) NULL
7320 #else
7321 /* Parse operands of the form
7322 <symbol>@GOTOFF+<nnn>
7323 and similar .plt or .got references.
7324
7325 If we find one, set up the correct relocation in RELOC and copy the
7326 input string, minus the `@GOTOFF' into a malloc'd buffer for
7327 parsing by the calling routine. Return this buffer, and if ADJUST
7328 is non-null set it to the length of the string we removed from the
7329 input line. Otherwise return NULL. */
/* Parse a <symbol>@RELOC suffix (GOTOFF, PLT, TLS variants, ...) in
   the operand at input_line_pointer.  On a match, set *REL to the
   BFD reloc for the current object format, optionally narrow *TYPES
   to the operand types the reloc permits, and return a malloc'd copy
   of the operand text with the @RELOC token blanked out, for
   re-parsing by the caller.  *ADJUST (if non-NULL) receives the
   number of characters removed.  Returns NULL if no reloc token is
   present (the text may be a symbol version string).  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    /* rel[0] is the 32-bit reloc, rel[1] the 64-bit one; a dummy
       entry means the token is invalid for that object format.  */
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Find the '@' that introduces a reloc token, if any, before the
     end of the operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Any GOT-relative token (all but SIZE, entry 0)
		 requires the GOT symbol to exist.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7480 #endif
7481
7482 #ifdef TE_PE
7483 #ifdef lex_got
7484 #undef lex_got
7485 #endif
7486 /* Parse operands of the form
7487 <symbol>@SECREL32+<nnn>
7488
7489 If we find one, set up the correct relocation in RELOC and copy the
7490 input string, minus the `@SECREL32' into a malloc'd buffer for
7491 parsing by the calling routine. Return this buffer, and if ADJUST
7492 is non-null set it to the length of the string we removed from the
7493 input line. Otherwise return NULL.
7494
7495 This function is copied from the ELF version above adjusted for PE targets. */
7496
/* PE variant of lex_got: recognize only the @SECREL32 reloc token.
   On a match, set *REL, optionally narrow *TYPES, set *ADJUST to the
   token length, and return a malloc'd copy of the operand text with
   the token blanked out.  Returns NULL when no token is present.  */
static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
	 int *adjust ATTRIBUTE_UNUSED,
	 i386_operand_type *types)
{
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
				       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Find the '@' that introduces a reloc token, if any, before the
     end of the operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@SECREL321 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7582
7583 #endif /* TE_PE */
7584
/* Target hook for parsing a SIZE-byte data expression (e.g. from
   .long/.quad).  Handles @GOTOFF-style reloc suffixes via lex_got,
   parses the expression into EXP, and returns the reloc to use
   (NO_RELOC if none).  Intel-syntax expression parsing is disabled
   around the call and the result simplified afterwards.  */
bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* Temporarily negate intel_syntax so the generic expression code
     runs in AT&T mode; restored (and tested) below.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
7639
7640 static void
7641 signed_cons (int size)
7642 {
7643 if (flag_code == CODE_64BIT)
7644 cons_sign = 1;
7645 cons (size);
7646 cons_sign = -1;
7647 }
7648
7649 #ifdef TE_PE
/* Handle the PE-specific .secrel32 directive: emit one or more
   comma-separated 4-byte section-relative values.  Symbolic
   expressions are marked O_secrel so x86_cons_fix_new converts them
   to BFD_RELOC_32_SECREL fixups.  */
static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
}
7668 #endif
7669
/* Handle Vector operations.  */

/* Parse AVX-512 style vector operation suffixes at OP_STRING, i.e.
   a sequence of "{...}" groups up to OP_END (or NUL if OP_END is
   NULL): broadcasts ({1to2}/{1to4}/{1to8}/{1to16}), a write mask
   ({%kN}), and zeroing ({z}).  Results are recorded in the global
   `i' (i.broadcast, i.mask).  Returns the updated parse position, or
   NULL after issuing a diagnostic.  */
static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '4')
		bcst_type = BROADCAST_1TO4;
	      else if (*op_string == '2')
		bcst_type = BROADCAST_1TO2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  /* Extra advance for the second digit of "16".  */
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  /* A {z} was seen first; fill in the mask register
		     now, but reject a second mask register.  */
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;
	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  return op_string;
}
7807
/* Parse the immediate operand starting at IMM_START.  Records the
   expression in i.op[this_operand].imms, handles an @RELOC suffix
   via lex_got and any trailing vector operations, then hands off to
   i386_finalize_immediate for validation.  Returns nonzero on
   success, 0 on error.  */
static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start with all operand types permitted; lex_got may narrow.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Parse out of a private buffer with the reloc token removed, if
     there was one.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc suffix makes a bare constant or register meaningless;
	 force an error in i386_finalize_immediate.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
7866
/* Classify the parsed immediate expression EXP and record the matching
   immediate operand types for the current operand.  TYPES is the set of
   immediate types permitted by any reloc specifier seen during parsing;
   IMM_START (may be NULL to suppress diagnostics) is the original
   operand text for error messages.  Returns 1 on success, 0 on error.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      /* AT&T syntax: something like "$%eax" is not a valid immediate.  */
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
7923
7924 static char *
7925 i386_scale (char *scale)
7926 {
7927 offsetT val;
7928 char *save = input_line_pointer;
7929
7930 input_line_pointer = scale;
7931 val = get_absolute_expression ();
7932
7933 switch (val)
7934 {
7935 case 1:
7936 i.log2_scale_factor = 0;
7937 break;
7938 case 2:
7939 i.log2_scale_factor = 1;
7940 break;
7941 case 4:
7942 i.log2_scale_factor = 2;
7943 break;
7944 case 8:
7945 i.log2_scale_factor = 3;
7946 break;
7947 default:
7948 {
7949 char sep = *input_line_pointer;
7950
7951 *input_line_pointer = '\0';
7952 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7953 scale);
7954 *input_line_pointer = sep;
7955 input_line_pointer = save;
7956 return NULL;
7957 }
7958 }
7959 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7960 {
7961 as_warn (_("scale factor of %d without an index register"),
7962 1 << i.log2_scale_factor);
7963 i.log2_scale_factor = 0;
7964 }
7965 scale = input_line_pointer;
7966 input_line_pointer = save;
7967 return scale;
7968 }
7969
/* Parse the displacement substring [DISP_START, DISP_END) of the
   current memory or branch operand into I.  Computes the set of
   permissible displacement sizes from the code mode, any address/data
   size prefix, and the instruction template, then parses the
   expression and hands it to i386_finalize_displacement.  Returns 1 on
   success, 0 on error.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Memory operand (or absolute jump): width follows the address
	 size, possibly flipped by an address size prefix.  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  /* A mnemonic suffix of the "other" size acts like a data
	     size prefix here.  */
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip any @GOT-style reloc specifier before parsing.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant or register together with a GOT reloc is
	 invalid; make the finalize step reject it.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
8119
8120 static int
8121 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8122 i386_operand_type types, const char *disp_start)
8123 {
8124 i386_operand_type bigdisp;
8125 int ret = 1;
8126
8127 /* We do this to make sure that the section symbol is in
8128 the symbol table. We will ultimately change the relocation
8129 to be relative to the beginning of the section. */
8130 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8131 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8132 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8133 {
8134 if (exp->X_op != O_symbol)
8135 goto inv_disp;
8136
8137 if (S_IS_LOCAL (exp->X_add_symbol)
8138 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8139 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8140 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8141 exp->X_op = O_subtract;
8142 exp->X_op_symbol = GOT_symbol;
8143 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8144 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8145 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8146 i.reloc[this_operand] = BFD_RELOC_64;
8147 else
8148 i.reloc[this_operand] = BFD_RELOC_32;
8149 }
8150
8151 else if (exp->X_op == O_absent
8152 || exp->X_op == O_illegal
8153 || exp->X_op == O_big)
8154 {
8155 inv_disp:
8156 as_bad (_("missing or invalid displacement expression `%s'"),
8157 disp_start);
8158 ret = 0;
8159 }
8160
8161 else if (flag_code == CODE_64BIT
8162 && !i.prefix[ADDR_PREFIX]
8163 && exp->X_op == O_constant)
8164 {
8165 /* Since displacement is signed extended to 64bit, don't allow
8166 disp32 and turn off disp32s if they are out of range. */
8167 i.types[this_operand].bitfield.disp32 = 0;
8168 if (!fits_in_signed_long (exp->X_add_number))
8169 {
8170 i.types[this_operand].bitfield.disp32s = 0;
8171 if (i.types[this_operand].bitfield.baseindex)
8172 {
8173 as_bad (_("0x%lx out range of signed 32bit displacement"),
8174 (long) exp->X_add_number);
8175 ret = 0;
8176 }
8177 }
8178 }
8179
8180 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8181 else if (exp->X_op != O_constant
8182 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8183 && exp_seg != absolute_section
8184 && exp_seg != text_section
8185 && exp_seg != data_section
8186 && exp_seg != bss_section
8187 && exp_seg != undefined_section
8188 && !bfd_is_com_section (exp_seg))
8189 {
8190 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8191 ret = 0;
8192 }
8193 #endif
8194
8195 /* Check if this is a displacement only operand. */
8196 bigdisp = i.types[this_operand];
8197 bigdisp.bitfield.disp8 = 0;
8198 bigdisp.bitfield.disp16 = 0;
8199 bigdisp.bitfield.disp32 = 0;
8200 bigdisp.bitfield.disp32s = 0;
8201 bigdisp.bitfield.disp64 = 0;
8202 if (operand_type_all_zero (&bigdisp))
8203 i.types[this_operand] = operand_type_and (i.types[this_operand],
8204 types);
8205
8206 return ret;
8207 }
8208
/* Make sure the memory operand we've been dealt is valid.
   Return 1 on success, 0 on a failure.

   Validates base/index register combinations for the current address
   mode (inferring an address size prefix from the registers when
   needed), and enforces the fixed rSI/rDI/rBX addressing required by
   string instructions.  Diagnoses via as_bad/as_warn.  */

static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode;

  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  /* Select the operand type whose esseg bit decides between the
	     rSI (source) and rDI (ES-segment destination) register.  */
	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong register, but right size: warn and carry on; the
	     hardware uses the implied register anyway.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;
	}
      else
	{
	  /* 16-bit checks.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
8371
8372 /* Handle vector immediates. */
8373
8374 static int
8375 RC_SAE_immediate (const char *imm_start)
8376 {
8377 unsigned int match_found, j;
8378 const char *pstr = imm_start;
8379 expressionS *exp;
8380
8381 if (*pstr != '{')
8382 return 0;
8383
8384 pstr++;
8385 match_found = 0;
8386 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8387 {
8388 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8389 {
8390 if (!i.rounding)
8391 {
8392 rc_op.type = RC_NamesTable[j].type;
8393 rc_op.operand = this_operand;
8394 i.rounding = &rc_op;
8395 }
8396 else
8397 {
8398 as_bad (_("duplicated `%s'"), imm_start);
8399 return 0;
8400 }
8401 pstr += RC_NamesTable[j].len;
8402 match_found = 1;
8403 break;
8404 }
8405 }
8406 if (!match_found)
8407 return 0;
8408
8409 if (*pstr++ != '}')
8410 {
8411 as_bad (_("Missing '}': '%s'"), imm_start);
8412 return 0;
8413 }
8414 /* RC/SAE immediate string should contain nothing more. */;
8415 if (*pstr != 0)
8416 {
8417 as_bad (_("Junk after '}': '%s'"), imm_start);
8418 return 0;
8419 }
8420
8421 exp = &im_expressions[i.imm_operands++];
8422 i.op[this_operand].imms = exp;
8423
8424 exp->X_op = O_constant;
8425 exp->X_add_number = 0;
8426 exp->X_add_symbol = (symbolS *) 0;
8427 exp->X_op_symbol = (symbolS *) 0;
8428
8429 i.types[this_operand].bitfield.imm8 = 1;
8430 return 1;
8431 }
8432
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.

   Recognizes, in AT&T syntax: an optional absolute-jump '*' prefix, a
   bare register, a segment-override prefix (%seg:...), an immediate
   ($...), an RC/SAE vector operand ({rn-sae} etc.), or a memory
   reference "disp(base, index, scale)" with optional vector operation
   suffixes in braces.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}

      /* Handle vector operations.  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Plain register operand: a bare register can never be a
	 base/index.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
8761 \f
8762 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8763 that an rs_machine_dependent frag may reach. */
8764
8765 unsigned int
8766 i386_frag_max_var (fragS *frag)
8767 {
8768 /* The only relaxable frags are for jumps.
8769 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8770 gas_assert (frag->fr_type == rs_machine_dependent);
8771 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8772 }
8773
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Widen the jump
	 to its largest form now and emit a fix instead of relaxing.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var holds a caller-specified reloc type, if any.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Keep the short byte-displacement form and emit a
		 signed 8-bit pc-relative fix.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
8900
/* Called after relax() is finished.

   In:	Address of frag.
	fr_type == rs_machine_dependent.
	fr_subtype is what the address relaxed to.

   Out:	Any fixSs and constants are set up.
	Caller will turn frag into a ".space 0".  */

void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode to its widened form; the bytes reserved for
	 it were accounted for when the frag was created.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;		/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;		/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;		/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;		/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negated short conditional jump over an unconditional
	     16-bit jump (no 16-bit conditional near jump on the 86).  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
9008 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location inside the frag's literal buffer to be patched.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Convert absolute relocation types to their PC-relative
     counterparts when the fixup itself is PC-relative.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
	value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* Kept for the linker's garbage-collection of vtables; never
	   resolved here.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA the addend lives in the relocation, not the section
	 contents, so zero out the in-place bytes.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
9195 \f
9196 char *
9197 md_atof (int type, char *litP, int *sizeP)
9198 {
9199 /* This outputs the LITTLENUMs in REVERSE order;
9200 in accord with the bigendian 386. */
9201 return ieee_md_atof (type, litP, sizeP, FALSE);
9202 }
9203 \f
/* Scratch buffer for output_invalid: big enough for "(0xNN)".  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the offending input character C for a diagnostic message:
   printable characters appear quoted as 'c', everything else as a hex
   byte (0xNN).  The returned pointer refers to a static buffer that
   the next call overwrites.  */

static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  return output_invalid_buf;
}
9217
/* REG_STRING starts *before* REGISTER_PREFIX.  */

/* Parse a literal register name at REG_STRING.  On success return its
   reg_entry and set *END_OP just past the consumed text; return NULL
   when the text is not a register valid for the current syntax, CPU
   architecture and code size.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, mapping each input byte through
     register_chars; the copy stops at the first byte that cannot be
     part of a register name (maps to NUL).  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  /* %st(i): index off the st(0) table entry.  */
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* Below: reject register classes the selected architecture does not
     provide.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
      && !cpu_arch_flags.bitfield.cpuavx512f)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.need_vrex = 1;
    }

  /* REX-only / 64-bit registers require 64-bit mode, except for
     control registers on CPUs with long mode support.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The flat pseudo segment register is Intel-syntax only.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
9343
/* REG_STRING starts *before* REGISTER_PREFIX.  */

/* Like parse_real_register, but additionally accepts symbols that live
   in reg_section (symbols equated to registers); their value
   expression carries the register index.  */

static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      /* Temporarily aim the scanner at REG_STRING so the name can be
	 isolated and looked up as a symbol.  */
      input_line_pointer = reg_string;
      c = get_symbol_end ();
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  if ((r->reg_flags & RegVRex))
	    i.need_vrex = 1;
	  *end_op = input_line_pointer;
	}
      /* Undo get_symbol_end's NUL and restore the scanner position.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
9381
/* Expression-parser hook: try to recognize NAME as a register (or, in
   Intel syntax, an Intel-specific operand name).  *NEXTCHARP holds the
   character that terminated NAME (the scanner replaced it with NUL at
   *input_line_pointer).  Returns 1 with E filled in when NAME was
   consumed, otherwise 0.  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Restore the terminator so parse_register can look past the end of
     NAME (e.g. for "%st (1)" style input).  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      /* Register consumed: re-terminate at the new scan position and
	 report it as an O_register expression.  */
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  /* Not a register: put the scanner and terminator back.  */
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
9402
/* Expression-parser hook for operand text the generic code does not
   understand: a REGISTER_PREFIX'ed register, or (Intel syntax only) a
   '['-bracketed index expression.  Leaves E untouched otherwise.  */

void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  /* Wrap the bracketed expression up as an O_index node.  */
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* No closing ']': report nothing parsed and back up to the
	     opening bracket.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
9441
9442 \f
/* Short options recognized by md_parse_option.  -k, -V, -Q: and -s are
   accepted only on ELF targets for SVR4/FreeBSD compatibility (mostly
   ignored); -q and -n are always available.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif
9448
/* Codes for the long-only options below, allocated above the
   target-independent option space.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)

/* Long option table handed to the generic option parser; entries are
   dispatched on the OPTION_* codes in md_parse_option.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_OMIT_LOCK_PREFIX},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
9504
/* Handle a machine dependent command line option C (a short option
   character or an OPTION_* code from md_longopts), with argument ARG
   when the option takes one.  Returns 1 if the option was handled
   here, 0 to let the generic code report it as unknown.  */

int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	const char **list, **l;

	/* --64: only honored when some 64-bit x86-64 target was
	   compiled into BFD.  */
	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  /* --x32 (the ILP32 x86-64 ABI) needs the elf32-x86-64
	     target compiled in.  */
	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
	char *n, *t;
	const char *s;

	/* Remove '/' from the comment-character set so it can be used
	   as the division operator.  */
	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* ARG has the form "CPU[+EXT...]": the first component names a
	 processor, each later '+'-separated component toggles an ISA
	 extension (extension table entries start with '.').  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Append the extension name (".ext") to the
			 accumulated sub-arch string.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL );
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      if (strcmp (arg, "128") == 0)
	evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
	evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
	evexlig = evexl512;
      else
	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXRCIG:
      if (strcmp (arg, "rne") == 0)
	evexrcig = rne;
      else if (strcmp (arg, "rd") == 0)
	evexrcig = rd;
      else if (strcmp (arg, "ru") == 0)
	evexrcig = ru;
      else if (strcmp (arg, "rz") == 0)
	evexrcig = rz;
      else
	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      if (strcmp (arg, "0") == 0)
	evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
	evexwig = evexw1;
      else
	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

# if defined (TE_PE) || defined (TE_PEP)
    case OPTION_MBIG_OBJ:
      use_big_obj = 1;
      break;
#endif

    case OPTION_OMIT_LOCK_PREFIX:
      if (strcasecmp (arg, "yes") == 0)
	omit_lock_prefix = 1;
      else if (strcasecmp (arg, "no") == 0)
	omit_lock_prefix = 0;
      else
	as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
9815
9816 #define MESSAGE_TEMPLATE \
9817 " "
9818
/* Print to STREAM the processor names (EXT == 0) or ISA extension
   names (EXT != 0) from the cpu_arch table, comma separated and
   wrapped so that each output line fits inside MESSAGE_TEMPLATE.
   When CHECK is nonzero, processors unable to run i386 code are
   omitted (used when listing valid -march= CPUs).  */

static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  /* Start 27 columns in — presumably to line up under the option text
     in md_show_usage; confirm against the usage layout.  */
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      /* Drop the leading '.' extension marker.  */
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0" */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush the final (partial) line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
9894
/* Print the i386-specific command line options to STREAM for the
   assembler's --help output.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q ignored\n\
  -V print assembler version number\n\
  -k ignored\n"));
#endif
  fprintf (stream, _("\
  -n Do not optimize code alignment\n\
  -q quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
  generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
  EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
  check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
  check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
  length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
  length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
  for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
  encode EVEX instructions with specific EVEX.RC value\n\
  for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel] use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix add BND prefix for all valid branches\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
  strip all lock prefixes\n"));
}
9974
9975 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
9976 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9977 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9978
/* Pick the target format to use.  */

/* Map default_arch and the configured output flavour to a BFD target
   name, updating global code/ABI state (flag_code, x86_elf_abi,
   object_64bit, use_rela_relocations, ISA/tune defaults) on the way.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* Plain "x86_64" is the LP64 ABI; anything longer (i.e.
	 "x86_64:32") selects the x32 ILP32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* If -march=/-mtune= left the flag sets empty, default them from
     the cpu_arch table (entry 1 for 64-bit, entry 0 otherwise).  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32: 64-bit code but 32-bit relocations only.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* The Intel L1OM/K1OM coprocessors use their own ELF targets
	   and exist only in 64-bit form.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
10077
10078 #endif /* OBJ_MAYBE_ more than one */
10079
10080 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a ".note" section containing an NT_ARCH ELF note whose name is
   the current cpu_arch_name and whose descriptor is empty, then
   restore the previously current (sub)section.  No-op unless ELF and
   an architecture name has been set.  */

void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      asection *seg = now_seg;		/* Save current (sub)section.  */
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      /* Note header: name size (incl. NUL), empty descriptor, type;
	 followed by the NUL-terminated architecture name.  */
      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad the section to a 4-byte (2**2) boundary.  */
      frag_align (2, 0, 0);

      /* Return to the section we came from.  */
      subseg_set (seg, subseg);
    }
}
10120 #endif
10121 \f
10122 symbolS *
10123 md_undefined_symbol (char *name)
10124 {
10125 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10126 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10127 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10128 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10129 {
10130 if (!GOT_symbol)
10131 {
10132 if (symbol_find (name))
10133 as_bad (_("GOT already in symbol table"));
10134 GOT_symbol = symbol_new (name, undefined_section,
10135 (valueT) 0, &zero_address_frag);
10136 };
10137 return GOT_symbol;
10138 }
10139 return 0;
10140 }
10141
10142 /* Round up a section size to the appropriate boundary. */
10143
10144 valueT
10145 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10146 {
10147 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10148 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10149 {
10150 /* For a.out, force the section size to be aligned. If we don't do
10151 this, BFD will align it for us, but it will not write out the
10152 final bytes of the section. This may be a bug in BFD, but it is
10153 easier to fix it here since that is how the other a.out targets
10154 work. */
10155 int align;
10156
10157 align = bfd_get_section_alignment (stdoutput, segment);
10158 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10159 }
10160 #endif
10161
10162 return size;
10163 }
10164
10165 /* On the i386, PC-relative offsets are relative to the start of the
10166 next instruction. That is, the address of the offset, plus its
10167 size, since the offset is always the last part of the insn. */
10168
10169 long
10170 md_pcrel_from (fixS *fixP)
10171 {
10172 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10173 }
10174
10175 #ifndef I386COFF
10176
10177 static void
10178 s_bss (int ignore ATTRIBUTE_UNUSED)
10179 {
10180 int temp;
10181
10182 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10183 if (IS_ELF)
10184 obj_elf_section_change_hook ();
10185 #endif
10186 temp = get_absolute_expression ();
10187 subseg_set (bss_section, (subsegT) temp);
10188 demand_empty_rest_of_line ();
10189 }
10190
10191 #endif
10192
10193 void
10194 i386_validate_fix (fixS *fixp)
10195 {
10196 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10197 {
10198 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10199 {
10200 if (!object_64bit)
10201 abort ();
10202 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10203 }
10204 else
10205 {
10206 if (!object_64bit)
10207 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10208 else
10209 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10210 }
10211 fixp->fx_subsy = 0;
10212 }
10213 }
10214
/* Convert the internal fixup FIXP into a BFD relocation (arelent) for
   the object writer.  Returns NULL when the fixup has been fully
   resolved here (size relocations against local symbols); otherwise a
   freshly allocated arelent.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  /* First map the internal fixup type to a BFD reloc code.  */
  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through.  SIZE relocs that cannot be resolved locally are
	 emitted verbatim, like the GOT/PLT/TLS relocations below.  */

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These types pass through unchanged.  */
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  A PC-relative 32S is chosen by size in the
	 default case like any other reloc.  */
    default:
      /* Everything else is selected purely by size and pcrel-ness.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* References to _GLOBAL_OFFSET_TABLE_ itself become GOTPC relocs so
     the linker can compute the GOT base.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  /* xmalloc aborts on failure, so no NULL checks are needed.  */
  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects only have 32-bit address space; diagnose 64-bit
	 relocation types that cannot be represented there.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these PC-relative GOT/PLT/TLS forms the size of the
	     field has already been accounted for in the offset.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
10428
10429 #include "tc-i386-intel.c"
10430
10431 void
10432 tc_x86_parse_to_dw2regnum (expressionS *exp)
10433 {
10434 int saved_naked_reg;
10435 char saved_register_dot;
10436
10437 saved_naked_reg = allow_naked_reg;
10438 allow_naked_reg = 1;
10439 saved_register_dot = register_chars['.'];
10440 register_chars['.'] = '.';
10441 allow_pseudo_reg = 1;
10442 expression_and_evaluate (exp);
10443 allow_pseudo_reg = 0;
10444 register_chars['.'] = saved_register_dot;
10445 allow_naked_reg = saved_naked_reg;
10446
10447 if (exp->X_op == O_register && exp->X_add_number >= 0)
10448 {
10449 if ((addressT) exp->X_add_number < i386_regtab_size)
10450 {
10451 exp->X_op = O_constant;
10452 exp->X_add_number = i386_regtab[exp->X_add_number]
10453 .dw2_regnum[flag_code >> 1];
10454 }
10455 else
10456 exp->X_op = O_illegal;
10457 }
10458 }
10459
10460 void
10461 tc_x86_frame_initial_instructions (void)
10462 {
10463 static unsigned int sp_regno[2];
10464
10465 if (!sp_regno[flag_code >> 1])
10466 {
10467 char *saved_input = input_line_pointer;
10468 char sp[][4] = {"esp", "rsp"};
10469 expressionS exp;
10470
10471 input_line_pointer = sp[flag_code >> 1];
10472 tc_x86_parse_to_dw2regnum (&exp);
10473 gas_assert (exp.X_op == O_constant);
10474 sp_regno[flag_code >> 1] = exp.X_add_number;
10475 input_line_pointer = saved_input;
10476 }
10477
10478 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10479 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
10480 }
10481
10482 int
10483 x86_dwarf2_addr_size (void)
10484 {
10485 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10486 if (x86_elf_abi == X86_64_X32_ABI)
10487 return 4;
10488 #endif
10489 return bfd_arch_bits_per_address (stdoutput) / 8;
10490 }
10491
10492 int
10493 i386_elf_section_type (const char *str, size_t len)
10494 {
10495 if (flag_code == CODE_64BIT
10496 && len == sizeof ("unwind") - 1
10497 && strncmp (str, "unwind", 6) == 0)
10498 return SHT_X86_64_UNWIND;
10499
10500 return -1;
10501 }
10502
10503 #ifdef TE_SOLARIS
10504 void
10505 i386_solaris_fix_up_eh_frame (segT sec)
10506 {
10507 if (flag_code == CODE_64BIT)
10508 elf_section_type (sec) = SHT_X86_64_UNWIND;
10509 }
10510 #endif
10511
10512 #ifdef TE_PE
10513 void
10514 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10515 {
10516 expressionS exp;
10517
10518 exp.X_op = O_secrel;
10519 exp.X_add_symbol = symbol;
10520 exp.X_add_number = 0;
10521 emit_expr (&exp, size);
10522 }
10523 #endif
10524
10525 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10526 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10527
10528 bfd_vma
10529 x86_64_section_letter (int letter, char **ptr_msg)
10530 {
10531 if (flag_code == CODE_64BIT)
10532 {
10533 if (letter == 'l')
10534 return SHF_X86_64_LARGE;
10535
10536 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10537 }
10538 else
10539 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10540 return -1;
10541 }
10542
10543 bfd_vma
10544 x86_64_section_word (char *str, size_t len)
10545 {
10546 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10547 return SHF_X86_64_LARGE;
10548
10549 return -1;
10550 }
10551
10552 static void
10553 handle_large_common (int small ATTRIBUTE_UNUSED)
10554 {
10555 if (flag_code != CODE_64BIT)
10556 {
10557 s_comm_internal (0, elf_common_parse);
10558 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10559 }
10560 else
10561 {
10562 static segT lbss_section;
10563 asection *saved_com_section_ptr = elf_com_section_ptr;
10564 asection *saved_bss_section = bss_section;
10565
10566 if (lbss_section == NULL)
10567 {
10568 flagword applicable;
10569 segT seg = now_seg;
10570 subsegT subseg = now_subseg;
10571
10572 /* The .lbss section is for local .largecomm symbols. */
10573 lbss_section = subseg_new (".lbss", 0);
10574 applicable = bfd_applicable_section_flags (stdoutput);
10575 bfd_set_section_flags (stdoutput, lbss_section,
10576 applicable & SEC_ALLOC);
10577 seg_info (lbss_section)->bss = 1;
10578
10579 subseg_set (seg, subseg);
10580 }
10581
10582 elf_com_section_ptr = &_bfd_elf_large_com_section;
10583 bss_section = lbss_section;
10584
10585 s_comm_internal (0, elf_common_parse);
10586
10587 elf_com_section_ptr = saved_com_section_ptr;
10588 bss_section = saved_bss_section;
10589 }
10590 }
10591 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.389613 seconds and 5 git commands to generate.