1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
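/* Illustrative example (an assumed input, not a case taken from this
   file): for AT&T input like "lock addl %eax, %fs:(%edx)" the 0x64 FS
   override lands in i.prefix[SEG_PREFIX] and 0xf0 in
   i.prefix[LOCK_PREFIX], so the segment override is emitted ahead of
   LOCK, following the slot order defined above.  */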
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ASCII letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
201 /* 'md_assemble ()' gathers together information and puts it into an
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
226 };
227
228 struct _i386_insn
229 {
230 /* TM holds the template for the insn we're currently assembling. */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* Have HLE prefix. */
294 unsigned int have_hle;
295
296 /* Error message. */
297 enum i386_error error;
298 };
299
300 typedef struct _i386_insn i386_insn;
301
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
305 #ifdef LEX_AT
306 "@"
307 #endif
308 #ifdef LEX_QM
309 "?"
310 #endif
311 ;
312
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
328
329 #else
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
332 #endif
333
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
343
344 const char line_separator_chars[] = ";";
345
346 /* Chars that can be used to separate mant from exp in floating point
347 nums. */
348 const char EXP_CHARS[] = "eE";
349
350 /* Chars that mean this number is a floating point constant
351 As in 0f12.456
352 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
354
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
361
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
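/* Illustrative usage sketch (hypothetical names, not code from this
   file):

     char *comma = strchr (operand_string, ',');
     END_STRING_AND_SAVE (comma);     overwrite ',' with '\0' and push
                                      the ',' onto save_stack
     ... parse operand_string as an ordinary C string ...
     RESTORE_END_STRING (comma);      pop the ',' back into place

   Pairing the two macros is what lets md_assemble () keep its promise
   of leaving the caller's string unaltered.  */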
383
384 /* The instruction we're assembling. */
385 static i386_insn i;
386
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
389
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393
394 /* Current operand we are working on. */
395 static int this_operand = -1;
396
397 /* We support three different code modes. The FLAG_CODE variable is used to
398 distinguish these. */
399
400 enum flag_code {
401 CODE_32BIT,
402 CODE_16BIT,
403 CODE_64BIT };
404
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
409
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413
414 /* The ELF ABI to use. */
415 enum x86_elf_abi
416 {
417 I386_ABI,
418 X86_64_ABI,
419 X86_64_X32_ABI
420 };
421
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
423 #endif
424
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
427 {
428 "32",
429 "16",
430 "64"
431 };
432
433 /* 1 for intel syntax,
434 0 if att syntax. */
435 static int intel_syntax = 0;
436
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
440
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
443
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
446
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
449
450 /* 1 if the pseudo index registers, eiz/riz, are allowed. */
451 static int allow_index_reg = 0;
452
453 static enum check_kind
454 {
455 check_none = 0,
456 check_warning,
457 check_error
458 }
459 sse_check, operand_check = check_warning;
460
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
463
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
468
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
471
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
474
475 /* CPU name. */
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
478
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
484
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
490
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
496
497 /* If set, conditional jumps are not automatically promoted to handle
498 offsets larger than a byte. */
499 static unsigned int no_cond_jump_promotion = 0;
500
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
503
504 /* Encode scalar AVX instructions with specific vector length. */
505 static enum
506 {
507 vex128 = 0,
508 vex256
509 } avxscalar;
510
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
513
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
516
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
519
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
524
525 /* Types. */
526 #define UNCOND_JUMP 0
527 #define COND_JUMP 1
528 #define COND_JUMP86 2
529
530 /* Sizes. */
531 #define CODE16 1
532 #define SMALL 0
533 #define SMALL16 (SMALL | CODE16)
534 #define BIG 2
535 #define BIG16 (BIG | CODE16)
536
537 #ifndef INLINE
538 #ifdef __GNUC__
539 #define INLINE __inline__
540 #else
541 #define INLINE
542 #endif
543 #endif
544
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
548 ((s) >> 2)
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
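/* Worked example of the encoding: ENCODE_RELAX_STATE (COND_JUMP, SMALL)
   is (1 << 2) | 0 = 4; TYPE_FROM_RELAX_STATE (4) recovers COND_JUMP and
   DISP_SIZE_FROM_RELAX_STATE (4) gives a 1 byte displacement.
   ENCODE_RELAX_STATE (COND_JUMP, BIG) = 6 decodes to 4 displacement
   bytes, and ENCODE_RELAX_STATE (COND_JUMP, BIG16) = 7 to 2 bytes.  */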
551
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work unless the destination is in the bottom
558 64k of the code segment (the top 16 bits of eip are zeroed). */
559
560 const relax_typeS md_relax_table[] =
561 {
562 /* The fields are:
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
567
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
573 {0, 0, 4, 0},
574 /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
576 {0, 0, 2, 0},
577
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
581 /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
583 {0, 0, 5, 0},
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
586 {0, 0, 3, 0},
587
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
591 /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
593 {0, 0, 5, 0},
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
596 {0, 0, 4, 0}
597 };
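/* Reading the first UNCOND_JUMP entry above: a frag in that state can
   reach 128 bytes forward and 127 bytes backward, carries 1 byte in its
   variable part (the 8 bit displacement), and is promoted to the
   ENCODE_RELAX_STATE (UNCOND_JUMP, BIG) state when the target is out of
   that range.  */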
598
599 static const arch_entry cpu_arch[] =
600 {
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
668 CPU_BDVER3_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
670 CPU_BTVER1_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
672 CPU_BTVER2_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
674 CPU_8087_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
676 CPU_287_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
678 CPU_387_FLAGS, 0, 0 },
679 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
680 CPU_ANY87_FLAGS, 0, 1 },
681 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
682 CPU_MMX_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
684 CPU_3DNOWA_FLAGS, 0, 1 },
685 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
686 CPU_SSE_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
688 CPU_SSE2_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
690 CPU_SSE3_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
692 CPU_SSSE3_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
694 CPU_SSE4_1_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
696 CPU_SSE4_2_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
698 CPU_SSE4_2_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
700 CPU_ANY_SSE_FLAGS, 0, 1 },
701 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
702 CPU_AVX_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
704 CPU_AVX2_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
706 CPU_ANY_AVX_FLAGS, 0, 1 },
707 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
708 CPU_VMX_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
710 CPU_VMFUNC_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
712 CPU_SMX_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
714 CPU_XSAVE_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
716 CPU_XSAVEOPT_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
718 CPU_AES_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
720 CPU_PCLMUL_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
722 CPU_PCLMUL_FLAGS, 1, 0 },
723 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
724 CPU_FSGSBASE_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
726 CPU_RDRND_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
728 CPU_F16C_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
730 CPU_BMI2_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
732 CPU_FMA_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
734 CPU_FMA4_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
736 CPU_XOP_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
738 CPU_LWP_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
740 CPU_MOVBE_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
742 CPU_CX16_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
744 CPU_EPT_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
746 CPU_LZCNT_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
748 CPU_HLE_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
750 CPU_RTM_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
752 CPU_INVPCID_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
754 CPU_CLFLUSH_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
756 CPU_NOP_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
758 CPU_SYSCALL_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
760 CPU_RDTSCP_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
762 CPU_3DNOW_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
764 CPU_3DNOWA_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
766 CPU_PADLOCK_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
768 CPU_SVME_FLAGS, 1, 0 },
769 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
770 CPU_SVME_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
772 CPU_SSE4A_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
774 CPU_ABM_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
776 CPU_BMI_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
778 CPU_TBM_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
780 CPU_ADX_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
782 CPU_RDSEED_FLAGS, 0, 0 },
783 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
784 CPU_PRFCHW_FLAGS, 0, 0 },
785 };
786
787 #ifdef I386COFF
788 /* Like s_lcomm_internal in gas/read.c but the alignment string
789 is allowed to be optional. */
790
791 static symbolS *
792 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
793 {
794 addressT align = 0;
795
796 SKIP_WHITESPACE ();
797
798 if (needs_align
799 && *input_line_pointer == ',')
800 {
801 align = parse_align (needs_align - 1);
802
803 if (align == (addressT) -1)
804 return NULL;
805 }
806 else
807 {
808 if (size >= 8)
809 align = 3;
810 else if (size >= 4)
811 align = 2;
812 else if (size >= 2)
813 align = 1;
814 else
815 align = 0;
816 }
817
818 bss_alloc (symbolP, size, align);
819 return symbolP;
820 }
821
822 static void
823 pe_lcomm (int needs_align)
824 {
825 s_comm_internal (needs_align * 2, pe_lcomm_internal);
826 }
827 #endif
828
829 const pseudo_typeS md_pseudo_table[] =
830 {
831 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
832 {"align", s_align_bytes, 0},
833 #else
834 {"align", s_align_ptwo, 0},
835 #endif
836 {"arch", set_cpu_arch, 0},
837 #ifndef I386COFF
838 {"bss", s_bss, 0},
839 #else
840 {"lcomm", pe_lcomm, 1},
841 #endif
842 {"ffloat", float_cons, 'f'},
843 {"dfloat", float_cons, 'd'},
844 {"tfloat", float_cons, 'x'},
845 {"value", cons, 2},
846 {"slong", signed_cons, 4},
847 {"noopt", s_ignore, 0},
848 {"optim", s_ignore, 0},
849 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
850 {"code16", set_code_flag, CODE_16BIT},
851 {"code32", set_code_flag, CODE_32BIT},
852 {"code64", set_code_flag, CODE_64BIT},
853 {"intel_syntax", set_intel_syntax, 1},
854 {"att_syntax", set_intel_syntax, 0},
855 {"intel_mnemonic", set_intel_mnemonic, 1},
856 {"att_mnemonic", set_intel_mnemonic, 0},
857 {"allow_index_reg", set_allow_index_reg, 1},
858 {"disallow_index_reg", set_allow_index_reg, 0},
859 {"sse_check", set_check, 0},
860 {"operand_check", set_check, 1},
861 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
862 {"largecomm", handle_large_common, 0},
863 #else
864 {"file", (void (*) (int)) dwarf2_directive_file, 0},
865 {"loc", dwarf2_directive_loc, 0},
866 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
867 #endif
868 #ifdef TE_PE
869 {"secrel32", pe_directive_secrel, 0},
870 #endif
871 {0, 0, 0}
872 };
873
874 /* For interface with expression (). */
875 extern char *input_line_pointer;
876
877 /* Hash table for instruction mnemonic lookup. */
878 static struct hash_control *op_hash;
879
880 /* Hash table for register lookup. */
881 static struct hash_control *reg_hash;
882 \f
883 void
884 i386_align_code (fragS *fragP, int count)
885 {
886 /* Various efficient no-op patterns for aligning code labels.
887 Note: Don't try to assemble the instructions in the comments.
888 0L and 0w are not legal. */
889 static const char f32_1[] =
890 {0x90}; /* nop */
891 static const char f32_2[] =
892 {0x66,0x90}; /* xchg %ax,%ax */
893 static const char f32_3[] =
894 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
895 static const char f32_4[] =
896 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
897 static const char f32_5[] =
898 {0x90, /* nop */
899 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
900 static const char f32_6[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
902 static const char f32_7[] =
903 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
904 static const char f32_8[] =
905 {0x90, /* nop */
906 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
907 static const char f32_9[] =
908 {0x89,0xf6, /* movl %esi,%esi */
909 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
910 static const char f32_10[] =
911 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
912 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
913 static const char f32_11[] =
914 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
915 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
916 static const char f32_12[] =
917 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
918 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
919 static const char f32_13[] =
920 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
921 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
922 static const char f32_14[] =
923 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
924 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
925 static const char f16_3[] =
926 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
927 static const char f16_4[] =
928 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
929 static const char f16_5[] =
930 {0x90, /* nop */
931 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
932 static const char f16_6[] =
933 {0x89,0xf6, /* mov %si,%si */
934 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
935 static const char f16_7[] =
936 {0x8d,0x74,0x00, /* lea 0(%si),%si */
937 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
938 static const char f16_8[] =
939 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
940 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
941 static const char jump_31[] =
942 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
943 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
944 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
945 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
946 static const char *const f32_patt[] = {
947 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
948 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
949 };
950 static const char *const f16_patt[] = {
951 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
952 };
953 /* nopl (%[re]ax) */
954 static const char alt_3[] =
955 {0x0f,0x1f,0x00};
956 /* nopl 0(%[re]ax) */
957 static const char alt_4[] =
958 {0x0f,0x1f,0x40,0x00};
959 /* nopl 0(%[re]ax,%[re]ax,1) */
960 static const char alt_5[] =
961 {0x0f,0x1f,0x44,0x00,0x00};
962 /* nopw 0(%[re]ax,%[re]ax,1) */
963 static const char alt_6[] =
964 {0x66,0x0f,0x1f,0x44,0x00,0x00};
965 /* nopl 0L(%[re]ax) */
966 static const char alt_7[] =
967 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
968 /* nopl 0L(%[re]ax,%[re]ax,1) */
969 static const char alt_8[] =
970 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
971 /* nopw 0L(%[re]ax,%[re]ax,1) */
972 static const char alt_9[] =
973 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
975 static const char alt_10[] =
976 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
977 /* data16
978 nopw %cs:0L(%[re]ax,%[re]ax,1) */
979 static const char alt_long_11[] =
980 {0x66,
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 /* data16
983 data16
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_12[] =
986 {0x66,
987 0x66,
988 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
989 /* data16
990 data16
991 data16
992 nopw %cs:0L(%[re]ax,%[re]ax,1) */
993 static const char alt_long_13[] =
994 {0x66,
995 0x66,
996 0x66,
997 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
998 /* data16
999 data16
1000 data16
1001 data16
1002 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1003 static const char alt_long_14[] =
1004 {0x66,
1005 0x66,
1006 0x66,
1007 0x66,
1008 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1009 /* data16
1010 data16
1011 data16
1012 data16
1013 data16
1014 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1015 static const char alt_long_15[] =
1016 {0x66,
1017 0x66,
1018 0x66,
1019 0x66,
1020 0x66,
1021 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1022 /* nopl 0(%[re]ax,%[re]ax,1)
1023 nopw 0(%[re]ax,%[re]ax,1) */
1024 static const char alt_short_11[] =
1025 {0x0f,0x1f,0x44,0x00,0x00,
1026 0x66,0x0f,0x1f,0x44,0x00,0x00};
1027 /* nopw 0(%[re]ax,%[re]ax,1)
1028 nopw 0(%[re]ax,%[re]ax,1) */
1029 static const char alt_short_12[] =
1030 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1031 0x66,0x0f,0x1f,0x44,0x00,0x00};
1032 /* nopw 0(%[re]ax,%[re]ax,1)
1033 nopl 0L(%[re]ax) */
1034 static const char alt_short_13[] =
1035 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1036 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1037 /* nopl 0L(%[re]ax)
1038 nopl 0L(%[re]ax) */
1039 static const char alt_short_14[] =
1040 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1041 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1042 /* nopl 0L(%[re]ax)
1043 nopl 0L(%[re]ax,%[re]ax,1) */
1044 static const char alt_short_15[] =
1045 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1046 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1047 static const char *const alt_short_patt[] = {
1048 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1049 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1050 alt_short_14, alt_short_15
1051 };
1052 static const char *const alt_long_patt[] = {
1053 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1054 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1055 alt_long_14, alt_long_15
1056 };
1057
1058 /* Only handle a positive count no larger than the maximum padding we can emit. */
1059 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1060 return;
1061
1062 /* We need to decide which NOP sequence to use for 32bit and
1063 64bit. When -mtune= is used:
1064
1065 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1066 PROCESSOR_GENERIC32, f32_patt will be used.
1067 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1068 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1069 PROCESSOR_GENERIC64, alt_long_patt will be used.
1070 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1071 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1072 will be used.
1073
1074 When -mtune= isn't used, alt_long_patt will be used if
1075 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1076 be used.
1077
1078 When -march= or .arch is used, we can't use anything beyond
1079 cpu_arch_isa_flags. */
1080
1081 if (flag_code == CODE_16BIT)
1082 {
1083 if (count > 8)
1084 {
1085 memcpy (fragP->fr_literal + fragP->fr_fix,
1086 jump_31, count);
1087 /* Adjust jump offset. */
1088 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1089 }
1090 else
1091 memcpy (fragP->fr_literal + fragP->fr_fix,
1092 f16_patt[count - 1], count);
1093 }
1094 else
1095 {
1096 const char *const *patt = NULL;
1097
1098 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1099 {
1100 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1101 switch (cpu_arch_tune)
1102 {
1103 case PROCESSOR_UNKNOWN:
1104 /* We use cpu_arch_isa_flags to check if we SHOULD
1105 optimize with nops. */
1106 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1107 patt = alt_long_patt;
1108 else
1109 patt = f32_patt;
1110 break;
1111 case PROCESSOR_PENTIUM4:
1112 case PROCESSOR_NOCONA:
1113 case PROCESSOR_CORE:
1114 case PROCESSOR_CORE2:
1115 case PROCESSOR_COREI7:
1116 case PROCESSOR_L1OM:
1117 case PROCESSOR_K1OM:
1118 case PROCESSOR_GENERIC64:
1119 patt = alt_long_patt;
1120 break;
1121 case PROCESSOR_K6:
1122 case PROCESSOR_ATHLON:
1123 case PROCESSOR_K8:
1124 case PROCESSOR_AMDFAM10:
1125 case PROCESSOR_BD:
1126 case PROCESSOR_BT:
1127 patt = alt_short_patt;
1128 break;
1129 case PROCESSOR_I386:
1130 case PROCESSOR_I486:
1131 case PROCESSOR_PENTIUM:
1132 case PROCESSOR_PENTIUMPRO:
1133 case PROCESSOR_GENERIC32:
1134 patt = f32_patt;
1135 break;
1136 }
1137 }
1138 else
1139 {
1140 switch (fragP->tc_frag_data.tune)
1141 {
1142 case PROCESSOR_UNKNOWN:
1143 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1144 PROCESSOR_UNKNOWN. */
1145 abort ();
1146 break;
1147
1148 case PROCESSOR_I386:
1149 case PROCESSOR_I486:
1150 case PROCESSOR_PENTIUM:
1151 case PROCESSOR_K6:
1152 case PROCESSOR_ATHLON:
1153 case PROCESSOR_K8:
1154 case PROCESSOR_AMDFAM10:
1155 case PROCESSOR_BD:
1156 case PROCESSOR_BT:
1157 case PROCESSOR_GENERIC32:
1158 /* We use cpu_arch_isa_flags to check if we CAN optimize
1159 with nops. */
1160 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1161 patt = alt_short_patt;
1162 else
1163 patt = f32_patt;
1164 break;
1165 case PROCESSOR_PENTIUMPRO:
1166 case PROCESSOR_PENTIUM4:
1167 case PROCESSOR_NOCONA:
1168 case PROCESSOR_CORE:
1169 case PROCESSOR_CORE2:
1170 case PROCESSOR_COREI7:
1171 case PROCESSOR_L1OM:
1172 case PROCESSOR_K1OM:
1173 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1174 patt = alt_long_patt;
1175 else
1176 patt = f32_patt;
1177 break;
1178 case PROCESSOR_GENERIC64:
1179 patt = alt_long_patt;
1180 break;
1181 }
1182 }
1183
1184 if (patt == f32_patt)
1185 {
1186 /* If the padding is below the limit (normally 15 bytes), we use the
1187 normal patterns. Otherwise, we use a jump instruction and adjust
1188 its offset. */
1189 int limit;
1190
1191 /* For 64bit, the limit is 3 bytes. */
1192 if (flag_code == CODE_64BIT
1193 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1194 limit = 3;
1195 else
1196 limit = 15;
1197 if (count < limit)
1198 memcpy (fragP->fr_literal + fragP->fr_fix,
1199 patt[count - 1], count);
1200 else
1201 {
1202 memcpy (fragP->fr_literal + fragP->fr_fix,
1203 jump_31, count);
1204 /* Adjust jump offset. */
1205 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1206 }
1207 }
1208 else
1209 {
1210 /* Maximum length of an instruction is 15 bytes. If the
1211 padding is greater than 15 bytes and we don't use jump,
1212 we have to break it into smaller pieces. */
1213 int padding = count;
1214 while (padding > 15)
1215 {
1216 padding -= 15;
1217 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1218 patt [14], 15);
1219 }
1220
1221 if (padding)
1222 memcpy (fragP->fr_literal + fragP->fr_fix,
1223 patt [padding - 1], padding);
1224 }
1225 }
1226 fragP->fr_var = count;
1227 }
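/* As an example of the splitting above: with one of the alt pattern
   tables selected and count == 20, the loop copies the 15 byte patt[14]
   entry at offset 5 first and then the 5 byte patt[4] entry at offset 0,
   so no single emitted NOP exceeds the 15 byte instruction limit.  */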
1228
1229 static INLINE int
1230 operand_type_all_zero (const union i386_operand_type *x)
1231 {
1232 switch (ARRAY_SIZE(x->array))
1233 {
1234 case 3:
1235 if (x->array[2])
1236 return 0;
1237 case 2:
1238 if (x->array[1])
1239 return 0;
1240 case 1:
1241 return !x->array[0];
1242 default:
1243 abort ();
1244 }
1245 }
1246
1247 static INLINE void
1248 operand_type_set (union i386_operand_type *x, unsigned int v)
1249 {
1250 switch (ARRAY_SIZE(x->array))
1251 {
1252 case 3:
1253 x->array[2] = v;
1254 case 2:
1255 x->array[1] = v;
1256 case 1:
1257 x->array[0] = v;
1258 break;
1259 default:
1260 abort ();
1261 }
1262 }
1263
1264 static INLINE int
1265 operand_type_equal (const union i386_operand_type *x,
1266 const union i386_operand_type *y)
1267 {
1268 switch (ARRAY_SIZE(x->array))
1269 {
1270 case 3:
1271 if (x->array[2] != y->array[2])
1272 return 0;
1273 case 2:
1274 if (x->array[1] != y->array[1])
1275 return 0;
1276 case 1:
1277 return x->array[0] == y->array[0];
1278 break;
1279 default:
1280 abort ();
1281 }
1282 }
1283
1284 static INLINE int
1285 cpu_flags_all_zero (const union i386_cpu_flags *x)
1286 {
1287 switch (ARRAY_SIZE(x->array))
1288 {
1289 case 3:
1290 if (x->array[2])
1291 return 0;
1292 case 2:
1293 if (x->array[1])
1294 return 0;
1295 case 1:
1296 return !x->array[0];
1297 default:
1298 abort ();
1299 }
1300 }
1301
1302 static INLINE void
1303 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1304 {
1305 switch (ARRAY_SIZE(x->array))
1306 {
1307 case 3:
1308 x->array[2] = v;
1309 case 2:
1310 x->array[1] = v;
1311 case 1:
1312 x->array[0] = v;
1313 break;
1314 default:
1315 abort ();
1316 }
1317 }
1318
1319 static INLINE int
1320 cpu_flags_equal (const union i386_cpu_flags *x,
1321 const union i386_cpu_flags *y)
1322 {
1323 switch (ARRAY_SIZE(x->array))
1324 {
1325 case 3:
1326 if (x->array[2] != y->array[2])
1327 return 0;
1328 case 2:
1329 if (x->array[1] != y->array[1])
1330 return 0;
1331 case 1:
1332 return x->array[0] == y->array[0];
1333 break;
1334 default:
1335 abort ();
1336 }
1337 }
1338
1339 static INLINE int
1340 cpu_flags_check_cpu64 (i386_cpu_flags f)
1341 {
1342 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1343 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1344 }
1345
1346 static INLINE i386_cpu_flags
1347 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1348 {
1349 switch (ARRAY_SIZE (x.array))
1350 {
1351 case 3:
1352 x.array [2] &= y.array [2];
1353 case 2:
1354 x.array [1] &= y.array [1];
1355 case 1:
1356 x.array [0] &= y.array [0];
1357 break;
1358 default:
1359 abort ();
1360 }
1361 return x;
1362 }
1363
1364 static INLINE i386_cpu_flags
1365 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1366 {
1367 switch (ARRAY_SIZE (x.array))
1368 {
1369 case 3:
1370 x.array [2] |= y.array [2];
1371 case 2:
1372 x.array [1] |= y.array [1];
1373 case 1:
1374 x.array [0] |= y.array [0];
1375 break;
1376 default:
1377 abort ();
1378 }
1379 return x;
1380 }
1381
1382 static INLINE i386_cpu_flags
1383 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1384 {
1385 switch (ARRAY_SIZE (x.array))
1386 {
1387 case 3:
1388 x.array [2] &= ~y.array [2];
1389 case 2:
1390 x.array [1] &= ~y.array [1];
1391 case 1:
1392 x.array [0] &= ~y.array [0];
1393 break;
1394 default:
1395 abort ();
1396 }
1397 return x;
1398 }
1399
1400 #define CPU_FLAGS_ARCH_MATCH 0x1
1401 #define CPU_FLAGS_64BIT_MATCH 0x2
1402 #define CPU_FLAGS_AES_MATCH 0x4
1403 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1404 #define CPU_FLAGS_AVX_MATCH 0x10
1405
1406 #define CPU_FLAGS_32BIT_MATCH \
1407 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1408 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1409 #define CPU_FLAGS_PERFECT_MATCH \
1410 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1411
1412 /* Return CPU flags match bits. */
1413
1414 static int
1415 cpu_flags_match (const insn_template *t)
1416 {
1417 i386_cpu_flags x = t->cpu_flags;
1418 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1419
1420 x.bitfield.cpu64 = 0;
1421 x.bitfield.cpuno64 = 0;
1422
1423 if (cpu_flags_all_zero (&x))
1424 {
1425 /* This instruction is available on all archs. */
1426 match |= CPU_FLAGS_32BIT_MATCH;
1427 }
1428 else
1429 {
1430 /* This instruction is available only on some archs. */
1431 i386_cpu_flags cpu = cpu_arch_flags;
1432
1433 cpu.bitfield.cpu64 = 0;
1434 cpu.bitfield.cpuno64 = 0;
1435 cpu = cpu_flags_and (x, cpu);
1436 if (!cpu_flags_all_zero (&cpu))
1437 {
1438 if (x.bitfield.cpuavx)
1439 {
1440 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1441 if (cpu.bitfield.cpuavx)
1442 {
1443 /* Check SSE2AVX. */
1444 if (!t->opcode_modifier.sse2avx || sse2avx)
1445 {
1446 match |= (CPU_FLAGS_ARCH_MATCH
1447 | CPU_FLAGS_AVX_MATCH);
1448 /* Check AES. */
1449 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1450 match |= CPU_FLAGS_AES_MATCH;
1451 /* Check PCLMUL. */
1452 if (!x.bitfield.cpupclmul
1453 || cpu.bitfield.cpupclmul)
1454 match |= CPU_FLAGS_PCLMUL_MATCH;
1455 }
1456 }
1457 else
1458 match |= CPU_FLAGS_ARCH_MATCH;
1459 }
1460 else
1461 match |= CPU_FLAGS_32BIT_MATCH;
1462 }
1463 }
1464 return match;
1465 }
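/* A sketch of the scoring: a template with no cpu requirements collects
   CPU_FLAGS_32BIT_MATCH plus (when cpu_flags_check_cpu64 passes)
   CPU_FLAGS_64BIT_MATCH, i.e. CPU_FLAGS_PERFECT_MATCH, while a template
   that needs a feature missing from cpu_arch_flags (e.g. CpuAVX without
   .avx enabled) ends up with at most the 64 bit bit and therefore falls
   short of a perfect match.  */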
1466
1467 static INLINE i386_operand_type
1468 operand_type_and (i386_operand_type x, i386_operand_type y)
1469 {
1470 switch (ARRAY_SIZE (x.array))
1471 {
1472 case 3:
1473 x.array [2] &= y.array [2];
1474 case 2:
1475 x.array [1] &= y.array [1];
1476 case 1:
1477 x.array [0] &= y.array [0];
1478 break;
1479 default:
1480 abort ();
1481 }
1482 return x;
1483 }
1484
1485 static INLINE i386_operand_type
1486 operand_type_or (i386_operand_type x, i386_operand_type y)
1487 {
1488 switch (ARRAY_SIZE (x.array))
1489 {
1490 case 3:
1491 x.array [2] |= y.array [2];
1492 case 2:
1493 x.array [1] |= y.array [1];
1494 case 1:
1495 x.array [0] |= y.array [0];
1496 break;
1497 default:
1498 abort ();
1499 }
1500 return x;
1501 }
1502
1503 static INLINE i386_operand_type
1504 operand_type_xor (i386_operand_type x, i386_operand_type y)
1505 {
1506 switch (ARRAY_SIZE (x.array))
1507 {
1508 case 3:
1509 x.array [2] ^= y.array [2];
1510 case 2:
1511 x.array [1] ^= y.array [1];
1512 case 1:
1513 x.array [0] ^= y.array [0];
1514 break;
1515 default:
1516 abort ();
1517 }
1518 return x;
1519 }
1520
1521 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1522 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1523 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1524 static const i386_operand_type inoutportreg
1525 = OPERAND_TYPE_INOUTPORTREG;
1526 static const i386_operand_type reg16_inoutportreg
1527 = OPERAND_TYPE_REG16_INOUTPORTREG;
1528 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1529 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1530 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1531 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1532 static const i386_operand_type anydisp
1533 = OPERAND_TYPE_ANYDISP;
1534 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1535 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1536 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1537 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1538 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1539 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1540 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1541 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1542 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1543 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1544 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1545 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1546
1547 enum operand_type
1548 {
1549 reg,
1550 imm,
1551 disp,
1552 anymem
1553 };
1554
1555 static INLINE int
1556 operand_type_check (i386_operand_type t, enum operand_type c)
1557 {
1558 switch (c)
1559 {
1560 case reg:
1561 return (t.bitfield.reg8
1562 || t.bitfield.reg16
1563 || t.bitfield.reg32
1564 || t.bitfield.reg64);
1565
1566 case imm:
1567 return (t.bitfield.imm8
1568 || t.bitfield.imm8s
1569 || t.bitfield.imm16
1570 || t.bitfield.imm32
1571 || t.bitfield.imm32s
1572 || t.bitfield.imm64);
1573
1574 case disp:
1575 return (t.bitfield.disp8
1576 || t.bitfield.disp16
1577 || t.bitfield.disp32
1578 || t.bitfield.disp32s
1579 || t.bitfield.disp64);
1580
1581 case anymem:
1582 return (t.bitfield.disp8
1583 || t.bitfield.disp16
1584 || t.bitfield.disp32
1585 || t.bitfield.disp32s
1586 || t.bitfield.disp64
1587 || t.bitfield.baseindex);
1588
1589 default:
1590 abort ();
1591 }
1592
1593 return 0;
1594 }
1595
1596 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1597 operand J for instruction template T. */
1598
1599 static INLINE int
1600 match_reg_size (const insn_template *t, unsigned int j)
1601 {
1602 return !((i.types[j].bitfield.byte
1603 && !t->operand_types[j].bitfield.byte)
1604 || (i.types[j].bitfield.word
1605 && !t->operand_types[j].bitfield.word)
1606 || (i.types[j].bitfield.dword
1607 && !t->operand_types[j].bitfield.dword)
1608 || (i.types[j].bitfield.qword
1609 && !t->operand_types[j].bitfield.qword));
1610 }
1611
1612 /* Return 1 if there is no conflict in any size on operand J for
1613 instruction template T. */
1614
1615 static INLINE int
1616 match_mem_size (const insn_template *t, unsigned int j)
1617 {
1618 return (match_reg_size (t, j)
1619 && !((i.types[j].bitfield.unspecified
1620 && !t->operand_types[j].bitfield.unspecified)
1621 || (i.types[j].bitfield.fword
1622 && !t->operand_types[j].bitfield.fword)
1623 || (i.types[j].bitfield.tbyte
1624 && !t->operand_types[j].bitfield.tbyte)
1625 || (i.types[j].bitfield.xmmword
1626 && !t->operand_types[j].bitfield.xmmword)
1627 || (i.types[j].bitfield.ymmword
1628 && !t->operand_types[j].bitfield.ymmword)));
1629 }
1630
1631 /* Return 1 if there is no size conflict on any operands for
1632 instruction template T. */
1633
1634 static INLINE int
1635 operand_size_match (const insn_template *t)
1636 {
1637 unsigned int j;
1638 int match = 1;
1639
1640 /* Don't check jump instructions. */
1641 if (t->opcode_modifier.jump
1642 || t->opcode_modifier.jumpbyte
1643 || t->opcode_modifier.jumpdword
1644 || t->opcode_modifier.jumpintersegment)
1645 return match;
1646
1647 /* Check memory and accumulator operand size. */
1648 for (j = 0; j < i.operands; j++)
1649 {
1650 if (t->operand_types[j].bitfield.anysize)
1651 continue;
1652
1653 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1654 {
1655 match = 0;
1656 break;
1657 }
1658
1659 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1660 {
1661 match = 0;
1662 break;
1663 }
1664 }
1665
1666 if (match)
1667 return match;
1668 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1669 {
1670 mismatch:
1671 i.error = operand_size_mismatch;
1672 return 0;
1673 }
1674
1675 /* Check reverse. */
1676 gas_assert (i.operands == 2);
1677
1678 match = 1;
1679 for (j = 0; j < 2; j++)
1680 {
1681 if (t->operand_types[j].bitfield.acc
1682 && !match_reg_size (t, j ? 0 : 1))
1683 goto mismatch;
1684
1685 if (i.types[j].bitfield.mem
1686 && !match_mem_size (t, j ? 0 : 1))
1687 goto mismatch;
1688 }
1689
1690 return match;
1691 }
1692
1693 static INLINE int
1694 operand_type_match (i386_operand_type overlap,
1695 i386_operand_type given)
1696 {
1697 i386_operand_type temp = overlap;
1698
1699 temp.bitfield.jumpabsolute = 0;
1700 temp.bitfield.unspecified = 0;
1701 temp.bitfield.byte = 0;
1702 temp.bitfield.word = 0;
1703 temp.bitfield.dword = 0;
1704 temp.bitfield.fword = 0;
1705 temp.bitfield.qword = 0;
1706 temp.bitfield.tbyte = 0;
1707 temp.bitfield.xmmword = 0;
1708 temp.bitfield.ymmword = 0;
1709 if (operand_type_all_zero (&temp))
1710 goto mismatch;
1711
1712 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1713 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1714 return 1;
1715
1716 mismatch:
1717 i.error = operand_type_mismatch;
1718 return 0;
1719 }
1720
1721 /* If given types g0 and g1 are registers they must be of the same type
1722 unless the expected operand type register overlap is null.
1723 Note that Acc in a template matches every size of reg. */
1724
1725 static INLINE int
1726 operand_type_register_match (i386_operand_type m0,
1727 i386_operand_type g0,
1728 i386_operand_type t0,
1729 i386_operand_type m1,
1730 i386_operand_type g1,
1731 i386_operand_type t1)
1732 {
1733 if (!operand_type_check (g0, reg))
1734 return 1;
1735
1736 if (!operand_type_check (g1, reg))
1737 return 1;
1738
1739 if (g0.bitfield.reg8 == g1.bitfield.reg8
1740 && g0.bitfield.reg16 == g1.bitfield.reg16
1741 && g0.bitfield.reg32 == g1.bitfield.reg32
1742 && g0.bitfield.reg64 == g1.bitfield.reg64)
1743 return 1;
1744
1745 if (m0.bitfield.acc)
1746 {
1747 t0.bitfield.reg8 = 1;
1748 t0.bitfield.reg16 = 1;
1749 t0.bitfield.reg32 = 1;
1750 t0.bitfield.reg64 = 1;
1751 }
1752
1753 if (m1.bitfield.acc)
1754 {
1755 t1.bitfield.reg8 = 1;
1756 t1.bitfield.reg16 = 1;
1757 t1.bitfield.reg32 = 1;
1758 t1.bitfield.reg64 = 1;
1759 }
1760
1761 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1762 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1763 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1764 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1765 return 1;
1766
1767 i.error = register_type_mismatch;
1768
1769 return 0;
1770 }
1771
1772 static INLINE unsigned int
1773 register_number (const reg_entry *r)
1774 {
1775 unsigned int nr = r->reg_num;
1776
1777 if (r->reg_flags & RegRex)
1778 nr += 8;
1779
1780 return nr;
1781 }
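/* For example, %edx is stored with reg_num 2 and no RegRex, so this
   returns 2; assuming the usual register table numbering, the REX
   extended %r10d also carries reg_num 2 but with RegRex set, giving
   10.  */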
1782
1783 static INLINE unsigned int
1784 mode_from_disp_size (i386_operand_type t)
1785 {
1786 if (t.bitfield.disp8)
1787 return 1;
1788 else if (t.bitfield.disp16
1789 || t.bitfield.disp32
1790 || t.bitfield.disp32s)
1791 return 2;
1792 else
1793 return 0;
1794 }
1795
1796 static INLINE int
1797 fits_in_signed_byte (offsetT num)
1798 {
1799 return (num >= -128) && (num <= 127);
1800 }
1801
1802 static INLINE int
1803 fits_in_unsigned_byte (offsetT num)
1804 {
1805 return (num & 0xff) == num;
1806 }
1807
1808 static INLINE int
1809 fits_in_unsigned_word (offsetT num)
1810 {
1811 return (num & 0xffff) == num;
1812 }
1813
1814 static INLINE int
1815 fits_in_signed_word (offsetT num)
1816 {
1817 return (-32768 <= num) && (num <= 32767);
1818 }
1819
1820 static INLINE int
1821 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1822 {
1823 #ifndef BFD64
1824 return 1;
1825 #else
1826 return (!(((offsetT) -1 << 31) & num)
1827 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1828 #endif
1829 } /* fits_in_signed_long() */
1830
1831 static INLINE int
1832 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1833 {
1834 #ifndef BFD64
1835 return 1;
1836 #else
1837 return (num & (((offsetT) 2 << 31) - 1)) == num;
1838 #endif
1839 } /* fits_in_unsigned_long() */
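/* For instance, with a 64 bit offsetT the value 0xffffffff passes
   fits_in_unsigned_long () but not fits_in_signed_long (), -1 passes
   fits_in_signed_long () but not fits_in_unsigned_long (), and values
   representable both ways (e.g. 100) pass both.  */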
1840
1841 static INLINE int
1842 fits_in_imm4 (offsetT num)
1843 {
1844 return (num & 0xf) == num;
1845 }
1846
1847 static i386_operand_type
1848 smallest_imm_type (offsetT num)
1849 {
1850 i386_operand_type t;
1851
1852 operand_type_set (&t, 0);
1853 t.bitfield.imm64 = 1;
1854
1855 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1856 {
1857 /* This code is disabled on the 486 because all the Imm1 forms
1858 in the opcode table are slower on the i486. They're the
1859 versions with the implicitly specified single-position
1860 displacement, which has another syntax if you really want to
1861 use that form. */
1862 t.bitfield.imm1 = 1;
1863 t.bitfield.imm8 = 1;
1864 t.bitfield.imm8s = 1;
1865 t.bitfield.imm16 = 1;
1866 t.bitfield.imm32 = 1;
1867 t.bitfield.imm32s = 1;
1868 }
1869 else if (fits_in_signed_byte (num))
1870 {
1871 t.bitfield.imm8 = 1;
1872 t.bitfield.imm8s = 1;
1873 t.bitfield.imm16 = 1;
1874 t.bitfield.imm32 = 1;
1875 t.bitfield.imm32s = 1;
1876 }
1877 else if (fits_in_unsigned_byte (num))
1878 {
1879 t.bitfield.imm8 = 1;
1880 t.bitfield.imm16 = 1;
1881 t.bitfield.imm32 = 1;
1882 t.bitfield.imm32s = 1;
1883 }
1884 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1885 {
1886 t.bitfield.imm16 = 1;
1887 t.bitfield.imm32 = 1;
1888 t.bitfield.imm32s = 1;
1889 }
1890 else if (fits_in_signed_long (num))
1891 {
1892 t.bitfield.imm32 = 1;
1893 t.bitfield.imm32s = 1;
1894 }
1895 else if (fits_in_unsigned_long (num))
1896 t.bitfield.imm32 = 1;
1897
1898 return t;
1899 }
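/* Example outcomes: smallest_imm_type (200) sets imm8 (unsigned byte
   fit), imm16, imm32, imm32s and the always-set imm64;
   smallest_imm_type (-200) drops imm8/imm8s and keeps
   imm16/imm32/imm32s/imm64; a value needing more than 32 bits keeps
   only imm64.  */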
1900
1901 static offsetT
1902 offset_in_range (offsetT val, int size)
1903 {
1904 addressT mask;
1905
1906 switch (size)
1907 {
1908 case 1: mask = ((addressT) 1 << 8) - 1; break;
1909 case 2: mask = ((addressT) 1 << 16) - 1; break;
1910 case 4: mask = ((addressT) 2 << 31) - 1; break;
1911 #ifdef BFD64
1912 case 8: mask = ((addressT) 2 << 63) - 1; break;
1913 #endif
1914 default: abort ();
1915 }
1916
1917 #ifdef BFD64
1918 /* If BFD64, sign extend val for 32bit address mode. */
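  /* The xor/subtract below is the usual branch-free sign extension of
     bit 31; e.g. it maps 0xfffffffc to -4. */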
1919 if (flag_code != CODE_64BIT
1920 || i.prefix[ADDR_PREFIX])
1921 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1922 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1923 #endif
1924
1925 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1926 {
1927 char buf1[40], buf2[40];
1928
1929 sprint_value (buf1, val);
1930 sprint_value (buf2, val & mask);
1931 as_warn (_("%s shortened to %s"), buf1, buf2);
1932 }
1933 return val & mask;
1934 }
1935
1936 enum PREFIX_GROUP
1937 {
1938 PREFIX_EXIST = 0,
1939 PREFIX_LOCK,
1940 PREFIX_REP,
1941 PREFIX_OTHER
1942 };
1943
1944 /* Returns
1945 a. PREFIX_EXIST if attempting to add a prefix where one from the
1946 same class already exists.
1947 b. PREFIX_LOCK if lock prefix is added.
1948 c. PREFIX_REP if rep/repne prefix is added.
1949 d. PREFIX_OTHER if other prefix is added.
1950 */
1951
1952 static enum PREFIX_GROUP
1953 add_prefix (unsigned int prefix)
1954 {
1955 enum PREFIX_GROUP ret = PREFIX_OTHER;
1956 unsigned int q;
1957
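  /* REX prefixes (0x40..0x4f) in 64-bit mode are merged into a single
     prefix byte. Adding REX.W when REX.W is already set, or any of
     REX.R/X/B when one of those is already present, is a duplicate. */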
1958 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1959 && flag_code == CODE_64BIT)
1960 {
1961 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1962 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1963 && (prefix & (REX_R | REX_X | REX_B))))
1964 ret = PREFIX_EXIST;
1965 q = REX_PREFIX;
1966 }
1967 else
1968 {
1969 switch (prefix)
1970 {
1971 default:
1972 abort ();
1973
1974 case CS_PREFIX_OPCODE:
1975 case DS_PREFIX_OPCODE:
1976 case ES_PREFIX_OPCODE:
1977 case FS_PREFIX_OPCODE:
1978 case GS_PREFIX_OPCODE:
1979 case SS_PREFIX_OPCODE:
1980 q = SEG_PREFIX;
1981 break;
1982
1983 case REPNE_PREFIX_OPCODE:
1984 case REPE_PREFIX_OPCODE:
1985 q = REP_PREFIX;
1986 ret = PREFIX_REP;
1987 break;
1988
1989 case LOCK_PREFIX_OPCODE:
1990 q = LOCK_PREFIX;
1991 ret = PREFIX_LOCK;
1992 break;
1993
1994 case FWAIT_OPCODE:
1995 q = WAIT_PREFIX;
1996 break;
1997
1998 case ADDR_PREFIX_OPCODE:
1999 q = ADDR_PREFIX;
2000 break;
2001
2002 case DATA_PREFIX_OPCODE:
2003 q = DATA_PREFIX;
2004 break;
2005 }
2006 if (i.prefix[q] != 0)
2007 ret = PREFIX_EXIST;
2008 }
2009
2010 if (ret)
2011 {
2012 if (!i.prefix[q])
2013 ++i.prefixes;
2014 i.prefix[q] |= prefix;
2015 }
2016 else
2017 as_bad (_("same type of prefix used twice"));
2018
2019 return ret;
2020 }
2021
2022 static void
2023 update_code_flag (int value, int check)
2024 {
2025 PRINTF_LIKE ((*as_error));
2026
2027 flag_code = (enum flag_code) value;
2028 if (flag_code == CODE_64BIT)
2029 {
2030 cpu_arch_flags.bitfield.cpu64 = 1;
2031 cpu_arch_flags.bitfield.cpuno64 = 0;
2032 }
2033 else
2034 {
2035 cpu_arch_flags.bitfield.cpu64 = 0;
2036 cpu_arch_flags.bitfield.cpuno64 = 1;
2037 }
2038 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2039 {
2040 if (check)
2041 as_error = as_fatal;
2042 else
2043 as_error = as_bad;
2044 (*as_error) (_("64bit mode not supported on `%s'."),
2045 cpu_arch_name ? cpu_arch_name : default_arch);
2046 }
2047 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2048 {
2049 if (check)
2050 as_error = as_fatal;
2051 else
2052 as_error = as_bad;
2053 (*as_error) (_("32bit mode not supported on `%s'."),
2054 cpu_arch_name ? cpu_arch_name : default_arch);
2055 }
2056 stackop_size = '\0';
2057 }
2058
2059 static void
2060 set_code_flag (int value)
2061 {
2062 update_code_flag (value, 0);
2063 }
2064
2065 static void
2066 set_16bit_gcc_code_flag (int new_code_flag)
2067 {
2068 flag_code = (enum flag_code) new_code_flag;
2069 if (flag_code != CODE_16BIT)
2070 abort ();
2071 cpu_arch_flags.bitfield.cpu64 = 0;
2072 cpu_arch_flags.bitfield.cpuno64 = 1;
2073 stackop_size = LONG_MNEM_SUFFIX;
2074 }
2075
2076 static void
2077 set_intel_syntax (int syntax_flag)
2078 {
2079 /* Find out if register prefixing is specified. */
2080 int ask_naked_reg = 0;
2081
2082 SKIP_WHITESPACE ();
2083 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2084 {
2085 char *string = input_line_pointer;
2086 int e = get_symbol_end ();
2087
2088 if (strcmp (string, "prefix") == 0)
2089 ask_naked_reg = 1;
2090 else if (strcmp (string, "noprefix") == 0)
2091 ask_naked_reg = -1;
2092 else
2093 as_bad (_("bad argument to syntax directive."));
2094 *input_line_pointer = e;
2095 }
2096 demand_empty_rest_of_line ();
2097
2098 intel_syntax = syntax_flag;
2099
2100 if (ask_naked_reg == 0)
2101 allow_naked_reg = (intel_syntax
2102 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2103 else
2104 allow_naked_reg = (ask_naked_reg < 0);
2105
2106 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2107
2108 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2109 identifier_chars['$'] = intel_syntax ? '$' : 0;
2110 register_prefix = allow_naked_reg ? "" : "%";
2111 }
2112
2113 static void
2114 set_intel_mnemonic (int mnemonic_flag)
2115 {
2116 intel_mnemonic = mnemonic_flag;
2117 }
2118
2119 static void
2120 set_allow_index_reg (int flag)
2121 {
2122 allow_index_reg = flag;
2123 }
2124
2125 static void
2126 set_check (int what)
2127 {
2128 enum check_kind *kind;
2129 const char *str;
2130
2131 if (what)
2132 {
2133 kind = &operand_check;
2134 str = "operand";
2135 }
2136 else
2137 {
2138 kind = &sse_check;
2139 str = "sse";
2140 }
2141
2142 SKIP_WHITESPACE ();
2143
2144 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2145 {
2146 char *string = input_line_pointer;
2147 int e = get_symbol_end ();
2148
2149 if (strcmp (string, "none") == 0)
2150 *kind = check_none;
2151 else if (strcmp (string, "warning") == 0)
2152 *kind = check_warning;
2153 else if (strcmp (string, "error") == 0)
2154 *kind = check_error;
2155 else
2156 as_bad (_("bad argument to %s_check directive."), str);
2157 *input_line_pointer = e;
2158 }
2159 else
2160 as_bad (_("missing argument for %s_check directive"), str);
2161
2162 demand_empty_rest_of_line ();
2163 }
2164
2165 static void
2166 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2167 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2168 {
2169 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2170 static const char *arch;
2171
2172 /* Intel L1OM and K1OM are only supported on ELF. */
2173 if (!IS_ELF)
2174 return;
2175
2176 if (!arch)
2177 {
2178 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2179 use default_arch. */
2180 arch = cpu_arch_name;
2181 if (!arch)
2182 arch = default_arch;
2183 }
2184
2185 /* If we are targeting Intel L1OM, we must enable it. */
2186 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2187 || new_flag.bitfield.cpul1om)
2188 return;
2189
2190 /* If we are targeting Intel K1OM, we must enable it. */
2191 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2192 || new_flag.bitfield.cpuk1om)
2193 return;
2194
2195 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2196 #endif
2197 }
2198
2199 static void
2200 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2201 {
2202 SKIP_WHITESPACE ();
2203
2204 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2205 {
2206 char *string = input_line_pointer;
2207 int e = get_symbol_end ();
2208 unsigned int j;
2209 i386_cpu_flags flags;
2210
2211 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2212 {
2213 if (strcmp (string, cpu_arch[j].name) == 0)
2214 {
2215 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2216
2217 if (*string != '.')
2218 {
2219 cpu_arch_name = cpu_arch[j].name;
2220 cpu_sub_arch_name = NULL;
2221 cpu_arch_flags = cpu_arch[j].flags;
2222 if (flag_code == CODE_64BIT)
2223 {
2224 cpu_arch_flags.bitfield.cpu64 = 1;
2225 cpu_arch_flags.bitfield.cpuno64 = 0;
2226 }
2227 else
2228 {
2229 cpu_arch_flags.bitfield.cpu64 = 0;
2230 cpu_arch_flags.bitfield.cpuno64 = 1;
2231 }
2232 cpu_arch_isa = cpu_arch[j].type;
2233 cpu_arch_isa_flags = cpu_arch[j].flags;
2234 if (!cpu_arch_tune_set)
2235 {
2236 cpu_arch_tune = cpu_arch_isa;
2237 cpu_arch_tune_flags = cpu_arch_isa_flags;
2238 }
2239 break;
2240 }
2241
2242 if (!cpu_arch[j].negated)
2243 flags = cpu_flags_or (cpu_arch_flags,
2244 cpu_arch[j].flags);
2245 else
2246 flags = cpu_flags_and_not (cpu_arch_flags,
2247 cpu_arch[j].flags);
2248 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2249 {
2250 if (cpu_sub_arch_name)
2251 {
2252 char *name = cpu_sub_arch_name;
2253 cpu_sub_arch_name = concat (name,
2254 cpu_arch[j].name,
2255 (const char *) NULL);
2256 free (name);
2257 }
2258 else
2259 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2260 cpu_arch_flags = flags;
2261 cpu_arch_isa_flags = flags;
2262 }
2263 *input_line_pointer = e;
2264 demand_empty_rest_of_line ();
2265 return;
2266 }
2267 }
2268 if (j >= ARRAY_SIZE (cpu_arch))
2269 as_bad (_("no such architecture: `%s'"), string);
2270
2271 *input_line_pointer = e;
2272 }
2273 else
2274 as_bad (_("missing cpu architecture"));
2275
2276 no_cond_jump_promotion = 0;
2277 if (*input_line_pointer == ','
2278 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2279 {
2280 char *string = ++input_line_pointer;
2281 int e = get_symbol_end ();
2282
2283 if (strcmp (string, "nojumps") == 0)
2284 no_cond_jump_promotion = 1;
2285 else if (strcmp (string, "jumps") == 0)
2286 ;
2287 else
2288 as_bad (_("no such architecture modifier: `%s'"), string);
2289
2290 *input_line_pointer = e;
2291 }
2292
2293 demand_empty_rest_of_line ();
2294 }
2295
2296 enum bfd_architecture
2297 i386_arch (void)
2298 {
2299 if (cpu_arch_isa == PROCESSOR_L1OM)
2300 {
2301 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2302 || flag_code != CODE_64BIT)
2303 as_fatal (_("Intel L1OM is 64bit ELF only"));
2304 return bfd_arch_l1om;
2305 }
2306 else if (cpu_arch_isa == PROCESSOR_K1OM)
2307 {
2308 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2309 || flag_code != CODE_64BIT)
2310 as_fatal (_("Intel K1OM is 64bit ELF only"));
2311 return bfd_arch_k1om;
2312 }
2313 else
2314 return bfd_arch_i386;
2315 }
2316
2317 unsigned long
2318 i386_mach (void)
2319 {
2320 if (!strncmp (default_arch, "x86_64", 6))
2321 {
2322 if (cpu_arch_isa == PROCESSOR_L1OM)
2323 {
2324 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2325 || default_arch[6] != '\0')
2326 as_fatal (_("Intel L1OM is 64bit ELF only"));
2327 return bfd_mach_l1om;
2328 }
2329 else if (cpu_arch_isa == PROCESSOR_K1OM)
2330 {
2331 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2332 || default_arch[6] != '\0')
2333 as_fatal (_("Intel K1OM is 64bit ELF only"));
2334 return bfd_mach_k1om;
2335 }
2336 else if (default_arch[6] == '\0')
2337 return bfd_mach_x86_64;
2338 else
2339 return bfd_mach_x64_32;
2340 }
2341 else if (!strcmp (default_arch, "i386"))
2342 return bfd_mach_i386_i386;
2343 else
2344 as_fatal (_("unknown architecture"));
2345 }
2346 \f
2347 void
2348 md_begin (void)
2349 {
2350 const char *hash_err;
2351
2352 /* Initialize op_hash hash table. */
2353 op_hash = hash_new ();
2354
2355 {
2356 const insn_template *optab;
2357 templates *core_optab;
2358
2359 /* Setup for loop. */
2360 optab = i386_optab;
2361 core_optab = (templates *) xmalloc (sizeof (templates));
2362 core_optab->start = optab;
2363
2364 while (1)
2365 {
2366 ++optab;
2367 if (optab->name == NULL
2368 || strcmp (optab->name, (optab - 1)->name) != 0)
2369 {
2370 /* Different name --> finish off the current template list,
2371 add it to the hash table, and begin a new one. */
2372 core_optab->end = optab;
2373 hash_err = hash_insert (op_hash,
2374 (optab - 1)->name,
2375 (void *) core_optab);
2376 if (hash_err)
2377 {
2378 as_fatal (_("can't hash %s: %s"),
2379 (optab - 1)->name,
2380 hash_err);
2381 }
2382 if (optab->name == NULL)
2383 break;
2384 core_optab = (templates *) xmalloc (sizeof (templates));
2385 core_optab->start = optab;
2386 }
2387 }
2388 }
2389
2390 /* Initialize reg_hash hash table. */
2391 reg_hash = hash_new ();
2392 {
2393 const reg_entry *regtab;
2394 unsigned int regtab_size = i386_regtab_size;
2395
2396 for (regtab = i386_regtab; regtab_size--; regtab++)
2397 {
2398 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2399 if (hash_err)
2400 as_fatal (_("can't hash %s: %s"),
2401 regtab->reg_name,
2402 hash_err);
2403 }
2404 }
2405
2406 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2407 {
2408 int c;
2409 char *p;
2410
2411 for (c = 0; c < 256; c++)
2412 {
2413 if (ISDIGIT (c))
2414 {
2415 digit_chars[c] = c;
2416 mnemonic_chars[c] = c;
2417 register_chars[c] = c;
2418 operand_chars[c] = c;
2419 }
2420 else if (ISLOWER (c))
2421 {
2422 mnemonic_chars[c] = c;
2423 register_chars[c] = c;
2424 operand_chars[c] = c;
2425 }
2426 else if (ISUPPER (c))
2427 {
2428 mnemonic_chars[c] = TOLOWER (c);
2429 register_chars[c] = mnemonic_chars[c];
2430 operand_chars[c] = c;
2431 }
2432
2433 if (ISALPHA (c) || ISDIGIT (c))
2434 identifier_chars[c] = c;
2435 else if (c >= 128)
2436 {
2437 identifier_chars[c] = c;
2438 operand_chars[c] = c;
2439 }
2440 }
2441
2442 #ifdef LEX_AT
2443 identifier_chars['@'] = '@';
2444 #endif
2445 #ifdef LEX_QM
2446 identifier_chars['?'] = '?';
2447 operand_chars['?'] = '?';
2448 #endif
2449 digit_chars['-'] = '-';
2450 mnemonic_chars['_'] = '_';
2451 mnemonic_chars['-'] = '-';
2452 mnemonic_chars['.'] = '.';
2453 identifier_chars['_'] = '_';
2454 identifier_chars['.'] = '.';
2455
2456 for (p = operand_special_chars; *p != '\0'; p++)
2457 operand_chars[(unsigned char) *p] = *p;
2458 }
2459
2460 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2461 if (IS_ELF)
2462 {
2463 record_alignment (text_section, 2);
2464 record_alignment (data_section, 2);
2465 record_alignment (bss_section, 2);
2466 }
2467 #endif
2468
2469 if (flag_code == CODE_64BIT)
2470 {
2471 #if defined (OBJ_COFF) && defined (TE_PE)
2472 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2473 ? 32 : 16);
2474 #else
2475 x86_dwarf2_return_column = 16;
2476 #endif
2477 x86_cie_data_alignment = -8;
2478 }
2479 else
2480 {
2481 x86_dwarf2_return_column = 8;
2482 x86_cie_data_alignment = -4;
2483 }
2484 }
2485
2486 void
2487 i386_print_statistics (FILE *file)
2488 {
2489 hash_print_statistics (file, "i386 opcode", op_hash);
2490 hash_print_statistics (file, "i386 register", reg_hash);
2491 }
2492 \f
2493 #ifdef DEBUG386
2494
2495 /* Debugging routines for md_assemble. */
2496 static void pte (insn_template *);
2497 static void pt (i386_operand_type);
2498 static void pe (expressionS *);
2499 static void ps (symbolS *);
2500
2501 static void
2502 pi (char *line, i386_insn *x)
2503 {
2504 unsigned int j;
2505
2506 fprintf (stdout, "%s: template ", line);
2507 pte (&x->tm);
2508 fprintf (stdout, " address: base %s index %s scale %x\n",
2509 x->base_reg ? x->base_reg->reg_name : "none",
2510 x->index_reg ? x->index_reg->reg_name : "none",
2511 x->log2_scale_factor);
2512 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2513 x->rm.mode, x->rm.reg, x->rm.regmem);
2514 fprintf (stdout, " sib: base %x index %x scale %x\n",
2515 x->sib.base, x->sib.index, x->sib.scale);
2516 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2517 (x->rex & REX_W) != 0,
2518 (x->rex & REX_R) != 0,
2519 (x->rex & REX_X) != 0,
2520 (x->rex & REX_B) != 0);
2521 for (j = 0; j < x->operands; j++)
2522 {
2523 fprintf (stdout, " #%d: ", j + 1);
2524 pt (x->types[j]);
2525 fprintf (stdout, "\n");
2526 if (x->types[j].bitfield.reg8
2527 || x->types[j].bitfield.reg16
2528 || x->types[j].bitfield.reg32
2529 || x->types[j].bitfield.reg64
2530 || x->types[j].bitfield.regmmx
2531 || x->types[j].bitfield.regxmm
2532 || x->types[j].bitfield.regymm
2533 || x->types[j].bitfield.sreg2
2534 || x->types[j].bitfield.sreg3
2535 || x->types[j].bitfield.control
2536 || x->types[j].bitfield.debug
2537 || x->types[j].bitfield.test)
2538 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2539 if (operand_type_check (x->types[j], imm))
2540 pe (x->op[j].imms);
2541 if (operand_type_check (x->types[j], disp))
2542 pe (x->op[j].disps);
2543 }
2544 }
2545
2546 static void
2547 pte (insn_template *t)
2548 {
2549 unsigned int j;
2550 fprintf (stdout, " %d operands ", t->operands);
2551 fprintf (stdout, "opcode %x ", t->base_opcode);
2552 if (t->extension_opcode != None)
2553 fprintf (stdout, "ext %x ", t->extension_opcode);
2554 if (t->opcode_modifier.d)
2555 fprintf (stdout, "D");
2556 if (t->opcode_modifier.w)
2557 fprintf (stdout, "W");
2558 fprintf (stdout, "\n");
2559 for (j = 0; j < t->operands; j++)
2560 {
2561 fprintf (stdout, " #%d type ", j + 1);
2562 pt (t->operand_types[j]);
2563 fprintf (stdout, "\n");
2564 }
2565 }
2566
2567 static void
2568 pe (expressionS *e)
2569 {
2570 fprintf (stdout, " operation %d\n", e->X_op);
2571 fprintf (stdout, " add_number %ld (%lx)\n",
2572 (long) e->X_add_number, (long) e->X_add_number);
2573 if (e->X_add_symbol)
2574 {
2575 fprintf (stdout, " add_symbol ");
2576 ps (e->X_add_symbol);
2577 fprintf (stdout, "\n");
2578 }
2579 if (e->X_op_symbol)
2580 {
2581 fprintf (stdout, " op_symbol ");
2582 ps (e->X_op_symbol);
2583 fprintf (stdout, "\n");
2584 }
2585 }
2586
2587 static void
2588 ps (symbolS *s)
2589 {
2590 fprintf (stdout, "%s type %s%s",
2591 S_GET_NAME (s),
2592 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2593 segment_name (S_GET_SEGMENT (s)));
2594 }
2595
2596 static struct type_name
2597 {
2598 i386_operand_type mask;
2599 const char *name;
2600 }
2601 const type_names[] =
2602 {
2603 { OPERAND_TYPE_REG8, "r8" },
2604 { OPERAND_TYPE_REG16, "r16" },
2605 { OPERAND_TYPE_REG32, "r32" },
2606 { OPERAND_TYPE_REG64, "r64" },
2607 { OPERAND_TYPE_IMM8, "i8" },
2608 { OPERAND_TYPE_IMM8, "i8s" },
2609 { OPERAND_TYPE_IMM16, "i16" },
2610 { OPERAND_TYPE_IMM32, "i32" },
2611 { OPERAND_TYPE_IMM32S, "i32s" },
2612 { OPERAND_TYPE_IMM64, "i64" },
2613 { OPERAND_TYPE_IMM1, "i1" },
2614 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2615 { OPERAND_TYPE_DISP8, "d8" },
2616 { OPERAND_TYPE_DISP16, "d16" },
2617 { OPERAND_TYPE_DISP32, "d32" },
2618 { OPERAND_TYPE_DISP32S, "d32s" },
2619 { OPERAND_TYPE_DISP64, "d64" },
2620 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2621 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2622 { OPERAND_TYPE_CONTROL, "control reg" },
2623 { OPERAND_TYPE_TEST, "test reg" },
2624 { OPERAND_TYPE_DEBUG, "debug reg" },
2625 { OPERAND_TYPE_FLOATREG, "FReg" },
2626 { OPERAND_TYPE_FLOATACC, "FAcc" },
2627 { OPERAND_TYPE_SREG2, "SReg2" },
2628 { OPERAND_TYPE_SREG3, "SReg3" },
2629 { OPERAND_TYPE_ACC, "Acc" },
2630 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2631 { OPERAND_TYPE_REGMMX, "rMMX" },
2632 { OPERAND_TYPE_REGXMM, "rXMM" },
2633 { OPERAND_TYPE_REGYMM, "rYMM" },
2634 { OPERAND_TYPE_ESSEG, "es" },
2635 };
2636
2637 static void
2638 pt (i386_operand_type t)
2639 {
2640 unsigned int j;
2641 i386_operand_type a;
2642
2643 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2644 {
2645 a = operand_type_and (t, type_names[j].mask);
2646 if (!operand_type_all_zero (&a))
2647 fprintf (stdout, "%s, ", type_names[j].name);
2648 }
2649 fflush (stdout);
2650 }
2651
2652 #endif /* DEBUG386 */
2653 \f
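/* Map a fixup of SIZE bytes, with PCREL and SIGN as flagged, to a BFD
   relocation type. If OTHER names an explicitly requested relocation,
   it is checked against the field and, for 8-byte fields, widened to
   the corresponding 64-bit relocation. */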
2654 static bfd_reloc_code_real_type
2655 reloc (unsigned int size,
2656 int pcrel,
2657 int sign,
2658 bfd_reloc_code_real_type other)
2659 {
2660 if (other != NO_RELOC)
2661 {
2662 reloc_howto_type *rel;
2663
2664 if (size == 8)
2665 switch (other)
2666 {
2667 case BFD_RELOC_X86_64_GOT32:
2668 return BFD_RELOC_X86_64_GOT64;
2669 break;
2670 case BFD_RELOC_X86_64_PLTOFF64:
2671 return BFD_RELOC_X86_64_PLTOFF64;
2672 break;
2673 case BFD_RELOC_X86_64_GOTPC32:
2674 other = BFD_RELOC_X86_64_GOTPC64;
2675 break;
2676 case BFD_RELOC_X86_64_GOTPCREL:
2677 other = BFD_RELOC_X86_64_GOTPCREL64;
2678 break;
2679 case BFD_RELOC_X86_64_TPOFF32:
2680 other = BFD_RELOC_X86_64_TPOFF64;
2681 break;
2682 case BFD_RELOC_X86_64_DTPOFF32:
2683 other = BFD_RELOC_X86_64_DTPOFF64;
2684 break;
2685 default:
2686 break;
2687 }
2688
2689 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2690 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2691 sign = -1;
2692
2693 rel = bfd_reloc_type_lookup (stdoutput, other);
2694 if (!rel)
2695 as_bad (_("unknown relocation (%u)"), other);
2696 else if (size != bfd_get_reloc_size (rel))
2697 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2698 bfd_get_reloc_size (rel),
2699 size);
2700 else if (pcrel && !rel->pc_relative)
2701 as_bad (_("non-pc-relative relocation for pc-relative field"));
2702 else if ((rel->complain_on_overflow == complain_overflow_signed
2703 && !sign)
2704 || (rel->complain_on_overflow == complain_overflow_unsigned
2705 && sign > 0))
2706 as_bad (_("relocated field and relocation type differ in signedness"));
2707 else
2708 return other;
2709 return NO_RELOC;
2710 }
2711
2712 if (pcrel)
2713 {
2714 if (!sign)
2715 as_bad (_("there are no unsigned pc-relative relocations"));
2716 switch (size)
2717 {
2718 case 1: return BFD_RELOC_8_PCREL;
2719 case 2: return BFD_RELOC_16_PCREL;
2720 case 4: return BFD_RELOC_32_PCREL;
2721 case 8: return BFD_RELOC_64_PCREL;
2722 }
2723 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2724 }
2725 else
2726 {
2727 if (sign > 0)
2728 switch (size)
2729 {
2730 case 4: return BFD_RELOC_X86_64_32S;
2731 }
2732 else
2733 switch (size)
2734 {
2735 case 1: return BFD_RELOC_8;
2736 case 2: return BFD_RELOC_16;
2737 case 4: return BFD_RELOC_32;
2738 case 8: return BFD_RELOC_64;
2739 }
2740 as_bad (_("cannot do %s %u byte relocation"),
2741 sign > 0 ? "signed" : "unsigned", size);
2742 }
2743
2744 return NO_RELOC;
2745 }
2746
2747 /* Here we decide which fixups can be adjusted to make them relative to
2748 the beginning of the section instead of the symbol. Basically we need
2749 to make sure that the dynamic relocations are done correctly, so in
2750 some cases we force the original symbol to be used. */
2751
2752 int
2753 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2754 {
2755 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2756 if (!IS_ELF)
2757 return 1;
2758
2759 /* Don't adjust pc-relative references to merge sections in 64-bit
2760 mode. */
2761 if (use_rela_relocations
2762 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2763 && fixP->fx_pcrel)
2764 return 0;
2765
2766 /* The x86_64 GOTPCREL relocations are represented as 32-bit PC-relative
2767 relocations and changed later by validate_fix. */
2768 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2769 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2770 return 0;
2771
2772 /* adjust_reloc_syms doesn't know about the GOT. */
2773 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2774 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2775 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2776 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2777 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2778 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2779 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2780 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2781 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2782 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2783 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2784 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2785 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2789 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2790 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2791 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2792 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2793 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2794 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2795 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2796 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2797 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2798 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2799 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2800 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2801 return 0;
2802 #endif
2803 return 1;
2804 }
2805
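/* Roughly: return 0 for non-FPU mnemonics (and fxsave/fxrstor), 2 for
   the integer forms (fi*), 3 for control/state ops (fldcw/fldenv,
   non-waiting fn* forms, frstor, fsave, fstcw/fstenv/fstsw), and 1
   for other FPU operations. */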
2806 static int
2807 intel_float_operand (const char *mnemonic)
2808 {
2809 /* Note that the value returned is meaningful only for opcodes with (memory)
2810 operands, hence the code here is free to improperly handle opcodes that
2811 have no operands (for better performance and smaller code). */
2812
2813 if (mnemonic[0] != 'f')
2814 return 0; /* non-math */
2815
2816 switch (mnemonic[1])
2817 {
2818 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2819 the fs segment override prefix are not currently handled, because no
2820 call path can make opcodes without operands get here. */
2821 case 'i':
2822 return 2 /* integer op */;
2823 case 'l':
2824 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2825 return 3; /* fldcw/fldenv */
2826 break;
2827 case 'n':
2828 if (mnemonic[2] != 'o' /* fnop */)
2829 return 3; /* non-waiting control op */
2830 break;
2831 case 'r':
2832 if (mnemonic[2] == 's')
2833 return 3; /* frstor/frstpm */
2834 break;
2835 case 's':
2836 if (mnemonic[2] == 'a')
2837 return 3; /* fsave */
2838 if (mnemonic[2] == 't')
2839 {
2840 switch (mnemonic[3])
2841 {
2842 case 'c': /* fstcw */
2843 case 'd': /* fstdw */
2844 case 'e': /* fstenv */
2845 case 's': /* fsts[gw] */
2846 return 3;
2847 }
2848 }
2849 break;
2850 case 'x':
2851 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2852 return 0; /* fxsave/fxrstor are not really math ops */
2853 break;
2854 }
2855
2856 return 1;
2857 }
2858
2859 /* Build the VEX prefix. */
2860
2861 static void
2862 build_vex_prefix (const insn_template *t)
2863 {
2864 unsigned int register_specifier;
2865 unsigned int implied_prefix;
2866 unsigned int vector_length;
2867
2868 /* Check register specifier. */
2869 if (i.vex.register_specifier)
2870 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2871 else
2872 register_specifier = 0xf;
2873
2874 /* Use the 2-byte VEX prefix by swapping the destination and source
2875 operands. */
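  /* The 2-byte VEX form can encode only an inverted REX.R bit; after
     the swap the register that needed REX.B sits in the ModRM reg
     field, where REX.R covers it (see the W/X/B test further down). */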
2876 if (!i.swap_operand
2877 && i.operands == i.reg_operands
2878 && i.tm.opcode_modifier.vexopcode == VEX0F
2879 && i.tm.opcode_modifier.s
2880 && i.rex == REX_B)
2881 {
2882 unsigned int xchg = i.operands - 1;
2883 union i386_op temp_op;
2884 i386_operand_type temp_type;
2885
2886 temp_type = i.types[xchg];
2887 i.types[xchg] = i.types[0];
2888 i.types[0] = temp_type;
2889 temp_op = i.op[xchg];
2890 i.op[xchg] = i.op[0];
2891 i.op[0] = temp_op;
2892
2893 gas_assert (i.rm.mode == 3);
2894
2895 i.rex = REX_R;
2896 xchg = i.rm.regmem;
2897 i.rm.regmem = i.rm.reg;
2898 i.rm.reg = xchg;
2899
2900 /* Use the next insn template. */
2901 i.tm = t[1];
2902 }
2903
2904 if (i.tm.opcode_modifier.vex == VEXScalar)
2905 vector_length = avxscalar;
2906 else
2907 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2908
2909 switch ((i.tm.base_opcode >> 8) & 0xff)
2910 {
2911 case 0:
2912 implied_prefix = 0;
2913 break;
2914 case DATA_PREFIX_OPCODE:
2915 implied_prefix = 1;
2916 break;
2917 case REPE_PREFIX_OPCODE:
2918 implied_prefix = 2;
2919 break;
2920 case REPNE_PREFIX_OPCODE:
2921 implied_prefix = 3;
2922 break;
2923 default:
2924 abort ();
2925 }
2926
2927 /* Use 2-byte VEX prefix if possible. */
2928 if (i.tm.opcode_modifier.vexopcode == VEX0F
2929 && i.tm.opcode_modifier.vexw != VEXW1
2930 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2931 {
2932 /* 2-byte VEX prefix. */
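      /* Byte 1 layout: bit 7 = ~R, bits 6..3 = ~vvvv (register
	 specifier), bit 2 = L (vector length), bits 1..0 = pp
	 (implied prefix). */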
2933 unsigned int r;
2934
2935 i.vex.length = 2;
2936 i.vex.bytes[0] = 0xc5;
2937
2938 /* Check the REX.R bit. */
2939 r = (i.rex & REX_R) ? 0 : 1;
2940 i.vex.bytes[1] = (r << 7
2941 | register_specifier << 3
2942 | vector_length << 2
2943 | implied_prefix);
2944 }
2945 else
2946 {
2947 /* 3-byte VEX prefix. */
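      /* Byte 1: bits 7..5 = ~R ~X ~B, bits 4..0 = opcode map (mmmmm).
	 Byte 2: bit 7 = W, bits 6..3 = ~vvvv, bit 2 = L (vector
	 length), bits 1..0 = pp (implied prefix). */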
2948 unsigned int m, w;
2949
2950 i.vex.length = 3;
2951
2952 switch (i.tm.opcode_modifier.vexopcode)
2953 {
2954 case VEX0F:
2955 m = 0x1;
2956 i.vex.bytes[0] = 0xc4;
2957 break;
2958 case VEX0F38:
2959 m = 0x2;
2960 i.vex.bytes[0] = 0xc4;
2961 break;
2962 case VEX0F3A:
2963 m = 0x3;
2964 i.vex.bytes[0] = 0xc4;
2965 break;
2966 case XOP08:
2967 m = 0x8;
2968 i.vex.bytes[0] = 0x8f;
2969 break;
2970 case XOP09:
2971 m = 0x9;
2972 i.vex.bytes[0] = 0x8f;
2973 break;
2974 case XOP0A:
2975 m = 0xa;
2976 i.vex.bytes[0] = 0x8f;
2977 break;
2978 default:
2979 abort ();
2980 }
2981
2982 /* The high 3 bits of the second VEX byte are the one's complement
2983 of the RXB bits from REX. */
2984 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2985
2986 /* Check the REX.W bit. */
2987 w = (i.rex & REX_W) ? 1 : 0;
2988 if (i.tm.opcode_modifier.vexw)
2989 {
2990 if (w)
2991 abort ();
2992
2993 if (i.tm.opcode_modifier.vexw == VEXW1)
2994 w = 1;
2995 }
2996
2997 i.vex.bytes[2] = (w << 7
2998 | register_specifier << 3
2999 | vector_length << 2
3000 | implied_prefix);
3001 }
3002 }
3003
3004 static void
3005 process_immext (void)
3006 {
3007 expressionS *exp;
3008
3009 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3010 && i.operands > 0)
3011 {
3012 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3013 with an opcode suffix which is coded in the same place as an
3014 8-bit immediate field would be.
3015 Here we check those operands and remove them afterwards. */
3016 unsigned int x;
3017
3018 for (x = 0; x < i.operands; x++)
3019 if (register_number (i.op[x].regs) != x)
3020 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3021 register_prefix, i.op[x].regs->reg_name, x + 1,
3022 i.tm.name);
3023
3024 i.operands = 0;
3025 }
3026
3027 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3028 which is coded in the same place as an 8-bit immediate field
3029 would be. Here we fake an 8-bit immediate operand from the
3030 opcode suffix stored in tm.extension_opcode.
3031
3032 AVX instructions also use this encoding for some
3033 3-argument instructions. */
3034
3035 gas_assert (i.imm_operands == 0
3036 && (i.operands <= 2
3037 || (i.tm.opcode_modifier.vex
3038 && i.operands <= 4)));
3039
3040 exp = &im_expressions[i.imm_operands++];
3041 i.op[i.operands].imms = exp;
3042 i.types[i.operands] = imm8;
3043 i.operands++;
3044 exp->X_op = O_constant;
3045 exp->X_add_number = i.tm.extension_opcode;
3046 i.tm.extension_opcode = None;
3047 }
3048
3049
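/* Check that the HLE prefix (xacquire/xrelease) recorded earlier is
   legal for the matched template: some templates forbid it, most
   require an accompanying lock prefix, and HLEPrefixRelease templates
   (presumably plain stores) also accept a lone xrelease provided the
   destination is in memory. */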
3050 static int
3051 check_hle (void)
3052 {
3053 switch (i.tm.opcode_modifier.hleprefixok)
3054 {
3055 default:
3056 abort ();
3057 case HLEPrefixNone:
3058 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3059 as_bad (_("invalid instruction `%s' after `xacquire'"),
3060 i.tm.name);
3061 else
3062 as_bad (_("invalid instruction `%s' after `xrelease'"),
3063 i.tm.name);
3064 return 0;
3065 case HLEPrefixLock:
3066 if (i.prefix[LOCK_PREFIX])
3067 return 1;
3068 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3069 as_bad (_("missing `lock' with `xacquire'"));
3070 else
3071 as_bad (_("missing `lock' with `xrelease'"));
3072 return 0;
3073 case HLEPrefixAny:
3074 return 1;
3075 case HLEPrefixRelease:
3076 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3077 {
3078 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3079 i.tm.name);
3080 return 0;
3081 }
3082 if (i.mem_operands == 0
3083 || !operand_type_check (i.types[i.operands - 1], anymem))
3084 {
3085 as_bad (_("memory destination needed for instruction `%s'"
3086 " after `xrelease'"), i.tm.name);
3087 return 0;
3088 }
3089 return 1;
3090 }
3091 }
3092
3093 /* This is the guts of the machine-dependent assembler. LINE points to a
3094 machine dependent instruction. This function is supposed to emit
3095 the frags/bytes it assembles to. */
3096
3097 void
3098 md_assemble (char *line)
3099 {
3100 unsigned int j;
3101 char mnemonic[MAX_MNEM_SIZE];
3102 const insn_template *t;
3103
3104 /* Initialize globals. */
3105 memset (&i, '\0', sizeof (i));
3106 for (j = 0; j < MAX_OPERANDS; j++)
3107 i.reloc[j] = NO_RELOC;
3108 memset (disp_expressions, '\0', sizeof (disp_expressions));
3109 memset (im_expressions, '\0', sizeof (im_expressions));
3110 save_stack_p = save_stack;
3111
3112 /* First parse an instruction mnemonic & call i386_operand for the operands.
3113 We assume that the scrubber has arranged it so that line[0] is the valid
3114 start of a (possibly prefixed) mnemonic. */
3115
3116 line = parse_insn (line, mnemonic);
3117 if (line == NULL)
3118 return;
3119
3120 line = parse_operands (line, mnemonic);
3121 this_operand = -1;
3122 if (line == NULL)
3123 return;
3124
3125 /* Now we've parsed the mnemonic into a set of templates, and have the
3126 operands at hand. */
3127
3128 /* All intel opcodes have reversed operands except for "bound" and
3129 "enter". We also don't reverse intersegment "jmp" and "call"
3130 instructions with 2 immediate operands so that the immediate segment
3131 precedes the offset, as it does when in AT&T mode. */
3132 if (intel_syntax
3133 && i.operands > 1
3134 && (strcmp (mnemonic, "bound") != 0)
3135 && (strcmp (mnemonic, "invlpga") != 0)
3136 && !(operand_type_check (i.types[0], imm)
3137 && operand_type_check (i.types[1], imm)))
3138 swap_operands ();
3139
3140 /* The order of the immediates should be reversed for the
3141 two-immediate extrq and insertq instructions. */
3142 if (i.imm_operands == 2
3143 && (strcmp (mnemonic, "extrq") == 0
3144 || strcmp (mnemonic, "insertq") == 0))
3145 swap_2_operands (0, 1);
3146
3147 if (i.imm_operands)
3148 optimize_imm ();
3149
3150 /* Don't optimize the displacement for movabs, since it only takes a
3151 64-bit displacement. */
3152 if (i.disp_operands
3153 && i.disp_encoding != disp_encoding_32bit
3154 && (flag_code != CODE_64BIT
3155 || strcmp (mnemonic, "movabs") != 0))
3156 optimize_disp ();
3157
3158 /* Next, we find a template that matches the given insn,
3159 making sure the overlap of the given operands types is consistent
3160 with the template operand types. */
3161
3162 if (!(t = match_template ()))
3163 return;
3164
3165 if (sse_check != check_none
3166 && !i.tm.opcode_modifier.noavx
3167 && (i.tm.cpu_flags.bitfield.cpusse
3168 || i.tm.cpu_flags.bitfield.cpusse2
3169 || i.tm.cpu_flags.bitfield.cpusse3
3170 || i.tm.cpu_flags.bitfield.cpussse3
3171 || i.tm.cpu_flags.bitfield.cpusse4_1
3172 || i.tm.cpu_flags.bitfield.cpusse4_2))
3173 {
3174 (sse_check == check_warning
3175 ? as_warn
3176 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3177 }
3178
3179 /* Zap movzx and movsx suffix. The suffix has been set from
3180 "word ptr" or "byte ptr" on the source operand in Intel syntax
3181 or extracted from mnemonic in AT&T syntax. But we'll use
3182 the destination register to choose the suffix for encoding. */
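  /* 0x0fb6/0x0fb7 encode movzx and 0x0fbe/0x0fbf encode movsx;
     clearing bits 0 and 3 maps all four onto 0x0fb6. */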
3183 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3184 {
3185 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3186 there is no suffix, the default will be byte extension. */
3187 if (i.reg_operands != 2
3188 && !i.suffix
3189 && intel_syntax)
3190 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3191
3192 i.suffix = 0;
3193 }
3194
3195 if (i.tm.opcode_modifier.fwait)
3196 if (!add_prefix (FWAIT_OPCODE))
3197 return;
3198
3199 /* Check for lock without a lockable instruction. Destination operand
3200 must be memory unless it is xchg (0x86). */
3201 if (i.prefix[LOCK_PREFIX]
3202 && (!i.tm.opcode_modifier.islockable
3203 || i.mem_operands == 0
3204 || (i.tm.base_opcode != 0x86
3205 && !operand_type_check (i.types[i.operands - 1], anymem))))
3206 {
3207 as_bad (_("expecting lockable instruction after `lock'"));
3208 return;
3209 }
3210
3211 /* Check if HLE prefix is OK. */
3212 if (i.have_hle && !check_hle ())
3213 return;
3214
3215 /* Check string instruction segment overrides. */
3216 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3217 {
3218 if (!check_string ())
3219 return;
3220 i.disp_operands = 0;
3221 }
3222
3223 if (!process_suffix ())
3224 return;
3225
3226 /* Update operand types. */
3227 for (j = 0; j < i.operands; j++)
3228 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3229
3230 /* Make still unresolved immediate matches conform to size of immediate
3231 given in i.suffix. */
3232 if (!finalize_imm ())
3233 return;
3234
3235 if (i.types[0].bitfield.imm1)
3236 i.imm_operands = 0; /* kludge for shift insns. */
3237
3238 /* We only need to check those implicit registers for instructions
3239 with 3 operands or less. */
3240 if (i.operands <= 3)
3241 for (j = 0; j < i.operands; j++)
3242 if (i.types[j].bitfield.inoutportreg
3243 || i.types[j].bitfield.shiftcount
3244 || i.types[j].bitfield.acc
3245 || i.types[j].bitfield.floatacc)
3246 i.reg_operands--;
3247
3248 /* ImmExt should be processed after SSE2AVX. */
3249 if (!i.tm.opcode_modifier.sse2avx
3250 && i.tm.opcode_modifier.immext)
3251 process_immext ();
3252
3253 /* For insns with operands there are more diddles to do to the opcode. */
3254 if (i.operands)
3255 {
3256 if (!process_operands ())
3257 return;
3258 }
3259 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3260 {
3261 /* UnixWare fsub with no args is an alias for fsubp, fadd -> faddp, etc. */
3262 as_warn (_("translating to `%sp'"), i.tm.name);
3263 }
3264
3265 if (i.tm.opcode_modifier.vex)
3266 build_vex_prefix (t);
3267
3268 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3269 instructions may define INT_OPCODE as well, so avoid this corner
3270 case for those instructions that use MODRM. */
3271 if (i.tm.base_opcode == INT_OPCODE
3272 && !i.tm.opcode_modifier.modrm
3273 && i.op[0].imms->X_add_number == 3)
3274 {
3275 i.tm.base_opcode = INT3_OPCODE;
3276 i.imm_operands = 0;
3277 }
3278
3279 if ((i.tm.opcode_modifier.jump
3280 || i.tm.opcode_modifier.jumpbyte
3281 || i.tm.opcode_modifier.jumpdword)
3282 && i.op[0].disps->X_op == O_constant)
3283 {
3284 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3285 the absolute address given by the constant. Since ix86 jumps and
3286 calls are pc relative, we need to generate a reloc. */
3287 i.op[0].disps->X_add_symbol = &abs_symbol;
3288 i.op[0].disps->X_op = O_symbol;
3289 }
3290
3291 if (i.tm.opcode_modifier.rex64)
3292 i.rex |= REX_W;
3293
3294 /* For 8-bit registers we need an empty REX prefix. Also, if the
3295 instruction already has a REX prefix, we need to convert old
3296 registers to new ones. */
3297
3298 if ((i.types[0].bitfield.reg8
3299 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3300 || (i.types[1].bitfield.reg8
3301 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3302 || ((i.types[0].bitfield.reg8
3303 || i.types[1].bitfield.reg8)
3304 && i.rex != 0))
3305 {
3306 int x;
3307
3308 i.rex |= REX_OPCODE;
3309 for (x = 0; x < 2; x++)
3310 {
3311 /* Look for 8 bit operand that uses old registers. */
3312 if (i.types[x].bitfield.reg8
3313 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3314 {
3315 /* If it is a "high byte" register (%ah, %ch, %dh, %bh), give up. */
3316 if (i.op[x].regs->reg_num > 3)
3317 as_bad (_("can't encode register '%s%s' in an "
3318 "instruction requiring REX prefix."),
3319 register_prefix, i.op[x].regs->reg_name);
3320
3321 /* Otherwise it is equivalent to the extended register.
3322 Since the encoding doesn't change, this is merely a
3323 cosmetic cleanup for debug output. */
3324
3325 i.op[x].regs = i.op[x].regs + 8;
3326 }
3327 }
3328 }
3329
3330 if (i.rex != 0)
3331 add_prefix (REX_OPCODE | i.rex);
3332
3333 /* We are ready to output the insn. */
3334 output_insn ();
3335 }
3336
3337 static char *
3338 parse_insn (char *line, char *mnemonic)
3339 {
3340 char *l = line;
3341 char *token_start = l;
3342 char *mnem_p;
3343 int supported;
3344 const insn_template *t;
3345 char *dot_p = NULL;
3346
3347 /* Non-zero if we found a prefix only acceptable with string insns. */
3348 const char *expecting_string_instruction = NULL;
3349
3350 while (1)
3351 {
3352 mnem_p = mnemonic;
3353 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3354 {
3355 if (*mnem_p == '.')
3356 dot_p = mnem_p;
3357 mnem_p++;
3358 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3359 {
3360 as_bad (_("no such instruction: `%s'"), token_start);
3361 return NULL;
3362 }
3363 l++;
3364 }
3365 if (!is_space_char (*l)
3366 && *l != END_OF_INSN
3367 && (intel_syntax
3368 || (*l != PREFIX_SEPARATOR
3369 && *l != ',')))
3370 {
3371 as_bad (_("invalid character %s in mnemonic"),
3372 output_invalid (*l));
3373 return NULL;
3374 }
3375 if (token_start == l)
3376 {
3377 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3378 as_bad (_("expecting prefix; got nothing"));
3379 else
3380 as_bad (_("expecting mnemonic; got nothing"));
3381 return NULL;
3382 }
3383
3384 /* Look up instruction (or prefix) via hash table. */
3385 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3386
3387 if (*l != END_OF_INSN
3388 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3389 && current_templates
3390 && current_templates->start->opcode_modifier.isprefix)
3391 {
3392 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3393 {
3394 as_bad ((flag_code != CODE_64BIT
3395 ? _("`%s' is only supported in 64-bit mode")
3396 : _("`%s' is not supported in 64-bit mode")),
3397 current_templates->start->name);
3398 return NULL;
3399 }
3400 /* If we are in 16-bit mode, do not allow addr16 or data16.
3401 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3402 if ((current_templates->start->opcode_modifier.size16
3403 || current_templates->start->opcode_modifier.size32)
3404 && flag_code != CODE_64BIT
3405 && (current_templates->start->opcode_modifier.size32
3406 ^ (flag_code == CODE_16BIT)))
3407 {
3408 as_bad (_("redundant %s prefix"),
3409 current_templates->start->name);
3410 return NULL;
3411 }
3412 /* Add prefix, checking for repeated prefixes. */
3413 switch (add_prefix (current_templates->start->base_opcode))
3414 {
3415 case PREFIX_EXIST:
3416 return NULL;
3417 case PREFIX_REP:
3418 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3419 i.have_hle = 1;
3420 else
3421 expecting_string_instruction = current_templates->start->name;
3422 break;
3423 default:
3424 break;
3425 }
3426 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3427 token_start = ++l;
3428 }
3429 else
3430 break;
3431 }
3432
3433 if (!current_templates)
3434 {
3435 /* Check if we should swap operands (".s" suffix) or force an 8-bit
3436 or 32-bit displacement in the encoding (".d8" / ".d32" suffixes). */
3437 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3438 i.swap_operand = 1;
3439 else if (mnem_p - 3 == dot_p
3440 && dot_p[1] == 'd'
3441 && dot_p[2] == '8')
3442 i.disp_encoding = disp_encoding_8bit;
3443 else if (mnem_p - 4 == dot_p
3444 && dot_p[1] == 'd'
3445 && dot_p[2] == '3'
3446 && dot_p[3] == '2')
3447 i.disp_encoding = disp_encoding_32bit;
3448 else
3449 goto check_suffix;
3450 mnem_p = dot_p;
3451 *dot_p = '\0';
3452 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3453 }
3454
3455 if (!current_templates)
3456 {
3457 check_suffix:
3458 /* See if we can get a match by trimming off a suffix. */
3459 switch (mnem_p[-1])
3460 {
3461 case WORD_MNEM_SUFFIX:
3462 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3463 i.suffix = SHORT_MNEM_SUFFIX;
3464 else
3465 case BYTE_MNEM_SUFFIX:
3466 case QWORD_MNEM_SUFFIX:
3467 i.suffix = mnem_p[-1];
3468 mnem_p[-1] = '\0';
3469 current_templates = (const templates *) hash_find (op_hash,
3470 mnemonic);
3471 break;
3472 case SHORT_MNEM_SUFFIX:
3473 case LONG_MNEM_SUFFIX:
3474 if (!intel_syntax)
3475 {
3476 i.suffix = mnem_p[-1];
3477 mnem_p[-1] = '\0';
3478 current_templates = (const templates *) hash_find (op_hash,
3479 mnemonic);
3480 }
3481 break;
3482
3483 /* Intel Syntax. */
3484 case 'd':
3485 if (intel_syntax)
3486 {
3487 if (intel_float_operand (mnemonic) == 1)
3488 i.suffix = SHORT_MNEM_SUFFIX;
3489 else
3490 i.suffix = LONG_MNEM_SUFFIX;
3491 mnem_p[-1] = '\0';
3492 current_templates = (const templates *) hash_find (op_hash,
3493 mnemonic);
3494 }
3495 break;
3496 }
3497 if (!current_templates)
3498 {
3499 as_bad (_("no such instruction: `%s'"), token_start);
3500 return NULL;
3501 }
3502 }
3503
3504 if (current_templates->start->opcode_modifier.jump
3505 || current_templates->start->opcode_modifier.jumpbyte)
3506 {
3507 /* Check for a branch hint. We allow ",pt" and ",pn" for
3508 predict taken and predict not taken respectively.
3509 I'm not sure that branch hints actually do anything on loop
3510 and jcxz insns (JumpByte) for current Pentium4 chips. They
3511 may work in the future and it doesn't hurt to accept them
3512 now. */
3513 if (l[0] == ',' && l[1] == 'p')
3514 {
3515 if (l[2] == 't')
3516 {
3517 if (!add_prefix (DS_PREFIX_OPCODE))
3518 return NULL;
3519 l += 3;
3520 }
3521 else if (l[2] == 'n')
3522 {
3523 if (!add_prefix (CS_PREFIX_OPCODE))
3524 return NULL;
3525 l += 3;
3526 }
3527 }
3528 }
3529 /* Any other comma loses. */
3530 if (*l == ',')
3531 {
3532 as_bad (_("invalid character %s in mnemonic"),
3533 output_invalid (*l));
3534 return NULL;
3535 }
3536
3537 /* Check if instruction is supported on specified architecture. */
3538 supported = 0;
3539 for (t = current_templates->start; t < current_templates->end; ++t)
3540 {
3541 supported |= cpu_flags_match (t);
3542 if (supported == CPU_FLAGS_PERFECT_MATCH)
3543 goto skip;
3544 }
3545
3546 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3547 {
3548 as_bad (flag_code == CODE_64BIT
3549 ? _("`%s' is not supported in 64-bit mode")
3550 : _("`%s' is only supported in 64-bit mode"),
3551 current_templates->start->name);
3552 return NULL;
3553 }
3554 if (supported != CPU_FLAGS_PERFECT_MATCH)
3555 {
3556 as_bad (_("`%s' is not supported on `%s%s'"),
3557 current_templates->start->name,
3558 cpu_arch_name ? cpu_arch_name : default_arch,
3559 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3560 return NULL;
3561 }
3562
3563 skip:
3564 if (!cpu_arch_flags.bitfield.cpui386
3565 && (flag_code != CODE_16BIT))
3566 {
3567 as_warn (_("use .code16 to ensure correct addressing mode"));
3568 }
3569
3570 /* Check for rep/repne without a string (or other allowed) instruction. */
3571 if (expecting_string_instruction)
3572 {
3573 static templates override;
3574
3575 for (t = current_templates->start; t < current_templates->end; ++t)
3576 if (t->opcode_modifier.repprefixok)
3577 break;
3578 if (t >= current_templates->end)
3579 {
3580 as_bad (_("expecting string instruction after `%s'"),
3581 expecting_string_instruction);
3582 return NULL;
3583 }
3584 for (override.start = t; t < current_templates->end; ++t)
3585 if (!t->opcode_modifier.repprefixok)
3586 break;
3587 override.end = t;
3588 current_templates = &override;
3589 }
3590
3591 return l;
3592 }
3593
3594 static char *
3595 parse_operands (char *l, const char *mnemonic)
3596 {
3597 char *token_start;
3598
3599 /* 1 if operand is pending after ','. */
3600 unsigned int expecting_operand = 0;
3601
3602 /* Non-zero if operand parens not balanced. */
3603 unsigned int paren_not_balanced;
3604
3605 while (*l != END_OF_INSN)
3606 {
3607 /* Skip optional white space before operand. */
3608 if (is_space_char (*l))
3609 ++l;
3610 if (!is_operand_char (*l) && *l != END_OF_INSN)
3611 {
3612 as_bad (_("invalid character %s before operand %d"),
3613 output_invalid (*l),
3614 i.operands + 1);
3615 return NULL;
3616 }
3617 token_start = l; /* after white space */
3618 paren_not_balanced = 0;
3619 while (paren_not_balanced || *l != ',')
3620 {
3621 if (*l == END_OF_INSN)
3622 {
3623 if (paren_not_balanced)
3624 {
3625 if (!intel_syntax)
3626 as_bad (_("unbalanced parenthesis in operand %d."),
3627 i.operands + 1);
3628 else
3629 as_bad (_("unbalanced brackets in operand %d."),
3630 i.operands + 1);
3631 return NULL;
3632 }
3633 else
3634 break; /* we are done */
3635 }
3636 else if (!is_operand_char (*l) && !is_space_char (*l))
3637 {
3638 as_bad (_("invalid character %s in operand %d"),
3639 output_invalid (*l),
3640 i.operands + 1);
3641 return NULL;
3642 }
3643 if (!intel_syntax)
3644 {
3645 if (*l == '(')
3646 ++paren_not_balanced;
3647 if (*l == ')')
3648 --paren_not_balanced;
3649 }
3650 else
3651 {
3652 if (*l == '[')
3653 ++paren_not_balanced;
3654 if (*l == ']')
3655 --paren_not_balanced;
3656 }
3657 l++;
3658 }
3659 if (l != token_start)
3660 { /* Yes, we've read in another operand. */
3661 unsigned int operand_ok;
3662 this_operand = i.operands++;
3663 i.types[this_operand].bitfield.unspecified = 1;
3664 if (i.operands > MAX_OPERANDS)
3665 {
3666 as_bad (_("spurious operands; (%d operands/instruction max)"),
3667 MAX_OPERANDS);
3668 return NULL;
3669 }
3670 /* Now parse operand adding info to 'i' as we go along. */
3671 END_STRING_AND_SAVE (l);
3672
3673 if (intel_syntax)
3674 operand_ok =
3675 i386_intel_operand (token_start,
3676 intel_float_operand (mnemonic));
3677 else
3678 operand_ok = i386_att_operand (token_start);
3679
3680 RESTORE_END_STRING (l);
3681 if (!operand_ok)
3682 return NULL;
3683 }
3684 else
3685 {
3686 if (expecting_operand)
3687 {
3688 expecting_operand_after_comma:
3689 as_bad (_("expecting operand after ','; got nothing"));
3690 return NULL;
3691 }
3692 if (*l == ',')
3693 {
3694 as_bad (_("expecting operand before ','; got nothing"));
3695 return NULL;
3696 }
3697 }
3698
3699 /* Now *l must be either ',' or END_OF_INSN. */
3700 if (*l == ',')
3701 {
3702 if (*++l == END_OF_INSN)
3703 {
3704 /* Just skip it, if it's \n complain. */
3705 goto expecting_operand_after_comma;
3706 }
3707 expecting_operand = 1;
3708 }
3709 }
3710 return l;
3711 }
3712
3713 static void
3714 swap_2_operands (int xchg1, int xchg2)
3715 {
3716 union i386_op temp_op;
3717 i386_operand_type temp_type;
3718 enum bfd_reloc_code_real temp_reloc;
3719
3720 temp_type = i.types[xchg2];
3721 i.types[xchg2] = i.types[xchg1];
3722 i.types[xchg1] = temp_type;
3723 temp_op = i.op[xchg2];
3724 i.op[xchg2] = i.op[xchg1];
3725 i.op[xchg1] = temp_op;
3726 temp_reloc = i.reloc[xchg2];
3727 i.reloc[xchg2] = i.reloc[xchg1];
3728 i.reloc[xchg1] = temp_reloc;
3729 }
3730
3731 static void
3732 swap_operands (void)
3733 {
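  /* With 4 or 5 operands, swap the second operand with the
     second-to-last one, then fall through to swap the first operand
     with the last. */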
3734 switch (i.operands)
3735 {
3736 case 5:
3737 case 4:
3738 swap_2_operands (1, i.operands - 2);
3739 case 3:
3740 case 2:
3741 swap_2_operands (0, i.operands - 1);
3742 break;
3743 default:
3744 abort ();
3745 }
3746
3747 if (i.mem_operands == 2)
3748 {
3749 const seg_entry *temp_seg;
3750 temp_seg = i.seg[0];
3751 i.seg[0] = i.seg[1];
3752 i.seg[1] = temp_seg;
3753 }
3754 }
3755
3756 /* Try to ensure constant immediates are represented in the smallest
3757 opcode possible. */
3758 static void
3759 optimize_imm (void)
3760 {
3761 char guess_suffix = 0;
3762 int op;
3763
3764 if (i.suffix)
3765 guess_suffix = i.suffix;
3766 else if (i.reg_operands)
3767 {
3768 /* Figure out a suffix from the last register operand specified.
3769 We can't do this properly yet, i.e. excluding InOutPortReg,
3770 but the following works for instructions with immediates.
3771 In any case, we can't set i.suffix yet. */
3772 for (op = i.operands; --op >= 0;)
3773 if (i.types[op].bitfield.reg8)
3774 {
3775 guess_suffix = BYTE_MNEM_SUFFIX;
3776 break;
3777 }
3778 else if (i.types[op].bitfield.reg16)
3779 {
3780 guess_suffix = WORD_MNEM_SUFFIX;
3781 break;
3782 }
3783 else if (i.types[op].bitfield.reg32)
3784 {
3785 guess_suffix = LONG_MNEM_SUFFIX;
3786 break;
3787 }
3788 else if (i.types[op].bitfield.reg64)
3789 {
3790 guess_suffix = QWORD_MNEM_SUFFIX;
3791 break;
3792 }
3793 }
3794 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3795 guess_suffix = WORD_MNEM_SUFFIX;
3796
3797 for (op = i.operands; --op >= 0;)
3798 if (operand_type_check (i.types[op], imm))
3799 {
3800 switch (i.op[op].imms->X_op)
3801 {
3802 case O_constant:
3803 /* If a suffix is given, this operand may be shortened. */
3804 switch (guess_suffix)
3805 {
3806 case LONG_MNEM_SUFFIX:
3807 i.types[op].bitfield.imm32 = 1;
3808 i.types[op].bitfield.imm64 = 1;
3809 break;
3810 case WORD_MNEM_SUFFIX:
3811 i.types[op].bitfield.imm16 = 1;
3812 i.types[op].bitfield.imm32 = 1;
3813 i.types[op].bitfield.imm32s = 1;
3814 i.types[op].bitfield.imm64 = 1;
3815 break;
3816 case BYTE_MNEM_SUFFIX:
3817 i.types[op].bitfield.imm8 = 1;
3818 i.types[op].bitfield.imm8s = 1;
3819 i.types[op].bitfield.imm16 = 1;
3820 i.types[op].bitfield.imm32 = 1;
3821 i.types[op].bitfield.imm32s = 1;
3822 i.types[op].bitfield.imm64 = 1;
3823 break;
3824 }
3825
3826 /* If this operand is at most 16 bits, convert it
3827 to a signed 16 bit number before trying to see
3828 whether it will fit in an even smaller size.
3829 This allows a 16-bit operand such as $0xffe0 to
3830 be recognised as within Imm8S range. */
3831 if ((i.types[op].bitfield.imm16)
3832 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3833 {
3834 i.op[op].imms->X_add_number =
3835 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3836 }
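	  /* Likewise sign extend a value known to fit in 32 bits;
	     e.g. $0xffffffe0 becomes -0x20 and can then also match
	     Imm8S. */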
3837 if ((i.types[op].bitfield.imm32)
3838 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3839 == 0))
3840 {
3841 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3842 ^ ((offsetT) 1 << 31))
3843 - ((offsetT) 1 << 31));
3844 }
3845 i.types[op]
3846 = operand_type_or (i.types[op],
3847 smallest_imm_type (i.op[op].imms->X_add_number));
3848
3849 /* We must avoid matching Imm32 templates when only a
3850 64-bit immediate is available. */
3851 if (guess_suffix == QWORD_MNEM_SUFFIX)
3852 i.types[op].bitfield.imm32 = 0;
3853 break;
3854
3855 case O_absent:
3856 case O_register:
3857 abort ();
3858
3859 /* Symbols and expressions. */
3860 default:
3861 /* Convert symbolic operand to proper sizes for matching, but don't
3862 prevent matching a set of insns that only supports sizes other
3863 than those matching the insn suffix. */
3864 {
3865 i386_operand_type mask, allowed;
3866 const insn_template *t;
3867
3868 operand_type_set (&mask, 0);
3869 operand_type_set (&allowed, 0);
3870
3871 for (t = current_templates->start;
3872 t < current_templates->end;
3873 ++t)
3874 allowed = operand_type_or (allowed,
3875 t->operand_types[op]);
3876 switch (guess_suffix)
3877 {
3878 case QWORD_MNEM_SUFFIX:
3879 mask.bitfield.imm64 = 1;
3880 mask.bitfield.imm32s = 1;
3881 break;
3882 case LONG_MNEM_SUFFIX:
3883 mask.bitfield.imm32 = 1;
3884 break;
3885 case WORD_MNEM_SUFFIX:
3886 mask.bitfield.imm16 = 1;
3887 break;
3888 case BYTE_MNEM_SUFFIX:
3889 mask.bitfield.imm8 = 1;
3890 break;
3891 default:
3892 break;
3893 }
3894 allowed = operand_type_and (mask, allowed);
3895 if (!operand_type_all_zero (&allowed))
3896 i.types[op] = operand_type_and (i.types[op], mask);
3897 }
3898 break;
3899 }
3900 }
3901 }
3902
3903 /* Try to use the smallest displacement type too. */
3904 static void
3905 optimize_disp (void)
3906 {
3907 int op;
3908
3909 for (op = i.operands; --op >= 0;)
3910 if (operand_type_check (i.types[op], disp))
3911 {
3912 if (i.op[op].disps->X_op == O_constant)
3913 {
3914 offsetT op_disp = i.op[op].disps->X_add_number;
3915
3916 if (i.types[op].bitfield.disp16
3917 && (op_disp & ~(offsetT) 0xffff) == 0)
3918 {
3919 /* If this operand is at most 16 bits, convert
3920 to a signed 16 bit number and don't use 64bit
3921 displacement. */
3922 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3923 i.types[op].bitfield.disp64 = 0;
3924 }
3925 if (i.types[op].bitfield.disp32
3926 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3927 {
3928 /* If this operand is at most 32 bits, convert
3929 to a signed 32 bit number and don't use 64bit
3930 displacement. */
3931 op_disp &= (((offsetT) 2 << 31) - 1);
3932 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3933 i.types[op].bitfield.disp64 = 0;
3934 }
3935 if (!op_disp && i.types[op].bitfield.baseindex)
3936 {
3937 i.types[op].bitfield.disp8 = 0;
3938 i.types[op].bitfield.disp16 = 0;
3939 i.types[op].bitfield.disp32 = 0;
3940 i.types[op].bitfield.disp32s = 0;
3941 i.types[op].bitfield.disp64 = 0;
3942 i.op[op].disps = 0;
3943 i.disp_operands--;
3944 }
3945 else if (flag_code == CODE_64BIT)
3946 {
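	      /* In 64-bit mode a constant that fits in signed 32 bits
		 can use the Disp32S form; with an address-size prefix,
		 one that fits in unsigned 32 bits can also use Disp32. */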
3947 if (fits_in_signed_long (op_disp))
3948 {
3949 i.types[op].bitfield.disp64 = 0;
3950 i.types[op].bitfield.disp32s = 1;
3951 }
3952 if (i.prefix[ADDR_PREFIX]
3953 && fits_in_unsigned_long (op_disp))
3954 i.types[op].bitfield.disp32 = 1;
3955 }
3956 if ((i.types[op].bitfield.disp32
3957 || i.types[op].bitfield.disp32s
3958 || i.types[op].bitfield.disp16)
3959 && fits_in_signed_byte (op_disp))
3960 i.types[op].bitfield.disp8 = 1;
3961 }
3962 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3963 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3964 {
3965 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3966 i.op[op].disps, 0, i.reloc[op]);
3967 i.types[op].bitfield.disp8 = 0;
3968 i.types[op].bitfield.disp16 = 0;
3969 i.types[op].bitfield.disp32 = 0;
3970 i.types[op].bitfield.disp32s = 0;
3971 i.types[op].bitfield.disp64 = 0;
3972 }
3973 else
3974 /* We only support 64bit displacement on constants. */
3975 i.types[op].bitfield.disp64 = 0;
3976 }
3977 }
3978
3979 /* Check if operands are valid for the instruction. */
3980
3981 static int
3982 check_VecOperands (const insn_template *t)
3983 {
3984 /* Without VSIB byte, we can't have a vector register for index. */
3985 if (!t->opcode_modifier.vecsib
3986 && i.index_reg
3987 && (i.index_reg->reg_type.bitfield.regxmm
3988 || i.index_reg->reg_type.bitfield.regymm))
3989 {
3990 i.error = unsupported_vector_index_register;
3991 return 1;
3992 }
3993
3994 /* For VSIB byte, we need a vector register for index, and all vector
3995 registers must be distinct. */
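/* E.g. AVX2 gathers such as "vgatherdpd %xmm2,(%ebp,%xmm7,2),%xmm1"
   land here: %xmm7 supplies the VSIB index, and the mask, index and
   destination registers may not overlap.  */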
3996 if (t->opcode_modifier.vecsib)
3997 {
3998 if (!i.index_reg
3999 || !((t->opcode_modifier.vecsib == VecSIB128
4000 && i.index_reg->reg_type.bitfield.regxmm)
4001 || (t->opcode_modifier.vecsib == VecSIB256
4002 && i.index_reg->reg_type.bitfield.regymm)))
4003 {
4004 i.error = invalid_vsib_address;
4005 return 1;
4006 }
4007
4008 gas_assert (i.reg_operands == 2);
4009 gas_assert (i.types[0].bitfield.regxmm
4010 || i.types[0].bitfield.regymm);
4011 gas_assert (i.types[2].bitfield.regxmm
4012 || i.types[2].bitfield.regymm);
4013
4014 if (operand_check == check_none)
4015 return 0;
4016 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4017 && register_number (i.op[2].regs) != register_number (i.index_reg)
4018 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4019 return 0;
4020 if (operand_check == check_error)
4021 {
4022 i.error = invalid_vector_register_set;
4023 return 1;
4024 }
4025 as_warn (_("mask, index, and destination registers should be distinct"));
4026 }
4027
4028 return 0;
4029 }
4030
4031 /* Check if operands are valid for the instruction. Update VEX
4032 operand types. */
4033
4034 static int
4035 VEX_check_operands (const insn_template *t)
4036 {
4037 if (!t->opcode_modifier.vex)
4038 return 0;
4039
4040 /* Only check VEX_Imm4, which must be the first operand. */
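/* Only the low four bits are available to the user: build_modrm_byte
   later shifts a register number into the upper half of the same
   immediate byte (the "<< 4" there).  */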
4041 if (t->operand_types[0].bitfield.vec_imm4)
4042 {
4043 if (i.op[0].imms->X_op != O_constant
4044 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4045 {
4046 i.error = bad_imm4;
4047 return 1;
4048 }
4049
4050 /* Turn off Imm8 so that update_imm won't complain. */
4051 i.types[0] = vec_imm4;
4052 }
4053
4054 return 0;
4055 }
4056
4057 static const insn_template *
4058 match_template (void)
4059 {
4060 /* Points to template once we've found it. */
4061 const insn_template *t;
4062 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4063 i386_operand_type overlap4;
4064 unsigned int found_reverse_match;
4065 i386_opcode_modifier suffix_check;
4066 i386_operand_type operand_types [MAX_OPERANDS];
4067 int addr_prefix_disp;
4068 unsigned int j;
4069 unsigned int found_cpu_match;
4070 unsigned int check_register;
4071 enum i386_error specific_error = 0;
4072
4073 #if MAX_OPERANDS != 5
4074 # error "MAX_OPERANDS must be 5."
4075 #endif
4076
4077 found_reverse_match = 0;
4078 addr_prefix_disp = -1;
4079
4080 memset (&suffix_check, 0, sizeof (suffix_check));
4081 if (i.suffix == BYTE_MNEM_SUFFIX)
4082 suffix_check.no_bsuf = 1;
4083 else if (i.suffix == WORD_MNEM_SUFFIX)
4084 suffix_check.no_wsuf = 1;
4085 else if (i.suffix == SHORT_MNEM_SUFFIX)
4086 suffix_check.no_ssuf = 1;
4087 else if (i.suffix == LONG_MNEM_SUFFIX)
4088 suffix_check.no_lsuf = 1;
4089 else if (i.suffix == QWORD_MNEM_SUFFIX)
4090 suffix_check.no_qsuf = 1;
4091 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4092 suffix_check.no_ldsuf = 1;
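/* Note the inverted sense: suffix_check.no_bsuf set here means "the
   user wrote a 'b' suffix", so any template whose own No_bSuf flag is
   set (it cannot take that suffix) is rejected by the suffix test in
   the loop below.  */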
4093
4094 /* Must have right number of operands. */
4095 i.error = number_of_operands_mismatch;
4096
4097 for (t = current_templates->start; t < current_templates->end; t++)
4098 {
4099 addr_prefix_disp = -1;
4100
4101 if (i.operands != t->operands)
4102 continue;
4103
4104 /* Check processor support. */
4105 i.error = unsupported;
4106 found_cpu_match = (cpu_flags_match (t)
4107 == CPU_FLAGS_PERFECT_MATCH);
4108 if (!found_cpu_match)
4109 continue;
4110
4111 /* Check old gcc support. */
4112 i.error = old_gcc_only;
4113 if (!old_gcc && t->opcode_modifier.oldgcc)
4114 continue;
4115
4116 /* Check AT&T mnemonic. */
4117 i.error = unsupported_with_intel_mnemonic;
4118 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4119 continue;
4120
4121 /* Check AT&T/Intel syntax. */
4122 i.error = unsupported_syntax;
4123 if ((intel_syntax && t->opcode_modifier.attsyntax)
4124 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4125 continue;
4126
4127 /* Check the suffix, except for some instructions in intel mode. */
4128 i.error = invalid_instruction_suffix;
4129 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4130 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4131 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4132 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4133 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4134 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4135 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4136 continue;
4137
4138 if (!operand_size_match (t))
4139 continue;
4140
4141 for (j = 0; j < MAX_OPERANDS; j++)
4142 operand_types[j] = t->operand_types[j];
4143
4144 /* In general, don't allow 64-bit operands in 32-bit mode. */
4145 if (i.suffix == QWORD_MNEM_SUFFIX
4146 && flag_code != CODE_64BIT
4147 && (intel_syntax
4148 ? (!t->opcode_modifier.ignoresize
4149 && !intel_float_operand (t->name))
4150 : intel_float_operand (t->name) != 2)
4151 && ((!operand_types[0].bitfield.regmmx
4152 && !operand_types[0].bitfield.regxmm
4153 && !operand_types[0].bitfield.regymm)
4154 || (!operand_types[t->operands > 1].bitfield.regmmx
4155 && !operand_types[t->operands > 1].bitfield.regxmm
4156 && !operand_types[t->operands > 1].bitfield.regymm))
4157 && (t->base_opcode != 0x0fc7
4158 || t->extension_opcode != 1 /* cmpxchg8b */))
4159 continue;
4160
4161 /* In general, don't allow 32-bit operands on pre-386. */
4162 else if (i.suffix == LONG_MNEM_SUFFIX
4163 && !cpu_arch_flags.bitfield.cpui386
4164 && (intel_syntax
4165 ? (!t->opcode_modifier.ignoresize
4166 && !intel_float_operand (t->name))
4167 : intel_float_operand (t->name) != 2)
4168 && ((!operand_types[0].bitfield.regmmx
4169 && !operand_types[0].bitfield.regxmm)
4170 || (!operand_types[t->operands > 1].bitfield.regmmx
4171 && !operand_types[t->operands > 1].bitfield.regxmm)))
4172 continue;
4173
4174 /* Do not verify operands when there are none. */
4175 else
4176 {
4177 if (!t->operands)
4178 /* We've found a match; break out of loop. */
4179 break;
4180 }
4181
4182 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4183 into Disp32/Disp16/Disp32 operand. */
4184 if (i.prefix[ADDR_PREFIX] != 0)
4185 {
4186 /* There should be only one Disp operand. */
4187 switch (flag_code)
4188 {
4189 case CODE_16BIT:
4190 for (j = 0; j < MAX_OPERANDS; j++)
4191 {
4192 if (operand_types[j].bitfield.disp16)
4193 {
4194 addr_prefix_disp = j;
4195 operand_types[j].bitfield.disp32 = 1;
4196 operand_types[j].bitfield.disp16 = 0;
4197 break;
4198 }
4199 }
4200 break;
4201 case CODE_32BIT:
4202 for (j = 0; j < MAX_OPERANDS; j++)
4203 {
4204 if (operand_types[j].bitfield.disp32)
4205 {
4206 addr_prefix_disp = j;
4207 operand_types[j].bitfield.disp32 = 0;
4208 operand_types[j].bitfield.disp16 = 1;
4209 break;
4210 }
4211 }
4212 break;
4213 case CODE_64BIT:
4214 for (j = 0; j < MAX_OPERANDS; j++)
4215 {
4216 if (operand_types[j].bitfield.disp64)
4217 {
4218 addr_prefix_disp = j;
4219 operand_types[j].bitfield.disp64 = 0;
4220 operand_types[j].bitfield.disp32 = 1;
4221 break;
4222 }
4223 }
4224 break;
4225 }
4226 }
4227
4228 /* We check register size if needed. */
4229 check_register = t->opcode_modifier.checkregsize;
4230 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4231 switch (t->operands)
4232 {
4233 case 1:
4234 if (!operand_type_match (overlap0, i.types[0]))
4235 continue;
4236 break;
4237 case 2:
4238 /* xchg %eax, %eax is a special case. It is an alias for nop
4239 only in 32bit mode and we can use opcode 0x90. In 64bit
4240 mode, we can't use 0x90 for xchg %eax, %eax since it should
4241 zero-extend %eax to %rax. */
4242 if (flag_code == CODE_64BIT
4243 && t->base_opcode == 0x90
4244 && operand_type_equal (&i.types [0], &acc32)
4245 && operand_type_equal (&i.types [1], &acc32))
4246 continue;
4247 if (i.swap_operand)
4248 {
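/* i.swap_operand is set by the ".s" mnemonic suffix, which requests
   the alternative (operand-swapped) encoding of a two operand insn,
   e.g. "mov.s %eax, %ebx".  */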
4249 /* If we swap operand in encoding, we either match
4250 the next one or reverse direction of operands. */
4251 if (t->opcode_modifier.s)
4252 continue;
4253 else if (t->opcode_modifier.d)
4254 goto check_reverse;
4255 }
4256
4257 case 3:
4258 /* If we swap operand in encoding, we match the next one. */
4259 if (i.swap_operand && t->opcode_modifier.s)
4260 continue;
4261 case 4:
4262 case 5:
4263 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4264 if (!operand_type_match (overlap0, i.types[0])
4265 || !operand_type_match (overlap1, i.types[1])
4266 || (check_register
4267 && !operand_type_register_match (overlap0, i.types[0],
4268 operand_types[0],
4269 overlap1, i.types[1],
4270 operand_types[1])))
4271 {
4272 /* Check if other direction is valid ... */
4273 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4274 continue;
4275
4276 check_reverse:
4277 /* Try reversing direction of operands. */
4278 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4279 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4280 if (!operand_type_match (overlap0, i.types[0])
4281 || !operand_type_match (overlap1, i.types[1])
4282 || (check_register
4283 && !operand_type_register_match (overlap0,
4284 i.types[0],
4285 operand_types[1],
4286 overlap1,
4287 i.types[1],
4288 operand_types[0])))
4289 {
4290 /* Does not match either direction. */
4291 continue;
4292 }
4293 /* found_reverse_match holds which of D or FloatDR
4294 we've found. */
4295 if (t->opcode_modifier.d)
4296 found_reverse_match = Opcode_D;
4297 else if (t->opcode_modifier.floatd)
4298 found_reverse_match = Opcode_FloatD;
4299 else
4300 found_reverse_match = 0;
4301 if (t->opcode_modifier.floatr)
4302 found_reverse_match |= Opcode_FloatR;
4303 }
4304 else
4305 {
4306 /* Found a forward 2 operand match here. */
4307 switch (t->operands)
4308 {
4309 case 5:
4310 overlap4 = operand_type_and (i.types[4],
4311 operand_types[4]);
4312 case 4:
4313 overlap3 = operand_type_and (i.types[3],
4314 operand_types[3]);
4315 case 3:
4316 overlap2 = operand_type_and (i.types[2],
4317 operand_types[2]);
4318 break;
4319 }
4320
4321 switch (t->operands)
4322 {
4323 case 5:
4324 if (!operand_type_match (overlap4, i.types[4])
4325 || !operand_type_register_match (overlap3,
4326 i.types[3],
4327 operand_types[3],
4328 overlap4,
4329 i.types[4],
4330 operand_types[4]))
4331 continue;
4332 case 4:
4333 if (!operand_type_match (overlap3, i.types[3])
4334 || (check_register
4335 && !operand_type_register_match (overlap2,
4336 i.types[2],
4337 operand_types[2],
4338 overlap3,
4339 i.types[3],
4340 operand_types[3])))
4341 continue;
4342 case 3:
4343 /* Here we make use of the fact that there are no
4344 reverse match 3 operand instructions, and all 3
4345 operand instructions only need to be checked for
4346 register consistency between operands 2 and 3. */
4347 if (!operand_type_match (overlap2, i.types[2])
4348 || (check_register
4349 && !operand_type_register_match (overlap1,
4350 i.types[1],
4351 operand_types[1],
4352 overlap2,
4353 i.types[2],
4354 operand_types[2])))
4355 continue;
4356 break;
4357 }
4358 }
4359 /* Found either forward/reverse 2, 3 or 4 operand match here:
4360 slip through to break. */
4361 }
4362 if (!found_cpu_match)
4363 {
4364 found_reverse_match = 0;
4365 continue;
4366 }
4367
4368 /* Check if vector and VEX operands are valid. */
4369 if (check_VecOperands (t) || VEX_check_operands (t))
4370 {
4371 specific_error = i.error;
4372 continue;
4373 }
4374
4375 /* We've found a match; break out of loop. */
4376 break;
4377 }
4378
4379 if (t == current_templates->end)
4380 {
4381 /* We found no match. */
4382 const char *err_msg;
4383 switch (specific_error ? specific_error : i.error)
4384 {
4385 default:
4386 abort ();
4387 case operand_size_mismatch:
4388 err_msg = _("operand size mismatch");
4389 break;
4390 case operand_type_mismatch:
4391 err_msg = _("operand type mismatch");
4392 break;
4393 case register_type_mismatch:
4394 err_msg = _("register type mismatch");
4395 break;
4396 case number_of_operands_mismatch:
4397 err_msg = _("number of operands mismatch");
4398 break;
4399 case invalid_instruction_suffix:
4400 err_msg = _("invalid instruction suffix");
4401 break;
4402 case bad_imm4:
4403 err_msg = _("constant doesn't fit in 4 bits");
4404 break;
4405 case old_gcc_only:
4406 err_msg = _("only supported with old gcc");
4407 break;
4408 case unsupported_with_intel_mnemonic:
4409 err_msg = _("unsupported with Intel mnemonic");
4410 break;
4411 case unsupported_syntax:
4412 err_msg = _("unsupported syntax");
4413 break;
4414 case unsupported:
4415 as_bad (_("unsupported instruction `%s'"),
4416 current_templates->start->name);
4417 return NULL;
4418 case invalid_vsib_address:
4419 err_msg = _("invalid VSIB address");
4420 break;
4421 case invalid_vector_register_set:
4422 err_msg = _("mask, index, and destination registers must be distinct");
4423 break;
4424 case unsupported_vector_index_register:
4425 err_msg = _("unsupported vector index register");
4426 break;
4427 }
4428 as_bad (_("%s for `%s'"), err_msg,
4429 current_templates->start->name);
4430 return NULL;
4431 }
4432
4433 if (!quiet_warnings)
4434 {
4435 if (!intel_syntax
4436 && (i.types[0].bitfield.jumpabsolute
4437 != operand_types[0].bitfield.jumpabsolute))
4438 {
4439 as_warn (_("indirect %s without `*'"), t->name);
4440 }
4441
4442 if (t->opcode_modifier.isprefix
4443 && t->opcode_modifier.ignoresize)
4444 {
4445 /* Warn them that a data or address size prefix doesn't
4446 affect assembly of the next line of code. */
4447 as_warn (_("stand-alone `%s' prefix"), t->name);
4448 }
4449 }
4450
4451 /* Copy the template we found. */
4452 i.tm = *t;
4453
4454 if (addr_prefix_disp != -1)
4455 i.tm.operand_types[addr_prefix_disp]
4456 = operand_types[addr_prefix_disp];
4457
4458 if (found_reverse_match)
4459 {
4460 /* If we found a reverse match we must alter the opcode
4461 direction bit. found_reverse_match holds bits to change
4462 (different for int & float insns). */
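/* For the integer forms Opcode_D is the direction bit of the primary
   opcode: e.g. "mov %eax,%ebx" can be encoded as 89 c3 or, with that
   bit flipped and the ModRM fields swapped, as 8b d8.  */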
4463
4464 i.tm.base_opcode ^= found_reverse_match;
4465
4466 i.tm.operand_types[0] = operand_types[1];
4467 i.tm.operand_types[1] = operand_types[0];
4468 }
4469
4470 return t;
4471 }
4472
4473 static int
4474 check_string (void)
4475 {
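/* String insns (movs, cmps, scas, stos, ins, ...) have one operand
   that is hardwired to the %es segment: e.g. the destination of
   "movsb" is always %es:(%edi) and cannot carry a segment override,
   while the source segment may be overridden.  */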
4476 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4477 if (i.tm.operand_types[mem_op].bitfield.esseg)
4478 {
4479 if (i.seg[0] != NULL && i.seg[0] != &es)
4480 {
4481 as_bad (_("`%s' operand %d must use `%ses' segment"),
4482 i.tm.name,
4483 mem_op + 1,
4484 register_prefix);
4485 return 0;
4486 }
4487 /* There's only ever one segment override allowed per instruction.
4488 This instruction possibly has a legal segment override on the
4489 second operand, so copy the segment to where non-string
4490 instructions store it, allowing common code. */
4491 i.seg[0] = i.seg[1];
4492 }
4493 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4494 {
4495 if (i.seg[1] != NULL && i.seg[1] != &es)
4496 {
4497 as_bad (_("`%s' operand %d must use `%ses' segment"),
4498 i.tm.name,
4499 mem_op + 2,
4500 register_prefix);
4501 return 0;
4502 }
4503 }
4504 return 1;
4505 }
4506
4507 static int
4508 process_suffix (void)
4509 {
4510 /* If matched instruction specifies an explicit instruction mnemonic
4511 suffix, use it. */
4512 if (i.tm.opcode_modifier.size16)
4513 i.suffix = WORD_MNEM_SUFFIX;
4514 else if (i.tm.opcode_modifier.size32)
4515 i.suffix = LONG_MNEM_SUFFIX;
4516 else if (i.tm.opcode_modifier.size64)
4517 i.suffix = QWORD_MNEM_SUFFIX;
4518 else if (i.reg_operands)
4519 {
4520 /* If there's no instruction mnemonic suffix we try to invent one
4521 based on register operands. */
4522 if (!i.suffix)
4523 {
4524 /* We take i.suffix from the last register operand specified.
4525 Destination register type is more significant than source
4526 register type; crc32 in SSE4.2 prefers the source register
4527 type. */
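/* E.g. "crc32 %cx, %ebx" takes a 'w' suffix from its 16-bit source
   register; the destination of crc32 is always a 32/64-bit reg.  */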
4528 if (i.tm.base_opcode == 0xf20f38f1)
4529 {
4530 if (i.types[0].bitfield.reg16)
4531 i.suffix = WORD_MNEM_SUFFIX;
4532 else if (i.types[0].bitfield.reg32)
4533 i.suffix = LONG_MNEM_SUFFIX;
4534 else if (i.types[0].bitfield.reg64)
4535 i.suffix = QWORD_MNEM_SUFFIX;
4536 }
4537 else if (i.tm.base_opcode == 0xf20f38f0)
4538 {
4539 if (i.types[0].bitfield.reg8)
4540 i.suffix = BYTE_MNEM_SUFFIX;
4541 }
4542
4543 if (!i.suffix)
4544 {
4545 int op;
4546
4547 if (i.tm.base_opcode == 0xf20f38f1
4548 || i.tm.base_opcode == 0xf20f38f0)
4549 {
4550 /* We have to know the operand size for crc32. */
4551 as_bad (_("ambiguous memory operand size for `%s'"),
4552 i.tm.name);
4553 return 0;
4554 }
4555
4556 for (op = i.operands; --op >= 0;)
4557 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4558 {
4559 if (i.types[op].bitfield.reg8)
4560 {
4561 i.suffix = BYTE_MNEM_SUFFIX;
4562 break;
4563 }
4564 else if (i.types[op].bitfield.reg16)
4565 {
4566 i.suffix = WORD_MNEM_SUFFIX;
4567 break;
4568 }
4569 else if (i.types[op].bitfield.reg32)
4570 {
4571 i.suffix = LONG_MNEM_SUFFIX;
4572 break;
4573 }
4574 else if (i.types[op].bitfield.reg64)
4575 {
4576 i.suffix = QWORD_MNEM_SUFFIX;
4577 break;
4578 }
4579 }
4580 }
4581 }
4582 else if (i.suffix == BYTE_MNEM_SUFFIX)
4583 {
4584 if (intel_syntax
4585 && i.tm.opcode_modifier.ignoresize
4586 && i.tm.opcode_modifier.no_bsuf)
4587 i.suffix = 0;
4588 else if (!check_byte_reg ())
4589 return 0;
4590 }
4591 else if (i.suffix == LONG_MNEM_SUFFIX)
4592 {
4593 if (intel_syntax
4594 && i.tm.opcode_modifier.ignoresize
4595 && i.tm.opcode_modifier.no_lsuf)
4596 i.suffix = 0;
4597 else if (!check_long_reg ())
4598 return 0;
4599 }
4600 else if (i.suffix == QWORD_MNEM_SUFFIX)
4601 {
4602 if (intel_syntax
4603 && i.tm.opcode_modifier.ignoresize
4604 && i.tm.opcode_modifier.no_qsuf)
4605 i.suffix = 0;
4606 else if (!check_qword_reg ())
4607 return 0;
4608 }
4609 else if (i.suffix == WORD_MNEM_SUFFIX)
4610 {
4611 if (intel_syntax
4612 && i.tm.opcode_modifier.ignoresize
4613 && i.tm.opcode_modifier.no_wsuf)
4614 i.suffix = 0;
4615 else if (!check_word_reg ())
4616 return 0;
4617 }
4618 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4619 || i.suffix == YMMWORD_MNEM_SUFFIX)
4620 {
4621 /* Skip if the instruction has x/y suffix. match_template
4622 should check if it is a valid suffix. */
4623 }
4624 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4625 /* Do nothing if the instruction is going to ignore the prefix. */
4626 ;
4627 else
4628 abort ();
4629 }
4630 else if (i.tm.opcode_modifier.defaultsize
4631 && !i.suffix
4632 /* exclude fldenv/frstor/fsave/fstenv */
4633 && i.tm.opcode_modifier.no_ssuf)
4634 {
4635 i.suffix = stackop_size;
4636 }
4637 else if (intel_syntax
4638 && !i.suffix
4639 && (i.tm.operand_types[0].bitfield.jumpabsolute
4640 || i.tm.opcode_modifier.jumpbyte
4641 || i.tm.opcode_modifier.jumpintersegment
4642 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4643 && i.tm.extension_opcode <= 3)))
4644 {
4645 switch (flag_code)
4646 {
4647 case CODE_64BIT:
4648 if (!i.tm.opcode_modifier.no_qsuf)
4649 {
4650 i.suffix = QWORD_MNEM_SUFFIX;
4651 break;
4652 }
4653 case CODE_32BIT:
4654 if (!i.tm.opcode_modifier.no_lsuf)
4655 i.suffix = LONG_MNEM_SUFFIX;
4656 break;
4657 case CODE_16BIT:
4658 if (!i.tm.opcode_modifier.no_wsuf)
4659 i.suffix = WORD_MNEM_SUFFIX;
4660 break;
4661 }
4662 }
4663
4664 if (!i.suffix)
4665 {
4666 if (!intel_syntax)
4667 {
4668 if (i.tm.opcode_modifier.w)
4669 {
4670 as_bad (_("no instruction mnemonic suffix given and "
4671 "no register operands; can't size instruction"));
4672 return 0;
4673 }
4674 }
4675 else
4676 {
4677 unsigned int suffixes;
4678
4679 suffixes = !i.tm.opcode_modifier.no_bsuf;
4680 if (!i.tm.opcode_modifier.no_wsuf)
4681 suffixes |= 1 << 1;
4682 if (!i.tm.opcode_modifier.no_lsuf)
4683 suffixes |= 1 << 2;
4684 if (!i.tm.opcode_modifier.no_ldsuf)
4685 suffixes |= 1 << 3;
4686 if (!i.tm.opcode_modifier.no_ssuf)
4687 suffixes |= 1 << 4;
4688 if (!i.tm.opcode_modifier.no_qsuf)
4689 suffixes |= 1 << 5;
4690
4691 /* There is more than one possible suffix. */
4692 if (i.tm.opcode_modifier.w
4693 || ((suffixes & (suffixes - 1))
4694 && !i.tm.opcode_modifier.defaultsize
4695 && !i.tm.opcode_modifier.ignoresize))
4696 {
4697 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4698 return 0;
4699 }
4700 }
4701 }
4702
4703 /* Change the opcode based on the operand size given by i.suffix;
4704 We don't need to change things for byte insns. */
4705
4706 if (i.suffix
4707 && i.suffix != BYTE_MNEM_SUFFIX
4708 && i.suffix != XMMWORD_MNEM_SUFFIX
4709 && i.suffix != YMMWORD_MNEM_SUFFIX)
4710 {
4711 /* It's not a byte, select word/dword operation. */
4712 if (i.tm.opcode_modifier.w)
4713 {
4714 if (i.tm.opcode_modifier.shortform)
4715 i.tm.base_opcode |= 8;
4716 else
4717 i.tm.base_opcode |= 1;
4718 }
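/* E.g. "movb %al, (%ebx)" uses opcode 0x88 while "movl %eax, (%ebx)"
   uses 0x89 (the W bit); the short-form immediate moves go from
   0xb0+reg to 0xb8+reg, hence the "|= 8" above.  */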
4719
4720 /* Now select between word & dword operations via the operand
4721 size prefix, except for instructions that will ignore this
4722 prefix anyway. */
4723 if (i.tm.opcode_modifier.addrprefixop0)
4724 {
4725 /* The address size override prefix changes the size of the
4726 first operand. */
4727 if ((flag_code == CODE_32BIT
4728 && i.op->regs[0].reg_type.bitfield.reg16)
4729 || (flag_code != CODE_32BIT
4730 && i.op->regs[0].reg_type.bitfield.reg32))
4731 if (!add_prefix (ADDR_PREFIX_OPCODE))
4732 return 0;
4733 }
4734 else if (i.suffix != QWORD_MNEM_SUFFIX
4735 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4736 && !i.tm.opcode_modifier.ignoresize
4737 && !i.tm.opcode_modifier.floatmf
4738 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4739 || (flag_code == CODE_64BIT
4740 && i.tm.opcode_modifier.jumpbyte)))
4741 {
4742 unsigned int prefix = DATA_PREFIX_OPCODE;
4743
4744 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4745 prefix = ADDR_PREFIX_OPCODE;
4746
4747 if (!add_prefix (prefix))
4748 return 0;
4749 }
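/* E.g. "movw %ax, %bx" in 32-bit code gets the 0x66 data size prefix
   here, while "jcxz" in 32-bit code needs 0x67 instead, which is why
   JumpByte insns switch to the address size prefix above.  */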
4750
4751 /* Set mode64 for an operand. */
4752 if (i.suffix == QWORD_MNEM_SUFFIX
4753 && flag_code == CODE_64BIT
4754 && !i.tm.opcode_modifier.norex64)
4755 {
4756 /* Special case for xchg %rax,%rax. It is a NOP and doesn't
4757 need rex64. cmpxchg8b is also a special case. */
4758 if (! (i.operands == 2
4759 && i.tm.base_opcode == 0x90
4760 && i.tm.extension_opcode == None
4761 && operand_type_equal (&i.types [0], &acc64)
4762 && operand_type_equal (&i.types [1], &acc64))
4763 && ! (i.operands == 1
4764 && i.tm.base_opcode == 0xfc7
4765 && i.tm.extension_opcode == 1
4766 && !operand_type_check (i.types [0], reg)
4767 && operand_type_check (i.types [0], anymem)))
4768 i.rex |= REX_W;
4769 }
4770
4771 /* Size floating point instruction. */
4772 if (i.suffix == LONG_MNEM_SUFFIX)
4773 if (i.tm.opcode_modifier.floatmf)
4774 i.tm.base_opcode ^= 4;
4775 }
4776
4777 return 1;
4778 }
4779
4780 static int
4781 check_byte_reg (void)
4782 {
4783 int op;
4784
4785 for (op = i.operands; --op >= 0;)
4786 {
4787 /* If this is an eight bit register, it's OK. If it's the 16 or
4788 32 bit version of an eight bit register, we will just use the
4789 low portion, and that's OK too. */
4790 if (i.types[op].bitfield.reg8)
4791 continue;
4792
4793 /* I/O port address operands are OK too. */
4794 if (i.tm.operand_types[op].bitfield.inoutportreg)
4795 continue;
4796
4797 /* crc32 doesn't generate this warning. */
4798 if (i.tm.base_opcode == 0xf20f38f0)
4799 continue;
4800
4801 if ((i.types[op].bitfield.reg16
4802 || i.types[op].bitfield.reg32
4803 || i.types[op].bitfield.reg64)
4804 && i.op[op].regs->reg_num < 4
4805 /* Prohibit these changes in 64bit mode, since the lowering
4806 would be more complicated. */
4807 && flag_code != CODE_64BIT)
4808 {
4809 #if REGISTER_WARNINGS
4810 if (!quiet_warnings)
4811 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4812 register_prefix,
4813 (i.op[op].regs + (i.types[op].bitfield.reg16
4814 ? REGNAM_AL - REGNAM_AX
4815 : REGNAM_AL - REGNAM_EAX))->reg_name,
4816 register_prefix,
4817 i.op[op].regs->reg_name,
4818 i.suffix);
4819 #endif
4820 continue;
4821 }
4822 /* Any other register is bad. */
4823 if (i.types[op].bitfield.reg16
4824 || i.types[op].bitfield.reg32
4825 || i.types[op].bitfield.reg64
4826 || i.types[op].bitfield.regmmx
4827 || i.types[op].bitfield.regxmm
4828 || i.types[op].bitfield.regymm
4829 || i.types[op].bitfield.sreg2
4830 || i.types[op].bitfield.sreg3
4831 || i.types[op].bitfield.control
4832 || i.types[op].bitfield.debug
4833 || i.types[op].bitfield.test
4834 || i.types[op].bitfield.floatreg
4835 || i.types[op].bitfield.floatacc)
4836 {
4837 as_bad (_("`%s%s' not allowed with `%s%c'"),
4838 register_prefix,
4839 i.op[op].regs->reg_name,
4840 i.tm.name,
4841 i.suffix);
4842 return 0;
4843 }
4844 }
4845 return 1;
4846 }
4847
4848 static int
4849 check_long_reg (void)
4850 {
4851 int op;
4852
4853 for (op = i.operands; --op >= 0;)
4854 /* Reject eight bit registers, except where the template requires
4855 them. (eg. movzb) */
4856 if (i.types[op].bitfield.reg8
4857 && (i.tm.operand_types[op].bitfield.reg16
4858 || i.tm.operand_types[op].bitfield.reg32
4859 || i.tm.operand_types[op].bitfield.acc))
4860 {
4861 as_bad (_("`%s%s' not allowed with `%s%c'"),
4862 register_prefix,
4863 i.op[op].regs->reg_name,
4864 i.tm.name,
4865 i.suffix);
4866 return 0;
4867 }
4868 /* Warn if the e prefix on a general reg is missing. */
4869 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4870 && i.types[op].bitfield.reg16
4871 && (i.tm.operand_types[op].bitfield.reg32
4872 || i.tm.operand_types[op].bitfield.acc))
4873 {
4874 /* Prohibit these changes in the 64bit mode, since the
4875 lowering is more complicated. */
4876 if (flag_code == CODE_64BIT)
4877 {
4878 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4879 register_prefix, i.op[op].regs->reg_name,
4880 i.suffix);
4881 return 0;
4882 }
4883 #if REGISTER_WARNINGS
4884 else
4885 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4886 register_prefix,
4887 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4888 register_prefix,
4889 i.op[op].regs->reg_name,
4890 i.suffix);
4891 #endif
4892 }
4893 /* Error if the r prefix on a general reg is present. */
4894 else if (i.types[op].bitfield.reg64
4895 && (i.tm.operand_types[op].bitfield.reg32
4896 || i.tm.operand_types[op].bitfield.acc))
4897 {
4898 if (intel_syntax
4899 && i.tm.opcode_modifier.toqword
4900 && !i.types[0].bitfield.regxmm)
4901 {
4902 /* Convert to QWORD. We want REX byte. */
4903 i.suffix = QWORD_MNEM_SUFFIX;
4904 }
4905 else
4906 {
4907 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4908 register_prefix, i.op[op].regs->reg_name,
4909 i.suffix);
4910 return 0;
4911 }
4912 }
4913 return 1;
4914 }
4915
4916 static int
4917 check_qword_reg (void)
4918 {
4919 int op;
4920
4921 for (op = i.operands; --op >= 0; )
4922 /* Reject eight bit registers, except where the template requires
4923 them. (eg. movzb) */
4924 if (i.types[op].bitfield.reg8
4925 && (i.tm.operand_types[op].bitfield.reg16
4926 || i.tm.operand_types[op].bitfield.reg32
4927 || i.tm.operand_types[op].bitfield.acc))
4928 {
4929 as_bad (_("`%s%s' not allowed with `%s%c'"),
4930 register_prefix,
4931 i.op[op].regs->reg_name,
4932 i.tm.name,
4933 i.suffix);
4934 return 0;
4935 }
4936 /* Reject 16/32-bit registers used with a `q' suffix. */
4937 else if ((i.types[op].bitfield.reg16
4938 || i.types[op].bitfield.reg32)
4939 && (i.tm.operand_types[op].bitfield.reg32
4940 || i.tm.operand_types[op].bitfield.acc))
4941 {
4942 /* In Intel syntax, ToDword templates are converted to a dword
4943 operation instead (no REX prefix needed); otherwise error out. */
4944 if (intel_syntax
4945 && i.tm.opcode_modifier.todword
4946 && !i.types[0].bitfield.regxmm)
4947 {
4948 /* Convert to DWORD. We don't want REX byte. */
4949 i.suffix = LONG_MNEM_SUFFIX;
4950 }
4951 else
4952 {
4953 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4954 register_prefix, i.op[op].regs->reg_name,
4955 i.suffix);
4956 return 0;
4957 }
4958 }
4959 return 1;
4960 }
4961
4962 static int
4963 check_word_reg (void)
4964 {
4965 int op;
4966 for (op = i.operands; --op >= 0;)
4967 /* Reject eight bit registers, except where the template requires
4968 them. (eg. movzb) */
4969 if (i.types[op].bitfield.reg8
4970 && (i.tm.operand_types[op].bitfield.reg16
4971 || i.tm.operand_types[op].bitfield.reg32
4972 || i.tm.operand_types[op].bitfield.acc))
4973 {
4974 as_bad (_("`%s%s' not allowed with `%s%c'"),
4975 register_prefix,
4976 i.op[op].regs->reg_name,
4977 i.tm.name,
4978 i.suffix);
4979 return 0;
4980 }
4981 /* Warn if the e prefix on a general reg is present. */
4982 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4983 && i.types[op].bitfield.reg32
4984 && (i.tm.operand_types[op].bitfield.reg16
4985 || i.tm.operand_types[op].bitfield.acc))
4986 {
4987 /* Prohibit these changes in the 64bit mode, since the
4988 lowering is more complicated. */
4989 if (flag_code == CODE_64BIT)
4990 {
4991 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4992 register_prefix, i.op[op].regs->reg_name,
4993 i.suffix);
4994 return 0;
4995 }
4996 #if REGISTER_WARNINGS
4997 else
4998 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4999 register_prefix,
5000 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5001 register_prefix,
5002 i.op[op].regs->reg_name,
5003 i.suffix);
5004 #endif
5005 }
5006 return 1;
5007 }
5008
5009 static int
5010 update_imm (unsigned int j)
5011 {
5012 i386_operand_type overlap = i.types[j];
5013 if ((overlap.bitfield.imm8
5014 || overlap.bitfield.imm8s
5015 || overlap.bitfield.imm16
5016 || overlap.bitfield.imm32
5017 || overlap.bitfield.imm32s
5018 || overlap.bitfield.imm64)
5019 && !operand_type_equal (&overlap, &imm8)
5020 && !operand_type_equal (&overlap, &imm8s)
5021 && !operand_type_equal (&overlap, &imm16)
5022 && !operand_type_equal (&overlap, &imm32)
5023 && !operand_type_equal (&overlap, &imm32s)
5024 && !operand_type_equal (&overlap, &imm64))
5025 {
5026 if (i.suffix)
5027 {
5028 i386_operand_type temp;
5029
5030 operand_type_set (&temp, 0);
5031 if (i.suffix == BYTE_MNEM_SUFFIX)
5032 {
5033 temp.bitfield.imm8 = overlap.bitfield.imm8;
5034 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5035 }
5036 else if (i.suffix == WORD_MNEM_SUFFIX)
5037 temp.bitfield.imm16 = overlap.bitfield.imm16;
5038 else if (i.suffix == QWORD_MNEM_SUFFIX)
5039 {
5040 temp.bitfield.imm64 = overlap.bitfield.imm64;
5041 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5042 }
5043 else
5044 temp.bitfield.imm32 = overlap.bitfield.imm32;
5045 overlap = temp;
5046 }
5047 else if (operand_type_equal (&overlap, &imm16_32_32s)
5048 || operand_type_equal (&overlap, &imm16_32)
5049 || operand_type_equal (&overlap, &imm16_32s))
5050 {
5051 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5052 overlap = imm16;
5053 else
5054 overlap = imm32s;
5055 }
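/* E.g. "push $0x12345" with no suffix thus takes a 16-bit immediate
   in 16-bit code and a 32-bit one in 32-bit code.  */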
5056 if (!operand_type_equal (&overlap, &imm8)
5057 && !operand_type_equal (&overlap, &imm8s)
5058 && !operand_type_equal (&overlap, &imm16)
5059 && !operand_type_equal (&overlap, &imm32)
5060 && !operand_type_equal (&overlap, &imm32s)
5061 && !operand_type_equal (&overlap, &imm64))
5062 {
5063 as_bad (_("no instruction mnemonic suffix given; "
5064 "can't determine immediate size"));
5065 return 0;
5066 }
5067 }
5068 i.types[j] = overlap;
5069
5070 return 1;
5071 }
5072
5073 static int
5074 finalize_imm (void)
5075 {
5076 unsigned int j, n;
5077
5078 /* Update the first 2 immediate operands. */
5079 n = i.operands > 2 ? 2 : i.operands;
5080 if (n)
5081 {
5082 for (j = 0; j < n; j++)
5083 if (update_imm (j) == 0)
5084 return 0;
5085
5086 /* The 3rd operand can't be immediate operand. */
5087 gas_assert (operand_type_check (i.types[2], imm) == 0);
5088 }
5089
5090 return 1;
5091 }
5092
5093 static int
5094 bad_implicit_operand (int xmm)
5095 {
5096 const char *ireg = xmm ? "xmm0" : "ymm0";
5097
5098 if (intel_syntax)
5099 as_bad (_("the last operand of `%s' must be `%s%s'"),
5100 i.tm.name, register_prefix, ireg);
5101 else
5102 as_bad (_("the first operand of `%s' must be `%s%s'"),
5103 i.tm.name, register_prefix, ireg);
5104 return 0;
5105 }
5106
5107 static int
5108 process_operands (void)
5109 {
5110 /* Default segment register this instruction will use for memory
5111 accesses. 0 means unknown. This is only for optimizing out
5112 unnecessary segment overrides. */
5113 const seg_entry *default_seg = 0;
5114
5115 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5116 {
5117 unsigned int dupl = i.operands;
5118 unsigned int dest = dupl - 1;
5119 unsigned int j;
5120
5121 /* The destination must be an xmm register. */
5122 gas_assert (i.reg_operands
5123 && MAX_OPERANDS > dupl
5124 && operand_type_equal (&i.types[dest], &regxmm));
5125
5126 if (i.tm.opcode_modifier.firstxmm0)
5127 {
5128 /* The first operand is implicit and must be xmm0. */
5129 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5130 if (register_number (i.op[0].regs) != 0)
5131 return bad_implicit_operand (1);
5132
5133 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5134 {
5135 /* Keep xmm0 for instructions with VEX prefix and 3
5136 sources. */
5137 goto duplicate;
5138 }
5139 else
5140 {
5141 /* We remove the first xmm0 and keep the number of
5142 operands unchanged, which in fact duplicates the
5143 destination. */
5144 for (j = 1; j < i.operands; j++)
5145 {
5146 i.op[j - 1] = i.op[j];
5147 i.types[j - 1] = i.types[j];
5148 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5149 }
5150 }
5151 }
5152 else if (i.tm.opcode_modifier.implicit1stxmm0)
5153 {
5154 gas_assert ((MAX_OPERANDS - 1) > dupl
5155 && (i.tm.opcode_modifier.vexsources
5156 == VEX3SOURCES));
5157
5158 /* Add the implicit xmm0 for instructions with VEX prefix
5159 and 3 sources. */
5160 for (j = i.operands; j > 0; j--)
5161 {
5162 i.op[j] = i.op[j - 1];
5163 i.types[j] = i.types[j - 1];
5164 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5165 }
5166 i.op[0].regs
5167 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5168 i.types[0] = regxmm;
5169 i.tm.operand_types[0] = regxmm;
5170
5171 i.operands += 2;
5172 i.reg_operands += 2;
5173 i.tm.operands += 2;
5174
5175 dupl++;
5176 dest++;
5177 i.op[dupl] = i.op[dest];
5178 i.types[dupl] = i.types[dest];
5179 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5180 }
5181 else
5182 {
5183 duplicate:
5184 i.operands++;
5185 i.reg_operands++;
5186 i.tm.operands++;
5187
5188 i.op[dupl] = i.op[dest];
5189 i.types[dupl] = i.types[dest];
5190 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5191 }
5192
5193 if (i.tm.opcode_modifier.immext)
5194 process_immext ();
5195 }
5196 else if (i.tm.opcode_modifier.firstxmm0)
5197 {
5198 unsigned int j;
5199
5200 /* The first operand is implicit and must be xmm0/ymm0. */
5201 gas_assert (i.reg_operands
5202 && (operand_type_equal (&i.types[0], &regxmm)
5203 || operand_type_equal (&i.types[0], &regymm)));
5204 if (register_number (i.op[0].regs) != 0)
5205 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5206
5207 for (j = 1; j < i.operands; j++)
5208 {
5209 i.op[j - 1] = i.op[j];
5210 i.types[j - 1] = i.types[j];
5211
5212 /* We need to adjust fields in i.tm since they are used by
5213 build_modrm_byte. */
5214 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5215 }
5216
5217 i.operands--;
5218 i.reg_operands--;
5219 i.tm.operands--;
5220 }
5221 else if (i.tm.opcode_modifier.regkludge)
5222 {
5223 /* The imul $imm, %reg instruction is converted into
5224 imul $imm, %reg, %reg, and the clr %reg instruction
5225 is converted into xor %reg, %reg. */
5226
5227 unsigned int first_reg_op;
5228
5229 if (operand_type_check (i.types[0], reg))
5230 first_reg_op = 0;
5231 else
5232 first_reg_op = 1;
5233 /* Pretend we saw the extra register operand. */
5234 gas_assert (i.reg_operands == 1
5235 && i.op[first_reg_op + 1].regs == 0);
5236 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5237 i.types[first_reg_op + 1] = i.types[first_reg_op];
5238 i.operands++;
5239 i.reg_operands++;
5240 }
5241
5242 if (i.tm.opcode_modifier.shortform)
5243 {
5244 if (i.types[0].bitfield.sreg2
5245 || i.types[0].bitfield.sreg3)
5246 {
5247 if (i.tm.base_opcode == POP_SEG_SHORT
5248 && i.op[0].regs->reg_num == 1)
5249 {
5250 as_bad (_("you can't `pop %scs'"), register_prefix);
5251 return 0;
5252 }
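/* The segment register number lands in bits 3-5 of the opcode:
   pop %es is 0x07, pop %ss 0x17, pop %ds 0x1f.  %cs has number 1,
   which would give 0x0f, the two-byte opcode escape, hence the
   check above.  */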
5253 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5254 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5255 i.rex |= REX_B;
5256 }
5257 else
5258 {
5259 /* The register or float register operand is in operand
5260 0 or 1. */
5261 unsigned int op;
5262
5263 if (i.types[0].bitfield.floatreg
5264 || operand_type_check (i.types[0], reg))
5265 op = 0;
5266 else
5267 op = 1;
5268 /* Register goes in low 3 bits of opcode. */
5269 i.tm.base_opcode |= i.op[op].regs->reg_num;
5270 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5271 i.rex |= REX_B;
5272 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5273 {
5274 /* Warn about some common errors, but press on regardless.
5275 The first case can be generated by gcc (<= 2.8.1). */
5276 if (i.operands == 2)
5277 {
5278 /* Reversed arguments on faddp, fsubp, etc. */
5279 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5280 register_prefix, i.op[!intel_syntax].regs->reg_name,
5281 register_prefix, i.op[intel_syntax].regs->reg_name);
5282 }
5283 else
5284 {
5285 /* Extraneous `l' suffix on fp insn. */
5286 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5287 register_prefix, i.op[0].regs->reg_name);
5288 }
5289 }
5290 }
5291 }
5292 else if (i.tm.opcode_modifier.modrm)
5293 {
5294 /* The opcode is completed (modulo i.tm.extension_opcode which
5295 must be put into the modrm byte). Now, we make the modrm and
5296 index base bytes based on all the info we've collected. */
5297
5298 default_seg = build_modrm_byte ();
5299 }
5300 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5301 {
5302 default_seg = &ds;
5303 }
5304 else if (i.tm.opcode_modifier.isstring)
5305 {
5306 /* For the string instructions that allow a segment override
5307 on one of their operands, the default segment is ds. */
5308 default_seg = &ds;
5309 }
5310
5311 if (i.tm.base_opcode == 0x8d /* lea */
5312 && i.seg[0]
5313 && !quiet_warnings)
5314 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5315
5316 /* If a segment was explicitly specified, and the specified segment
5317 is not the default, use an opcode prefix to select it. If we
5318 never figured out what the default segment is, then default_seg
5319 will be zero at this point, and the specified segment prefix will
5320 always be used. */
5321 if ((i.seg[0]) && (i.seg[0] != default_seg))
5322 {
5323 if (!add_prefix (i.seg[0]->seg_prefix))
5324 return 0;
5325 }
5326 return 1;
5327 }
5328
5329 static const seg_entry *
5330 build_modrm_byte (void)
5331 {
5332 const seg_entry *default_seg = 0;
5333 unsigned int source, dest;
5334 int vex_3_sources;
5335
5336 /* The first operand of instructions with VEX prefix and 3 sources
5337 must be VEX_Imm4. */
5338 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5339 if (vex_3_sources)
5340 {
5341 unsigned int nds, reg_slot;
5342 expressionS *exp;
5343
5344 if (i.tm.opcode_modifier.veximmext
5345 && i.tm.opcode_modifier.immext)
5346 {
5347 dest = i.operands - 2;
5348 gas_assert (dest == 3);
5349 }
5350 else
5351 dest = i.operands - 1;
5352 nds = dest - 1;
5353
5354 /* There are 2 kinds of instructions:
5355 1. 5 operands: 4 register operands or 3 register operands
5356 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5357 VexW0 or VexW1. The destination must be either an XMM or a
5358 YMM register.
5359 2. 4 operands: 4 register operands or 3 register operands
5360 plus 1 memory operand, VexXDS, and VexImmExt */
5361 gas_assert ((i.reg_operands == 4
5362 || (i.reg_operands == 3 && i.mem_operands == 1))
5363 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5364 && (i.tm.opcode_modifier.veximmext
5365 || (i.imm_operands == 1
5366 && i.types[0].bitfield.vec_imm4
5367 && (i.tm.opcode_modifier.vexw == VEXW0
5368 || i.tm.opcode_modifier.vexw == VEXW1)
5369 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5370 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5371
5372 if (i.imm_operands == 0)
5373 {
5374 /* When there is no immediate operand, generate an 8bit
5375 immediate operand to encode the first operand. */
5376 exp = &im_expressions[i.imm_operands++];
5377 i.op[i.operands].imms = exp;
5378 i.types[i.operands] = imm8;
5379 i.operands++;
5380 /* If VexW1 is set, the first operand is the source and
5381 the second operand is encoded in the immediate operand. */
5382 if (i.tm.opcode_modifier.vexw == VEXW1)
5383 {
5384 source = 0;
5385 reg_slot = 1;
5386 }
5387 else
5388 {
5389 source = 1;
5390 reg_slot = 0;
5391 }
5392
5393 /* FMA swaps REG and NDS. */
5394 if (i.tm.cpu_flags.bitfield.cpufma)
5395 {
5396 unsigned int tmp;
5397 tmp = reg_slot;
5398 reg_slot = nds;
5399 nds = tmp;
5400 }
5401
5402 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5403 &regxmm)
5404 || operand_type_equal (&i.tm.operand_types[reg_slot],
5405 &regymm));
5406 exp->X_op = O_constant;
5407 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5408 }
5409 else
5410 {
5411 unsigned int imm_slot;
5412
5413 if (i.tm.opcode_modifier.vexw == VEXW0)
5414 {
5415 /* If VexW0 is set, the third operand is the source and
5416 the second operand is encoded in the immediate
5417 operand. */
5418 source = 2;
5419 reg_slot = 1;
5420 }
5421 else
5422 {
5423 /* VexW1 is set, the second operand is the source and
5424 the third operand is encoded in the immediate
5425 operand. */
5426 source = 1;
5427 reg_slot = 2;
5428 }
5429
5430 if (i.tm.opcode_modifier.immext)
5431 {
5432 /* When ImmExt is set, the immediate byte is the last
5433 operand. */
5434 imm_slot = i.operands - 1;
5435 source--;
5436 reg_slot--;
5437 }
5438 else
5439 {
5440 imm_slot = 0;
5441
5442 /* Turn on Imm8 so that output_imm will generate it. */
5443 i.types[imm_slot].bitfield.imm8 = 1;
5444 }
5445
5446 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5447 &regxmm)
5448 || operand_type_equal (&i.tm.operand_types[reg_slot],
5449 &regymm));
5450 i.op[imm_slot].imms->X_add_number
5451 |= register_number (i.op[reg_slot].regs) << 4;
5452 }
5453
5454 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5455 || operand_type_equal (&i.tm.operand_types[nds],
5456 &regymm));
5457 i.vex.register_specifier = i.op[nds].regs;
5458 }
5459 else
5460 source = dest = 0;
5461
5462 /* i.reg_operands MUST be the number of real register operands;
5463 implicit registers do not count. If there are 3 register
5464 operands, it must be an instruction with VexNDS. For an
5465 instruction with VexNDD, the destination register is encoded
5466 in the VEX prefix. If there are 4 register operands, it must be
5467 an instruction with VEX prefix and 3 sources. */
5468 if (i.mem_operands == 0
5469 && ((i.reg_operands == 2
5470 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5471 || (i.reg_operands == 3
5472 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5473 || (i.reg_operands == 4 && vex_3_sources)))
5474 {
5475 switch (i.operands)
5476 {
5477 case 2:
5478 source = 0;
5479 break;
5480 case 3:
5481 /* When there are 3 operands, one of them may be immediate,
5482 which may be the first or the last operand. Otherwise,
5483 the first operand must be the shift count register (%cl) or it
5484 is an instruction with VexNDS. */
5485 gas_assert (i.imm_operands == 1
5486 || (i.imm_operands == 0
5487 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5488 || i.types[0].bitfield.shiftcount)));
5489 if (operand_type_check (i.types[0], imm)
5490 || i.types[0].bitfield.shiftcount)
5491 source = 1;
5492 else
5493 source = 0;
5494 break;
5495 case 4:
5496 /* When there are 4 operands, the first two must be 8bit
5497 immediate operands. The source operand will be the 3rd
5498 one.
5499
5500 For instructions with VexNDS, if the first operand
5501 is an imm8, the source operand is the 2nd one. If the last
5502 operand is imm8, the source operand is the first one. */
5503 gas_assert ((i.imm_operands == 2
5504 && i.types[0].bitfield.imm8
5505 && i.types[1].bitfield.imm8)
5506 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5507 && i.imm_operands == 1
5508 && (i.types[0].bitfield.imm8
5509 || i.types[i.operands - 1].bitfield.imm8)));
5510 if (i.imm_operands == 2)
5511 source = 2;
5512 else
5513 {
5514 if (i.types[0].bitfield.imm8)
5515 source = 1;
5516 else
5517 source = 0;
5518 }
5519 break;
5520 case 5:
5521 break;
5522 default:
5523 abort ();
5524 }
5525
5526 if (!vex_3_sources)
5527 {
5528 dest = source + 1;
5529
5530 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5531 {
5532 /* For instructions with VexNDS, the register-only
5533 source operand must be 32/64bit integer, XMM or
5534 YMM register. It is encoded in VEX prefix. We
5535 need to clear RegMem bit before calling
5536 operand_type_equal. */
5537
5538 i386_operand_type op;
5539 unsigned int vvvv;
5540
5541 /* Check register-only source operand when two source
5542 operands are swapped. */
5543 if (!i.tm.operand_types[source].bitfield.baseindex
5544 && i.tm.operand_types[dest].bitfield.baseindex)
5545 {
5546 vvvv = source;
5547 source = dest;
5548 }
5549 else
5550 vvvv = dest;
5551
5552 op = i.tm.operand_types[vvvv];
5553 op.bitfield.regmem = 0;
5554 if ((dest + 1) >= i.operands
5555 || (op.bitfield.reg32 != 1
5556 && op.bitfield.reg64 != 1
5557 && !operand_type_equal (&op, &regxmm)
5558 && !operand_type_equal (&op, &regymm)))
5559 abort ();
5560 i.vex.register_specifier = i.op[vvvv].regs;
5561 dest++;
5562 }
5563 }
5564
5565 i.rm.mode = 3;
5566 /* One of the register operands will be encoded in the i.rm.reg
5567 field, the other in the combined i.rm.mode and i.rm.regmem
5568 fields. If no form of this instruction supports a memory
5569 destination operand, then we assume the source operand may
5570 sometimes be a memory operand and so we need to store the
5571 destination in the i.rm.reg field. */
5572 if (!i.tm.operand_types[dest].bitfield.regmem
5573 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5574 {
5575 i.rm.reg = i.op[dest].regs->reg_num;
5576 i.rm.regmem = i.op[source].regs->reg_num;
5577 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5578 i.rex |= REX_R;
5579 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5580 i.rex |= REX_B;
5581 }
5582 else
5583 {
5584 i.rm.reg = i.op[source].regs->reg_num;
5585 i.rm.regmem = i.op[dest].regs->reg_num;
5586 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5587 i.rex |= REX_B;
5588 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5589 i.rex |= REX_R;
5590 }
5591 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5592 {
5593 if (!i.types[0].bitfield.control
5594 && !i.types[1].bitfield.control)
5595 abort ();
5596 i.rex &= ~(REX_R | REX_B);
5597 add_prefix (LOCK_PREFIX_OPCODE);
5598 }
5599 }
5600 else
5601 { /* If it's not 2 reg operands... */
5602 unsigned int mem;
5603
5604 if (i.mem_operands)
5605 {
5606 unsigned int fake_zero_displacement = 0;
5607 unsigned int op;
5608
5609 for (op = 0; op < i.operands; op++)
5610 if (operand_type_check (i.types[op], anymem))
5611 break;
5612 gas_assert (op < i.operands);
5613
5614 if (i.tm.opcode_modifier.vecsib)
5615 {
5616 if (i.index_reg->reg_num == RegEiz
5617 || i.index_reg->reg_num == RegRiz)
5618 abort ();
5619
5620 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5621 if (!i.base_reg)
5622 {
5623 i.sib.base = NO_BASE_REGISTER;
5624 i.sib.scale = i.log2_scale_factor;
5625 i.types[op].bitfield.disp8 = 0;
5626 i.types[op].bitfield.disp16 = 0;
5627 i.types[op].bitfield.disp64 = 0;
5628 if (flag_code != CODE_64BIT)
5629 {
5630 /* Must be 32 bit */
5631 i.types[op].bitfield.disp32 = 1;
5632 i.types[op].bitfield.disp32s = 0;
5633 }
5634 else
5635 {
5636 i.types[op].bitfield.disp32 = 0;
5637 i.types[op].bitfield.disp32s = 1;
5638 }
5639 }
5640 i.sib.index = i.index_reg->reg_num;
5641 if ((i.index_reg->reg_flags & RegRex) != 0)
5642 i.rex |= REX_X;
5643 }
5644
5645 default_seg = &ds;
5646
5647 if (i.base_reg == 0)
5648 {
5649 i.rm.mode = 0;
5650 if (!i.disp_operands)
5651 {
5652 fake_zero_displacement = 1;
5653 /* Instructions with VSIB byte need 32bit displacement
5654 if there is no base register. */
5655 if (i.tm.opcode_modifier.vecsib)
5656 i.types[op].bitfield.disp32 = 1;
5657 }
5658 if (i.index_reg == 0)
5659 {
5660 gas_assert (!i.tm.opcode_modifier.vecsib);
5661 /* Operand is just <disp> */
5662 if (flag_code == CODE_64BIT)
5663 {
5664 /* 64bit mode overwrites the 32bit absolute
5665 addressing by RIP relative addressing and
5666 absolute addressing is encoded by one of the
5667 redundant SIB forms. */
5668 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5669 i.sib.base = NO_BASE_REGISTER;
5670 i.sib.index = NO_INDEX_REGISTER;
5671 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5672 ? disp32s : disp32);
5673 }
5674 else if ((flag_code == CODE_16BIT)
5675 ^ (i.prefix[ADDR_PREFIX] != 0))
5676 {
5677 i.rm.regmem = NO_BASE_REGISTER_16;
5678 i.types[op] = disp16;
5679 }
5680 else
5681 {
5682 i.rm.regmem = NO_BASE_REGISTER;
5683 i.types[op] = disp32;
5684 }
5685 }
5686 else if (!i.tm.opcode_modifier.vecsib)
5687 {
5688 /* !i.base_reg && i.index_reg */
5689 if (i.index_reg->reg_num == RegEiz
5690 || i.index_reg->reg_num == RegRiz)
5691 i.sib.index = NO_INDEX_REGISTER;
5692 else
5693 i.sib.index = i.index_reg->reg_num;
5694 i.sib.base = NO_BASE_REGISTER;
5695 i.sib.scale = i.log2_scale_factor;
5696 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5697 i.types[op].bitfield.disp8 = 0;
5698 i.types[op].bitfield.disp16 = 0;
5699 i.types[op].bitfield.disp64 = 0;
5700 if (flag_code != CODE_64BIT)
5701 {
5702 /* Must be 32 bit */
5703 i.types[op].bitfield.disp32 = 1;
5704 i.types[op].bitfield.disp32s = 0;
5705 }
5706 else
5707 {
5708 i.types[op].bitfield.disp32 = 0;
5709 i.types[op].bitfield.disp32s = 1;
5710 }
5711 if ((i.index_reg->reg_flags & RegRex) != 0)
5712 i.rex |= REX_X;
5713 }
5714 }
5715 /* RIP addressing for 64bit mode. */
5716 else if (i.base_reg->reg_num == RegRip ||
5717 i.base_reg->reg_num == RegEip)
5718 {
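/* E.g. "mov foo(%rip), %eax": RIP-relative addressing always uses a
   signed 32-bit displacement; a bare "(%rip)" gets a fake zero
   displacement below.  */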
5719 gas_assert (!i.tm.opcode_modifier.vecsib);
5720 i.rm.regmem = NO_BASE_REGISTER;
5721 i.types[op].bitfield.disp8 = 0;
5722 i.types[op].bitfield.disp16 = 0;
5723 i.types[op].bitfield.disp32 = 0;
5724 i.types[op].bitfield.disp32s = 1;
5725 i.types[op].bitfield.disp64 = 0;
5726 i.flags[op] |= Operand_PCrel;
5727 if (! i.disp_operands)
5728 fake_zero_displacement = 1;
5729 }
5730 else if (i.base_reg->reg_type.bitfield.reg16)
5731 {
5732 gas_assert (!i.tm.opcode_modifier.vecsib);
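/* 16-bit ModRM r/m encodings: 0 (%bx,%si), 1 (%bx,%di), 2 (%bp,%si),
   3 (%bp,%di), 4 (%si), 5 (%di), 6 (%bp) or bare disp16, 7 (%bx).
   %si and %di have reg_num 6 and 7, hence the "reg_num - 6"
   arithmetic below.  */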
5733 switch (i.base_reg->reg_num)
5734 {
5735 case 3: /* (%bx) */
5736 if (i.index_reg == 0)
5737 i.rm.regmem = 7;
5738 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5739 i.rm.regmem = i.index_reg->reg_num - 6;
5740 break;
5741 case 5: /* (%bp) */
5742 default_seg = &ss;
5743 if (i.index_reg == 0)
5744 {
5745 i.rm.regmem = 6;
5746 if (operand_type_check (i.types[op], disp) == 0)
5747 {
5748 /* fake (%bp) into 0(%bp) */
5749 i.types[op].bitfield.disp8 = 1;
5750 fake_zero_displacement = 1;
5751 }
5752 }
5753 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5754 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5755 break;
5756 default: /* (%si) -> 4 or (%di) -> 5 */
5757 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5758 }
5759 i.rm.mode = mode_from_disp_size (i.types[op]);
5760 }
5761 else /* i.base_reg and 32/64 bit mode */
5762 {
5763 if (flag_code == CODE_64BIT
5764 && operand_type_check (i.types[op], disp))
5765 {
5766 i386_operand_type temp;
5767 operand_type_set (&temp, 0);
5768 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5769 i.types[op] = temp;
5770 if (i.prefix[ADDR_PREFIX] == 0)
5771 i.types[op].bitfield.disp32s = 1;
5772 else
5773 i.types[op].bitfield.disp32 = 1;
5774 }
5775
5776 if (!i.tm.opcode_modifier.vecsib)
5777 i.rm.regmem = i.base_reg->reg_num;
5778 if ((i.base_reg->reg_flags & RegRex) != 0)
5779 i.rex |= REX_B;
5780 i.sib.base = i.base_reg->reg_num;
5781 /* x86-64 ignores REX prefix bit here to avoid decoder
5782 complications. */
5783 if (!(i.base_reg->reg_flags & RegRex)
5784 && (i.base_reg->reg_num == EBP_REG_NUM
5785 || i.base_reg->reg_num == ESP_REG_NUM))
5786 default_seg = &ss;
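/* A base of %ebp/%rbp/%r13 (reg_num 5) cannot be encoded with
   mod == 00, since that combination means disp32 (or RIP-relative),
   so fake a one-byte zero displacement: e.g. "mov (%ebp), %eax" is
   emitted as 8b 45 00.  */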
5787 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5788 {
5789 fake_zero_displacement = 1;
5790 i.types[op].bitfield.disp8 = 1;
5791 }
5792 i.sib.scale = i.log2_scale_factor;
5793 if (i.index_reg == 0)
5794 {
5795 gas_assert (!i.tm.opcode_modifier.vecsib);
5796 /* <disp>(%esp) becomes two byte modrm with no index
5797 register. We've already stored the code for esp
5798 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5799 Any base register besides %esp will not use the
5800 extra modrm byte. */
5801 i.sib.index = NO_INDEX_REGISTER;
5802 }
5803 else if (!i.tm.opcode_modifier.vecsib)
5804 {
5805 if (i.index_reg->reg_num == RegEiz
5806 || i.index_reg->reg_num == RegRiz)
5807 i.sib.index = NO_INDEX_REGISTER;
5808 else
5809 i.sib.index = i.index_reg->reg_num;
5810 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5811 if ((i.index_reg->reg_flags & RegRex) != 0)
5812 i.rex |= REX_X;
5813 }
5814
5815 if (i.disp_operands
5816 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5817 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5818 i.rm.mode = 0;
5819 else
5820 {
5821 if (!fake_zero_displacement
5822 && !i.disp_operands
5823 && i.disp_encoding)
5824 {
5825 fake_zero_displacement = 1;
5826 if (i.disp_encoding == disp_encoding_8bit)
5827 i.types[op].bitfield.disp8 = 1;
5828 else
5829 i.types[op].bitfield.disp32 = 1;
5830 }
5831 i.rm.mode = mode_from_disp_size (i.types[op]);
5832 }
5833 }
5834
5835 if (fake_zero_displacement)
5836 {
5837 /* Fakes a zero displacement assuming that i.types[op]
5838 holds the correct displacement size. */
5839 expressionS *exp;
5840
5841 gas_assert (i.op[op].disps == 0);
5842 exp = &disp_expressions[i.disp_operands++];
5843 i.op[op].disps = exp;
5844 exp->X_op = O_constant;
5845 exp->X_add_number = 0;
5846 exp->X_add_symbol = (symbolS *) 0;
5847 exp->X_op_symbol = (symbolS *) 0;
5848 }
5849
5850 mem = op;
5851 }
5852 else
5853 mem = ~0;
5854
5855 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5856 {
5857 if (operand_type_check (i.types[0], imm))
5858 i.vex.register_specifier = NULL;
5859 else
5860 {
5861 /* VEX.vvvv encodes one of the sources when the first
5862 operand is not an immediate. */
5863 if (i.tm.opcode_modifier.vexw == VEXW0)
5864 i.vex.register_specifier = i.op[0].regs;
5865 else
5866 i.vex.register_specifier = i.op[1].regs;
5867 }
5868
5869 /* Destination is a XMM register encoded in the ModRM.reg
5870 and VEX.R bit. */
5871 i.rm.reg = i.op[2].regs->reg_num;
5872 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5873 i.rex |= REX_R;
5874
5875 /* ModRM.rm and VEX.B encodes the other source. */
5876 if (!i.mem_operands)
5877 {
5878 i.rm.mode = 3;
5879
5880 if (i.tm.opcode_modifier.vexw == VEXW0)
5881 i.rm.regmem = i.op[1].regs->reg_num;
5882 else
5883 i.rm.regmem = i.op[0].regs->reg_num;
5884
5885 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5886 i.rex |= REX_B;
5887 }
5888 }
5889 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5890 {
5891 i.vex.register_specifier = i.op[2].regs;
5892 if (!i.mem_operands)
5893 {
5894 i.rm.mode = 3;
5895 i.rm.regmem = i.op[1].regs->reg_num;
5896 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5897 i.rex |= REX_B;
5898 }
5899 }
5900 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5901 (if any) based on i.tm.extension_opcode. Again, we must be
5902 careful to make sure that segment/control/debug/test/MMX
5903 registers are coded into the i.rm.reg field. */
5904 else if (i.reg_operands)
5905 {
5906 unsigned int op;
5907 unsigned int vex_reg = ~0;
5908
5909 for (op = 0; op < i.operands; op++)
5910 if (i.types[op].bitfield.reg8
5911 || i.types[op].bitfield.reg16
5912 || i.types[op].bitfield.reg32
5913 || i.types[op].bitfield.reg64
5914 || i.types[op].bitfield.regmmx
5915 || i.types[op].bitfield.regxmm
5916 || i.types[op].bitfield.regymm
5917 || i.types[op].bitfield.sreg2
5918 || i.types[op].bitfield.sreg3
5919 || i.types[op].bitfield.control
5920 || i.types[op].bitfield.debug
5921 || i.types[op].bitfield.test)
5922 break;
5923
5924 if (vex_3_sources)
5925 op = dest;
5926 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5927 {
5928 /* For instructions with VexNDS, the register-only
5929 source operand is encoded in VEX prefix. */
5930 gas_assert (mem != (unsigned int) ~0);
5931
5932 if (op > mem)
5933 {
5934 vex_reg = op++;
5935 gas_assert (op < i.operands);
5936 }
5937 else
5938 {
5939 /* Check register-only source operand when two source
5940 operands are swapped. */
5941 if (!i.tm.operand_types[op].bitfield.baseindex
5942 && i.tm.operand_types[op + 1].bitfield.baseindex)
5943 {
5944 vex_reg = op;
5945 op += 2;
5946 gas_assert (mem == (vex_reg + 1)
5947 && op < i.operands);
5948 }
5949 else
5950 {
5951 vex_reg = op + 1;
5952 gas_assert (vex_reg < i.operands);
5953 }
5954 }
5955 }
5956 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5957 {
5958 /* For instructions with VexNDD, the register destination
5959		 is encoded in the VEX prefix.  */
5960 if (i.mem_operands == 0)
5961 {
5962 /* There is no memory operand. */
5963 gas_assert ((op + 2) == i.operands);
5964 vex_reg = op + 1;
5965 }
5966 else
5967 {
5968 /* There are only 2 operands. */
5969 gas_assert (op < 2 && i.operands == 2);
5970 vex_reg = 1;
5971 }
5972 }
5973 else
5974 gas_assert (op < i.operands);
5975
5976 if (vex_reg != (unsigned int) ~0)
5977 {
5978 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5979
5980 if (type->bitfield.reg32 != 1
5981 && type->bitfield.reg64 != 1
5982 && !operand_type_equal (type, &regxmm)
5983 && !operand_type_equal (type, &regymm))
5984 abort ();
5985
5986 i.vex.register_specifier = i.op[vex_reg].regs;
5987 }
5988
5989 /* Don't set OP operand twice. */
5990 if (vex_reg != op)
5991 {
5992 /* If there is an extension opcode to put here, the
5993 register number must be put into the regmem field. */
5994 if (i.tm.extension_opcode != None)
5995 {
5996 i.rm.regmem = i.op[op].regs->reg_num;
5997 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5998 i.rex |= REX_B;
5999 }
6000 else
6001 {
6002 i.rm.reg = i.op[op].regs->reg_num;
6003 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6004 i.rex |= REX_R;
6005 }
6006 }
6007
6008 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6009 must set it to 3 to indicate this is a register operand
6010 in the regmem field. */
6011 if (!i.mem_operands)
6012 i.rm.mode = 3;
6013 }
6014
6015 /* Fill in i.rm.reg field with extension opcode (if any). */
6016 if (i.tm.extension_opcode != None)
6017 i.rm.reg = i.tm.extension_opcode;
6018 }
6019 return default_seg;
6020 }
6021
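/* Output a relaxable jump (a conditional or unconditional branch whose
   displacement size is not yet known, e.g. a `jne' to a label that is
   still unresolved) as an rs_machine_dependent frag, so that relaxation
   can later pick the shortest encoding.  */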
6022 static void
6023 output_branch (void)
6024 {
6025 char *p;
6026 int size;
6027 int code16;
6028 int prefix;
6029 relax_substateT subtype;
6030 symbolS *sym;
6031 offsetT off;
6032
6033 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6034 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6035
6036 prefix = 0;
6037 if (i.prefix[DATA_PREFIX] != 0)
6038 {
6039 prefix = 1;
6040 i.prefixes -= 1;
6041 code16 ^= CODE16;
6042 }
6043 /* Pentium4 branch hints. */
6044 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6045 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6046 {
6047 prefix++;
6048 i.prefixes--;
6049 }
6050 if (i.prefix[REX_PREFIX] != 0)
6051 {
6052 prefix++;
6053 i.prefixes--;
6054 }
6055
6056 if (i.prefixes != 0 && !intel_syntax)
6057 as_warn (_("skipping prefixes on this instruction"));
6058
6059   /* It's always a symbol; end the frag & set up for relax.
6060 Make sure there is enough room in this frag for the largest
6061 instruction we may generate in md_convert_frag. This is 2
6062 bytes for the opcode and room for the prefix and largest
6063 displacement. */
6064 frag_grow (prefix + 2 + 4);
6065 /* Prefix and 1 opcode byte go in fr_fix. */
6066 p = frag_more (prefix + 1);
6067 if (i.prefix[DATA_PREFIX] != 0)
6068 *p++ = DATA_PREFIX_OPCODE;
6069 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6070 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6071 *p++ = i.prefix[SEG_PREFIX];
6072 if (i.prefix[REX_PREFIX] != 0)
6073 *p++ = i.prefix[REX_PREFIX];
6074 *p = i.tm.base_opcode;
6075
6076 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6077 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6078 else if (cpu_arch_flags.bitfield.cpui386)
6079 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6080 else
6081 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6082 subtype |= code16;
6083
6084 sym = i.op[0].disps->X_add_symbol;
6085 off = i.op[0].disps->X_add_number;
6086
6087 if (i.op[0].disps->X_op != O_constant
6088 && i.op[0].disps->X_op != O_symbol)
6089 {
6090 /* Handle complex expressions. */
6091 sym = make_expr_symbol (i.op[0].disps);
6092 off = 0;
6093 }
6094
6095 /* 1 possible extra opcode + 4 byte displacement go in var part.
6096 Pass reloc in fr_var. */
6097 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6098 }
6099
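/* Output a branch whose displacement size is fixed by the template
   (JumpByte or JumpDword), e.g. loop and jecxz, rather than being
   chosen by relaxation.  */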
6100 static void
6101 output_jump (void)
6102 {
6103 char *p;
6104 int size;
6105 fixS *fixP;
6106
6107 if (i.tm.opcode_modifier.jumpbyte)
6108 {
6109 /* This is a loop or jecxz type instruction. */
6110 size = 1;
6111 if (i.prefix[ADDR_PREFIX] != 0)
6112 {
6113 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6114 i.prefixes -= 1;
6115 }
6116 /* Pentium4 branch hints. */
6117 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6118 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6119 {
6120 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6121 i.prefixes--;
6122 }
6123 }
6124 else
6125 {
6126 int code16;
6127
6128 code16 = 0;
6129 if (flag_code == CODE_16BIT)
6130 code16 = CODE16;
6131
6132 if (i.prefix[DATA_PREFIX] != 0)
6133 {
6134 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6135 i.prefixes -= 1;
6136 code16 ^= CODE16;
6137 }
6138
6139 size = 4;
6140 if (code16)
6141 size = 2;
6142 }
6143
6144 if (i.prefix[REX_PREFIX] != 0)
6145 {
6146 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6147 i.prefixes -= 1;
6148 }
6149
6150 if (i.prefixes != 0 && !intel_syntax)
6151 as_warn (_("skipping prefixes on this instruction"));
6152
6153 p = frag_more (i.tm.opcode_length + size);
6154 switch (i.tm.opcode_length)
6155 {
6156 case 2:
6157 *p++ = i.tm.base_opcode >> 8;
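	  /* Fall through.  */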
6158 case 1:
6159 *p++ = i.tm.base_opcode;
6160 break;
6161 default:
6162 abort ();
6163 }
6164
6165 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6166 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6167
6168 /* All jumps handled here are signed, but don't use a signed limit
6169 check for 32 and 16 bit jumps as we want to allow wrap around at
6170 4G and 64k respectively. */
6171 if (size == 1)
6172 fixP->fx_signed = 1;
6173 }
6174
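/* Output a direct inter-segment (far) jump or call taking an explicit
   segment and offset, e.g. (AT&T syntax) ljmp $0x10, $target.  */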
6175 static void
6176 output_interseg_jump (void)
6177 {
6178 char *p;
6179 int size;
6180 int prefix;
6181 int code16;
6182
6183 code16 = 0;
6184 if (flag_code == CODE_16BIT)
6185 code16 = CODE16;
6186
6187 prefix = 0;
6188 if (i.prefix[DATA_PREFIX] != 0)
6189 {
6190 prefix = 1;
6191 i.prefixes -= 1;
6192 code16 ^= CODE16;
6193 }
6194 if (i.prefix[REX_PREFIX] != 0)
6195 {
6196 prefix++;
6197 i.prefixes -= 1;
6198 }
6199
6200 size = 4;
6201 if (code16)
6202 size = 2;
6203
6204 if (i.prefixes != 0 && !intel_syntax)
6205 as_warn (_("skipping prefixes on this instruction"));
6206
6207 /* 1 opcode; 2 segment; offset */
6208 p = frag_more (prefix + 1 + 2 + size);
6209
6210 if (i.prefix[DATA_PREFIX] != 0)
6211 *p++ = DATA_PREFIX_OPCODE;
6212
6213 if (i.prefix[REX_PREFIX] != 0)
6214 *p++ = i.prefix[REX_PREFIX];
6215
6216 *p++ = i.tm.base_opcode;
6217 if (i.op[1].imms->X_op == O_constant)
6218 {
6219 offsetT n = i.op[1].imms->X_add_number;
6220
6221 if (size == 2
6222 && !fits_in_unsigned_word (n)
6223 && !fits_in_signed_word (n))
6224 {
6225 as_bad (_("16-bit jump out of range"));
6226 return;
6227 }
6228 md_number_to_chars (p, n, size);
6229 }
6230 else
6231 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6232 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6233 if (i.op[0].imms->X_op != O_constant)
6234     as_bad (_("can't handle non-absolute segment in `%s'"),
6235 i.tm.name);
6236 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6237 }
6238
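/* Emit the assembled instruction: any prefixes (or the VEX prefix),
   the opcode bytes, the ModRM and SIB bytes if present, and finally
   the displacement and immediate operands.  */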
6239 static void
6240 output_insn (void)
6241 {
6242 fragS *insn_start_frag;
6243 offsetT insn_start_off;
6244
6245 /* Tie dwarf2 debug info to the address at the start of the insn.
6246 We can't do this after the insn has been output as the current
6247 frag may have been closed off. eg. by frag_var. */
6248 dwarf2_emit_insn (0);
6249
6250 insn_start_frag = frag_now;
6251 insn_start_off = frag_now_fix ();
6252
6253 /* Output jumps. */
6254 if (i.tm.opcode_modifier.jump)
6255 output_branch ();
6256 else if (i.tm.opcode_modifier.jumpbyte
6257 || i.tm.opcode_modifier.jumpdword)
6258 output_jump ();
6259 else if (i.tm.opcode_modifier.jumpintersegment)
6260 output_interseg_jump ();
6261 else
6262 {
6263 /* Output normal instructions here. */
6264 char *p;
6265 unsigned char *q;
6266 unsigned int j;
6267 unsigned int prefix;
6268
6269       /* Since the VEX prefix encodes the implied legacy prefix, we
6270 	 don't need to emit an explicit prefix byte.  */
6271 if (!i.tm.opcode_modifier.vex)
6272 {
6273 switch (i.tm.opcode_length)
6274 {
6275 case 3:
6276 if (i.tm.base_opcode & 0xff000000)
6277 {
6278 prefix = (i.tm.base_opcode >> 24) & 0xff;
6279 goto check_prefix;
6280 }
6281 break;
6282 case 2:
6283 if ((i.tm.base_opcode & 0xff0000) != 0)
6284 {
6285 prefix = (i.tm.base_opcode >> 16) & 0xff;
6286 if (i.tm.cpu_flags.bitfield.cpupadlock)
6287 {
6288 check_prefix:
6289 if (prefix != REPE_PREFIX_OPCODE
6290 || (i.prefix[REP_PREFIX]
6291 != REPE_PREFIX_OPCODE))
6292 add_prefix (prefix);
6293 }
6294 else
6295 add_prefix (prefix);
6296 }
6297 break;
6298 case 1:
6299 break;
6300 default:
6301 abort ();
6302 }
6303
6304 /* The prefix bytes. */
6305 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6306 if (*q)
6307 FRAG_APPEND_1_CHAR (*q);
6308 }
6309 else
6310 {
6311 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6312 if (*q)
6313 switch (j)
6314 {
6315 case REX_PREFIX:
6316 /* REX byte is encoded in VEX prefix. */
6317 break;
6318 case SEG_PREFIX:
6319 case ADDR_PREFIX:
6320 FRAG_APPEND_1_CHAR (*q);
6321 break;
6322 default:
6323 /* There should be no other prefixes for instructions
6324 		   with a VEX prefix.  */
6325 abort ();
6326 }
6327
6328 /* Now the VEX prefix. */
6329 p = frag_more (i.vex.length);
6330 for (j = 0; j < i.vex.length; j++)
6331 p[j] = i.vex.bytes[j];
6332 }
6333
6334 /* Now the opcode; be careful about word order here! */
6335 if (i.tm.opcode_length == 1)
6336 {
6337 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6338 }
6339 else
6340 {
6341 switch (i.tm.opcode_length)
6342 {
6343 case 3:
6344 p = frag_more (3);
6345 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6346 break;
6347 case 2:
6348 p = frag_more (2);
6349 break;
6350 default:
6351 abort ();
6352 break;
6353 }
6354
6355 /* Put out high byte first: can't use md_number_to_chars! */
6356 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6357 *p = i.tm.base_opcode & 0xff;
6358 }
6359
6360 /* Now the modrm byte and sib byte (if present). */
6361 if (i.tm.opcode_modifier.modrm)
6362 {
6363 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6364 | i.rm.reg << 3
6365 | i.rm.mode << 6));
6366 /* If i.rm.regmem == ESP (4)
6367 && i.rm.mode != (Register mode)
6368 && not 16 bit
6369 	     ==> need a SIB byte.  */
6370 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6371 && i.rm.mode != 3
6372 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6373 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6374 | i.sib.index << 3
6375 | i.sib.scale << 6));
6376 }
6377
6378 if (i.disp_operands)
6379 output_disp (insn_start_frag, insn_start_off);
6380
6381 if (i.imm_operands)
6382 output_imm (insn_start_frag, insn_start_off);
6383 }
6384
6385 #ifdef DEBUG386
6386 if (flag_debug)
6387 {
6388 pi ("" /*line*/, &i);
6389 }
6390 #endif /* DEBUG386 */
6391 }
6392
6393 /* Return the size of the displacement operand N. */
6394
6395 static int
6396 disp_size (unsigned int n)
6397 {
6398 int size = 4;
6399 if (i.types[n].bitfield.disp64)
6400 size = 8;
6401 else if (i.types[n].bitfield.disp8)
6402 size = 1;
6403 else if (i.types[n].bitfield.disp16)
6404 size = 2;
6405 return size;
6406 }
6407
6408 /* Return the size of the immediate operand N. */
6409
6410 static int
6411 imm_size (unsigned int n)
6412 {
6413 int size = 4;
6414 if (i.types[n].bitfield.imm64)
6415 size = 8;
6416 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6417 size = 1;
6418 else if (i.types[n].bitfield.imm16)
6419 size = 2;
6420 return size;
6421 }
6422
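/* Emit the displacement bytes for each displacement operand, creating
   fixups (with GOTPC adjustment where needed) for non-constant values.  */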
6423 static void
6424 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6425 {
6426 char *p;
6427 unsigned int n;
6428
6429 for (n = 0; n < i.operands; n++)
6430 {
6431 if (operand_type_check (i.types[n], disp))
6432 {
6433 if (i.op[n].disps->X_op == O_constant)
6434 {
6435 int size = disp_size (n);
6436 offsetT val;
6437
6438 val = offset_in_range (i.op[n].disps->X_add_number,
6439 size);
6440 p = frag_more (size);
6441 md_number_to_chars (p, val, size);
6442 }
6443 else
6444 {
6445 enum bfd_reloc_code_real reloc_type;
6446 int size = disp_size (n);
6447 int sign = i.types[n].bitfield.disp32s;
6448 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6449
6450 /* We can't have 8 bit displacement here. */
6451 gas_assert (!i.types[n].bitfield.disp8);
6452
6453 /* The PC relative address is computed relative
6454 	       to the instruction boundary, so if immediate
6455 	       fields follow, we need to adjust the value.  */
6456 if (pcrel && i.imm_operands)
6457 {
6458 unsigned int n1;
6459 int sz = 0;
6460
6461 for (n1 = 0; n1 < i.operands; n1++)
6462 if (operand_type_check (i.types[n1], imm))
6463 {
6464 /* Only one immediate is allowed for PC
6465 relative address. */
6466 gas_assert (sz == 0);
6467 sz = imm_size (n1);
6468 i.op[n].disps->X_add_number -= sz;
6469 }
6470 /* We should find the immediate. */
6471 gas_assert (sz != 0);
6472 }
6473
6474 p = frag_more (size);
6475 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6476 if (GOT_symbol
6477 && GOT_symbol == i.op[n].disps->X_add_symbol
6478 && (((reloc_type == BFD_RELOC_32
6479 || reloc_type == BFD_RELOC_X86_64_32S
6480 || (reloc_type == BFD_RELOC_64
6481 && object_64bit))
6482 && (i.op[n].disps->X_op == O_symbol
6483 || (i.op[n].disps->X_op == O_add
6484 && ((symbol_get_value_expression
6485 (i.op[n].disps->X_op_symbol)->X_op)
6486 == O_subtract))))
6487 || reloc_type == BFD_RELOC_32_PCREL))
6488 {
6489 offsetT add;
6490
6491 if (insn_start_frag == frag_now)
6492 add = (p - frag_now->fr_literal) - insn_start_off;
6493 else
6494 {
6495 fragS *fr;
6496
6497 add = insn_start_frag->fr_fix - insn_start_off;
6498 for (fr = insn_start_frag->fr_next;
6499 fr && fr != frag_now; fr = fr->fr_next)
6500 add += fr->fr_fix;
6501 add += p - frag_now->fr_literal;
6502 }
6503
6504 if (!object_64bit)
6505 {
6506 reloc_type = BFD_RELOC_386_GOTPC;
6507 i.op[n].imms->X_add_number += add;
6508 }
6509 else if (reloc_type == BFD_RELOC_64)
6510 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6511 else
6512 /* Don't do the adjustment for x86-64, as there
6513 the pcrel addressing is relative to the _next_
6514 insn, and that is taken care of in other code. */
6515 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6516 }
6517 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6518 i.op[n].disps, pcrel, reloc_type);
6519 }
6520 }
6521 }
6522 }
6523
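/* Emit the immediate bytes for each immediate operand, creating fixups
   (with GOTPC adjustment where needed) for non-constant values.  */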
6524 static void
6525 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6526 {
6527 char *p;
6528 unsigned int n;
6529
6530 for (n = 0; n < i.operands; n++)
6531 {
6532 if (operand_type_check (i.types[n], imm))
6533 {
6534 if (i.op[n].imms->X_op == O_constant)
6535 {
6536 int size = imm_size (n);
6537 offsetT val;
6538
6539 val = offset_in_range (i.op[n].imms->X_add_number,
6540 size);
6541 p = frag_more (size);
6542 md_number_to_chars (p, val, size);
6543 }
6544 else
6545 {
6546 /* Not absolute_section.
6547 Need a 32-bit fixup (don't support 8bit
6548 non-absolute imms). Try to support other
6549 sizes ... */
6550 enum bfd_reloc_code_real reloc_type;
6551 int size = imm_size (n);
6552 int sign;
6553
6554 if (i.types[n].bitfield.imm32s
6555 && (i.suffix == QWORD_MNEM_SUFFIX
6556 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6557 sign = 1;
6558 else
6559 sign = 0;
6560
6561 p = frag_more (size);
6562 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6563
6564 /* This is tough to explain. We end up with this one if we
6565 * have operands that look like
6566 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6567 * obtain the absolute address of the GOT, and it is strongly
6568 * preferable from a performance point of view to avoid using
6569 * a runtime relocation for this. The actual sequence of
6570 	     * instructions often looks something like:
6571 *
6572 * call .L66
6573 * .L66:
6574 * popl %ebx
6575 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6576 *
6577 * The call and pop essentially return the absolute address
6578 * of the label .L66 and store it in %ebx. The linker itself
6579 * will ultimately change the first operand of the addl so
6580 * that %ebx points to the GOT, but to keep things simple, the
6581 * .o file must have this operand set so that it generates not
6582 * the absolute address of .L66, but the absolute address of
6583 	     * itself. This allows the linker to simply treat a GOTPC
6584 * relocation as asking for a pcrel offset to the GOT to be
6585 * added in, and the addend of the relocation is stored in the
6586 * operand field for the instruction itself.
6587 *
6588 * Our job here is to fix the operand so that it would add
6589 * the correct offset so that %ebx would point to itself. The
6590 * thing that is tricky is that .-.L66 will point to the
6591 * beginning of the instruction, so we need to further modify
6592 * the operand so that it will point to itself. There are
6593 * other cases where you have something like:
6594 *
6595 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6596 *
6597 * and here no correction would be required. Internally in
6598 * the assembler we treat operands of this form as not being
6599 * pcrel since the '.' is explicitly mentioned, and I wonder
6600 * whether it would simplify matters to do it this way. Who
6601 * knows. In earlier versions of the PIC patches, the
6602 * pcrel_adjust field was used to store the correction, but
6603 * since the expression is not pcrel, I felt it would be
6604 * confusing to do it this way. */
6605
6606 if ((reloc_type == BFD_RELOC_32
6607 || reloc_type == BFD_RELOC_X86_64_32S
6608 || reloc_type == BFD_RELOC_64)
6609 && GOT_symbol
6610 && GOT_symbol == i.op[n].imms->X_add_symbol
6611 && (i.op[n].imms->X_op == O_symbol
6612 || (i.op[n].imms->X_op == O_add
6613 && ((symbol_get_value_expression
6614 (i.op[n].imms->X_op_symbol)->X_op)
6615 == O_subtract))))
6616 {
6617 offsetT add;
6618
6619 if (insn_start_frag == frag_now)
6620 add = (p - frag_now->fr_literal) - insn_start_off;
6621 else
6622 {
6623 fragS *fr;
6624
6625 add = insn_start_frag->fr_fix - insn_start_off;
6626 for (fr = insn_start_frag->fr_next;
6627 fr && fr != frag_now; fr = fr->fr_next)
6628 add += fr->fr_fix;
6629 add += p - frag_now->fr_literal;
6630 }
6631
6632 if (!object_64bit)
6633 reloc_type = BFD_RELOC_386_GOTPC;
6634 else if (size == 4)
6635 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6636 else if (size == 8)
6637 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6638 i.op[n].imms->X_add_number += add;
6639 }
6640 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6641 i.op[n].imms, 0, reloc_type);
6642 }
6643 }
6644 }
6645 }
6646 \f
6647 /* x86_cons_fix_new is called via the expression parsing code when a
6648 reloc is needed. We use this hook to get the correct .got reloc. */
6649 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6650 static int cons_sign = -1;
6651
6652 void
6653 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6654 expressionS *exp)
6655 {
6656 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6657
6658 got_reloc = NO_RELOC;
6659
6660 #ifdef TE_PE
6661 if (exp->X_op == O_secrel)
6662 {
6663 exp->X_op = O_symbol;
6664 r = BFD_RELOC_32_SECREL;
6665 }
6666 #endif
6667
6668 fix_new_exp (frag, off, len, exp, 0, r);
6669 }
6670
6671 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6672 purpose of the `.dc.a' internal pseudo-op. */
6673
6674 int
6675 x86_address_bytes (void)
6676 {
6677 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6678 return 4;
6679 return stdoutput->arch_info->bits_per_address / 8;
6680 }
6681
6682 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6683 || defined (LEX_AT)
6684 # define lex_got(reloc, adjust, types) NULL
6685 #else
6686 /* Parse operands of the form
6687 <symbol>@GOTOFF+<nnn>
6688 and similar .plt or .got references.
6689
6690 If we find one, set up the correct relocation in RELOC and copy the
6691 input string, minus the `@GOTOFF' into a malloc'd buffer for
6692 parsing by the calling routine. Return this buffer, and if ADJUST
6693 is non-null set it to the length of the string we removed from the
6694 input line. Otherwise return NULL. */
6695 static char *
6696 lex_got (enum bfd_reloc_code_real *rel,
6697 int *adjust,
6698 i386_operand_type *types)
6699 {
6700   /* Some of the relocations depend on the size of the field to be
6701      relocated. But in our callers i386_immediate and i386_displacement
6702 we don't yet know the operand size (this will be set by insn
6703 matching). Hence we record the word32 relocation here,
6704 and adjust the reloc according to the real size in reloc(). */
6705 static const struct {
6706 const char *str;
6707 int len;
6708 const enum bfd_reloc_code_real rel[2];
6709 const i386_operand_type types64;
6710 } gotrel[] = {
6711 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6712 BFD_RELOC_X86_64_PLTOFF64 },
6713 OPERAND_TYPE_IMM64 },
6714 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6715 BFD_RELOC_X86_64_PLT32 },
6716 OPERAND_TYPE_IMM32_32S_DISP32 },
6717 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6718 BFD_RELOC_X86_64_GOTPLT64 },
6719 OPERAND_TYPE_IMM64_DISP64 },
6720 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6721 BFD_RELOC_X86_64_GOTOFF64 },
6722 OPERAND_TYPE_IMM64_DISP64 },
6723 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6724 BFD_RELOC_X86_64_GOTPCREL },
6725 OPERAND_TYPE_IMM32_32S_DISP32 },
6726 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6727 BFD_RELOC_X86_64_TLSGD },
6728 OPERAND_TYPE_IMM32_32S_DISP32 },
6729 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6730 _dummy_first_bfd_reloc_code_real },
6731 OPERAND_TYPE_NONE },
6732 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6733 BFD_RELOC_X86_64_TLSLD },
6734 OPERAND_TYPE_IMM32_32S_DISP32 },
6735 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6736 BFD_RELOC_X86_64_GOTTPOFF },
6737 OPERAND_TYPE_IMM32_32S_DISP32 },
6738 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6739 BFD_RELOC_X86_64_TPOFF32 },
6740 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6741 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6742 _dummy_first_bfd_reloc_code_real },
6743 OPERAND_TYPE_NONE },
6744 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6745 BFD_RELOC_X86_64_DTPOFF32 },
6746 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6747 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6748 _dummy_first_bfd_reloc_code_real },
6749 OPERAND_TYPE_NONE },
6750 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6751 _dummy_first_bfd_reloc_code_real },
6752 OPERAND_TYPE_NONE },
6753 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6754 BFD_RELOC_X86_64_GOT32 },
6755 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6756 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6757 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6758 OPERAND_TYPE_IMM32_32S_DISP32 },
6759 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6760 BFD_RELOC_X86_64_TLSDESC_CALL },
6761 OPERAND_TYPE_IMM32_32S_DISP32 },
6762 };
6763 char *cp;
6764 unsigned int j;
6765
6766 #if defined (OBJ_MAYBE_ELF)
6767 if (!IS_ELF)
6768 return NULL;
6769 #endif
6770
6771 for (cp = input_line_pointer; *cp != '@'; cp++)
6772 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6773 return NULL;
6774
6775 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6776 {
6777 int len = gotrel[j].len;
6778 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6779 {
6780 if (gotrel[j].rel[object_64bit] != 0)
6781 {
6782 int first, second;
6783 char *tmpbuf, *past_reloc;
6784
6785 *rel = gotrel[j].rel[object_64bit];
6786
6787 if (types)
6788 {
6789 if (flag_code != CODE_64BIT)
6790 {
6791 types->bitfield.imm32 = 1;
6792 types->bitfield.disp32 = 1;
6793 }
6794 else
6795 *types = gotrel[j].types64;
6796 }
6797
6798 if (GOT_symbol == NULL)
6799 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6800
6801 /* The length of the first part of our input line. */
6802 first = cp - input_line_pointer;
6803
6804 /* The second part goes from after the reloc token until
6805 (and including) an end_of_line char or comma. */
6806 past_reloc = cp + 1 + len;
6807 cp = past_reloc;
6808 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6809 ++cp;
6810 second = cp + 1 - past_reloc;
6811
6812 /* Allocate and copy string. The trailing NUL shouldn't
6813 be necessary, but be safe. */
6814 tmpbuf = (char *) xmalloc (first + second + 2);
6815 memcpy (tmpbuf, input_line_pointer, first);
6816 if (second != 0 && *past_reloc != ' ')
6817 /* Replace the relocation token with ' ', so that
6818 errors like foo@GOTOFF1 will be detected. */
6819 tmpbuf[first++] = ' ';
6820 else
6821 /* Increment length by 1 if the relocation token is
6822 removed. */
6823 len++;
6824 if (adjust)
6825 *adjust = len;
6826 memcpy (tmpbuf + first, past_reloc, second);
6827 tmpbuf[first + second] = '\0';
6828 return tmpbuf;
6829 }
6830
6831 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6832 gotrel[j].str, 1 << (5 + object_64bit));
6833 return NULL;
6834 }
6835 }
6836
6837 /* Might be a symbol version string. Don't as_bad here. */
6838 return NULL;
6839 }
6840 #endif
6841
6842 #ifdef TE_PE
6843 #ifdef lex_got
6844 #undef lex_got
6845 #endif
6846 /* Parse operands of the form
6847 <symbol>@SECREL32+<nnn>
6848
6849 If we find one, set up the correct relocation in RELOC and copy the
6850 input string, minus the `@SECREL32' into a malloc'd buffer for
6851 parsing by the calling routine. Return this buffer, and if ADJUST
6852 is non-null set it to the length of the string we removed from the
6853 input line. Otherwise return NULL.
6854
6855    This function is copied from the ELF version above, adjusted for PE targets.  */
6856
6857 static char *
6858 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6859 int *adjust ATTRIBUTE_UNUSED,
6860 i386_operand_type *types ATTRIBUTE_UNUSED)
6861 {
6862 static const struct
6863 {
6864 const char *str;
6865 int len;
6866 const enum bfd_reloc_code_real rel[2];
6867 const i386_operand_type types64;
6868 }
6869 gotrel[] =
6870 {
6871 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6872 BFD_RELOC_32_SECREL },
6873 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6874 };
6875
6876 char *cp;
6877 unsigned j;
6878
6879 for (cp = input_line_pointer; *cp != '@'; cp++)
6880 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6881 return NULL;
6882
6883 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6884 {
6885 int len = gotrel[j].len;
6886
6887 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6888 {
6889 if (gotrel[j].rel[object_64bit] != 0)
6890 {
6891 int first, second;
6892 char *tmpbuf, *past_reloc;
6893
6894 *rel = gotrel[j].rel[object_64bit];
6895 if (adjust)
6896 *adjust = len;
6897
6898 if (types)
6899 {
6900 if (flag_code != CODE_64BIT)
6901 {
6902 types->bitfield.imm32 = 1;
6903 types->bitfield.disp32 = 1;
6904 }
6905 else
6906 *types = gotrel[j].types64;
6907 }
6908
6909 /* The length of the first part of our input line. */
6910 first = cp - input_line_pointer;
6911
6912 /* The second part goes from after the reloc token until
6913 (and including) an end_of_line char or comma. */
6914 past_reloc = cp + 1 + len;
6915 cp = past_reloc;
6916 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6917 ++cp;
6918 second = cp + 1 - past_reloc;
6919
6920 /* Allocate and copy string. The trailing NUL shouldn't
6921 be necessary, but be safe. */
6922 tmpbuf = (char *) xmalloc (first + second + 2);
6923 memcpy (tmpbuf, input_line_pointer, first);
6924 if (second != 0 && *past_reloc != ' ')
6925 /* Replace the relocation token with ' ', so that
6926 		   errors like foo@SECREL321 will be detected.  */
6927 tmpbuf[first++] = ' ';
6928 memcpy (tmpbuf + first, past_reloc, second);
6929 tmpbuf[first + second] = '\0';
6930 return tmpbuf;
6931 }
6932
6933 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6934 gotrel[j].str, 1 << (5 + object_64bit));
6935 return NULL;
6936 }
6937 }
6938
6939 /* Might be a symbol version string. Don't as_bad here. */
6940 return NULL;
6941 }
6942
6943 #endif /* TE_PE */
6944
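/* Parse an expression for a data directive such as .long or .quad,
   allowing @GOT-style relocation specifiers via lex_got.  */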
6945 void
6946 x86_cons (expressionS *exp, int size)
6947 {
6948 intel_syntax = -intel_syntax;
6949
6950 exp->X_md = 0;
6951 if (size == 4 || (object_64bit && size == 8))
6952 {
6953 /* Handle @GOTOFF and the like in an expression. */
6954 char *save;
6955 char *gotfree_input_line;
6956 int adjust = 0;
6957
6958 save = input_line_pointer;
6959 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6960 if (gotfree_input_line)
6961 input_line_pointer = gotfree_input_line;
6962
6963 expression (exp);
6964
6965 if (gotfree_input_line)
6966 {
6967 /* expression () has merrily parsed up to the end of line,
6968 or a comma - in the wrong buffer. Transfer how far
6969 input_line_pointer has moved to the right buffer. */
6970 input_line_pointer = (save
6971 + (input_line_pointer - gotfree_input_line)
6972 + adjust);
6973 free (gotfree_input_line);
6974 if (exp->X_op == O_constant
6975 || exp->X_op == O_absent
6976 || exp->X_op == O_illegal
6977 || exp->X_op == O_register
6978 || exp->X_op == O_big)
6979 {
6980 char c = *input_line_pointer;
6981 *input_line_pointer = 0;
6982 as_bad (_("missing or invalid expression `%s'"), save);
6983 *input_line_pointer = c;
6984 }
6985 }
6986 }
6987 else
6988 expression (exp);
6989
6990 intel_syntax = -intel_syntax;
6991
6992 if (intel_syntax)
6993 i386_intel_simplify (exp);
6994 }
6995
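/* Like cons, but force a signed relocation in 64-bit mode; used for
   pseudo-ops such as .slong.  */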
6996 static void
6997 signed_cons (int size)
6998 {
6999 if (flag_code == CODE_64BIT)
7000 cons_sign = 1;
7001 cons (size);
7002 cons_sign = -1;
7003 }
7004
7005 #ifdef TE_PE
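/* Handle the PE .secrel32 directive: emit a 32-bit section-relative
   value for each expression in the comma-separated list.  */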
7006 static void
7007 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7008 {
7009 expressionS exp;
7010
7011 do
7012 {
7013 expression (&exp);
7014 if (exp.X_op == O_symbol)
7015 exp.X_op = O_secrel;
7016
7017 emit_expr (&exp, 4);
7018 }
7019 while (*input_line_pointer++ == ',');
7020
7021 input_line_pointer--;
7022 demand_empty_rest_of_line ();
7023 }
7024 #endif
7025
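/* Parse the immediate operand starting at IMM_START (just past the '$',
   e.g. the `$4' in `movl $4, %eax').  Record the expression and any
   @-relocation, then hand off to i386_finalize_immediate.  Return 1 on
   success, 0 on failure.  */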
7026 static int
7027 i386_immediate (char *imm_start)
7028 {
7029 char *save_input_line_pointer;
7030 char *gotfree_input_line;
7031 segT exp_seg = 0;
7032 expressionS *exp;
7033 i386_operand_type types;
7034
7035 operand_type_set (&types, ~0);
7036
7037 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7038 {
7039 as_bad (_("at most %d immediate operands are allowed"),
7040 MAX_IMMEDIATE_OPERANDS);
7041 return 0;
7042 }
7043
7044 exp = &im_expressions[i.imm_operands++];
7045 i.op[this_operand].imms = exp;
7046
7047 if (is_space_char (*imm_start))
7048 ++imm_start;
7049
7050 save_input_line_pointer = input_line_pointer;
7051 input_line_pointer = imm_start;
7052
7053 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7054 if (gotfree_input_line)
7055 input_line_pointer = gotfree_input_line;
7056
7057 exp_seg = expression (exp);
7058
7059 SKIP_WHITESPACE ();
7060 if (*input_line_pointer)
7061 as_bad (_("junk `%s' after expression"), input_line_pointer);
7062
7063 input_line_pointer = save_input_line_pointer;
7064 if (gotfree_input_line)
7065 {
7066 free (gotfree_input_line);
7067
7068 if (exp->X_op == O_constant || exp->X_op == O_register)
7069 exp->X_op = O_illegal;
7070 }
7071
7072 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7073 }
7074
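/* Validate and classify an immediate expression, whether parsed above
   or by the Intel syntax code.  */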
7075 static int
7076 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7077 i386_operand_type types, const char *imm_start)
7078 {
7079 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7080 {
7081 if (imm_start)
7082 as_bad (_("missing or invalid immediate expression `%s'"),
7083 imm_start);
7084 return 0;
7085 }
7086 else if (exp->X_op == O_constant)
7087 {
7088 /* Size it properly later. */
7089 i.types[this_operand].bitfield.imm64 = 1;
7090 /* If not 64bit, sign extend val. */
7091 if (flag_code != CODE_64BIT
7092 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7093 exp->X_add_number
7094 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7095 }
7096 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7097 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7098 && exp_seg != absolute_section
7099 && exp_seg != text_section
7100 && exp_seg != data_section
7101 && exp_seg != bss_section
7102 && exp_seg != undefined_section
7103 && !bfd_is_com_section (exp_seg))
7104 {
7105 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7106 return 0;
7107 }
7108 #endif
7109 else if (!intel_syntax && exp->X_op == O_register)
7110 {
7111 if (imm_start)
7112 as_bad (_("illegal immediate register operand %s"), imm_start);
7113 return 0;
7114 }
7115 else
7116 {
7117 /* This is an address. The size of the address will be
7118 determined later, depending on destination register,
7119 suffix, or the default for the section. */
7120 i.types[this_operand].bitfield.imm8 = 1;
7121 i.types[this_operand].bitfield.imm16 = 1;
7122 i.types[this_operand].bitfield.imm32 = 1;
7123 i.types[this_operand].bitfield.imm32s = 1;
7124 i.types[this_operand].bitfield.imm64 = 1;
7125 i.types[this_operand] = operand_type_and (i.types[this_operand],
7126 types);
7127 }
7128
7129 return 1;
7130 }
7131
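/* Parse a scale factor of 1, 2, 4 or 8 starting at SCALE (e.g. the `8'
   in `(%ebx,%esi,8)') and store its log2 in i.log2_scale_factor.
   Return a pointer past the parsed text, or NULL on error.  */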
7132 static char *
7133 i386_scale (char *scale)
7134 {
7135 offsetT val;
7136 char *save = input_line_pointer;
7137
7138 input_line_pointer = scale;
7139 val = get_absolute_expression ();
7140
7141 switch (val)
7142 {
7143 case 1:
7144 i.log2_scale_factor = 0;
7145 break;
7146 case 2:
7147 i.log2_scale_factor = 1;
7148 break;
7149 case 4:
7150 i.log2_scale_factor = 2;
7151 break;
7152 case 8:
7153 i.log2_scale_factor = 3;
7154 break;
7155 default:
7156 {
7157 char sep = *input_line_pointer;
7158
7159 *input_line_pointer = '\0';
7160 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7161 scale);
7162 *input_line_pointer = sep;
7163 input_line_pointer = save;
7164 return NULL;
7165 }
7166 }
7167 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7168 {
7169 as_warn (_("scale factor of %d without an index register"),
7170 1 << i.log2_scale_factor);
7171 i.log2_scale_factor = 0;
7172 }
7173 scale = input_line_pointer;
7174 input_line_pointer = save;
7175 return scale;
7176 }
7177
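/* Parse the displacement between DISP_START and DISP_END, recording the
   expression, the set of displacement sizes it may use, and any
   @-relocation.  Return 1 on success, 0 on failure.  */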
7178 static int
7179 i386_displacement (char *disp_start, char *disp_end)
7180 {
7181 expressionS *exp;
7182 segT exp_seg = 0;
7183 char *save_input_line_pointer;
7184 char *gotfree_input_line;
7185 int override;
7186 i386_operand_type bigdisp, types = anydisp;
7187 int ret;
7188
7189 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7190 {
7191 as_bad (_("at most %d displacement operands are allowed"),
7192 MAX_MEMORY_OPERANDS);
7193 return 0;
7194 }
7195
7196 operand_type_set (&bigdisp, 0);
7197 if ((i.types[this_operand].bitfield.jumpabsolute)
7198 || (!current_templates->start->opcode_modifier.jump
7199 && !current_templates->start->opcode_modifier.jumpdword))
7200 {
7201 bigdisp.bitfield.disp32 = 1;
7202 override = (i.prefix[ADDR_PREFIX] != 0);
7203 if (flag_code == CODE_64BIT)
7204 {
7205 if (!override)
7206 {
7207 bigdisp.bitfield.disp32s = 1;
7208 bigdisp.bitfield.disp64 = 1;
7209 }
7210 }
7211 else if ((flag_code == CODE_16BIT) ^ override)
7212 {
7213 bigdisp.bitfield.disp32 = 0;
7214 bigdisp.bitfield.disp16 = 1;
7215 }
7216 }
7217 else
7218 {
7219 /* For PC-relative branches, the width of the displacement
7220 is dependent upon data size, not address size. */
7221 override = (i.prefix[DATA_PREFIX] != 0);
7222 if (flag_code == CODE_64BIT)
7223 {
7224 if (override || i.suffix == WORD_MNEM_SUFFIX)
7225 bigdisp.bitfield.disp16 = 1;
7226 else
7227 {
7228 bigdisp.bitfield.disp32 = 1;
7229 bigdisp.bitfield.disp32s = 1;
7230 }
7231 }
7232 else
7233 {
7234 if (!override)
7235 override = (i.suffix == (flag_code != CODE_16BIT
7236 ? WORD_MNEM_SUFFIX
7237 : LONG_MNEM_SUFFIX));
7238 bigdisp.bitfield.disp32 = 1;
7239 if ((flag_code == CODE_16BIT) ^ override)
7240 {
7241 bigdisp.bitfield.disp32 = 0;
7242 bigdisp.bitfield.disp16 = 1;
7243 }
7244 }
7245 }
7246 i.types[this_operand] = operand_type_or (i.types[this_operand],
7247 bigdisp);
7248
7249 exp = &disp_expressions[i.disp_operands];
7250 i.op[this_operand].disps = exp;
7251 i.disp_operands++;
7252 save_input_line_pointer = input_line_pointer;
7253 input_line_pointer = disp_start;
7254 END_STRING_AND_SAVE (disp_end);
7255
7256 #ifndef GCC_ASM_O_HACK
7257 #define GCC_ASM_O_HACK 0
7258 #endif
7259 #if GCC_ASM_O_HACK
7260 END_STRING_AND_SAVE (disp_end + 1);
7261 if (i.types[this_operand].bitfield.baseIndex
7262 && displacement_string_end[-1] == '+')
7263 {
7264 /* This hack is to avoid a warning when using the "o"
7265 constraint within gcc asm statements.
7266 For instance:
7267
7268 #define _set_tssldt_desc(n,addr,limit,type) \
7269 __asm__ __volatile__ ( \
7270 "movw %w2,%0\n\t" \
7271 "movw %w1,2+%0\n\t" \
7272 "rorl $16,%1\n\t" \
7273 "movb %b1,4+%0\n\t" \
7274 "movb %4,5+%0\n\t" \
7275 "movb $0,6+%0\n\t" \
7276 "movb %h1,7+%0\n\t" \
7277 "rorl $16,%1" \
7278 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7279
7280 This works great except that the output assembler ends
7281 up looking a bit weird if it turns out that there is
7282 no offset. You end up producing code that looks like:
7283
7284 #APP
7285 movw $235,(%eax)
7286 movw %dx,2+(%eax)
7287 rorl $16,%edx
7288 movb %dl,4+(%eax)
7289 movb $137,5+(%eax)
7290 movb $0,6+(%eax)
7291 movb %dh,7+(%eax)
7292 rorl $16,%edx
7293 #NO_APP
7294
7295 So here we provide the missing zero. */
7296
7297 *displacement_string_end = '0';
7298 }
7299 #endif
7300 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7301 if (gotfree_input_line)
7302 input_line_pointer = gotfree_input_line;
7303
7304 exp_seg = expression (exp);
7305
7306 SKIP_WHITESPACE ();
7307 if (*input_line_pointer)
7308 as_bad (_("junk `%s' after expression"), input_line_pointer);
7309 #if GCC_ASM_O_HACK
7310 RESTORE_END_STRING (disp_end + 1);
7311 #endif
7312 input_line_pointer = save_input_line_pointer;
7313 if (gotfree_input_line)
7314 {
7315 free (gotfree_input_line);
7316
7317 if (exp->X_op == O_constant || exp->X_op == O_register)
7318 exp->X_op = O_illegal;
7319 }
7320
7321 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7322
7323 RESTORE_END_STRING (disp_end);
7324
7325 return ret;
7326 }
7327
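/* Validate a displacement expression and narrow the set of displacement
   sizes recorded for this operand.  */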
7328 static int
7329 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7330 i386_operand_type types, const char *disp_start)
7331 {
7332 i386_operand_type bigdisp;
7333 int ret = 1;
7334
7335 /* We do this to make sure that the section symbol is in
7336 the symbol table. We will ultimately change the relocation
7337 to be relative to the beginning of the section. */
7338 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7339 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7340 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7341 {
7342 if (exp->X_op != O_symbol)
7343 goto inv_disp;
7344
7345 if (S_IS_LOCAL (exp->X_add_symbol)
7346 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7347 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7348 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7349 exp->X_op = O_subtract;
7350 exp->X_op_symbol = GOT_symbol;
7351 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7352 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7353 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7354 i.reloc[this_operand] = BFD_RELOC_64;
7355 else
7356 i.reloc[this_operand] = BFD_RELOC_32;
7357 }
7358
7359 else if (exp->X_op == O_absent
7360 || exp->X_op == O_illegal
7361 || exp->X_op == O_big)
7362 {
7363 inv_disp:
7364 as_bad (_("missing or invalid displacement expression `%s'"),
7365 disp_start);
7366 ret = 0;
7367 }
7368
7369 else if (flag_code == CODE_64BIT
7370 && !i.prefix[ADDR_PREFIX]
7371 && exp->X_op == O_constant)
7372 {
7373       /* Since the displacement is sign-extended to 64bit, don't allow
7374 	 disp32, and turn off disp32s if it is out of range.  */
7375 i.types[this_operand].bitfield.disp32 = 0;
7376 if (!fits_in_signed_long (exp->X_add_number))
7377 {
7378 i.types[this_operand].bitfield.disp32s = 0;
7379 if (i.types[this_operand].bitfield.baseindex)
7380 {
7381 	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
7382 (long) exp->X_add_number);
7383 ret = 0;
7384 }
7385 }
7386 }
7387
7388 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7389 else if (exp->X_op != O_constant
7390 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7391 && exp_seg != absolute_section
7392 && exp_seg != text_section
7393 && exp_seg != data_section
7394 && exp_seg != bss_section
7395 && exp_seg != undefined_section
7396 && !bfd_is_com_section (exp_seg))
7397 {
7398 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7399 ret = 0;
7400 }
7401 #endif
7402
7403 /* Check if this is a displacement only operand. */
7404 bigdisp = i.types[this_operand];
7405 bigdisp.bitfield.disp8 = 0;
7406 bigdisp.bitfield.disp16 = 0;
7407 bigdisp.bitfield.disp32 = 0;
7408 bigdisp.bitfield.disp32s = 0;
7409 bigdisp.bitfield.disp64 = 0;
7410 if (operand_type_all_zero (&bigdisp))
7411 i.types[this_operand] = operand_type_and (i.types[this_operand],
7412 types);
7413
7414 return ret;
7415 }
7416
7417 /* Make sure the memory operand we've been dealt is valid.
7418 Return 1 on success, 0 on a failure. */
7419
7420 static int
7421 i386_index_check (const char *operand_string)
7422 {
7423 int ok;
7424 const char *kind = "base/index";
7425 #if INFER_ADDR_PREFIX
7426 int fudged = 0;
7427
7428 tryprefix:
7429 #endif
7430 ok = 1;
7431 if (current_templates->start->opcode_modifier.isstring
7432 && !current_templates->start->opcode_modifier.immext
7433 && (current_templates->end[-1].opcode_modifier.isstring
7434 || i.mem_operands))
7435 {
7436 /* Memory operands of string insns are special in that they only allow
7437 a single register (rDI, rSI, or rBX) as their memory address. */
7438 unsigned int expected;
7439
7440 kind = "string address";
7441
7442 if (current_templates->start->opcode_modifier.w)
7443 {
7444 i386_operand_type type = current_templates->end[-1].operand_types[0];
7445
7446 if (!type.bitfield.baseindex
7447 || ((!i.mem_operands != !intel_syntax)
7448 && current_templates->end[-1].operand_types[1]
7449 .bitfield.baseindex))
7450 type = current_templates->end[-1].operand_types[1];
7451 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7452 }
7453 else
7454 expected = 3 /* rBX */;
7455
7456 if (!i.base_reg || i.index_reg
7457 || operand_type_check (i.types[this_operand], disp))
7458 ok = -1;
7459 else if (!(flag_code == CODE_64BIT
7460 ? i.prefix[ADDR_PREFIX]
7461 ? i.base_reg->reg_type.bitfield.reg32
7462 : i.base_reg->reg_type.bitfield.reg64
7463 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7464 ? i.base_reg->reg_type.bitfield.reg32
7465 : i.base_reg->reg_type.bitfield.reg16))
7466 ok = 0;
7467 else if (register_number (i.base_reg) != expected)
7468 ok = -1;
7469
7470 if (ok < 0)
7471 {
7472 unsigned int j;
7473
7474 for (j = 0; j < i386_regtab_size; ++j)
7475 if ((flag_code == CODE_64BIT
7476 ? i.prefix[ADDR_PREFIX]
7477 ? i386_regtab[j].reg_type.bitfield.reg32
7478 : i386_regtab[j].reg_type.bitfield.reg64
7479 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7480 ? i386_regtab[j].reg_type.bitfield.reg32
7481 : i386_regtab[j].reg_type.bitfield.reg16)
7482 && register_number(i386_regtab + j) == expected)
7483 break;
7484 gas_assert (j < i386_regtab_size);
7485 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7486 operand_string,
7487 intel_syntax ? '[' : '(',
7488 register_prefix,
7489 i386_regtab[j].reg_name,
7490 intel_syntax ? ']' : ')');
7491 ok = 1;
7492 }
7493 }
7494 else if (flag_code == CODE_64BIT)
7495 {
7496 if ((i.base_reg
7497 && ((i.prefix[ADDR_PREFIX] == 0
7498 && !i.base_reg->reg_type.bitfield.reg64)
7499 || (i.prefix[ADDR_PREFIX]
7500 && !i.base_reg->reg_type.bitfield.reg32))
7501 && (i.index_reg
7502 || i.base_reg->reg_num !=
7503 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7504 || (i.index_reg
7505 && !(i.index_reg->reg_type.bitfield.regxmm
7506 || i.index_reg->reg_type.bitfield.regymm)
7507 && (!i.index_reg->reg_type.bitfield.baseindex
7508 || (i.prefix[ADDR_PREFIX] == 0
7509 && i.index_reg->reg_num != RegRiz
7510 && !i.index_reg->reg_type.bitfield.reg64
7511 )
7512 || (i.prefix[ADDR_PREFIX]
7513 && i.index_reg->reg_num != RegEiz
7514 && !i.index_reg->reg_type.bitfield.reg32))))
7515 ok = 0;
7516 }
7517 else
7518 {
7519 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7520 {
7521 /* 16bit checks. */
7522 if ((i.base_reg
7523 && (!i.base_reg->reg_type.bitfield.reg16
7524 || !i.base_reg->reg_type.bitfield.baseindex))
7525 || (i.index_reg
7526 && (!i.index_reg->reg_type.bitfield.reg16
7527 || !i.index_reg->reg_type.bitfield.baseindex
7528 || !(i.base_reg
7529 && i.base_reg->reg_num < 6
7530 && i.index_reg->reg_num >= 6
7531 && i.log2_scale_factor == 0))))
7532 ok = 0;
7533 }
7534 else
7535 {
7536 /* 32bit checks. */
7537 if ((i.base_reg
7538 && !i.base_reg->reg_type.bitfield.reg32)
7539 || (i.index_reg
7540 && !i.index_reg->reg_type.bitfield.regxmm
7541 && !i.index_reg->reg_type.bitfield.regymm
7542 && ((!i.index_reg->reg_type.bitfield.reg32
7543 && i.index_reg->reg_num != RegEiz)
7544 || !i.index_reg->reg_type.bitfield.baseindex)))
7545 ok = 0;
7546 }
7547 }
7548 if (!ok)
7549 {
7550 #if INFER_ADDR_PREFIX
7551 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7552 {
7553 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7554 i.prefixes += 1;
7555 /* Change the size of any displacement too. At most one of
7556 Disp16 or Disp32 is set.
7557 FIXME. There doesn't seem to be any real need for separate
7558 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7559 Removing them would probably clean up the code quite a lot. */
7560 if (flag_code != CODE_64BIT
7561 && (i.types[this_operand].bitfield.disp16
7562 || i.types[this_operand].bitfield.disp32))
7563 i.types[this_operand]
7564 = operand_type_xor (i.types[this_operand], disp16_32);
7565 fudged = 1;
7566 goto tryprefix;
7567 }
7568 if (fudged)
7569 as_bad (_("`%s' is not a valid %s expression"),
7570 operand_string,
7571 kind);
7572 else
7573 #endif
7574 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7575 operand_string,
7576 flag_code_names[i.prefix[ADDR_PREFIX]
7577 ? flag_code == CODE_32BIT
7578 ? CODE_16BIT
7579 : CODE_32BIT
7580 : flag_code],
7581 kind);
7582 }
7583 return ok;
7584 }
7585
7586 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7587 on error. */
7588
7589 static int
7590 i386_att_operand (char *operand_string)
7591 {
7592 const reg_entry *r;
7593 char *end_op;
7594 char *op_string = operand_string;
7595
7596 if (is_space_char (*op_string))
7597 ++op_string;
7598
7599 /* We check for an absolute prefix (differentiating,
7600      for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7601 if (*op_string == ABSOLUTE_PREFIX)
7602 {
7603 ++op_string;
7604 if (is_space_char (*op_string))
7605 ++op_string;
7606 i.types[this_operand].bitfield.jumpabsolute = 1;
7607 }
7608
7609 /* Check if operand is a register. */
7610 if ((r = parse_register (op_string, &end_op)) != NULL)
7611 {
7612 i386_operand_type temp;
7613
7614 /* Check for a segment override by searching for ':' after a
7615 segment register. */
7616 op_string = end_op;
7617 if (is_space_char (*op_string))
7618 ++op_string;
7619 if (*op_string == ':'
7620 && (r->reg_type.bitfield.sreg2
7621 || r->reg_type.bitfield.sreg3))
7622 {
7623 switch (r->reg_num)
7624 {
7625 case 0:
7626 i.seg[i.mem_operands] = &es;
7627 break;
7628 case 1:
7629 i.seg[i.mem_operands] = &cs;
7630 break;
7631 case 2:
7632 i.seg[i.mem_operands] = &ss;
7633 break;
7634 case 3:
7635 i.seg[i.mem_operands] = &ds;
7636 break;
7637 case 4:
7638 i.seg[i.mem_operands] = &fs;
7639 break;
7640 case 5:
7641 i.seg[i.mem_operands] = &gs;
7642 break;
7643 }
7644
7645 /* Skip the ':' and whitespace. */
7646 ++op_string;
7647 if (is_space_char (*op_string))
7648 ++op_string;
7649
7650 if (!is_digit_char (*op_string)
7651 && !is_identifier_char (*op_string)
7652 && *op_string != '('
7653 && *op_string != ABSOLUTE_PREFIX)
7654 {
7655 as_bad (_("bad memory operand `%s'"), op_string);
7656 return 0;
7657 }
7658 /* Handle case of %es:*foo. */
7659 if (*op_string == ABSOLUTE_PREFIX)
7660 {
7661 ++op_string;
7662 if (is_space_char (*op_string))
7663 ++op_string;
7664 i.types[this_operand].bitfield.jumpabsolute = 1;
7665 }
7666 goto do_memory_reference;
7667 }
7668 if (*op_string)
7669 {
7670 as_bad (_("junk `%s' after register"), op_string);
7671 return 0;
7672 }
7673 temp = r->reg_type;
7674 temp.bitfield.baseindex = 0;
7675 i.types[this_operand] = operand_type_or (i.types[this_operand],
7676 temp);
7677 i.types[this_operand].bitfield.unspecified = 0;
7678 i.op[this_operand].regs = r;
7679 i.reg_operands++;
7680 }
7681 else if (*op_string == REGISTER_PREFIX)
7682 {
7683 as_bad (_("bad register name `%s'"), op_string);
7684 return 0;
7685 }
7686 else if (*op_string == IMMEDIATE_PREFIX)
7687 {
7688 ++op_string;
7689 if (i.types[this_operand].bitfield.jumpabsolute)
7690 {
7691 as_bad (_("immediate operand illegal with absolute jump"));
7692 return 0;
7693 }
7694 if (!i386_immediate (op_string))
7695 return 0;
7696 }
7697 else if (is_digit_char (*op_string)
7698 || is_identifier_char (*op_string)
7699 || *op_string == '(')
7700 {
7701 /* This is a memory reference of some sort. */
7702 char *base_string;
7703
7704 /* Start and end of displacement string expression (if found). */
7705 char *displacement_string_start;
7706 char *displacement_string_end;
7707
7708 do_memory_reference:
7709 if ((i.mem_operands == 1
7710 && !current_templates->start->opcode_modifier.isstring)
7711 || i.mem_operands == 2)
7712 {
7713 as_bad (_("too many memory references for `%s'"),
7714 current_templates->start->name);
7715 return 0;
7716 }
7717
7718 /* Check for base index form. We detect the base index form by
7719 looking for an ')' at the end of the operand, searching
7720 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7721 after the '('. */
7722 base_string = op_string + strlen (op_string);
7723
7724 --base_string;
7725 if (is_space_char (*base_string))
7726 --base_string;
7727
7728       /* If we only have a displacement, set up for it to be parsed later.  */
7729 displacement_string_start = op_string;
7730 displacement_string_end = base_string + 1;
7731
7732 if (*base_string == ')')
7733 {
7734 char *temp_string;
7735 unsigned int parens_balanced = 1;
7736 	  /* We've already checked that the number of left & right ()'s is
7737 	     equal, so this loop will not be infinite.  */
7738 do
7739 {
7740 base_string--;
7741 if (*base_string == ')')
7742 parens_balanced++;
7743 if (*base_string == '(')
7744 parens_balanced--;
7745 }
7746 while (parens_balanced);
7747
7748 temp_string = base_string;
7749
7750 /* Skip past '(' and whitespace. */
7751 ++base_string;
7752 if (is_space_char (*base_string))
7753 ++base_string;
7754
7755 if (*base_string == ','
7756 || ((i.base_reg = parse_register (base_string, &end_op))
7757 != NULL))
7758 {
7759 displacement_string_end = temp_string;
7760
7761 i.types[this_operand].bitfield.baseindex = 1;
7762
7763 if (i.base_reg)
7764 {
7765 base_string = end_op;
7766 if (is_space_char (*base_string))
7767 ++base_string;
7768 }
7769
7770 /* There may be an index reg or scale factor here. */
7771 if (*base_string == ',')
7772 {
7773 ++base_string;
7774 if (is_space_char (*base_string))
7775 ++base_string;
7776
7777 if ((i.index_reg = parse_register (base_string, &end_op))
7778 != NULL)
7779 {
7780 base_string = end_op;
7781 if (is_space_char (*base_string))
7782 ++base_string;
7783 if (*base_string == ',')
7784 {
7785 ++base_string;
7786 if (is_space_char (*base_string))
7787 ++base_string;
7788 }
7789 else if (*base_string != ')')
7790 {
7791 as_bad (_("expecting `,' or `)' "
7792 "after index register in `%s'"),
7793 operand_string);
7794 return 0;
7795 }
7796 }
7797 else if (*base_string == REGISTER_PREFIX)
7798 {
7799 end_op = strchr (base_string, ',');
7800 if (end_op)
7801 *end_op = '\0';
7802 as_bad (_("bad register name `%s'"), base_string);
7803 return 0;
7804 }
7805
7806 /* Check for scale factor. */
7807 if (*base_string != ')')
7808 {
7809 char *end_scale = i386_scale (base_string);
7810
7811 if (!end_scale)
7812 return 0;
7813
7814 base_string = end_scale;
7815 if (is_space_char (*base_string))
7816 ++base_string;
7817 if (*base_string != ')')
7818 {
7819 as_bad (_("expecting `)' "
7820 "after scale factor in `%s'"),
7821 operand_string);
7822 return 0;
7823 }
7824 }
7825 else if (!i.index_reg)
7826 {
7827 as_bad (_("expecting index register or scale factor "
7828 "after `,'; got '%c'"),
7829 *base_string);
7830 return 0;
7831 }
7832 }
7833 else if (*base_string != ')')
7834 {
7835 as_bad (_("expecting `,' or `)' "
7836 "after base register in `%s'"),
7837 operand_string);
7838 return 0;
7839 }
7840 }
7841 else if (*base_string == REGISTER_PREFIX)
7842 {
7843 end_op = strchr (base_string, ',');
7844 if (end_op)
7845 *end_op = '\0';
7846 as_bad (_("bad register name `%s'"), base_string);
7847 return 0;
7848 }
7849 }
7850
7851 /* If there's an expression beginning the operand, parse it,
7852 assuming displacement_string_start and
7853 displacement_string_end are meaningful. */
7854 if (displacement_string_start != displacement_string_end)
7855 {
7856 if (!i386_displacement (displacement_string_start,
7857 displacement_string_end))
7858 return 0;
7859 }
7860
7861 /* Special case for (%dx) while doing input/output op. */
7862 if (i.base_reg
7863 && operand_type_equal (&i.base_reg->reg_type,
7864 &reg16_inoutportreg)
7865 && i.index_reg == 0
7866 && i.log2_scale_factor == 0
7867 && i.seg[i.mem_operands] == 0
7868 && !operand_type_check (i.types[this_operand], disp))
7869 {
7870 i.types[this_operand] = inoutportreg;
7871 return 1;
7872 }
7873
7874 if (i386_index_check (operand_string) == 0)
7875 return 0;
7876 i.types[this_operand].bitfield.mem = 1;
7877 i.mem_operands++;
7878 }
7879 else
7880 {
7881 /* It's not a memory operand; argh! */
7882 as_bad (_("invalid char %s beginning operand %d `%s'"),
7883 output_invalid (*op_string),
7884 this_operand + 1,
7885 op_string);
7886 return 0;
7887 }
7888 return 1; /* Normal return. */
7889 }
7890 \f
7891 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7892 that an rs_machine_dependent frag may reach. */
7893
7894 unsigned int
7895 i386_frag_max_var (fragS *frag)
7896 {
7897 /* The only relaxable frags are for jumps.
7898 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7899 gas_assert (frag->fr_type == rs_machine_dependent);
7900 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7901 }
7902
7903 /* md_estimate_size_before_relax()
7904
7905 Called just before relax() for rs_machine_dependent frags. The x86
7906 assembler uses these frags to handle variable size jump
7907 instructions.
7908
7909 Any symbol that is now undefined will not become defined.
7910 Return the correct fr_subtype in the frag.
7911 Return the initial "guess for variable size of frag" to caller.
7912 The guess is actually the growth beyond the fixed part. Whatever
7913 we do to grow the fixed or variable part contributes to our
7914 returned value. */
7915
7916 int
7917 md_estimate_size_before_relax (fragS *fragP, segT segment)
7918 {
7919 /* We've already got fragP->fr_subtype right; all we have to do is
7920 check for un-relaxable symbols. On an ELF system, we can't relax
7921 an externally visible symbol, because it may be overridden by a
7922 shared library. */
7923 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7924 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7925 || (IS_ELF
7926 && (S_IS_EXTERNAL (fragP->fr_symbol)
7927 || S_IS_WEAK (fragP->fr_symbol)
7928 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7929 & BSF_GNU_INDIRECT_FUNCTION))))
7930 #endif
7931 #if defined (OBJ_COFF) && defined (TE_PE)
7932 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7933 && S_IS_WEAK (fragP->fr_symbol))
7934 #endif
7935 )
7936 {
7937 /* Symbol is undefined in this segment, or we need to keep a
7938 reloc so that weak symbols can be overridden. */
7939 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7940 enum bfd_reloc_code_real reloc_type;
7941 unsigned char *opcode;
7942 int old_fr_fix;
7943
7944 if (fragP->fr_var != NO_RELOC)
7945 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7946 else if (size == 2)
7947 reloc_type = BFD_RELOC_16_PCREL;
7948 else
7949 reloc_type = BFD_RELOC_32_PCREL;
7950
7951 old_fr_fix = fragP->fr_fix;
7952 opcode = (unsigned char *) fragP->fr_opcode;
7953
7954 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7955 {
7956 case UNCOND_JUMP:
7957 /* Make jmp (0xeb) a (d)word displacement jump. */
7958 opcode[0] = 0xe9;
7959 fragP->fr_fix += size;
7960 fix_new (fragP, old_fr_fix, size,
7961 fragP->fr_symbol,
7962 fragP->fr_offset, 1,
7963 reloc_type);
7964 break;
7965
7966 case COND_JUMP86:
7967 if (size == 2
7968 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7969 {
7970 /* Negate the condition, and branch past an
7971 unconditional jump. */
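	      /* Illustrative example: a 16-bit `je target' that no longer
		 reaches becomes `jne .+3' followed by `jmp target' with a
		 16-bit displacement; XORing bit 0 of the Jcc opcode flips
		 the condition (0x74 `je' <-> 0x75 `jne').  */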
7972 opcode[0] ^= 1;
7973 opcode[1] = 3;
7974 /* Insert an unconditional jump. */
7975 opcode[2] = 0xe9;
7976 /* We added two extra opcode bytes, and have a two byte
7977 offset. */
7978 fragP->fr_fix += 2 + 2;
7979 fix_new (fragP, old_fr_fix + 2, 2,
7980 fragP->fr_symbol,
7981 fragP->fr_offset, 1,
7982 reloc_type);
7983 break;
7984 }
7985 /* Fall through. */
7986
7987 case COND_JUMP:
7988 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7989 {
7990 fixS *fixP;
7991
7992 fragP->fr_fix += 1;
7993 fixP = fix_new (fragP, old_fr_fix, 1,
7994 fragP->fr_symbol,
7995 fragP->fr_offset, 1,
7996 BFD_RELOC_8_PCREL);
7997 fixP->fx_signed = 1;
7998 break;
7999 }
8000
8001 /* This changes the byte-displacement jump 0x7N
8002 to the (d)word-displacement jump 0x0f,0x8N. */
8003 opcode[1] = opcode[0] + 0x10;
8004 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8005 /* We've added an opcode byte. */
8006 fragP->fr_fix += 1 + size;
8007 fix_new (fragP, old_fr_fix + 1, size,
8008 fragP->fr_symbol,
8009 fragP->fr_offset, 1,
8010 reloc_type);
8011 break;
8012
8013 default:
8014 BAD_CASE (fragP->fr_subtype);
8015 break;
8016 }
8017 frag_wane (fragP);
8018 return fragP->fr_fix - old_fr_fix;
8019 }
8020
8021 /* Guess size depending on current relax state. Initially the relax
8022 state will correspond to a short jump and we return 1, because
8023 the variable part of the frag (the branch offset) is one byte
8024 long. However, we can relax a section more than once and in that
8025 case we must either set fr_subtype back to the unrelaxed state,
8026 or return the value for the appropriate branch. */
8027 return md_relax_table[fragP->fr_subtype].rlx_length;
8028 }
8029
8030 /* Called after relax() is finished.
8031
8032 In: Address of frag.
8033 fr_type == rs_machine_dependent.
8034 fr_subtype is what the address relaxed to.
8035
8036 Out: Any fixSs and constants are set up.
8037 Caller will turn frag into a ".space 0". */
8038
8039 void
8040 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8041 fragS *fragP)
8042 {
8043 unsigned char *opcode;
8044 unsigned char *where_to_put_displacement = NULL;
8045 offsetT target_address;
8046 offsetT opcode_address;
8047 unsigned int extension = 0;
8048 offsetT displacement_from_opcode_start;
8049
8050 opcode = (unsigned char *) fragP->fr_opcode;
8051
8052 /* Address we want to reach in file space. */
8053 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8054
8055 /* Address opcode resides at in file space. */
8056 opcode_address = fragP->fr_address + fragP->fr_fix;
8057
8058 /* Displacement from opcode start to fill into instruction. */
8059 displacement_from_opcode_start = target_address - opcode_address;
8060
8061 if ((fragP->fr_subtype & BIG) == 0)
8062 {
8063 /* Don't have to change opcode. */
8064 extension = 1; /* 1 opcode + 1 displacement */
8065 where_to_put_displacement = &opcode[1];
8066 }
8067 else
8068 {
8069 if (no_cond_jump_promotion
8070 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8071 as_warn_where (fragP->fr_file, fragP->fr_line,
8072 _("long jump required"));
8073
8074 switch (fragP->fr_subtype)
8075 {
8076 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8077 extension = 4; /* 1 opcode + 4 displacement */
8078 opcode[0] = 0xe9;
8079 where_to_put_displacement = &opcode[1];
8080 break;
8081
8082 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8083 extension = 2; /* 1 opcode + 2 displacement */
8084 opcode[0] = 0xe9;
8085 where_to_put_displacement = &opcode[1];
8086 break;
8087
8088 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8089 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8090 extension = 5; /* 2 opcode + 4 displacement */
8091 opcode[1] = opcode[0] + 0x10;
8092 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8093 where_to_put_displacement = &opcode[2];
8094 break;
8095
8096 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8097 extension = 3; /* 2 opcode + 2 displacement */
8098 opcode[1] = opcode[0] + 0x10;
8099 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8100 where_to_put_displacement = &opcode[2];
8101 break;
8102
8103 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8104 extension = 4;
8105 opcode[0] ^= 1;
8106 opcode[1] = 3;
8107 opcode[2] = 0xe9;
8108 where_to_put_displacement = &opcode[3];
8109 break;
8110
8111 default:
8112 BAD_CASE (fragP->fr_subtype);
8113 break;
8114 }
8115 }
8116
8117 /* If the size is less than four we are sure that the operand fits,
8118 but if it's 4, then the displacement could be larger
8119 than -/+ 2GB. */
8120 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8121 && object_64bit
8122 && ((addressT) (displacement_from_opcode_start - extension
8123 + ((addressT) 1 << 31))
8124 > (((addressT) 2 << 31) - 1)))
8125 {
8126 as_bad_where (fragP->fr_file, fragP->fr_line,
8127 _("jump target out of range"));
8128 /* Make us emit 0. */
8129 displacement_from_opcode_start = extension;
8130 }
8131 /* Now put displacement after opcode. */
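  /* Subtracting EXTENSION rebases the value so that it is relative to
     the first byte after the finished instruction, which is how the
     processor interprets a branch displacement.  */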
8132 md_number_to_chars ((char *) where_to_put_displacement,
8133 (valueT) (displacement_from_opcode_start - extension),
8134 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8135 fragP->fr_fix += extension;
8136 }
8137 \f
8138 /* Apply a fixup (fixP) to segment data, once it has been determined
8139 by our caller that we have all the info we need to fix it up.
8140
8141 Parameter valP is the pointer to the value of the bits.
8142
8143 On the 386, immediates, displacements, and data pointers are all in
8144 the same (little-endian) format, so we don't need to care about which
8145 we are handling. */
8146
8147 void
8148 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8149 {
8150 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8151 valueT value = *valP;
8152
8153 #if !defined (TE_Mach)
8154 if (fixP->fx_pcrel)
8155 {
8156 switch (fixP->fx_r_type)
8157 {
8158 default:
8159 break;
8160
8161 case BFD_RELOC_64:
8162 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8163 break;
8164 case BFD_RELOC_32:
8165 case BFD_RELOC_X86_64_32S:
8166 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8167 break;
8168 case BFD_RELOC_16:
8169 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8170 break;
8171 case BFD_RELOC_8:
8172 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8173 break;
8174 }
8175 }
8176
8177 if (fixP->fx_addsy != NULL
8178 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8179 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8180 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8181 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8182 && !use_rela_relocations)
8183 {
8184 /* This is a hack. There should be a better way to handle this.
8185 This covers for the fact that bfd_install_relocation will
8186 subtract the current location (for partial_inplace, PC relative
8187 relocations); see more below. */
8188 #ifndef OBJ_AOUT
8189 if (IS_ELF
8190 #ifdef TE_PE
8191 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8192 #endif
8193 )
8194 value += fixP->fx_where + fixP->fx_frag->fr_address;
8195 #endif
8196 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8197 if (IS_ELF)
8198 {
8199 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8200
8201 if ((sym_seg == seg
8202 || (symbol_section_p (fixP->fx_addsy)
8203 && sym_seg != absolute_section))
8204 && !generic_force_reloc (fixP))
8205 {
8206 /* Yes, we add the values in twice. This is because
8207 bfd_install_relocation subtracts them out again. I think
8208 bfd_install_relocation is broken, but I don't dare change
8209 it. FIXME. */
8210 value += fixP->fx_where + fixP->fx_frag->fr_address;
8211 }
8212 }
8213 #endif
8214 #if defined (OBJ_COFF) && defined (TE_PE)
8215 /* For some reason, the PE format does not store a
8216 section address offset for a PC relative symbol. */
8217 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8218 || S_IS_WEAK (fixP->fx_addsy))
8219 value += md_pcrel_from (fixP);
8220 #endif
8221 }
8222 #if defined (OBJ_COFF) && defined (TE_PE)
8223 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8224 {
8225 value -= S_GET_VALUE (fixP->fx_addsy);
8226 }
8227 #endif
8228
8229 /* Fix a few things - the dynamic linker expects certain values here,
8230 and we must not disappoint it. */
8231 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8232 if (IS_ELF && fixP->fx_addsy)
8233 switch (fixP->fx_r_type)
8234 {
8235 case BFD_RELOC_386_PLT32:
8236 case BFD_RELOC_X86_64_PLT32:
8237 /* Make the jump instruction point to the address of the operand. At
8238 runtime we merely add the offset to the actual PLT entry. */
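	/* Storing -4 compensates for the 4-byte PC-relative field itself:
	   the processor's base for the displacement is the byte just past
	   that field, i.e. the next instruction.  */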
8239 value = -4;
8240 break;
8241
8242 case BFD_RELOC_386_TLS_GD:
8243 case BFD_RELOC_386_TLS_LDM:
8244 case BFD_RELOC_386_TLS_IE_32:
8245 case BFD_RELOC_386_TLS_IE:
8246 case BFD_RELOC_386_TLS_GOTIE:
8247 case BFD_RELOC_386_TLS_GOTDESC:
8248 case BFD_RELOC_X86_64_TLSGD:
8249 case BFD_RELOC_X86_64_TLSLD:
8250 case BFD_RELOC_X86_64_GOTTPOFF:
8251 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8252 value = 0; /* Fully resolved at runtime. No addend. */
8253 /* Fallthrough */
8254 case BFD_RELOC_386_TLS_LE:
8255 case BFD_RELOC_386_TLS_LDO_32:
8256 case BFD_RELOC_386_TLS_LE_32:
8257 case BFD_RELOC_X86_64_DTPOFF32:
8258 case BFD_RELOC_X86_64_DTPOFF64:
8259 case BFD_RELOC_X86_64_TPOFF32:
8260 case BFD_RELOC_X86_64_TPOFF64:
8261 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8262 break;
8263
8264 case BFD_RELOC_386_TLS_DESC_CALL:
8265 case BFD_RELOC_X86_64_TLSDESC_CALL:
8266 value = 0; /* Fully resolved at runtime. No addend. */
8267 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8268 fixP->fx_done = 0;
8269 return;
8270
8271 case BFD_RELOC_386_GOT32:
8272 case BFD_RELOC_X86_64_GOT32:
8273 value = 0; /* Fully resolved at runtime. No addend. */
8274 break;
8275
8276 case BFD_RELOC_VTABLE_INHERIT:
8277 case BFD_RELOC_VTABLE_ENTRY:
8278 fixP->fx_done = 0;
8279 return;
8280
8281 default:
8282 break;
8283 }
8284 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8285 *valP = value;
8286 #endif /* !defined (TE_Mach) */
8287
8288 /* Are we finished with this relocation now? */
8289 if (fixP->fx_addsy == NULL)
8290 fixP->fx_done = 1;
8291 #if defined (OBJ_COFF) && defined (TE_PE)
8292 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8293 {
8294 fixP->fx_done = 0;
8295 /* Remember value for tc_gen_reloc. */
8296 fixP->fx_addnumber = value;
8297 /* Clear out the frag for now. */
8298 value = 0;
8299 }
8300 #endif
8301 else if (use_rela_relocations)
8302 {
8303 fixP->fx_no_overflow = 1;
8304 /* Remember value for tc_gen_reloc. */
8305 fixP->fx_addnumber = value;
8306 value = 0;
8307 }
8308
8309 md_number_to_chars (p, value, fixP->fx_size);
8310 }
8311 \f
8312 char *
8313 md_atof (int type, char *litP, int *sizeP)
8314 {
8315 /* This outputs the LITTLENUMs in REVERSE order;
8316 in accord with the little-endian 386. */
8317 return ieee_md_atof (type, litP, sizeP, FALSE);
8318 }
8319 \f
8320 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8321
8322 static char *
8323 output_invalid (int c)
8324 {
8325 if (ISPRINT (c))
8326 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8327 "'%c'", c);
8328 else
8329 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8330 "(0x%x)", (unsigned char) c);
8331 return output_invalid_buf;
8332 }
8333
8334 /* REG_STRING starts *before* REGISTER_PREFIX. */
8335
8336 static const reg_entry *
8337 parse_real_register (char *reg_string, char **end_op)
8338 {
8339 char *s = reg_string;
8340 char *p;
8341 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8342 const reg_entry *r;
8343
8344 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8345 if (*s == REGISTER_PREFIX)
8346 ++s;
8347
8348 if (is_space_char (*s))
8349 ++s;
8350
8351 p = reg_name_given;
8352 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8353 {
8354 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8355 return (const reg_entry *) NULL;
8356 s++;
8357 }
8358
8359 /* For naked regs, make sure that we are not dealing with an identifier.
8360 This prevents confusing an identifier like `eax_var' with register
8361 `eax'. */
8362 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8363 return (const reg_entry *) NULL;
8364
8365 *end_op = s;
8366
8367 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8368
8369 /* Handle floating point regs, allowing spaces in the (i) part. */
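  /* E.g. `%st(3)', with optional blanks as in `%st ( 3 )', resolves to
     the table entry for st(0) plus 3.  */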
8370 if (r == i386_regtab /* %st is first entry of table */)
8371 {
8372 if (is_space_char (*s))
8373 ++s;
8374 if (*s == '(')
8375 {
8376 ++s;
8377 if (is_space_char (*s))
8378 ++s;
8379 if (*s >= '0' && *s <= '7')
8380 {
8381 int fpr = *s - '0';
8382 ++s;
8383 if (is_space_char (*s))
8384 ++s;
8385 if (*s == ')')
8386 {
8387 *end_op = s + 1;
8388 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8389 know (r);
8390 return r + fpr;
8391 }
8392 }
8393 /* We have "%st(" then garbage. */
8394 return (const reg_entry *) NULL;
8395 }
8396 }
8397
8398 if (r == NULL || allow_pseudo_reg)
8399 return r;
8400
8401 if (operand_type_all_zero (&r->reg_type))
8402 return (const reg_entry *) NULL;
8403
8404 if ((r->reg_type.bitfield.reg32
8405 || r->reg_type.bitfield.sreg3
8406 || r->reg_type.bitfield.control
8407 || r->reg_type.bitfield.debug
8408 || r->reg_type.bitfield.test)
8409 && !cpu_arch_flags.bitfield.cpui386)
8410 return (const reg_entry *) NULL;
8411
8412 if (r->reg_type.bitfield.floatreg
8413 && !cpu_arch_flags.bitfield.cpu8087
8414 && !cpu_arch_flags.bitfield.cpu287
8415 && !cpu_arch_flags.bitfield.cpu387)
8416 return (const reg_entry *) NULL;
8417
8418 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8419 return (const reg_entry *) NULL;
8420
8421 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8422 return (const reg_entry *) NULL;
8423
8424 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8425 return (const reg_entry *) NULL;
8426
8427 /* Allow the fake index registers (%eiz/%riz) only when allow_index_reg is nonzero. */
8428 if (!allow_index_reg
8429 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8430 return (const reg_entry *) NULL;
8431
8432 if (((r->reg_flags & (RegRex64 | RegRex))
8433 || r->reg_type.bitfield.reg64)
8434 && (!cpu_arch_flags.bitfield.cpulm
8435 || !operand_type_equal (&r->reg_type, &control))
8436 && flag_code != CODE_64BIT)
8437 return (const reg_entry *) NULL;
8438
8439 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8440 return (const reg_entry *) NULL;
8441
8442 return r;
8443 }
8444
8445 /* REG_STRING starts *before* REGISTER_PREFIX. */
8446
8447 static const reg_entry *
8448 parse_register (char *reg_string, char **end_op)
8449 {
8450 const reg_entry *r;
8451
8452 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8453 r = parse_real_register (reg_string, end_op);
8454 else
8455 r = NULL;
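      /* Also accept a symbol that was equated to a register (for example
	 via an assembler equate); such a symbol lives in reg_section and
	 its value expression is an O_register.  */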
8456 if (!r)
8457 {
8458 char *save = input_line_pointer;
8459 char c;
8460 symbolS *symbolP;
8461
8462 input_line_pointer = reg_string;
8463 c = get_symbol_end ();
8464 symbolP = symbol_find (reg_string);
8465 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8466 {
8467 const expressionS *e = symbol_get_value_expression (symbolP);
8468
8469 know (e->X_op == O_register);
8470 know (e->X_add_number >= 0
8471 && (valueT) e->X_add_number < i386_regtab_size);
8472 r = i386_regtab + e->X_add_number;
8473 *end_op = input_line_pointer;
8474 }
8475 *input_line_pointer = c;
8476 input_line_pointer = save;
8477 }
8478 return r;
8479 }
8480
8481 int
8482 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8483 {
8484 const reg_entry *r;
8485 char *end = input_line_pointer;
8486
8487 *end = *nextcharP;
8488 r = parse_register (name, &input_line_pointer);
8489 if (r && end <= input_line_pointer)
8490 {
8491 *nextcharP = *input_line_pointer;
8492 *input_line_pointer = 0;
8493 e->X_op = O_register;
8494 e->X_add_number = r - i386_regtab;
8495 return 1;
8496 }
8497 input_line_pointer = end;
8498 *end = 0;
8499 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8500 }
8501
8502 void
8503 md_operand (expressionS *e)
8504 {
8505 char *end;
8506 const reg_entry *r;
8507
8508 switch (*input_line_pointer)
8509 {
8510 case REGISTER_PREFIX:
8511 r = parse_real_register (input_line_pointer, &end);
8512 if (r)
8513 {
8514 e->X_op = O_register;
8515 e->X_add_number = r - i386_regtab;
8516 input_line_pointer = end;
8517 }
8518 break;
8519
8520 case '[':
8521 gas_assert (intel_syntax);
8522 end = input_line_pointer++;
8523 expression (e);
8524 if (*input_line_pointer == ']')
8525 {
8526 ++input_line_pointer;
8527 e->X_op_symbol = make_expr_symbol (e);
8528 e->X_add_symbol = NULL;
8529 e->X_add_number = 0;
8530 e->X_op = O_index;
8531 }
8532 else
8533 {
8534 e->X_op = O_absent;
8535 input_line_pointer = end;
8536 }
8537 break;
8538 }
8539 }
8540
8541 \f
8542 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8543 const char *md_shortopts = "kVQ:sqn";
8544 #else
8545 const char *md_shortopts = "qn";
8546 #endif
8547
8548 #define OPTION_32 (OPTION_MD_BASE + 0)
8549 #define OPTION_64 (OPTION_MD_BASE + 1)
8550 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8551 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8552 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8553 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8554 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8555 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8556 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8557 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8558 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8559 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8560 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8561 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8562 #define OPTION_X32 (OPTION_MD_BASE + 14)
8563
8564 struct option md_longopts[] =
8565 {
8566 {"32", no_argument, NULL, OPTION_32},
8567 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8568 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8569 {"64", no_argument, NULL, OPTION_64},
8570 #endif
8571 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8572 {"x32", no_argument, NULL, OPTION_X32},
8573 #endif
8574 {"divide", no_argument, NULL, OPTION_DIVIDE},
8575 {"march", required_argument, NULL, OPTION_MARCH},
8576 {"mtune", required_argument, NULL, OPTION_MTUNE},
8577 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8578 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8579 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8580 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8581 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8582 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8583 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8584 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8585 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8586 {NULL, no_argument, NULL, 0}
8587 };
8588 size_t md_longopts_size = sizeof (md_longopts);
8589
8590 int
8591 md_parse_option (int c, char *arg)
8592 {
8593 unsigned int j;
8594 char *arch, *next;
8595
8596 switch (c)
8597 {
8598 case 'n':
8599 optimize_align_code = 0;
8600 break;
8601
8602 case 'q':
8603 quiet_warnings = 1;
8604 break;
8605
8606 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8607 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8608 should be emitted or not. FIXME: Not implemented. */
8609 case 'Q':
8610 break;
8611
8612 /* -V: SVR4 argument to print version ID. */
8613 case 'V':
8614 print_version_id ();
8615 break;
8616
8617 /* -k: Ignore for FreeBSD compatibility. */
8618 case 'k':
8619 break;
8620
8621 case 's':
8622 /* -s: On i386 Solaris, this tells the native assembler to use
8623 .stab instead of .stab.excl. We always use .stab anyhow. */
8624 break;
8625 #endif
8626 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8627 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8628 case OPTION_64:
8629 {
8630 const char **list, **l;
8631
8632 list = bfd_target_list ();
8633 for (l = list; *l != NULL; l++)
8634 if (CONST_STRNEQ (*l, "elf64-x86-64")
8635 || strcmp (*l, "coff-x86-64") == 0
8636 || strcmp (*l, "pe-x86-64") == 0
8637 || strcmp (*l, "pei-x86-64") == 0
8638 || strcmp (*l, "mach-o-x86-64") == 0)
8639 {
8640 default_arch = "x86_64";
8641 break;
8642 }
8643 if (*l == NULL)
8644 as_fatal (_("no compiled in support for x86_64"));
8645 free (list);
8646 }
8647 break;
8648 #endif
8649
8650 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8651 case OPTION_X32:
8652 if (IS_ELF)
8653 {
8654 const char **list, **l;
8655
8656 list = bfd_target_list ();
8657 for (l = list; *l != NULL; l++)
8658 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8659 {
8660 default_arch = "x86_64:32";
8661 break;
8662 }
8663 if (*l == NULL)
8664 as_fatal (_("no compiled in support for 32bit x86_64"));
8665 free (list);
8666 }
8667 else
8668 as_fatal (_("32bit x86_64 is only supported for ELF"));
8669 break;
8670 #endif
8671
8672 case OPTION_32:
8673 default_arch = "i386";
8674 break;
8675
8676 case OPTION_DIVIDE:
8677 #ifdef SVR4_COMMENT_CHARS
8678 {
8679 char *n, *t;
8680 const char *s;
8681
8682 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8683 t = n;
8684 for (s = i386_comment_chars; *s != '\0'; s++)
8685 if (*s != '/')
8686 *t++ = *s;
8687 *t = '\0';
8688 i386_comment_chars = n;
8689 }
8690 #endif
8691 break;
8692
8693 case OPTION_MARCH:
8694 arch = xstrdup (arg);
8695 do
8696 {
8697 if (*arch == '.')
8698 as_fatal (_("invalid -march= option: `%s'"), arg);
8699 next = strchr (arch, '+');
8700 if (next)
8701 *next++ = '\0';
8702 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8703 {
8704 if (strcmp (arch, cpu_arch [j].name) == 0)
8705 {
8706 /* Processor. */
8707 if (! cpu_arch[j].flags.bitfield.cpui386)
8708 continue;
8709
8710 cpu_arch_name = cpu_arch[j].name;
8711 cpu_sub_arch_name = NULL;
8712 cpu_arch_flags = cpu_arch[j].flags;
8713 cpu_arch_isa = cpu_arch[j].type;
8714 cpu_arch_isa_flags = cpu_arch[j].flags;
8715 if (!cpu_arch_tune_set)
8716 {
8717 cpu_arch_tune = cpu_arch_isa;
8718 cpu_arch_tune_flags = cpu_arch_isa_flags;
8719 }
8720 break;
8721 }
8722 else if (*cpu_arch [j].name == '.'
8723 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8724 {
8725 /* ISA extension. */
8726 i386_cpu_flags flags;
8727
8728 if (!cpu_arch[j].negated)
8729 flags = cpu_flags_or (cpu_arch_flags,
8730 cpu_arch[j].flags);
8731 else
8732 flags = cpu_flags_and_not (cpu_arch_flags,
8733 cpu_arch[j].flags);
8734 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8735 {
8736 if (cpu_sub_arch_name)
8737 {
8738 char *name = cpu_sub_arch_name;
8739 cpu_sub_arch_name = concat (name,
8740 cpu_arch[j].name,
8741 (const char *) NULL);
8742 free (name);
8743 }
8744 else
8745 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8746 cpu_arch_flags = flags;
8747 cpu_arch_isa_flags = flags;
8748 }
8749 break;
8750 }
8751 }
8752
8753 if (j >= ARRAY_SIZE (cpu_arch))
8754 as_fatal (_("invalid -march= option: `%s'"), arg);
8755
8756 arch = next;
8757 }
8758 while (next != NULL);
8759 break;
8760
8761 case OPTION_MTUNE:
8762 if (*arg == '.')
8763 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8764 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8765 {
8766 if (strcmp (arg, cpu_arch [j].name) == 0)
8767 {
8768 cpu_arch_tune_set = 1;
8769 cpu_arch_tune = cpu_arch [j].type;
8770 cpu_arch_tune_flags = cpu_arch[j].flags;
8771 break;
8772 }
8773 }
8774 if (j >= ARRAY_SIZE (cpu_arch))
8775 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8776 break;
8777
8778 case OPTION_MMNEMONIC:
8779 if (strcasecmp (arg, "att") == 0)
8780 intel_mnemonic = 0;
8781 else if (strcasecmp (arg, "intel") == 0)
8782 intel_mnemonic = 1;
8783 else
8784 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8785 break;
8786
8787 case OPTION_MSYNTAX:
8788 if (strcasecmp (arg, "att") == 0)
8789 intel_syntax = 0;
8790 else if (strcasecmp (arg, "intel") == 0)
8791 intel_syntax = 1;
8792 else
8793 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8794 break;
8795
8796 case OPTION_MINDEX_REG:
8797 allow_index_reg = 1;
8798 break;
8799
8800 case OPTION_MNAKED_REG:
8801 allow_naked_reg = 1;
8802 break;
8803
8804 case OPTION_MOLD_GCC:
8805 old_gcc = 1;
8806 break;
8807
8808 case OPTION_MSSE2AVX:
8809 sse2avx = 1;
8810 break;
8811
8812 case OPTION_MSSE_CHECK:
8813 if (strcasecmp (arg, "error") == 0)
8814 sse_check = check_error;
8815 else if (strcasecmp (arg, "warning") == 0)
8816 sse_check = check_warning;
8817 else if (strcasecmp (arg, "none") == 0)
8818 sse_check = check_none;
8819 else
8820 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8821 break;
8822
8823 case OPTION_MOPERAND_CHECK:
8824 if (strcasecmp (arg, "error") == 0)
8825 operand_check = check_error;
8826 else if (strcasecmp (arg, "warning") == 0)
8827 operand_check = check_warning;
8828 else if (strcasecmp (arg, "none") == 0)
8829 operand_check = check_none;
8830 else
8831 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8832 break;
8833
8834 case OPTION_MAVXSCALAR:
8835 if (strcasecmp (arg, "128") == 0)
8836 avxscalar = vex128;
8837 else if (strcasecmp (arg, "256") == 0)
8838 avxscalar = vex256;
8839 else
8840 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8841 break;
8842
8843 default:
8844 return 0;
8845 }
8846 return 1;
8847 }
8848
8849 #define MESSAGE_TEMPLATE \
8850 " "
8851
8852 static void
8853 show_arch (FILE *stream, int ext, int check)
8854 {
8855 static char message[] = MESSAGE_TEMPLATE;
8856 char *start = message + 27;
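  /* The 27-column indent is presumably chosen so that continuation
     lines line up under the option text printed by md_show_usage
     (an assumption; the offset is not documented here).  */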
8857 char *p;
8858 int size = sizeof (MESSAGE_TEMPLATE);
8859 int left;
8860 const char *name;
8861 int len;
8862 unsigned int j;
8863
8864 p = start;
8865 left = size - (start - message);
8866 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8867 {
8868 /* Should it be skipped? */
8869 if (cpu_arch [j].skip)
8870 continue;
8871
8872 name = cpu_arch [j].name;
8873 len = cpu_arch [j].len;
8874 if (*name == '.')
8875 {
8876 /* It is an extension. Skip if we aren't asked to show it. */
8877 if (ext)
8878 {
8879 name++;
8880 len--;
8881 }
8882 else
8883 continue;
8884 }
8885 else if (ext)
8886 {
8887 /* It is a processor. Skip it if we only show extensions. */
8888 continue;
8889 }
8890 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8891 {
8892 /* It is an impossible processor - skip. */
8893 continue;
8894 }
8895
8896 /* Reserve 2 spaces for ", " or ",\0" */
8897 left -= len + 2;
8898
8899 /* Check if there is any room. */
8900 if (left >= 0)
8901 {
8902 if (p != start)
8903 {
8904 *p++ = ',';
8905 *p++ = ' ';
8906 }
8907 p = mempcpy (p, name, len);
8908 }
8909 else
8910 {
8911 /* Output the current message now and start a new one. */
8912 *p++ = ',';
8913 *p = '\0';
8914 fprintf (stream, "%s\n", message);
8915 p = start;
8916 left = size - (start - message) - len - 2;
8917
8918 gas_assert (left >= 0);
8919
8920 p = mempcpy (p, name, len);
8921 }
8922 }
8923
8924 *p = '\0';
8925 fprintf (stream, "%s\n", message);
8926 }
8927
8928 void
8929 md_show_usage (FILE *stream)
8930 {
8931 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8932 fprintf (stream, _("\
8933 -Q ignored\n\
8934 -V print assembler version number\n\
8935 -k ignored\n"));
8936 #endif
8937 fprintf (stream, _("\
8938 -n Do not optimize code alignment\n\
8939 -q quieten some warnings\n"));
8940 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8941 fprintf (stream, _("\
8942 -s ignored\n"));
8943 #endif
8944 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8945 || defined (TE_PE) || defined (TE_PEP))
8946 fprintf (stream, _("\
8947 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8948 #endif
8949 #ifdef SVR4_COMMENT_CHARS
8950 fprintf (stream, _("\
8951 --divide do not treat `/' as a comment character\n"));
8952 #else
8953 fprintf (stream, _("\
8954 --divide ignored\n"));
8955 #endif
8956 fprintf (stream, _("\
8957 -march=CPU[,+EXTENSION...]\n\
8958 generate code for CPU and EXTENSION, CPU is one of:\n"));
8959 show_arch (stream, 0, 1);
8960 fprintf (stream, _("\
8961 EXTENSION is combination of:\n"));
8962 show_arch (stream, 1, 0);
8963 fprintf (stream, _("\
8964 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8965 show_arch (stream, 0, 0);
8966 fprintf (stream, _("\
8967 -msse2avx encode SSE instructions with VEX prefix\n"));
8968 fprintf (stream, _("\
8969 -msse-check=[none|error|warning]\n\
8970 check SSE instructions\n"));
8971 fprintf (stream, _("\
8972 -moperand-check=[none|error|warning]\n\
8973 check operand combinations for validity\n"));
8974 fprintf (stream, _("\
8975 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8976 length\n"));
8977 fprintf (stream, _("\
8978 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8979 fprintf (stream, _("\
8980 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8981 fprintf (stream, _("\
8982 -mindex-reg support pseudo index registers\n"));
8983 fprintf (stream, _("\
8984 -mnaked-reg don't require `%%' prefix for registers\n"));
8985 fprintf (stream, _("\
8986 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8987 }
8988
8989 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8990 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8991 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8992
8993 /* Pick the target format to use. */
8994
8995 const char *
8996 i386_target_format (void)
8997 {
8998 if (!strncmp (default_arch, "x86_64", 6))
8999 {
9000 update_code_flag (CODE_64BIT, 1);
9001 if (default_arch[6] == '\0')
9002 x86_elf_abi = X86_64_ABI;
9003 else
9004 x86_elf_abi = X86_64_X32_ABI;
9005 }
9006 else if (!strcmp (default_arch, "i386"))
9007 update_code_flag (CODE_32BIT, 1);
9008 else
9009 as_fatal (_("unknown architecture"));
9010
9011 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9012 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9013 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9014 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9015
9016 switch (OUTPUT_FLAVOR)
9017 {
9018 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9019 case bfd_target_aout_flavour:
9020 return AOUT_TARGET_FORMAT;
9021 #endif
9022 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9023 # if defined (TE_PE) || defined (TE_PEP)
9024 case bfd_target_coff_flavour:
9025 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9026 # elif defined (TE_GO32)
9027 case bfd_target_coff_flavour:
9028 return "coff-go32";
9029 # else
9030 case bfd_target_coff_flavour:
9031 return "coff-i386";
9032 # endif
9033 #endif
9034 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9035 case bfd_target_elf_flavour:
9036 {
9037 const char *format;
9038
9039 switch (x86_elf_abi)
9040 {
9041 default:
9042 format = ELF_TARGET_FORMAT;
9043 break;
9044 case X86_64_ABI:
9045 use_rela_relocations = 1;
9046 object_64bit = 1;
9047 format = ELF_TARGET_FORMAT64;
9048 break;
9049 case X86_64_X32_ABI:
9050 use_rela_relocations = 1;
9051 object_64bit = 1;
9052 disallow_64bit_reloc = 1;
9053 format = ELF_TARGET_FORMAT32;
9054 break;
9055 }
9056 if (cpu_arch_isa == PROCESSOR_L1OM)
9057 {
9058 if (x86_elf_abi != X86_64_ABI)
9059 as_fatal (_("Intel L1OM is 64bit only"));
9060 return ELF_TARGET_L1OM_FORMAT;
9061 }
9062 if (cpu_arch_isa == PROCESSOR_K1OM)
9063 {
9064 if (x86_elf_abi != X86_64_ABI)
9065 as_fatal (_("Intel K1OM is 64bit only"));
9066 return ELF_TARGET_K1OM_FORMAT;
9067 }
9068 else
9069 return format;
9070 }
9071 #endif
9072 #if defined (OBJ_MACH_O)
9073 case bfd_target_mach_o_flavour:
9074 if (flag_code == CODE_64BIT)
9075 {
9076 use_rela_relocations = 1;
9077 object_64bit = 1;
9078 return "mach-o-x86-64";
9079 }
9080 else
9081 return "mach-o-i386";
9082 #endif
9083 default:
9084 abort ();
9085 return NULL;
9086 }
9087 }
9088
9089 #endif /* OBJ_MAYBE_ more than one */
9090
9091 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9092 void
9093 i386_elf_emit_arch_note (void)
9094 {
9095 if (IS_ELF && cpu_arch_name != NULL)
9096 {
9097 char *p;
9098 asection *seg = now_seg;
9099 subsegT subseg = now_subseg;
9100 Elf_Internal_Note i_note;
9101 Elf_External_Note e_note;
9102 asection *note_secp;
9103 int len;
9104
9105 /* Create the .note section. */
9106 note_secp = subseg_new (".note", 0);
9107 bfd_set_section_flags (stdoutput,
9108 note_secp,
9109 SEC_HAS_CONTENTS | SEC_READONLY);
9110
9111 /* Process the arch string. */
9112 len = strlen (cpu_arch_name);
9113
9114 i_note.namesz = len + 1;
9115 i_note.descsz = 0;
9116 i_note.type = NT_ARCH;
9117 p = frag_more (sizeof (e_note.namesz));
9118 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9119 p = frag_more (sizeof (e_note.descsz));
9120 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9121 p = frag_more (sizeof (e_note.type));
9122 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9123 p = frag_more (len + 1);
9124 strcpy (p, cpu_arch_name);
9125
9126 frag_align (2, 0, 0);
9127
9128 subseg_set (seg, subseg);
9129 }
9130 }
9131 #endif
9132 \f
9133 symbolS *
9134 md_undefined_symbol (char *name)
9135 {
9136 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9137 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9138 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9139 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9140 {
9141 if (!GOT_symbol)
9142 {
9143 if (symbol_find (name))
9144 as_bad (_("GOT already in symbol table"));
9145 GOT_symbol = symbol_new (name, undefined_section,
9146 (valueT) 0, &zero_address_frag);
9147 }
9148 return GOT_symbol;
9149 }
9150 return 0;
9151 }
9152
9153 /* Round up a section size to the appropriate boundary. */
9154
9155 valueT
9156 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9157 {
9158 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9159 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9160 {
9161 /* For a.out, force the section size to be aligned. If we don't do
9162 this, BFD will align it for us, but it will not write out the
9163 final bytes of the section. This may be a bug in BFD, but it is
9164 easier to fix it here since that is how the other a.out targets
9165 work. */
9166 int align;
9167
9168 align = bfd_get_section_alignment (stdoutput, segment);
9169 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9170 }
9171 #endif
9172
9173 return size;
9174 }
9175
9176 /* On the i386, PC-relative offsets are relative to the start of the
9177 next instruction. That is, the address of the offset, plus its
9178 size, since the offset is always the last part of the insn. */
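/* E.g. for a near `call foo' the 4-byte fixup is the last part of the
   instruction, so fx_where + fx_size is the address of the following
   instruction, which is the base the processor uses.  */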
9179
9180 long
9181 md_pcrel_from (fixS *fixP)
9182 {
9183 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9184 }
9185
9186 #ifndef I386COFF
9187
9188 static void
9189 s_bss (int ignore ATTRIBUTE_UNUSED)
9190 {
9191 int temp;
9192
9193 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9194 if (IS_ELF)
9195 obj_elf_section_change_hook ();
9196 #endif
9197 temp = get_absolute_expression ();
9198 subseg_set (bss_section, (subsegT) temp);
9199 demand_empty_rest_of_line ();
9200 }
9201
9202 #endif
9203
9204 void
9205 i386_validate_fix (fixS *fixp)
9206 {
9207 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9208 {
9209 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9210 {
9211 if (!object_64bit)
9212 abort ();
9213 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9214 }
9215 else
9216 {
9217 if (!object_64bit)
9218 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9219 else
9220 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9221 }
9222 fixp->fx_subsy = 0;
9223 }
9224 }
9225
9226 arelent *
9227 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9228 {
9229 arelent *rel;
9230 bfd_reloc_code_real_type code;
9231
9232 switch (fixp->fx_r_type)
9233 {
9234 case BFD_RELOC_X86_64_PLT32:
9235 case BFD_RELOC_X86_64_GOT32:
9236 case BFD_RELOC_X86_64_GOTPCREL:
9237 case BFD_RELOC_386_PLT32:
9238 case BFD_RELOC_386_GOT32:
9239 case BFD_RELOC_386_GOTOFF:
9240 case BFD_RELOC_386_GOTPC:
9241 case BFD_RELOC_386_TLS_GD:
9242 case BFD_RELOC_386_TLS_LDM:
9243 case BFD_RELOC_386_TLS_LDO_32:
9244 case BFD_RELOC_386_TLS_IE_32:
9245 case BFD_RELOC_386_TLS_IE:
9246 case BFD_RELOC_386_TLS_GOTIE:
9247 case BFD_RELOC_386_TLS_LE_32:
9248 case BFD_RELOC_386_TLS_LE:
9249 case BFD_RELOC_386_TLS_GOTDESC:
9250 case BFD_RELOC_386_TLS_DESC_CALL:
9251 case BFD_RELOC_X86_64_TLSGD:
9252 case BFD_RELOC_X86_64_TLSLD:
9253 case BFD_RELOC_X86_64_DTPOFF32:
9254 case BFD_RELOC_X86_64_DTPOFF64:
9255 case BFD_RELOC_X86_64_GOTTPOFF:
9256 case BFD_RELOC_X86_64_TPOFF32:
9257 case BFD_RELOC_X86_64_TPOFF64:
9258 case BFD_RELOC_X86_64_GOTOFF64:
9259 case BFD_RELOC_X86_64_GOTPC32:
9260 case BFD_RELOC_X86_64_GOT64:
9261 case BFD_RELOC_X86_64_GOTPCREL64:
9262 case BFD_RELOC_X86_64_GOTPC64:
9263 case BFD_RELOC_X86_64_GOTPLT64:
9264 case BFD_RELOC_X86_64_PLTOFF64:
9265 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9266 case BFD_RELOC_X86_64_TLSDESC_CALL:
9267 case BFD_RELOC_RVA:
9268 case BFD_RELOC_VTABLE_ENTRY:
9269 case BFD_RELOC_VTABLE_INHERIT:
9270 #ifdef TE_PE
9271 case BFD_RELOC_32_SECREL:
9272 #endif
9273 code = fixp->fx_r_type;
9274 break;
9275 case BFD_RELOC_X86_64_32S:
9276 if (!fixp->fx_pcrel)
9277 {
9278 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9279 code = fixp->fx_r_type;
9280 break;
9281 }
9282 default:
9283 if (fixp->fx_pcrel)
9284 {
9285 switch (fixp->fx_size)
9286 {
9287 default:
9288 as_bad_where (fixp->fx_file, fixp->fx_line,
9289 _("can not do %d byte pc-relative relocation"),
9290 fixp->fx_size);
9291 code = BFD_RELOC_32_PCREL;
9292 break;
9293 case 1: code = BFD_RELOC_8_PCREL; break;
9294 case 2: code = BFD_RELOC_16_PCREL; break;
9295 case 4: code = BFD_RELOC_32_PCREL; break;
9296 #ifdef BFD64
9297 case 8: code = BFD_RELOC_64_PCREL; break;
9298 #endif
9299 }
9300 }
9301 else
9302 {
9303 switch (fixp->fx_size)
9304 {
9305 default:
9306 as_bad_where (fixp->fx_file, fixp->fx_line,
9307 _("can not do %d byte relocation"),
9308 fixp->fx_size);
9309 code = BFD_RELOC_32;
9310 break;
9311 case 1: code = BFD_RELOC_8; break;
9312 case 2: code = BFD_RELOC_16; break;
9313 case 4: code = BFD_RELOC_32; break;
9314 #ifdef BFD64
9315 case 8: code = BFD_RELOC_64; break;
9316 #endif
9317 }
9318 }
9319 break;
9320 }
9321
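  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ itself becomes the
     special GOTPC relocation, e.g. for the classic 32-bit PIC prologue
     `addl $_GLOBAL_OFFSET_TABLE_, %ebx'.  */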
9322 if ((code == BFD_RELOC_32
9323 || code == BFD_RELOC_32_PCREL
9324 || code == BFD_RELOC_X86_64_32S)
9325 && GOT_symbol
9326 && fixp->fx_addsy == GOT_symbol)
9327 {
9328 if (!object_64bit)
9329 code = BFD_RELOC_386_GOTPC;
9330 else
9331 code = BFD_RELOC_X86_64_GOTPC32;
9332 }
9333 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9334 && GOT_symbol
9335 && fixp->fx_addsy == GOT_symbol)
9336 {
9337 code = BFD_RELOC_X86_64_GOTPC64;
9338 }
9339
9340 rel = (arelent *) xmalloc (sizeof (arelent));
9341 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9342 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9343
9344 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9345
9346 if (!use_rela_relocations)
9347 {
9348 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9349 vtable entry to be used in the relocation's section offset. */
9350 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9351 rel->address = fixp->fx_offset;
9352 #if defined (OBJ_COFF) && defined (TE_PE)
9353 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9354 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9355 else
9356 #endif
9357 rel->addend = 0;
9358 }
9359 /* Use rela relocations in 64-bit mode. */
9360 else
9361 {
9362 if (disallow_64bit_reloc)
9363 switch (code)
9364 {
9365 case BFD_RELOC_X86_64_DTPOFF64:
9366 case BFD_RELOC_X86_64_TPOFF64:
9367 case BFD_RELOC_64_PCREL:
9368 case BFD_RELOC_X86_64_GOTOFF64:
9369 case BFD_RELOC_X86_64_GOT64:
9370 case BFD_RELOC_X86_64_GOTPCREL64:
9371 case BFD_RELOC_X86_64_GOTPC64:
9372 case BFD_RELOC_X86_64_GOTPLT64:
9373 case BFD_RELOC_X86_64_PLTOFF64:
9374 as_bad_where (fixp->fx_file, fixp->fx_line,
9375 _("cannot represent relocation type %s in x32 mode"),
9376 bfd_get_reloc_code_name (code));
9377 break;
9378 default:
9379 break;
9380 }
9381
9382 if (!fixp->fx_pcrel)
9383 rel->addend = fixp->fx_offset;
9384 else
9385 switch (code)
9386 {
9387 case BFD_RELOC_X86_64_PLT32:
9388 case BFD_RELOC_X86_64_GOT32:
9389 case BFD_RELOC_X86_64_GOTPCREL:
9390 case BFD_RELOC_X86_64_TLSGD:
9391 case BFD_RELOC_X86_64_TLSLD:
9392 case BFD_RELOC_X86_64_GOTTPOFF:
9393 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9394 case BFD_RELOC_X86_64_TLSDESC_CALL:
9395 rel->addend = fixp->fx_offset - fixp->fx_size;
9396 break;
9397 default:
9398 rel->addend = (section->vma
9399 - fixp->fx_size
9400 + fixp->fx_addnumber
9401 + md_pcrel_from (fixp));
9402 break;
9403 }
9404 }
9405
9406 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9407 if (rel->howto == NULL)
9408 {
9409 as_bad_where (fixp->fx_file, fixp->fx_line,
9410 _("cannot represent relocation type %s"),
9411 bfd_get_reloc_code_name (code));
9412 /* Set howto to a garbage value so that we can keep going. */
9413 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9414 gas_assert (rel->howto != NULL);
9415 }
9416
9417 return rel;
9418 }
9419
9420 #include "tc-i386-intel.c"
9421
9422 void
9423 tc_x86_parse_to_dw2regnum (expressionS *exp)
9424 {
9425 int saved_naked_reg;
9426 char saved_register_dot;
9427
9428 saved_naked_reg = allow_naked_reg;
9429 allow_naked_reg = 1;
9430 saved_register_dot = register_chars['.'];
9431 register_chars['.'] = '.';
9432 allow_pseudo_reg = 1;
9433 expression_and_evaluate (exp);
9434 allow_pseudo_reg = 0;
9435 register_chars['.'] = saved_register_dot;
9436 allow_naked_reg = saved_naked_reg;
9437
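  /* flag_code >> 1 selects the dw2_regnum column: 16- and 32-bit code
     share column 0 and 64-bit code uses column 1 (assuming the usual
     CODE_32BIT/CODE_16BIT/CODE_64BIT enum ordering).  */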
9438 if (exp->X_op == O_register && exp->X_add_number >= 0)
9439 {
9440 if ((addressT) exp->X_add_number < i386_regtab_size)
9441 {
9442 exp->X_op = O_constant;
9443 exp->X_add_number = i386_regtab[exp->X_add_number]
9444 .dw2_regnum[flag_code >> 1];
9445 }
9446 else
9447 exp->X_op = O_illegal;
9448 }
9449 }
9450
9451 void
9452 tc_x86_frame_initial_instructions (void)
9453 {
9454 static unsigned int sp_regno[2];
9455
9456 if (!sp_regno[flag_code >> 1])
9457 {
9458 char *saved_input = input_line_pointer;
9459 char sp[][4] = {"esp", "rsp"};
9460 expressionS exp;
9461
9462 input_line_pointer = sp[flag_code >> 1];
9463 tc_x86_parse_to_dw2regnum (&exp);
9464 gas_assert (exp.X_op == O_constant);
9465 sp_regno[flag_code >> 1] = exp.X_add_number;
9466 input_line_pointer = saved_input;
9467 }
9468
9469 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9470 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9471 }
9472
9473 int
9474 x86_dwarf2_addr_size (void)
9475 {
9476 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9477 if (x86_elf_abi == X86_64_X32_ABI)
9478 return 4;
9479 #endif
9480 return bfd_arch_bits_per_address (stdoutput) / 8;
9481 }
9482
9483 int
9484 i386_elf_section_type (const char *str, size_t len)
9485 {
9486 if (flag_code == CODE_64BIT
9487 && len == sizeof ("unwind") - 1
9488 && strncmp (str, "unwind", 6) == 0)
9489 return SHT_X86_64_UNWIND;
9490
9491 return -1;
9492 }
9493
9494 #ifdef TE_SOLARIS
9495 void
9496 i386_solaris_fix_up_eh_frame (segT sec)
9497 {
9498 if (flag_code == CODE_64BIT)
9499 elf_section_type (sec) = SHT_X86_64_UNWIND;
9500 }
9501 #endif
9502
9503 #ifdef TE_PE
9504 void
9505 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9506 {
9507 expressionS exp;
9508
9509 exp.X_op = O_secrel;
9510 exp.X_add_symbol = symbol;
9511 exp.X_add_number = 0;
9512 emit_expr (&exp, size);
9513 }
9514 #endif
9515
9516 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9517 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
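/* For example (assumed usage), `.section .ldata,"awl",@progbits' uses
   the `l' flag letter handled below to request SHF_X86_64_LARGE.  */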
9518
9519 bfd_vma
9520 x86_64_section_letter (int letter, char **ptr_msg)
9521 {
9522 if (flag_code == CODE_64BIT)
9523 {
9524 if (letter == 'l')
9525 return SHF_X86_64_LARGE;
9526
9527 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9528 }
9529 else
9530 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9531 return -1;
9532 }
9533
9534 bfd_vma
9535 x86_64_section_word (char *str, size_t len)
9536 {
9537 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9538 return SHF_X86_64_LARGE;
9539
9540 return -1;
9541 }
9542
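/* Handler for the `.largecomm' directive: like `.comm', but in 64-bit
   mode the symbol goes to the large common section, with `.lbss' used
   for local definitions; in 32-bit mode it falls back to `.comm' (see
   the warning below).  */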
9543 static void
9544 handle_large_common (int small ATTRIBUTE_UNUSED)
9545 {
9546 if (flag_code != CODE_64BIT)
9547 {
9548 s_comm_internal (0, elf_common_parse);
9549 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9550 }
9551 else
9552 {
9553 static segT lbss_section;
9554 asection *saved_com_section_ptr = elf_com_section_ptr;
9555 asection *saved_bss_section = bss_section;
9556
9557 if (lbss_section == NULL)
9558 {
9559 flagword applicable;
9560 segT seg = now_seg;
9561 subsegT subseg = now_subseg;
9562
9563 /* The .lbss section is for local .largecomm symbols. */
9564 lbss_section = subseg_new (".lbss", 0);
9565 applicable = bfd_applicable_section_flags (stdoutput);
9566 bfd_set_section_flags (stdoutput, lbss_section,
9567 applicable & SEC_ALLOC);
9568 seg_info (lbss_section)->bss = 1;
9569
9570 subseg_set (seg, subseg);
9571 }
9572
9573 elf_com_section_ptr = &_bfd_elf_large_com_section;
9574 bss_section = lbss_section;
9575
9576 s_comm_internal (0, elf_common_parse);
9577
9578 elf_com_section_ptr = saved_com_section_ptr;
9579 bss_section = saved_bss_section;
9580 }
9581 }
9582 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */