Implement Intel SMAP instructions
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
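
/* Illustrative sketch (not part of the original source): a hypothetical
   "lock addw %ax, %fs:(%ebx)" assembled in 64-bit mode ends up with 0xf0 in
   i.prefix[LOCK_PREFIX], 0x64 in i.prefix[SEG_PREFIX], 0x66 in
   i.prefix[DATA_PREFIX] and 0x67 in i.prefix[ADDR_PREFIX]; the prefix bytes
   are then emitted in the slot order defined above.  */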
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
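
/* Worked example (illustrative only): for "addl %ebx, %eax" the ModRM byte
   is 0xd8, i.e. mode = 3 (register direct), reg = 3 (%ebx, the source) and
   regmem = 0 (%eax, the destination).  */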
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
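
/* Worked example (illustrative only): the memory operand (%eax,%ebx,4)
   encodes as SIB byte 0x98, i.e. scale = 2 (log2 of 4), index = 3 (%ebx),
   base = 0 (%eax).  */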
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 or 3 bytes. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
201 /* 'md_assemble ()' gathers together information and puts it into an
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
226 };
227
228 struct _i386_insn
229 {
230 /* TM holds the template for the insn we're currently assembling. */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* Have HLE prefix. */
294 unsigned int have_hle;
295
296 /* Error message. */
297 enum i386_error error;
298 };
299
300 typedef struct _i386_insn i386_insn;
301
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
305 #ifdef LEX_AT
306 "@"
307 #endif
308 #ifdef LEX_QM
309 "?"
310 #endif
311 ;
312
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
328
329 #else
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
332 #endif
333
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
343
344 const char line_separator_chars[] = ";";
345
346 /* Chars that can be used to separate mant from exp in floating point
347 nums. */
348 const char EXP_CHARS[] = "eE";
349
350 /* Chars that mean this number is a floating point constant
351 As in 0f12.456
352 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
354
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
361
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
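
/* A minimal usage sketch of the two macros above (illustrative only, never
   compiled in; the helper name is made up for this example).  */
#if 0
static void
example_save_and_restore (char *mnemonic_end)
{
  END_STRING_AND_SAVE (mnemonic_end);	/* Smash *mnemonic_end with '\0',
					   remembering the old character.  */
  /* ... look up the now NUL-terminated mnemonic, e.g. in op_hash ...  */
  RESTORE_END_STRING (mnemonic_end);	/* Put the saved character back.  */
}
#endif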
383
384 /* The instruction we're assembling. */
385 static i386_insn i;
386
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
389
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393
394 /* Current operand we are working on. */
395 static int this_operand = -1;
396
397 /* We support three different modes. The FLAG_CODE variable is used to distinguish
398 these. */
399
400 enum flag_code {
401 CODE_32BIT,
402 CODE_16BIT,
403 CODE_64BIT };
404
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
409
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413
414 /* The ELF ABI to use. */
415 enum x86_elf_abi
416 {
417 I386_ABI,
418 X86_64_ABI,
419 X86_64_X32_ABI
420 };
421
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
423 #endif
424
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
427 {
428 "32",
429 "16",
430 "64"
431 };
432
433 /* 1 for intel syntax,
434 0 if att syntax. */
435 static int intel_syntax = 0;
436
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
440
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
443
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
446
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
449
450 /* 1 if pseudo index register, eiz/riz, is allowed. */
451 static int allow_index_reg = 0;
452
453 static enum check_kind
454 {
455 check_none = 0,
456 check_warning,
457 check_error
458 }
459 sse_check, operand_check = check_warning;
460
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
463
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
468
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
471
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
474
475 /* CPU name. */
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
478
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
484
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
490
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
496
497 /* If set, conditional jumps are not automatically promoted to handle
498 offsets larger than a byte. */
499 static unsigned int no_cond_jump_promotion = 0;
500
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
503
504 /* Encode scalar AVX instructions with specific vector length. */
505 static enum
506 {
507 vex128 = 0,
508 vex256
509 } avxscalar;
510
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
513
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
516
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
519
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
524
525 /* Types. */
526 #define UNCOND_JUMP 0
527 #define COND_JUMP 1
528 #define COND_JUMP86 2
529
530 /* Sizes. */
531 #define CODE16 1
532 #define SMALL 0
533 #define SMALL16 (SMALL | CODE16)
534 #define BIG 2
535 #define BIG16 (BIG | CODE16)
536
537 #ifndef INLINE
538 #ifdef __GNUC__
539 #define INLINE __inline__
540 #else
541 #define INLINE
542 #endif
543 #endif
544
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
548 ((s) >> 2)
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
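
/* Worked examples (illustrative only):
   ENCODE_RELAX_STATE (COND_JUMP, SMALL) == 4,
   ENCODE_RELAX_STATE (COND_JUMP, BIG) == 6,
   TYPE_FROM_RELAX_STATE (6) == COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) == 4 (a 32-bit displacement).  */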
551
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work unless the destination is in the bottom
558 64k of the code segment (the top 16 bits of eip are zeroed). */
559
560 const relax_typeS md_relax_table[] =
561 {
562 /* The fields are:
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
567
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
573 {0, 0, 4, 0},
574 /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
576 {0, 0, 2, 0},
577
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
581 /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
583 {0, 0, 5, 0},
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
586 {0, 0, 3, 0},
587
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
591 /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
593 {0, 0, 5, 0},
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
596 {0, 0, 4, 0}
597 };
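
/* For example (illustrative only): a conditional jump frag that starts in
   state ENCODE_RELAX_STATE (COND_JUMP, SMALL) (index 4 above) and whose
   target falls outside the -128+1 .. 127+1 byte reach is promoted to index
   6, growing the variable part of the frag from 1 byte to 5 bytes (one
   extra opcode byte plus a 4-byte displacement).  */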
598
599 static const arch_entry cpu_arch[] =
600 {
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
668 CPU_BDVER3_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
670 CPU_BTVER1_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
672 CPU_BTVER2_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
674 CPU_8087_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
676 CPU_287_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
678 CPU_387_FLAGS, 0, 0 },
679 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
680 CPU_ANY87_FLAGS, 0, 1 },
681 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
682 CPU_MMX_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
684 CPU_3DNOWA_FLAGS, 0, 1 },
685 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
686 CPU_SSE_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
688 CPU_SSE2_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
690 CPU_SSE3_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
692 CPU_SSSE3_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
694 CPU_SSE4_1_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
696 CPU_SSE4_2_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
698 CPU_SSE4_2_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
700 CPU_ANY_SSE_FLAGS, 0, 1 },
701 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
702 CPU_AVX_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
704 CPU_AVX2_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
706 CPU_ANY_AVX_FLAGS, 0, 1 },
707 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
708 CPU_VMX_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
710 CPU_VMFUNC_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
712 CPU_SMX_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
714 CPU_XSAVE_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
716 CPU_XSAVEOPT_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
718 CPU_AES_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
720 CPU_PCLMUL_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
722 CPU_PCLMUL_FLAGS, 1, 0 },
723 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
724 CPU_FSGSBASE_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
726 CPU_RDRND_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
728 CPU_F16C_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
730 CPU_BMI2_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
732 CPU_FMA_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
734 CPU_FMA4_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
736 CPU_XOP_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
738 CPU_LWP_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
740 CPU_MOVBE_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
742 CPU_CX16_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
744 CPU_EPT_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
746 CPU_LZCNT_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
748 CPU_HLE_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
750 CPU_RTM_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
752 CPU_INVPCID_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
754 CPU_CLFLUSH_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
756 CPU_NOP_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
758 CPU_SYSCALL_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
760 CPU_RDTSCP_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
762 CPU_3DNOW_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
764 CPU_3DNOWA_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
766 CPU_PADLOCK_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
768 CPU_SVME_FLAGS, 1, 0 },
769 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
770 CPU_SVME_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
772 CPU_SSE4A_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
774 CPU_ABM_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
776 CPU_BMI_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
778 CPU_TBM_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
780 CPU_ADX_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
782 CPU_RDSEED_FLAGS, 0, 0 },
783 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
784 CPU_PRFCHW_FLAGS, 0, 0 },
785 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
786 CPU_SMAP_FLAGS, 0, 0 },
787 };
788
789 #ifdef I386COFF
790 /* Like s_lcomm_internal in gas/read.c but the alignment string
791 is allowed to be optional. */
792
793 static symbolS *
794 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
795 {
796 addressT align = 0;
797
798 SKIP_WHITESPACE ();
799
800 if (needs_align
801 && *input_line_pointer == ',')
802 {
803 align = parse_align (needs_align - 1);
804
805 if (align == (addressT) -1)
806 return NULL;
807 }
808 else
809 {
810 if (size >= 8)
811 align = 3;
812 else if (size >= 4)
813 align = 2;
814 else if (size >= 2)
815 align = 1;
816 else
817 align = 0;
818 }
819
820 bss_alloc (symbolP, size, align);
821 return symbolP;
822 }
823
824 static void
825 pe_lcomm (int needs_align)
826 {
827 s_comm_internal (needs_align * 2, pe_lcomm_internal);
828 }
829 #endif
830
831 const pseudo_typeS md_pseudo_table[] =
832 {
833 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
834 {"align", s_align_bytes, 0},
835 #else
836 {"align", s_align_ptwo, 0},
837 #endif
838 {"arch", set_cpu_arch, 0},
839 #ifndef I386COFF
840 {"bss", s_bss, 0},
841 #else
842 {"lcomm", pe_lcomm, 1},
843 #endif
844 {"ffloat", float_cons, 'f'},
845 {"dfloat", float_cons, 'd'},
846 {"tfloat", float_cons, 'x'},
847 {"value", cons, 2},
848 {"slong", signed_cons, 4},
849 {"noopt", s_ignore, 0},
850 {"optim", s_ignore, 0},
851 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
852 {"code16", set_code_flag, CODE_16BIT},
853 {"code32", set_code_flag, CODE_32BIT},
854 {"code64", set_code_flag, CODE_64BIT},
855 {"intel_syntax", set_intel_syntax, 1},
856 {"att_syntax", set_intel_syntax, 0},
857 {"intel_mnemonic", set_intel_mnemonic, 1},
858 {"att_mnemonic", set_intel_mnemonic, 0},
859 {"allow_index_reg", set_allow_index_reg, 1},
860 {"disallow_index_reg", set_allow_index_reg, 0},
861 {"sse_check", set_check, 0},
862 {"operand_check", set_check, 1},
863 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
864 {"largecomm", handle_large_common, 0},
865 #else
866 {"file", (void (*) (int)) dwarf2_directive_file, 0},
867 {"loc", dwarf2_directive_loc, 0},
868 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
869 #endif
870 #ifdef TE_PE
871 {"secrel32", pe_directive_secrel, 0},
872 #endif
873 {0, 0, 0}
874 };
875
876 /* For interface with expression (). */
877 extern char *input_line_pointer;
878
879 /* Hash table for instruction mnemonic lookup. */
880 static struct hash_control *op_hash;
881
882 /* Hash table for register lookup. */
883 static struct hash_control *reg_hash;
884 \f
885 void
886 i386_align_code (fragS *fragP, int count)
887 {
888 /* Various efficient no-op patterns for aligning code labels.
889 Note: Don't try to assemble the instructions in the comments.
890 0L and 0w are not legal. */
891 static const char f32_1[] =
892 {0x90}; /* nop */
893 static const char f32_2[] =
894 {0x66,0x90}; /* xchg %ax,%ax */
895 static const char f32_3[] =
896 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
897 static const char f32_4[] =
898 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
899 static const char f32_5[] =
900 {0x90, /* nop */
901 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
902 static const char f32_6[] =
903 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
904 static const char f32_7[] =
905 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
906 static const char f32_8[] =
907 {0x90, /* nop */
908 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
909 static const char f32_9[] =
910 {0x89,0xf6, /* movl %esi,%esi */
911 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
912 static const char f32_10[] =
913 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
914 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
915 static const char f32_11[] =
916 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
917 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
918 static const char f32_12[] =
919 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
920 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
921 static const char f32_13[] =
922 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
923 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
924 static const char f32_14[] =
925 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
926 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
927 static const char f16_3[] =
928 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
929 static const char f16_4[] =
930 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
931 static const char f16_5[] =
932 {0x90, /* nop */
933 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
934 static const char f16_6[] =
935 {0x89,0xf6, /* mov %si,%si */
936 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
937 static const char f16_7[] =
938 {0x8d,0x74,0x00, /* lea 0(%si),%si */
939 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
940 static const char f16_8[] =
941 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
942 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
943 static const char jump_31[] =
944 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
945 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
946 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
947 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
948 static const char *const f32_patt[] = {
949 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
950 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
951 };
952 static const char *const f16_patt[] = {
953 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
954 };
955 /* nopl (%[re]ax) */
956 static const char alt_3[] =
957 {0x0f,0x1f,0x00};
958 /* nopl 0(%[re]ax) */
959 static const char alt_4[] =
960 {0x0f,0x1f,0x40,0x00};
961 /* nopl 0(%[re]ax,%[re]ax,1) */
962 static const char alt_5[] =
963 {0x0f,0x1f,0x44,0x00,0x00};
964 /* nopw 0(%[re]ax,%[re]ax,1) */
965 static const char alt_6[] =
966 {0x66,0x0f,0x1f,0x44,0x00,0x00};
967 /* nopl 0L(%[re]ax) */
968 static const char alt_7[] =
969 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
970 /* nopl 0L(%[re]ax,%[re]ax,1) */
971 static const char alt_8[] =
972 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* nopw 0L(%[re]ax,%[re]ax,1) */
974 static const char alt_9[] =
975 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_10[] =
978 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
979 /* data16
980 nopw %cs:0L(%[re]ax,%[re]ax,1) */
981 static const char alt_long_11[] =
982 {0x66,
983 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
984 /* data16
985 data16
986 nopw %cs:0L(%[re]ax,%[re]ax,1) */
987 static const char alt_long_12[] =
988 {0x66,
989 0x66,
990 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
991 /* data16
992 data16
993 data16
994 nopw %cs:0L(%[re]ax,%[re]ax,1) */
995 static const char alt_long_13[] =
996 {0x66,
997 0x66,
998 0x66,
999 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1000 /* data16
1001 data16
1002 data16
1003 data16
1004 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1005 static const char alt_long_14[] =
1006 {0x66,
1007 0x66,
1008 0x66,
1009 0x66,
1010 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1011 /* data16
1012 data16
1013 data16
1014 data16
1015 data16
1016 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1017 static const char alt_long_15[] =
1018 {0x66,
1019 0x66,
1020 0x66,
1021 0x66,
1022 0x66,
1023 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1024 /* nopl 0(%[re]ax,%[re]ax,1)
1025 nopw 0(%[re]ax,%[re]ax,1) */
1026 static const char alt_short_11[] =
1027 {0x0f,0x1f,0x44,0x00,0x00,
1028 0x66,0x0f,0x1f,0x44,0x00,0x00};
1029 /* nopw 0(%[re]ax,%[re]ax,1)
1030 nopw 0(%[re]ax,%[re]ax,1) */
1031 static const char alt_short_12[] =
1032 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1033 0x66,0x0f,0x1f,0x44,0x00,0x00};
1034 /* nopw 0(%[re]ax,%[re]ax,1)
1035 nopl 0L(%[re]ax) */
1036 static const char alt_short_13[] =
1037 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1038 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1039 /* nopl 0L(%[re]ax)
1040 nopl 0L(%[re]ax) */
1041 static const char alt_short_14[] =
1042 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1043 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1044 /* nopl 0L(%[re]ax)
1045 nopl 0L(%[re]ax,%[re]ax,1) */
1046 static const char alt_short_15[] =
1047 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1048 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1049 static const char *const alt_short_patt[] = {
1050 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1051 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1052 alt_short_14, alt_short_15
1053 };
1054 static const char *const alt_long_patt[] = {
1055 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1056 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1057 alt_long_14, alt_long_15
1058 };
1059
1060 /* Only emit padding when COUNT is positive and no larger than the supported maximum. */
1061 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1062 return;
1063
1064 /* We need to decide which NOP sequence to use for 32bit and
1065 64bit. When -mtune= is used:
1066
1067 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1068 PROCESSOR_GENERIC32, f32_patt will be used.
1069 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1070 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1071 PROCESSOR_GENERIC64, alt_long_patt will be used.
1072 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1073 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1074 will be used.
1075
1076 When -mtune= isn't used, alt_long_patt will be used if
1077 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1078 be used.
1079
1080 When -march= or .arch is used, we can't use anything beyond
1081 cpu_arch_isa_flags. */
1082
1083 if (flag_code == CODE_16BIT)
1084 {
1085 if (count > 8)
1086 {
1087 memcpy (fragP->fr_literal + fragP->fr_fix,
1088 jump_31, count);
1089 /* Adjust jump offset. */
1090 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1091 }
1092 else
1093 memcpy (fragP->fr_literal + fragP->fr_fix,
1094 f16_patt[count - 1], count);
1095 }
1096 else
1097 {
1098 const char *const *patt = NULL;
1099
1100 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1101 {
1102 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1103 switch (cpu_arch_tune)
1104 {
1105 case PROCESSOR_UNKNOWN:
1106 /* We use cpu_arch_isa_flags to check if we SHOULD
1107 optimize with nops. */
1108 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1109 patt = alt_long_patt;
1110 else
1111 patt = f32_patt;
1112 break;
1113 case PROCESSOR_PENTIUM4:
1114 case PROCESSOR_NOCONA:
1115 case PROCESSOR_CORE:
1116 case PROCESSOR_CORE2:
1117 case PROCESSOR_COREI7:
1118 case PROCESSOR_L1OM:
1119 case PROCESSOR_K1OM:
1120 case PROCESSOR_GENERIC64:
1121 patt = alt_long_patt;
1122 break;
1123 case PROCESSOR_K6:
1124 case PROCESSOR_ATHLON:
1125 case PROCESSOR_K8:
1126 case PROCESSOR_AMDFAM10:
1127 case PROCESSOR_BD:
1128 case PROCESSOR_BT:
1129 patt = alt_short_patt;
1130 break;
1131 case PROCESSOR_I386:
1132 case PROCESSOR_I486:
1133 case PROCESSOR_PENTIUM:
1134 case PROCESSOR_PENTIUMPRO:
1135 case PROCESSOR_GENERIC32:
1136 patt = f32_patt;
1137 break;
1138 }
1139 }
1140 else
1141 {
1142 switch (fragP->tc_frag_data.tune)
1143 {
1144 case PROCESSOR_UNKNOWN:
1145 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1146 PROCESSOR_UNKNOWN. */
1147 abort ();
1148 break;
1149
1150 case PROCESSOR_I386:
1151 case PROCESSOR_I486:
1152 case PROCESSOR_PENTIUM:
1153 case PROCESSOR_K6:
1154 case PROCESSOR_ATHLON:
1155 case PROCESSOR_K8:
1156 case PROCESSOR_AMDFAM10:
1157 case PROCESSOR_BD:
1158 case PROCESSOR_BT:
1159 case PROCESSOR_GENERIC32:
1160 /* We use cpu_arch_isa_flags to check if we CAN optimize
1161 with nops. */
1162 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1163 patt = alt_short_patt;
1164 else
1165 patt = f32_patt;
1166 break;
1167 case PROCESSOR_PENTIUMPRO:
1168 case PROCESSOR_PENTIUM4:
1169 case PROCESSOR_NOCONA:
1170 case PROCESSOR_CORE:
1171 case PROCESSOR_CORE2:
1172 case PROCESSOR_COREI7:
1173 case PROCESSOR_L1OM:
1174 case PROCESSOR_K1OM:
1175 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1176 patt = alt_long_patt;
1177 else
1178 patt = f32_patt;
1179 break;
1180 case PROCESSOR_GENERIC64:
1181 patt = alt_long_patt;
1182 break;
1183 }
1184 }
1185
1186 if (patt == f32_patt)
1187 {
1188 /* If the padding is shorter than the limit computed below, we use
1189 the normal patterns. Otherwise, we use a jump instruction and
1190 adjust its offset. */
1191 int limit;
1192
1193 /* For 64bit, the limit is 3 bytes. */
1194 if (flag_code == CODE_64BIT
1195 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1196 limit = 3;
1197 else
1198 limit = 15;
1199 if (count < limit)
1200 memcpy (fragP->fr_literal + fragP->fr_fix,
1201 patt[count - 1], count);
1202 else
1203 {
1204 memcpy (fragP->fr_literal + fragP->fr_fix,
1205 jump_31, count);
1206 /* Adjust jump offset. */
1207 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1208 }
1209 }
1210 else
1211 {
1212 /* Maximum length of an instruction is 15 bytes. If the
1213 padding is greater than 15 bytes and we don't use jump,
1214 we have to break it into smaller pieces. */
1215 int padding = count;
1216 while (padding > 15)
1217 {
1218 padding -= 15;
1219 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1220 patt [14], 15);
1221 }
1222
1223 if (padding)
1224 memcpy (fragP->fr_literal + fragP->fr_fix,
1225 patt [padding - 1], padding);
1226 }
1227 }
1228 fragP->fr_var = count;
1229 }
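
/* Worked example (illustrative only): asked for 40 bytes of padding with one
   of the alt_*_patt tables, the loop above first stores two 15-byte NOPs at
   offsets 25 and 10 within the frag, then a 10-byte NOP at offset 0, giving
   exactly 40 bytes of padding.  */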
1230
1231 static INLINE int
1232 operand_type_all_zero (const union i386_operand_type *x)
1233 {
1234 switch (ARRAY_SIZE(x->array))
1235 {
1236 case 3:
1237 if (x->array[2])
1238 return 0;
1239 case 2:
1240 if (x->array[1])
1241 return 0;
1242 case 1:
1243 return !x->array[0];
1244 default:
1245 abort ();
1246 }
1247 }
1248
1249 static INLINE void
1250 operand_type_set (union i386_operand_type *x, unsigned int v)
1251 {
1252 switch (ARRAY_SIZE(x->array))
1253 {
1254 case 3:
1255 x->array[2] = v;
1256 case 2:
1257 x->array[1] = v;
1258 case 1:
1259 x->array[0] = v;
1260 break;
1261 default:
1262 abort ();
1263 }
1264 }
1265
1266 static INLINE int
1267 operand_type_equal (const union i386_operand_type *x,
1268 const union i386_operand_type *y)
1269 {
1270 switch (ARRAY_SIZE(x->array))
1271 {
1272 case 3:
1273 if (x->array[2] != y->array[2])
1274 return 0;
1275 case 2:
1276 if (x->array[1] != y->array[1])
1277 return 0;
1278 case 1:
1279 return x->array[0] == y->array[0];
1280 break;
1281 default:
1282 abort ();
1283 }
1284 }
1285
1286 static INLINE int
1287 cpu_flags_all_zero (const union i386_cpu_flags *x)
1288 {
1289 switch (ARRAY_SIZE(x->array))
1290 {
1291 case 3:
1292 if (x->array[2])
1293 return 0;
1294 case 2:
1295 if (x->array[1])
1296 return 0;
1297 case 1:
1298 return !x->array[0];
1299 default:
1300 abort ();
1301 }
1302 }
1303
1304 static INLINE void
1305 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1306 {
1307 switch (ARRAY_SIZE(x->array))
1308 {
1309 case 3:
1310 x->array[2] = v;
1311 case 2:
1312 x->array[1] = v;
1313 case 1:
1314 x->array[0] = v;
1315 break;
1316 default:
1317 abort ();
1318 }
1319 }
1320
1321 static INLINE int
1322 cpu_flags_equal (const union i386_cpu_flags *x,
1323 const union i386_cpu_flags *y)
1324 {
1325 switch (ARRAY_SIZE(x->array))
1326 {
1327 case 3:
1328 if (x->array[2] != y->array[2])
1329 return 0;
1330 case 2:
1331 if (x->array[1] != y->array[1])
1332 return 0;
1333 case 1:
1334 return x->array[0] == y->array[0];
1335 break;
1336 default:
1337 abort ();
1338 }
1339 }
1340
1341 static INLINE int
1342 cpu_flags_check_cpu64 (i386_cpu_flags f)
1343 {
1344 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1345 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1346 }
1347
1348 static INLINE i386_cpu_flags
1349 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1350 {
1351 switch (ARRAY_SIZE (x.array))
1352 {
1353 case 3:
1354 x.array [2] &= y.array [2];
1355 case 2:
1356 x.array [1] &= y.array [1];
1357 case 1:
1358 x.array [0] &= y.array [0];
1359 break;
1360 default:
1361 abort ();
1362 }
1363 return x;
1364 }
1365
1366 static INLINE i386_cpu_flags
1367 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1368 {
1369 switch (ARRAY_SIZE (x.array))
1370 {
1371 case 3:
1372 x.array [2] |= y.array [2];
1373 case 2:
1374 x.array [1] |= y.array [1];
1375 case 1:
1376 x.array [0] |= y.array [0];
1377 break;
1378 default:
1379 abort ();
1380 }
1381 return x;
1382 }
1383
1384 static INLINE i386_cpu_flags
1385 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1386 {
1387 switch (ARRAY_SIZE (x.array))
1388 {
1389 case 3:
1390 x.array [2] &= ~y.array [2];
1391 case 2:
1392 x.array [1] &= ~y.array [1];
1393 case 1:
1394 x.array [0] &= ~y.array [0];
1395 break;
1396 default:
1397 abort ();
1398 }
1399 return x;
1400 }
1401
1402 #define CPU_FLAGS_ARCH_MATCH 0x1
1403 #define CPU_FLAGS_64BIT_MATCH 0x2
1404 #define CPU_FLAGS_AES_MATCH 0x4
1405 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1406 #define CPU_FLAGS_AVX_MATCH 0x10
1407
1408 #define CPU_FLAGS_32BIT_MATCH \
1409 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1410 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1411 #define CPU_FLAGS_PERFECT_MATCH \
1412 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
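
/* For example (illustrative only): a template whose only CPU requirement is
   CpuSSE2 yields CPU_FLAGS_PERFECT_MATCH from cpu_flags_match () when the
   selected arch includes SSE2; if the arch lacks SSE2, at most the
   CPU_FLAGS_64BIT_MATCH bit is returned.  */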
1413
1414 /* Return CPU flags match bits. */
1415
1416 static int
1417 cpu_flags_match (const insn_template *t)
1418 {
1419 i386_cpu_flags x = t->cpu_flags;
1420 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1421
1422 x.bitfield.cpu64 = 0;
1423 x.bitfield.cpuno64 = 0;
1424
1425 if (cpu_flags_all_zero (&x))
1426 {
1427 /* This instruction is available on all archs. */
1428 match |= CPU_FLAGS_32BIT_MATCH;
1429 }
1430 else
1431 {
1432 /* This instruction is available only on some archs. */
1433 i386_cpu_flags cpu = cpu_arch_flags;
1434
1435 cpu.bitfield.cpu64 = 0;
1436 cpu.bitfield.cpuno64 = 0;
1437 cpu = cpu_flags_and (x, cpu);
1438 if (!cpu_flags_all_zero (&cpu))
1439 {
1440 if (x.bitfield.cpuavx)
1441 {
1442 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1443 if (cpu.bitfield.cpuavx)
1444 {
1445 /* Check SSE2AVX. */
1446 if (!t->opcode_modifier.sse2avx || sse2avx)
1447 {
1448 match |= (CPU_FLAGS_ARCH_MATCH
1449 | CPU_FLAGS_AVX_MATCH);
1450 /* Check AES. */
1451 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1452 match |= CPU_FLAGS_AES_MATCH;
1453 /* Check PCLMUL. */
1454 if (!x.bitfield.cpupclmul
1455 || cpu.bitfield.cpupclmul)
1456 match |= CPU_FLAGS_PCLMUL_MATCH;
1457 }
1458 }
1459 else
1460 match |= CPU_FLAGS_ARCH_MATCH;
1461 }
1462 else
1463 match |= CPU_FLAGS_32BIT_MATCH;
1464 }
1465 }
1466 return match;
1467 }
1468
1469 static INLINE i386_operand_type
1470 operand_type_and (i386_operand_type x, i386_operand_type y)
1471 {
1472 switch (ARRAY_SIZE (x.array))
1473 {
1474 case 3:
1475 x.array [2] &= y.array [2];
1476 case 2:
1477 x.array [1] &= y.array [1];
1478 case 1:
1479 x.array [0] &= y.array [0];
1480 break;
1481 default:
1482 abort ();
1483 }
1484 return x;
1485 }
1486
1487 static INLINE i386_operand_type
1488 operand_type_or (i386_operand_type x, i386_operand_type y)
1489 {
1490 switch (ARRAY_SIZE (x.array))
1491 {
1492 case 3:
1493 x.array [2] |= y.array [2];
1494 case 2:
1495 x.array [1] |= y.array [1];
1496 case 1:
1497 x.array [0] |= y.array [0];
1498 break;
1499 default:
1500 abort ();
1501 }
1502 return x;
1503 }
1504
1505 static INLINE i386_operand_type
1506 operand_type_xor (i386_operand_type x, i386_operand_type y)
1507 {
1508 switch (ARRAY_SIZE (x.array))
1509 {
1510 case 3:
1511 x.array [2] ^= y.array [2];
1512 case 2:
1513 x.array [1] ^= y.array [1];
1514 case 1:
1515 x.array [0] ^= y.array [0];
1516 break;
1517 default:
1518 abort ();
1519 }
1520 return x;
1521 }
1522
1523 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1524 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1525 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1526 static const i386_operand_type inoutportreg
1527 = OPERAND_TYPE_INOUTPORTREG;
1528 static const i386_operand_type reg16_inoutportreg
1529 = OPERAND_TYPE_REG16_INOUTPORTREG;
1530 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1531 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1532 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1533 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1534 static const i386_operand_type anydisp
1535 = OPERAND_TYPE_ANYDISP;
1536 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1537 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1538 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1539 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1540 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1541 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1542 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1543 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1544 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1545 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1546 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1547 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1548
1549 enum operand_type
1550 {
1551 reg,
1552 imm,
1553 disp,
1554 anymem
1555 };
1556
1557 static INLINE int
1558 operand_type_check (i386_operand_type t, enum operand_type c)
1559 {
1560 switch (c)
1561 {
1562 case reg:
1563 return (t.bitfield.reg8
1564 || t.bitfield.reg16
1565 || t.bitfield.reg32
1566 || t.bitfield.reg64);
1567
1568 case imm:
1569 return (t.bitfield.imm8
1570 || t.bitfield.imm8s
1571 || t.bitfield.imm16
1572 || t.bitfield.imm32
1573 || t.bitfield.imm32s
1574 || t.bitfield.imm64);
1575
1576 case disp:
1577 return (t.bitfield.disp8
1578 || t.bitfield.disp16
1579 || t.bitfield.disp32
1580 || t.bitfield.disp32s
1581 || t.bitfield.disp64);
1582
1583 case anymem:
1584 return (t.bitfield.disp8
1585 || t.bitfield.disp16
1586 || t.bitfield.disp32
1587 || t.bitfield.disp32s
1588 || t.bitfield.disp64
1589 || t.bitfield.baseindex);
1590
1591 default:
1592 abort ();
1593 }
1594
1595 return 0;
1596 }
1597
1598 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1599 operand J for instruction template T. */
1600
1601 static INLINE int
1602 match_reg_size (const insn_template *t, unsigned int j)
1603 {
1604 return !((i.types[j].bitfield.byte
1605 && !t->operand_types[j].bitfield.byte)
1606 || (i.types[j].bitfield.word
1607 && !t->operand_types[j].bitfield.word)
1608 || (i.types[j].bitfield.dword
1609 && !t->operand_types[j].bitfield.dword)
1610 || (i.types[j].bitfield.qword
1611 && !t->operand_types[j].bitfield.qword));
1612 }
1613
1614 /* Return 1 if there is no conflict in any size on operand J for
1615 instruction template T. */
1616
1617 static INLINE int
1618 match_mem_size (const insn_template *t, unsigned int j)
1619 {
1620 return (match_reg_size (t, j)
1621 && !((i.types[j].bitfield.unspecified
1622 && !t->operand_types[j].bitfield.unspecified)
1623 || (i.types[j].bitfield.fword
1624 && !t->operand_types[j].bitfield.fword)
1625 || (i.types[j].bitfield.tbyte
1626 && !t->operand_types[j].bitfield.tbyte)
1627 || (i.types[j].bitfield.xmmword
1628 && !t->operand_types[j].bitfield.xmmword)
1629 || (i.types[j].bitfield.ymmword
1630 && !t->operand_types[j].bitfield.ymmword)));
1631 }
1632
1633 /* Return 1 if there is no size conflict on any operands for
1634 instruction template T. */
1635
1636 static INLINE int
1637 operand_size_match (const insn_template *t)
1638 {
1639 unsigned int j;
1640 int match = 1;
1641
1642 /* Don't check jump instructions. */
1643 if (t->opcode_modifier.jump
1644 || t->opcode_modifier.jumpbyte
1645 || t->opcode_modifier.jumpdword
1646 || t->opcode_modifier.jumpintersegment)
1647 return match;
1648
1649 /* Check memory and accumulator operand size. */
1650 for (j = 0; j < i.operands; j++)
1651 {
1652 if (t->operand_types[j].bitfield.anysize)
1653 continue;
1654
1655 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1656 {
1657 match = 0;
1658 break;
1659 }
1660
1661 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1662 {
1663 match = 0;
1664 break;
1665 }
1666 }
1667
1668 if (match)
1669 return match;
1670 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1671 {
1672 mismatch:
1673 i.error = operand_size_mismatch;
1674 return 0;
1675 }
1676
1677 /* Check reverse. */
1678 gas_assert (i.operands == 2);
1679
1680 match = 1;
1681 for (j = 0; j < 2; j++)
1682 {
1683 if (t->operand_types[j].bitfield.acc
1684 && !match_reg_size (t, j ? 0 : 1))
1685 goto mismatch;
1686
1687 if (i.types[j].bitfield.mem
1688 && !match_mem_size (t, j ? 0 : 1))
1689 goto mismatch;
1690 }
1691
1692 return match;
1693 }
1694
1695 static INLINE int
1696 operand_type_match (i386_operand_type overlap,
1697 i386_operand_type given)
1698 {
1699 i386_operand_type temp = overlap;
1700
1701 temp.bitfield.jumpabsolute = 0;
1702 temp.bitfield.unspecified = 0;
1703 temp.bitfield.byte = 0;
1704 temp.bitfield.word = 0;
1705 temp.bitfield.dword = 0;
1706 temp.bitfield.fword = 0;
1707 temp.bitfield.qword = 0;
1708 temp.bitfield.tbyte = 0;
1709 temp.bitfield.xmmword = 0;
1710 temp.bitfield.ymmword = 0;
1711 if (operand_type_all_zero (&temp))
1712 goto mismatch;
1713
1714 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1715 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1716 return 1;
1717
1718 mismatch:
1719 i.error = operand_type_mismatch;
1720 return 0;
1721 }
1722
1723 /* If given types g0 and g1 are registers they must be of the same type
1724 unless the expected operand type register overlap is null.
1725 Note that Acc in a template matches every size of reg. */
1726
1727 static INLINE int
1728 operand_type_register_match (i386_operand_type m0,
1729 i386_operand_type g0,
1730 i386_operand_type t0,
1731 i386_operand_type m1,
1732 i386_operand_type g1,
1733 i386_operand_type t1)
1734 {
1735 if (!operand_type_check (g0, reg))
1736 return 1;
1737
1738 if (!operand_type_check (g1, reg))
1739 return 1;
1740
1741 if (g0.bitfield.reg8 == g1.bitfield.reg8
1742 && g0.bitfield.reg16 == g1.bitfield.reg16
1743 && g0.bitfield.reg32 == g1.bitfield.reg32
1744 && g0.bitfield.reg64 == g1.bitfield.reg64)
1745 return 1;
1746
1747 if (m0.bitfield.acc)
1748 {
1749 t0.bitfield.reg8 = 1;
1750 t0.bitfield.reg16 = 1;
1751 t0.bitfield.reg32 = 1;
1752 t0.bitfield.reg64 = 1;
1753 }
1754
1755 if (m1.bitfield.acc)
1756 {
1757 t1.bitfield.reg8 = 1;
1758 t1.bitfield.reg16 = 1;
1759 t1.bitfield.reg32 = 1;
1760 t1.bitfield.reg64 = 1;
1761 }
1762
1763 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1764 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1765 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1766 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1767 return 1;
1768
1769 i.error = register_type_mismatch;
1770
1771 return 0;
1772 }
1773
1774 static INLINE unsigned int
1775 register_number (const reg_entry *r)
1776 {
1777 unsigned int nr = r->reg_num;
1778
1779 if (r->reg_flags & RegRex)
1780 nr += 8;
1781
1782 return nr;
1783 }
1784
1785 static INLINE unsigned int
1786 mode_from_disp_size (i386_operand_type t)
1787 {
1788 if (t.bitfield.disp8)
1789 return 1;
1790 else if (t.bitfield.disp16
1791 || t.bitfield.disp32
1792 || t.bitfield.disp32s)
1793 return 2;
1794 else
1795 return 0;
1796 }
1797
1798 static INLINE int
1799 fits_in_signed_byte (offsetT num)
1800 {
1801 return (num >= -128) && (num <= 127);
1802 }
1803
1804 static INLINE int
1805 fits_in_unsigned_byte (offsetT num)
1806 {
1807 return (num & 0xff) == num;
1808 }
1809
1810 static INLINE int
1811 fits_in_unsigned_word (offsetT num)
1812 {
1813 return (num & 0xffff) == num;
1814 }
1815
1816 static INLINE int
1817 fits_in_signed_word (offsetT num)
1818 {
1819 return (-32768 <= num) && (num <= 32767);
1820 }
1821
1822 static INLINE int
1823 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1824 {
1825 #ifndef BFD64
1826 return 1;
1827 #else
1828 return (!(((offsetT) -1 << 31) & num)
1829 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1830 #endif
1831 } /* fits_in_signed_long() */
1832
1833 static INLINE int
1834 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1835 {
1836 #ifndef BFD64
1837 return 1;
1838 #else
1839 return (num & (((offsetT) 2 << 31) - 1)) == num;
1840 #endif
1841 } /* fits_in_unsigned_long() */
1842
1843 static INLINE int
1844 fits_in_imm4 (offsetT num)
1845 {
1846 return (num & 0xf) == num;
1847 }
1848
1849 static i386_operand_type
1850 smallest_imm_type (offsetT num)
1851 {
1852 i386_operand_type t;
1853
1854 operand_type_set (&t, 0);
1855 t.bitfield.imm64 = 1;
1856
1857 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1858 {
1859 /* This code is disabled on the 486 because all the Imm1 forms
1860 in the opcode table are slower on the i486. They're the
1861 versions with the implicitly specified single-position
1862 displacement, which has another syntax if you really want to
1863 use that form. */
1864 t.bitfield.imm1 = 1;
1865 t.bitfield.imm8 = 1;
1866 t.bitfield.imm8s = 1;
1867 t.bitfield.imm16 = 1;
1868 t.bitfield.imm32 = 1;
1869 t.bitfield.imm32s = 1;
1870 }
1871 else if (fits_in_signed_byte (num))
1872 {
1873 t.bitfield.imm8 = 1;
1874 t.bitfield.imm8s = 1;
1875 t.bitfield.imm16 = 1;
1876 t.bitfield.imm32 = 1;
1877 t.bitfield.imm32s = 1;
1878 }
1879 else if (fits_in_unsigned_byte (num))
1880 {
1881 t.bitfield.imm8 = 1;
1882 t.bitfield.imm16 = 1;
1883 t.bitfield.imm32 = 1;
1884 t.bitfield.imm32s = 1;
1885 }
1886 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1887 {
1888 t.bitfield.imm16 = 1;
1889 t.bitfield.imm32 = 1;
1890 t.bitfield.imm32s = 1;
1891 }
1892 else if (fits_in_signed_long (num))
1893 {
1894 t.bitfield.imm32 = 1;
1895 t.bitfield.imm32s = 1;
1896 }
1897 else if (fits_in_unsigned_long (num))
1898 t.bitfield.imm32 = 1;
1899
1900 return t;
1901 }
1902
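/* Truncate VAL to a SIZE-byte value, warning when significant bits
   would be lost.  */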
1903 static offsetT
1904 offset_in_range (offsetT val, int size)
1905 {
1906 addressT mask;
1907
1908 switch (size)
1909 {
1910 case 1: mask = ((addressT) 1 << 8) - 1; break;
1911 case 2: mask = ((addressT) 1 << 16) - 1; break;
1912 case 4: mask = ((addressT) 2 << 31) - 1; break;
1913 #ifdef BFD64
1914 case 8: mask = ((addressT) 2 << 63) - 1; break;
1915 #endif
1916 default: abort ();
1917 }
1918
1919 #ifdef BFD64
1920 /* If BFD64, sign extend val for 32bit address mode. */
1921 if (flag_code != CODE_64BIT
1922 || i.prefix[ADDR_PREFIX])
1923 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1924 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1925 #endif
1926
1927 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1928 {
1929 char buf1[40], buf2[40];
1930
1931 sprint_value (buf1, val);
1932 sprint_value (buf2, val & mask);
1933 as_warn (_("%s shortened to %s"), buf1, buf2);
1934 }
1935 return val & mask;
1936 }
1937
1938 enum PREFIX_GROUP
1939 {
1940 PREFIX_EXIST = 0,
1941 PREFIX_LOCK,
1942 PREFIX_REP,
1943 PREFIX_OTHER
1944 };
1945
1946 /* Returns
1947 a. PREFIX_EXIST if attempting to add a prefix where one from the
1948 same class already exists.
1949 b. PREFIX_LOCK if lock prefix is added.
1950 c. PREFIX_REP if rep/repne prefix is added.
1951 d. PREFIX_OTHER if other prefix is added.
1952 */
1953
1954 static enum PREFIX_GROUP
1955 add_prefix (unsigned int prefix)
1956 {
1957 enum PREFIX_GROUP ret = PREFIX_OTHER;
1958 unsigned int q;
1959
1960 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1961 && flag_code == CODE_64BIT)
1962 {
1963 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1964 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1965 && (prefix & (REX_R | REX_X | REX_B))))
1966 ret = PREFIX_EXIST;
1967 q = REX_PREFIX;
1968 }
1969 else
1970 {
1971 switch (prefix)
1972 {
1973 default:
1974 abort ();
1975
1976 case CS_PREFIX_OPCODE:
1977 case DS_PREFIX_OPCODE:
1978 case ES_PREFIX_OPCODE:
1979 case FS_PREFIX_OPCODE:
1980 case GS_PREFIX_OPCODE:
1981 case SS_PREFIX_OPCODE:
1982 q = SEG_PREFIX;
1983 break;
1984
1985 case REPNE_PREFIX_OPCODE:
1986 case REPE_PREFIX_OPCODE:
1987 q = REP_PREFIX;
1988 ret = PREFIX_REP;
1989 break;
1990
1991 case LOCK_PREFIX_OPCODE:
1992 q = LOCK_PREFIX;
1993 ret = PREFIX_LOCK;
1994 break;
1995
1996 case FWAIT_OPCODE:
1997 q = WAIT_PREFIX;
1998 break;
1999
2000 case ADDR_PREFIX_OPCODE:
2001 q = ADDR_PREFIX;
2002 break;
2003
2004 case DATA_PREFIX_OPCODE:
2005 q = DATA_PREFIX;
2006 break;
2007 }
2008 if (i.prefix[q] != 0)
2009 ret = PREFIX_EXIST;
2010 }
2011
2012 if (ret)
2013 {
2014 if (!i.prefix[q])
2015 ++i.prefixes;
2016 i.prefix[q] |= prefix;
2017 }
2018 else
2019 as_bad (_("same type of prefix used twice"));
2020
2021 return ret;
2022 }
2023
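/* Switch the assembler into 16-, 32- or 64-bit mode according to VALUE,
   complaining if the selected architecture cannot support it.  With CHECK
   non-zero the complaint is fatal.  */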
2024 static void
2025 update_code_flag (int value, int check)
2026 {
2027 PRINTF_LIKE ((*as_error));
2028
2029 flag_code = (enum flag_code) value;
2030 if (flag_code == CODE_64BIT)
2031 {
2032 cpu_arch_flags.bitfield.cpu64 = 1;
2033 cpu_arch_flags.bitfield.cpuno64 = 0;
2034 }
2035 else
2036 {
2037 cpu_arch_flags.bitfield.cpu64 = 0;
2038 cpu_arch_flags.bitfield.cpuno64 = 1;
2039 }
2040   if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2041 {
2042 if (check)
2043 as_error = as_fatal;
2044 else
2045 as_error = as_bad;
2046 (*as_error) (_("64bit mode not supported on `%s'."),
2047 cpu_arch_name ? cpu_arch_name : default_arch);
2048 }
2049 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2050 {
2051 if (check)
2052 as_error = as_fatal;
2053 else
2054 as_error = as_bad;
2055 (*as_error) (_("32bit mode not supported on `%s'."),
2056 cpu_arch_name ? cpu_arch_name : default_arch);
2057 }
2058 stackop_size = '\0';
2059 }
2060
2061 static void
2062 set_code_flag (int value)
2063 {
2064 update_code_flag (value, 0);
2065 }
2066
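/* Enter .code16gcc mode: 16-bit code that still uses 32-bit ('l' suffixed)
   stack operations.  */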
2067 static void
2068 set_16bit_gcc_code_flag (int new_code_flag)
2069 {
2070 flag_code = (enum flag_code) new_code_flag;
2071 if (flag_code != CODE_16BIT)
2072 abort ();
2073 cpu_arch_flags.bitfield.cpu64 = 0;
2074 cpu_arch_flags.bitfield.cpuno64 = 1;
2075 stackop_size = LONG_MNEM_SUFFIX;
2076 }
2077
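/* Handle the Intel/AT&T syntax directives.  The optional prefix/noprefix
   argument controls whether registers must be written with a leading '%'.  */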
2078 static void
2079 set_intel_syntax (int syntax_flag)
2080 {
2081 /* Find out if register prefixing is specified. */
2082 int ask_naked_reg = 0;
2083
2084 SKIP_WHITESPACE ();
2085 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2086 {
2087 char *string = input_line_pointer;
2088 int e = get_symbol_end ();
2089
2090 if (strcmp (string, "prefix") == 0)
2091 ask_naked_reg = 1;
2092 else if (strcmp (string, "noprefix") == 0)
2093 ask_naked_reg = -1;
2094 else
2095 as_bad (_("bad argument to syntax directive."));
2096 *input_line_pointer = e;
2097 }
2098 demand_empty_rest_of_line ();
2099
2100 intel_syntax = syntax_flag;
2101
2102 if (ask_naked_reg == 0)
2103 allow_naked_reg = (intel_syntax
2104 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2105 else
2106 allow_naked_reg = (ask_naked_reg < 0);
2107
2108 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2109
2110 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2111 identifier_chars['$'] = intel_syntax ? '$' : 0;
2112 register_prefix = allow_naked_reg ? "" : "%";
2113 }
2114
2115 static void
2116 set_intel_mnemonic (int mnemonic_flag)
2117 {
2118 intel_mnemonic = mnemonic_flag;
2119 }
2120
2121 static void
2122 set_allow_index_reg (int flag)
2123 {
2124 allow_index_reg = flag;
2125 }
2126
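/* Handle the sse_check and operand_check directives (selected by WHAT).
   The argument must be none, warning or error.  */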
2127 static void
2128 set_check (int what)
2129 {
2130 enum check_kind *kind;
2131 const char *str;
2132
2133 if (what)
2134 {
2135 kind = &operand_check;
2136 str = "operand";
2137 }
2138 else
2139 {
2140 kind = &sse_check;
2141 str = "sse";
2142 }
2143
2144 SKIP_WHITESPACE ();
2145
2146 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2147 {
2148 char *string = input_line_pointer;
2149 int e = get_symbol_end ();
2150
2151 if (strcmp (string, "none") == 0)
2152 *kind = check_none;
2153 else if (strcmp (string, "warning") == 0)
2154 *kind = check_warning;
2155 else if (strcmp (string, "error") == 0)
2156 *kind = check_error;
2157 else
2158 as_bad (_("bad argument to %s_check directive."), str);
2159 *input_line_pointer = e;
2160 }
2161 else
2162 as_bad (_("missing argument for %s_check directive"), str);
2163
2164 demand_empty_rest_of_line ();
2165 }
2166
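/* Diagnose use of an ISA extension that is not available on the Intel
   L1OM/K1OM target being assembled for.  */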
2167 static void
2168 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2169 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2170 {
2171 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2172 static const char *arch;
2173
2174   /* Intel L1OM/K1OM is only supported on ELF.  */
2175 if (!IS_ELF)
2176 return;
2177
2178 if (!arch)
2179 {
2180 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2181 use default_arch. */
2182 arch = cpu_arch_name;
2183 if (!arch)
2184 arch = default_arch;
2185 }
2186
2187 /* If we are targeting Intel L1OM, we must enable it. */
2188 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2189 || new_flag.bitfield.cpul1om)
2190 return;
2191
2192 /* If we are targeting Intel K1OM, we must enable it. */
2193 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2194 || new_flag.bitfield.cpuk1om)
2195 return;
2196
2197 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2198 #endif
2199 }
2200
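/* Handle the .arch directive: select a base architecture or enable/disable
   an ISA extension, with an optional jumps/nojumps modifier.  */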
2201 static void
2202 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2203 {
2204 SKIP_WHITESPACE ();
2205
2206 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2207 {
2208 char *string = input_line_pointer;
2209 int e = get_symbol_end ();
2210 unsigned int j;
2211 i386_cpu_flags flags;
2212
2213 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2214 {
2215 if (strcmp (string, cpu_arch[j].name) == 0)
2216 {
2217 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2218
2219 if (*string != '.')
2220 {
2221 cpu_arch_name = cpu_arch[j].name;
2222 cpu_sub_arch_name = NULL;
2223 cpu_arch_flags = cpu_arch[j].flags;
2224 if (flag_code == CODE_64BIT)
2225 {
2226 cpu_arch_flags.bitfield.cpu64 = 1;
2227 cpu_arch_flags.bitfield.cpuno64 = 0;
2228 }
2229 else
2230 {
2231 cpu_arch_flags.bitfield.cpu64 = 0;
2232 cpu_arch_flags.bitfield.cpuno64 = 1;
2233 }
2234 cpu_arch_isa = cpu_arch[j].type;
2235 cpu_arch_isa_flags = cpu_arch[j].flags;
2236 if (!cpu_arch_tune_set)
2237 {
2238 cpu_arch_tune = cpu_arch_isa;
2239 cpu_arch_tune_flags = cpu_arch_isa_flags;
2240 }
2241 break;
2242 }
2243
2244 if (!cpu_arch[j].negated)
2245 flags = cpu_flags_or (cpu_arch_flags,
2246 cpu_arch[j].flags);
2247 else
2248 flags = cpu_flags_and_not (cpu_arch_flags,
2249 cpu_arch[j].flags);
2250 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2251 {
2252 if (cpu_sub_arch_name)
2253 {
2254 char *name = cpu_sub_arch_name;
2255 cpu_sub_arch_name = concat (name,
2256 cpu_arch[j].name,
2257 (const char *) NULL);
2258 free (name);
2259 }
2260 else
2261 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2262 cpu_arch_flags = flags;
2263 cpu_arch_isa_flags = flags;
2264 }
2265 *input_line_pointer = e;
2266 demand_empty_rest_of_line ();
2267 return;
2268 }
2269 }
2270 if (j >= ARRAY_SIZE (cpu_arch))
2271 as_bad (_("no such architecture: `%s'"), string);
2272
2273 *input_line_pointer = e;
2274 }
2275 else
2276 as_bad (_("missing cpu architecture"));
2277
2278 no_cond_jump_promotion = 0;
2279 if (*input_line_pointer == ','
2280 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2281 {
2282 char *string = ++input_line_pointer;
2283 int e = get_symbol_end ();
2284
2285 if (strcmp (string, "nojumps") == 0)
2286 no_cond_jump_promotion = 1;
2287 else if (strcmp (string, "jumps") == 0)
2288 ;
2289 else
2290 as_bad (_("no such architecture modifier: `%s'"), string);
2291
2292 *input_line_pointer = e;
2293 }
2294
2295 demand_empty_rest_of_line ();
2296 }
2297
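/* Return the BFD architecture for the output file.  */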
2298 enum bfd_architecture
2299 i386_arch (void)
2300 {
2301 if (cpu_arch_isa == PROCESSOR_L1OM)
2302 {
2303 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2304 || flag_code != CODE_64BIT)
2305 as_fatal (_("Intel L1OM is 64bit ELF only"));
2306 return bfd_arch_l1om;
2307 }
2308 else if (cpu_arch_isa == PROCESSOR_K1OM)
2309 {
2310 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2311 || flag_code != CODE_64BIT)
2312 as_fatal (_("Intel K1OM is 64bit ELF only"));
2313 return bfd_arch_k1om;
2314 }
2315 else
2316 return bfd_arch_i386;
2317 }
2318
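/* Return the BFD machine number for the output file.  */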
2319 unsigned long
2320 i386_mach (void)
2321 {
2322 if (!strncmp (default_arch, "x86_64", 6))
2323 {
2324 if (cpu_arch_isa == PROCESSOR_L1OM)
2325 {
2326 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2327 || default_arch[6] != '\0')
2328 as_fatal (_("Intel L1OM is 64bit ELF only"));
2329 return bfd_mach_l1om;
2330 }
2331 else if (cpu_arch_isa == PROCESSOR_K1OM)
2332 {
2333 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2334 || default_arch[6] != '\0')
2335 as_fatal (_("Intel K1OM is 64bit ELF only"));
2336 return bfd_mach_k1om;
2337 }
2338 else if (default_arch[6] == '\0')
2339 return bfd_mach_x86_64;
2340 else
2341 return bfd_mach_x64_32;
2342 }
2343 else if (!strcmp (default_arch, "i386"))
2344 return bfd_mach_i386_i386;
2345 else
2346 as_fatal (_("unknown architecture"));
2347 }
2348 \f
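/* One-time assembler initialization: build the opcode and register hash
   tables, fill in the lexical character tables, and pick the DWARF CFI
   defaults.  */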
2349 void
2350 md_begin (void)
2351 {
2352 const char *hash_err;
2353
2354 /* Initialize op_hash hash table. */
2355 op_hash = hash_new ();
2356
2357 {
2358 const insn_template *optab;
2359 templates *core_optab;
2360
2361 /* Setup for loop. */
2362 optab = i386_optab;
2363 core_optab = (templates *) xmalloc (sizeof (templates));
2364 core_optab->start = optab;
2365
2366 while (1)
2367 {
2368 ++optab;
2369 if (optab->name == NULL
2370 || strcmp (optab->name, (optab - 1)->name) != 0)
2371 {
2372 /* different name --> ship out current template list;
2373 add to hash table; & begin anew. */
2374 core_optab->end = optab;
2375 hash_err = hash_insert (op_hash,
2376 (optab - 1)->name,
2377 (void *) core_optab);
2378 if (hash_err)
2379 {
2380 as_fatal (_("can't hash %s: %s"),
2381 (optab - 1)->name,
2382 hash_err);
2383 }
2384 if (optab->name == NULL)
2385 break;
2386 core_optab = (templates *) xmalloc (sizeof (templates));
2387 core_optab->start = optab;
2388 }
2389 }
2390 }
2391
2392 /* Initialize reg_hash hash table. */
2393 reg_hash = hash_new ();
2394 {
2395 const reg_entry *regtab;
2396 unsigned int regtab_size = i386_regtab_size;
2397
2398 for (regtab = i386_regtab; regtab_size--; regtab++)
2399 {
2400 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2401 if (hash_err)
2402 as_fatal (_("can't hash %s: %s"),
2403 regtab->reg_name,
2404 hash_err);
2405 }
2406 }
2407
2408 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2409 {
2410 int c;
2411 char *p;
2412
2413 for (c = 0; c < 256; c++)
2414 {
2415 if (ISDIGIT (c))
2416 {
2417 digit_chars[c] = c;
2418 mnemonic_chars[c] = c;
2419 register_chars[c] = c;
2420 operand_chars[c] = c;
2421 }
2422 else if (ISLOWER (c))
2423 {
2424 mnemonic_chars[c] = c;
2425 register_chars[c] = c;
2426 operand_chars[c] = c;
2427 }
2428 else if (ISUPPER (c))
2429 {
2430 mnemonic_chars[c] = TOLOWER (c);
2431 register_chars[c] = mnemonic_chars[c];
2432 operand_chars[c] = c;
2433 }
2434
2435 if (ISALPHA (c) || ISDIGIT (c))
2436 identifier_chars[c] = c;
2437 else if (c >= 128)
2438 {
2439 identifier_chars[c] = c;
2440 operand_chars[c] = c;
2441 }
2442 }
2443
2444 #ifdef LEX_AT
2445 identifier_chars['@'] = '@';
2446 #endif
2447 #ifdef LEX_QM
2448 identifier_chars['?'] = '?';
2449 operand_chars['?'] = '?';
2450 #endif
2451 digit_chars['-'] = '-';
2452 mnemonic_chars['_'] = '_';
2453 mnemonic_chars['-'] = '-';
2454 mnemonic_chars['.'] = '.';
2455 identifier_chars['_'] = '_';
2456 identifier_chars['.'] = '.';
2457
2458 for (p = operand_special_chars; *p != '\0'; p++)
2459 operand_chars[(unsigned char) *p] = *p;
2460 }
2461
2462 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2463 if (IS_ELF)
2464 {
2465 record_alignment (text_section, 2);
2466 record_alignment (data_section, 2);
2467 record_alignment (bss_section, 2);
2468 }
2469 #endif
2470
2471 if (flag_code == CODE_64BIT)
2472 {
2473 #if defined (OBJ_COFF) && defined (TE_PE)
2474 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2475 ? 32 : 16);
2476 #else
2477 x86_dwarf2_return_column = 16;
2478 #endif
2479 x86_cie_data_alignment = -8;
2480 }
2481 else
2482 {
2483 x86_dwarf2_return_column = 8;
2484 x86_cie_data_alignment = -4;
2485 }
2486 }
2487
2488 void
2489 i386_print_statistics (FILE *file)
2490 {
2491 hash_print_statistics (file, "i386 opcode", op_hash);
2492 hash_print_statistics (file, "i386 register", reg_hash);
2493 }
2494 \f
2495 #ifdef DEBUG386
2496
2497 /* Debugging routines for md_assemble. */
2498 static void pte (insn_template *);
2499 static void pt (i386_operand_type);
2500 static void pe (expressionS *);
2501 static void ps (symbolS *);
2502
2503 static void
2504 pi (char *line, i386_insn *x)
2505 {
2506 unsigned int j;
2507
2508 fprintf (stdout, "%s: template ", line);
2509 pte (&x->tm);
2510 fprintf (stdout, " address: base %s index %s scale %x\n",
2511 x->base_reg ? x->base_reg->reg_name : "none",
2512 x->index_reg ? x->index_reg->reg_name : "none",
2513 x->log2_scale_factor);
2514 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2515 x->rm.mode, x->rm.reg, x->rm.regmem);
2516 fprintf (stdout, " sib: base %x index %x scale %x\n",
2517 x->sib.base, x->sib.index, x->sib.scale);
2518 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2519 (x->rex & REX_W) != 0,
2520 (x->rex & REX_R) != 0,
2521 (x->rex & REX_X) != 0,
2522 (x->rex & REX_B) != 0);
2523 for (j = 0; j < x->operands; j++)
2524 {
2525 fprintf (stdout, " #%d: ", j + 1);
2526 pt (x->types[j]);
2527 fprintf (stdout, "\n");
2528 if (x->types[j].bitfield.reg8
2529 || x->types[j].bitfield.reg16
2530 || x->types[j].bitfield.reg32
2531 || x->types[j].bitfield.reg64
2532 || x->types[j].bitfield.regmmx
2533 || x->types[j].bitfield.regxmm
2534 || x->types[j].bitfield.regymm
2535 || x->types[j].bitfield.sreg2
2536 || x->types[j].bitfield.sreg3
2537 || x->types[j].bitfield.control
2538 || x->types[j].bitfield.debug
2539 || x->types[j].bitfield.test)
2540 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2541 if (operand_type_check (x->types[j], imm))
2542 pe (x->op[j].imms);
2543 if (operand_type_check (x->types[j], disp))
2544 pe (x->op[j].disps);
2545 }
2546 }
2547
2548 static void
2549 pte (insn_template *t)
2550 {
2551 unsigned int j;
2552 fprintf (stdout, " %d operands ", t->operands);
2553 fprintf (stdout, "opcode %x ", t->base_opcode);
2554 if (t->extension_opcode != None)
2555 fprintf (stdout, "ext %x ", t->extension_opcode);
2556 if (t->opcode_modifier.d)
2557 fprintf (stdout, "D");
2558 if (t->opcode_modifier.w)
2559 fprintf (stdout, "W");
2560 fprintf (stdout, "\n");
2561 for (j = 0; j < t->operands; j++)
2562 {
2563 fprintf (stdout, " #%d type ", j + 1);
2564 pt (t->operand_types[j]);
2565 fprintf (stdout, "\n");
2566 }
2567 }
2568
2569 static void
2570 pe (expressionS *e)
2571 {
2572 fprintf (stdout, " operation %d\n", e->X_op);
2573 fprintf (stdout, " add_number %ld (%lx)\n",
2574 (long) e->X_add_number, (long) e->X_add_number);
2575 if (e->X_add_symbol)
2576 {
2577 fprintf (stdout, " add_symbol ");
2578 ps (e->X_add_symbol);
2579 fprintf (stdout, "\n");
2580 }
2581 if (e->X_op_symbol)
2582 {
2583 fprintf (stdout, " op_symbol ");
2584 ps (e->X_op_symbol);
2585 fprintf (stdout, "\n");
2586 }
2587 }
2588
2589 static void
2590 ps (symbolS *s)
2591 {
2592 fprintf (stdout, "%s type %s%s",
2593 S_GET_NAME (s),
2594 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2595 segment_name (S_GET_SEGMENT (s)));
2596 }
2597
2598 static struct type_name
2599 {
2600 i386_operand_type mask;
2601 const char *name;
2602 }
2603 const type_names[] =
2604 {
2605 { OPERAND_TYPE_REG8, "r8" },
2606 { OPERAND_TYPE_REG16, "r16" },
2607 { OPERAND_TYPE_REG32, "r32" },
2608 { OPERAND_TYPE_REG64, "r64" },
2609 { OPERAND_TYPE_IMM8, "i8" },
2610   { OPERAND_TYPE_IMM8S, "i8s" },
2611 { OPERAND_TYPE_IMM16, "i16" },
2612 { OPERAND_TYPE_IMM32, "i32" },
2613 { OPERAND_TYPE_IMM32S, "i32s" },
2614 { OPERAND_TYPE_IMM64, "i64" },
2615 { OPERAND_TYPE_IMM1, "i1" },
2616 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2617 { OPERAND_TYPE_DISP8, "d8" },
2618 { OPERAND_TYPE_DISP16, "d16" },
2619 { OPERAND_TYPE_DISP32, "d32" },
2620 { OPERAND_TYPE_DISP32S, "d32s" },
2621 { OPERAND_TYPE_DISP64, "d64" },
2622 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2623 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2624 { OPERAND_TYPE_CONTROL, "control reg" },
2625 { OPERAND_TYPE_TEST, "test reg" },
2626 { OPERAND_TYPE_DEBUG, "debug reg" },
2627 { OPERAND_TYPE_FLOATREG, "FReg" },
2628 { OPERAND_TYPE_FLOATACC, "FAcc" },
2629 { OPERAND_TYPE_SREG2, "SReg2" },
2630 { OPERAND_TYPE_SREG3, "SReg3" },
2631 { OPERAND_TYPE_ACC, "Acc" },
2632 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2633 { OPERAND_TYPE_REGMMX, "rMMX" },
2634 { OPERAND_TYPE_REGXMM, "rXMM" },
2635 { OPERAND_TYPE_REGYMM, "rYMM" },
2636 { OPERAND_TYPE_ESSEG, "es" },
2637 };
2638
2639 static void
2640 pt (i386_operand_type t)
2641 {
2642 unsigned int j;
2643 i386_operand_type a;
2644
2645 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2646 {
2647 a = operand_type_and (t, type_names[j].mask);
2648 if (!operand_type_all_zero (&a))
2649 fprintf (stdout, "%s, ", type_names[j].name);
2650 }
2651 fflush (stdout);
2652 }
2653
2654 #endif /* DEBUG386 */
2655 \f
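/* Translate a fixup described by SIZE, PCREL and SIGN into a BFD
   relocation code, honouring an explicitly requested relocation OTHER
   when it is compatible.  */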
2656 static bfd_reloc_code_real_type
2657 reloc (unsigned int size,
2658 int pcrel,
2659 int sign,
2660 bfd_reloc_code_real_type other)
2661 {
2662 if (other != NO_RELOC)
2663 {
2664 reloc_howto_type *rel;
2665
2666 if (size == 8)
2667 switch (other)
2668 {
2669 case BFD_RELOC_X86_64_GOT32:
2670 return BFD_RELOC_X86_64_GOT64;
2671 break;
2672 case BFD_RELOC_X86_64_PLTOFF64:
2673 return BFD_RELOC_X86_64_PLTOFF64;
2674 break;
2675 case BFD_RELOC_X86_64_GOTPC32:
2676 other = BFD_RELOC_X86_64_GOTPC64;
2677 break;
2678 case BFD_RELOC_X86_64_GOTPCREL:
2679 other = BFD_RELOC_X86_64_GOTPCREL64;
2680 break;
2681 case BFD_RELOC_X86_64_TPOFF32:
2682 other = BFD_RELOC_X86_64_TPOFF64;
2683 break;
2684 case BFD_RELOC_X86_64_DTPOFF32:
2685 other = BFD_RELOC_X86_64_DTPOFF64;
2686 break;
2687 default:
2688 break;
2689 }
2690
2691 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2692 if (other == BFD_RELOC_SIZE32)
2693 {
2694 if (size == 8)
2695 return BFD_RELOC_SIZE64;
2696 if (pcrel)
2697 as_bad (_("there are no pc-relative size relocations"));
2698 }
2699 #endif
2700
2701 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2702 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2703 sign = -1;
2704
2705 rel = bfd_reloc_type_lookup (stdoutput, other);
2706 if (!rel)
2707 as_bad (_("unknown relocation (%u)"), other);
2708 else if (size != bfd_get_reloc_size (rel))
2709 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2710 bfd_get_reloc_size (rel),
2711 size);
2712 else if (pcrel && !rel->pc_relative)
2713 as_bad (_("non-pc-relative relocation for pc-relative field"));
2714 else if ((rel->complain_on_overflow == complain_overflow_signed
2715 && !sign)
2716 || (rel->complain_on_overflow == complain_overflow_unsigned
2717 && sign > 0))
2718 as_bad (_("relocated field and relocation type differ in signedness"));
2719 else
2720 return other;
2721 return NO_RELOC;
2722 }
2723
2724 if (pcrel)
2725 {
2726 if (!sign)
2727 as_bad (_("there are no unsigned pc-relative relocations"));
2728 switch (size)
2729 {
2730 case 1: return BFD_RELOC_8_PCREL;
2731 case 2: return BFD_RELOC_16_PCREL;
2732 case 4: return BFD_RELOC_32_PCREL;
2733 case 8: return BFD_RELOC_64_PCREL;
2734 }
2735 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2736 }
2737 else
2738 {
2739 if (sign > 0)
2740 switch (size)
2741 {
2742 case 4: return BFD_RELOC_X86_64_32S;
2743 }
2744 else
2745 switch (size)
2746 {
2747 case 1: return BFD_RELOC_8;
2748 case 2: return BFD_RELOC_16;
2749 case 4: return BFD_RELOC_32;
2750 case 8: return BFD_RELOC_64;
2751 }
2752 as_bad (_("cannot do %s %u byte relocation"),
2753 sign > 0 ? "signed" : "unsigned", size);
2754 }
2755
2756 return NO_RELOC;
2757 }
2758
2759 /* Here we decide which fixups can be adjusted to make them relative to
2760 the beginning of the section instead of the symbol. Basically we need
2761 to make sure that the dynamic relocations are done correctly, so in
2762 some cases we force the original symbol to be used. */
2763
2764 int
2765 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2766 {
2767 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2768 if (!IS_ELF)
2769 return 1;
2770
2771 /* Don't adjust pc-relative references to merge sections in 64-bit
2772 mode. */
2773 if (use_rela_relocations
2774 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2775 && fixP->fx_pcrel)
2776 return 0;
2777
2778 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2779 and changed later by validate_fix. */
2780 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2781 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2782 return 0;
2783
2784 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2785 for size relocations. */
2786 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2787 || fixP->fx_r_type == BFD_RELOC_SIZE64
2788 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2789 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2790 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2791 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2792 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2793 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2794 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2795 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2796 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2797 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2798 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2799 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2800 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2801 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2802 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2803 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2804 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2805 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2806 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2807 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2808 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2809 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2810 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2811 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2812 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2813 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2814 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2815 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2816 return 0;
2817 #endif
2818 return 1;
2819 }
2820
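/* Classify x87 mnemonic MNEMONIC for Intel syntax operand handling:
   0 = not a math operation, 1 = ordinary floating-point operation,
   2 = integer operation, 3 = control/state operation.  */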
2821 static int
2822 intel_float_operand (const char *mnemonic)
2823 {
2824 /* Note that the value returned is meaningful only for opcodes with (memory)
2825 operands, hence the code here is free to improperly handle opcodes that
2826 have no operands (for better performance and smaller code). */
2827
2828 if (mnemonic[0] != 'f')
2829 return 0; /* non-math */
2830
2831 switch (mnemonic[1])
2832 {
2833 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2834      the fs segment override prefix are not currently handled, because no
2835      call path can make opcodes without operands get here.  */
2836 case 'i':
2837 return 2 /* integer op */;
2838 case 'l':
2839 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2840 return 3; /* fldcw/fldenv */
2841 break;
2842 case 'n':
2843 if (mnemonic[2] != 'o' /* fnop */)
2844 return 3; /* non-waiting control op */
2845 break;
2846 case 'r':
2847 if (mnemonic[2] == 's')
2848 return 3; /* frstor/frstpm */
2849 break;
2850 case 's':
2851 if (mnemonic[2] == 'a')
2852 return 3; /* fsave */
2853 if (mnemonic[2] == 't')
2854 {
2855 switch (mnemonic[3])
2856 {
2857 case 'c': /* fstcw */
2858 case 'd': /* fstdw */
2859 case 'e': /* fstenv */
2860 case 's': /* fsts[gw] */
2861 return 3;
2862 }
2863 }
2864 break;
2865 case 'x':
2866 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2867 return 0; /* fxsave/fxrstor are not really math ops */
2868 break;
2869 }
2870
2871 return 1;
2872 }
2873
2874 /* Build the VEX prefix. */
2875
2876 static void
2877 build_vex_prefix (const insn_template *t)
2878 {
2879 unsigned int register_specifier;
2880 unsigned int implied_prefix;
2881 unsigned int vector_length;
2882
2883 /* Check register specifier. */
2884 if (i.vex.register_specifier)
2885 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2886 else
2887 register_specifier = 0xf;
2888
2889   /* Use 2-byte VEX prefix by swapping destination and source
2890 operand. */
2891 if (!i.swap_operand
2892 && i.operands == i.reg_operands
2893 && i.tm.opcode_modifier.vexopcode == VEX0F
2894 && i.tm.opcode_modifier.s
2895 && i.rex == REX_B)
2896 {
2897 unsigned int xchg = i.operands - 1;
2898 union i386_op temp_op;
2899 i386_operand_type temp_type;
2900
2901 temp_type = i.types[xchg];
2902 i.types[xchg] = i.types[0];
2903 i.types[0] = temp_type;
2904 temp_op = i.op[xchg];
2905 i.op[xchg] = i.op[0];
2906 i.op[0] = temp_op;
2907
2908 gas_assert (i.rm.mode == 3);
2909
2910 i.rex = REX_R;
2911 xchg = i.rm.regmem;
2912 i.rm.regmem = i.rm.reg;
2913 i.rm.reg = xchg;
2914
2915 /* Use the next insn. */
2916 i.tm = t[1];
2917 }
2918
2919 if (i.tm.opcode_modifier.vex == VEXScalar)
2920 vector_length = avxscalar;
2921 else
2922 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2923
2924 switch ((i.tm.base_opcode >> 8) & 0xff)
2925 {
2926 case 0:
2927 implied_prefix = 0;
2928 break;
2929 case DATA_PREFIX_OPCODE:
2930 implied_prefix = 1;
2931 break;
2932 case REPE_PREFIX_OPCODE:
2933 implied_prefix = 2;
2934 break;
2935 case REPNE_PREFIX_OPCODE:
2936 implied_prefix = 3;
2937 break;
2938 default:
2939 abort ();
2940 }
2941
2942 /* Use 2-byte VEX prefix if possible. */
2943 if (i.tm.opcode_modifier.vexopcode == VEX0F
2944 && i.tm.opcode_modifier.vexw != VEXW1
2945 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2946 {
2947 /* 2-byte VEX prefix. */
2948 unsigned int r;
2949
2950 i.vex.length = 2;
2951 i.vex.bytes[0] = 0xc5;
2952
2953 /* Check the REX.R bit. */
2954 r = (i.rex & REX_R) ? 0 : 1;
2955 i.vex.bytes[1] = (r << 7
2956 | register_specifier << 3
2957 | vector_length << 2
2958 | implied_prefix);
2959 }
2960 else
2961 {
2962 /* 3-byte VEX prefix. */
2963 unsigned int m, w;
2964
2965 i.vex.length = 3;
2966
2967 switch (i.tm.opcode_modifier.vexopcode)
2968 {
2969 case VEX0F:
2970 m = 0x1;
2971 i.vex.bytes[0] = 0xc4;
2972 break;
2973 case VEX0F38:
2974 m = 0x2;
2975 i.vex.bytes[0] = 0xc4;
2976 break;
2977 case VEX0F3A:
2978 m = 0x3;
2979 i.vex.bytes[0] = 0xc4;
2980 break;
2981 case XOP08:
2982 m = 0x8;
2983 i.vex.bytes[0] = 0x8f;
2984 break;
2985 case XOP09:
2986 m = 0x9;
2987 i.vex.bytes[0] = 0x8f;
2988 break;
2989 case XOP0A:
2990 m = 0xa;
2991 i.vex.bytes[0] = 0x8f;
2992 break;
2993 default:
2994 abort ();
2995 }
2996
2997       /* The high 3 bits of the second VEX byte are 1's complement
2998 of RXB bits from REX. */
2999 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3000
3001 /* Check the REX.W bit. */
3002 w = (i.rex & REX_W) ? 1 : 0;
3003 if (i.tm.opcode_modifier.vexw)
3004 {
3005 if (w)
3006 abort ();
3007
3008 if (i.tm.opcode_modifier.vexw == VEXW1)
3009 w = 1;
3010 }
3011
3012 i.vex.bytes[2] = (w << 7
3013 | register_specifier << 3
3014 | vector_length << 2
3015 | implied_prefix);
3016 }
3017 }
3018
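/* Convert the opcode-suffix "immediate extension" used by some
   instructions into a fake 8-bit immediate operand.  */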
3019 static void
3020 process_immext (void)
3021 {
3022 expressionS *exp;
3023
3024 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3025 && i.operands > 0)
3026 {
3027 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3028 with an opcode suffix which is coded in the same place as an
3029 8-bit immediate field would be.
3030 Here we check those operands and remove them afterwards. */
3031 unsigned int x;
3032
3033 for (x = 0; x < i.operands; x++)
3034 if (register_number (i.op[x].regs) != x)
3035 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3036 register_prefix, i.op[x].regs->reg_name, x + 1,
3037 i.tm.name);
3038
3039 i.operands = 0;
3040 }
3041
3042 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3043 which is coded in the same place as an 8-bit immediate field
3044 would be. Here we fake an 8-bit immediate operand from the
3045 opcode suffix stored in tm.extension_opcode.
3046
3047      AVX instructions also use this encoding for some
3048      3-argument instructions.  */
3049
3050 gas_assert (i.imm_operands == 0
3051 && (i.operands <= 2
3052 || (i.tm.opcode_modifier.vex
3053 && i.operands <= 4)));
3054
3055 exp = &im_expressions[i.imm_operands++];
3056 i.op[i.operands].imms = exp;
3057 i.types[i.operands] = imm8;
3058 i.operands++;
3059 exp->X_op = O_constant;
3060 exp->X_add_number = i.tm.extension_opcode;
3061 i.tm.extension_opcode = None;
3062 }
3063
3064
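/* Check that an xacquire/xrelease (HLE) prefix is valid for the current
   instruction.  Return 1 if it is, 0 after issuing a diagnostic.  */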
3065 static int
3066 check_hle (void)
3067 {
3068 switch (i.tm.opcode_modifier.hleprefixok)
3069 {
3070 default:
3071 abort ();
3072 case HLEPrefixNone:
3073 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3074 as_bad (_("invalid instruction `%s' after `xacquire'"),
3075 i.tm.name);
3076 else
3077 as_bad (_("invalid instruction `%s' after `xrelease'"),
3078 i.tm.name);
3079 return 0;
3080 case HLEPrefixLock:
3081 if (i.prefix[LOCK_PREFIX])
3082 return 1;
3083 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3084 as_bad (_("missing `lock' with `xacquire'"));
3085 else
3086 as_bad (_("missing `lock' with `xrelease'"));
3087 return 0;
3088 case HLEPrefixAny:
3089 return 1;
3090 case HLEPrefixRelease:
3091 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3092 {
3093 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3094 i.tm.name);
3095 return 0;
3096 }
3097 if (i.mem_operands == 0
3098 || !operand_type_check (i.types[i.operands - 1], anymem))
3099 {
3100 as_bad (_("memory destination needed for instruction `%s'"
3101 " after `xrelease'"), i.tm.name);
3102 return 0;
3103 }
3104 return 1;
3105 }
3106 }
3107
3108 /* This is the guts of the machine-dependent assembler. LINE points to a
3109 machine dependent instruction. This function is supposed to emit
3110 the frags/bytes it assembles to. */
3111
3112 void
3113 md_assemble (char *line)
3114 {
3115 unsigned int j;
3116 char mnemonic[MAX_MNEM_SIZE];
3117 const insn_template *t;
3118
3119 /* Initialize globals. */
3120 memset (&i, '\0', sizeof (i));
3121 for (j = 0; j < MAX_OPERANDS; j++)
3122 i.reloc[j] = NO_RELOC;
3123 memset (disp_expressions, '\0', sizeof (disp_expressions));
3124 memset (im_expressions, '\0', sizeof (im_expressions));
3125 save_stack_p = save_stack;
3126
3127 /* First parse an instruction mnemonic & call i386_operand for the operands.
3128 We assume that the scrubber has arranged it so that line[0] is the valid
3129 start of a (possibly prefixed) mnemonic. */
3130
3131 line = parse_insn (line, mnemonic);
3132 if (line == NULL)
3133 return;
3134
3135 line = parse_operands (line, mnemonic);
3136 this_operand = -1;
3137 if (line == NULL)
3138 return;
3139
3140 /* Now we've parsed the mnemonic into a set of templates, and have the
3141 operands at hand. */
3142
3143 /* All intel opcodes have reversed operands except for "bound" and
3144 "enter". We also don't reverse intersegment "jmp" and "call"
3145 instructions with 2 immediate operands so that the immediate segment
3146 precedes the offset, as it does when in AT&T mode. */
3147 if (intel_syntax
3148 && i.operands > 1
3149 && (strcmp (mnemonic, "bound") != 0)
3150 && (strcmp (mnemonic, "invlpga") != 0)
3151 && !(operand_type_check (i.types[0], imm)
3152 && operand_type_check (i.types[1], imm)))
3153 swap_operands ();
3154
3155 /* The order of the immediates should be reversed
3156      for the 2-immediate extrq and insertq instructions.  */
3157 if (i.imm_operands == 2
3158 && (strcmp (mnemonic, "extrq") == 0
3159 || strcmp (mnemonic, "insertq") == 0))
3160 swap_2_operands (0, 1);
3161
3162 if (i.imm_operands)
3163 optimize_imm ();
3164
3165 /* Don't optimize displacement for movabs since it only takes 64bit
3166 displacement. */
3167 if (i.disp_operands
3168 && i.disp_encoding != disp_encoding_32bit
3169 && (flag_code != CODE_64BIT
3170 || strcmp (mnemonic, "movabs") != 0))
3171 optimize_disp ();
3172
3173 /* Next, we find a template that matches the given insn,
3174 making sure the overlap of the given operands types is consistent
3175 with the template operand types. */
3176
3177 if (!(t = match_template ()))
3178 return;
3179
3180 if (sse_check != check_none
3181 && !i.tm.opcode_modifier.noavx
3182 && (i.tm.cpu_flags.bitfield.cpusse
3183 || i.tm.cpu_flags.bitfield.cpusse2
3184 || i.tm.cpu_flags.bitfield.cpusse3
3185 || i.tm.cpu_flags.bitfield.cpussse3
3186 || i.tm.cpu_flags.bitfield.cpusse4_1
3187 || i.tm.cpu_flags.bitfield.cpusse4_2))
3188 {
3189 (sse_check == check_warning
3190 ? as_warn
3191 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3192 }
3193
3194 /* Zap movzx and movsx suffix. The suffix has been set from
3195 "word ptr" or "byte ptr" on the source operand in Intel syntax
3196 or extracted from mnemonic in AT&T syntax. But we'll use
3197 the destination register to choose the suffix for encoding. */
3198 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3199 {
3200 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3201 there is no suffix, the default will be byte extension. */
3202 if (i.reg_operands != 2
3203 && !i.suffix
3204 && intel_syntax)
3205 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3206
3207 i.suffix = 0;
3208 }
3209
3210 if (i.tm.opcode_modifier.fwait)
3211 if (!add_prefix (FWAIT_OPCODE))
3212 return;
3213
3214 /* Check for lock without a lockable instruction. Destination operand
3215 must be memory unless it is xchg (0x86). */
3216 if (i.prefix[LOCK_PREFIX]
3217 && (!i.tm.opcode_modifier.islockable
3218 || i.mem_operands == 0
3219 || (i.tm.base_opcode != 0x86
3220 && !operand_type_check (i.types[i.operands - 1], anymem))))
3221 {
3222 as_bad (_("expecting lockable instruction after `lock'"));
3223 return;
3224 }
3225
3226 /* Check if HLE prefix is OK. */
3227 if (i.have_hle && !check_hle ())
3228 return;
3229
3230 /* Check string instruction segment overrides. */
3231 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3232 {
3233 if (!check_string ())
3234 return;
3235 i.disp_operands = 0;
3236 }
3237
3238 if (!process_suffix ())
3239 return;
3240
3241 /* Update operand types. */
3242 for (j = 0; j < i.operands; j++)
3243 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3244
3245 /* Make still unresolved immediate matches conform to size of immediate
3246 given in i.suffix. */
3247 if (!finalize_imm ())
3248 return;
3249
3250 if (i.types[0].bitfield.imm1)
3251 i.imm_operands = 0; /* kludge for shift insns. */
3252
3253 /* We only need to check those implicit registers for instructions
3254 with 3 operands or less. */
3255 if (i.operands <= 3)
3256 for (j = 0; j < i.operands; j++)
3257 if (i.types[j].bitfield.inoutportreg
3258 || i.types[j].bitfield.shiftcount
3259 || i.types[j].bitfield.acc
3260 || i.types[j].bitfield.floatacc)
3261 i.reg_operands--;
3262
3263 /* ImmExt should be processed after SSE2AVX. */
3264 if (!i.tm.opcode_modifier.sse2avx
3265 && i.tm.opcode_modifier.immext)
3266 process_immext ();
3267
3268 /* For insns with operands there are more diddles to do to the opcode. */
3269 if (i.operands)
3270 {
3271 if (!process_operands ())
3272 return;
3273 }
3274 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3275 {
3276 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3277 as_warn (_("translating to `%sp'"), i.tm.name);
3278 }
3279
3280 if (i.tm.opcode_modifier.vex)
3281 build_vex_prefix (t);
3282
3283 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3284 instructions may define INT_OPCODE as well, so avoid this corner
3285 case for those instructions that use MODRM. */
3286 if (i.tm.base_opcode == INT_OPCODE
3287 && !i.tm.opcode_modifier.modrm
3288 && i.op[0].imms->X_add_number == 3)
3289 {
3290 i.tm.base_opcode = INT3_OPCODE;
3291 i.imm_operands = 0;
3292 }
3293
3294 if ((i.tm.opcode_modifier.jump
3295 || i.tm.opcode_modifier.jumpbyte
3296 || i.tm.opcode_modifier.jumpdword)
3297 && i.op[0].disps->X_op == O_constant)
3298 {
3299 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3300 the absolute address given by the constant. Since ix86 jumps and
3301 calls are pc relative, we need to generate a reloc. */
3302 i.op[0].disps->X_add_symbol = &abs_symbol;
3303 i.op[0].disps->X_op = O_symbol;
3304 }
3305
3306 if (i.tm.opcode_modifier.rex64)
3307 i.rex |= REX_W;
3308
3309 /* For 8 bit registers we need an empty rex prefix. Also if the
3310 instruction already has a prefix, we need to convert old
3311 registers to new ones. */
3312
3313 if ((i.types[0].bitfield.reg8
3314 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3315 || (i.types[1].bitfield.reg8
3316 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3317 || ((i.types[0].bitfield.reg8
3318 || i.types[1].bitfield.reg8)
3319 && i.rex != 0))
3320 {
3321 int x;
3322
3323 i.rex |= REX_OPCODE;
3324 for (x = 0; x < 2; x++)
3325 {
3326 /* Look for 8 bit operand that uses old registers. */
3327 if (i.types[x].bitfield.reg8
3328 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3329 {
3330 /* In case it is "hi" register, give up. */
3331 if (i.op[x].regs->reg_num > 3)
3332 as_bad (_("can't encode register '%s%s' in an "
3333 "instruction requiring REX prefix."),
3334 register_prefix, i.op[x].regs->reg_name);
3335
3336 /* Otherwise it is equivalent to the extended register.
3337 Since the encoding doesn't change this is merely
3338 cosmetic cleanup for debug output. */
3339
3340 i.op[x].regs = i.op[x].regs + 8;
3341 }
3342 }
3343 }
3344
3345 if (i.rex != 0)
3346 add_prefix (REX_OPCODE | i.rex);
3347
3348 /* We are ready to output the insn. */
3349 output_insn ();
3350 }
3351
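/* Parse any prefixes and the mnemonic at LINE into MNEMONIC, leaving the
   matching opcode templates in current_templates.  Return a pointer just
   past the mnemonic, or NULL after reporting an error.  */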
3352 static char *
3353 parse_insn (char *line, char *mnemonic)
3354 {
3355 char *l = line;
3356 char *token_start = l;
3357 char *mnem_p;
3358 int supported;
3359 const insn_template *t;
3360 char *dot_p = NULL;
3361
3362 /* Non-zero if we found a prefix only acceptable with string insns. */
3363 const char *expecting_string_instruction = NULL;
3364
3365 while (1)
3366 {
3367 mnem_p = mnemonic;
3368 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3369 {
3370 if (*mnem_p == '.')
3371 dot_p = mnem_p;
3372 mnem_p++;
3373 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3374 {
3375 as_bad (_("no such instruction: `%s'"), token_start);
3376 return NULL;
3377 }
3378 l++;
3379 }
3380 if (!is_space_char (*l)
3381 && *l != END_OF_INSN
3382 && (intel_syntax
3383 || (*l != PREFIX_SEPARATOR
3384 && *l != ',')))
3385 {
3386 as_bad (_("invalid character %s in mnemonic"),
3387 output_invalid (*l));
3388 return NULL;
3389 }
3390 if (token_start == l)
3391 {
3392 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3393 as_bad (_("expecting prefix; got nothing"));
3394 else
3395 as_bad (_("expecting mnemonic; got nothing"));
3396 return NULL;
3397 }
3398
3399 /* Look up instruction (or prefix) via hash table. */
3400 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3401
3402 if (*l != END_OF_INSN
3403 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3404 && current_templates
3405 && current_templates->start->opcode_modifier.isprefix)
3406 {
3407 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3408 {
3409 as_bad ((flag_code != CODE_64BIT
3410 ? _("`%s' is only supported in 64-bit mode")
3411 : _("`%s' is not supported in 64-bit mode")),
3412 current_templates->start->name);
3413 return NULL;
3414 }
3415 /* If we are in 16-bit mode, do not allow addr16 or data16.
3416 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3417 if ((current_templates->start->opcode_modifier.size16
3418 || current_templates->start->opcode_modifier.size32)
3419 && flag_code != CODE_64BIT
3420 && (current_templates->start->opcode_modifier.size32
3421 ^ (flag_code == CODE_16BIT)))
3422 {
3423 as_bad (_("redundant %s prefix"),
3424 current_templates->start->name);
3425 return NULL;
3426 }
3427 /* Add prefix, checking for repeated prefixes. */
3428 switch (add_prefix (current_templates->start->base_opcode))
3429 {
3430 case PREFIX_EXIST:
3431 return NULL;
3432 case PREFIX_REP:
3433 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3434 i.have_hle = 1;
3435 else
3436 expecting_string_instruction = current_templates->start->name;
3437 break;
3438 default:
3439 break;
3440 }
3441 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3442 token_start = ++l;
3443 }
3444 else
3445 break;
3446 }
3447
3448 if (!current_templates)
3449 {
3450 /* Check if we should swap operand or force 32bit displacement in
3451 encoding. */
3452 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3453 i.swap_operand = 1;
3454 else if (mnem_p - 3 == dot_p
3455 && dot_p[1] == 'd'
3456 && dot_p[2] == '8')
3457 i.disp_encoding = disp_encoding_8bit;
3458 else if (mnem_p - 4 == dot_p
3459 && dot_p[1] == 'd'
3460 && dot_p[2] == '3'
3461 && dot_p[3] == '2')
3462 i.disp_encoding = disp_encoding_32bit;
3463 else
3464 goto check_suffix;
3465 mnem_p = dot_p;
3466 *dot_p = '\0';
3467 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3468 }
3469
3470 if (!current_templates)
3471 {
3472 check_suffix:
3473 /* See if we can get a match by trimming off a suffix. */
3474 switch (mnem_p[-1])
3475 {
3476 case WORD_MNEM_SUFFIX:
3477 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3478 i.suffix = SHORT_MNEM_SUFFIX;
3479 else
3480 case BYTE_MNEM_SUFFIX:
3481 case QWORD_MNEM_SUFFIX:
3482 i.suffix = mnem_p[-1];
3483 mnem_p[-1] = '\0';
3484 current_templates = (const templates *) hash_find (op_hash,
3485 mnemonic);
3486 break;
3487 case SHORT_MNEM_SUFFIX:
3488 case LONG_MNEM_SUFFIX:
3489 if (!intel_syntax)
3490 {
3491 i.suffix = mnem_p[-1];
3492 mnem_p[-1] = '\0';
3493 current_templates = (const templates *) hash_find (op_hash,
3494 mnemonic);
3495 }
3496 break;
3497
3498 /* Intel Syntax. */
3499 case 'd':
3500 if (intel_syntax)
3501 {
3502 if (intel_float_operand (mnemonic) == 1)
3503 i.suffix = SHORT_MNEM_SUFFIX;
3504 else
3505 i.suffix = LONG_MNEM_SUFFIX;
3506 mnem_p[-1] = '\0';
3507 current_templates = (const templates *) hash_find (op_hash,
3508 mnemonic);
3509 }
3510 break;
3511 }
3512 if (!current_templates)
3513 {
3514 as_bad (_("no such instruction: `%s'"), token_start);
3515 return NULL;
3516 }
3517 }
3518
3519 if (current_templates->start->opcode_modifier.jump
3520 || current_templates->start->opcode_modifier.jumpbyte)
3521 {
3522 /* Check for a branch hint. We allow ",pt" and ",pn" for
3523 predict taken and predict not taken respectively.
3524 I'm not sure that branch hints actually do anything on loop
3525 and jcxz insns (JumpByte) for current Pentium4 chips. They
3526 may work in the future and it doesn't hurt to accept them
3527 now. */
3528 if (l[0] == ',' && l[1] == 'p')
3529 {
3530 if (l[2] == 't')
3531 {
3532 if (!add_prefix (DS_PREFIX_OPCODE))
3533 return NULL;
3534 l += 3;
3535 }
3536 else if (l[2] == 'n')
3537 {
3538 if (!add_prefix (CS_PREFIX_OPCODE))
3539 return NULL;
3540 l += 3;
3541 }
3542 }
3543 }
3544 /* Any other comma loses. */
3545 if (*l == ',')
3546 {
3547 as_bad (_("invalid character %s in mnemonic"),
3548 output_invalid (*l));
3549 return NULL;
3550 }
3551
3552 /* Check if instruction is supported on specified architecture. */
3553 supported = 0;
3554 for (t = current_templates->start; t < current_templates->end; ++t)
3555 {
3556 supported |= cpu_flags_match (t);
3557 if (supported == CPU_FLAGS_PERFECT_MATCH)
3558 goto skip;
3559 }
3560
3561 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3562 {
3563 as_bad (flag_code == CODE_64BIT
3564 ? _("`%s' is not supported in 64-bit mode")
3565 : _("`%s' is only supported in 64-bit mode"),
3566 current_templates->start->name);
3567 return NULL;
3568 }
3569 if (supported != CPU_FLAGS_PERFECT_MATCH)
3570 {
3571 as_bad (_("`%s' is not supported on `%s%s'"),
3572 current_templates->start->name,
3573 cpu_arch_name ? cpu_arch_name : default_arch,
3574 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3575 return NULL;
3576 }
3577
3578 skip:
3579 if (!cpu_arch_flags.bitfield.cpui386
3580 && (flag_code != CODE_16BIT))
3581 {
3582 as_warn (_("use .code16 to ensure correct addressing mode"));
3583 }
3584
3585 /* Check for rep/repne without a string (or other allowed) instruction. */
3586 if (expecting_string_instruction)
3587 {
3588 static templates override;
3589
3590 for (t = current_templates->start; t < current_templates->end; ++t)
3591 if (t->opcode_modifier.repprefixok)
3592 break;
3593 if (t >= current_templates->end)
3594 {
3595 as_bad (_("expecting string instruction after `%s'"),
3596 expecting_string_instruction);
3597 return NULL;
3598 }
3599 for (override.start = t; t < current_templates->end; ++t)
3600 if (!t->opcode_modifier.repprefixok)
3601 break;
3602 override.end = t;
3603 current_templates = &override;
3604 }
3605
3606 return l;
3607 }
3608
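/* Parse the comma-separated operands following the mnemonic, recording
   them in the global insn state `i'.  Return a pointer past the operands,
   or NULL after reporting an error.  */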
3609 static char *
3610 parse_operands (char *l, const char *mnemonic)
3611 {
3612 char *token_start;
3613
3614 /* 1 if operand is pending after ','. */
3615 unsigned int expecting_operand = 0;
3616
3617 /* Non-zero if operand parens not balanced. */
3618 unsigned int paren_not_balanced;
3619
3620 while (*l != END_OF_INSN)
3621 {
3622 /* Skip optional white space before operand. */
3623 if (is_space_char (*l))
3624 ++l;
3625 if (!is_operand_char (*l) && *l != END_OF_INSN)
3626 {
3627 as_bad (_("invalid character %s before operand %d"),
3628 output_invalid (*l),
3629 i.operands + 1);
3630 return NULL;
3631 }
3632 token_start = l; /* after white space */
3633 paren_not_balanced = 0;
3634 while (paren_not_balanced || *l != ',')
3635 {
3636 if (*l == END_OF_INSN)
3637 {
3638 if (paren_not_balanced)
3639 {
3640 if (!intel_syntax)
3641 as_bad (_("unbalanced parenthesis in operand %d."),
3642 i.operands + 1);
3643 else
3644 as_bad (_("unbalanced brackets in operand %d."),
3645 i.operands + 1);
3646 return NULL;
3647 }
3648 else
3649 break; /* we are done */
3650 }
3651 else if (!is_operand_char (*l) && !is_space_char (*l))
3652 {
3653 as_bad (_("invalid character %s in operand %d"),
3654 output_invalid (*l),
3655 i.operands + 1);
3656 return NULL;
3657 }
3658 if (!intel_syntax)
3659 {
3660 if (*l == '(')
3661 ++paren_not_balanced;
3662 if (*l == ')')
3663 --paren_not_balanced;
3664 }
3665 else
3666 {
3667 if (*l == '[')
3668 ++paren_not_balanced;
3669 if (*l == ']')
3670 --paren_not_balanced;
3671 }
3672 l++;
3673 }
3674 if (l != token_start)
3675 { /* Yes, we've read in another operand. */
3676 unsigned int operand_ok;
3677 this_operand = i.operands++;
3678 i.types[this_operand].bitfield.unspecified = 1;
3679 if (i.operands > MAX_OPERANDS)
3680 {
3681 as_bad (_("spurious operands; (%d operands/instruction max)"),
3682 MAX_OPERANDS);
3683 return NULL;
3684 }
3685 /* Now parse operand adding info to 'i' as we go along. */
3686 END_STRING_AND_SAVE (l);
3687
3688 if (intel_syntax)
3689 operand_ok =
3690 i386_intel_operand (token_start,
3691 intel_float_operand (mnemonic));
3692 else
3693 operand_ok = i386_att_operand (token_start);
3694
3695 RESTORE_END_STRING (l);
3696 if (!operand_ok)
3697 return NULL;
3698 }
3699 else
3700 {
3701 if (expecting_operand)
3702 {
3703 expecting_operand_after_comma:
3704 as_bad (_("expecting operand after ','; got nothing"));
3705 return NULL;
3706 }
3707 if (*l == ',')
3708 {
3709 as_bad (_("expecting operand before ','; got nothing"));
3710 return NULL;
3711 }
3712 }
3713
3714 /* Now *l must be either ',' or END_OF_INSN. */
3715 if (*l == ',')
3716 {
3717 if (*++l == END_OF_INSN)
3718 {
3719                  /* Nothing follows the comma; complain about the missing operand.  */
3720 goto expecting_operand_after_comma;
3721 }
3722 expecting_operand = 1;
3723 }
3724 }
3725 return l;
3726 }
3727
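/* Exchange operands XCHG1 and XCHG2 together with their types and
   relocations.  */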
3728 static void
3729 swap_2_operands (int xchg1, int xchg2)
3730 {
3731 union i386_op temp_op;
3732 i386_operand_type temp_type;
3733 enum bfd_reloc_code_real temp_reloc;
3734
3735 temp_type = i.types[xchg2];
3736 i.types[xchg2] = i.types[xchg1];
3737 i.types[xchg1] = temp_type;
3738 temp_op = i.op[xchg2];
3739 i.op[xchg2] = i.op[xchg1];
3740 i.op[xchg1] = temp_op;
3741 temp_reloc = i.reloc[xchg2];
3742 i.reloc[xchg2] = i.reloc[xchg1];
3743 i.reloc[xchg1] = temp_reloc;
3744 }
3745
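/* Reverse the operand order, turning Intel syntax source order into the
   AT&T order used internally.  */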
3746 static void
3747 swap_operands (void)
3748 {
3749 switch (i.operands)
3750 {
3751 case 5:
3752 case 4:
3753 swap_2_operands (1, i.operands - 2);
3754 case 3:
3755 case 2:
3756 swap_2_operands (0, i.operands - 1);
3757 break;
3758 default:
3759 abort ();
3760 }
3761
3762 if (i.mem_operands == 2)
3763 {
3764 const seg_entry *temp_seg;
3765 temp_seg = i.seg[0];
3766 i.seg[0] = i.seg[1];
3767 i.seg[1] = temp_seg;
3768 }
3769 }
3770
3771 /* Try to ensure constant immediates are represented in the smallest
3772 opcode possible. */
3773 static void
3774 optimize_imm (void)
3775 {
3776 char guess_suffix = 0;
3777 int op;
3778
3779 if (i.suffix)
3780 guess_suffix = i.suffix;
3781 else if (i.reg_operands)
3782 {
3783 /* Figure out a suffix from the last register operand specified.
3784      We can't do this properly yet, i.e. excluding InOutPortReg,
3785 but the following works for instructions with immediates.
3786 In any case, we can't set i.suffix yet. */
3787 for (op = i.operands; --op >= 0;)
3788 if (i.types[op].bitfield.reg8)
3789 {
3790 guess_suffix = BYTE_MNEM_SUFFIX;
3791 break;
3792 }
3793 else if (i.types[op].bitfield.reg16)
3794 {
3795 guess_suffix = WORD_MNEM_SUFFIX;
3796 break;
3797 }
3798 else if (i.types[op].bitfield.reg32)
3799 {
3800 guess_suffix = LONG_MNEM_SUFFIX;
3801 break;
3802 }
3803 else if (i.types[op].bitfield.reg64)
3804 {
3805 guess_suffix = QWORD_MNEM_SUFFIX;
3806 break;
3807 }
3808 }
3809 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3810 guess_suffix = WORD_MNEM_SUFFIX;
3811
3812 for (op = i.operands; --op >= 0;)
3813 if (operand_type_check (i.types[op], imm))
3814 {
3815 switch (i.op[op].imms->X_op)
3816 {
3817 case O_constant:
3818 /* If a suffix is given, this operand may be shortened. */
3819 switch (guess_suffix)
3820 {
3821 case LONG_MNEM_SUFFIX:
3822 i.types[op].bitfield.imm32 = 1;
3823 i.types[op].bitfield.imm64 = 1;
3824 break;
3825 case WORD_MNEM_SUFFIX:
3826 i.types[op].bitfield.imm16 = 1;
3827 i.types[op].bitfield.imm32 = 1;
3828 i.types[op].bitfield.imm32s = 1;
3829 i.types[op].bitfield.imm64 = 1;
3830 break;
3831 case BYTE_MNEM_SUFFIX:
3832 i.types[op].bitfield.imm8 = 1;
3833 i.types[op].bitfield.imm8s = 1;
3834 i.types[op].bitfield.imm16 = 1;
3835 i.types[op].bitfield.imm32 = 1;
3836 i.types[op].bitfield.imm32s = 1;
3837 i.types[op].bitfield.imm64 = 1;
3838 break;
3839 }
3840
3841 /* If this operand is at most 16 bits, convert it
3842 to a signed 16 bit number before trying to see
3843 whether it will fit in an even smaller size.
3844 This allows a 16-bit operand such as $0xffe0 to
3845 be recognised as within Imm8S range. */
3846 if ((i.types[op].bitfield.imm16)
3847 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3848 {
3849 i.op[op].imms->X_add_number =
3850 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3851 }
3852 if ((i.types[op].bitfield.imm32)
3853 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3854 == 0))
3855 {
3856 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3857 ^ ((offsetT) 1 << 31))
3858 - ((offsetT) 1 << 31));
3859 }
3860 i.types[op]
3861 = operand_type_or (i.types[op],
3862 smallest_imm_type (i.op[op].imms->X_add_number));
3863
3864          /* We must avoid matching of Imm32 templates when a
3865             64-bit-only immediate is available.  */
3866 if (guess_suffix == QWORD_MNEM_SUFFIX)
3867 i.types[op].bitfield.imm32 = 0;
3868 break;
3869
3870 case O_absent:
3871 case O_register:
3872 abort ();
3873
3874 /* Symbols and expressions. */
3875 default:
3876 /* Convert symbolic operand to proper sizes for matching, but don't
3877 prevent matching a set of insns that only supports sizes other
3878 than those matching the insn suffix. */
3879 {
3880 i386_operand_type mask, allowed;
3881 const insn_template *t;
3882
3883 operand_type_set (&mask, 0);
3884 operand_type_set (&allowed, 0);
3885
3886 for (t = current_templates->start;
3887 t < current_templates->end;
3888 ++t)
3889 allowed = operand_type_or (allowed,
3890 t->operand_types[op]);
3891 switch (guess_suffix)
3892 {
3893 case QWORD_MNEM_SUFFIX:
3894 mask.bitfield.imm64 = 1;
3895 mask.bitfield.imm32s = 1;
3896 break;
3897 case LONG_MNEM_SUFFIX:
3898 mask.bitfield.imm32 = 1;
3899 break;
3900 case WORD_MNEM_SUFFIX:
3901 mask.bitfield.imm16 = 1;
3902 break;
3903 case BYTE_MNEM_SUFFIX:
3904 mask.bitfield.imm8 = 1;
3905 break;
3906 default:
3907 break;
3908 }
3909 allowed = operand_type_and (mask, allowed);
3910 if (!operand_type_all_zero (&allowed))
3911 i.types[op] = operand_type_and (i.types[op], mask);
3912 }
3913 break;
3914 }
3915 }
3916 }
3917
3918 /* Try to use the smallest displacement type too. */
3919 static void
3920 optimize_disp (void)
3921 {
3922 int op;
3923
3924 for (op = i.operands; --op >= 0;)
3925 if (operand_type_check (i.types[op], disp))
3926 {
3927 if (i.op[op].disps->X_op == O_constant)
3928 {
3929 offsetT op_disp = i.op[op].disps->X_add_number;
3930
3931 if (i.types[op].bitfield.disp16
3932 && (op_disp & ~(offsetT) 0xffff) == 0)
3933 {
3934 /* If this operand is at most 16 bits, convert
3935 to a signed 16 bit number and don't use 64bit
3936 displacement. */
3937 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3938 i.types[op].bitfield.disp64 = 0;
3939 }
3940 if (i.types[op].bitfield.disp32
3941 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3942 {
3943 /* If this operand is at most 32 bits, convert
3944 to a signed 32 bit number and don't use 64bit
3945 displacement. */
3946 op_disp &= (((offsetT) 2 << 31) - 1);
3947 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3948 i.types[op].bitfield.disp64 = 0;
3949 }
3950 if (!op_disp && i.types[op].bitfield.baseindex)
3951 {
3952 i.types[op].bitfield.disp8 = 0;
3953 i.types[op].bitfield.disp16 = 0;
3954 i.types[op].bitfield.disp32 = 0;
3955 i.types[op].bitfield.disp32s = 0;
3956 i.types[op].bitfield.disp64 = 0;
3957 i.op[op].disps = 0;
3958 i.disp_operands--;
3959 }
3960 else if (flag_code == CODE_64BIT)
3961 {
3962 if (fits_in_signed_long (op_disp))
3963 {
3964 i.types[op].bitfield.disp64 = 0;
3965 i.types[op].bitfield.disp32s = 1;
3966 }
3967 if (i.prefix[ADDR_PREFIX]
3968 && fits_in_unsigned_long (op_disp))
3969 i.types[op].bitfield.disp32 = 1;
3970 }
3971 if ((i.types[op].bitfield.disp32
3972 || i.types[op].bitfield.disp32s
3973 || i.types[op].bitfield.disp16)
3974 && fits_in_signed_byte (op_disp))
3975 i.types[op].bitfield.disp8 = 1;
3976 }
3977 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3978 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3979 {
3980 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3981 i.op[op].disps, 0, i.reloc[op]);
3982 i.types[op].bitfield.disp8 = 0;
3983 i.types[op].bitfield.disp16 = 0;
3984 i.types[op].bitfield.disp32 = 0;
3985 i.types[op].bitfield.disp32s = 0;
3986 i.types[op].bitfield.disp64 = 0;
3987 }
3988 else
3989 /* We only support 64bit displacement on constants. */
3990 i.types[op].bitfield.disp64 = 0;
3991 }
3992 }
3993
3994 /* Check if operands are valid for the instruction. */
3995
3996 static int
3997 check_VecOperands (const insn_template *t)
3998 {
3999 /* Without VSIB byte, we can't have a vector register for index. */
4000 if (!t->opcode_modifier.vecsib
4001 && i.index_reg
4002 && (i.index_reg->reg_type.bitfield.regxmm
4003 || i.index_reg->reg_type.bitfield.regymm))
4004 {
4005 i.error = unsupported_vector_index_register;
4006 return 1;
4007 }
4008
4009 /* For VSIB byte, we need a vector register for index, and all vector
4010 registers must be distinct. */
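/* E.g. (illustrative) the AVX2 gather "vgatherdps %xmm1, (%rax,%xmm2,4), %xmm3"
   uses a VSIB memory operand; the mask (%xmm1), index (%xmm2) and
   destination (%xmm3) registers must all be different.  */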
4011 if (t->opcode_modifier.vecsib)
4012 {
4013 if (!i.index_reg
4014 || !((t->opcode_modifier.vecsib == VecSIB128
4015 && i.index_reg->reg_type.bitfield.regxmm)
4016 || (t->opcode_modifier.vecsib == VecSIB256
4017 && i.index_reg->reg_type.bitfield.regymm)))
4018 {
4019 i.error = invalid_vsib_address;
4020 return 1;
4021 }
4022
4023 gas_assert (i.reg_operands == 2);
4024 gas_assert (i.types[0].bitfield.regxmm
4025 || i.types[0].bitfield.regymm);
4026 gas_assert (i.types[2].bitfield.regxmm
4027 || i.types[2].bitfield.regymm);
4028
4029 if (operand_check == check_none)
4030 return 0;
4031 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4032 && register_number (i.op[2].regs) != register_number (i.index_reg)
4033 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4034 return 0;
4035 if (operand_check == check_error)
4036 {
4037 i.error = invalid_vector_register_set;
4038 return 1;
4039 }
4040 as_warn (_("mask, index, and destination registers should be distinct"));
4041 }
4042
4043 return 0;
4044 }
4045
4046 /* Check if operands are valid for the instruction. Update VEX
4047 operand types. */
4048
4049 static int
4050 VEX_check_operands (const insn_template *t)
4051 {
4052 if (!t->opcode_modifier.vex)
4053 return 0;
4054
4055 /* Only check VEX_Imm4, which must be the first operand. */
4056 if (t->operand_types[0].bitfield.vec_imm4)
4057 {
4058 if (i.op[0].imms->X_op != O_constant
4059 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4060 {
4061 i.error = bad_imm4;
4062 return 1;
4063 }
4064
4065 /* Turn off Imm8 so that update_imm won't complain. */
4066 i.types[0] = vec_imm4;
4067 }
4068
4069 return 0;
4070 }
4071
4072 static const insn_template *
4073 match_template (void)
4074 {
4075 /* Points to template once we've found it. */
4076 const insn_template *t;
4077 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4078 i386_operand_type overlap4;
4079 unsigned int found_reverse_match;
4080 i386_opcode_modifier suffix_check;
4081 i386_operand_type operand_types [MAX_OPERANDS];
4082 int addr_prefix_disp;
4083 unsigned int j;
4084 unsigned int found_cpu_match;
4085 unsigned int check_register;
4086 enum i386_error specific_error = 0;
4087
4088 #if MAX_OPERANDS != 5
4089 # error "MAX_OPERANDS must be 5."
4090 #endif
4091
4092 found_reverse_match = 0;
4093 addr_prefix_disp = -1;
4094
4095 memset (&suffix_check, 0, sizeof (suffix_check));
4096 if (i.suffix == BYTE_MNEM_SUFFIX)
4097 suffix_check.no_bsuf = 1;
4098 else if (i.suffix == WORD_MNEM_SUFFIX)
4099 suffix_check.no_wsuf = 1;
4100 else if (i.suffix == SHORT_MNEM_SUFFIX)
4101 suffix_check.no_ssuf = 1;
4102 else if (i.suffix == LONG_MNEM_SUFFIX)
4103 suffix_check.no_lsuf = 1;
4104 else if (i.suffix == QWORD_MNEM_SUFFIX)
4105 suffix_check.no_qsuf = 1;
4106 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4107 suffix_check.no_ldsuf = 1;
4108
4109 /* Must have right number of operands. */
4110 i.error = number_of_operands_mismatch;
4111
4112 for (t = current_templates->start; t < current_templates->end; t++)
4113 {
4114 addr_prefix_disp = -1;
4115
4116 if (i.operands != t->operands)
4117 continue;
4118
4119 /* Check processor support. */
4120 i.error = unsupported;
4121 found_cpu_match = (cpu_flags_match (t)
4122 == CPU_FLAGS_PERFECT_MATCH);
4123 if (!found_cpu_match)
4124 continue;
4125
4126 /* Check old gcc support. */
4127 i.error = old_gcc_only;
4128 if (!old_gcc && t->opcode_modifier.oldgcc)
4129 continue;
4130
4131 /* Check AT&T mnemonic. */
4132 i.error = unsupported_with_intel_mnemonic;
4133 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4134 continue;
4135
4136 /* Check AT&T/Intel syntax. */
4137 i.error = unsupported_syntax;
4138 if ((intel_syntax && t->opcode_modifier.attsyntax)
4139 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4140 continue;
4141
4142 /* Check the suffix, except for some instructions in intel mode. */
4143 i.error = invalid_instruction_suffix;
4144 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4145 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4146 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4147 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4148 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4149 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4150 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4151 continue;
4152
4153 if (!operand_size_match (t))
4154 continue;
4155
4156 for (j = 0; j < MAX_OPERANDS; j++)
4157 operand_types[j] = t->operand_types[j];
4158
4159 /* In general, don't allow 64-bit operands in 32-bit mode. */
4160 if (i.suffix == QWORD_MNEM_SUFFIX
4161 && flag_code != CODE_64BIT
4162 && (intel_syntax
4163 ? (!t->opcode_modifier.ignoresize
4164 && !intel_float_operand (t->name))
4165 : intel_float_operand (t->name) != 2)
4166 && ((!operand_types[0].bitfield.regmmx
4167 && !operand_types[0].bitfield.regxmm
4168 && !operand_types[0].bitfield.regymm)
4169 || (!operand_types[t->operands > 1].bitfield.regmmx
4170 && !!operand_types[t->operands > 1].bitfield.regxmm
4171 && !!operand_types[t->operands > 1].bitfield.regymm))
4172 && (t->base_opcode != 0x0fc7
4173 || t->extension_opcode != 1 /* cmpxchg8b */))
4174 continue;
4175
4176 /* In general, don't allow 32-bit operands on pre-386. */
4177 else if (i.suffix == LONG_MNEM_SUFFIX
4178 && !cpu_arch_flags.bitfield.cpui386
4179 && (intel_syntax
4180 ? (!t->opcode_modifier.ignoresize
4181 && !intel_float_operand (t->name))
4182 : intel_float_operand (t->name) != 2)
4183 && ((!operand_types[0].bitfield.regmmx
4184 && !operand_types[0].bitfield.regxmm)
4185 || (!operand_types[t->operands > 1].bitfield.regmmx
4186 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4187 continue;
4188
4189 /* Do not verify operands when there are none. */
4190 else
4191 {
4192 if (!t->operands)
4193 /* We've found a match; break out of loop. */
4194 break;
4195 }
4196
4197 /* An address size prefix turns a template's Disp64/Disp32/Disp16
4198 operand into a Disp32/Disp16/Disp32 operand, respectively. */
4199 if (i.prefix[ADDR_PREFIX] != 0)
4200 {
4201 /* There should be only one Disp operand. */
4202 switch (flag_code)
4203 {
4204 case CODE_16BIT:
4205 for (j = 0; j < MAX_OPERANDS; j++)
4206 {
4207 if (operand_types[j].bitfield.disp16)
4208 {
4209 addr_prefix_disp = j;
4210 operand_types[j].bitfield.disp32 = 1;
4211 operand_types[j].bitfield.disp16 = 0;
4212 break;
4213 }
4214 }
4215 break;
4216 case CODE_32BIT:
4217 for (j = 0; j < MAX_OPERANDS; j++)
4218 {
4219 if (operand_types[j].bitfield.disp32)
4220 {
4221 addr_prefix_disp = j;
4222 operand_types[j].bitfield.disp32 = 0;
4223 operand_types[j].bitfield.disp16 = 1;
4224 break;
4225 }
4226 }
4227 break;
4228 case CODE_64BIT:
4229 for (j = 0; j < MAX_OPERANDS; j++)
4230 {
4231 if (operand_types[j].bitfield.disp64)
4232 {
4233 addr_prefix_disp = j;
4234 operand_types[j].bitfield.disp64 = 0;
4235 operand_types[j].bitfield.disp32 = 1;
4236 break;
4237 }
4238 }
4239 break;
4240 }
4241 }
4242
4243 /* We check register size if needed. */
4244 check_register = t->opcode_modifier.checkregsize;
4245 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4246 switch (t->operands)
4247 {
4248 case 1:
4249 if (!operand_type_match (overlap0, i.types[0]))
4250 continue;
4251 break;
4252 case 2:
4253 /* xchg %eax, %eax is a special case. It is an alias for nop
4254 only in 32bit mode and we can use opcode 0x90. In 64bit
4255 mode, we can't use 0x90 for xchg %eax, %eax since it should
4256 zero-extend %eax to %rax. */
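/* E.g. (illustrative) "xchg %eax, %eax" assembles to 0x90 (nop) in
   32bit mode, but in 64bit mode this template is skipped and the
   0x87 form (87 c0) is used so that %rax gets zero-extended.  */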
4257 if (flag_code == CODE_64BIT
4258 && t->base_opcode == 0x90
4259 && operand_type_equal (&i.types [0], &acc32)
4260 && operand_type_equal (&i.types [1], &acc32))
4261 continue;
4262 if (i.swap_operand)
4263 {
4264 /* If we swap operand in encoding, we either match
4265 the next one or reverse direction of operands. */
4266 if (t->opcode_modifier.s)
4267 continue;
4268 else if (t->opcode_modifier.d)
4269 goto check_reverse;
4270 }
4271
4272 case 3:
4273 /* If we swap operand in encoding, we match the next one. */
4274 if (i.swap_operand && t->opcode_modifier.s)
4275 continue;
4276 case 4:
4277 case 5:
4278 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4279 if (!operand_type_match (overlap0, i.types[0])
4280 || !operand_type_match (overlap1, i.types[1])
4281 || (check_register
4282 && !operand_type_register_match (overlap0, i.types[0],
4283 operand_types[0],
4284 overlap1, i.types[1],
4285 operand_types[1])))
4286 {
4287 /* Check if other direction is valid ... */
4288 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4289 continue;
4290
4291 check_reverse:
4292 /* Try reversing direction of operands. */
4293 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4294 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4295 if (!operand_type_match (overlap0, i.types[0])
4296 || !operand_type_match (overlap1, i.types[1])
4297 || (check_register
4298 && !operand_type_register_match (overlap0,
4299 i.types[0],
4300 operand_types[1],
4301 overlap1,
4302 i.types[1],
4303 operand_types[0])))
4304 {
4305 /* Does not match either direction. */
4306 continue;
4307 }
4308 /* found_reverse_match holds which of D or FloatDR
4309 we've found. */
4310 if (t->opcode_modifier.d)
4311 found_reverse_match = Opcode_D;
4312 else if (t->opcode_modifier.floatd)
4313 found_reverse_match = Opcode_FloatD;
4314 else
4315 found_reverse_match = 0;
4316 if (t->opcode_modifier.floatr)
4317 found_reverse_match |= Opcode_FloatR;
4318 }
4319 else
4320 {
4321 /* Found a forward 2 operand match here. */
4322 switch (t->operands)
4323 {
4324 case 5:
4325 overlap4 = operand_type_and (i.types[4],
4326 operand_types[4]);
4327 case 4:
4328 overlap3 = operand_type_and (i.types[3],
4329 operand_types[3]);
4330 case 3:
4331 overlap2 = operand_type_and (i.types[2],
4332 operand_types[2]);
4333 break;
4334 }
4335
4336 switch (t->operands)
4337 {
4338 case 5:
4339 if (!operand_type_match (overlap4, i.types[4])
4340 || !operand_type_register_match (overlap3,
4341 i.types[3],
4342 operand_types[3],
4343 overlap4,
4344 i.types[4],
4345 operand_types[4]))
4346 continue;
4347 case 4:
4348 if (!operand_type_match (overlap3, i.types[3])
4349 || (check_register
4350 && !operand_type_register_match (overlap2,
4351 i.types[2],
4352 operand_types[2],
4353 overlap3,
4354 i.types[3],
4355 operand_types[3])))
4356 continue;
4357 case 3:
4358 /* Here we make use of the fact that there are no
4359 reverse match 3 operand instructions, and all 3
4360 operand instructions only need to be checked for
4361 register consistency between operands 2 and 3. */
4362 if (!operand_type_match (overlap2, i.types[2])
4363 || (check_register
4364 && !operand_type_register_match (overlap1,
4365 i.types[1],
4366 operand_types[1],
4367 overlap2,
4368 i.types[2],
4369 operand_types[2])))
4370 continue;
4371 break;
4372 }
4373 }
4374 /* Found either forward/reverse 2, 3 or 4 operand match here:
4375 slip through to break. */
4376 }
4377 if (!found_cpu_match)
4378 {
4379 found_reverse_match = 0;
4380 continue;
4381 }
4382
4383 /* Check if vector and VEX operands are valid. */
4384 if (check_VecOperands (t) || VEX_check_operands (t))
4385 {
4386 specific_error = i.error;
4387 continue;
4388 }
4389
4390 /* We've found a match; break out of loop. */
4391 break;
4392 }
4393
4394 if (t == current_templates->end)
4395 {
4396 /* We found no match. */
4397 const char *err_msg;
4398 switch (specific_error ? specific_error : i.error)
4399 {
4400 default:
4401 abort ();
4402 case operand_size_mismatch:
4403 err_msg = _("operand size mismatch");
4404 break;
4405 case operand_type_mismatch:
4406 err_msg = _("operand type mismatch");
4407 break;
4408 case register_type_mismatch:
4409 err_msg = _("register type mismatch");
4410 break;
4411 case number_of_operands_mismatch:
4412 err_msg = _("number of operands mismatch");
4413 break;
4414 case invalid_instruction_suffix:
4415 err_msg = _("invalid instruction suffix");
4416 break;
4417 case bad_imm4:
4418 err_msg = _("constant doesn't fit in 4 bits");
4419 break;
4420 case old_gcc_only:
4421 err_msg = _("only supported with old gcc");
4422 break;
4423 case unsupported_with_intel_mnemonic:
4424 err_msg = _("unsupported with Intel mnemonic");
4425 break;
4426 case unsupported_syntax:
4427 err_msg = _("unsupported syntax");
4428 break;
4429 case unsupported:
4430 as_bad (_("unsupported instruction `%s'"),
4431 current_templates->start->name);
4432 return NULL;
4433 case invalid_vsib_address:
4434 err_msg = _("invalid VSIB address");
4435 break;
4436 case invalid_vector_register_set:
4437 err_msg = _("mask, index, and destination registers must be distinct");
4438 break;
4439 case unsupported_vector_index_register:
4440 err_msg = _("unsupported vector index register");
4441 break;
4442 }
4443 as_bad (_("%s for `%s'"), err_msg,
4444 current_templates->start->name);
4445 return NULL;
4446 }
4447
4448 if (!quiet_warnings)
4449 {
4450 if (!intel_syntax
4451 && (i.types[0].bitfield.jumpabsolute
4452 != operand_types[0].bitfield.jumpabsolute))
4453 {
4454 as_warn (_("indirect %s without `*'"), t->name);
4455 }
4456
4457 if (t->opcode_modifier.isprefix
4458 && t->opcode_modifier.ignoresize)
4459 {
4460 /* Warn them that a data or address size prefix doesn't
4461 affect assembly of the next line of code. */
4462 as_warn (_("stand-alone `%s' prefix"), t->name);
4463 }
4464 }
4465
4466 /* Copy the template we found. */
4467 i.tm = *t;
4468
4469 if (addr_prefix_disp != -1)
4470 i.tm.operand_types[addr_prefix_disp]
4471 = operand_types[addr_prefix_disp];
4472
4473 if (found_reverse_match)
4474 {
4475 /* If we found a reverse match we must alter the opcode
4476 direction bit. found_reverse_match holds bits to change
4477 (different for int & float insns). */
4478
4479 i.tm.base_opcode ^= found_reverse_match;
4480
4481 i.tm.operand_types[0] = operand_types[1];
4482 i.tm.operand_types[1] = operand_types[0];
4483 }
4484
4485 return t;
4486 }
4487
4488 static int
4489 check_string (void)
4490 {
4491 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4492 if (i.tm.operand_types[mem_op].bitfield.esseg)
4493 {
4494 if (i.seg[0] != NULL && i.seg[0] != &es)
4495 {
4496 as_bad (_("`%s' operand %d must use `%ses' segment"),
4497 i.tm.name,
4498 mem_op + 1,
4499 register_prefix);
4500 return 0;
4501 }
4502 /* There's only ever one segment override allowed per instruction.
4503 This instruction possibly has a legal segment override on the
4504 second operand, so copy the segment to where non-string
4505 instructions store it, allowing common code. */
4506 i.seg[0] = i.seg[1];
4507 }
4508 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4509 {
4510 if (i.seg[1] != NULL && i.seg[1] != &es)
4511 {
4512 as_bad (_("`%s' operand %d must use `%ses' segment"),
4513 i.tm.name,
4514 mem_op + 2,
4515 register_prefix);
4516 return 0;
4517 }
4518 }
4519 return 1;
4520 }
4521
4522 static int
4523 process_suffix (void)
4524 {
4525 /* If matched instruction specifies an explicit instruction mnemonic
4526 suffix, use it. */
4527 if (i.tm.opcode_modifier.size16)
4528 i.suffix = WORD_MNEM_SUFFIX;
4529 else if (i.tm.opcode_modifier.size32)
4530 i.suffix = LONG_MNEM_SUFFIX;
4531 else if (i.tm.opcode_modifier.size64)
4532 i.suffix = QWORD_MNEM_SUFFIX;
4533 else if (i.reg_operands)
4534 {
4535 /* If there's no instruction mnemonic suffix we try to invent one
4536 based on register operands. */
4537 if (!i.suffix)
4538 {
4539 /* We take i.suffix from the last register operand specified.
4540 Destination register type is more significant than source
4541 register type. crc32 in SSE4.2 prefers source register
4542 type. */
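/* E.g. (illustrative) for "crc32 %cx, %eax" the 'w' suffix is taken
   from the 16bit source %cx, not from the 32bit destination, so an
   operand-size (0x66) prefix is emitted as well.  */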
4543 if (i.tm.base_opcode == 0xf20f38f1)
4544 {
4545 if (i.types[0].bitfield.reg16)
4546 i.suffix = WORD_MNEM_SUFFIX;
4547 else if (i.types[0].bitfield.reg32)
4548 i.suffix = LONG_MNEM_SUFFIX;
4549 else if (i.types[0].bitfield.reg64)
4550 i.suffix = QWORD_MNEM_SUFFIX;
4551 }
4552 else if (i.tm.base_opcode == 0xf20f38f0)
4553 {
4554 if (i.types[0].bitfield.reg8)
4555 i.suffix = BYTE_MNEM_SUFFIX;
4556 }
4557
4558 if (!i.suffix)
4559 {
4560 int op;
4561
4562 if (i.tm.base_opcode == 0xf20f38f1
4563 || i.tm.base_opcode == 0xf20f38f0)
4564 {
4565 /* We have to know the operand size for crc32. */
4566 as_bad (_("ambiguous memory operand size for `%s`"),
4567 i.tm.name);
4568 return 0;
4569 }
4570
4571 for (op = i.operands; --op >= 0;)
4572 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4573 {
4574 if (i.types[op].bitfield.reg8)
4575 {
4576 i.suffix = BYTE_MNEM_SUFFIX;
4577 break;
4578 }
4579 else if (i.types[op].bitfield.reg16)
4580 {
4581 i.suffix = WORD_MNEM_SUFFIX;
4582 break;
4583 }
4584 else if (i.types[op].bitfield.reg32)
4585 {
4586 i.suffix = LONG_MNEM_SUFFIX;
4587 break;
4588 }
4589 else if (i.types[op].bitfield.reg64)
4590 {
4591 i.suffix = QWORD_MNEM_SUFFIX;
4592 break;
4593 }
4594 }
4595 }
4596 }
4597 else if (i.suffix == BYTE_MNEM_SUFFIX)
4598 {
4599 if (intel_syntax
4600 && i.tm.opcode_modifier.ignoresize
4601 && i.tm.opcode_modifier.no_bsuf)
4602 i.suffix = 0;
4603 else if (!check_byte_reg ())
4604 return 0;
4605 }
4606 else if (i.suffix == LONG_MNEM_SUFFIX)
4607 {
4608 if (intel_syntax
4609 && i.tm.opcode_modifier.ignoresize
4610 && i.tm.opcode_modifier.no_lsuf)
4611 i.suffix = 0;
4612 else if (!check_long_reg ())
4613 return 0;
4614 }
4615 else if (i.suffix == QWORD_MNEM_SUFFIX)
4616 {
4617 if (intel_syntax
4618 && i.tm.opcode_modifier.ignoresize
4619 && i.tm.opcode_modifier.no_qsuf)
4620 i.suffix = 0;
4621 else if (!check_qword_reg ())
4622 return 0;
4623 }
4624 else if (i.suffix == WORD_MNEM_SUFFIX)
4625 {
4626 if (intel_syntax
4627 && i.tm.opcode_modifier.ignoresize
4628 && i.tm.opcode_modifier.no_wsuf)
4629 i.suffix = 0;
4630 else if (!check_word_reg ())
4631 return 0;
4632 }
4633 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4634 || i.suffix == YMMWORD_MNEM_SUFFIX)
4635 {
4636 /* Skip if the instruction has x/y suffix. match_template
4637 should check if it is a valid suffix. */
4638 }
4639 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4640 /* Do nothing if the instruction is going to ignore the prefix. */
4641 ;
4642 else
4643 abort ();
4644 }
4645 else if (i.tm.opcode_modifier.defaultsize
4646 && !i.suffix
4647 /* exclude fldenv/frstor/fsave/fstenv */
4648 && i.tm.opcode_modifier.no_ssuf)
4649 {
4650 i.suffix = stackop_size;
4651 }
4652 else if (intel_syntax
4653 && !i.suffix
4654 && (i.tm.operand_types[0].bitfield.jumpabsolute
4655 || i.tm.opcode_modifier.jumpbyte
4656 || i.tm.opcode_modifier.jumpintersegment
4657 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4658 && i.tm.extension_opcode <= 3)))
4659 {
4660 switch (flag_code)
4661 {
4662 case CODE_64BIT:
4663 if (!i.tm.opcode_modifier.no_qsuf)
4664 {
4665 i.suffix = QWORD_MNEM_SUFFIX;
4666 break;
4667 }
4668 case CODE_32BIT:
4669 if (!i.tm.opcode_modifier.no_lsuf)
4670 i.suffix = LONG_MNEM_SUFFIX;
4671 break;
4672 case CODE_16BIT:
4673 if (!i.tm.opcode_modifier.no_wsuf)
4674 i.suffix = WORD_MNEM_SUFFIX;
4675 break;
4676 }
4677 }
4678
4679 if (!i.suffix)
4680 {
4681 if (!intel_syntax)
4682 {
4683 if (i.tm.opcode_modifier.w)
4684 {
4685 as_bad (_("no instruction mnemonic suffix given and "
4686 "no register operands; can't size instruction"));
4687 return 0;
4688 }
4689 }
4690 else
4691 {
4692 unsigned int suffixes;
4693
4694 suffixes = !i.tm.opcode_modifier.no_bsuf;
4695 if (!i.tm.opcode_modifier.no_wsuf)
4696 suffixes |= 1 << 1;
4697 if (!i.tm.opcode_modifier.no_lsuf)
4698 suffixes |= 1 << 2;
4699 if (!i.tm.opcode_modifier.no_ldsuf)
4700 suffixes |= 1 << 3;
4701 if (!i.tm.opcode_modifier.no_ssuf)
4702 suffixes |= 1 << 4;
4703 if (!i.tm.opcode_modifier.no_qsuf)
4704 suffixes |= 1 << 5;
4705
4706 /* There is more than one possible suffix match. */
4707 if (i.tm.opcode_modifier.w
4708 || ((suffixes & (suffixes - 1))
4709 && !i.tm.opcode_modifier.defaultsize
4710 && !i.tm.opcode_modifier.ignoresize))
4711 {
4712 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4713 return 0;
4714 }
4715 }
4716 }
4717
4718 /* Change the opcode based on the operand size given by i.suffix;
4719 We don't need to change things for byte insns. */
4720
4721 if (i.suffix
4722 && i.suffix != BYTE_MNEM_SUFFIX
4723 && i.suffix != XMMWORD_MNEM_SUFFIX
4724 && i.suffix != YMMWORD_MNEM_SUFFIX)
4725 {
4726 /* It's not a byte, select word/dword operation. */
4727 if (i.tm.opcode_modifier.w)
4728 {
4729 if (i.tm.opcode_modifier.shortform)
4730 i.tm.base_opcode |= 8;
4731 else
4732 i.tm.base_opcode |= 1;
4733 }
4734
4735 /* Now select between word & dword operations via the operand
4736 size prefix, except for instructions that will ignore this
4737 prefix anyway. */
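/* E.g. (illustrative) "addw %ax, %bx" assembled in 32bit code, or
   "addl %eax, %ebx" in 16bit code, gets a 0x66 operand-size prefix
   here; jcxz/loop style branches take the 0x67 address-size prefix
   instead.  */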
4738 if (i.tm.opcode_modifier.addrprefixop0)
4739 {
4740 /* The address size override prefix changes the size of the
4741 first operand. */
4742 if ((flag_code == CODE_32BIT
4743 && i.op->regs[0].reg_type.bitfield.reg16)
4744 || (flag_code != CODE_32BIT
4745 && i.op->regs[0].reg_type.bitfield.reg32))
4746 if (!add_prefix (ADDR_PREFIX_OPCODE))
4747 return 0;
4748 }
4749 else if (i.suffix != QWORD_MNEM_SUFFIX
4750 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4751 && !i.tm.opcode_modifier.ignoresize
4752 && !i.tm.opcode_modifier.floatmf
4753 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4754 || (flag_code == CODE_64BIT
4755 && i.tm.opcode_modifier.jumpbyte)))
4756 {
4757 unsigned int prefix = DATA_PREFIX_OPCODE;
4758
4759 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4760 prefix = ADDR_PREFIX_OPCODE;
4761
4762 if (!add_prefix (prefix))
4763 return 0;
4764 }
4765
4766 /* Set mode64 for an operand. */
4767 if (i.suffix == QWORD_MNEM_SUFFIX
4768 && flag_code == CODE_64BIT
4769 && !i.tm.opcode_modifier.norex64)
4770 {
4771 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4772 need rex64. cmpxchg8b is also a special case. */
4773 if (! (i.operands == 2
4774 && i.tm.base_opcode == 0x90
4775 && i.tm.extension_opcode == None
4776 && operand_type_equal (&i.types [0], &acc64)
4777 && operand_type_equal (&i.types [1], &acc64))
4778 && ! (i.operands == 1
4779 && i.tm.base_opcode == 0xfc7
4780 && i.tm.extension_opcode == 1
4781 && !operand_type_check (i.types [0], reg)
4782 && operand_type_check (i.types [0], anymem)))
4783 i.rex |= REX_W;
4784 }
4785
4786 /* Size floating point instruction. */
4787 if (i.suffix == LONG_MNEM_SUFFIX)
4788 if (i.tm.opcode_modifier.floatmf)
4789 i.tm.base_opcode ^= 4;
4790 }
4791
4792 return 1;
4793 }
4794
4795 static int
4796 check_byte_reg (void)
4797 {
4798 int op;
4799
4800 for (op = i.operands; --op >= 0;)
4801 {
4802 /* If this is an eight bit register, it's OK. If it's the 16 or
4803 32 bit version of an eight bit register, we will just use the
4804 low portion, and that's OK too. */
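/* E.g. (illustrative) outside 64bit mode "addb $1, %eax" matches the
   accumulator form and is assembled as if "addb $1, %al" had been
   written, with the warning below; in 64bit mode the lowering is
   refused and an error is reported instead.  */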
4805 if (i.types[op].bitfield.reg8)
4806 continue;
4807
4808 /* I/O port address operands are OK too. */
4809 if (i.tm.operand_types[op].bitfield.inoutportreg)
4810 continue;
4811
4812 /* crc32 doesn't generate this warning. */
4813 if (i.tm.base_opcode == 0xf20f38f0)
4814 continue;
4815
4816 if ((i.types[op].bitfield.reg16
4817 || i.types[op].bitfield.reg32
4818 || i.types[op].bitfield.reg64)
4819 && i.op[op].regs->reg_num < 4
4820 /* Prohibit these changes in 64bit mode, since the lowering
4821 would be more complicated. */
4822 && flag_code != CODE_64BIT)
4823 {
4824 #if REGISTER_WARNINGS
4825 if (!quiet_warnings)
4826 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4827 register_prefix,
4828 (i.op[op].regs + (i.types[op].bitfield.reg16
4829 ? REGNAM_AL - REGNAM_AX
4830 : REGNAM_AL - REGNAM_EAX))->reg_name,
4831 register_prefix,
4832 i.op[op].regs->reg_name,
4833 i.suffix);
4834 #endif
4835 continue;
4836 }
4837 /* Any other register is bad. */
4838 if (i.types[op].bitfield.reg16
4839 || i.types[op].bitfield.reg32
4840 || i.types[op].bitfield.reg64
4841 || i.types[op].bitfield.regmmx
4842 || i.types[op].bitfield.regxmm
4843 || i.types[op].bitfield.regymm
4844 || i.types[op].bitfield.sreg2
4845 || i.types[op].bitfield.sreg3
4846 || i.types[op].bitfield.control
4847 || i.types[op].bitfield.debug
4848 || i.types[op].bitfield.test
4849 || i.types[op].bitfield.floatreg
4850 || i.types[op].bitfield.floatacc)
4851 {
4852 as_bad (_("`%s%s' not allowed with `%s%c'"),
4853 register_prefix,
4854 i.op[op].regs->reg_name,
4855 i.tm.name,
4856 i.suffix);
4857 return 0;
4858 }
4859 }
4860 return 1;
4861 }
4862
4863 static int
4864 check_long_reg (void)
4865 {
4866 int op;
4867
4868 for (op = i.operands; --op >= 0;)
4869 /* Reject eight bit registers, except where the template requires
4870 them. (eg. movzb) */
4871 if (i.types[op].bitfield.reg8
4872 && (i.tm.operand_types[op].bitfield.reg16
4873 || i.tm.operand_types[op].bitfield.reg32
4874 || i.tm.operand_types[op].bitfield.acc))
4875 {
4876 as_bad (_("`%s%s' not allowed with `%s%c'"),
4877 register_prefix,
4878 i.op[op].regs->reg_name,
4879 i.tm.name,
4880 i.suffix);
4881 return 0;
4882 }
4883 /* Warn if the e prefix on a general reg is missing. */
4884 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4885 && i.types[op].bitfield.reg16
4886 && (i.tm.operand_types[op].bitfield.reg32
4887 || i.tm.operand_types[op].bitfield.acc))
4888 {
4889 /* Prohibit these changes in the 64bit mode, since the
4890 lowering is more complicated. */
4891 if (flag_code == CODE_64BIT)
4892 {
4893 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4894 register_prefix, i.op[op].regs->reg_name,
4895 i.suffix);
4896 return 0;
4897 }
4898 #if REGISTER_WARNINGS
4899 else
4900 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4901 register_prefix,
4902 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4903 register_prefix,
4904 i.op[op].regs->reg_name,
4905 i.suffix);
4906 #endif
4907 }
4908 /* Warn if the r prefix on a general reg is missing. */
4909 else if (i.types[op].bitfield.reg64
4910 && (i.tm.operand_types[op].bitfield.reg32
4911 || i.tm.operand_types[op].bitfield.acc))
4912 {
4913 if (intel_syntax
4914 && i.tm.opcode_modifier.toqword
4915 && !i.types[0].bitfield.regxmm)
4916 {
4917 /* Convert to QWORD. We want REX byte. */
4918 i.suffix = QWORD_MNEM_SUFFIX;
4919 }
4920 else
4921 {
4922 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4923 register_prefix, i.op[op].regs->reg_name,
4924 i.suffix);
4925 return 0;
4926 }
4927 }
4928 return 1;
4929 }
4930
4931 static int
4932 check_qword_reg (void)
4933 {
4934 int op;
4935
4936 for (op = i.operands; --op >= 0; )
4937 /* Reject eight bit registers, except where the template requires
4938 them. (eg. movzb) */
4939 if (i.types[op].bitfield.reg8
4940 && (i.tm.operand_types[op].bitfield.reg16
4941 || i.tm.operand_types[op].bitfield.reg32
4942 || i.tm.operand_types[op].bitfield.acc))
4943 {
4944 as_bad (_("`%s%s' not allowed with `%s%c'"),
4945 register_prefix,
4946 i.op[op].regs->reg_name,
4947 i.tm.name,
4948 i.suffix);
4949 return 0;
4950 }
4951 /* Warn if the e prefix on a general reg is missing. */
4952 else if ((i.types[op].bitfield.reg16
4953 || i.types[op].bitfield.reg32)
4954 && (i.tm.operand_types[op].bitfield.reg32
4955 || i.tm.operand_types[op].bitfield.acc))
4956 {
4957 /* Prohibit these changes in the 64bit mode, since the
4958 lowering is more complicated. */
4959 if (intel_syntax
4960 && i.tm.opcode_modifier.todword
4961 && !i.types[0].bitfield.regxmm)
4962 {
4963 /* Convert to DWORD. We don't want REX byte. */
4964 i.suffix = LONG_MNEM_SUFFIX;
4965 }
4966 else
4967 {
4968 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4969 register_prefix, i.op[op].regs->reg_name,
4970 i.suffix);
4971 return 0;
4972 }
4973 }
4974 return 1;
4975 }
4976
4977 static int
4978 check_word_reg (void)
4979 {
4980 int op;
4981 for (op = i.operands; --op >= 0;)
4982 /* Reject eight bit registers, except where the template requires
4983 them. (eg. movzb) */
4984 if (i.types[op].bitfield.reg8
4985 && (i.tm.operand_types[op].bitfield.reg16
4986 || i.tm.operand_types[op].bitfield.reg32
4987 || i.tm.operand_types[op].bitfield.acc))
4988 {
4989 as_bad (_("`%s%s' not allowed with `%s%c'"),
4990 register_prefix,
4991 i.op[op].regs->reg_name,
4992 i.tm.name,
4993 i.suffix);
4994 return 0;
4995 }
4996 /* Warn if the e prefix on a general reg is present. */
4997 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4998 && i.types[op].bitfield.reg32
4999 && (i.tm.operand_types[op].bitfield.reg16
5000 || i.tm.operand_types[op].bitfield.acc))
5001 {
5002 /* Prohibit these changes in the 64bit mode, since the
5003 lowering is more complicated. */
5004 if (flag_code == CODE_64BIT)
5005 {
5006 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5007 register_prefix, i.op[op].regs->reg_name,
5008 i.suffix);
5009 return 0;
5010 }
5011 else
5012 #if REGISTER_WARNINGS
5013 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5014 register_prefix,
5015 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5016 register_prefix,
5017 i.op[op].regs->reg_name,
5018 i.suffix);
5019 #endif
5020 }
5021 return 1;
5022 }
5023
5024 static int
5025 update_imm (unsigned int j)
5026 {
5027 i386_operand_type overlap = i.types[j];
5028 if ((overlap.bitfield.imm8
5029 || overlap.bitfield.imm8s
5030 || overlap.bitfield.imm16
5031 || overlap.bitfield.imm32
5032 || overlap.bitfield.imm32s
5033 || overlap.bitfield.imm64)
5034 && !operand_type_equal (&overlap, &imm8)
5035 && !operand_type_equal (&overlap, &imm8s)
5036 && !operand_type_equal (&overlap, &imm16)
5037 && !operand_type_equal (&overlap, &imm32)
5038 && !operand_type_equal (&overlap, &imm32s)
5039 && !operand_type_equal (&overlap, &imm64))
5040 {
5041 if (i.suffix)
5042 {
5043 i386_operand_type temp;
5044
5045 operand_type_set (&temp, 0);
5046 if (i.suffix == BYTE_MNEM_SUFFIX)
5047 {
5048 temp.bitfield.imm8 = overlap.bitfield.imm8;
5049 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5050 }
5051 else if (i.suffix == WORD_MNEM_SUFFIX)
5052 temp.bitfield.imm16 = overlap.bitfield.imm16;
5053 else if (i.suffix == QWORD_MNEM_SUFFIX)
5054 {
5055 temp.bitfield.imm64 = overlap.bitfield.imm64;
5056 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5057 }
5058 else
5059 temp.bitfield.imm32 = overlap.bitfield.imm32;
5060 overlap = temp;
5061 }
5062 else if (operand_type_equal (&overlap, &imm16_32_32s)
5063 || operand_type_equal (&overlap, &imm16_32)
5064 || operand_type_equal (&overlap, &imm16_32s))
5065 {
5066 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5067 overlap = imm16;
5068 else
5069 overlap = imm32s;
5070 }
5071 if (!operand_type_equal (&overlap, &imm8)
5072 && !operand_type_equal (&overlap, &imm8s)
5073 && !operand_type_equal (&overlap, &imm16)
5074 && !operand_type_equal (&overlap, &imm32)
5075 && !operand_type_equal (&overlap, &imm32s)
5076 && !operand_type_equal (&overlap, &imm64))
5077 {
5078 as_bad (_("no instruction mnemonic suffix given; "
5079 "can't determine immediate size"));
5080 return 0;
5081 }
5082 }
5083 i.types[j] = overlap;
5084
5085 return 1;
5086 }
5087
5088 static int
5089 finalize_imm (void)
5090 {
5091 unsigned int j, n;
5092
5093 /* Update the first 2 immediate operands. */
5094 n = i.operands > 2 ? 2 : i.operands;
5095 if (n)
5096 {
5097 for (j = 0; j < n; j++)
5098 if (update_imm (j) == 0)
5099 return 0;
5100
5101 /* The 3rd operand can't be an immediate operand. */
5102 gas_assert (operand_type_check (i.types[2], imm) == 0);
5103 }
5104
5105 return 1;
5106 }
5107
5108 static int
5109 bad_implicit_operand (int xmm)
5110 {
5111 const char *ireg = xmm ? "xmm0" : "ymm0";
5112
5113 if (intel_syntax)
5114 as_bad (_("the last operand of `%s' must be `%s%s'"),
5115 i.tm.name, register_prefix, ireg);
5116 else
5117 as_bad (_("the first operand of `%s' must be `%s%s'"),
5118 i.tm.name, register_prefix, ireg);
5119 return 0;
5120 }
5121
5122 static int
5123 process_operands (void)
5124 {
5125 /* Default segment register this instruction will use for memory
5126 accesses. 0 means unknown. This is only for optimizing out
5127 unnecessary segment overrides. */
5128 const seg_entry *default_seg = 0;
5129
5130 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5131 {
5132 unsigned int dupl = i.operands;
5133 unsigned int dest = dupl - 1;
5134 unsigned int j;
5135
5136 /* The destination must be an xmm register. */
5137 gas_assert (i.reg_operands
5138 && MAX_OPERANDS > dupl
5139 && operand_type_equal (&i.types[dest], &regxmm));
5140
5141 if (i.tm.opcode_modifier.firstxmm0)
5142 {
5143 /* The first operand is implicit and must be xmm0. */
5144 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5145 if (register_number (i.op[0].regs) != 0)
5146 return bad_implicit_operand (1);
5147
5148 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5149 {
5150 /* Keep xmm0 for instructions with VEX prefix and 3
5151 sources. */
5152 goto duplicate;
5153 }
5154 else
5155 {
5156 /* We remove the first xmm0 and keep the number of
5157 operands unchanged, which in fact duplicates the
5158 destination. */
5159 for (j = 1; j < i.operands; j++)
5160 {
5161 i.op[j - 1] = i.op[j];
5162 i.types[j - 1] = i.types[j];
5163 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5164 }
5165 }
5166 }
5167 else if (i.tm.opcode_modifier.implicit1stxmm0)
5168 {
5169 gas_assert ((MAX_OPERANDS - 1) > dupl
5170 && (i.tm.opcode_modifier.vexsources
5171 == VEX3SOURCES));
5172
5173 /* Add the implicit xmm0 for instructions with VEX prefix
5174 and 3 sources. */
5175 for (j = i.operands; j > 0; j--)
5176 {
5177 i.op[j] = i.op[j - 1];
5178 i.types[j] = i.types[j - 1];
5179 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5180 }
5181 i.op[0].regs
5182 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5183 i.types[0] = regxmm;
5184 i.tm.operand_types[0] = regxmm;
5185
5186 i.operands += 2;
5187 i.reg_operands += 2;
5188 i.tm.operands += 2;
5189
5190 dupl++;
5191 dest++;
5192 i.op[dupl] = i.op[dest];
5193 i.types[dupl] = i.types[dest];
5194 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5195 }
5196 else
5197 {
5198 duplicate:
5199 i.operands++;
5200 i.reg_operands++;
5201 i.tm.operands++;
5202
5203 i.op[dupl] = i.op[dest];
5204 i.types[dupl] = i.types[dest];
5205 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5206 }
5207
5208 if (i.tm.opcode_modifier.immext)
5209 process_immext ();
5210 }
5211 else if (i.tm.opcode_modifier.firstxmm0)
5212 {
5213 unsigned int j;
5214
5215 /* The first operand is implicit and must be xmm0/ymm0. */
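/* E.g. (illustrative) SSE4.1 "blendvps" uses %xmm0 as an implicit
   operand; when it is written out explicitly it is dropped again here
   before the ModRM byte is built.  */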
5216 gas_assert (i.reg_operands
5217 && (operand_type_equal (&i.types[0], &regxmm)
5218 || operand_type_equal (&i.types[0], &regymm)));
5219 if (register_number (i.op[0].regs) != 0)
5220 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5221
5222 for (j = 1; j < i.operands; j++)
5223 {
5224 i.op[j - 1] = i.op[j];
5225 i.types[j - 1] = i.types[j];
5226
5227 /* We need to adjust fields in i.tm since they are used by
5228 build_modrm_byte. */
5229 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5230 }
5231
5232 i.operands--;
5233 i.reg_operands--;
5234 i.tm.operands--;
5235 }
5236 else if (i.tm.opcode_modifier.regkludge)
5237 {
5238 /* The imul $imm, %reg instruction is converted into
5239 imul $imm, %reg, %reg, and the clr %reg instruction
5240 is converted into xor %reg, %reg. */
5241
5242 unsigned int first_reg_op;
5243
5244 if (operand_type_check (i.types[0], reg))
5245 first_reg_op = 0;
5246 else
5247 first_reg_op = 1;
5248 /* Pretend we saw the extra register operand. */
5249 gas_assert (i.reg_operands == 1
5250 && i.op[first_reg_op + 1].regs == 0);
5251 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5252 i.types[first_reg_op + 1] = i.types[first_reg_op];
5253 i.operands++;
5254 i.reg_operands++;
5255 }
5256
5257 if (i.tm.opcode_modifier.shortform)
5258 {
5259 if (i.types[0].bitfield.sreg2
5260 || i.types[0].bitfield.sreg3)
5261 {
5262 if (i.tm.base_opcode == POP_SEG_SHORT
5263 && i.op[0].regs->reg_num == 1)
5264 {
5265 as_bad (_("you can't `pop %scs'"), register_prefix);
5266 return 0;
5267 }
5268 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5269 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5270 i.rex |= REX_B;
5271 }
5272 else
5273 {
5274 /* The register or float register operand is in operand
5275 0 or 1. */
5276 unsigned int op;
5277
5278 if (i.types[0].bitfield.floatreg
5279 || operand_type_check (i.types[0], reg))
5280 op = 0;
5281 else
5282 op = 1;
5283 /* Register goes in low 3 bits of opcode. */
5284 i.tm.base_opcode |= i.op[op].regs->reg_num;
5285 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5286 i.rex |= REX_B;
5287 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5288 {
5289 /* Warn about some common errors, but press on regardless.
5290 The first case can be generated by gcc (<= 2.8.1). */
5291 if (i.operands == 2)
5292 {
5293 /* Reversed arguments on faddp, fsubp, etc. */
5294 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5295 register_prefix, i.op[!intel_syntax].regs->reg_name,
5296 register_prefix, i.op[intel_syntax].regs->reg_name);
5297 }
5298 else
5299 {
5300 /* Extraneous `l' suffix on fp insn. */
5301 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5302 register_prefix, i.op[0].regs->reg_name);
5303 }
5304 }
5305 }
5306 }
5307 else if (i.tm.opcode_modifier.modrm)
5308 {
5309 /* The opcode is completed (modulo i.tm.extension_opcode which
5310 must be put into the modrm byte). Now, we make the modrm and
5311 index base bytes based on all the info we've collected. */
5312
5313 default_seg = build_modrm_byte ();
5314 }
5315 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5316 {
5317 default_seg = &ds;
5318 }
5319 else if (i.tm.opcode_modifier.isstring)
5320 {
5321 /* For the string instructions that allow a segment override
5322 on one of their operands, the default segment is ds. */
5323 default_seg = &ds;
5324 }
5325
5326 if (i.tm.base_opcode == 0x8d /* lea */
5327 && i.seg[0]
5328 && !quiet_warnings)
5329 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5330
5331 /* If a segment was explicitly specified, and the specified segment
5332 is not the default, use an opcode prefix to select it. If we
5333 never figured out what the default segment is, then default_seg
5334 will be zero at this point, and the specified segment prefix will
5335 always be used. */
5336 if ((i.seg[0]) && (i.seg[0] != default_seg))
5337 {
5338 if (!add_prefix (i.seg[0]->seg_prefix))
5339 return 0;
5340 }
5341 return 1;
5342 }
5343
5344 static const seg_entry *
5345 build_modrm_byte (void)
5346 {
5347 const seg_entry *default_seg = 0;
5348 unsigned int source, dest;
5349 int vex_3_sources;
5350
5351 /* The first operand of instructions with VEX prefix and 3 sources
5352 must be VEX_Imm4. */
5353 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5354 if (vex_3_sources)
5355 {
5356 unsigned int nds, reg_slot;
5357 expressionS *exp;
5358
5359 if (i.tm.opcode_modifier.veximmext
5360 && i.tm.opcode_modifier.immext)
5361 {
5362 dest = i.operands - 2;
5363 gas_assert (dest == 3);
5364 }
5365 else
5366 dest = i.operands - 1;
5367 nds = dest - 1;
5368
5369 /* There are 2 kinds of instructions:
5370 1. 5 operands: 4 register operands or 3 register operands
5371 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5372 VexW0 or VexW1. The destination must be either XMM or YMM
5373 register.
5374 2. 4 operands: 4 register operands or 3 register operands
5375 plus 1 memory operand, VexXDS, and VexImmExt */
5376 gas_assert ((i.reg_operands == 4
5377 || (i.reg_operands == 3 && i.mem_operands == 1))
5378 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5379 && (i.tm.opcode_modifier.veximmext
5380 || (i.imm_operands == 1
5381 && i.types[0].bitfield.vec_imm4
5382 && (i.tm.opcode_modifier.vexw == VEXW0
5383 || i.tm.opcode_modifier.vexw == VEXW1)
5384 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5385 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5386
5387 if (i.imm_operands == 0)
5388 {
5389 /* When there is no immediate operand, generate an 8bit
5390 immediate operand to encode the first operand. */
5391 exp = &im_expressions[i.imm_operands++];
5392 i.op[i.operands].imms = exp;
5393 i.types[i.operands] = imm8;
5394 i.operands++;
5395 /* If VexW1 is set, the first operand is the source and
5396 the second operand is encoded in the immediate operand. */
5397 if (i.tm.opcode_modifier.vexw == VEXW1)
5398 {
5399 source = 0;
5400 reg_slot = 1;
5401 }
5402 else
5403 {
5404 source = 1;
5405 reg_slot = 0;
5406 }
5407
5408 /* FMA swaps REG and NDS. */
5409 if (i.tm.cpu_flags.bitfield.cpufma)
5410 {
5411 unsigned int tmp;
5412 tmp = reg_slot;
5413 reg_slot = nds;
5414 nds = tmp;
5415 }
5416
5417 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5418 &regxmm)
5419 || operand_type_equal (&i.tm.operand_types[reg_slot],
5420 &regymm));
5421 exp->X_op = O_constant;
5422 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5423 }
5424 else
5425 {
5426 unsigned int imm_slot;
5427
5428 if (i.tm.opcode_modifier.vexw == VEXW0)
5429 {
5430 /* If VexW0 is set, the third operand is the source and
5431 the second operand is encoded in the immediate
5432 operand. */
5433 source = 2;
5434 reg_slot = 1;
5435 }
5436 else
5437 {
5438 /* VexW1 is set, the second operand is the source and
5439 the third operand is encoded in the immediate
5440 operand. */
5441 source = 1;
5442 reg_slot = 2;
5443 }
5444
5445 if (i.tm.opcode_modifier.immext)
5446 {
5447 /* When ImmExt is set, the immediate byte is the last
5448 operand. */
5449 imm_slot = i.operands - 1;
5450 source--;
5451 reg_slot--;
5452 }
5453 else
5454 {
5455 imm_slot = 0;
5456
5457 /* Turn on Imm8 so that output_imm will generate it. */
5458 i.types[imm_slot].bitfield.imm8 = 1;
5459 }
5460
5461 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5462 &regxmm)
5463 || operand_type_equal (&i.tm.operand_types[reg_slot],
5464 &regymm));
5465 i.op[imm_slot].imms->X_add_number
5466 |= register_number (i.op[reg_slot].regs) << 4;
5467 }
5468
5469 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5470 || operand_type_equal (&i.tm.operand_types[nds],
5471 &regymm));
5472 i.vex.register_specifier = i.op[nds].regs;
5473 }
5474 else
5475 source = dest = 0;
5476
5477 /* i.reg_operands MUST be the number of real register operands;
5478 implicit registers do not count. If there are 3 register
5479 operands, it must be an instruction with VexNDS. For an
5480 instruction with VexNDD, the destination register is encoded
5481 in the VEX prefix. If there are 4 register operands, it must be
5482 an instruction with a VEX prefix and 3 sources. */
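/* E.g. (illustrative) for "vaddps %xmm1, %xmm2, %xmm3" (a VexNDS
   insn) the destination %xmm3 goes in ModRM.reg, the register-only
   source %xmm2 in VEX.vvvv and %xmm1 in ModRM.rm.  */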
5483 if (i.mem_operands == 0
5484 && ((i.reg_operands == 2
5485 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5486 || (i.reg_operands == 3
5487 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5488 || (i.reg_operands == 4 && vex_3_sources)))
5489 {
5490 switch (i.operands)
5491 {
5492 case 2:
5493 source = 0;
5494 break;
5495 case 3:
5496 /* When there are 3 operands, one of them may be immediate,
5497 which may be the first or the last operand. Otherwise,
5498 the first operand must be shift count register (cl) or it
5499 is an instruction with VexNDS. */
5500 gas_assert (i.imm_operands == 1
5501 || (i.imm_operands == 0
5502 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5503 || i.types[0].bitfield.shiftcount)));
5504 if (operand_type_check (i.types[0], imm)
5505 || i.types[0].bitfield.shiftcount)
5506 source = 1;
5507 else
5508 source = 0;
5509 break;
5510 case 4:
5511 /* When there are 4 operands, the first two must be 8bit
5512 immediate operands. The source operand will be the 3rd
5513 one.
5514
5515 For instructions with VexNDS, if the first operand is
5516 an imm8, the source operand is the 2nd one. If the last
5517 operand is imm8, the source operand is the first one. */
5518 gas_assert ((i.imm_operands == 2
5519 && i.types[0].bitfield.imm8
5520 && i.types[1].bitfield.imm8)
5521 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5522 && i.imm_operands == 1
5523 && (i.types[0].bitfield.imm8
5524 || i.types[i.operands - 1].bitfield.imm8)));
5525 if (i.imm_operands == 2)
5526 source = 2;
5527 else
5528 {
5529 if (i.types[0].bitfield.imm8)
5530 source = 1;
5531 else
5532 source = 0;
5533 }
5534 break;
5535 case 5:
5536 break;
5537 default:
5538 abort ();
5539 }
5540
5541 if (!vex_3_sources)
5542 {
5543 dest = source + 1;
5544
5545 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5546 {
5547 /* For instructions with VexNDS, the register-only
5548 source operand must be 32/64bit integer, XMM or
5549 YMM register. It is encoded in VEX prefix. We
5550 need to clear RegMem bit before calling
5551 operand_type_equal. */
5552
5553 i386_operand_type op;
5554 unsigned int vvvv;
5555
5556 /* Check register-only source operand when two source
5557 operands are swapped. */
5558 if (!i.tm.operand_types[source].bitfield.baseindex
5559 && i.tm.operand_types[dest].bitfield.baseindex)
5560 {
5561 vvvv = source;
5562 source = dest;
5563 }
5564 else
5565 vvvv = dest;
5566
5567 op = i.tm.operand_types[vvvv];
5568 op.bitfield.regmem = 0;
5569 if ((dest + 1) >= i.operands
5570 || (op.bitfield.reg32 != 1
5571 && !op.bitfield.reg64 != 1
5572 && !operand_type_equal (&op, &regxmm)
5573 && !operand_type_equal (&op, &regymm)))
5574 abort ();
5575 i.vex.register_specifier = i.op[vvvv].regs;
5576 dest++;
5577 }
5578 }
5579
5580 i.rm.mode = 3;
5581 /* One of the register operands will be encoded in the i.tm.reg
5582 field, the other in the combined i.tm.mode and i.tm.regmem
5583 fields. If no form of this instruction supports a memory
5584 destination operand, then we assume the source operand may
5585 sometimes be a memory operand and so we need to store the
5586 destination in the i.rm.reg field. */
5587 if (!i.tm.operand_types[dest].bitfield.regmem
5588 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5589 {
5590 i.rm.reg = i.op[dest].regs->reg_num;
5591 i.rm.regmem = i.op[source].regs->reg_num;
5592 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5593 i.rex |= REX_R;
5594 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5595 i.rex |= REX_B;
5596 }
5597 else
5598 {
5599 i.rm.reg = i.op[source].regs->reg_num;
5600 i.rm.regmem = i.op[dest].regs->reg_num;
5601 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5602 i.rex |= REX_B;
5603 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5604 i.rex |= REX_R;
5605 }
5606 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5607 {
5608 if (!i.types[0].bitfield.control
5609 && !i.types[1].bitfield.control)
5610 abort ();
5611 i.rex &= ~(REX_R | REX_B);
5612 add_prefix (LOCK_PREFIX_OPCODE);
5613 }
5614 }
5615 else
5616 { /* If it's not 2 reg operands... */
5617 unsigned int mem;
5618
5619 if (i.mem_operands)
5620 {
5621 unsigned int fake_zero_displacement = 0;
5622 unsigned int op;
5623
5624 for (op = 0; op < i.operands; op++)
5625 if (operand_type_check (i.types[op], anymem))
5626 break;
5627 gas_assert (op < i.operands);
5628
5629 if (i.tm.opcode_modifier.vecsib)
5630 {
5631 if (i.index_reg->reg_num == RegEiz
5632 || i.index_reg->reg_num == RegRiz)
5633 abort ();
5634
5635 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5636 if (!i.base_reg)
5637 {
5638 i.sib.base = NO_BASE_REGISTER;
5639 i.sib.scale = i.log2_scale_factor;
5640 i.types[op].bitfield.disp8 = 0;
5641 i.types[op].bitfield.disp16 = 0;
5642 i.types[op].bitfield.disp64 = 0;
5643 if (flag_code != CODE_64BIT)
5644 {
5645 /* Must be 32 bit */
5646 i.types[op].bitfield.disp32 = 1;
5647 i.types[op].bitfield.disp32s = 0;
5648 }
5649 else
5650 {
5651 i.types[op].bitfield.disp32 = 0;
5652 i.types[op].bitfield.disp32s = 1;
5653 }
5654 }
5655 i.sib.index = i.index_reg->reg_num;
5656 if ((i.index_reg->reg_flags & RegRex) != 0)
5657 i.rex |= REX_X;
5658 }
5659
5660 default_seg = &ds;
5661
5662 if (i.base_reg == 0)
5663 {
5664 i.rm.mode = 0;
5665 if (!i.disp_operands)
5666 {
5667 fake_zero_displacement = 1;
5668 /* Instructions with VSIB byte need 32bit displacement
5669 if there is no base register. */
5670 if (i.tm.opcode_modifier.vecsib)
5671 i.types[op].bitfield.disp32 = 1;
5672 }
5673 if (i.index_reg == 0)
5674 {
5675 gas_assert (!i.tm.opcode_modifier.vecsib);
5676 /* Operand is just <disp> */
5677 if (flag_code == CODE_64BIT)
5678 {
5679 /* 64bit mode overwrites the 32bit absolute
5680 addressing by RIP relative addressing and
5681 absolute addressing is encoded by one of the
5682 redundant SIB forms. */
5683 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5684 i.sib.base = NO_BASE_REGISTER;
5685 i.sib.index = NO_INDEX_REGISTER;
5686 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5687 ? disp32s : disp32);
5688 }
5689 else if ((flag_code == CODE_16BIT)
5690 ^ (i.prefix[ADDR_PREFIX] != 0))
5691 {
5692 i.rm.regmem = NO_BASE_REGISTER_16;
5693 i.types[op] = disp16;
5694 }
5695 else
5696 {
5697 i.rm.regmem = NO_BASE_REGISTER;
5698 i.types[op] = disp32;
5699 }
5700 }
5701 else if (!i.tm.opcode_modifier.vecsib)
5702 {
5703 /* !i.base_reg && i.index_reg */
5704 if (i.index_reg->reg_num == RegEiz
5705 || i.index_reg->reg_num == RegRiz)
5706 i.sib.index = NO_INDEX_REGISTER;
5707 else
5708 i.sib.index = i.index_reg->reg_num;
5709 i.sib.base = NO_BASE_REGISTER;
5710 i.sib.scale = i.log2_scale_factor;
5711 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5712 i.types[op].bitfield.disp8 = 0;
5713 i.types[op].bitfield.disp16 = 0;
5714 i.types[op].bitfield.disp64 = 0;
5715 if (flag_code != CODE_64BIT)
5716 {
5717 /* Must be 32 bit */
5718 i.types[op].bitfield.disp32 = 1;
5719 i.types[op].bitfield.disp32s = 0;
5720 }
5721 else
5722 {
5723 i.types[op].bitfield.disp32 = 0;
5724 i.types[op].bitfield.disp32s = 1;
5725 }
5726 if ((i.index_reg->reg_flags & RegRex) != 0)
5727 i.rex |= REX_X;
5728 }
5729 }
5730 /* RIP addressing for 64bit mode. */
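/* E.g. (illustrative) "mov foo(%rip), %eax" is encoded with ModRM
   mod=00, r/m=101 and a signed 32bit PC-relative displacement; a bare
   "(%rip)" with no displacement gets a fake zero disp32s below.  */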
5731 else if (i.base_reg->reg_num == RegRip ||
5732 i.base_reg->reg_num == RegEip)
5733 {
5734 gas_assert (!i.tm.opcode_modifier.vecsib);
5735 i.rm.regmem = NO_BASE_REGISTER;
5736 i.types[op].bitfield.disp8 = 0;
5737 i.types[op].bitfield.disp16 = 0;
5738 i.types[op].bitfield.disp32 = 0;
5739 i.types[op].bitfield.disp32s = 1;
5740 i.types[op].bitfield.disp64 = 0;
5741 i.flags[op] |= Operand_PCrel;
5742 if (! i.disp_operands)
5743 fake_zero_displacement = 1;
5744 }
5745 else if (i.base_reg->reg_type.bitfield.reg16)
5746 {
5747 gas_assert (!i.tm.opcode_modifier.vecsib);
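/* Illustrative reminder of the 16bit ModRM r/m encodings handled
   below: 0 (%bx,%si), 1 (%bx,%di), 2 (%bp,%si), 3 (%bp,%di),
   4 (%si), 5 (%di), 6 (%bp) / disp16, 7 (%bx).  */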
5748 switch (i.base_reg->reg_num)
5749 {
5750 case 3: /* (%bx) */
5751 if (i.index_reg == 0)
5752 i.rm.regmem = 7;
5753 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5754 i.rm.regmem = i.index_reg->reg_num - 6;
5755 break;
5756 case 5: /* (%bp) */
5757 default_seg = &ss;
5758 if (i.index_reg == 0)
5759 {
5760 i.rm.regmem = 6;
5761 if (operand_type_check (i.types[op], disp) == 0)
5762 {
5763 /* fake (%bp) into 0(%bp) */
5764 i.types[op].bitfield.disp8 = 1;
5765 fake_zero_displacement = 1;
5766 }
5767 }
5768 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5769 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5770 break;
5771 default: /* (%si) -> 4 or (%di) -> 5 */
5772 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5773 }
5774 i.rm.mode = mode_from_disp_size (i.types[op]);
5775 }
5776 else /* i.base_reg and 32/64 bit mode */
5777 {
5778 if (flag_code == CODE_64BIT
5779 && operand_type_check (i.types[op], disp))
5780 {
5781 i386_operand_type temp;
5782 operand_type_set (&temp, 0);
5783 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5784 i.types[op] = temp;
5785 if (i.prefix[ADDR_PREFIX] == 0)
5786 i.types[op].bitfield.disp32s = 1;
5787 else
5788 i.types[op].bitfield.disp32 = 1;
5789 }
5790
5791 if (!i.tm.opcode_modifier.vecsib)
5792 i.rm.regmem = i.base_reg->reg_num;
5793 if ((i.base_reg->reg_flags & RegRex) != 0)
5794 i.rex |= REX_B;
5795 i.sib.base = i.base_reg->reg_num;
5796 /* x86-64 ignores REX prefix bit here to avoid decoder
5797 complications. */
5798 if (!(i.base_reg->reg_flags & RegRex)
5799 && (i.base_reg->reg_num == EBP_REG_NUM
5800 || i.base_reg->reg_num == ESP_REG_NUM))
5801 default_seg = &ss;
5802 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5803 {
5804 fake_zero_displacement = 1;
5805 i.types[op].bitfield.disp8 = 1;
5806 }
5807 i.sib.scale = i.log2_scale_factor;
5808 if (i.index_reg == 0)
5809 {
5810 gas_assert (!i.tm.opcode_modifier.vecsib);
5811 /* <disp>(%esp) becomes two byte modrm with no index
5812 register. We've already stored the code for esp
5813 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5814 Any base register besides %esp will not use the
5815 extra modrm byte. */
5816 i.sib.index = NO_INDEX_REGISTER;
5817 }
5818 else if (!i.tm.opcode_modifier.vecsib)
5819 {
5820 if (i.index_reg->reg_num == RegEiz
5821 || i.index_reg->reg_num == RegRiz)
5822 i.sib.index = NO_INDEX_REGISTER;
5823 else
5824 i.sib.index = i.index_reg->reg_num;
5825 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5826 if ((i.index_reg->reg_flags & RegRex) != 0)
5827 i.rex |= REX_X;
5828 }
5829
5830 if (i.disp_operands
5831 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5832 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5833 i.rm.mode = 0;
5834 else
5835 {
5836 if (!fake_zero_displacement
5837 && !i.disp_operands
5838 && i.disp_encoding)
5839 {
5840 fake_zero_displacement = 1;
5841 if (i.disp_encoding == disp_encoding_8bit)
5842 i.types[op].bitfield.disp8 = 1;
5843 else
5844 i.types[op].bitfield.disp32 = 1;
5845 }
5846 i.rm.mode = mode_from_disp_size (i.types[op]);
5847 }
5848 }
5849
5850 if (fake_zero_displacement)
5851 {
5852 /* Fakes a zero displacement assuming that i.types[op]
5853 holds the correct displacement size. */
5854 expressionS *exp;
5855
5856 gas_assert (i.op[op].disps == 0);
5857 exp = &disp_expressions[i.disp_operands++];
5858 i.op[op].disps = exp;
5859 exp->X_op = O_constant;
5860 exp->X_add_number = 0;
5861 exp->X_add_symbol = (symbolS *) 0;
5862 exp->X_op_symbol = (symbolS *) 0;
5863 }
5864
5865 mem = op;
5866 }
5867 else
5868 mem = ~0;
5869
5870 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5871 {
5872 if (operand_type_check (i.types[0], imm))
5873 i.vex.register_specifier = NULL;
5874 else
5875 {
5876 /* VEX.vvvv encodes one of the sources when the first
5877 operand is not an immediate. */
5878 if (i.tm.opcode_modifier.vexw == VEXW0)
5879 i.vex.register_specifier = i.op[0].regs;
5880 else
5881 i.vex.register_specifier = i.op[1].regs;
5882 }
5883
5884 /* Destination is an XMM register encoded in the ModRM.reg
5885 and VEX.R bit. */
5886 i.rm.reg = i.op[2].regs->reg_num;
5887 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5888 i.rex |= REX_R;
5889
5890 /* ModRM.rm and VEX.B encodes the other source. */
5891 if (!i.mem_operands)
5892 {
5893 i.rm.mode = 3;
5894
5895 if (i.tm.opcode_modifier.vexw == VEXW0)
5896 i.rm.regmem = i.op[1].regs->reg_num;
5897 else
5898 i.rm.regmem = i.op[0].regs->reg_num;
5899
5900 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5901 i.rex |= REX_B;
5902 }
5903 }
5904 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5905 {
5906 i.vex.register_specifier = i.op[2].regs;
5907 if (!i.mem_operands)
5908 {
5909 i.rm.mode = 3;
5910 i.rm.regmem = i.op[1].regs->reg_num;
5911 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5912 i.rex |= REX_B;
5913 }
5914 }
5915 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5916 (if any) based on i.tm.extension_opcode. Again, we must be
5917 careful to make sure that segment/control/debug/test/MMX
5918 registers are coded into the i.rm.reg field. */
5919 else if (i.reg_operands)
5920 {
5921 unsigned int op;
5922 unsigned int vex_reg = ~0;
5923
5924 for (op = 0; op < i.operands; op++)
5925 if (i.types[op].bitfield.reg8
5926 || i.types[op].bitfield.reg16
5927 || i.types[op].bitfield.reg32
5928 || i.types[op].bitfield.reg64
5929 || i.types[op].bitfield.regmmx
5930 || i.types[op].bitfield.regxmm
5931 || i.types[op].bitfield.regymm
5932 || i.types[op].bitfield.sreg2
5933 || i.types[op].bitfield.sreg3
5934 || i.types[op].bitfield.control
5935 || i.types[op].bitfield.debug
5936 || i.types[op].bitfield.test)
5937 break;
5938
5939 if (vex_3_sources)
5940 op = dest;
5941 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5942 {
5943 /* For instructions with VexNDS, the register-only
5944 source operand is encoded in VEX prefix. */
5945 gas_assert (mem != (unsigned int) ~0);
5946
5947 if (op > mem)
5948 {
5949 vex_reg = op++;
5950 gas_assert (op < i.operands);
5951 }
5952 else
5953 {
5954 /* Check register-only source operand when two source
5955 operands are swapped. */
5956 if (!i.tm.operand_types[op].bitfield.baseindex
5957 && i.tm.operand_types[op + 1].bitfield.baseindex)
5958 {
5959 vex_reg = op;
5960 op += 2;
5961 gas_assert (mem == (vex_reg + 1)
5962 && op < i.operands);
5963 }
5964 else
5965 {
5966 vex_reg = op + 1;
5967 gas_assert (vex_reg < i.operands);
5968 }
5969 }
5970 }
5971 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5972 {
5973 /* For instructions with VexNDD, the register destination
5974 is encoded in VEX prefix. */
5975 if (i.mem_operands == 0)
5976 {
5977 /* There is no memory operand. */
5978 gas_assert ((op + 2) == i.operands);
5979 vex_reg = op + 1;
5980 }
5981 else
5982 {
5983 /* There are only 2 operands. */
5984 gas_assert (op < 2 && i.operands == 2);
5985 vex_reg = 1;
5986 }
5987 }
5988 else
5989 gas_assert (op < i.operands);
5990
5991 if (vex_reg != (unsigned int) ~0)
5992 {
5993 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5994
5995 if (type->bitfield.reg32 != 1
5996 && type->bitfield.reg64 != 1
5997 && !operand_type_equal (type, &regxmm)
5998 && !operand_type_equal (type, &regymm))
5999 abort ();
6000
6001 i.vex.register_specifier = i.op[vex_reg].regs;
6002 }
6003
6004 /* Don't set OP operand twice. */
6005 if (vex_reg != op)
6006 {
6007 /* If there is an extension opcode to put here, the
6008 register number must be put into the regmem field. */
6009 if (i.tm.extension_opcode != None)
6010 {
6011 i.rm.regmem = i.op[op].regs->reg_num;
6012 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6013 i.rex |= REX_B;
6014 }
6015 else
6016 {
6017 i.rm.reg = i.op[op].regs->reg_num;
6018 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6019 i.rex |= REX_R;
6020 }
6021 }
6022
6023 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6024 must set it to 3 to indicate this is a register operand
6025 in the regmem field. */
6026 if (!i.mem_operands)
6027 i.rm.mode = 3;
6028 }
6029
6030 /* Fill in i.rm.reg field with extension opcode (if any). */
6031 if (i.tm.extension_opcode != None)
6032 i.rm.reg = i.tm.extension_opcode;
6033 }
6034 return default_seg;
6035 }
6036
6037 static void
6038 output_branch (void)
6039 {
6040 char *p;
6041 int size;
6042 int code16;
6043 int prefix;
6044 relax_substateT subtype;
6045 symbolS *sym;
6046 offsetT off;
6047
6048 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6049 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6050
6051 prefix = 0;
6052 if (i.prefix[DATA_PREFIX] != 0)
6053 {
6054 prefix = 1;
6055 i.prefixes -= 1;
6056 code16 ^= CODE16;
6057 }
6058 /* Pentium4 branch hints. */
6059 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6060 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6061 {
6062 prefix++;
6063 i.prefixes--;
6064 }
6065 if (i.prefix[REX_PREFIX] != 0)
6066 {
6067 prefix++;
6068 i.prefixes--;
6069 }
6070
6071 if (i.prefixes != 0 && !intel_syntax)
6072 as_warn (_("skipping prefixes on this instruction"));
6073
6074 /* It's always a symbol; End frag & setup for relax.
6075 Make sure there is enough room in this frag for the largest
6076 instruction we may generate in md_convert_frag. This is 2
6077 bytes for the opcode and room for the prefix and largest
6078 displacement. */
6079 frag_grow (prefix + 2 + 4);
6080 /* Prefix and 1 opcode byte go in fr_fix. */
6081 p = frag_more (prefix + 1);
6082 if (i.prefix[DATA_PREFIX] != 0)
6083 *p++ = DATA_PREFIX_OPCODE;
6084 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6085 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6086 *p++ = i.prefix[SEG_PREFIX];
6087 if (i.prefix[REX_PREFIX] != 0)
6088 *p++ = i.prefix[REX_PREFIX];
6089 *p = i.tm.base_opcode;
6090
6091 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6092 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6093 else if (cpu_arch_flags.bitfield.cpui386)
6094 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6095 else
6096 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6097 subtype |= code16;
6098
6099 sym = i.op[0].disps->X_add_symbol;
6100 off = i.op[0].disps->X_add_number;
6101
6102 if (i.op[0].disps->X_op != O_constant
6103 && i.op[0].disps->X_op != O_symbol)
6104 {
6105 /* Handle complex expressions. */
6106 sym = make_expr_symbol (i.op[0].disps);
6107 off = 0;
6108 }
6109
6110 /* 1 possible extra opcode + 4 byte displacement go in var part.
6111 Pass reloc in fr_var. */
6112 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6113 }
6114
6115 static void
6116 output_jump (void)
6117 {
6118 char *p;
6119 int size;
6120 fixS *fixP;
6121
6122 if (i.tm.opcode_modifier.jumpbyte)
6123 {
6124 /* This is a loop or jecxz type instruction. */
6125 size = 1;
6126 if (i.prefix[ADDR_PREFIX] != 0)
6127 {
6128 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6129 i.prefixes -= 1;
6130 }
6131 /* Pentium4 branch hints. */
6132 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6133 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6134 {
6135 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6136 i.prefixes--;
6137 }
6138 }
6139 else
6140 {
6141 int code16;
6142
6143 code16 = 0;
6144 if (flag_code == CODE_16BIT)
6145 code16 = CODE16;
6146
6147 if (i.prefix[DATA_PREFIX] != 0)
6148 {
6149 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6150 i.prefixes -= 1;
6151 code16 ^= CODE16;
6152 }
6153
6154 size = 4;
6155 if (code16)
6156 size = 2;
6157 }
6158
6159 if (i.prefix[REX_PREFIX] != 0)
6160 {
6161 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6162 i.prefixes -= 1;
6163 }
6164
6165 if (i.prefixes != 0 && !intel_syntax)
6166 as_warn (_("skipping prefixes on this instruction"));
6167
6168 p = frag_more (i.tm.opcode_length + size);
6169 switch (i.tm.opcode_length)
6170 {
6171 case 2:
6172 *p++ = i.tm.base_opcode >> 8;
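/* Fall through to emit the low opcode byte. */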
6173 case 1:
6174 *p++ = i.tm.base_opcode;
6175 break;
6176 default:
6177 abort ();
6178 }
6179
6180 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6181 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6182
6183 /* All jumps handled here are signed, but don't use a signed limit
6184 check for 32 and 16 bit jumps as we want to allow wrap around at
6185 4G and 64k respectively. */
6186 if (size == 1)
6187 fixP->fx_signed = 1;
6188 }
6189
6190 static void
6191 output_interseg_jump (void)
6192 {
6193 char *p;
6194 int size;
6195 int prefix;
6196 int code16;
6197
6198 code16 = 0;
6199 if (flag_code == CODE_16BIT)
6200 code16 = CODE16;
6201
6202 prefix = 0;
6203 if (i.prefix[DATA_PREFIX] != 0)
6204 {
6205 prefix = 1;
6206 i.prefixes -= 1;
6207 code16 ^= CODE16;
6208 }
6209 if (i.prefix[REX_PREFIX] != 0)
6210 {
6211 prefix++;
6212 i.prefixes -= 1;
6213 }
6214
6215 size = 4;
6216 if (code16)
6217 size = 2;
6218
6219 if (i.prefixes != 0 && !intel_syntax)
6220 as_warn (_("skipping prefixes on this instruction"));
6221
6222 /* 1 opcode byte; 2 segment bytes; SIZE offset bytes. */
6223 p = frag_more (prefix + 1 + 2 + size);
6224
6225 if (i.prefix[DATA_PREFIX] != 0)
6226 *p++ = DATA_PREFIX_OPCODE;
6227
6228 if (i.prefix[REX_PREFIX] != 0)
6229 *p++ = i.prefix[REX_PREFIX];
6230
6231 *p++ = i.tm.base_opcode;
6232 if (i.op[1].imms->X_op == O_constant)
6233 {
6234 offsetT n = i.op[1].imms->X_add_number;
6235
6236 if (size == 2
6237 && !fits_in_unsigned_word (n)
6238 && !fits_in_signed_word (n))
6239 {
6240 as_bad (_("16-bit jump out of range"));
6241 return;
6242 }
6243 md_number_to_chars (p, n, size);
6244 }
6245 else
6246 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6247 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6248 if (i.op[0].imms->X_op != O_constant)
6249 as_bad (_("can't handle non-absolute segment in `%s'"),
6250 i.tm.name);
6251 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6252 }
6253
6254 static void
6255 output_insn (void)
6256 {
6257 fragS *insn_start_frag;
6258 offsetT insn_start_off;
6259
6260 /* Tie dwarf2 debug info to the address at the start of the insn.
6261 We can't do this after the insn has been output as the current
6262 frag may have been closed off. eg. by frag_var. */
6263 dwarf2_emit_insn (0);
6264
6265 insn_start_frag = frag_now;
6266 insn_start_off = frag_now_fix ();
6267
6268 /* Output jumps. */
6269 if (i.tm.opcode_modifier.jump)
6270 output_branch ();
6271 else if (i.tm.opcode_modifier.jumpbyte
6272 || i.tm.opcode_modifier.jumpdword)
6273 output_jump ();
6274 else if (i.tm.opcode_modifier.jumpintersegment)
6275 output_interseg_jump ();
6276 else
6277 {
6278 /* Output normal instructions here. */
6279 char *p;
6280 unsigned char *q;
6281 unsigned int j;
6282 unsigned int prefix;
6283
6284 /* Since the VEX prefix already encodes the implied (mandatory)
6285 prefix, we don't need to emit an explicit prefix byte.
6286 if (!i.tm.opcode_modifier.vex)
6287 {
6288 switch (i.tm.opcode_length)
6289 {
6290 case 3:
6291 if (i.tm.base_opcode & 0xff000000)
6292 {
6293 prefix = (i.tm.base_opcode >> 24) & 0xff;
6294 goto check_prefix;
6295 }
6296 break;
6297 case 2:
6298 if ((i.tm.base_opcode & 0xff0000) != 0)
6299 {
6300 prefix = (i.tm.base_opcode >> 16) & 0xff;
6301 if (i.tm.cpu_flags.bitfield.cpupadlock)
6302 {
6303 check_prefix:
6304 if (prefix != REPE_PREFIX_OPCODE
6305 || (i.prefix[REP_PREFIX]
6306 != REPE_PREFIX_OPCODE))
6307 add_prefix (prefix);
6308 }
6309 else
6310 add_prefix (prefix);
6311 }
6312 break;
6313 case 1:
6314 break;
6315 default:
6316 abort ();
6317 }
6318
6319 /* The prefix bytes. */
6320 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6321 if (*q)
6322 FRAG_APPEND_1_CHAR (*q);
6323 }
6324 else
6325 {
6326 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6327 if (*q)
6328 switch (j)
6329 {
6330 case REX_PREFIX:
6331 /* REX byte is encoded in VEX prefix. */
6332 break;
6333 case SEG_PREFIX:
6334 case ADDR_PREFIX:
6335 FRAG_APPEND_1_CHAR (*q);
6336 break;
6337 default:
6338 /* There should be no other prefixes for instructions
6339 with VEX prefix. */
6340 abort ();
6341 }
6342
6343 /* Now the VEX prefix. */
6344 p = frag_more (i.vex.length);
6345 for (j = 0; j < i.vex.length; j++)
6346 p[j] = i.vex.bytes[j];
6347 }
6348
6349 /* Now the opcode; be careful about word order here! */
6350 if (i.tm.opcode_length == 1)
6351 {
6352 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6353 }
6354 else
6355 {
6356 switch (i.tm.opcode_length)
6357 {
6358 case 3:
6359 p = frag_more (3);
6360 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6361 break;
6362 case 2:
6363 p = frag_more (2);
6364 break;
6365 default:
6366 abort ();
6367 break;
6368 }
6369
6370 /* Put out high byte first: can't use md_number_to_chars! */
6371 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6372 *p = i.tm.base_opcode & 0xff;
6373 }
6374
6375 /* Now the modrm byte and sib byte (if present). */
6376 if (i.tm.opcode_modifier.modrm)
6377 {
6378 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6379 | i.rm.reg << 3
6380 | i.rm.mode << 6));
6381 /* If i.rm.regmem == ESP (4)
6382 && i.rm.mode != (Register mode)
6383 && not 16 bit
6384 ==> need a SIB byte as well. */
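/* For example (illustrative), `movl %eax,(%esp)' in 32-bit code
   assembles to 89 04 24: ModRM 0x04 selects a SIB-encoded base, and
   SIB 0x24 selects %esp as the base with no index. */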
6385 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6386 && i.rm.mode != 3
6387 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6388 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6389 | i.sib.index << 3
6390 | i.sib.scale << 6));
6391 }
6392
6393 if (i.disp_operands)
6394 output_disp (insn_start_frag, insn_start_off);
6395
6396 if (i.imm_operands)
6397 output_imm (insn_start_frag, insn_start_off);
6398 }
6399
6400 #ifdef DEBUG386
6401 if (flag_debug)
6402 {
6403 pi ("" /*line*/, &i);
6404 }
6405 #endif /* DEBUG386 */
6406 }
6407
6408 /* Return the size of the displacement operand N. */
6409
6410 static int
6411 disp_size (unsigned int n)
6412 {
6413 int size = 4;
6414 if (i.types[n].bitfield.disp64)
6415 size = 8;
6416 else if (i.types[n].bitfield.disp8)
6417 size = 1;
6418 else if (i.types[n].bitfield.disp16)
6419 size = 2;
6420 return size;
6421 }
6422
6423 /* Return the size of the immediate operand N. */
6424
6425 static int
6426 imm_size (unsigned int n)
6427 {
6428 int size = 4;
6429 if (i.types[n].bitfield.imm64)
6430 size = 8;
6431 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6432 size = 1;
6433 else if (i.types[n].bitfield.imm16)
6434 size = 2;
6435 return size;
6436 }
6437
6438 static void
6439 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6440 {
6441 char *p;
6442 unsigned int n;
6443
6444 for (n = 0; n < i.operands; n++)
6445 {
6446 if (operand_type_check (i.types[n], disp))
6447 {
6448 if (i.op[n].disps->X_op == O_constant)
6449 {
6450 int size = disp_size (n);
6451 offsetT val;
6452
6453 val = offset_in_range (i.op[n].disps->X_add_number,
6454 size);
6455 p = frag_more (size);
6456 md_number_to_chars (p, val, size);
6457 }
6458 else
6459 {
6460 enum bfd_reloc_code_real reloc_type;
6461 int size = disp_size (n);
6462 int sign = i.types[n].bitfield.disp32s;
6463 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6464
6465 /* We can't have 8 bit displacement here. */
6466 gas_assert (!i.types[n].bitfield.disp8);
6467
6468 /* The PC relative address is computed relative
6469 to the instruction boundary, so if an immediate
6470 field follows, we need to adjust the value. */
6471 if (pcrel && i.imm_operands)
6472 {
6473 unsigned int n1;
6474 int sz = 0;
6475
6476 for (n1 = 0; n1 < i.operands; n1++)
6477 if (operand_type_check (i.types[n1], imm))
6478 {
6479 /* Only one immediate is allowed for PC
6480 relative address. */
6481 gas_assert (sz == 0);
6482 sz = imm_size (n1);
6483 i.op[n].disps->X_add_number -= sz;
6484 }
6485 /* We should find the immediate. */
6486 gas_assert (sz != 0);
6487 }
6488
6489 p = frag_more (size);
6490 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6491 if (GOT_symbol
6492 && GOT_symbol == i.op[n].disps->X_add_symbol
6493 && (((reloc_type == BFD_RELOC_32
6494 || reloc_type == BFD_RELOC_X86_64_32S
6495 || (reloc_type == BFD_RELOC_64
6496 && object_64bit))
6497 && (i.op[n].disps->X_op == O_symbol
6498 || (i.op[n].disps->X_op == O_add
6499 && ((symbol_get_value_expression
6500 (i.op[n].disps->X_op_symbol)->X_op)
6501 == O_subtract))))
6502 || reloc_type == BFD_RELOC_32_PCREL))
6503 {
6504 offsetT add;
6505
6506 if (insn_start_frag == frag_now)
6507 add = (p - frag_now->fr_literal) - insn_start_off;
6508 else
6509 {
6510 fragS *fr;
6511
6512 add = insn_start_frag->fr_fix - insn_start_off;
6513 for (fr = insn_start_frag->fr_next;
6514 fr && fr != frag_now; fr = fr->fr_next)
6515 add += fr->fr_fix;
6516 add += p - frag_now->fr_literal;
6517 }
6518
6519 if (!object_64bit)
6520 {
6521 reloc_type = BFD_RELOC_386_GOTPC;
6522 i.op[n].imms->X_add_number += add;
6523 }
6524 else if (reloc_type == BFD_RELOC_64)
6525 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6526 else
6527 /* Don't do the adjustment for x86-64, as there
6528 the pcrel addressing is relative to the _next_
6529 insn, and that is taken care of in other code. */
6530 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6531 }
6532 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6533 i.op[n].disps, pcrel, reloc_type);
6534 }
6535 }
6536 }
6537 }
6538
6539 static void
6540 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6541 {
6542 char *p;
6543 unsigned int n;
6544
6545 for (n = 0; n < i.operands; n++)
6546 {
6547 if (operand_type_check (i.types[n], imm))
6548 {
6549 if (i.op[n].imms->X_op == O_constant)
6550 {
6551 int size = imm_size (n);
6552 offsetT val;
6553
6554 val = offset_in_range (i.op[n].imms->X_add_number,
6555 size);
6556 p = frag_more (size);
6557 md_number_to_chars (p, val, size);
6558 }
6559 else
6560 {
6561 /* Not absolute_section.
6562 Need a 32-bit fixup (don't support 8bit
6563 non-absolute imms). Try to support other
6564 sizes ... */
6565 enum bfd_reloc_code_real reloc_type;
6566 int size = imm_size (n);
6567 int sign;
6568
6569 if (i.types[n].bitfield.imm32s
6570 && (i.suffix == QWORD_MNEM_SUFFIX
6571 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6572 sign = 1;
6573 else
6574 sign = 0;
6575
6576 p = frag_more (size);
6577 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6578
6579 /* This is tough to explain. We end up with this one if we
6580 * have operands that look like
6581 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6582 * obtain the absolute address of the GOT, and it is strongly
6583 * preferable from a performance point of view to avoid using
6584 * a runtime relocation for this. The actual instruction
6585 * sequence often looks something like:
6586 *
6587 * call .L66
6588 * .L66:
6589 * popl %ebx
6590 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6591 *
6592 * The call and pop essentially return the absolute address
6593 * of the label .L66 and store it in %ebx. The linker itself
6594 * will ultimately change the first operand of the addl so
6595 * that %ebx points to the GOT, but to keep things simple, the
6596 * .o file must have this operand set so that it generates not
6597 * the absolute address of .L66, but the absolute address of
6598 * itself. This allows the linker to simply treat a GOTPC
6599 * relocation as asking for a pcrel offset to the GOT to be
6600 * added in, and the addend of the relocation is stored in the
6601 * operand field for the instruction itself.
6602 *
6603 * Our job here is to fix the operand so that it would add
6604 * the correct offset so that %ebx would point to itself. The
6605 * thing that is tricky is that .-.L66 will point to the
6606 * beginning of the instruction, so we need to further modify
6607 * the operand so that it will point to itself. There are
6608 * other cases where you have something like:
6609 *
6610 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6611 *
6612 * and here no correction would be required. Internally in
6613 * the assembler we treat operands of this form as not being
6614 * pcrel since the '.' is explicitly mentioned, and I wonder
6615 * whether it would simplify matters to do it this way. Who
6616 * knows. In earlier versions of the PIC patches, the
6617 * pcrel_adjust field was used to store the correction, but
6618 * since the expression is not pcrel, I felt it would be
6619 * confusing to do it this way. */
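/* For example (illustrative): in
   addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
   the immediate starts 2 bytes into the insn (after the 81 c3 opcode
   and ModRM bytes), so `add' below is 2; biasing the operand by that
   amount compensates for the relocation being applied at the
   immediate field rather than at `.', the start of the insn. */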
6620
6621 if ((reloc_type == BFD_RELOC_32
6622 || reloc_type == BFD_RELOC_X86_64_32S
6623 || reloc_type == BFD_RELOC_64)
6624 && GOT_symbol
6625 && GOT_symbol == i.op[n].imms->X_add_symbol
6626 && (i.op[n].imms->X_op == O_symbol
6627 || (i.op[n].imms->X_op == O_add
6628 && ((symbol_get_value_expression
6629 (i.op[n].imms->X_op_symbol)->X_op)
6630 == O_subtract))))
6631 {
6632 offsetT add;
6633
6634 if (insn_start_frag == frag_now)
6635 add = (p - frag_now->fr_literal) - insn_start_off;
6636 else
6637 {
6638 fragS *fr;
6639
6640 add = insn_start_frag->fr_fix - insn_start_off;
6641 for (fr = insn_start_frag->fr_next;
6642 fr && fr != frag_now; fr = fr->fr_next)
6643 add += fr->fr_fix;
6644 add += p - frag_now->fr_literal;
6645 }
6646
6647 if (!object_64bit)
6648 reloc_type = BFD_RELOC_386_GOTPC;
6649 else if (size == 4)
6650 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6651 else if (size == 8)
6652 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6653 i.op[n].imms->X_add_number += add;
6654 }
6655 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6656 i.op[n].imms, 0, reloc_type);
6657 }
6658 }
6659 }
6660 }
6661 \f
6662 /* x86_cons_fix_new is called via the expression parsing code when a
6663 reloc is needed. We use this hook to get the correct .got reloc. */
6664 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6665 static int cons_sign = -1;
6666
6667 void
6668 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6669 expressionS *exp)
6670 {
6671 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6672
6673 got_reloc = NO_RELOC;
6674
6675 #ifdef TE_PE
6676 if (exp->X_op == O_secrel)
6677 {
6678 exp->X_op = O_symbol;
6679 r = BFD_RELOC_32_SECREL;
6680 }
6681 #endif
6682
6683 fix_new_exp (frag, off, len, exp, 0, r);
6684 }
6685
6686 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6687 purpose of the `.dc.a' internal pseudo-op. */
6688
6689 int
6690 x86_address_bytes (void)
6691 {
6692 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6693 return 4;
6694 return stdoutput->arch_info->bits_per_address / 8;
6695 }
6696
6697 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6698 || defined (LEX_AT)
6699 # define lex_got(reloc, adjust, types) NULL
6700 #else
6701 /* Parse operands of the form
6702 <symbol>@GOTOFF+<nnn>
6703 and similar .plt or .got references.
6704
6705 If we find one, set up the correct relocation in RELOC and copy the
6706 input string, minus the `@GOTOFF' into a malloc'd buffer for
6707 parsing by the calling routine. Return this buffer, and if ADJUST
6708 is non-null set it to the length of the string we removed from the
6709 input line. Otherwise return NULL. */
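/* For example (illustrative): given `foo@GOTOFF+4' in 32-bit mode,
   *REL is set to BFD_RELOC_386_GOTOFF and the returned buffer holds
   "foo +4", the reloc token having been replaced by a space for the
   caller to re-parse. */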
6710 static char *
6711 lex_got (enum bfd_reloc_code_real *rel,
6712 int *adjust,
6713 i386_operand_type *types)
6714 {
6715 /* Some of the relocations depend on the size of the field to be
6716 relocated. But in our callers i386_immediate and i386_displacement
6717 we don't yet know the operand size (this will be set by insn
6718 matching). Hence we record the word32 relocation here,
6719 and adjust the reloc according to the real size in reloc(). */
6720 static const struct {
6721 const char *str;
6722 int len;
6723 const enum bfd_reloc_code_real rel[2];
6724 const i386_operand_type types64;
6725 } gotrel[] = {
6726 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
6727 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
6728 BFD_RELOC_SIZE32 },
6729 OPERAND_TYPE_IMM32_64 },
6730 #endif
6731 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6732 BFD_RELOC_X86_64_PLTOFF64 },
6733 OPERAND_TYPE_IMM64 },
6734 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6735 BFD_RELOC_X86_64_PLT32 },
6736 OPERAND_TYPE_IMM32_32S_DISP32 },
6737 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6738 BFD_RELOC_X86_64_GOTPLT64 },
6739 OPERAND_TYPE_IMM64_DISP64 },
6740 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6741 BFD_RELOC_X86_64_GOTOFF64 },
6742 OPERAND_TYPE_IMM64_DISP64 },
6743 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6744 BFD_RELOC_X86_64_GOTPCREL },
6745 OPERAND_TYPE_IMM32_32S_DISP32 },
6746 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6747 BFD_RELOC_X86_64_TLSGD },
6748 OPERAND_TYPE_IMM32_32S_DISP32 },
6749 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6750 _dummy_first_bfd_reloc_code_real },
6751 OPERAND_TYPE_NONE },
6752 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6753 BFD_RELOC_X86_64_TLSLD },
6754 OPERAND_TYPE_IMM32_32S_DISP32 },
6755 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6756 BFD_RELOC_X86_64_GOTTPOFF },
6757 OPERAND_TYPE_IMM32_32S_DISP32 },
6758 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6759 BFD_RELOC_X86_64_TPOFF32 },
6760 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6761 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6762 _dummy_first_bfd_reloc_code_real },
6763 OPERAND_TYPE_NONE },
6764 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6765 BFD_RELOC_X86_64_DTPOFF32 },
6766 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6767 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6768 _dummy_first_bfd_reloc_code_real },
6769 OPERAND_TYPE_NONE },
6770 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6771 _dummy_first_bfd_reloc_code_real },
6772 OPERAND_TYPE_NONE },
6773 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6774 BFD_RELOC_X86_64_GOT32 },
6775 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6776 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6777 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6778 OPERAND_TYPE_IMM32_32S_DISP32 },
6779 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6780 BFD_RELOC_X86_64_TLSDESC_CALL },
6781 OPERAND_TYPE_IMM32_32S_DISP32 },
6782 };
6783 char *cp;
6784 unsigned int j;
6785
6786 #if defined (OBJ_MAYBE_ELF)
6787 if (!IS_ELF)
6788 return NULL;
6789 #endif
6790
6791 for (cp = input_line_pointer; *cp != '@'; cp++)
6792 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6793 return NULL;
6794
6795 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6796 {
6797 int len = gotrel[j].len;
6798 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6799 {
6800 if (gotrel[j].rel[object_64bit] != 0)
6801 {
6802 int first, second;
6803 char *tmpbuf, *past_reloc;
6804
6805 *rel = gotrel[j].rel[object_64bit];
6806
6807 if (types)
6808 {
6809 if (flag_code != CODE_64BIT)
6810 {
6811 types->bitfield.imm32 = 1;
6812 types->bitfield.disp32 = 1;
6813 }
6814 else
6815 *types = gotrel[j].types64;
6816 }
6817
6818 if (j != 0 && GOT_symbol == NULL)
6819 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6820
6821 /* The length of the first part of our input line. */
6822 first = cp - input_line_pointer;
6823
6824 /* The second part goes from after the reloc token until
6825 (and including) an end_of_line char or comma. */
6826 past_reloc = cp + 1 + len;
6827 cp = past_reloc;
6828 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6829 ++cp;
6830 second = cp + 1 - past_reloc;
6831
6832 /* Allocate and copy string. The trailing NUL shouldn't
6833 be necessary, but be safe. */
6834 tmpbuf = (char *) xmalloc (first + second + 2);
6835 memcpy (tmpbuf, input_line_pointer, first);
6836 if (second != 0 && *past_reloc != ' ')
6837 /* Replace the relocation token with ' ', so that
6838 errors like foo@GOTOFF1 will be detected. */
6839 tmpbuf[first++] = ' ';
6840 else
6841 /* Increment length by 1 if the relocation token is
6842 removed. */
6843 len++;
6844 if (adjust)
6845 *adjust = len;
6846 memcpy (tmpbuf + first, past_reloc, second);
6847 tmpbuf[first + second] = '\0';
6848 return tmpbuf;
6849 }
6850
6851 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6852 gotrel[j].str, 1 << (5 + object_64bit));
6853 return NULL;
6854 }
6855 }
6856
6857 /* Might be a symbol version string. Don't as_bad here. */
6858 return NULL;
6859 }
6860 #endif
6861
6862 #ifdef TE_PE
6863 #ifdef lex_got
6864 #undef lex_got
6865 #endif
6866 /* Parse operands of the form
6867 <symbol>@SECREL32+<nnn>
6868
6869 If we find one, set up the correct relocation in RELOC and copy the
6870 input string, minus the `@SECREL32' into a malloc'd buffer for
6871 parsing by the calling routine. Return this buffer, and if ADJUST
6872 is non-null set it to the length of the string we removed from the
6873 input line. Otherwise return NULL.
6874
6875 This function is copied from the ELF version above, adjusted for PE targets. */
6876
6877 static char *
6878 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6879 int *adjust ATTRIBUTE_UNUSED,
6880 i386_operand_type *types ATTRIBUTE_UNUSED)
6881 {
6882 static const struct
6883 {
6884 const char *str;
6885 int len;
6886 const enum bfd_reloc_code_real rel[2];
6887 const i386_operand_type types64;
6888 }
6889 gotrel[] =
6890 {
6891 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6892 BFD_RELOC_32_SECREL },
6893 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6894 };
6895
6896 char *cp;
6897 unsigned j;
6898
6899 for (cp = input_line_pointer; *cp != '@'; cp++)
6900 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6901 return NULL;
6902
6903 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6904 {
6905 int len = gotrel[j].len;
6906
6907 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6908 {
6909 if (gotrel[j].rel[object_64bit] != 0)
6910 {
6911 int first, second;
6912 char *tmpbuf, *past_reloc;
6913
6914 *rel = gotrel[j].rel[object_64bit];
6915 if (adjust)
6916 *adjust = len;
6917
6918 if (types)
6919 {
6920 if (flag_code != CODE_64BIT)
6921 {
6922 types->bitfield.imm32 = 1;
6923 types->bitfield.disp32 = 1;
6924 }
6925 else
6926 *types = gotrel[j].types64;
6927 }
6928
6929 /* The length of the first part of our input line. */
6930 first = cp - input_line_pointer;
6931
6932 /* The second part goes from after the reloc token until
6933 (and including) an end_of_line char or comma. */
6934 past_reloc = cp + 1 + len;
6935 cp = past_reloc;
6936 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6937 ++cp;
6938 second = cp + 1 - past_reloc;
6939
6940 /* Allocate and copy string. The trailing NUL shouldn't
6941 be necessary, but be safe. */
6942 tmpbuf = (char *) xmalloc (first + second + 2);
6943 memcpy (tmpbuf, input_line_pointer, first);
6944 if (second != 0 && *past_reloc != ' ')
6945 /* Replace the relocation token with ' ', so that
6946 errors like foo@SECREL321 will be detected. */
6947 tmpbuf[first++] = ' ';
6948 memcpy (tmpbuf + first, past_reloc, second);
6949 tmpbuf[first + second] = '\0';
6950 return tmpbuf;
6951 }
6952
6953 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6954 gotrel[j].str, 1 << (5 + object_64bit));
6955 return NULL;
6956 }
6957 }
6958
6959 /* Might be a symbol version string. Don't as_bad here. */
6960 return NULL;
6961 }
6962
6963 #endif /* TE_PE */
6964
6965 void
6966 x86_cons (expressionS *exp, int size)
6967 {
6968 intel_syntax = -intel_syntax;
6969
6970 exp->X_md = 0;
6971 if (size == 4 || (object_64bit && size == 8))
6972 {
6973 /* Handle @GOTOFF and the like in an expression. */
6974 char *save;
6975 char *gotfree_input_line;
6976 int adjust = 0;
6977
6978 save = input_line_pointer;
6979 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6980 if (gotfree_input_line)
6981 input_line_pointer = gotfree_input_line;
6982
6983 expression (exp);
6984
6985 if (gotfree_input_line)
6986 {
6987 /* expression () has merrily parsed up to the end of line,
6988 or a comma - in the wrong buffer. Transfer how far
6989 input_line_pointer has moved to the right buffer. */
6990 input_line_pointer = (save
6991 + (input_line_pointer - gotfree_input_line)
6992 + adjust);
6993 free (gotfree_input_line);
6994 if (exp->X_op == O_constant
6995 || exp->X_op == O_absent
6996 || exp->X_op == O_illegal
6997 || exp->X_op == O_register
6998 || exp->X_op == O_big)
6999 {
7000 char c = *input_line_pointer;
7001 *input_line_pointer = 0;
7002 as_bad (_("missing or invalid expression `%s'"), save);
7003 *input_line_pointer = c;
7004 }
7005 }
7006 }
7007 else
7008 expression (exp);
7009
7010 intel_syntax = -intel_syntax;
7011
7012 if (intel_syntax)
7013 i386_intel_simplify (exp);
7014 }
7015
7016 static void
7017 signed_cons (int size)
7018 {
7019 if (flag_code == CODE_64BIT)
7020 cons_sign = 1;
7021 cons (size);
7022 cons_sign = -1;
7023 }
7024
7025 #ifdef TE_PE
7026 static void
7027 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7028 {
7029 expressionS exp;
7030
7031 do
7032 {
7033 expression (&exp);
7034 if (exp.X_op == O_symbol)
7035 exp.X_op = O_secrel;
7036
7037 emit_expr (&exp, 4);
7038 }
7039 while (*input_line_pointer++ == ',');
7040
7041 input_line_pointer--;
7042 demand_empty_rest_of_line ();
7043 }
7044 #endif
7045
7046 static int
7047 i386_immediate (char *imm_start)
7048 {
7049 char *save_input_line_pointer;
7050 char *gotfree_input_line;
7051 segT exp_seg = 0;
7052 expressionS *exp;
7053 i386_operand_type types;
7054
7055 operand_type_set (&types, ~0);
7056
7057 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7058 {
7059 as_bad (_("at most %d immediate operands are allowed"),
7060 MAX_IMMEDIATE_OPERANDS);
7061 return 0;
7062 }
7063
7064 exp = &im_expressions[i.imm_operands++];
7065 i.op[this_operand].imms = exp;
7066
7067 if (is_space_char (*imm_start))
7068 ++imm_start;
7069
7070 save_input_line_pointer = input_line_pointer;
7071 input_line_pointer = imm_start;
7072
7073 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7074 if (gotfree_input_line)
7075 input_line_pointer = gotfree_input_line;
7076
7077 exp_seg = expression (exp);
7078
7079 SKIP_WHITESPACE ();
7080 if (*input_line_pointer)
7081 as_bad (_("junk `%s' after expression"), input_line_pointer);
7082
7083 input_line_pointer = save_input_line_pointer;
7084 if (gotfree_input_line)
7085 {
7086 free (gotfree_input_line);
7087
7088 if (exp->X_op == O_constant || exp->X_op == O_register)
7089 exp->X_op = O_illegal;
7090 }
7091
7092 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7093 }
7094
7095 static int
7096 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7097 i386_operand_type types, const char *imm_start)
7098 {
7099 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7100 {
7101 if (imm_start)
7102 as_bad (_("missing or invalid immediate expression `%s'"),
7103 imm_start);
7104 return 0;
7105 }
7106 else if (exp->X_op == O_constant)
7107 {
7108 /* Size it properly later. */
7109 i.types[this_operand].bitfield.imm64 = 1;
7110 /* If not 64bit, sign extend val. */
7111 if (flag_code != CODE_64BIT
7112 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7113 exp->X_add_number
7114 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7115 }
7116 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7117 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7118 && exp_seg != absolute_section
7119 && exp_seg != text_section
7120 && exp_seg != data_section
7121 && exp_seg != bss_section
7122 && exp_seg != undefined_section
7123 && !bfd_is_com_section (exp_seg))
7124 {
7125 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7126 return 0;
7127 }
7128 #endif
7129 else if (!intel_syntax && exp->X_op == O_register)
7130 {
7131 if (imm_start)
7132 as_bad (_("illegal immediate register operand %s"), imm_start);
7133 return 0;
7134 }
7135 else
7136 {
7137 /* This is an address. The size of the address will be
7138 determined later, depending on destination register,
7139 suffix, or the default for the section. */
7140 i.types[this_operand].bitfield.imm8 = 1;
7141 i.types[this_operand].bitfield.imm16 = 1;
7142 i.types[this_operand].bitfield.imm32 = 1;
7143 i.types[this_operand].bitfield.imm32s = 1;
7144 i.types[this_operand].bitfield.imm64 = 1;
7145 i.types[this_operand] = operand_type_and (i.types[this_operand],
7146 types);
7147 }
7148
7149 return 1;
7150 }
7151
7152 static char *
7153 i386_scale (char *scale)
7154 {
7155 offsetT val;
7156 char *save = input_line_pointer;
7157
7158 input_line_pointer = scale;
7159 val = get_absolute_expression ();
7160
7161 switch (val)
7162 {
7163 case 1:
7164 i.log2_scale_factor = 0;
7165 break;
7166 case 2:
7167 i.log2_scale_factor = 1;
7168 break;
7169 case 4:
7170 i.log2_scale_factor = 2;
7171 break;
7172 case 8:
7173 i.log2_scale_factor = 3;
7174 break;
7175 default:
7176 {
7177 char sep = *input_line_pointer;
7178
7179 *input_line_pointer = '\0';
7180 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7181 scale);
7182 *input_line_pointer = sep;
7183 input_line_pointer = save;
7184 return NULL;
7185 }
7186 }
7187 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7188 {
7189 as_warn (_("scale factor of %d without an index register"),
7190 1 << i.log2_scale_factor);
7191 i.log2_scale_factor = 0;
7192 }
7193 scale = input_line_pointer;
7194 input_line_pointer = save;
7195 return scale;
7196 }
7197
7198 static int
7199 i386_displacement (char *disp_start, char *disp_end)
7200 {
7201 expressionS *exp;
7202 segT exp_seg = 0;
7203 char *save_input_line_pointer;
7204 char *gotfree_input_line;
7205 int override;
7206 i386_operand_type bigdisp, types = anydisp;
7207 int ret;
7208
7209 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7210 {
7211 as_bad (_("at most %d displacement operands are allowed"),
7212 MAX_MEMORY_OPERANDS);
7213 return 0;
7214 }
7215
7216 operand_type_set (&bigdisp, 0);
7217 if ((i.types[this_operand].bitfield.jumpabsolute)
7218 || (!current_templates->start->opcode_modifier.jump
7219 && !current_templates->start->opcode_modifier.jumpdword))
7220 {
7221 bigdisp.bitfield.disp32 = 1;
7222 override = (i.prefix[ADDR_PREFIX] != 0);
7223 if (flag_code == CODE_64BIT)
7224 {
7225 if (!override)
7226 {
7227 bigdisp.bitfield.disp32s = 1;
7228 bigdisp.bitfield.disp64 = 1;
7229 }
7230 }
7231 else if ((flag_code == CODE_16BIT) ^ override)
7232 {
7233 bigdisp.bitfield.disp32 = 0;
7234 bigdisp.bitfield.disp16 = 1;
7235 }
7236 }
7237 else
7238 {
7239 /* For PC-relative branches, the width of the displacement
7240 is dependent upon data size, not address size. */
7241 override = (i.prefix[DATA_PREFIX] != 0);
7242 if (flag_code == CODE_64BIT)
7243 {
7244 if (override || i.suffix == WORD_MNEM_SUFFIX)
7245 bigdisp.bitfield.disp16 = 1;
7246 else
7247 {
7248 bigdisp.bitfield.disp32 = 1;
7249 bigdisp.bitfield.disp32s = 1;
7250 }
7251 }
7252 else
7253 {
7254 if (!override)
7255 override = (i.suffix == (flag_code != CODE_16BIT
7256 ? WORD_MNEM_SUFFIX
7257 : LONG_MNEM_SUFFIX));
7258 bigdisp.bitfield.disp32 = 1;
7259 if ((flag_code == CODE_16BIT) ^ override)
7260 {
7261 bigdisp.bitfield.disp32 = 0;
7262 bigdisp.bitfield.disp16 = 1;
7263 }
7264 }
7265 }
7266 i.types[this_operand] = operand_type_or (i.types[this_operand],
7267 bigdisp);
7268
7269 exp = &disp_expressions[i.disp_operands];
7270 i.op[this_operand].disps = exp;
7271 i.disp_operands++;
7272 save_input_line_pointer = input_line_pointer;
7273 input_line_pointer = disp_start;
7274 END_STRING_AND_SAVE (disp_end);
7275
7276 #ifndef GCC_ASM_O_HACK
7277 #define GCC_ASM_O_HACK 0
7278 #endif
7279 #if GCC_ASM_O_HACK
7280 END_STRING_AND_SAVE (disp_end + 1);
7281 if (i.types[this_operand].bitfield.baseIndex
7282 && displacement_string_end[-1] == '+')
7283 {
7284 /* This hack is to avoid a warning when using the "o"
7285 constraint within gcc asm statements.
7286 For instance:
7287
7288 #define _set_tssldt_desc(n,addr,limit,type) \
7289 __asm__ __volatile__ ( \
7290 "movw %w2,%0\n\t" \
7291 "movw %w1,2+%0\n\t" \
7292 "rorl $16,%1\n\t" \
7293 "movb %b1,4+%0\n\t" \
7294 "movb %4,5+%0\n\t" \
7295 "movb $0,6+%0\n\t" \
7296 "movb %h1,7+%0\n\t" \
7297 "rorl $16,%1" \
7298 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7299
7300 This works great except that the output assembler ends
7301 up looking a bit weird if it turns out that there is
7302 no offset. You end up producing code that looks like:
7303
7304 #APP
7305 movw $235,(%eax)
7306 movw %dx,2+(%eax)
7307 rorl $16,%edx
7308 movb %dl,4+(%eax)
7309 movb $137,5+(%eax)
7310 movb $0,6+(%eax)
7311 movb %dh,7+(%eax)
7312 rorl $16,%edx
7313 #NO_APP
7314
7315 So here we provide the missing zero. */
7316
7317 *displacement_string_end = '0';
7318 }
7319 #endif
7320 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7321 if (gotfree_input_line)
7322 input_line_pointer = gotfree_input_line;
7323
7324 exp_seg = expression (exp);
7325
7326 SKIP_WHITESPACE ();
7327 if (*input_line_pointer)
7328 as_bad (_("junk `%s' after expression"), input_line_pointer);
7329 #if GCC_ASM_O_HACK
7330 RESTORE_END_STRING (disp_end + 1);
7331 #endif
7332 input_line_pointer = save_input_line_pointer;
7333 if (gotfree_input_line)
7334 {
7335 free (gotfree_input_line);
7336
7337 if (exp->X_op == O_constant || exp->X_op == O_register)
7338 exp->X_op = O_illegal;
7339 }
7340
7341 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7342
7343 RESTORE_END_STRING (disp_end);
7344
7345 return ret;
7346 }
7347
7348 static int
7349 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7350 i386_operand_type types, const char *disp_start)
7351 {
7352 i386_operand_type bigdisp;
7353 int ret = 1;
7354
7355 /* We do this to make sure that the section symbol is in
7356 the symbol table. We will ultimately change the relocation
7357 to be relative to the beginning of the section. */
7358 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7359 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7360 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7361 {
7362 if (exp->X_op != O_symbol)
7363 goto inv_disp;
7364
7365 if (S_IS_LOCAL (exp->X_add_symbol)
7366 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7367 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7368 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7369 exp->X_op = O_subtract;
7370 exp->X_op_symbol = GOT_symbol;
7371 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7372 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7373 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7374 i.reloc[this_operand] = BFD_RELOC_64;
7375 else
7376 i.reloc[this_operand] = BFD_RELOC_32;
7377 }
7378
7379 else if (exp->X_op == O_absent
7380 || exp->X_op == O_illegal
7381 || exp->X_op == O_big)
7382 {
7383 inv_disp:
7384 as_bad (_("missing or invalid displacement expression `%s'"),
7385 disp_start);
7386 ret = 0;
7387 }
7388
7389 else if (flag_code == CODE_64BIT
7390 && !i.prefix[ADDR_PREFIX]
7391 && exp->X_op == O_constant)
7392 {
7393 /* Since the displacement is sign-extended to 64bit, don't allow
7394 disp32, and turn off disp32s if it is out of range. */
7395 i.types[this_operand].bitfield.disp32 = 0;
7396 if (!fits_in_signed_long (exp->X_add_number))
7397 {
7398 i.types[this_operand].bitfield.disp32s = 0;
7399 if (i.types[this_operand].bitfield.baseindex)
7400 {
7401 as_bad (_("0x%lx out range of signed 32bit displacement"),
7402 (long) exp->X_add_number);
7403 ret = 0;
7404 }
7405 }
7406 }
7407
7408 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7409 else if (exp->X_op != O_constant
7410 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7411 && exp_seg != absolute_section
7412 && exp_seg != text_section
7413 && exp_seg != data_section
7414 && exp_seg != bss_section
7415 && exp_seg != undefined_section
7416 && !bfd_is_com_section (exp_seg))
7417 {
7418 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7419 ret = 0;
7420 }
7421 #endif
7422
7423 /* Check if this is a displacement only operand. */
7424 bigdisp = i.types[this_operand];
7425 bigdisp.bitfield.disp8 = 0;
7426 bigdisp.bitfield.disp16 = 0;
7427 bigdisp.bitfield.disp32 = 0;
7428 bigdisp.bitfield.disp32s = 0;
7429 bigdisp.bitfield.disp64 = 0;
7430 if (operand_type_all_zero (&bigdisp))
7431 i.types[this_operand] = operand_type_and (i.types[this_operand],
7432 types);
7433
7434 return ret;
7435 }
7436
7437 /* Make sure the memory operand we've been dealt is valid.
7438 Return 1 on success, 0 on a failure. */
7439
7440 static int
7441 i386_index_check (const char *operand_string)
7442 {
7443 int ok;
7444 const char *kind = "base/index";
7445 #if INFER_ADDR_PREFIX
7446 int fudged = 0;
7447
7448 tryprefix:
7449 #endif
7450 ok = 1;
7451 if (current_templates->start->opcode_modifier.isstring
7452 && !current_templates->start->opcode_modifier.immext
7453 && (current_templates->end[-1].opcode_modifier.isstring
7454 || i.mem_operands))
7455 {
7456 /* Memory operands of string insns are special in that they only allow
7457 a single register (rDI, rSI, or rBX) as their memory address. */
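/* For example (illustrative), `movs' reads from (%esi)/(%rsi) and
   writes to %es:(%edi)/(%rdi), while `xlat' uses (%ebx)/(%bx); the
   checks below diagnose any other register. */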
7458 unsigned int expected;
7459
7460 kind = "string address";
7461
7462 if (current_templates->start->opcode_modifier.w)
7463 {
7464 i386_operand_type type = current_templates->end[-1].operand_types[0];
7465
7466 if (!type.bitfield.baseindex
7467 || ((!i.mem_operands != !intel_syntax)
7468 && current_templates->end[-1].operand_types[1]
7469 .bitfield.baseindex))
7470 type = current_templates->end[-1].operand_types[1];
7471 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7472 }
7473 else
7474 expected = 3 /* rBX */;
7475
7476 if (!i.base_reg || i.index_reg
7477 || operand_type_check (i.types[this_operand], disp))
7478 ok = -1;
7479 else if (!(flag_code == CODE_64BIT
7480 ? i.prefix[ADDR_PREFIX]
7481 ? i.base_reg->reg_type.bitfield.reg32
7482 : i.base_reg->reg_type.bitfield.reg64
7483 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7484 ? i.base_reg->reg_type.bitfield.reg32
7485 : i.base_reg->reg_type.bitfield.reg16))
7486 ok = 0;
7487 else if (register_number (i.base_reg) != expected)
7488 ok = -1;
7489
7490 if (ok < 0)
7491 {
7492 unsigned int j;
7493
7494 for (j = 0; j < i386_regtab_size; ++j)
7495 if ((flag_code == CODE_64BIT
7496 ? i.prefix[ADDR_PREFIX]
7497 ? i386_regtab[j].reg_type.bitfield.reg32
7498 : i386_regtab[j].reg_type.bitfield.reg64
7499 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7500 ? i386_regtab[j].reg_type.bitfield.reg32
7501 : i386_regtab[j].reg_type.bitfield.reg16)
7502 && register_number(i386_regtab + j) == expected)
7503 break;
7504 gas_assert (j < i386_regtab_size);
7505 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7506 operand_string,
7507 intel_syntax ? '[' : '(',
7508 register_prefix,
7509 i386_regtab[j].reg_name,
7510 intel_syntax ? ']' : ')');
7511 ok = 1;
7512 }
7513 }
7514 else if (flag_code == CODE_64BIT)
7515 {
7516 if ((i.base_reg
7517 && ((i.prefix[ADDR_PREFIX] == 0
7518 && !i.base_reg->reg_type.bitfield.reg64)
7519 || (i.prefix[ADDR_PREFIX]
7520 && !i.base_reg->reg_type.bitfield.reg32))
7521 && (i.index_reg
7522 || i.base_reg->reg_num !=
7523 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7524 || (i.index_reg
7525 && !(i.index_reg->reg_type.bitfield.regxmm
7526 || i.index_reg->reg_type.bitfield.regymm)
7527 && (!i.index_reg->reg_type.bitfield.baseindex
7528 || (i.prefix[ADDR_PREFIX] == 0
7529 && i.index_reg->reg_num != RegRiz
7530 && !i.index_reg->reg_type.bitfield.reg64
7531 )
7532 || (i.prefix[ADDR_PREFIX]
7533 && i.index_reg->reg_num != RegEiz
7534 && !i.index_reg->reg_type.bitfield.reg32))))
7535 ok = 0;
7536 }
7537 else
7538 {
7539 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7540 {
7541 /* 16bit checks. */
7542 if ((i.base_reg
7543 && (!i.base_reg->reg_type.bitfield.reg16
7544 || !i.base_reg->reg_type.bitfield.baseindex))
7545 || (i.index_reg
7546 && (!i.index_reg->reg_type.bitfield.reg16
7547 || !i.index_reg->reg_type.bitfield.baseindex
7548 || !(i.base_reg
7549 && i.base_reg->reg_num < 6
7550 && i.index_reg->reg_num >= 6
7551 && i.log2_scale_factor == 0))))
7552 ok = 0;
7553 }
7554 else
7555 {
7556 /* 32bit checks. */
7557 if ((i.base_reg
7558 && !i.base_reg->reg_type.bitfield.reg32)
7559 || (i.index_reg
7560 && !i.index_reg->reg_type.bitfield.regxmm
7561 && !i.index_reg->reg_type.bitfield.regymm
7562 && ((!i.index_reg->reg_type.bitfield.reg32
7563 && i.index_reg->reg_num != RegEiz)
7564 || !i.index_reg->reg_type.bitfield.baseindex)))
7565 ok = 0;
7566 }
7567 }
7568 if (!ok)
7569 {
7570 #if INFER_ADDR_PREFIX
7571 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7572 {
7573 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7574 i.prefixes += 1;
7575 /* Change the size of any displacement too. At most one of
7576 Disp16 or Disp32 is set.
7577 FIXME. There doesn't seem to be any real need for separate
7578 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7579 Removing them would probably clean up the code quite a lot. */
7580 if (flag_code != CODE_64BIT
7581 && (i.types[this_operand].bitfield.disp16
7582 || i.types[this_operand].bitfield.disp32))
7583 i.types[this_operand]
7584 = operand_type_xor (i.types[this_operand], disp16_32);
7585 fudged = 1;
7586 goto tryprefix;
7587 }
7588 if (fudged)
7589 as_bad (_("`%s' is not a valid %s expression"),
7590 operand_string,
7591 kind);
7592 else
7593 #endif
7594 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7595 operand_string,
7596 flag_code_names[i.prefix[ADDR_PREFIX]
7597 ? flag_code == CODE_32BIT
7598 ? CODE_16BIT
7599 : CODE_32BIT
7600 : flag_code],
7601 kind);
7602 }
7603 return ok;
7604 }
7605
7606 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7607 on error. */
7608
7609 static int
7610 i386_att_operand (char *operand_string)
7611 {
7612 const reg_entry *r;
7613 char *end_op;
7614 char *op_string = operand_string;
7615
7616 if (is_space_char (*op_string))
7617 ++op_string;
7618
7619 /* We check for an absolute prefix (differentiating,
7620 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7621 if (*op_string == ABSOLUTE_PREFIX)
7622 {
7623 ++op_string;
7624 if (is_space_char (*op_string))
7625 ++op_string;
7626 i.types[this_operand].bitfield.jumpabsolute = 1;
7627 }
7628
7629 /* Check if operand is a register. */
7630 if ((r = parse_register (op_string, &end_op)) != NULL)
7631 {
7632 i386_operand_type temp;
7633
7634 /* Check for a segment override by searching for ':' after a
7635 segment register. */
7636 op_string = end_op;
7637 if (is_space_char (*op_string))
7638 ++op_string;
7639 if (*op_string == ':'
7640 && (r->reg_type.bitfield.sreg2
7641 || r->reg_type.bitfield.sreg3))
7642 {
7643 switch (r->reg_num)
7644 {
7645 case 0:
7646 i.seg[i.mem_operands] = &es;
7647 break;
7648 case 1:
7649 i.seg[i.mem_operands] = &cs;
7650 break;
7651 case 2:
7652 i.seg[i.mem_operands] = &ss;
7653 break;
7654 case 3:
7655 i.seg[i.mem_operands] = &ds;
7656 break;
7657 case 4:
7658 i.seg[i.mem_operands] = &fs;
7659 break;
7660 case 5:
7661 i.seg[i.mem_operands] = &gs;
7662 break;
7663 }
7664
7665 /* Skip the ':' and whitespace. */
7666 ++op_string;
7667 if (is_space_char (*op_string))
7668 ++op_string;
7669
7670 if (!is_digit_char (*op_string)
7671 && !is_identifier_char (*op_string)
7672 && *op_string != '('
7673 && *op_string != ABSOLUTE_PREFIX)
7674 {
7675 as_bad (_("bad memory operand `%s'"), op_string);
7676 return 0;
7677 }
7678 /* Handle case of %es:*foo. */
7679 if (*op_string == ABSOLUTE_PREFIX)
7680 {
7681 ++op_string;
7682 if (is_space_char (*op_string))
7683 ++op_string;
7684 i.types[this_operand].bitfield.jumpabsolute = 1;
7685 }
7686 goto do_memory_reference;
7687 }
7688 if (*op_string)
7689 {
7690 as_bad (_("junk `%s' after register"), op_string);
7691 return 0;
7692 }
7693 temp = r->reg_type;
7694 temp.bitfield.baseindex = 0;
7695 i.types[this_operand] = operand_type_or (i.types[this_operand],
7696 temp);
7697 i.types[this_operand].bitfield.unspecified = 0;
7698 i.op[this_operand].regs = r;
7699 i.reg_operands++;
7700 }
7701 else if (*op_string == REGISTER_PREFIX)
7702 {
7703 as_bad (_("bad register name `%s'"), op_string);
7704 return 0;
7705 }
7706 else if (*op_string == IMMEDIATE_PREFIX)
7707 {
7708 ++op_string;
7709 if (i.types[this_operand].bitfield.jumpabsolute)
7710 {
7711 as_bad (_("immediate operand illegal with absolute jump"));
7712 return 0;
7713 }
7714 if (!i386_immediate (op_string))
7715 return 0;
7716 }
7717 else if (is_digit_char (*op_string)
7718 || is_identifier_char (*op_string)
7719 || *op_string == '(')
7720 {
7721 /* This is a memory reference of some sort. */
7722 char *base_string;
7723
7724 /* Start and end of displacement string expression (if found). */
7725 char *displacement_string_start;
7726 char *displacement_string_end;
7727
7728 do_memory_reference:
7729 if ((i.mem_operands == 1
7730 && !current_templates->start->opcode_modifier.isstring)
7731 || i.mem_operands == 2)
7732 {
7733 as_bad (_("too many memory references for `%s'"),
7734 current_templates->start->name);
7735 return 0;
7736 }
7737
7738 /* Check for base index form. We detect the base index form by
7739 looking for an ')' at the end of the operand, searching
7740 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7741 after the '('. */
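/* For example (illustrative), `-4(%ebp,%ecx,4)' splits into the
   displacement "-4", base %ebp, index %ecx and scale 4, while
   `(,%eax,2)' has an index and scale but no displacement or base. */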
7742 base_string = op_string + strlen (op_string);
7743
7744 --base_string;
7745 if (is_space_char (*base_string))
7746 --base_string;
7747
7748 /* If we only have a displacement, set up for it to be parsed later. */
7749 displacement_string_start = op_string;
7750 displacement_string_end = base_string + 1;
7751
7752 if (*base_string == ')')
7753 {
7754 char *temp_string;
7755 unsigned int parens_balanced = 1;
7756 /* We've already checked that the number of left & right ()'s are
7757 equal, so this loop will not be infinite. */
7758 do
7759 {
7760 base_string--;
7761 if (*base_string == ')')
7762 parens_balanced++;
7763 if (*base_string == '(')
7764 parens_balanced--;
7765 }
7766 while (parens_balanced);
7767
7768 temp_string = base_string;
7769
7770 /* Skip past '(' and whitespace. */
7771 ++base_string;
7772 if (is_space_char (*base_string))
7773 ++base_string;
7774
7775 if (*base_string == ','
7776 || ((i.base_reg = parse_register (base_string, &end_op))
7777 != NULL))
7778 {
7779 displacement_string_end = temp_string;
7780
7781 i.types[this_operand].bitfield.baseindex = 1;
7782
7783 if (i.base_reg)
7784 {
7785 base_string = end_op;
7786 if (is_space_char (*base_string))
7787 ++base_string;
7788 }
7789
7790 /* There may be an index reg or scale factor here. */
7791 if (*base_string == ',')
7792 {
7793 ++base_string;
7794 if (is_space_char (*base_string))
7795 ++base_string;
7796
7797 if ((i.index_reg = parse_register (base_string, &end_op))
7798 != NULL)
7799 {
7800 base_string = end_op;
7801 if (is_space_char (*base_string))
7802 ++base_string;
7803 if (*base_string == ',')
7804 {
7805 ++base_string;
7806 if (is_space_char (*base_string))
7807 ++base_string;
7808 }
7809 else if (*base_string != ')')
7810 {
7811 as_bad (_("expecting `,' or `)' "
7812 "after index register in `%s'"),
7813 operand_string);
7814 return 0;
7815 }
7816 }
7817 else if (*base_string == REGISTER_PREFIX)
7818 {
7819 end_op = strchr (base_string, ',');
7820 if (end_op)
7821 *end_op = '\0';
7822 as_bad (_("bad register name `%s'"), base_string);
7823 return 0;
7824 }
7825
7826 /* Check for scale factor. */
7827 if (*base_string != ')')
7828 {
7829 char *end_scale = i386_scale (base_string);
7830
7831 if (!end_scale)
7832 return 0;
7833
7834 base_string = end_scale;
7835 if (is_space_char (*base_string))
7836 ++base_string;
7837 if (*base_string != ')')
7838 {
7839 as_bad (_("expecting `)' "
7840 "after scale factor in `%s'"),
7841 operand_string);
7842 return 0;
7843 }
7844 }
7845 else if (!i.index_reg)
7846 {
7847 as_bad (_("expecting index register or scale factor "
7848 "after `,'; got '%c'"),
7849 *base_string);
7850 return 0;
7851 }
7852 }
7853 else if (*base_string != ')')
7854 {
7855 as_bad (_("expecting `,' or `)' "
7856 "after base register in `%s'"),
7857 operand_string);
7858 return 0;
7859 }
7860 }
7861 else if (*base_string == REGISTER_PREFIX)
7862 {
7863 end_op = strchr (base_string, ',');
7864 if (end_op)
7865 *end_op = '\0';
7866 as_bad (_("bad register name `%s'"), base_string);
7867 return 0;
7868 }
7869 }
7870
7871 /* If there's an expression beginning the operand, parse it,
7872 assuming displacement_string_start and
7873 displacement_string_end are meaningful. */
7874 if (displacement_string_start != displacement_string_end)
7875 {
7876 if (!i386_displacement (displacement_string_start,
7877 displacement_string_end))
7878 return 0;
7879 }
7880
7881 /* Special case for (%dx) while doing input/output op. */
7882 if (i.base_reg
7883 && operand_type_equal (&i.base_reg->reg_type,
7884 &reg16_inoutportreg)
7885 && i.index_reg == 0
7886 && i.log2_scale_factor == 0
7887 && i.seg[i.mem_operands] == 0
7888 && !operand_type_check (i.types[this_operand], disp))
7889 {
7890 i.types[this_operand] = inoutportreg;
7891 return 1;
7892 }
7893
7894 if (i386_index_check (operand_string) == 0)
7895 return 0;
7896 i.types[this_operand].bitfield.mem = 1;
7897 i.mem_operands++;
7898 }
7899 else
7900 {
7901 /* It's not a memory operand; argh! */
7902 as_bad (_("invalid char %s beginning operand %d `%s'"),
7903 output_invalid (*op_string),
7904 this_operand + 1,
7905 op_string);
7906 return 0;
7907 }
7908 return 1; /* Normal return. */
7909 }
7910 \f
7911 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7912 that an rs_machine_dependent frag may reach. */
7913
7914 unsigned int
7915 i386_frag_max_var (fragS *frag)
7916 {
7917 /* The only relaxable frags are for jumps.
7918 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7919 gas_assert (frag->fr_type == rs_machine_dependent);
7920 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7921 }
7922
7923 /* md_estimate_size_before_relax()
7924
7925 Called just before relax() for rs_machine_dependent frags. The x86
7926 assembler uses these frags to handle variable size jump
7927 instructions.
7928
7929 Any symbol that is now undefined will not become defined.
7930 Return the correct fr_subtype in the frag.
7931 Return the initial "guess for variable size of frag" to caller.
7932 The guess is actually the growth beyond the fixed part. Whatever
7933 we do to grow the fixed or variable part contributes to our
7934 returned value. */
7935
7936 int
7937 md_estimate_size_before_relax (fragS *fragP, segT segment)
7938 {
7939 /* We've already got fragP->fr_subtype right; all we have to do is
7940 check for un-relaxable symbols. On an ELF system, we can't relax
7941 an externally visible symbol, because it may be overridden by a
7942 shared library. */
7943 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7944 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7945 || (IS_ELF
7946 && (S_IS_EXTERNAL (fragP->fr_symbol)
7947 || S_IS_WEAK (fragP->fr_symbol)
7948 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7949 & BSF_GNU_INDIRECT_FUNCTION))))
7950 #endif
7951 #if defined (OBJ_COFF) && defined (TE_PE)
7952 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7953 && S_IS_WEAK (fragP->fr_symbol))
7954 #endif
7955 )
7956 {
7957 /* Symbol is undefined in this segment, or we need to keep a
7958 reloc so that weak symbols can be overridden. */
7959 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7960 enum bfd_reloc_code_real reloc_type;
7961 unsigned char *opcode;
7962 int old_fr_fix;
7963
7964 if (fragP->fr_var != NO_RELOC)
7965 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7966 else if (size == 2)
7967 reloc_type = BFD_RELOC_16_PCREL;
7968 else
7969 reloc_type = BFD_RELOC_32_PCREL;
7970
7971 old_fr_fix = fragP->fr_fix;
7972 opcode = (unsigned char *) fragP->fr_opcode;
7973
7974 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7975 {
7976 case UNCOND_JUMP:
7977 /* Make jmp (0xeb) a (d)word displacement jump. */
7978 opcode[0] = 0xe9;
7979 fragP->fr_fix += size;
7980 fix_new (fragP, old_fr_fix, size,
7981 fragP->fr_symbol,
7982 fragP->fr_offset, 1,
7983 reloc_type);
7984 break;
7985
7986 case COND_JUMP86:
7987 if (size == 2
7988 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7989 {
7990 /* Negate the condition, and branch past an
7991 unconditional jump. */
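/* For example, a 16-bit `je target' encoded as 74 xx becomes:
       75 03          jne  past the new jump
       E9 lo hi       jmp  target (16-bit displacement)
   The low opcode bit flips the condition, and the inverted jump's
   8-bit displacement of 3 skips the 3-byte `jmp' inserted below.  */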
7992 opcode[0] ^= 1;
7993 opcode[1] = 3;
7994 /* Insert an unconditional jump. */
7995 opcode[2] = 0xe9;
7996 /* We added two extra opcode bytes, and have a two byte
7997 offset. */
7998 fragP->fr_fix += 2 + 2;
7999 fix_new (fragP, old_fr_fix + 2, 2,
8000 fragP->fr_symbol,
8001 fragP->fr_offset, 1,
8002 reloc_type);
8003 break;
8004 }
8005 /* Fall through. */
8006
8007 case COND_JUMP:
8008 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
8009 {
8010 fixS *fixP;
8011
8012 fragP->fr_fix += 1;
8013 fixP = fix_new (fragP, old_fr_fix, 1,
8014 fragP->fr_symbol,
8015 fragP->fr_offset, 1,
8016 BFD_RELOC_8_PCREL);
8017 fixP->fx_signed = 1;
8018 break;
8019 }
8020
8021 /* This changes the byte-displacement jump 0x7N
8022 to the (d)word-displacement jump 0x0f,0x8N. */
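/* For example, `jne target' (75 xx) becomes 0F 85 followed by a
   16- or 32-bit displacement, depending on SIZE.  */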
8023 opcode[1] = opcode[0] + 0x10;
8024 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8025 /* We've added an opcode byte. */
8026 fragP->fr_fix += 1 + size;
8027 fix_new (fragP, old_fr_fix + 1, size,
8028 fragP->fr_symbol,
8029 fragP->fr_offset, 1,
8030 reloc_type);
8031 break;
8032
8033 default:
8034 BAD_CASE (fragP->fr_subtype);
8035 break;
8036 }
8037 frag_wane (fragP);
8038 return fragP->fr_fix - old_fr_fix;
8039 }
8040
8041 /* Guess size depending on current relax state. Initially the relax
8042 state will correspond to a short jump and we return 1, because
8043 the variable part of the frag (the branch offset) is one byte
8044 long. However, we can relax a section more than once and in that
8045 case we must either set fr_subtype back to the unrelaxed state,
8046 or return the value for the appropriate branch. */
8047 return md_relax_table[fragP->fr_subtype].rlx_length;
8048 }
8049
8050 /* Called after relax() is finished.
8051
8052 In: Address of frag.
8053 fr_type == rs_machine_dependent.
8054 fr_subtype is what the address relaxed to.
8055
8056 Out: Any fixSs and constants are set up.
8057 Caller will turn frag into a ".space 0". */
8058
8059 void
8060 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8061 fragS *fragP)
8062 {
8063 unsigned char *opcode;
8064 unsigned char *where_to_put_displacement = NULL;
8065 offsetT target_address;
8066 offsetT opcode_address;
8067 unsigned int extension = 0;
8068 offsetT displacement_from_opcode_start;
8069
8070 opcode = (unsigned char *) fragP->fr_opcode;
8071
8072 /* Address we want to reach in file space. */
8073 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8074
8075 /* Address opcode resides at in file space. */
8076 opcode_address = fragP->fr_address + fragP->fr_fix;
8077
8078 /* Displacement from opcode start to fill into instruction. */
8079 displacement_from_opcode_start = target_address - opcode_address;
8080
8081 if ((fragP->fr_subtype & BIG) == 0)
8082 {
8083 /* Don't have to change opcode. */
8084 extension = 1; /* 1 opcode + 1 displacement */
8085 where_to_put_displacement = &opcode[1];
8086 }
8087 else
8088 {
8089 if (no_cond_jump_promotion
8090 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8091 as_warn_where (fragP->fr_file, fragP->fr_line,
8092 _("long jump required"));
8093
8094 switch (fragP->fr_subtype)
8095 {
8096 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8097 extension = 4; /* 1 opcode + 4 displacement */
8098 opcode[0] = 0xe9;
8099 where_to_put_displacement = &opcode[1];
8100 break;
8101
8102 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8103 extension = 2; /* 1 opcode + 2 displacement */
8104 opcode[0] = 0xe9;
8105 where_to_put_displacement = &opcode[1];
8106 break;
8107
8108 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8109 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8110 extension = 5; /* 2 opcode + 4 displacement */
8111 opcode[1] = opcode[0] + 0x10;
8112 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8113 where_to_put_displacement = &opcode[2];
8114 break;
8115
8116 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8117 extension = 3; /* 2 opcode + 2 displacement */
8118 opcode[1] = opcode[0] + 0x10;
8119 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8120 where_to_put_displacement = &opcode[2];
8121 break;
8122
8123 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8124 extension = 4;
8125 opcode[0] ^= 1;
8126 opcode[1] = 3;
8127 opcode[2] = 0xe9;
8128 where_to_put_displacement = &opcode[3];
8129 break;
8130
8131 default:
8132 BAD_CASE (fragP->fr_subtype);
8133 break;
8134 }
8135 }
8136
 8137   /* If size is less than four we are sure that the operand fits,
 8138      but if it's 4, the displacement could be larger than +/- 2GB; the
 8139      test below biases it by 2**31 and checks it still fits in 32 bits.  */
8140 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8141 && object_64bit
8142 && ((addressT) (displacement_from_opcode_start - extension
8143 + ((addressT) 1 << 31))
8144 > (((addressT) 2 << 31) - 1)))
8145 {
8146 as_bad_where (fragP->fr_file, fragP->fr_line,
8147 _("jump target out of range"));
8148 /* Make us emit 0. */
8149 displacement_from_opcode_start = extension;
8150 }
8151 /* Now put displacement after opcode. */
8152 md_number_to_chars ((char *) where_to_put_displacement,
8153 (valueT) (displacement_from_opcode_start - extension),
8154 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8155 fragP->fr_fix += extension;
8156 }
8157 \f
8158 /* Apply a fixup (fixP) to segment data, once it has been determined
8159 by our caller that we have all the info we need to fix it up.
8160
8161 Parameter valP is the pointer to the value of the bits.
8162
8163 On the 386, immediates, displacements, and data pointers are all in
8164 the same (little-endian) format, so we don't need to care about which
8165 we are handling. */
8166
8167 void
8168 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8169 {
8170 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8171 valueT value = *valP;
8172
8173 #if !defined (TE_Mach)
8174 if (fixP->fx_pcrel)
8175 {
8176 switch (fixP->fx_r_type)
8177 {
8178 default:
8179 break;
8180
8181 case BFD_RELOC_64:
8182 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8183 break;
8184 case BFD_RELOC_32:
8185 case BFD_RELOC_X86_64_32S:
8186 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8187 break;
8188 case BFD_RELOC_16:
8189 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8190 break;
8191 case BFD_RELOC_8:
8192 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8193 break;
8194 }
8195 }
8196
8197 if (fixP->fx_addsy != NULL
8198 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8199 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8200 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8201 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8202 && !use_rela_relocations)
8203 {
8204 /* This is a hack. There should be a better way to handle this.
8205 This covers for the fact that bfd_install_relocation will
8206 subtract the current location (for partial_inplace, PC relative
8207 relocations); see more below. */
8208 #ifndef OBJ_AOUT
8209 if (IS_ELF
8210 #ifdef TE_PE
8211 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8212 #endif
8213 )
8214 value += fixP->fx_where + fixP->fx_frag->fr_address;
8215 #endif
8216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8217 if (IS_ELF)
8218 {
8219 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8220
8221 if ((sym_seg == seg
8222 || (symbol_section_p (fixP->fx_addsy)
8223 && sym_seg != absolute_section))
8224 && !generic_force_reloc (fixP))
8225 {
8226 /* Yes, we add the values in twice. This is because
8227 bfd_install_relocation subtracts them out again. I think
8228 bfd_install_relocation is broken, but I don't dare change
8229 it. FIXME. */
8230 value += fixP->fx_where + fixP->fx_frag->fr_address;
8231 }
8232 }
8233 #endif
8234 #if defined (OBJ_COFF) && defined (TE_PE)
8235 /* For some reason, the PE format does not store a
8236 section address offset for a PC relative symbol. */
8237 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8238 || S_IS_WEAK (fixP->fx_addsy))
8239 value += md_pcrel_from (fixP);
8240 #endif
8241 }
8242 #if defined (OBJ_COFF) && defined (TE_PE)
8243 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8244 {
8245 value -= S_GET_VALUE (fixP->fx_addsy);
8246 }
8247 #endif
8248
8249 /* Fix a few things - the dynamic linker expects certain values here,
8250 and we must not disappoint it. */
8251 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8252 if (IS_ELF && fixP->fx_addsy)
8253 switch (fixP->fx_r_type)
8254 {
8255 case BFD_RELOC_386_PLT32:
8256 case BFD_RELOC_X86_64_PLT32:
8257 /* Make the jump instruction point to the address of the operand. At
8258 runtime we merely add the offset to the actual PLT entry. */
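/* For example, `call func@PLT' is E8 followed by a 4-byte field that
   ends at the next instruction; the -4 stored below is the addend that
   accounts for the field starting 4 bytes earlier when the PC-relative
   value is resolved.  (Illustrative example.)  */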
8259 value = -4;
8260 break;
8261
8262 case BFD_RELOC_386_TLS_GD:
8263 case BFD_RELOC_386_TLS_LDM:
8264 case BFD_RELOC_386_TLS_IE_32:
8265 case BFD_RELOC_386_TLS_IE:
8266 case BFD_RELOC_386_TLS_GOTIE:
8267 case BFD_RELOC_386_TLS_GOTDESC:
8268 case BFD_RELOC_X86_64_TLSGD:
8269 case BFD_RELOC_X86_64_TLSLD:
8270 case BFD_RELOC_X86_64_GOTTPOFF:
8271 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8272 value = 0; /* Fully resolved at runtime. No addend. */
8273 /* Fallthrough */
8274 case BFD_RELOC_386_TLS_LE:
8275 case BFD_RELOC_386_TLS_LDO_32:
8276 case BFD_RELOC_386_TLS_LE_32:
8277 case BFD_RELOC_X86_64_DTPOFF32:
8278 case BFD_RELOC_X86_64_DTPOFF64:
8279 case BFD_RELOC_X86_64_TPOFF32:
8280 case BFD_RELOC_X86_64_TPOFF64:
8281 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8282 break;
8283
8284 case BFD_RELOC_386_TLS_DESC_CALL:
8285 case BFD_RELOC_X86_64_TLSDESC_CALL:
8286 value = 0; /* Fully resolved at runtime. No addend. */
8287 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8288 fixP->fx_done = 0;
8289 return;
8290
8291 case BFD_RELOC_386_GOT32:
8292 case BFD_RELOC_X86_64_GOT32:
8293 value = 0; /* Fully resolved at runtime. No addend. */
8294 break;
8295
8296 case BFD_RELOC_VTABLE_INHERIT:
8297 case BFD_RELOC_VTABLE_ENTRY:
8298 fixP->fx_done = 0;
8299 return;
8300
8301 default:
8302 break;
8303 }
8304 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8305 *valP = value;
8306 #endif /* !defined (TE_Mach) */
8307
8308 /* Are we finished with this relocation now? */
8309 if (fixP->fx_addsy == NULL)
8310 fixP->fx_done = 1;
8311 #if defined (OBJ_COFF) && defined (TE_PE)
8312 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8313 {
8314 fixP->fx_done = 0;
8315 /* Remember value for tc_gen_reloc. */
8316 fixP->fx_addnumber = value;
8317 /* Clear out the frag for now. */
8318 value = 0;
8319 }
8320 #endif
8321 else if (use_rela_relocations)
8322 {
8323 fixP->fx_no_overflow = 1;
8324 /* Remember value for tc_gen_reloc. */
8325 fixP->fx_addnumber = value;
8326 value = 0;
8327 }
8328
8329 md_number_to_chars (p, value, fixP->fx_size);
8330 }
8331 \f
8332 char *
8333 md_atof (int type, char *litP, int *sizeP)
8334 {
8335 /* This outputs the LITTLENUMs in REVERSE order;
 8336      in accord with the little-endian 386.  */
8337 return ieee_md_atof (type, litP, sizeP, FALSE);
8338 }
8339 \f
8340 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8341
8342 static char *
8343 output_invalid (int c)
8344 {
8345 if (ISPRINT (c))
8346 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8347 "'%c'", c);
8348 else
8349 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8350 "(0x%x)", (unsigned char) c);
8351 return output_invalid_buf;
8352 }
8353
8354 /* REG_STRING starts *before* REGISTER_PREFIX. */
8355
8356 static const reg_entry *
8357 parse_real_register (char *reg_string, char **end_op)
8358 {
8359 char *s = reg_string;
8360 char *p;
8361 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8362 const reg_entry *r;
8363
8364 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8365 if (*s == REGISTER_PREFIX)
8366 ++s;
8367
8368 if (is_space_char (*s))
8369 ++s;
8370
8371 p = reg_name_given;
8372 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8373 {
8374 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8375 return (const reg_entry *) NULL;
8376 s++;
8377 }
8378
8379 /* For naked regs, make sure that we are not dealing with an identifier.
8380 This prevents confusing an identifier like `eax_var' with register
8381 `eax'. */
8382 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8383 return (const reg_entry *) NULL;
8384
8385 *end_op = s;
8386
8387 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8388
8389 /* Handle floating point regs, allowing spaces in the (i) part. */
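/* For example, `%st(3)' (or `%st ( 3 )') names the fourth stack
   register: at this point R is the plain `%st' entry, i.e. st(0), and
   FPR below indexes from it.  */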
8390 if (r == i386_regtab /* %st is first entry of table */)
8391 {
8392 if (is_space_char (*s))
8393 ++s;
8394 if (*s == '(')
8395 {
8396 ++s;
8397 if (is_space_char (*s))
8398 ++s;
8399 if (*s >= '0' && *s <= '7')
8400 {
8401 int fpr = *s - '0';
8402 ++s;
8403 if (is_space_char (*s))
8404 ++s;
8405 if (*s == ')')
8406 {
8407 *end_op = s + 1;
8408 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8409 know (r);
8410 return r + fpr;
8411 }
8412 }
8413 /* We have "%st(" then garbage. */
8414 return (const reg_entry *) NULL;
8415 }
8416 }
8417
8418 if (r == NULL || allow_pseudo_reg)
8419 return r;
8420
8421 if (operand_type_all_zero (&r->reg_type))
8422 return (const reg_entry *) NULL;
8423
8424 if ((r->reg_type.bitfield.reg32
8425 || r->reg_type.bitfield.sreg3
8426 || r->reg_type.bitfield.control
8427 || r->reg_type.bitfield.debug
8428 || r->reg_type.bitfield.test)
8429 && !cpu_arch_flags.bitfield.cpui386)
8430 return (const reg_entry *) NULL;
8431
8432 if (r->reg_type.bitfield.floatreg
8433 && !cpu_arch_flags.bitfield.cpu8087
8434 && !cpu_arch_flags.bitfield.cpu287
8435 && !cpu_arch_flags.bitfield.cpu387)
8436 return (const reg_entry *) NULL;
8437
8438 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8439 return (const reg_entry *) NULL;
8440
8441 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8442 return (const reg_entry *) NULL;
8443
8444 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8445 return (const reg_entry *) NULL;
8446
 8447   /* Don't allow the fake index registers %eiz/%riz unless allow_index_reg is set (-mindex-reg).  */
8448 if (!allow_index_reg
8449 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8450 return (const reg_entry *) NULL;
8451
8452 if (((r->reg_flags & (RegRex64 | RegRex))
8453 || r->reg_type.bitfield.reg64)
8454 && (!cpu_arch_flags.bitfield.cpulm
8455 || !operand_type_equal (&r->reg_type, &control))
8456 && flag_code != CODE_64BIT)
8457 return (const reg_entry *) NULL;
8458
8459 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8460 return (const reg_entry *) NULL;
8461
8462 return r;
8463 }
8464
8465 /* REG_STRING starts *before* REGISTER_PREFIX. */
8466
8467 static const reg_entry *
8468 parse_register (char *reg_string, char **end_op)
8469 {
8470 const reg_entry *r;
8471
8472 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8473 r = parse_real_register (reg_string, end_op);
8474 else
8475 r = NULL;
8476 if (!r)
8477 {
8478 char *save = input_line_pointer;
8479 char c;
8480 symbolS *symbolP;
8481
8482 input_line_pointer = reg_string;
8483 c = get_symbol_end ();
8484 symbolP = symbol_find (reg_string);
8485 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8486 {
8487 const expressionS *e = symbol_get_value_expression (symbolP);
8488
8489 know (e->X_op == O_register);
8490 know (e->X_add_number >= 0
8491 && (valueT) e->X_add_number < i386_regtab_size);
8492 r = i386_regtab + e->X_add_number;
8493 *end_op = input_line_pointer;
8494 }
8495 *input_line_pointer = c;
8496 input_line_pointer = save;
8497 }
8498 return r;
8499 }
8500
8501 int
8502 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8503 {
8504 const reg_entry *r;
8505 char *end = input_line_pointer;
8506
8507 *end = *nextcharP;
8508 r = parse_register (name, &input_line_pointer);
8509 if (r && end <= input_line_pointer)
8510 {
8511 *nextcharP = *input_line_pointer;
8512 *input_line_pointer = 0;
8513 e->X_op = O_register;
8514 e->X_add_number = r - i386_regtab;
8515 return 1;
8516 }
8517 input_line_pointer = end;
8518 *end = 0;
8519 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8520 }
8521
8522 void
8523 md_operand (expressionS *e)
8524 {
8525 char *end;
8526 const reg_entry *r;
8527
8528 switch (*input_line_pointer)
8529 {
8530 case REGISTER_PREFIX:
8531 r = parse_real_register (input_line_pointer, &end);
8532 if (r)
8533 {
8534 e->X_op = O_register;
8535 e->X_add_number = r - i386_regtab;
8536 input_line_pointer = end;
8537 }
8538 break;
8539
8540 case '[':
8541 gas_assert (intel_syntax);
8542 end = input_line_pointer++;
8543 expression (e);
8544 if (*input_line_pointer == ']')
8545 {
8546 ++input_line_pointer;
8547 e->X_op_symbol = make_expr_symbol (e);
8548 e->X_add_symbol = NULL;
8549 e->X_add_number = 0;
8550 e->X_op = O_index;
8551 }
8552 else
8553 {
8554 e->X_op = O_absent;
8555 input_line_pointer = end;
8556 }
8557 break;
8558 }
8559 }
8560
8561 \f
8562 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8563 const char *md_shortopts = "kVQ:sqn";
8564 #else
8565 const char *md_shortopts = "qn";
8566 #endif
8567
8568 #define OPTION_32 (OPTION_MD_BASE + 0)
8569 #define OPTION_64 (OPTION_MD_BASE + 1)
8570 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8571 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8572 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8573 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8574 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8575 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8576 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8577 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8578 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8579 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8580 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8581 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8582 #define OPTION_X32 (OPTION_MD_BASE + 14)
8583
8584 struct option md_longopts[] =
8585 {
8586 {"32", no_argument, NULL, OPTION_32},
8587 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8588 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8589 {"64", no_argument, NULL, OPTION_64},
8590 #endif
8591 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8592 {"x32", no_argument, NULL, OPTION_X32},
8593 #endif
8594 {"divide", no_argument, NULL, OPTION_DIVIDE},
8595 {"march", required_argument, NULL, OPTION_MARCH},
8596 {"mtune", required_argument, NULL, OPTION_MTUNE},
8597 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8598 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8599 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8600 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8601 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8602 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8603 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8604 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8605 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8606 {NULL, no_argument, NULL, 0}
8607 };
8608 size_t md_longopts_size = sizeof (md_longopts);
8609
8610 int
8611 md_parse_option (int c, char *arg)
8612 {
8613 unsigned int j;
8614 char *arch, *next;
8615
8616 switch (c)
8617 {
8618 case 'n':
8619 optimize_align_code = 0;
8620 break;
8621
8622 case 'q':
8623 quiet_warnings = 1;
8624 break;
8625
8626 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8627 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8628 should be emitted or not. FIXME: Not implemented. */
8629 case 'Q':
8630 break;
8631
8632 /* -V: SVR4 argument to print version ID. */
8633 case 'V':
8634 print_version_id ();
8635 break;
8636
8637 /* -k: Ignore for FreeBSD compatibility. */
8638 case 'k':
8639 break;
8640
8641 case 's':
8642 /* -s: On i386 Solaris, this tells the native assembler to use
8643 .stab instead of .stab.excl. We always use .stab anyhow. */
8644 break;
8645 #endif
8646 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8647 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8648 case OPTION_64:
8649 {
8650 const char **list, **l;
8651
8652 list = bfd_target_list ();
8653 for (l = list; *l != NULL; l++)
8654 if (CONST_STRNEQ (*l, "elf64-x86-64")
8655 || strcmp (*l, "coff-x86-64") == 0
8656 || strcmp (*l, "pe-x86-64") == 0
8657 || strcmp (*l, "pei-x86-64") == 0
8658 || strcmp (*l, "mach-o-x86-64") == 0)
8659 {
8660 default_arch = "x86_64";
8661 break;
8662 }
8663 if (*l == NULL)
8664 as_fatal (_("no compiled in support for x86_64"));
8665 free (list);
8666 }
8667 break;
8668 #endif
8669
8670 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8671 case OPTION_X32:
8672 if (IS_ELF)
8673 {
8674 const char **list, **l;
8675
8676 list = bfd_target_list ();
8677 for (l = list; *l != NULL; l++)
8678 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8679 {
8680 default_arch = "x86_64:32";
8681 break;
8682 }
8683 if (*l == NULL)
8684 as_fatal (_("no compiled in support for 32bit x86_64"));
8685 free (list);
8686 }
8687 else
8688 as_fatal (_("32bit x86_64 is only supported for ELF"));
8689 break;
8690 #endif
8691
8692 case OPTION_32:
8693 default_arch = "i386";
8694 break;
8695
8696 case OPTION_DIVIDE:
8697 #ifdef SVR4_COMMENT_CHARS
8698 {
8699 char *n, *t;
8700 const char *s;
8701
8702 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8703 t = n;
8704 for (s = i386_comment_chars; *s != '\0'; s++)
8705 if (*s != '/')
8706 *t++ = *s;
8707 *t = '\0';
8708 i386_comment_chars = n;
8709 }
8710 #endif
8711 break;
8712
8713 case OPTION_MARCH:
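/* The argument is a CPU name optionally followed by `+'-separated
   extension names, e.g. `-march=core2+sse4.2' (illustrative); each
   piece is looked up in cpu_arch[] below, where extension entries
   start with a dot.  */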
8714 arch = xstrdup (arg);
8715 do
8716 {
8717 if (*arch == '.')
8718 as_fatal (_("invalid -march= option: `%s'"), arg);
8719 next = strchr (arch, '+');
8720 if (next)
8721 *next++ = '\0';
8722 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8723 {
8724 if (strcmp (arch, cpu_arch [j].name) == 0)
8725 {
8726 /* Processor. */
8727 if (! cpu_arch[j].flags.bitfield.cpui386)
8728 continue;
8729
8730 cpu_arch_name = cpu_arch[j].name;
8731 cpu_sub_arch_name = NULL;
8732 cpu_arch_flags = cpu_arch[j].flags;
8733 cpu_arch_isa = cpu_arch[j].type;
8734 cpu_arch_isa_flags = cpu_arch[j].flags;
8735 if (!cpu_arch_tune_set)
8736 {
8737 cpu_arch_tune = cpu_arch_isa;
8738 cpu_arch_tune_flags = cpu_arch_isa_flags;
8739 }
8740 break;
8741 }
8742 else if (*cpu_arch [j].name == '.'
8743 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8744 {
 8745 		  /* ISA extension.  */
8746 i386_cpu_flags flags;
8747
8748 if (!cpu_arch[j].negated)
8749 flags = cpu_flags_or (cpu_arch_flags,
8750 cpu_arch[j].flags);
8751 else
8752 flags = cpu_flags_and_not (cpu_arch_flags,
8753 cpu_arch[j].flags);
8754 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8755 {
8756 if (cpu_sub_arch_name)
8757 {
8758 char *name = cpu_sub_arch_name;
8759 cpu_sub_arch_name = concat (name,
8760 cpu_arch[j].name,
8761 (const char *) NULL);
8762 free (name);
8763 }
8764 else
8765 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8766 cpu_arch_flags = flags;
8767 cpu_arch_isa_flags = flags;
8768 }
8769 break;
8770 }
8771 }
8772
8773 if (j >= ARRAY_SIZE (cpu_arch))
8774 as_fatal (_("invalid -march= option: `%s'"), arg);
8775
8776 arch = next;
8777 }
 8778       while (next != NULL);
8779 break;
8780
8781 case OPTION_MTUNE:
8782 if (*arg == '.')
8783 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8784 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8785 {
8786 if (strcmp (arg, cpu_arch [j].name) == 0)
8787 {
8788 cpu_arch_tune_set = 1;
8789 cpu_arch_tune = cpu_arch [j].type;
8790 cpu_arch_tune_flags = cpu_arch[j].flags;
8791 break;
8792 }
8793 }
8794 if (j >= ARRAY_SIZE (cpu_arch))
8795 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8796 break;
8797
8798 case OPTION_MMNEMONIC:
8799 if (strcasecmp (arg, "att") == 0)
8800 intel_mnemonic = 0;
8801 else if (strcasecmp (arg, "intel") == 0)
8802 intel_mnemonic = 1;
8803 else
8804 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8805 break;
8806
8807 case OPTION_MSYNTAX:
8808 if (strcasecmp (arg, "att") == 0)
8809 intel_syntax = 0;
8810 else if (strcasecmp (arg, "intel") == 0)
8811 intel_syntax = 1;
8812 else
8813 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8814 break;
8815
8816 case OPTION_MINDEX_REG:
8817 allow_index_reg = 1;
8818 break;
8819
8820 case OPTION_MNAKED_REG:
8821 allow_naked_reg = 1;
8822 break;
8823
8824 case OPTION_MOLD_GCC:
8825 old_gcc = 1;
8826 break;
8827
8828 case OPTION_MSSE2AVX:
8829 sse2avx = 1;
8830 break;
8831
8832 case OPTION_MSSE_CHECK:
8833 if (strcasecmp (arg, "error") == 0)
8834 sse_check = check_error;
8835 else if (strcasecmp (arg, "warning") == 0)
8836 sse_check = check_warning;
8837 else if (strcasecmp (arg, "none") == 0)
8838 sse_check = check_none;
8839 else
8840 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8841 break;
8842
8843 case OPTION_MOPERAND_CHECK:
8844 if (strcasecmp (arg, "error") == 0)
8845 operand_check = check_error;
8846 else if (strcasecmp (arg, "warning") == 0)
8847 operand_check = check_warning;
8848 else if (strcasecmp (arg, "none") == 0)
8849 operand_check = check_none;
8850 else
8851 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8852 break;
8853
8854 case OPTION_MAVXSCALAR:
8855 if (strcasecmp (arg, "128") == 0)
8856 avxscalar = vex128;
8857 else if (strcasecmp (arg, "256") == 0)
8858 avxscalar = vex256;
8859 else
8860 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8861 break;
8862
8863 default:
8864 return 0;
8865 }
8866 return 1;
8867 }
8868
8869 #define MESSAGE_TEMPLATE \
8870 " "
8871
8872 static void
8873 show_arch (FILE *stream, int ext, int check)
8874 {
8875 static char message[] = MESSAGE_TEMPLATE;
8876 char *start = message + 27;
8877 char *p;
8878 int size = sizeof (MESSAGE_TEMPLATE);
8879 int left;
8880 const char *name;
8881 int len;
8882 unsigned int j;
8883
8884 p = start;
8885 left = size - (start - message);
8886 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8887 {
8888 /* Should it be skipped? */
8889 if (cpu_arch [j].skip)
8890 continue;
8891
8892 name = cpu_arch [j].name;
8893 len = cpu_arch [j].len;
8894 if (*name == '.')
8895 {
8896 /* It is an extension. Skip if we aren't asked to show it. */
8897 if (ext)
8898 {
8899 name++;
8900 len--;
8901 }
8902 else
8903 continue;
8904 }
8905 else if (ext)
8906 {
 8907 	  /* It is a processor.  Skip if we only show extensions.  */
8908 continue;
8909 }
8910 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8911 {
8912 /* It is an impossible processor - skip. */
8913 continue;
8914 }
8915
8916 /* Reserve 2 spaces for ", " or ",\0" */
8917 left -= len + 2;
8918
8919 /* Check if there is any room. */
8920 if (left >= 0)
8921 {
8922 if (p != start)
8923 {
8924 *p++ = ',';
8925 *p++ = ' ';
8926 }
8927 p = mempcpy (p, name, len);
8928 }
8929 else
8930 {
8931 /* Output the current message now and start a new one. */
8932 *p++ = ',';
8933 *p = '\0';
8934 fprintf (stream, "%s\n", message);
8935 p = start;
8936 left = size - (start - message) - len - 2;
8937
8938 gas_assert (left >= 0);
8939
8940 p = mempcpy (p, name, len);
8941 }
8942 }
8943
8944 *p = '\0';
8945 fprintf (stream, "%s\n", message);
8946 }
8947
8948 void
8949 md_show_usage (FILE *stream)
8950 {
8951 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8952 fprintf (stream, _("\
8953 -Q ignored\n\
8954 -V print assembler version number\n\
8955 -k ignored\n"));
8956 #endif
8957 fprintf (stream, _("\
8958 -n Do not optimize code alignment\n\
8959 -q quieten some warnings\n"));
8960 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8961 fprintf (stream, _("\
8962 -s ignored\n"));
8963 #endif
8964 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8965 || defined (TE_PE) || defined (TE_PEP))
8966 fprintf (stream, _("\
8967 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8968 #endif
8969 #ifdef SVR4_COMMENT_CHARS
8970 fprintf (stream, _("\
8971 --divide do not treat `/' as a comment character\n"));
8972 #else
8973 fprintf (stream, _("\
8974 --divide ignored\n"));
8975 #endif
8976 fprintf (stream, _("\
8977 -march=CPU[,+EXTENSION...]\n\
8978 generate code for CPU and EXTENSION, CPU is one of:\n"));
8979 show_arch (stream, 0, 1);
8980 fprintf (stream, _("\
8981 EXTENSION is combination of:\n"));
8982 show_arch (stream, 1, 0);
8983 fprintf (stream, _("\
8984 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8985 show_arch (stream, 0, 0);
8986 fprintf (stream, _("\
8987 -msse2avx encode SSE instructions with VEX prefix\n"));
8988 fprintf (stream, _("\
8989 -msse-check=[none|error|warning]\n\
8990 check SSE instructions\n"));
8991 fprintf (stream, _("\
8992 -moperand-check=[none|error|warning]\n\
8993 check operand combinations for validity\n"));
8994 fprintf (stream, _("\
8995 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8996 length\n"));
8997 fprintf (stream, _("\
8998 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8999 fprintf (stream, _("\
9000 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
9001 fprintf (stream, _("\
9002 -mindex-reg support pseudo index registers\n"));
9003 fprintf (stream, _("\
9004 -mnaked-reg don't require `%%' prefix for registers\n"));
9005 fprintf (stream, _("\
9006 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
9007 }
9008
9009 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
9010 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9011 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9012
9013 /* Pick the target format to use. */
9014
9015 const char *
9016 i386_target_format (void)
9017 {
9018 if (!strncmp (default_arch, "x86_64", 6))
9019 {
9020 update_code_flag (CODE_64BIT, 1);
9021 if (default_arch[6] == '\0')
9022 x86_elf_abi = X86_64_ABI;
9023 else
9024 x86_elf_abi = X86_64_X32_ABI;
9025 }
9026 else if (!strcmp (default_arch, "i386"))
9027 update_code_flag (CODE_32BIT, 1);
9028 else
9029 as_fatal (_("unknown architecture"));
9030
9031 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9032 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9033 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9034 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9035
9036 switch (OUTPUT_FLAVOR)
9037 {
9038 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9039 case bfd_target_aout_flavour:
9040 return AOUT_TARGET_FORMAT;
9041 #endif
9042 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9043 # if defined (TE_PE) || defined (TE_PEP)
9044 case bfd_target_coff_flavour:
9045 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9046 # elif defined (TE_GO32)
9047 case bfd_target_coff_flavour:
9048 return "coff-go32";
9049 # else
9050 case bfd_target_coff_flavour:
9051 return "coff-i386";
9052 # endif
9053 #endif
9054 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9055 case bfd_target_elf_flavour:
9056 {
9057 const char *format;
9058
9059 switch (x86_elf_abi)
9060 {
9061 default:
9062 format = ELF_TARGET_FORMAT;
9063 break;
9064 case X86_64_ABI:
9065 use_rela_relocations = 1;
9066 object_64bit = 1;
9067 format = ELF_TARGET_FORMAT64;
9068 break;
9069 case X86_64_X32_ABI:
9070 use_rela_relocations = 1;
9071 object_64bit = 1;
9072 disallow_64bit_reloc = 1;
9073 format = ELF_TARGET_FORMAT32;
9074 break;
9075 }
9076 if (cpu_arch_isa == PROCESSOR_L1OM)
9077 {
9078 if (x86_elf_abi != X86_64_ABI)
9079 as_fatal (_("Intel L1OM is 64bit only"));
9080 return ELF_TARGET_L1OM_FORMAT;
9081 }
9082 if (cpu_arch_isa == PROCESSOR_K1OM)
9083 {
9084 if (x86_elf_abi != X86_64_ABI)
9085 as_fatal (_("Intel K1OM is 64bit only"));
9086 return ELF_TARGET_K1OM_FORMAT;
9087 }
9088 else
9089 return format;
9090 }
9091 #endif
9092 #if defined (OBJ_MACH_O)
9093 case bfd_target_mach_o_flavour:
9094 if (flag_code == CODE_64BIT)
9095 {
9096 use_rela_relocations = 1;
9097 object_64bit = 1;
9098 return "mach-o-x86-64";
9099 }
9100 else
9101 return "mach-o-i386";
9102 #endif
9103 default:
9104 abort ();
9105 return NULL;
9106 }
9107 }
9108
9109 #endif /* OBJ_MAYBE_ more than one */
9110
9111 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9112 void
9113 i386_elf_emit_arch_note (void)
9114 {
9115 if (IS_ELF && cpu_arch_name != NULL)
9116 {
9117 char *p;
9118 asection *seg = now_seg;
9119 subsegT subseg = now_subseg;
9120 Elf_Internal_Note i_note;
9121 Elf_External_Note e_note;
9122 asection *note_secp;
9123 int len;
9124
9125 /* Create the .note section. */
9126 note_secp = subseg_new (".note", 0);
9127 bfd_set_section_flags (stdoutput,
9128 note_secp,
9129 SEC_HAS_CONTENTS | SEC_READONLY);
9130
9131 /* Process the arch string. */
9132 len = strlen (cpu_arch_name);
9133
9134 i_note.namesz = len + 1;
9135 i_note.descsz = 0;
9136 i_note.type = NT_ARCH;
9137 p = frag_more (sizeof (e_note.namesz));
9138 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9139 p = frag_more (sizeof (e_note.descsz));
9140 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9141 p = frag_more (sizeof (e_note.type));
9142 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9143 p = frag_more (len + 1);
9144 strcpy (p, cpu_arch_name);
9145
9146 frag_align (2, 0, 0);
9147
9148 subseg_set (seg, subseg);
9149 }
9150 }
9151 #endif
9152 \f
9153 symbolS *
9154 md_undefined_symbol (char *name)
9155 {
9156 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9157 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9158 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9159 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9160 {
9161 if (!GOT_symbol)
9162 {
9163 if (symbol_find (name))
9164 as_bad (_("GOT already in symbol table"));
9165 GOT_symbol = symbol_new (name, undefined_section,
9166 (valueT) 0, &zero_address_frag);
 9167 	}
9168 return GOT_symbol;
9169 }
9170 return 0;
9171 }
9172
9173 /* Round up a section size to the appropriate boundary. */
9174
9175 valueT
9176 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9177 {
9178 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9179 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9180 {
9181 /* For a.out, force the section size to be aligned. If we don't do
9182 this, BFD will align it for us, but it will not write out the
9183 final bytes of the section. This may be a bug in BFD, but it is
9184 easier to fix it here since that is how the other a.out targets
9185 work. */
9186 int align;
9187
9188 align = bfd_get_section_alignment (stdoutput, segment);
9189 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9190 }
9191 #endif
9192
9193 return size;
9194 }
9195
9196 /* On the i386, PC-relative offsets are relative to the start of the
9197 next instruction. That is, the address of the offset, plus its
9198 size, since the offset is always the last part of the insn. */
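/* For example, a 5-byte `call rel32' at address A has its 4-byte fixup at
   A + 1, so this returns A + 1 + 4 = A + 5, the address of the next
   instruction.  */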
9199
9200 long
9201 md_pcrel_from (fixS *fixP)
9202 {
9203 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9204 }
9205
9206 #ifndef I386COFF
9207
9208 static void
9209 s_bss (int ignore ATTRIBUTE_UNUSED)
9210 {
9211 int temp;
9212
9213 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9214 if (IS_ELF)
9215 obj_elf_section_change_hook ();
9216 #endif
9217 temp = get_absolute_expression ();
9218 subseg_set (bss_section, (subsegT) temp);
9219 demand_empty_rest_of_line ();
9220 }
9221
9222 #endif
9223
9224 void
9225 i386_validate_fix (fixS *fixp)
9226 {
9227 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9228 {
9229 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9230 {
9231 if (!object_64bit)
9232 abort ();
9233 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9234 }
9235 else
9236 {
9237 if (!object_64bit)
9238 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9239 else
9240 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9241 }
9242 fixp->fx_subsy = 0;
9243 }
9244 }
9245
9246 arelent *
9247 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9248 {
9249 arelent *rel;
9250 bfd_reloc_code_real_type code;
9251
9252 switch (fixp->fx_r_type)
9253 {
9254 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9255 case BFD_RELOC_SIZE32:
9256 case BFD_RELOC_SIZE64:
9257 if (S_IS_DEFINED (fixp->fx_addsy)
9258 && !S_IS_EXTERNAL (fixp->fx_addsy))
9259 {
9260 /* Resolve size relocation against local symbol to size of
9261 the symbol plus addend. */
9262 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
9263 if (fixp->fx_r_type == BFD_RELOC_SIZE32
9264 && !fits_in_unsigned_long (value))
9265 as_bad_where (fixp->fx_file, fixp->fx_line,
9266 _("symbol size computation overflow"));
9267 fixp->fx_addsy = NULL;
9268 fixp->fx_subsy = NULL;
9269 md_apply_fix (fixp, (valueT *) &value, NULL);
9270 return NULL;
9271 }
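      /* Fall through. */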
9272 #endif
9273
9274 case BFD_RELOC_X86_64_PLT32:
9275 case BFD_RELOC_X86_64_GOT32:
9276 case BFD_RELOC_X86_64_GOTPCREL:
9277 case BFD_RELOC_386_PLT32:
9278 case BFD_RELOC_386_GOT32:
9279 case BFD_RELOC_386_GOTOFF:
9280 case BFD_RELOC_386_GOTPC:
9281 case BFD_RELOC_386_TLS_GD:
9282 case BFD_RELOC_386_TLS_LDM:
9283 case BFD_RELOC_386_TLS_LDO_32:
9284 case BFD_RELOC_386_TLS_IE_32:
9285 case BFD_RELOC_386_TLS_IE:
9286 case BFD_RELOC_386_TLS_GOTIE:
9287 case BFD_RELOC_386_TLS_LE_32:
9288 case BFD_RELOC_386_TLS_LE:
9289 case BFD_RELOC_386_TLS_GOTDESC:
9290 case BFD_RELOC_386_TLS_DESC_CALL:
9291 case BFD_RELOC_X86_64_TLSGD:
9292 case BFD_RELOC_X86_64_TLSLD:
9293 case BFD_RELOC_X86_64_DTPOFF32:
9294 case BFD_RELOC_X86_64_DTPOFF64:
9295 case BFD_RELOC_X86_64_GOTTPOFF:
9296 case BFD_RELOC_X86_64_TPOFF32:
9297 case BFD_RELOC_X86_64_TPOFF64:
9298 case BFD_RELOC_X86_64_GOTOFF64:
9299 case BFD_RELOC_X86_64_GOTPC32:
9300 case BFD_RELOC_X86_64_GOT64:
9301 case BFD_RELOC_X86_64_GOTPCREL64:
9302 case BFD_RELOC_X86_64_GOTPC64:
9303 case BFD_RELOC_X86_64_GOTPLT64:
9304 case BFD_RELOC_X86_64_PLTOFF64:
9305 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9306 case BFD_RELOC_X86_64_TLSDESC_CALL:
9307 case BFD_RELOC_RVA:
9308 case BFD_RELOC_VTABLE_ENTRY:
9309 case BFD_RELOC_VTABLE_INHERIT:
9310 #ifdef TE_PE
9311 case BFD_RELOC_32_SECREL:
9312 #endif
9313 code = fixp->fx_r_type;
9314 break;
9315 case BFD_RELOC_X86_64_32S:
9316 if (!fixp->fx_pcrel)
9317 {
9318 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9319 code = fixp->fx_r_type;
9320 break;
9321 }
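      /* Fall through. */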
9322 default:
9323 if (fixp->fx_pcrel)
9324 {
9325 switch (fixp->fx_size)
9326 {
9327 default:
9328 as_bad_where (fixp->fx_file, fixp->fx_line,
9329 _("can not do %d byte pc-relative relocation"),
9330 fixp->fx_size);
9331 code = BFD_RELOC_32_PCREL;
9332 break;
9333 case 1: code = BFD_RELOC_8_PCREL; break;
9334 case 2: code = BFD_RELOC_16_PCREL; break;
9335 case 4: code = BFD_RELOC_32_PCREL; break;
9336 #ifdef BFD64
9337 case 8: code = BFD_RELOC_64_PCREL; break;
9338 #endif
9339 }
9340 }
9341 else
9342 {
9343 switch (fixp->fx_size)
9344 {
9345 default:
9346 as_bad_where (fixp->fx_file, fixp->fx_line,
9347 _("can not do %d byte relocation"),
9348 fixp->fx_size);
9349 code = BFD_RELOC_32;
9350 break;
9351 case 1: code = BFD_RELOC_8; break;
9352 case 2: code = BFD_RELOC_16; break;
9353 case 4: code = BFD_RELOC_32; break;
9354 #ifdef BFD64
9355 case 8: code = BFD_RELOC_64; break;
9356 #endif
9357 }
9358 }
9359 break;
9360 }
9361
9362 if ((code == BFD_RELOC_32
9363 || code == BFD_RELOC_32_PCREL
9364 || code == BFD_RELOC_X86_64_32S)
9365 && GOT_symbol
9366 && fixp->fx_addsy == GOT_symbol)
9367 {
9368 if (!object_64bit)
9369 code = BFD_RELOC_386_GOTPC;
9370 else
9371 code = BFD_RELOC_X86_64_GOTPC32;
9372 }
9373 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9374 && GOT_symbol
9375 && fixp->fx_addsy == GOT_symbol)
9376 {
9377 code = BFD_RELOC_X86_64_GOTPC64;
9378 }
9379
9380 rel = (arelent *) xmalloc (sizeof (arelent));
9381 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9382 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9383
9384 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9385
9386 if (!use_rela_relocations)
9387 {
9388 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9389 vtable entry to be used in the relocation's section offset. */
9390 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9391 rel->address = fixp->fx_offset;
9392 #if defined (OBJ_COFF) && defined (TE_PE)
9393 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9394 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9395 else
9396 #endif
9397 rel->addend = 0;
9398 }
 9399   /* Use rela relocations in 64-bit mode.  */
9400 else
9401 {
9402 if (disallow_64bit_reloc)
9403 switch (code)
9404 {
9405 case BFD_RELOC_X86_64_DTPOFF64:
9406 case BFD_RELOC_X86_64_TPOFF64:
9407 case BFD_RELOC_64_PCREL:
9408 case BFD_RELOC_X86_64_GOTOFF64:
9409 case BFD_RELOC_X86_64_GOT64:
9410 case BFD_RELOC_X86_64_GOTPCREL64:
9411 case BFD_RELOC_X86_64_GOTPC64:
9412 case BFD_RELOC_X86_64_GOTPLT64:
9413 case BFD_RELOC_X86_64_PLTOFF64:
9414 as_bad_where (fixp->fx_file, fixp->fx_line,
9415 _("cannot represent relocation type %s in x32 mode"),
9416 bfd_get_reloc_code_name (code));
9417 break;
9418 default:
9419 break;
9420 }
9421
9422 if (!fixp->fx_pcrel)
9423 rel->addend = fixp->fx_offset;
9424 else
9425 switch (code)
9426 {
9427 case BFD_RELOC_X86_64_PLT32:
9428 case BFD_RELOC_X86_64_GOT32:
9429 case BFD_RELOC_X86_64_GOTPCREL:
9430 case BFD_RELOC_X86_64_TLSGD:
9431 case BFD_RELOC_X86_64_TLSLD:
9432 case BFD_RELOC_X86_64_GOTTPOFF:
9433 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9434 case BFD_RELOC_X86_64_TLSDESC_CALL:
9435 rel->addend = fixp->fx_offset - fixp->fx_size;
9436 break;
9437 default:
9438 rel->addend = (section->vma
9439 - fixp->fx_size
9440 + fixp->fx_addnumber
9441 + md_pcrel_from (fixp));
9442 break;
9443 }
9444 }
9445
9446 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9447 if (rel->howto == NULL)
9448 {
9449 as_bad_where (fixp->fx_file, fixp->fx_line,
9450 _("cannot represent relocation type %s"),
9451 bfd_get_reloc_code_name (code));
9452 /* Set howto to a garbage value so that we can keep going. */
9453 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9454 gas_assert (rel->howto != NULL);
9455 }
9456
9457 return rel;
9458 }
9459
9460 #include "tc-i386-intel.c"
9461
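/* Parse a register operand in a CFI directive and convert it to the
   corresponding DWARF register number; e.g. the `%rbp' in
   `.cfi_offset %rbp, -16' ends up here and yields DWARF column 6 in
   64-bit code.  (Example for illustration only.)  */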
9462 void
9463 tc_x86_parse_to_dw2regnum (expressionS *exp)
9464 {
9465 int saved_naked_reg;
9466 char saved_register_dot;
9467
9468 saved_naked_reg = allow_naked_reg;
9469 allow_naked_reg = 1;
9470 saved_register_dot = register_chars['.'];
9471 register_chars['.'] = '.';
9472 allow_pseudo_reg = 1;
9473 expression_and_evaluate (exp);
9474 allow_pseudo_reg = 0;
9475 register_chars['.'] = saved_register_dot;
9476 allow_naked_reg = saved_naked_reg;
9477
9478 if (exp->X_op == O_register && exp->X_add_number >= 0)
9479 {
9480 if ((addressT) exp->X_add_number < i386_regtab_size)
9481 {
9482 exp->X_op = O_constant;
9483 exp->X_add_number = i386_regtab[exp->X_add_number]
9484 .dw2_regnum[flag_code >> 1];
9485 }
9486 else
9487 exp->X_op = O_illegal;
9488 }
9489 }
9490
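/* Emit the CFI state assumed on function entry: the CFA is the stack
   pointer plus the size of the pushed return address, and the return
   address is saved in that slot.  x86_cie_data_alignment, set up
   elsewhere in this file, is assumed here to be -4 for 32-bit and -8
   for 64-bit code, hence the negation below.  */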
9491 void
9492 tc_x86_frame_initial_instructions (void)
9493 {
9494 static unsigned int sp_regno[2];
9495
9496 if (!sp_regno[flag_code >> 1])
9497 {
9498 char *saved_input = input_line_pointer;
9499 char sp[][4] = {"esp", "rsp"};
9500 expressionS exp;
9501
9502 input_line_pointer = sp[flag_code >> 1];
9503 tc_x86_parse_to_dw2regnum (&exp);
9504 gas_assert (exp.X_op == O_constant);
9505 sp_regno[flag_code >> 1] = exp.X_add_number;
9506 input_line_pointer = saved_input;
9507 }
9508
9509 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9510 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9511 }
9512
9513 int
9514 x86_dwarf2_addr_size (void)
9515 {
9516 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9517 if (x86_elf_abi == X86_64_X32_ABI)
9518 return 4;
9519 #endif
9520 return bfd_arch_bits_per_address (stdoutput) / 8;
9521 }
9522
9523 int
9524 i386_elf_section_type (const char *str, size_t len)
9525 {
9526 if (flag_code == CODE_64BIT
9527 && len == sizeof ("unwind") - 1
9528 && strncmp (str, "unwind", 6) == 0)
9529 return SHT_X86_64_UNWIND;
9530
9531 return -1;
9532 }
9533
9534 #ifdef TE_SOLARIS
9535 void
9536 i386_solaris_fix_up_eh_frame (segT sec)
9537 {
9538 if (flag_code == CODE_64BIT)
9539 elf_section_type (sec) = SHT_X86_64_UNWIND;
9540 }
9541 #endif
9542
9543 #ifdef TE_PE
9544 void
9545 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9546 {
9547 expressionS exp;
9548
9549 exp.X_op = O_secrel;
9550 exp.X_add_symbol = symbol;
9551 exp.X_add_number = 0;
9552 emit_expr (&exp, size);
9553 }
9554 #endif
9555
9556 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9557 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
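/* For example, a directive such as `.section .ldata,"awl",@progbits'
   marks the section SHF_X86_64_LARGE via the extra `l' flag; the word
   form `large' is accepted as well.  The two helpers below handle the
   letter and the word respectively.  (Directive shown for illustration.)  */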
9558
9559 bfd_vma
9560 x86_64_section_letter (int letter, char **ptr_msg)
9561 {
9562 if (flag_code == CODE_64BIT)
9563 {
9564 if (letter == 'l')
9565 return SHF_X86_64_LARGE;
9566
9567 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9568 }
9569 else
9570 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9571 return -1;
9572 }
9573
9574 bfd_vma
9575 x86_64_section_word (char *str, size_t len)
9576 {
9577 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9578 return SHF_X86_64_LARGE;
9579
9580 return -1;
9581 }
9582
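/* Handle the .largecomm pseudo-op.  Its operands mirror those of .comm,
   e.g. (illustrative):
	.largecomm	buffer, 4096, 32
   In 64-bit mode the symbol is allocated against the large common
   section (and local definitions go to .lbss) instead of the regular
   common section and .bss.  */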
9583 static void
9584 handle_large_common (int small ATTRIBUTE_UNUSED)
9585 {
9586 if (flag_code != CODE_64BIT)
9587 {
9588 s_comm_internal (0, elf_common_parse);
9589 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9590 }
9591 else
9592 {
9593 static segT lbss_section;
9594 asection *saved_com_section_ptr = elf_com_section_ptr;
9595 asection *saved_bss_section = bss_section;
9596
9597 if (lbss_section == NULL)
9598 {
9599 flagword applicable;
9600 segT seg = now_seg;
9601 subsegT subseg = now_subseg;
9602
9603 /* The .lbss section is for local .largecomm symbols. */
9604 lbss_section = subseg_new (".lbss", 0);
9605 applicable = bfd_applicable_section_flags (stdoutput);
9606 bfd_set_section_flags (stdoutput, lbss_section,
9607 applicable & SEC_ALLOC);
9608 seg_info (lbss_section)->bss = 1;
9609
9610 subseg_set (seg, subseg);
9611 }
9612
9613 elf_com_section_ptr = &_bfd_elf_large_com_section;
9614 bss_section = lbss_section;
9615
9616 s_comm_internal (0, elf_common_parse);
9617
9618 elf_com_section_ptr = saved_com_section_ptr;
9619 bss_section = saved_bss_section;
9620 }
9621 }
9622 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */