Commit: "Improve unsupported error message"
Repository: deliverable/binutils-gdb.git — file: gas/config/tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
/* Warn about questionable register usage unless the target overrides it.  */
#ifndef REGISTER_WARNINGS
#define REGISTER_WARNINGS 1
#endif

/* Infer a missing address-size prefix from operands by default.  */
#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif

#ifndef INLINE
#if __GNUC__ >= 2
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* Prefixes are emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT really is an
   instruction, and so must come before any prefixes.
   The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
   REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
#define WAIT_PREFIX	0
#define SEG_PREFIX	1
#define ADDR_PREFIX	2
#define DATA_PREFIX	3
#define REP_PREFIX	4
#define HLE_PREFIX	REP_PREFIX
#define LOCK_PREFIX	5
#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */

/* We define the syntax here (modulo base,index,scale syntax).  */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX  'q'
#define XMMWORD_MNEM_SUFFIX  'x'
#define YMMWORD_MNEM_SUFFIX 'y'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_sse_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
201 /* 'md_assemble ()' gathers together information and puts it into a
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 unsupported_vector_index_register
225 };
226
227 struct _i386_insn
228 {
229 /* TM holds the template for the insn were currently assembling. */
230 insn_template tm;
231
232 /* SUFFIX holds the instruction size suffix for byte, word, dword
233 or qword, if given. */
234 char suffix;
235
236 /* OPERANDS gives the number of given operands. */
237 unsigned int operands;
238
239 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
240 of given register, displacement, memory operands and immediate
241 operands. */
242 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
243
244 /* TYPES [i] is the type (see above #defines) which tells us how to
245 use OP[i] for the corresponding operand. */
246 i386_operand_type types[MAX_OPERANDS];
247
248 /* Displacement expression, immediate expression, or register for each
249 operand. */
250 union i386_op op[MAX_OPERANDS];
251
252 /* Flags for operands. */
253 unsigned int flags[MAX_OPERANDS];
254 #define Operand_PCrel 1
255
256 /* Relocation type for operand */
257 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
258
259 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
260 the base index byte below. */
261 const reg_entry *base_reg;
262 const reg_entry *index_reg;
263 unsigned int log2_scale_factor;
264
265 /* SEG gives the seg_entries of this insn. They are zero unless
266 explicit segment overrides are given. */
267 const seg_entry *seg[2];
268
269 /* PREFIX holds all the given prefix opcodes (usually null).
270 PREFIXES is the number of prefix opcodes. */
271 unsigned int prefixes;
272 unsigned char prefix[MAX_PREFIXES];
273
274 /* RM and SIB are the modrm byte and the sib byte where the
275 addressing modes of this insn are encoded. */
276 modrm_byte rm;
277 rex_byte rex;
278 sib_byte sib;
279 vex_prefix vex;
280
281 /* Swap operand in encoding. */
282 unsigned int swap_operand;
283
284 /* Prefer 8bit or 32bit displacement in encoding. */
285 enum
286 {
287 disp_encoding_default = 0,
288 disp_encoding_8bit,
289 disp_encoding_32bit
290 } disp_encoding;
291
292 /* Have HLE prefix. */
293 unsigned int have_hle;
294
295 /* Error message. */
296 enum i386_error error;
297 };
298
299 typedef struct _i386_insn i386_insn;
300
/* List of chars besides those in app.c:symbol_chars that can start an
   operand.  Used to prevent the scrubber eating vital white-space.  */
const char extra_symbol_chars[] = "*%-(["
#ifdef LEX_AT
	"@"
#endif
#ifdef LEX_QM
	"?"
#endif
	;

#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NACL)				\
	 && !defined (TE_NETWARE)			\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant
   As in 0f12.456
   or    0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";

/* Tables for lexical analysis.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
382
383 /* The instruction we're assembling. */
384 static i386_insn i;
385
386 /* Possible templates for current insn. */
387 static const templates *current_templates;
388
389 /* Per instruction expressionS buffers: max displacements & immediates. */
390 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
391 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
392
393 /* Current operand we are working on. */
394 static int this_operand = -1;
395
396 /* We support four different modes. FLAG_CODE variable is used to distinguish
397 these. */
398
399 enum flag_code {
400 CODE_32BIT,
401 CODE_16BIT,
402 CODE_64BIT };
403
404 static enum flag_code flag_code;
405 static unsigned int object_64bit;
406 static unsigned int disallow_64bit_reloc;
407 static int use_rela_relocations = 0;
408
409 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
410 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
411 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
412
413 /* The ELF ABI to use. */
414 enum x86_elf_abi
415 {
416 I386_ABI,
417 X86_64_ABI,
418 X86_64_X32_ABI
419 };
420
421 static enum x86_elf_abi x86_elf_abi = I386_ABI;
422 #endif
423
424 /* The names used to print error messages. */
425 static const char *flag_code_names[] =
426 {
427 "32",
428 "16",
429 "64"
430 };
431
432 /* 1 for intel syntax,
433 0 if att syntax. */
434 static int intel_syntax = 0;
435
436 /* 1 for intel mnemonic,
437 0 if att mnemonic. */
438 static int intel_mnemonic = !SYSV386_COMPAT;
439
440 /* 1 if support old (<= 2.8.1) versions of gcc. */
441 static int old_gcc = OLDGCC_COMPAT;
442
443 /* 1 if pseudo registers are permitted. */
444 static int allow_pseudo_reg = 0;
445
446 /* 1 if register prefix % not required. */
447 static int allow_naked_reg = 0;
448
449 /* 1 if pseudo index register, eiz/riz, is allowed . */
450 static int allow_index_reg = 0;
451
452 static enum
453 {
454 sse_check_none = 0,
455 sse_check_warning,
456 sse_check_error
457 }
458 sse_check;
459
460 /* Register prefix used for error message. */
461 static const char *register_prefix = "%";
462
463 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
464 leave, push, and pop instructions so that gcc has the same stack
465 frame as in 32 bit mode. */
466 static char stackop_size = '\0';
467
468 /* Non-zero to optimize code alignment. */
469 int optimize_align_code = 1;
470
471 /* Non-zero to quieten some warnings. */
472 static int quiet_warnings = 0;
473
474 /* CPU name. */
475 static const char *cpu_arch_name = NULL;
476 static char *cpu_sub_arch_name = NULL;
477
478 /* CPU feature flags. */
479 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
480
481 /* If we have selected a cpu we are generating instructions for. */
482 static int cpu_arch_tune_set = 0;
483
484 /* Cpu we are generating instructions for. */
485 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
486
487 /* CPU feature flags of cpu we are generating instructions for. */
488 static i386_cpu_flags cpu_arch_tune_flags;
489
490 /* CPU instruction set architecture used. */
491 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
492
493 /* CPU feature flags of instruction set architecture used. */
494 i386_cpu_flags cpu_arch_isa_flags;
495
496 /* If set, conditional jumps are not automatically promoted to handle
497 larger than a byte offset. */
498 static unsigned int no_cond_jump_promotion = 0;
499
500 /* Encode SSE instructions with VEX prefix. */
501 static unsigned int sse2avx;
502
503 /* Encode scalar AVX instructions with specific vector length. */
504 static enum
505 {
506 vex128 = 0,
507 vex256
508 } avxscalar;
509
510 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
511 static symbolS *GOT_symbol;
512
513 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
514 unsigned int x86_dwarf2_return_column;
515
516 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
517 int x86_cie_data_alignment;
518
/* Interface to relax_segment.
   There are 3 major relax states for 386 jump insns because the
   different types of jumps add different sizes to frags when we're
   figuring out what sort of jump to choose to reach a given label.  */

/* Types.  */
#define UNCOND_JUMP 0
#define COND_JUMP 1
#define COND_JUMP86 2

/* Sizes.  */
#define CODE16	1
#define SMALL	0
#define SMALL16 (SMALL | CODE16)
#define BIG	2
#define BIG16	(BIG | CODE16)

#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* Pack a jump type and a size into one relax substate; the low two
   bits hold the size, the rest the type.  */
#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
550
551 /* This table is used by relax_frag to promote short jumps to long
552 ones where necessary. SMALL (short) jumps may be promoted to BIG
553 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
554 don't allow a short jump in a 32 bit code segment to be promoted to
555 a 16 bit offset jump because it's slower (requires data size
556 prefix), and doesn't work, unless the destination is in the bottom
557 64k of the code segment (The top 16 bits of eip are zeroed). */
558
559 const relax_typeS md_relax_table[] =
560 {
561 /* The fields are:
562 1) most positive reach of this state,
563 2) most negative reach of this state,
564 3) how many bytes this mode will have in the variable part of the frag
565 4) which index into the table to try if we can't fit into this one. */
566
567 /* UNCOND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
570 /* dword jmp adds 4 bytes to frag:
571 0 extra opcode bytes, 4 displacement bytes. */
572 {0, 0, 4, 0},
573 /* word jmp adds 2 byte2 to frag:
574 0 extra opcode bytes, 2 displacement bytes. */
575 {0, 0, 2, 0},
576
577 /* COND_JUMP states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
580 /* dword conditionals adds 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 3 bytes to frag:
584 1 extra opcode byte, 2 displacement bytes. */
585 {0, 0, 3, 0},
586
587 /* COND_JUMP86 states. */
588 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
590 /* dword conditionals adds 5 bytes to frag:
591 1 extra opcode byte, 4 displacement bytes. */
592 {0, 0, 5, 0},
593 /* word conditionals add 4 bytes to frag:
594 1 displacement byte and a 3 byte long branch insn. */
595 {0, 0, 4, 0}
596 };
597
598 static const arch_entry cpu_arch[] =
599 {
600 /* Do not replace the first two entries - i386_target_format()
601 relies on them being there in this order. */
602 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
603 CPU_GENERIC32_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
605 CPU_GENERIC64_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
607 CPU_NONE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
609 CPU_I186_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
611 CPU_I286_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
613 CPU_I386_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
615 CPU_I486_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
619 CPU_I686_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
623 CPU_PENTIUMPRO_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
625 CPU_P2_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
627 CPU_P3_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
629 CPU_P4_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
631 CPU_CORE_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
633 CPU_NOCONA_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
635 CPU_CORE_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
637 CPU_CORE_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
639 CPU_CORE2_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
641 CPU_CORE2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
643 CPU_COREI7_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
645 CPU_L1OM_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
647 CPU_K1OM_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
649 CPU_K6_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
651 CPU_K6_2_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
653 CPU_ATHLON_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 1, 0 },
656 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
657 CPU_K8_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
661 CPU_AMDFAM10_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
663 CPU_BDVER1_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
665 CPU_BDVER2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
667 CPU_8087_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
669 CPU_287_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
671 CPU_387_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
673 CPU_ANY87_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
675 CPU_MMX_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
677 CPU_3DNOWA_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
679 CPU_SSE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
681 CPU_SSE2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
683 CPU_SSE3_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
685 CPU_SSSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_1_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_2_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
693 CPU_ANY_SSE_FLAGS, 0, 1 },
694 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
695 CPU_AVX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
697 CPU_AVX2_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
699 CPU_ANY_AVX_FLAGS, 0, 1 },
700 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
701 CPU_VMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
703 CPU_VMFUNC_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
705 CPU_SMX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
707 CPU_XSAVE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
709 CPU_XSAVEOPT_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
711 CPU_AES_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
713 CPU_PCLMUL_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 1, 0 },
716 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
717 CPU_FSGSBASE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
719 CPU_RDRND_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
721 CPU_F16C_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
723 CPU_BMI2_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
725 CPU_FMA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
727 CPU_FMA4_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
729 CPU_XOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
731 CPU_LWP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
733 CPU_MOVBE_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
735 CPU_EPT_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
737 CPU_LZCNT_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
739 CPU_HLE_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
741 CPU_RTM_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
743 CPU_INVPCID_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
745 CPU_CLFLUSH_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
747 CPU_NOP_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
749 CPU_SYSCALL_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
751 CPU_RDTSCP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
753 CPU_3DNOW_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
755 CPU_3DNOWA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
757 CPU_PADLOCK_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
759 CPU_SVME_FLAGS, 1, 0 },
760 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
761 CPU_SVME_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
763 CPU_SSE4A_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
765 CPU_ABM_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
767 CPU_BMI_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
769 CPU_TBM_FLAGS, 0, 0 },
770 };
771
772 #ifdef I386COFF
773 /* Like s_lcomm_internal in gas/read.c but the alignment string
774 is allowed to be optional. */
775
776 static symbolS *
777 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
778 {
779 addressT align = 0;
780
781 SKIP_WHITESPACE ();
782
783 if (needs_align
784 && *input_line_pointer == ',')
785 {
786 align = parse_align (needs_align - 1);
787
788 if (align == (addressT) -1)
789 return NULL;
790 }
791 else
792 {
793 if (size >= 8)
794 align = 3;
795 else if (size >= 4)
796 align = 2;
797 else if (size >= 2)
798 align = 1;
799 else
800 align = 0;
801 }
802
803 bss_alloc (symbolP, size, align);
804 return symbolP;
805 }
806
807 static void
808 pe_lcomm (int needs_align)
809 {
810 s_comm_internal (needs_align * 2, pe_lcomm_internal);
811 }
812 #endif
813
814 const pseudo_typeS md_pseudo_table[] =
815 {
816 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
817 {"align", s_align_bytes, 0},
818 #else
819 {"align", s_align_ptwo, 0},
820 #endif
821 {"arch", set_cpu_arch, 0},
822 #ifndef I386COFF
823 {"bss", s_bss, 0},
824 #else
825 {"lcomm", pe_lcomm, 1},
826 #endif
827 {"ffloat", float_cons, 'f'},
828 {"dfloat", float_cons, 'd'},
829 {"tfloat", float_cons, 'x'},
830 {"value", cons, 2},
831 {"slong", signed_cons, 4},
832 {"noopt", s_ignore, 0},
833 {"optim", s_ignore, 0},
834 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
835 {"code16", set_code_flag, CODE_16BIT},
836 {"code32", set_code_flag, CODE_32BIT},
837 {"code64", set_code_flag, CODE_64BIT},
838 {"intel_syntax", set_intel_syntax, 1},
839 {"att_syntax", set_intel_syntax, 0},
840 {"intel_mnemonic", set_intel_mnemonic, 1},
841 {"att_mnemonic", set_intel_mnemonic, 0},
842 {"allow_index_reg", set_allow_index_reg, 1},
843 {"disallow_index_reg", set_allow_index_reg, 0},
844 {"sse_check", set_sse_check, 0},
845 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
846 {"largecomm", handle_large_common, 0},
847 #else
848 {"file", (void (*) (int)) dwarf2_directive_file, 0},
849 {"loc", dwarf2_directive_loc, 0},
850 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
851 #endif
852 #ifdef TE_PE
853 {"secrel32", pe_directive_secrel, 0},
854 #endif
855 {0, 0, 0}
856 };
857
858 /* For interface with expression (). */
859 extern char *input_line_pointer;
860
861 /* Hash table for instruction mnemonic lookup. */
862 static struct hash_control *op_hash;
863
864 /* Hash table for register lookup. */
865 static struct hash_control *reg_hash;
866 \f
867 void
868 i386_align_code (fragS *fragP, int count)
869 {
870 /* Various efficient no-op patterns for aligning code labels.
871 Note: Don't try to assemble the instructions in the comments.
872 0L and 0w are not legal. */
873 static const char f32_1[] =
874 {0x90}; /* nop */
875 static const char f32_2[] =
876 {0x66,0x90}; /* xchg %ax,%ax */
877 static const char f32_3[] =
878 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
879 static const char f32_4[] =
880 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
881 static const char f32_5[] =
882 {0x90, /* nop */
883 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
884 static const char f32_6[] =
885 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
886 static const char f32_7[] =
887 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
888 static const char f32_8[] =
889 {0x90, /* nop */
890 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
891 static const char f32_9[] =
892 {0x89,0xf6, /* movl %esi,%esi */
893 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
894 static const char f32_10[] =
895 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
896 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
897 static const char f32_11[] =
898 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f32_12[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
902 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
903 static const char f32_13[] =
904 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_14[] =
907 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
908 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
909 static const char f16_3[] =
910 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
911 static const char f16_4[] =
912 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
913 static const char f16_5[] =
914 {0x90, /* nop */
915 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
916 static const char f16_6[] =
917 {0x89,0xf6, /* mov %si,%si */
918 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
919 static const char f16_7[] =
920 {0x8d,0x74,0x00, /* lea 0(%si),%si */
921 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
922 static const char f16_8[] =
923 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
924 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
925 static const char jump_31[] =
926 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
927 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
928 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
929 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
930 static const char *const f32_patt[] = {
931 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
932 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
933 };
934 static const char *const f16_patt[] = {
935 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
936 };
937 /* nopl (%[re]ax) */
938 static const char alt_3[] =
939 {0x0f,0x1f,0x00};
940 /* nopl 0(%[re]ax) */
941 static const char alt_4[] =
942 {0x0f,0x1f,0x40,0x00};
943 /* nopl 0(%[re]ax,%[re]ax,1) */
944 static const char alt_5[] =
945 {0x0f,0x1f,0x44,0x00,0x00};
946 /* nopw 0(%[re]ax,%[re]ax,1) */
947 static const char alt_6[] =
948 {0x66,0x0f,0x1f,0x44,0x00,0x00};
949 /* nopl 0L(%[re]ax) */
950 static const char alt_7[] =
951 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
952 /* nopl 0L(%[re]ax,%[re]ax,1) */
953 static const char alt_8[] =
954 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
955 /* nopw 0L(%[re]ax,%[re]ax,1) */
956 static const char alt_9[] =
957 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
959 static const char alt_10[] =
960 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
961 /* data16
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_11[] =
964 {0x66,
965 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
966 /* data16
967 data16
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_12[] =
970 {0x66,
971 0x66,
972 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* data16
974 data16
975 data16
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_13[] =
978 {0x66,
979 0x66,
980 0x66,
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 /* data16
983 data16
984 data16
985 data16
986 nopw %cs:0L(%[re]ax,%[re]ax,1) */
987 static const char alt_long_14[] =
988 {0x66,
989 0x66,
990 0x66,
991 0x66,
992 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
993 /* data16
994 data16
995 data16
996 data16
997 data16
998 nopw %cs:0L(%[re]ax,%[re]ax,1) */
999 static const char alt_long_15[] =
1000 {0x66,
1001 0x66,
1002 0x66,
1003 0x66,
1004 0x66,
1005 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1006 /* nopl 0(%[re]ax,%[re]ax,1)
1007 nopw 0(%[re]ax,%[re]ax,1) */
1008 static const char alt_short_11[] =
1009 {0x0f,0x1f,0x44,0x00,0x00,
1010 0x66,0x0f,0x1f,0x44,0x00,0x00};
1011 /* nopw 0(%[re]ax,%[re]ax,1)
1012 nopw 0(%[re]ax,%[re]ax,1) */
1013 static const char alt_short_12[] =
1014 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1015 0x66,0x0f,0x1f,0x44,0x00,0x00};
1016 /* nopw 0(%[re]ax,%[re]ax,1)
1017 nopl 0L(%[re]ax) */
1018 static const char alt_short_13[] =
1019 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1020 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1021 /* nopl 0L(%[re]ax)
1022 nopl 0L(%[re]ax) */
1023 static const char alt_short_14[] =
1024 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1025 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1026 /* nopl 0L(%[re]ax)
1027 nopl 0L(%[re]ax,%[re]ax,1) */
1028 static const char alt_short_15[] =
1029 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1030 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1031 static const char *const alt_short_patt[] = {
1032 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1033 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1034 alt_short_14, alt_short_15
1035 };
1036 static const char *const alt_long_patt[] = {
1037 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1038 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1039 alt_long_14, alt_long_15
1040 };
1041
1042 /* Only align for at least a positive non-zero boundary. */
1043 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1044 return;
1045
1046 /* We need to decide which NOP sequence to use for 32bit and
1047 64bit. When -mtune= is used:
1048
1049 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1050 PROCESSOR_GENERIC32, f32_patt will be used.
1051 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1052 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1053 PROCESSOR_GENERIC64, alt_long_patt will be used.
1054 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1055 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1056 will be used.
1057
1058 When -mtune= isn't used, alt_long_patt will be used if
1059 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1060 be used.
1061
1062 When -march= or .arch is used, we can't use anything beyond
1063 cpu_arch_isa_flags. */
1064
1065 if (flag_code == CODE_16BIT)
1066 {
1067 if (count > 8)
1068 {
1069 memcpy (fragP->fr_literal + fragP->fr_fix,
1070 jump_31, count);
1071 /* Adjust jump offset. */
1072 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1073 }
1074 else
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1076 f16_patt[count - 1], count);
1077 }
1078 else
1079 {
1080 const char *const *patt = NULL;
1081
1082 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1083 {
1084 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1085 switch (cpu_arch_tune)
1086 {
1087 case PROCESSOR_UNKNOWN:
1088 /* We use cpu_arch_isa_flags to check if we SHOULD
1089 optimize with nops. */
1090 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1091 patt = alt_long_patt;
1092 else
1093 patt = f32_patt;
1094 break;
1095 case PROCESSOR_PENTIUM4:
1096 case PROCESSOR_NOCONA:
1097 case PROCESSOR_CORE:
1098 case PROCESSOR_CORE2:
1099 case PROCESSOR_COREI7:
1100 case PROCESSOR_L1OM:
1101 case PROCESSOR_K1OM:
1102 case PROCESSOR_GENERIC64:
1103 patt = alt_long_patt;
1104 break;
1105 case PROCESSOR_K6:
1106 case PROCESSOR_ATHLON:
1107 case PROCESSOR_K8:
1108 case PROCESSOR_AMDFAM10:
1109 case PROCESSOR_BD:
1110 patt = alt_short_patt;
1111 break;
1112 case PROCESSOR_I386:
1113 case PROCESSOR_I486:
1114 case PROCESSOR_PENTIUM:
1115 case PROCESSOR_PENTIUMPRO:
1116 case PROCESSOR_GENERIC32:
1117 patt = f32_patt;
1118 break;
1119 }
1120 }
1121 else
1122 {
1123 switch (fragP->tc_frag_data.tune)
1124 {
1125 case PROCESSOR_UNKNOWN:
1126 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1127 PROCESSOR_UNKNOWN. */
1128 abort ();
1129 break;
1130
1131 case PROCESSOR_I386:
1132 case PROCESSOR_I486:
1133 case PROCESSOR_PENTIUM:
1134 case PROCESSOR_K6:
1135 case PROCESSOR_ATHLON:
1136 case PROCESSOR_K8:
1137 case PROCESSOR_AMDFAM10:
1138 case PROCESSOR_BD:
1139 case PROCESSOR_GENERIC32:
1140 /* We use cpu_arch_isa_flags to check if we CAN optimize
1141 with nops. */
1142 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1143 patt = alt_short_patt;
1144 else
1145 patt = f32_patt;
1146 break;
1147 case PROCESSOR_PENTIUMPRO:
1148 case PROCESSOR_PENTIUM4:
1149 case PROCESSOR_NOCONA:
1150 case PROCESSOR_CORE:
1151 case PROCESSOR_CORE2:
1152 case PROCESSOR_COREI7:
1153 case PROCESSOR_L1OM:
1154 case PROCESSOR_K1OM:
1155 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1156 patt = alt_long_patt;
1157 else
1158 patt = f32_patt;
1159 break;
1160 case PROCESSOR_GENERIC64:
1161 patt = alt_long_patt;
1162 break;
1163 }
1164 }
1165
1166 if (patt == f32_patt)
1167 {
1168 /* If the padding is less than 15 bytes, we use the normal
1169 ones. Otherwise, we use a jump instruction and adjust
1170 its offset. */
1171 int limit;
1172
1173 /* For 64bit, the limit is 3 bytes. */
1174 if (flag_code == CODE_64BIT
1175 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1176 limit = 3;
1177 else
1178 limit = 15;
1179 if (count < limit)
1180 memcpy (fragP->fr_literal + fragP->fr_fix,
1181 patt[count - 1], count);
1182 else
1183 {
1184 memcpy (fragP->fr_literal + fragP->fr_fix,
1185 jump_31, count);
1186 /* Adjust jump offset. */
1187 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1188 }
1189 }
1190 else
1191 {
1192 /* Maximum length of an instruction is 15 byte. If the
1193 padding is greater than 15 bytes and we don't use jump,
1194 we have to break it into smaller pieces. */
1195 int padding = count;
1196 while (padding > 15)
1197 {
1198 padding -= 15;
1199 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1200 patt [14], 15);
1201 }
1202
1203 if (padding)
1204 memcpy (fragP->fr_literal + fragP->fr_fix,
1205 patt [padding - 1], padding);
1206 }
1207 }
1208 fragP->fr_var = count;
1209 }
1210
/* Return 1 if every bit of operand type *X is clear, 0 otherwise.
   The union is examined one array word at a time; each case falls
   through to test the lower-indexed words.  */

static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1228
/* Set every array word of operand type *X to V (normally 0 to clear
   all operand type bits).  Cases intentionally fall through so all
   words present in the union are written.  */

static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1245
/* Return 1 if operand types *X and *Y are identical, 0 otherwise.
   Words are compared from the highest index down, falling through to
   the lower ones.  */

static INLINE int
operand_type_equal (const union i386_operand_type *x,
		    const union i386_operand_type *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;			/* Not reached.  */
    default:
      abort ();
    }
}
1265
/* Return 1 if every bit of CPU flag set *X is clear, 0 otherwise.
   Mirrors operand_type_all_zero for the i386_cpu_flags union.  */

static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1283
/* Set every array word of CPU flag set *X to V.  Cases intentionally
   fall through so all words present in the union are written.  */

static INLINE void
cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1300
/* Return 1 if CPU flag sets *X and *Y are identical, 0 otherwise.  */

static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
		 const union i386_cpu_flags *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;			/* Not reached.  */
    default:
      abort ();
    }
}
1320
1321 static INLINE int
1322 cpu_flags_check_cpu64 (i386_cpu_flags f)
1323 {
1324 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1325 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1326 }
1327
/* Return the bitwise AND of CPU flag sets X and Y.  Cases fall
   through so each array word of the union is combined.  */

static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1345
/* Return the bitwise OR of CPU flag sets X and Y.  Cases fall
   through so each array word of the union is combined.  */

static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1363
/* Return X with every bit that is set in Y cleared (X & ~Y).  Cases
   fall through so each array word of the union is combined.  */

static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= ~y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= ~y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= ~y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1381
/* Bits returned by cpu_flags_match () describing how well an insn
   template's CPU requirements match the active architecture.  A
   "perfect" match sets all of them; partial AES/PCLMUL/AVX matches
   are reported individually so better templates can be preferred.  */
#define CPU_FLAGS_ARCH_MATCH		0x1
#define CPU_FLAGS_64BIT_MATCH		0x2
#define CPU_FLAGS_AES_MATCH		0x4
#define CPU_FLAGS_PCLMUL_MATCH		0x8
#define CPU_FLAGS_AVX_MATCH	       0x10

#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1393
/* Return CPU flags match bits. */

static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  /* 64bit-ness is checked separately from the feature bits.  */
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs. */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs. */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* CPU now holds the required features that are also enabled.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX. */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES. */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL. */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		/* AVX itself isn't enabled: only a partial match.  */
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1448
/* Return the bitwise AND of operand types X and Y.  Cases fall
   through so each array word of the union is combined.  */

static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1466
/* Return the bitwise OR of operand types X and Y.  Cases fall
   through so each array word of the union is combined.  */

static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1484
/* Return the bitwise XOR of operand types X and Y.  Cases fall
   through so each array word of the union is combined.  */

static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] ^= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] ^= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] ^= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1502
/* Frequently used operand type values, initialized from the
   OPERAND_TYPE_* initializers generated into opcodes/i386-init.h.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1528
/* Broad operand classes tested by operand_type_check ().  */
enum operand_type
{
  reg,		/* Any general register.  */
  imm,		/* Any immediate.  */
  disp,		/* Any displacement.  */
  anymem	/* Any memory reference (displacement or base/index).  */
};
1536
/* Return nonzero if operand type T contains any bit of class C:
   general register of any width, any immediate size, any
   displacement size, or (for anymem) any displacement or base/index
   addressing.  */

static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  /* Not reached; every case above returns or aborts.  */
  return 0;
}
1577
/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
   operand J for instruction template T.  Reads the parsed operand
   types from the global instruction `i'; a size bit set in the
   parsed operand must also be allowed by the template.  */

static INLINE int
match_reg_size (const insn_template *t, unsigned int j)
{
  return !((i.types[j].bitfield.byte
	    && !t->operand_types[j].bitfield.byte)
	   || (i.types[j].bitfield.word
	       && !t->operand_types[j].bitfield.word)
	   || (i.types[j].bitfield.dword
	       && !t->operand_types[j].bitfield.dword)
	   || (i.types[j].bitfield.qword
	       && !t->operand_types[j].bitfield.qword));
}
1593
/* Return 1 if there is no conflict in any size on operand J for
   instruction template T.  Extends match_reg_size with the
   memory-only sizes (unspecified/fword/tbyte/xmmword/ymmword).  */

static INLINE int
match_mem_size (const insn_template *t, unsigned int j)
{
  return (match_reg_size (t, j)
	  && !((i.types[j].bitfield.unspecified
		&& !t->operand_types[j].bitfield.unspecified)
	       || (i.types[j].bitfield.fword
		   && !t->operand_types[j].bitfield.fword)
	       || (i.types[j].bitfield.tbyte
		   && !t->operand_types[j].bitfield.tbyte)
	       || (i.types[j].bitfield.xmmword
		   && !t->operand_types[j].bitfield.xmmword)
	       || (i.types[j].bitfield.ymmword
		   && !t->operand_types[j].bitfield.ymmword)));
}
1612
/* Return 1 if there is no size conflict on any operands for
   instruction template T.  On mismatch sets i.error and, for
   templates with the D (reversible) modifier, retries with the two
   operands swapped.  */

static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions. */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size. */
  for (j = 0; j < i.operands; j++)
    {
      /* AnySize operands accept every size.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* Template isn't reversible; report the mismatch.  The label is
	 also the target of the reverse-check failures below.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  Only two-operand templates can carry D/FloatD.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1674
/* Return 1 if the overlap between the parsed operand GIVEN and a
   template operand (already reduced to OVERLAP) is a usable match;
   otherwise set i.error and return 0.  Pure size bits are ignored
   (they are checked by operand_size_match), but baseindex and
   jumpabsolute must agree exactly.  */

static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Mask out attributes that don't identify an operand kind.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1702
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.
   M0/M1 are the template operand types, G0/G1 the parsed operand
   types and T0/T1 the corresponding overlaps.  Returns 1 on match,
   otherwise sets i.error and returns 0.  */

static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Nothing to check unless both operands are registers.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register width on both sides is always fine.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An Acc template operand accepts any register size.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* If the overlaps share no register size at all, the template never
     constrained the two operands to the same width.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1753
1754 static INLINE unsigned int
1755 mode_from_disp_size (i386_operand_type t)
1756 {
1757 if (t.bitfield.disp8)
1758 return 1;
1759 else if (t.bitfield.disp16
1760 || t.bitfield.disp32
1761 || t.bitfield.disp32s)
1762 return 2;
1763 else
1764 return 0;
1765 }
1766
1767 static INLINE int
1768 fits_in_signed_byte (offsetT num)
1769 {
1770 return (num >= -128) && (num <= 127);
1771 }
1772
1773 static INLINE int
1774 fits_in_unsigned_byte (offsetT num)
1775 {
1776 return (num & 0xff) == num;
1777 }
1778
1779 static INLINE int
1780 fits_in_unsigned_word (offsetT num)
1781 {
1782 return (num & 0xffff) == num;
1783 }
1784
1785 static INLINE int
1786 fits_in_signed_word (offsetT num)
1787 {
1788 return (-32768 <= num) && (num <= 32767);
1789 }
1790
1791 static INLINE int
1792 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1793 {
1794 #ifndef BFD64
1795 return 1;
1796 #else
1797 return (!(((offsetT) -1 << 31) & num)
1798 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1799 #endif
1800 } /* fits_in_signed_long() */
1801
1802 static INLINE int
1803 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1804 {
1805 #ifndef BFD64
1806 return 1;
1807 #else
1808 return (num & (((offsetT) 2 << 31) - 1)) == num;
1809 #endif
1810 } /* fits_in_unsigned_long() */
1811
1812 static INLINE int
1813 fits_in_imm4 (offsetT num)
1814 {
1815 return (num & 0xf) == num;
1816 }
1817
/* Return the operand type describing every immediate encoding that
   can represent NUM, from the smallest upwards.  Imm64 is always
   possible; narrower forms are added according to NUM's range.  */

static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      /* Unsigned byte but not signed byte: no imm8s.  */
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1871
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when
   significant bits are lost.  Values whose discarded high bits are
   all ones (i.e. negative values that still fit) are accepted
   silently.  */

static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
1906
/* Classification of a prefix byte returned by add_prefix ().  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* A prefix of the same class was already set.  */
  PREFIX_LOCK,		/* Lock prefix added.  */
  PREFIX_REP,		/* Rep/repne prefix added.  */
  PREFIX_OTHER		/* Any other prefix added.  */
};
1914
/* Returns
   a. PREFIX_EXIST if attempting to add a prefix where one from the
   same class already exists.
   b. PREFIX_LOCK if lock prefix is added.
   c. PREFIX_REP if rep/repne prefix is added.
   d. PREFIX_OTHER if other prefix is added.
 */

static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes merge bit-wise; only duplicate REX.W or a second
     R/X/B bit collides.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the prefix byte to its slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
1992
1993 static void
1994 update_code_flag (int value, int check)
1995 {
1996 PRINTF_LIKE ((*as_error));
1997
1998 flag_code = (enum flag_code) value;
1999 if (flag_code == CODE_64BIT)
2000 {
2001 cpu_arch_flags.bitfield.cpu64 = 1;
2002 cpu_arch_flags.bitfield.cpuno64 = 0;
2003 }
2004 else
2005 {
2006 cpu_arch_flags.bitfield.cpu64 = 0;
2007 cpu_arch_flags.bitfield.cpuno64 = 1;
2008 }
2009 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2010 {
2011 if (check)
2012 as_error = as_fatal;
2013 else
2014 as_error = as_bad;
2015 (*as_error) (_("64bit mode not supported on `%s'."),
2016 cpu_arch_name ? cpu_arch_name : default_arch);
2017 }
2018 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2019 {
2020 if (check)
2021 as_error = as_fatal;
2022 else
2023 as_error = as_bad;
2024 (*as_error) (_("32bit mode not supported on `%s'."),
2025 cpu_arch_name ? cpu_arch_name : default_arch);
2026 }
2027 stackop_size = '\0';
2028 }
2029
/* Handler for the .code16/.code32/.code64 directives: change the
   code size without making an unsupported mode fatal.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2035
2036 static void
2037 set_16bit_gcc_code_flag (int new_code_flag)
2038 {
2039 flag_code = (enum flag_code) new_code_flag;
2040 if (flag_code != CODE_16BIT)
2041 abort ();
2042 cpu_arch_flags.bitfield.cpu64 = 0;
2043 cpu_arch_flags.bitfield.cpuno64 = 1;
2044 stackop_size = LONG_MNEM_SUFFIX;
2045 }
2046
/* Handler for the .intel_syntax/.att_syntax directives.  Parses an
   optional "prefix"/"noprefix" argument controlling whether register
   names require a '%' prefix, then updates the lexer tables
   accordingly.  */

static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified. */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      /* Restore the character get_symbol_end replaced with NUL.  */
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    /* No argument given: naked registers only make sense in Intel
       syntax when symbols carry a leading char to disambiguate.  */
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  /* Adjust which characters may appear in identifiers.  */
  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2083
/* Handler for the .intel_mnemonic/.att_mnemonic directives.  */

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2089
/* Handler for the .allow_index_reg/.disallow_index_reg directives.  */

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2095
/* Handler for the .sse_check directive.  Accepts "none", "warning"
   or "error" and sets the corresponding SSE-usage diagnostic
   level.  */

static void
set_sse_check (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
	sse_check = sse_check_none;
      else if (strcmp (string, "warning") == 0)
	sse_check = sse_check_warning;
      else if (strcmp (string, "error") == 0)
	sse_check = sse_check_error;
      else
	as_bad (_("bad argument to sse_check directive."));
      /* Restore the character get_symbol_end replaced with NUL.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing argument for sse_check directive"));

  demand_empty_rest_of_line ();
}
2121
/* Diagnose use of arch extension NAME (with flags NEW_FLAG) that is
   incompatible with the target: when assembling for Intel L1OM or
   K1OM, only extensions carrying the matching CpuL1OM/CpuK1OM bit
   are allowed.  A no-op on non-ELF targets.  */

static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel LIOM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel L1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2155
/* Handler for the .arch directive.  Accepts either a base
   architecture name (replacing the enabled flag set) or a
   sub-architecture/extension name starting with '.' (ORed into, or
   for negated entries removed from, the current flags), optionally
   followed by a ",jumps"/",nojumps" modifier controlling conditional
   jump promotion.  */

static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* A base architecture: replace the flag set and
		     remember the ISA (and default tuning).  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* A '.'-prefixed extension: add it, or for negated
		 entries (e.g. ".nosse") remove it.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Flags changed: record the extension name.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2252
2253 enum bfd_architecture
2254 i386_arch (void)
2255 {
2256 if (cpu_arch_isa == PROCESSOR_L1OM)
2257 {
2258 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2259 || flag_code != CODE_64BIT)
2260 as_fatal (_("Intel L1OM is 64bit ELF only"));
2261 return bfd_arch_l1om;
2262 }
2263 else if (cpu_arch_isa == PROCESSOR_K1OM)
2264 {
2265 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2266 || flag_code != CODE_64BIT)
2267 as_fatal (_("Intel K1OM is 64bit ELF only"));
2268 return bfd_arch_k1om;
2269 }
2270 else
2271 return bfd_arch_i386;
2272 }
2273
2274 unsigned long
2275 i386_mach (void)
2276 {
2277 if (!strncmp (default_arch, "x86_64", 6))
2278 {
2279 if (cpu_arch_isa == PROCESSOR_L1OM)
2280 {
2281 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2282 || default_arch[6] != '\0')
2283 as_fatal (_("Intel L1OM is 64bit ELF only"));
2284 return bfd_mach_l1om;
2285 }
2286 else if (cpu_arch_isa == PROCESSOR_K1OM)
2287 {
2288 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2289 || default_arch[6] != '\0')
2290 as_fatal (_("Intel K1OM is 64bit ELF only"));
2291 return bfd_mach_k1om;
2292 }
2293 else if (default_arch[6] == '\0')
2294 return bfd_mach_x86_64;
2295 else
2296 return bfd_mach_x64_32;
2297 }
2298 else if (!strcmp (default_arch, "i386"))
2299 return bfd_mach_i386_i386;
2300 else
2301 as_fatal (_("unknown architecture"));
2302 }
2303 \f
/* One-time assembler initialization: build the opcode and register
   hash tables, set up the lexical classification tables used by the
   parser, and record per-section alignment / DWARF CFI parameters.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* i386_optab is sorted so that all templates for one mnemonic are
       adjacent; each maximal run [start, end) is hashed under that
       mnemonic.  The table is terminated by a NULL name.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("internal Error:  Can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("internal Error:  Can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and register names are matched
	       case-insensitively: fold upper case to lower here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow high-bit (non-ASCII) characters in identifiers
	       and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    {
      record_alignment (text_section, 2);
      record_alignment (data_section, 2);
      record_alignment (bss_section, 2);
    }
#endif

  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2442
/* Dump opcode and register hash-table statistics to FILE (used by
   the generic "as --statistics" support).  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2449 \f
2450 #ifdef DEBUG386
2451
2452 /* Debugging routines for md_assemble. */
2453 static void pte (insn_template *);
2454 static void pt (i386_operand_type);
2455 static void pe (expressionS *);
2456 static void ps (symbolS *);
2457
/* Debug dump of a parsed instruction X: template, addressing info,
   ModRM/SIB/REX fields, then each operand's type and value.  LINE is
   a caller-supplied label for the output.  */
static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Register-class operands: print the register name.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      /* Immediate / displacement operands: dump the expression.  */
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2502
/* Debug dump of one insn template T: operand count, opcode bytes,
   extension opcode, D/W modifier bits, and each operand type.  */
static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2523
/* Debug dump of expression E: operation, constant part, and any
   add/op symbols.  */
static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2543
/* Debug dump of symbol S: name, external flag and owning segment.
   No trailing newline; callers add their own.  */
static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2552
2553 static struct type_name
2554 {
2555 i386_operand_type mask;
2556 const char *name;
2557 }
2558 const type_names[] =
2559 {
2560 { OPERAND_TYPE_REG8, "r8" },
2561 { OPERAND_TYPE_REG16, "r16" },
2562 { OPERAND_TYPE_REG32, "r32" },
2563 { OPERAND_TYPE_REG64, "r64" },
2564 { OPERAND_TYPE_IMM8, "i8" },
2565 { OPERAND_TYPE_IMM8, "i8s" },
2566 { OPERAND_TYPE_IMM16, "i16" },
2567 { OPERAND_TYPE_IMM32, "i32" },
2568 { OPERAND_TYPE_IMM32S, "i32s" },
2569 { OPERAND_TYPE_IMM64, "i64" },
2570 { OPERAND_TYPE_IMM1, "i1" },
2571 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2572 { OPERAND_TYPE_DISP8, "d8" },
2573 { OPERAND_TYPE_DISP16, "d16" },
2574 { OPERAND_TYPE_DISP32, "d32" },
2575 { OPERAND_TYPE_DISP32S, "d32s" },
2576 { OPERAND_TYPE_DISP64, "d64" },
2577 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2578 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2579 { OPERAND_TYPE_CONTROL, "control reg" },
2580 { OPERAND_TYPE_TEST, "test reg" },
2581 { OPERAND_TYPE_DEBUG, "debug reg" },
2582 { OPERAND_TYPE_FLOATREG, "FReg" },
2583 { OPERAND_TYPE_FLOATACC, "FAcc" },
2584 { OPERAND_TYPE_SREG2, "SReg2" },
2585 { OPERAND_TYPE_SREG3, "SReg3" },
2586 { OPERAND_TYPE_ACC, "Acc" },
2587 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2588 { OPERAND_TYPE_REGMMX, "rMMX" },
2589 { OPERAND_TYPE_REGXMM, "rXMM" },
2590 { OPERAND_TYPE_REGYMM, "rYMM" },
2591 { OPERAND_TYPE_ESSEG, "es" },
2592 };
2593
2594 static void
2595 pt (i386_operand_type t)
2596 {
2597 unsigned int j;
2598 i386_operand_type a;
2599
2600 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2601 {
2602 a = operand_type_and (t, type_names[j].mask);
2603 if (!operand_type_all_zero (&a))
2604 fprintf (stdout, "%s, ", type_names[j].name);
2605 }
2606 fflush (stdout);
2607 }
2608
2609 #endif /* DEBUG386 */
2610 \f
2611 static bfd_reloc_code_real_type
2612 reloc (unsigned int size,
2613 int pcrel,
2614 int sign,
2615 bfd_reloc_code_real_type other)
2616 {
2617 if (other != NO_RELOC)
2618 {
2619 reloc_howto_type *rel;
2620
2621 if (size == 8)
2622 switch (other)
2623 {
2624 case BFD_RELOC_X86_64_GOT32:
2625 return BFD_RELOC_X86_64_GOT64;
2626 break;
2627 case BFD_RELOC_X86_64_PLTOFF64:
2628 return BFD_RELOC_X86_64_PLTOFF64;
2629 break;
2630 case BFD_RELOC_X86_64_GOTPC32:
2631 other = BFD_RELOC_X86_64_GOTPC64;
2632 break;
2633 case BFD_RELOC_X86_64_GOTPCREL:
2634 other = BFD_RELOC_X86_64_GOTPCREL64;
2635 break;
2636 case BFD_RELOC_X86_64_TPOFF32:
2637 other = BFD_RELOC_X86_64_TPOFF64;
2638 break;
2639 case BFD_RELOC_X86_64_DTPOFF32:
2640 other = BFD_RELOC_X86_64_DTPOFF64;
2641 break;
2642 default:
2643 break;
2644 }
2645
2646 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2647 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2648 sign = -1;
2649
2650 rel = bfd_reloc_type_lookup (stdoutput, other);
2651 if (!rel)
2652 as_bad (_("unknown relocation (%u)"), other);
2653 else if (size != bfd_get_reloc_size (rel))
2654 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2655 bfd_get_reloc_size (rel),
2656 size);
2657 else if (pcrel && !rel->pc_relative)
2658 as_bad (_("non-pc-relative relocation for pc-relative field"));
2659 else if ((rel->complain_on_overflow == complain_overflow_signed
2660 && !sign)
2661 || (rel->complain_on_overflow == complain_overflow_unsigned
2662 && sign > 0))
2663 as_bad (_("relocated field and relocation type differ in signedness"));
2664 else
2665 return other;
2666 return NO_RELOC;
2667 }
2668
2669 if (pcrel)
2670 {
2671 if (!sign)
2672 as_bad (_("there are no unsigned pc-relative relocations"));
2673 switch (size)
2674 {
2675 case 1: return BFD_RELOC_8_PCREL;
2676 case 2: return BFD_RELOC_16_PCREL;
2677 case 4: return BFD_RELOC_32_PCREL;
2678 case 8: return BFD_RELOC_64_PCREL;
2679 }
2680 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2681 }
2682 else
2683 {
2684 if (sign > 0)
2685 switch (size)
2686 {
2687 case 4: return BFD_RELOC_X86_64_32S;
2688 }
2689 else
2690 switch (size)
2691 {
2692 case 1: return BFD_RELOC_8;
2693 case 2: return BFD_RELOC_16;
2694 case 4: return BFD_RELOC_32;
2695 case 8: return BFD_RELOC_64;
2696 }
2697 as_bad (_("cannot do %s %u byte relocation"),
2698 sign > 0 ? "signed" : "unsigned", size);
2699 }
2700
2701 return NO_RELOC;
2702 }
2703
2704 /* Here we decide which fixups can be adjusted to make them relative to
2705 the beginning of the section instead of the symbol. Basically we need
2706 to make sure that the dynamic relocations are done correctly, so in
2707 some cases we force the original symbol to be used. */
2708
2709 int
2710 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2711 {
2712 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2713 if (!IS_ELF)
2714 return 1;
2715
2716 /* Don't adjust pc-relative references to merge sections in 64-bit
2717 mode. */
2718 if (use_rela_relocations
2719 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2720 && fixP->fx_pcrel)
2721 return 0;
2722
2723 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2724 and changed later by validate_fix. */
2725 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2726 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2727 return 0;
2728
2729 /* adjust_reloc_syms doesn't know about the GOT. */
2730 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2731 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2732 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2734 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2735 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2736 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2737 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2738 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2739 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2740 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2741 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2742 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2747 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2748 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2749 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2750 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2751 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2752 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2753 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2754 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2755 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2756 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2757 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2758 return 0;
2759 #endif
2760 return 1;
2761 }
2762
/* Classify MNEMONIC for Intel-syntax operand size handling:
     0 - not an x87 math op (including fxsave/fxrstor),
     1 - x87 floating point op,
     2 - x87 integer op (fi...),
     3 - x87 control/environment op (fldcw, fnstenv, fsave, ...).
   Only meaningful for opcodes that take (memory) operands, so opcodes
   without operands may be classified inaccurately here.  */
static int
intel_float_operand (const char *mnemonic)
{
  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  switch (mnemonic[1])
    {
      /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
	 the fs segment override prefix not currently handled because no
	 call path can make opcodes without operands get here.  */
    case 'i':
      return 2;			/* integer op */
    case 'l':
      /* fldcw / fldenv are control ops; every other fl* is math.  */
      return (mnemonic[2] == 'd'
	      && (mnemonic[3] == 'c' || mnemonic[3] == 'e')) ? 3 : 1;
    case 'n':
      /* All non-waiting control ops except fnop.  */
      return mnemonic[2] != 'o' ? 3 : 1;
    case 'r':
      /* frstor / frstpm.  */
      return mnemonic[2] == 's' ? 3 : 1;
    case 's':
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't'
	  && (mnemonic[3] == 'c'	/* fstcw */
	      || mnemonic[3] == 'd'	/* fstdw */
	      || mnemonic[3] == 'e'	/* fstenv */
	      || mnemonic[3] == 's'))	/* fsts[gw] */
	return 3;
      return 1;
    case 'x':
      /* fxsave / fxrstor are not really math ops.  */
      return (mnemonic[2] == 'r' || mnemonic[2] == 's') ? 0 : 1;
    default:
      return 1;
    }
}
2815
/* Build the VEX (or XOP) prefix for the current instruction in
   i.vex, choosing the 2-byte C5 form when possible and the 3-byte
   C4/8F form otherwise.  T points at the matched template list so
   the following template can be substituted when operand swapping
   enables the shorter encoding.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  The VEX.vvvv field stores the register
     number in one's-complement form.  */
  if (i.vex.register_specifier)
    {
      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  Only possible for register-only insns whose template
     has the S (swappable) modifier and whose sole REX bit is REX_B;
     the swap moves that bit into REX_R, which the C5 form can
     encode.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  /* Determine the VEX.L (vector length) bit.  */
  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* Map the legacy 66/F3/F2 prefix byte embedded in the opcode to the
     2-bit VEX.pp field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit.  Stored inverted in VEX.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the opcode map (VEX.mmmmm) and the lead byte: 0xc4 for
	 VEX, 0x8f for AMD XOP.  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  A template with an explicit VexW
	 modifier forces the value; REX_W must not be set then.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	{
	  if (w)
	    abort ();

	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    w = 1;
	}

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
2965
/* Convert the template's extension opcode into a faked 8-bit
   immediate operand (used by AMD 3DNow!, some SSE/SSE3 and some AVX
   encodings).  Any fixed register operands implied by the encoding
   are checked and then dropped first.  */
static void
process_immext (void)
{
  expressionS *exp;

  if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
    {
      /* SSE3 Instructions have the fixed operands with an opcode
	 suffix which is coded in the same place as an 8-bit immediate
	 field would be.  Here we check those operands and remove them
	 afterwards.  */
      unsigned int x;

      /* Operand N must be register N (e.g. monitor: %eax, %ecx, %edx).  */
      for (x = 0; x < i.operands; x++)
	if (i.op[x].regs->reg_num != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands == 0
	      && (i.operands <= 2
		  || (i.tm.opcode_modifier.vex
		      && i.operands <= 4)));

  /* Append the faked immediate as the last operand.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  /* The extension opcode has been consumed; clear it so it is not
     emitted again as part of the opcode.  */
  i.tm.extension_opcode = None;
}
3009
3010
/* Check whether the HLE prefix (xacquire/xrelease) present on the
   current instruction is legal for it.  Returns 1 if OK, 0 (after
   issuing a diagnostic) otherwise.  */
static int
check_hle (void)
{
  switch (i.tm.opcode_modifier.hleprefixok)
    {
    default:
      abort ();
    case HLEPrefixNone:
      /* Instruction never takes an HLE prefix.  */
      if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
	as_bad (_("invalid instruction `%s' after `xacquire'"),
		i.tm.name);
      else
	as_bad (_("invalid instruction `%s' after `xrelease'"),
		i.tm.name);
      return 0;
    case HLEPrefixLock:
      /* HLE prefix only valid together with a lock prefix.  */
      if (i.prefix[LOCK_PREFIX])
	return 1;
      if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
	as_bad (_("missing `lock' with `xacquire'"));
      else
	as_bad (_("missing `lock' with `xrelease'"));
      return 0;
    case HLEPrefixAny:
      /* Either HLE prefix is always acceptable.  */
      return 1;
    case HLEPrefixRelease:
      /* Only xrelease is allowed, and only with a memory destination
	 (e.g. mov to memory).  */
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	{
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
		  i.tm.name);
	  return 0;
	}
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	{
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
	  return 0;
	}
      return 1;
    }
}
3053
3054 /* This is the guts of the machine-dependent assembler. LINE points to a
3055 machine dependent instruction. This function is supposed to emit
3056 the frags/bytes it assembles to. */
3057
void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  All per-insn state lives in `i' and the two
     expression pools.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  /* Shrink immediates to their smallest possible representation.  */
  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Optionally diagnose use of legacy SSE insns when AVX is expected
     (-msse-check=).  */
  if (sse_check != sse_check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == sse_check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.have_hle && !check_hle ())
    return;

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex)
    build_vex_prefix (t);

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3297
/* Parse the mnemonic (and any leading prefixes) at the start of LINE,
   recording prefix bytes and suffix/encoding hints in the global insn
   state `i' and leaving the candidate template group in
   current_templates.  MNEMONIC is a caller-supplied scratch buffer of
   at least MAX_MNEM_SIZE bytes.  Returns a pointer to the first
   character after the mnemonic (the operand list), or NULL after
   reporting a diagnostic with as_bad.  */
static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Non-zero if we found a prefix only acceptable with string insns.  */
  const char *expecting_string_instruction = NULL;

  /* Each iteration consumes one token; prefixes loop back, the final
     mnemonic breaks out.  */
  while (1)
    {
      /* Copy the next token into MNEMONIC, normalizing characters via
	 mnemonic_chars[] and remembering the last '.' seen (possible
	 pseudo-suffix such as ".s" / ".d8" / ".d32").  */
      mnem_p = mnemonic;
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* If this token is a prefix and more input follows, record it and
	 go round again for the real mnemonic.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* xacquire/xrelease (HLE) reuse the rep/repne bytes; only
		 a plain rep/repne demands a string insn to follow.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.have_hle = 1;
	      else
		expecting_string_instruction = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      /* Strip the recognized pseudo-suffix and retry the lookup.  */
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* Deliberate fall-through: the "else" arm shares the
	       trim-and-retry code with the byte/qword cases below.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  /* Check for rep/repne without a string instruction.  */
  if (expecting_string_instruction)
    {
      static templates override;

      for (t = current_templates->start; t < current_templates->end; ++t)
	if (t->opcode_modifier.isstring)
	  break;
      if (t >= current_templates->end)
	{
	  as_bad (_("expecting string instruction after `%s'"),
		  expecting_string_instruction);
	  return NULL;
	}
      /* Restrict the template group to the string-insn variants only.  */
      for (override.start = t; t < current_templates->end; ++t)
	if (!t->opcode_modifier.isstring)
	  break;
      override.end = t;
      current_templates = &override;
    }

  return l;
}
3554
/* Parse the comma-separated operand list starting at L for the
   instruction MNEMONIC, recording each operand in the global insn
   state `i' via i386_att_operand / i386_intel_operand.  Returns a
   pointer just past the operands (END_OF_INSN), or NULL after
   reporting a diagnostic.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan to the end of this operand: a top-level ',' or end of
	 insn.  Commas inside (AT&T) parentheses / (Intel) brackets do
	 not terminate the operand.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T syntax groups memory operands with parentheses,
	     Intel syntax with brackets.  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty operand: only legal as trailing whitespace; a bare
	     or doubled comma is an error.  */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3673
3674 static void
3675 swap_2_operands (int xchg1, int xchg2)
3676 {
3677 union i386_op temp_op;
3678 i386_operand_type temp_type;
3679 enum bfd_reloc_code_real temp_reloc;
3680
3681 temp_type = i.types[xchg2];
3682 i.types[xchg2] = i.types[xchg1];
3683 i.types[xchg1] = temp_type;
3684 temp_op = i.op[xchg2];
3685 i.op[xchg2] = i.op[xchg1];
3686 i.op[xchg1] = temp_op;
3687 temp_reloc = i.reloc[xchg2];
3688 i.reloc[xchg2] = i.reloc[xchg1];
3689 i.reloc[xchg1] = temp_reloc;
3690 }
3691
/* Reverse the order of the parsed operands in `i', used when matching
   operands given in the opposite order from the templates.  The outer
   operands are always exchanged; with 4 or 5 operands the second and
   second-to-last are exchanged as well (the middle operand of 3 or 5
   stays put).  */
static void
swap_operands (void)
{
  switch (i.operands)
    {
    case 5:
    case 4:
      swap_2_operands (1, i.operands - 2);
      /* Fall through.  */
    case 3:
    case 2:
      swap_2_operands (0, i.operands - 1);
      break;
    default:
      abort ();
    }

  /* With two memory operands, keep each segment override attached to
     the memory operand it belonged to.  */
  if (i.mem_operands == 2)
    {
      const seg_entry *temp_seg;
      temp_seg = i.seg[0];
      i.seg[0] = i.seg[1];
      i.seg[1] = temp_seg;
    }
}
3716
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens/narrows the acceptable immNN bits in
   i.types[] (it never changes the operand values except to
   sign-normalize them) so that template matching can pick a shorter
   encoding.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No suffix and no registers: the operand size prefix (or its
       absence in 16-bit mode) implies a word operand.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Same trick at 32 bits: sign-extend from bit 31.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* ALLOWED collects every immediate size any candidate
		 template accepts for this operand.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied sizes if at least one
		 template actually accepts such a size.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
3863
/* Try to use the smallest displacement type too.  Narrows the dispNN
   bits in i.types[] for constant displacements (dropping a zero
   displacement entirely when a base/index register is present), and
   emits the fixup immediately for TLS descriptor call relocations.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base and/or index register
		   needs no displacement byte at all.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With an address-size override, a 32-bit (zero-
		   extended) displacement is also usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor calls: the relocation is against the
	       insn, not against a displacement field, so emit the fix
	       here and clear every disp bit.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
3939
3940 /* Check if operands are valid for the instruction. */
3941
3942 static int
3943 check_VecOperands (const insn_template *t)
3944 {
3945 /* Without VSIB byte, we can't have a vector register for index. */
3946 if (!t->opcode_modifier.vecsib
3947 && i.index_reg
3948 && (i.index_reg->reg_type.bitfield.regxmm
3949 || i.index_reg->reg_type.bitfield.regymm))
3950 {
3951 i.error = unsupported_vector_index_register;
3952 return 1;
3953 }
3954
3955 /* For VSIB byte, we need a vector register for index and no PC
3956 relative addressing is allowed. */
3957 if (t->opcode_modifier.vecsib
3958 && (!i.index_reg
3959 || !((t->opcode_modifier.vecsib == VecSIB128
3960 && i.index_reg->reg_type.bitfield.regxmm)
3961 || (t->opcode_modifier.vecsib == VecSIB256
3962 && i.index_reg->reg_type.bitfield.regymm))
3963 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3964 {
3965 i.error = invalid_vsib_address;
3966 return 1;
3967 }
3968
3969 return 0;
3970 }
3971
3972 /* Check if operands are valid for the instruction. Update VEX
3973 operand types. */
3974
3975 static int
3976 VEX_check_operands (const insn_template *t)
3977 {
3978 if (!t->opcode_modifier.vex)
3979 return 0;
3980
3981 /* Only check VEX_Imm4, which must be the first operand. */
3982 if (t->operand_types[0].bitfield.vec_imm4)
3983 {
3984 if (i.op[0].imms->X_op != O_constant
3985 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3986 {
3987 i.error = bad_imm4;
3988 return 1;
3989 }
3990
3991 /* Turn off Imm8 so that update_imm won't complain. */
3992 i.types[0] = vec_imm4;
3993 }
3994
3995 return 0;
3996 }
3997
3998 static const insn_template *
3999 match_template (void)
4000 {
4001 /* Points to template once we've found it. */
4002 const insn_template *t;
4003 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4004 i386_operand_type overlap4;
4005 unsigned int found_reverse_match;
4006 i386_opcode_modifier suffix_check;
4007 i386_operand_type operand_types [MAX_OPERANDS];
4008 int addr_prefix_disp;
4009 unsigned int j;
4010 unsigned int found_cpu_match;
4011 unsigned int check_register;
4012
4013 #if MAX_OPERANDS != 5
4014 # error "MAX_OPERANDS must be 5."
4015 #endif
4016
4017 found_reverse_match = 0;
4018 addr_prefix_disp = -1;
4019
4020 memset (&suffix_check, 0, sizeof (suffix_check));
4021 if (i.suffix == BYTE_MNEM_SUFFIX)
4022 suffix_check.no_bsuf = 1;
4023 else if (i.suffix == WORD_MNEM_SUFFIX)
4024 suffix_check.no_wsuf = 1;
4025 else if (i.suffix == SHORT_MNEM_SUFFIX)
4026 suffix_check.no_ssuf = 1;
4027 else if (i.suffix == LONG_MNEM_SUFFIX)
4028 suffix_check.no_lsuf = 1;
4029 else if (i.suffix == QWORD_MNEM_SUFFIX)
4030 suffix_check.no_qsuf = 1;
4031 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4032 suffix_check.no_ldsuf = 1;
4033
4034 /* Must have right number of operands. */
4035 i.error = number_of_operands_mismatch;
4036
4037 for (t = current_templates->start; t < current_templates->end; t++)
4038 {
4039 addr_prefix_disp = -1;
4040
4041 if (i.operands != t->operands)
4042 continue;
4043
4044 /* Check processor support. */
4045 i.error = unsupported;
4046 found_cpu_match = (cpu_flags_match (t)
4047 == CPU_FLAGS_PERFECT_MATCH);
4048 if (!found_cpu_match)
4049 continue;
4050
4051 /* Check old gcc support. */
4052 i.error = old_gcc_only;
4053 if (!old_gcc && t->opcode_modifier.oldgcc)
4054 continue;
4055
4056 /* Check AT&T mnemonic. */
4057 i.error = unsupported_with_intel_mnemonic;
4058 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4059 continue;
4060
4061 /* Check AT&T/Intel syntax. */
4062 i.error = unsupported_syntax;
4063 if ((intel_syntax && t->opcode_modifier.attsyntax)
4064 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4065 continue;
4066
4067 /* Check the suffix, except for some instructions in intel mode. */
4068 i.error = invalid_instruction_suffix;
4069 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4070 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4071 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4072 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4073 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4074 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4075 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4076 continue;
4077
4078 if (!operand_size_match (t))
4079 continue;
4080
4081 for (j = 0; j < MAX_OPERANDS; j++)
4082 operand_types[j] = t->operand_types[j];
4083
4084 /* In general, don't allow 64-bit operands in 32-bit mode. */
4085 if (i.suffix == QWORD_MNEM_SUFFIX
4086 && flag_code != CODE_64BIT
4087 && (intel_syntax
4088 ? (!t->opcode_modifier.ignoresize
4089 && !intel_float_operand (t->name))
4090 : intel_float_operand (t->name) != 2)
4091 && ((!operand_types[0].bitfield.regmmx
4092 && !operand_types[0].bitfield.regxmm
4093 && !operand_types[0].bitfield.regymm)
4094 || (!operand_types[t->operands > 1].bitfield.regmmx
4095 && !!operand_types[t->operands > 1].bitfield.regxmm
4096 && !!operand_types[t->operands > 1].bitfield.regymm))
4097 && (t->base_opcode != 0x0fc7
4098 || t->extension_opcode != 1 /* cmpxchg8b */))
4099 continue;
4100
4101 /* In general, don't allow 32-bit operands on pre-386. */
4102 else if (i.suffix == LONG_MNEM_SUFFIX
4103 && !cpu_arch_flags.bitfield.cpui386
4104 && (intel_syntax
4105 ? (!t->opcode_modifier.ignoresize
4106 && !intel_float_operand (t->name))
4107 : intel_float_operand (t->name) != 2)
4108 && ((!operand_types[0].bitfield.regmmx
4109 && !operand_types[0].bitfield.regxmm)
4110 || (!operand_types[t->operands > 1].bitfield.regmmx
4111 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4112 continue;
4113
4114 /* Do not verify operands when there are none. */
4115 else
4116 {
4117 if (!t->operands)
4118 /* We've found a match; break out of loop. */
4119 break;
4120 }
4121
4122 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4123 into Disp32/Disp16/Disp32 operand. */
4124 if (i.prefix[ADDR_PREFIX] != 0)
4125 {
4126 /* There should be only one Disp operand. */
4127 switch (flag_code)
4128 {
4129 case CODE_16BIT:
4130 for (j = 0; j < MAX_OPERANDS; j++)
4131 {
4132 if (operand_types[j].bitfield.disp16)
4133 {
4134 addr_prefix_disp = j;
4135 operand_types[j].bitfield.disp32 = 1;
4136 operand_types[j].bitfield.disp16 = 0;
4137 break;
4138 }
4139 }
4140 break;
4141 case CODE_32BIT:
4142 for (j = 0; j < MAX_OPERANDS; j++)
4143 {
4144 if (operand_types[j].bitfield.disp32)
4145 {
4146 addr_prefix_disp = j;
4147 operand_types[j].bitfield.disp32 = 0;
4148 operand_types[j].bitfield.disp16 = 1;
4149 break;
4150 }
4151 }
4152 break;
4153 case CODE_64BIT:
4154 for (j = 0; j < MAX_OPERANDS; j++)
4155 {
4156 if (operand_types[j].bitfield.disp64)
4157 {
4158 addr_prefix_disp = j;
4159 operand_types[j].bitfield.disp64 = 0;
4160 operand_types[j].bitfield.disp32 = 1;
4161 break;
4162 }
4163 }
4164 break;
4165 }
4166 }
4167
4168 /* We check register size if needed. */
4169 check_register = t->opcode_modifier.checkregsize;
4170 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4171 switch (t->operands)
4172 {
4173 case 1:
4174 if (!operand_type_match (overlap0, i.types[0]))
4175 continue;
4176 break;
4177 case 2:
4178 /* xchg %eax, %eax is a special case. It is an aliase for nop
4179 only in 32bit mode and we can use opcode 0x90. In 64bit
4180 mode, we can't use 0x90 for xchg %eax, %eax since it should
4181 zero-extend %eax to %rax. */
4182 if (flag_code == CODE_64BIT
4183 && t->base_opcode == 0x90
4184 && operand_type_equal (&i.types [0], &acc32)
4185 && operand_type_equal (&i.types [1], &acc32))
4186 continue;
4187 if (i.swap_operand)
4188 {
4189 /* If we swap operand in encoding, we either match
4190 the next one or reverse direction of operands. */
4191 if (t->opcode_modifier.s)
4192 continue;
4193 else if (t->opcode_modifier.d)
4194 goto check_reverse;
4195 }
4196
4197 case 3:
4198 /* If we swap operand in encoding, we match the next one. */
4199 if (i.swap_operand && t->opcode_modifier.s)
4200 continue;
4201 case 4:
4202 case 5:
4203 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4204 if (!operand_type_match (overlap0, i.types[0])
4205 || !operand_type_match (overlap1, i.types[1])
4206 || (check_register
4207 && !operand_type_register_match (overlap0, i.types[0],
4208 operand_types[0],
4209 overlap1, i.types[1],
4210 operand_types[1])))
4211 {
4212 /* Check if other direction is valid ... */
4213 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4214 continue;
4215
4216 check_reverse:
4217 /* Try reversing direction of operands. */
4218 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4219 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4220 if (!operand_type_match (overlap0, i.types[0])
4221 || !operand_type_match (overlap1, i.types[1])
4222 || (check_register
4223 && !operand_type_register_match (overlap0,
4224 i.types[0],
4225 operand_types[1],
4226 overlap1,
4227 i.types[1],
4228 operand_types[0])))
4229 {
4230 /* Does not match either direction. */
4231 continue;
4232 }
4233 /* found_reverse_match holds which of D or FloatDR
4234 we've found. */
4235 if (t->opcode_modifier.d)
4236 found_reverse_match = Opcode_D;
4237 else if (t->opcode_modifier.floatd)
4238 found_reverse_match = Opcode_FloatD;
4239 else
4240 found_reverse_match = 0;
4241 if (t->opcode_modifier.floatr)
4242 found_reverse_match |= Opcode_FloatR;
4243 }
4244 else
4245 {
4246 /* Found a forward 2 operand match here. */
4247 switch (t->operands)
4248 {
4249 case 5:
4250 overlap4 = operand_type_and (i.types[4],
4251 operand_types[4]);
4252 case 4:
4253 overlap3 = operand_type_and (i.types[3],
4254 operand_types[3]);
4255 case 3:
4256 overlap2 = operand_type_and (i.types[2],
4257 operand_types[2]);
4258 break;
4259 }
4260
4261 switch (t->operands)
4262 {
4263 case 5:
4264 if (!operand_type_match (overlap4, i.types[4])
4265 || !operand_type_register_match (overlap3,
4266 i.types[3],
4267 operand_types[3],
4268 overlap4,
4269 i.types[4],
4270 operand_types[4]))
4271 continue;
4272 case 4:
4273 if (!operand_type_match (overlap3, i.types[3])
4274 || (check_register
4275 && !operand_type_register_match (overlap2,
4276 i.types[2],
4277 operand_types[2],
4278 overlap3,
4279 i.types[3],
4280 operand_types[3])))
4281 continue;
4282 case 3:
4283 /* Here we make use of the fact that there are no
4284 reverse match 3 operand instructions, and all 3
4285 operand instructions only need to be checked for
4286 register consistency between operands 2 and 3. */
4287 if (!operand_type_match (overlap2, i.types[2])
4288 || (check_register
4289 && !operand_type_register_match (overlap1,
4290 i.types[1],
4291 operand_types[1],
4292 overlap2,
4293 i.types[2],
4294 operand_types[2])))
4295 continue;
4296 break;
4297 }
4298 }
4299 /* Found either forward/reverse 2, 3 or 4 operand match here:
4300 slip through to break. */
4301 }
4302 if (!found_cpu_match)
4303 {
4304 found_reverse_match = 0;
4305 continue;
4306 }
4307
4308 /* Check if vector operands are valid. */
4309 if (check_VecOperands (t))
4310 continue;
4311
4312 /* Check if VEX operands are valid. */
4313 if (VEX_check_operands (t))
4314 continue;
4315
4316 /* We've found a match; break out of loop. */
4317 break;
4318 }
4319
4320 if (t == current_templates->end)
4321 {
4322 /* We found no match. */
4323 const char *err_msg;
4324 switch (i.error)
4325 {
4326 default:
4327 abort ();
4328 case operand_size_mismatch:
4329 err_msg = _("operand size mismatch");
4330 break;
4331 case operand_type_mismatch:
4332 err_msg = _("operand type mismatch");
4333 break;
4334 case register_type_mismatch:
4335 err_msg = _("register type mismatch");
4336 break;
4337 case number_of_operands_mismatch:
4338 err_msg = _("number of operands mismatch");
4339 break;
4340 case invalid_instruction_suffix:
4341 err_msg = _("invalid instruction suffix");
4342 break;
4343 case bad_imm4:
4344 err_msg = _("Imm4 isn't the first operand");
4345 break;
4346 case old_gcc_only:
4347 err_msg = _("only supported with old gcc");
4348 break;
4349 case unsupported_with_intel_mnemonic:
4350 err_msg = _("unsupported with Intel mnemonic");
4351 break;
4352 case unsupported_syntax:
4353 err_msg = _("unsupported syntax");
4354 break;
4355 case unsupported:
4356 as_bad (_("unsupported `%s'"),
4357 current_templates->start->name);
4358 return NULL;
4359 case invalid_vsib_address:
4360 err_msg = _("invalid VSIB address");
4361 break;
4362 case unsupported_vector_index_register:
4363 err_msg = _("unsupported vector index register");
4364 break;
4365 }
4366 as_bad (_("%s for `%s'"), err_msg,
4367 current_templates->start->name);
4368 return NULL;
4369 }
4370
4371 if (!quiet_warnings)
4372 {
4373 if (!intel_syntax
4374 && (i.types[0].bitfield.jumpabsolute
4375 != operand_types[0].bitfield.jumpabsolute))
4376 {
4377 as_warn (_("indirect %s without `*'"), t->name);
4378 }
4379
4380 if (t->opcode_modifier.isprefix
4381 && t->opcode_modifier.ignoresize)
4382 {
4383 /* Warn them that a data or address size prefix doesn't
4384 affect assembly of the next line of code. */
4385 as_warn (_("stand-alone `%s' prefix"), t->name);
4386 }
4387 }
4388
4389 /* Copy the template we found. */
4390 i.tm = *t;
4391
4392 if (addr_prefix_disp != -1)
4393 i.tm.operand_types[addr_prefix_disp]
4394 = operand_types[addr_prefix_disp];
4395
4396 if (found_reverse_match)
4397 {
4398 /* If we found a reverse match we must alter the opcode
4399 direction bit. found_reverse_match holds bits to change
4400 (different for int & float insns). */
4401
4402 i.tm.base_opcode ^= found_reverse_match;
4403
4404 i.tm.operand_types[0] = operand_types[1];
4405 i.tm.operand_types[1] = operand_types[0];
4406 }
4407
4408 return t;
4409 }
4410
4411 static int
4412 check_string (void)
4413 {
4414 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4415 if (i.tm.operand_types[mem_op].bitfield.esseg)
4416 {
4417 if (i.seg[0] != NULL && i.seg[0] != &es)
4418 {
4419 as_bad (_("`%s' operand %d must use `%ses' segment"),
4420 i.tm.name,
4421 mem_op + 1,
4422 register_prefix);
4423 return 0;
4424 }
4425 /* There's only ever one segment override allowed per instruction.
4426 This instruction possibly has a legal segment override on the
4427 second operand, so copy the segment to where non-string
4428 instructions store it, allowing common code. */
4429 i.seg[0] = i.seg[1];
4430 }
4431 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4432 {
4433 if (i.seg[1] != NULL && i.seg[1] != &es)
4434 {
4435 as_bad (_("`%s' operand %d must use `%ses' segment"),
4436 i.tm.name,
4437 mem_op + 2,
4438 register_prefix);
4439 return 0;
4440 }
4441 }
4442 return 1;
4443 }
4444
4445 static int
4446 process_suffix (void)
4447 {
4448 /* If matched instruction specifies an explicit instruction mnemonic
4449 suffix, use it. */
4450 if (i.tm.opcode_modifier.size16)
4451 i.suffix = WORD_MNEM_SUFFIX;
4452 else if (i.tm.opcode_modifier.size32)
4453 i.suffix = LONG_MNEM_SUFFIX;
4454 else if (i.tm.opcode_modifier.size64)
4455 i.suffix = QWORD_MNEM_SUFFIX;
4456 else if (i.reg_operands)
4457 {
4458 /* If there's no instruction mnemonic suffix we try to invent one
4459 based on register operands. */
4460 if (!i.suffix)
4461 {
4462 /* We take i.suffix from the last register operand specified,
4463 Destination register type is more significant than source
4464 register type. crc32 in SSE4.2 prefers source register
4465 type. */
4466 if (i.tm.base_opcode == 0xf20f38f1)
4467 {
4468 if (i.types[0].bitfield.reg16)
4469 i.suffix = WORD_MNEM_SUFFIX;
4470 else if (i.types[0].bitfield.reg32)
4471 i.suffix = LONG_MNEM_SUFFIX;
4472 else if (i.types[0].bitfield.reg64)
4473 i.suffix = QWORD_MNEM_SUFFIX;
4474 }
4475 else if (i.tm.base_opcode == 0xf20f38f0)
4476 {
4477 if (i.types[0].bitfield.reg8)
4478 i.suffix = BYTE_MNEM_SUFFIX;
4479 }
4480
4481 if (!i.suffix)
4482 {
4483 int op;
4484
4485 if (i.tm.base_opcode == 0xf20f38f1
4486 || i.tm.base_opcode == 0xf20f38f0)
4487 {
4488 /* We have to know the operand size for crc32. */
4489 as_bad (_("ambiguous memory operand size for `%s`"),
4490 i.tm.name);
4491 return 0;
4492 }
4493
4494 for (op = i.operands; --op >= 0;)
4495 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4496 {
4497 if (i.types[op].bitfield.reg8)
4498 {
4499 i.suffix = BYTE_MNEM_SUFFIX;
4500 break;
4501 }
4502 else if (i.types[op].bitfield.reg16)
4503 {
4504 i.suffix = WORD_MNEM_SUFFIX;
4505 break;
4506 }
4507 else if (i.types[op].bitfield.reg32)
4508 {
4509 i.suffix = LONG_MNEM_SUFFIX;
4510 break;
4511 }
4512 else if (i.types[op].bitfield.reg64)
4513 {
4514 i.suffix = QWORD_MNEM_SUFFIX;
4515 break;
4516 }
4517 }
4518 }
4519 }
4520 else if (i.suffix == BYTE_MNEM_SUFFIX)
4521 {
4522 if (intel_syntax
4523 && i.tm.opcode_modifier.ignoresize
4524 && i.tm.opcode_modifier.no_bsuf)
4525 i.suffix = 0;
4526 else if (!check_byte_reg ())
4527 return 0;
4528 }
4529 else if (i.suffix == LONG_MNEM_SUFFIX)
4530 {
4531 if (intel_syntax
4532 && i.tm.opcode_modifier.ignoresize
4533 && i.tm.opcode_modifier.no_lsuf)
4534 i.suffix = 0;
4535 else if (!check_long_reg ())
4536 return 0;
4537 }
4538 else if (i.suffix == QWORD_MNEM_SUFFIX)
4539 {
4540 if (intel_syntax
4541 && i.tm.opcode_modifier.ignoresize
4542 && i.tm.opcode_modifier.no_qsuf)
4543 i.suffix = 0;
4544 else if (!check_qword_reg ())
4545 return 0;
4546 }
4547 else if (i.suffix == WORD_MNEM_SUFFIX)
4548 {
4549 if (intel_syntax
4550 && i.tm.opcode_modifier.ignoresize
4551 && i.tm.opcode_modifier.no_wsuf)
4552 i.suffix = 0;
4553 else if (!check_word_reg ())
4554 return 0;
4555 }
4556 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4557 || i.suffix == YMMWORD_MNEM_SUFFIX)
4558 {
4559 /* Skip if the instruction has x/y suffix. match_template
4560 should check if it is a valid suffix. */
4561 }
4562 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4563 /* Do nothing if the instruction is going to ignore the prefix. */
4564 ;
4565 else
4566 abort ();
4567 }
4568 else if (i.tm.opcode_modifier.defaultsize
4569 && !i.suffix
4570 /* exclude fldenv/frstor/fsave/fstenv */
4571 && i.tm.opcode_modifier.no_ssuf)
4572 {
4573 i.suffix = stackop_size;
4574 }
4575 else if (intel_syntax
4576 && !i.suffix
4577 && (i.tm.operand_types[0].bitfield.jumpabsolute
4578 || i.tm.opcode_modifier.jumpbyte
4579 || i.tm.opcode_modifier.jumpintersegment
4580 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4581 && i.tm.extension_opcode <= 3)))
4582 {
4583 switch (flag_code)
4584 {
4585 case CODE_64BIT:
4586 if (!i.tm.opcode_modifier.no_qsuf)
4587 {
4588 i.suffix = QWORD_MNEM_SUFFIX;
4589 break;
4590 }
4591 case CODE_32BIT:
4592 if (!i.tm.opcode_modifier.no_lsuf)
4593 i.suffix = LONG_MNEM_SUFFIX;
4594 break;
4595 case CODE_16BIT:
4596 if (!i.tm.opcode_modifier.no_wsuf)
4597 i.suffix = WORD_MNEM_SUFFIX;
4598 break;
4599 }
4600 }
4601
4602 if (!i.suffix)
4603 {
4604 if (!intel_syntax)
4605 {
4606 if (i.tm.opcode_modifier.w)
4607 {
4608 as_bad (_("no instruction mnemonic suffix given and "
4609 "no register operands; can't size instruction"));
4610 return 0;
4611 }
4612 }
4613 else
4614 {
4615 unsigned int suffixes;
4616
4617 suffixes = !i.tm.opcode_modifier.no_bsuf;
4618 if (!i.tm.opcode_modifier.no_wsuf)
4619 suffixes |= 1 << 1;
4620 if (!i.tm.opcode_modifier.no_lsuf)
4621 suffixes |= 1 << 2;
4622 if (!i.tm.opcode_modifier.no_ldsuf)
4623 suffixes |= 1 << 3;
4624 if (!i.tm.opcode_modifier.no_ssuf)
4625 suffixes |= 1 << 4;
4626 if (!i.tm.opcode_modifier.no_qsuf)
4627 suffixes |= 1 << 5;
4628
4629 /* There are more than suffix matches. */
4630 if (i.tm.opcode_modifier.w
4631 || ((suffixes & (suffixes - 1))
4632 && !i.tm.opcode_modifier.defaultsize
4633 && !i.tm.opcode_modifier.ignoresize))
4634 {
4635 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4636 return 0;
4637 }
4638 }
4639 }
4640
4641 /* Change the opcode based on the operand size given by i.suffix;
4642 We don't need to change things for byte insns. */
4643
4644 if (i.suffix
4645 && i.suffix != BYTE_MNEM_SUFFIX
4646 && i.suffix != XMMWORD_MNEM_SUFFIX
4647 && i.suffix != YMMWORD_MNEM_SUFFIX)
4648 {
4649 /* It's not a byte, select word/dword operation. */
4650 if (i.tm.opcode_modifier.w)
4651 {
4652 if (i.tm.opcode_modifier.shortform)
4653 i.tm.base_opcode |= 8;
4654 else
4655 i.tm.base_opcode |= 1;
4656 }
4657
4658 /* Now select between word & dword operations via the operand
4659 size prefix, except for instructions that will ignore this
4660 prefix anyway. */
4661 if (i.tm.opcode_modifier.addrprefixop0)
4662 {
4663 /* The address size override prefix changes the size of the
4664 first operand. */
4665 if ((flag_code == CODE_32BIT
4666 && i.op->regs[0].reg_type.bitfield.reg16)
4667 || (flag_code != CODE_32BIT
4668 && i.op->regs[0].reg_type.bitfield.reg32))
4669 if (!add_prefix (ADDR_PREFIX_OPCODE))
4670 return 0;
4671 }
4672 else if (i.suffix != QWORD_MNEM_SUFFIX
4673 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4674 && !i.tm.opcode_modifier.ignoresize
4675 && !i.tm.opcode_modifier.floatmf
4676 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4677 || (flag_code == CODE_64BIT
4678 && i.tm.opcode_modifier.jumpbyte)))
4679 {
4680 unsigned int prefix = DATA_PREFIX_OPCODE;
4681
4682 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4683 prefix = ADDR_PREFIX_OPCODE;
4684
4685 if (!add_prefix (prefix))
4686 return 0;
4687 }
4688
4689 /* Set mode64 for an operand. */
4690 if (i.suffix == QWORD_MNEM_SUFFIX
4691 && flag_code == CODE_64BIT
4692 && !i.tm.opcode_modifier.norex64)
4693 {
4694 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4695 need rex64. cmpxchg8b is also a special case. */
4696 if (! (i.operands == 2
4697 && i.tm.base_opcode == 0x90
4698 && i.tm.extension_opcode == None
4699 && operand_type_equal (&i.types [0], &acc64)
4700 && operand_type_equal (&i.types [1], &acc64))
4701 && ! (i.operands == 1
4702 && i.tm.base_opcode == 0xfc7
4703 && i.tm.extension_opcode == 1
4704 && !operand_type_check (i.types [0], reg)
4705 && operand_type_check (i.types [0], anymem)))
4706 i.rex |= REX_W;
4707 }
4708
4709 /* Size floating point instruction. */
4710 if (i.suffix == LONG_MNEM_SUFFIX)
4711 if (i.tm.opcode_modifier.floatmf)
4712 i.tm.base_opcode ^= 4;
4713 }
4714
4715 return 1;
4716 }
4717
/* Validate the register operands against a `b' (byte) mnemonic suffix.
   8-bit registers are accepted as-is.  For a 16/32/64-bit register
   whose encoding (reg_num < 4) aliases a byte register, the byte
   register is silently substituted with a warning — except in 64-bit
   mode, where this lowering is rejected.  Any other register class is
   an error.  Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4)
	{
	  /* Prohibit these changes in the 64bit mode, since the
	     lowering is more complicated.  */
	  if (flag_code == CODE_64BIT
	      && !i.tm.operand_types[op].bitfield.inoutportreg)
	    {
	      as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		      register_prefix, i.op[op].regs->reg_name,
		      i.suffix);
	      return 0;
	    }
#if REGISTER_WARNINGS
	  /* Warn with the name of the byte register at the fixed
	     offset from the given one in the register table
	     (%ax -> %al, %eax -> %al, ...).  */
	  if (!quiet_warnings
	      && !i.tm.operand_types[op].bitfield.inoutportreg)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
4789
/* Validate the register operands against an `l' (dword) mnemonic
   suffix.  Rejects byte registers (unless the template wants them),
   substitutes the 32-bit register for a 16-bit one with a warning
   (error in 64-bit mode), and rejects 64-bit registers except for the
   Intel-syntax ToQword conversion case.  Returns 1 if OK, 0 after
   issuing as_bad.  */

static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	else
	  /* %ax -> %eax etc.: warn with the 32-bit register found at
	     the fixed offset from this one in the register table.  */
	  as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		   register_prefix,
		   (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		   register_prefix,
		   i.op[op].regs->reg_name,
		   i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte. */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
4857
/* Validate the register operands against a `q' (qword) mnemonic
   suffix.  Rejects byte registers (unless the template wants them);
   for a 16/32-bit register either downgrades the operation to a dword
   one (Intel-syntax ToDword templates with a non-XMM first operand) or
   errors out.  Returns 1 if OK, 0 after issuing as_bad.  */

static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((i.types[op].bitfield.reg16
	      || i.types[op].bitfield.reg32)
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* For Intel-syntax ToDword templates with a non-XMM first
	   operand, quietly switch to a dword operation instead of
	   rejecting the register mismatch.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.todword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to DWORD.  We don't want REX byte. */
	    i.suffix = LONG_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
4903
4904 static int
4905 check_word_reg (void)
4906 {
4907 int op;
4908 for (op = i.operands; --op >= 0;)
4909 /* Reject eight bit registers, except where the template requires
4910 them. (eg. movzb) */
4911 if (i.types[op].bitfield.reg8
4912 && (i.tm.operand_types[op].bitfield.reg16
4913 || i.tm.operand_types[op].bitfield.reg32
4914 || i.tm.operand_types[op].bitfield.acc))
4915 {
4916 as_bad (_("`%s%s' not allowed with `%s%c'"),
4917 register_prefix,
4918 i.op[op].regs->reg_name,
4919 i.tm.name,
4920 i.suffix);
4921 return 0;
4922 }
4923 /* Warn if the e prefix on a general reg is present. */
4924 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4925 && i.types[op].bitfield.reg32
4926 && (i.tm.operand_types[op].bitfield.reg16
4927 || i.tm.operand_types[op].bitfield.acc))
4928 {
4929 /* Prohibit these changes in the 64bit mode, since the
4930 lowering is more complicated. */
4931 if (flag_code == CODE_64BIT)
4932 {
4933 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4934 register_prefix, i.op[op].regs->reg_name,
4935 i.suffix);
4936 return 0;
4937 }
4938 else
4939 #if REGISTER_WARNINGS
4940 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4941 register_prefix,
4942 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4943 register_prefix,
4944 i.op[op].regs->reg_name,
4945 i.suffix);
4946 #endif
4947 }
4948 return 1;
4949 }
4950
4951 static int
4952 update_imm (unsigned int j)
4953 {
4954 i386_operand_type overlap = i.types[j];
4955 if ((overlap.bitfield.imm8
4956 || overlap.bitfield.imm8s
4957 || overlap.bitfield.imm16
4958 || overlap.bitfield.imm32
4959 || overlap.bitfield.imm32s
4960 || overlap.bitfield.imm64)
4961 && !operand_type_equal (&overlap, &imm8)
4962 && !operand_type_equal (&overlap, &imm8s)
4963 && !operand_type_equal (&overlap, &imm16)
4964 && !operand_type_equal (&overlap, &imm32)
4965 && !operand_type_equal (&overlap, &imm32s)
4966 && !operand_type_equal (&overlap, &imm64))
4967 {
4968 if (i.suffix)
4969 {
4970 i386_operand_type temp;
4971
4972 operand_type_set (&temp, 0);
4973 if (i.suffix == BYTE_MNEM_SUFFIX)
4974 {
4975 temp.bitfield.imm8 = overlap.bitfield.imm8;
4976 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4977 }
4978 else if (i.suffix == WORD_MNEM_SUFFIX)
4979 temp.bitfield.imm16 = overlap.bitfield.imm16;
4980 else if (i.suffix == QWORD_MNEM_SUFFIX)
4981 {
4982 temp.bitfield.imm64 = overlap.bitfield.imm64;
4983 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4984 }
4985 else
4986 temp.bitfield.imm32 = overlap.bitfield.imm32;
4987 overlap = temp;
4988 }
4989 else if (operand_type_equal (&overlap, &imm16_32_32s)
4990 || operand_type_equal (&overlap, &imm16_32)
4991 || operand_type_equal (&overlap, &imm16_32s))
4992 {
4993 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4994 overlap = imm16;
4995 else
4996 overlap = imm32s;
4997 }
4998 if (!operand_type_equal (&overlap, &imm8)
4999 && !operand_type_equal (&overlap, &imm8s)
5000 && !operand_type_equal (&overlap, &imm16)
5001 && !operand_type_equal (&overlap, &imm32)
5002 && !operand_type_equal (&overlap, &imm32s)
5003 && !operand_type_equal (&overlap, &imm64))
5004 {
5005 as_bad (_("no instruction mnemonic suffix given; "
5006 "can't determine immediate size"));
5007 return 0;
5008 }
5009 }
5010 i.types[j] = overlap;
5011
5012 return 1;
5013 }
5014
5015 static int
5016 finalize_imm (void)
5017 {
5018 unsigned int j, n;
5019
5020 /* Update the first 2 immediate operands. */
5021 n = i.operands > 2 ? 2 : i.operands;
5022 if (n)
5023 {
5024 for (j = 0; j < n; j++)
5025 if (update_imm (j) == 0)
5026 return 0;
5027
5028 /* The 3rd operand can't be immediate operand. */
5029 gas_assert (operand_type_check (i.types[2], imm) == 0);
5030 }
5031
5032 return 1;
5033 }
5034
5035 static int
5036 bad_implicit_operand (int xmm)
5037 {
5038 const char *ireg = xmm ? "xmm0" : "ymm0";
5039
5040 if (intel_syntax)
5041 as_bad (_("the last operand of `%s' must be `%s%s'"),
5042 i.tm.name, register_prefix, ireg);
5043 else
5044 as_bad (_("the first operand of `%s' must be `%s%s'"),
5045 i.tm.name, register_prefix, ireg);
5046 return 0;
5047 }
5048
/* Finish operand processing after template matching: materialize
   implicit operands (xmm0/ymm0 handling for SSE2AVX and FirstXmm0
   templates, the RegKludge duplicated register), encode short-form
   register operands into the base opcode, build the ModRM/SIB bytes
   when required, and add a segment-override prefix if an explicit,
   non-default segment was given.  Returns 1 on success, 0 (after
   as_bad) on failure.  */

static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE2AVX insns with a VEX.vvvv field: the operand list must be
	 grown by one, duplicating the destination into slot `dupl'.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (i.op[0].regs->reg_num != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* Both the inserted xmm0 and the duplicated destination
	     count as register operands.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
	duplicate:
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)));
      if (i.op[0].regs->reg_num != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit register and shift the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  /* Segment register goes in bits 3-5 of the opcode.  */
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5270
5271 static const seg_entry *
5272 build_modrm_byte (void)
5273 {
5274 const seg_entry *default_seg = 0;
5275 unsigned int source, dest;
5276 int vex_3_sources;
5277
5278 /* The first operand of instructions with VEX prefix and 3 sources
5279 must be VEX_Imm4. */
5280 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5281 if (vex_3_sources)
5282 {
5283 unsigned int nds, reg_slot;
5284 expressionS *exp;
5285
5286 if (i.tm.opcode_modifier.veximmext
5287 && i.tm.opcode_modifier.immext)
5288 {
5289 dest = i.operands - 2;
5290 gas_assert (dest == 3);
5291 }
5292 else
5293 dest = i.operands - 1;
5294 nds = dest - 1;
5295
5296 /* There are 2 kinds of instructions:
5297 1. 5 operands: 4 register operands or 3 register operands
5298 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5299 VexW0 or VexW1. The destination must be either XMM or YMM
5300 register.
5301 2. 4 operands: 4 register operands or 3 register operands
5302 plus 1 memory operand, VexXDS, and VexImmExt */
5303 gas_assert ((i.reg_operands == 4
5304 || (i.reg_operands == 3 && i.mem_operands == 1))
5305 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5306 && (i.tm.opcode_modifier.veximmext
5307 || (i.imm_operands == 1
5308 && i.types[0].bitfield.vec_imm4
5309 && (i.tm.opcode_modifier.vexw == VEXW0
5310 || i.tm.opcode_modifier.vexw == VEXW1)
5311 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5312 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5313
5314 if (i.imm_operands == 0)
5315 {
5316 /* When there is no immediate operand, generate an 8bit
5317 immediate operand to encode the first operand. */
5318 exp = &im_expressions[i.imm_operands++];
5319 i.op[i.operands].imms = exp;
5320 i.types[i.operands] = imm8;
5321 i.operands++;
5322 /* If VexW1 is set, the first operand is the source and
5323 the second operand is encoded in the immediate operand. */
5324 if (i.tm.opcode_modifier.vexw == VEXW1)
5325 {
5326 source = 0;
5327 reg_slot = 1;
5328 }
5329 else
5330 {
5331 source = 1;
5332 reg_slot = 0;
5333 }
5334
5335 /* FMA swaps REG and NDS. */
5336 if (i.tm.cpu_flags.bitfield.cpufma)
5337 {
5338 unsigned int tmp;
5339 tmp = reg_slot;
5340 reg_slot = nds;
5341 nds = tmp;
5342 }
5343
5344 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5345 &regxmm)
5346 || operand_type_equal (&i.tm.operand_types[reg_slot],
5347 &regymm));
5348 exp->X_op = O_constant;
5349 exp->X_add_number
5350 = ((i.op[reg_slot].regs->reg_num
5351 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5352 << 4);
5353 }
5354 else
5355 {
5356 unsigned int imm_slot;
5357
5358 if (i.tm.opcode_modifier.vexw == VEXW0)
5359 {
5360 /* If VexW0 is set, the third operand is the source and
5361 the second operand is encoded in the immediate
5362 operand. */
5363 source = 2;
5364 reg_slot = 1;
5365 }
5366 else
5367 {
5368 /* VexW1 is set, the second operand is the source and
5369 the third operand is encoded in the immediate
5370 operand. */
5371 source = 1;
5372 reg_slot = 2;
5373 }
5374
5375 if (i.tm.opcode_modifier.immext)
5376 {
5377 /* When ImmExt is set, the immdiate byte is the last
5378 operand. */
5379 imm_slot = i.operands - 1;
5380 source--;
5381 reg_slot--;
5382 }
5383 else
5384 {
5385 imm_slot = 0;
5386
5387 /* Turn on Imm8 so that output_imm will generate it. */
5388 i.types[imm_slot].bitfield.imm8 = 1;
5389 }
5390
5391 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5392 &regxmm)
5393 || operand_type_equal (&i.tm.operand_types[reg_slot],
5394 &regymm));
5395 i.op[imm_slot].imms->X_add_number
5396 |= ((i.op[reg_slot].regs->reg_num
5397 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5398 << 4);
5399 }
5400
5401 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5402 || operand_type_equal (&i.tm.operand_types[nds],
5403 &regymm));
5404 i.vex.register_specifier = i.op[nds].regs;
5405 }
5406 else
5407 source = dest = 0;
5408
5409 /* i.reg_operands MUST be the number of real register operands;
5410 implicit registers do not count. If there are 3 register
5411 operands, it must be a instruction with VexNDS. For a
5412 instruction with VexNDD, the destination register is encoded
5413 in VEX prefix. If there are 4 register operands, it must be
5414 a instruction with VEX prefix and 3 sources. */
5415 if (i.mem_operands == 0
5416 && ((i.reg_operands == 2
5417 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5418 || (i.reg_operands == 3
5419 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5420 || (i.reg_operands == 4 && vex_3_sources)))
5421 {
5422 switch (i.operands)
5423 {
5424 case 2:
5425 source = 0;
5426 break;
5427 case 3:
5428 /* When there are 3 operands, one of them may be immediate,
5429 which may be the first or the last operand. Otherwise,
5430 the first operand must be shift count register (cl) or it
5431 is an instruction with VexNDS. */
5432 gas_assert (i.imm_operands == 1
5433 || (i.imm_operands == 0
5434 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5435 || i.types[0].bitfield.shiftcount)));
5436 if (operand_type_check (i.types[0], imm)
5437 || i.types[0].bitfield.shiftcount)
5438 source = 1;
5439 else
5440 source = 0;
5441 break;
5442 case 4:
5443 /* When there are 4 operands, the first two must be 8bit
5444 immediate operands. The source operand will be the 3rd
5445 one.
5446
5447 For instructions with VexNDS, if the first operand
5448 an imm8, the source operand is the 2nd one. If the last
5449 operand is imm8, the source operand is the first one. */
5450 gas_assert ((i.imm_operands == 2
5451 && i.types[0].bitfield.imm8
5452 && i.types[1].bitfield.imm8)
5453 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5454 && i.imm_operands == 1
5455 && (i.types[0].bitfield.imm8
5456 || i.types[i.operands - 1].bitfield.imm8)));
5457 if (i.imm_operands == 2)
5458 source = 2;
5459 else
5460 {
5461 if (i.types[0].bitfield.imm8)
5462 source = 1;
5463 else
5464 source = 0;
5465 }
5466 break;
5467 case 5:
5468 break;
5469 default:
5470 abort ();
5471 }
5472
5473 if (!vex_3_sources)
5474 {
5475 dest = source + 1;
5476
5477 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5478 {
5479 /* For instructions with VexNDS, the register-only
5480 source operand must be 32/64bit integer, XMM or
5481 YMM register. It is encoded in VEX prefix. We
5482 need to clear RegMem bit before calling
5483 operand_type_equal. */
5484
5485 i386_operand_type op;
5486 unsigned int vvvv;
5487
5488 /* Check register-only source operand when two source
5489 operands are swapped. */
5490 if (!i.tm.operand_types[source].bitfield.baseindex
5491 && i.tm.operand_types[dest].bitfield.baseindex)
5492 {
5493 vvvv = source;
5494 source = dest;
5495 }
5496 else
5497 vvvv = dest;
5498
5499 op = i.tm.operand_types[vvvv];
5500 op.bitfield.regmem = 0;
5501 if ((dest + 1) >= i.operands
5502 || (op.bitfield.reg32 != 1
5503 && !op.bitfield.reg64 != 1
5504 && !operand_type_equal (&op, &regxmm)
5505 && !operand_type_equal (&op, &regymm)))
5506 abort ();
5507 i.vex.register_specifier = i.op[vvvv].regs;
5508 dest++;
5509 }
5510 }
5511
5512 i.rm.mode = 3;
5513 /* One of the register operands will be encoded in the i.tm.reg
5514 field, the other in the combined i.tm.mode and i.tm.regmem
5515 fields. If no form of this instruction supports a memory
5516 destination operand, then we assume the source operand may
5517 sometimes be a memory operand and so we need to store the
5518 destination in the i.rm.reg field. */
5519 if (!i.tm.operand_types[dest].bitfield.regmem
5520 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5521 {
5522 i.rm.reg = i.op[dest].regs->reg_num;
5523 i.rm.regmem = i.op[source].regs->reg_num;
5524 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5525 i.rex |= REX_R;
5526 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5527 i.rex |= REX_B;
5528 }
5529 else
5530 {
5531 i.rm.reg = i.op[source].regs->reg_num;
5532 i.rm.regmem = i.op[dest].regs->reg_num;
5533 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5534 i.rex |= REX_B;
5535 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5536 i.rex |= REX_R;
5537 }
5538 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5539 {
5540 if (!i.types[0].bitfield.control
5541 && !i.types[1].bitfield.control)
5542 abort ();
5543 i.rex &= ~(REX_R | REX_B);
5544 add_prefix (LOCK_PREFIX_OPCODE);
5545 }
5546 }
5547 else
5548 { /* If it's not 2 reg operands... */
5549 unsigned int mem;
5550
5551 if (i.mem_operands)
5552 {
5553 unsigned int fake_zero_displacement = 0;
5554 unsigned int op;
5555
5556 for (op = 0; op < i.operands; op++)
5557 if (operand_type_check (i.types[op], anymem))
5558 break;
5559 gas_assert (op < i.operands);
5560
5561 if (i.tm.opcode_modifier.vecsib)
5562 {
5563 if (i.index_reg->reg_num == RegEiz
5564 || i.index_reg->reg_num == RegRiz)
5565 abort ();
5566
5567 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5568 if (!i.base_reg)
5569 {
5570 i.sib.base = NO_BASE_REGISTER;
5571 i.sib.scale = i.log2_scale_factor;
5572 i.types[op].bitfield.disp8 = 0;
5573 i.types[op].bitfield.disp16 = 0;
5574 i.types[op].bitfield.disp64 = 0;
5575 if (flag_code != CODE_64BIT)
5576 {
5577 /* Must be 32 bit */
5578 i.types[op].bitfield.disp32 = 1;
5579 i.types[op].bitfield.disp32s = 0;
5580 }
5581 else
5582 {
5583 i.types[op].bitfield.disp32 = 0;
5584 i.types[op].bitfield.disp32s = 1;
5585 }
5586 }
5587 i.sib.index = i.index_reg->reg_num;
5588 if ((i.index_reg->reg_flags & RegRex) != 0)
5589 i.rex |= REX_X;
5590 }
5591
5592 default_seg = &ds;
5593
5594 if (i.base_reg == 0)
5595 {
5596 i.rm.mode = 0;
5597 if (!i.disp_operands)
5598 {
5599 fake_zero_displacement = 1;
5600 /* Instructions with VSIB byte need 32bit displacement
5601 if there is no base register. */
5602 if (i.tm.opcode_modifier.vecsib)
5603 i.types[op].bitfield.disp32 = 1;
5604 }
5605 if (i.index_reg == 0)
5606 {
5607 gas_assert (!i.tm.opcode_modifier.vecsib);
5608 /* Operand is just <disp> */
5609 if (flag_code == CODE_64BIT)
5610 {
5611 /* 64bit mode overwrites the 32bit absolute
5612 addressing by RIP relative addressing and
5613 absolute addressing is encoded by one of the
5614 redundant SIB forms. */
5615 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5616 i.sib.base = NO_BASE_REGISTER;
5617 i.sib.index = NO_INDEX_REGISTER;
5618 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5619 ? disp32s : disp32);
5620 }
5621 else if ((flag_code == CODE_16BIT)
5622 ^ (i.prefix[ADDR_PREFIX] != 0))
5623 {
5624 i.rm.regmem = NO_BASE_REGISTER_16;
5625 i.types[op] = disp16;
5626 }
5627 else
5628 {
5629 i.rm.regmem = NO_BASE_REGISTER;
5630 i.types[op] = disp32;
5631 }
5632 }
5633 else if (!i.tm.opcode_modifier.vecsib)
5634 {
5635 /* !i.base_reg && i.index_reg */
5636 if (i.index_reg->reg_num == RegEiz
5637 || i.index_reg->reg_num == RegRiz)
5638 i.sib.index = NO_INDEX_REGISTER;
5639 else
5640 i.sib.index = i.index_reg->reg_num;
5641 i.sib.base = NO_BASE_REGISTER;
5642 i.sib.scale = i.log2_scale_factor;
5643 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5644 i.types[op].bitfield.disp8 = 0;
5645 i.types[op].bitfield.disp16 = 0;
5646 i.types[op].bitfield.disp64 = 0;
5647 if (flag_code != CODE_64BIT)
5648 {
5649 /* Must be 32 bit */
5650 i.types[op].bitfield.disp32 = 1;
5651 i.types[op].bitfield.disp32s = 0;
5652 }
5653 else
5654 {
5655 i.types[op].bitfield.disp32 = 0;
5656 i.types[op].bitfield.disp32s = 1;
5657 }
5658 if ((i.index_reg->reg_flags & RegRex) != 0)
5659 i.rex |= REX_X;
5660 }
5661 }
5662 /* RIP addressing for 64bit mode. */
5663 else if (i.base_reg->reg_num == RegRip ||
5664 i.base_reg->reg_num == RegEip)
5665 {
5666 gas_assert (!i.tm.opcode_modifier.vecsib);
5667 i.rm.regmem = NO_BASE_REGISTER;
5668 i.types[op].bitfield.disp8 = 0;
5669 i.types[op].bitfield.disp16 = 0;
5670 i.types[op].bitfield.disp32 = 0;
5671 i.types[op].bitfield.disp32s = 1;
5672 i.types[op].bitfield.disp64 = 0;
5673 i.flags[op] |= Operand_PCrel;
5674 if (! i.disp_operands)
5675 fake_zero_displacement = 1;
5676 }
5677 else if (i.base_reg->reg_type.bitfield.reg16)
5678 {
5679 gas_assert (!i.tm.opcode_modifier.vecsib);
5680 switch (i.base_reg->reg_num)
5681 {
5682 case 3: /* (%bx) */
5683 if (i.index_reg == 0)
5684 i.rm.regmem = 7;
5685 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5686 i.rm.regmem = i.index_reg->reg_num - 6;
5687 break;
5688 case 5: /* (%bp) */
5689 default_seg = &ss;
5690 if (i.index_reg == 0)
5691 {
5692 i.rm.regmem = 6;
5693 if (operand_type_check (i.types[op], disp) == 0)
5694 {
5695 /* fake (%bp) into 0(%bp) */
5696 i.types[op].bitfield.disp8 = 1;
5697 fake_zero_displacement = 1;
5698 }
5699 }
5700 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5701 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5702 break;
5703 default: /* (%si) -> 4 or (%di) -> 5 */
5704 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5705 }
5706 i.rm.mode = mode_from_disp_size (i.types[op]);
5707 }
5708 else /* i.base_reg and 32/64 bit mode */
5709 {
5710 if (flag_code == CODE_64BIT
5711 && operand_type_check (i.types[op], disp))
5712 {
5713 i386_operand_type temp;
5714 operand_type_set (&temp, 0);
5715 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5716 i.types[op] = temp;
5717 if (i.prefix[ADDR_PREFIX] == 0)
5718 i.types[op].bitfield.disp32s = 1;
5719 else
5720 i.types[op].bitfield.disp32 = 1;
5721 }
5722
5723 if (!i.tm.opcode_modifier.vecsib)
5724 i.rm.regmem = i.base_reg->reg_num;
5725 if ((i.base_reg->reg_flags & RegRex) != 0)
5726 i.rex |= REX_B;
5727 i.sib.base = i.base_reg->reg_num;
5728 /* x86-64 ignores REX prefix bit here to avoid decoder
5729 complications. */
5730 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5731 {
5732 default_seg = &ss;
5733 if (i.disp_operands == 0)
5734 {
5735 fake_zero_displacement = 1;
5736 i.types[op].bitfield.disp8 = 1;
5737 }
5738 }
5739 else if (i.base_reg->reg_num == ESP_REG_NUM)
5740 {
5741 default_seg = &ss;
5742 }
5743 i.sib.scale = i.log2_scale_factor;
5744 if (i.index_reg == 0)
5745 {
5746 gas_assert (!i.tm.opcode_modifier.vecsib);
5747 /* <disp>(%esp) becomes two byte modrm with no index
5748 register. We've already stored the code for esp
5749 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5750 Any base register besides %esp will not use the
5751 extra modrm byte. */
5752 i.sib.index = NO_INDEX_REGISTER;
5753 }
5754 else if (!i.tm.opcode_modifier.vecsib)
5755 {
5756 if (i.index_reg->reg_num == RegEiz
5757 || i.index_reg->reg_num == RegRiz)
5758 i.sib.index = NO_INDEX_REGISTER;
5759 else
5760 i.sib.index = i.index_reg->reg_num;
5761 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5762 if ((i.index_reg->reg_flags & RegRex) != 0)
5763 i.rex |= REX_X;
5764 }
5765
5766 if (i.disp_operands
5767 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5768 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5769 i.rm.mode = 0;
5770 else
5771 {
5772 if (!fake_zero_displacement
5773 && !i.disp_operands
5774 && i.disp_encoding)
5775 {
5776 fake_zero_displacement = 1;
5777 if (i.disp_encoding == disp_encoding_8bit)
5778 i.types[op].bitfield.disp8 = 1;
5779 else
5780 i.types[op].bitfield.disp32 = 1;
5781 }
5782 i.rm.mode = mode_from_disp_size (i.types[op]);
5783 }
5784 }
5785
5786 if (fake_zero_displacement)
5787 {
5788 /* Fakes a zero displacement assuming that i.types[op]
5789 holds the correct displacement size. */
5790 expressionS *exp;
5791
5792 gas_assert (i.op[op].disps == 0);
5793 exp = &disp_expressions[i.disp_operands++];
5794 i.op[op].disps = exp;
5795 exp->X_op = O_constant;
5796 exp->X_add_number = 0;
5797 exp->X_add_symbol = (symbolS *) 0;
5798 exp->X_op_symbol = (symbolS *) 0;
5799 }
5800
5801 mem = op;
5802 }
5803 else
5804 mem = ~0;
5805
5806 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5807 {
5808 if (operand_type_check (i.types[0], imm))
5809 i.vex.register_specifier = NULL;
5810 else
5811 {
5812 /* VEX.vvvv encodes one of the sources when the first
5813 operand is not an immediate. */
5814 if (i.tm.opcode_modifier.vexw == VEXW0)
5815 i.vex.register_specifier = i.op[0].regs;
5816 else
5817 i.vex.register_specifier = i.op[1].regs;
5818 }
5819
5820 /* Destination is a XMM register encoded in the ModRM.reg
5821 and VEX.R bit. */
5822 i.rm.reg = i.op[2].regs->reg_num;
5823 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5824 i.rex |= REX_R;
5825
5826 /* ModRM.rm and VEX.B encodes the other source. */
5827 if (!i.mem_operands)
5828 {
5829 i.rm.mode = 3;
5830
5831 if (i.tm.opcode_modifier.vexw == VEXW0)
5832 i.rm.regmem = i.op[1].regs->reg_num;
5833 else
5834 i.rm.regmem = i.op[0].regs->reg_num;
5835
5836 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5837 i.rex |= REX_B;
5838 }
5839 }
5840 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5841 {
5842 i.vex.register_specifier = i.op[2].regs;
5843 if (!i.mem_operands)
5844 {
5845 i.rm.mode = 3;
5846 i.rm.regmem = i.op[1].regs->reg_num;
5847 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5848 i.rex |= REX_B;
5849 }
5850 }
5851 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5852 (if any) based on i.tm.extension_opcode. Again, we must be
5853 careful to make sure that segment/control/debug/test/MMX
5854 registers are coded into the i.rm.reg field. */
5855 else if (i.reg_operands)
5856 {
5857 unsigned int op;
5858 unsigned int vex_reg = ~0;
5859
5860 for (op = 0; op < i.operands; op++)
5861 if (i.types[op].bitfield.reg8
5862 || i.types[op].bitfield.reg16
5863 || i.types[op].bitfield.reg32
5864 || i.types[op].bitfield.reg64
5865 || i.types[op].bitfield.regmmx
5866 || i.types[op].bitfield.regxmm
5867 || i.types[op].bitfield.regymm
5868 || i.types[op].bitfield.sreg2
5869 || i.types[op].bitfield.sreg3
5870 || i.types[op].bitfield.control
5871 || i.types[op].bitfield.debug
5872 || i.types[op].bitfield.test)
5873 break;
5874
5875 if (vex_3_sources)
5876 op = dest;
5877 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5878 {
5879 /* For instructions with VexNDS, the register-only
5880 source operand is encoded in VEX prefix. */
5881 gas_assert (mem != (unsigned int) ~0);
5882
5883 if (op > mem)
5884 {
5885 vex_reg = op++;
5886 gas_assert (op < i.operands);
5887 }
5888 else
5889 {
5890 /* Check register-only source operand when two source
5891 operands are swapped. */
5892 if (!i.tm.operand_types[op].bitfield.baseindex
5893 && i.tm.operand_types[op + 1].bitfield.baseindex)
5894 {
5895 vex_reg = op;
5896 op += 2;
5897 gas_assert (mem == (vex_reg + 1)
5898 && op < i.operands);
5899 }
5900 else
5901 {
5902 vex_reg = op + 1;
5903 gas_assert (vex_reg < i.operands);
5904 }
5905 }
5906 }
5907 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5908 {
5909 /* For instructions with VexNDD, the register destination
5910 is encoded in VEX prefix. */
5911 if (i.mem_operands == 0)
5912 {
5913 /* There is no memory operand. */
5914 gas_assert ((op + 2) == i.operands);
5915 vex_reg = op + 1;
5916 }
5917 else
5918 {
5919 /* There are only 2 operands. */
5920 gas_assert (op < 2 && i.operands == 2);
5921 vex_reg = 1;
5922 }
5923 }
5924 else
5925 gas_assert (op < i.operands);
5926
5927 if (vex_reg != (unsigned int) ~0)
5928 {
5929 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5930
5931 if (type->bitfield.reg32 != 1
5932 && type->bitfield.reg64 != 1
5933 && !operand_type_equal (type, &regxmm)
5934 && !operand_type_equal (type, &regymm))
5935 abort ();
5936
5937 i.vex.register_specifier = i.op[vex_reg].regs;
5938 }
5939
5940 /* Don't set OP operand twice. */
5941 if (vex_reg != op)
5942 {
5943 /* If there is an extension opcode to put here, the
5944 register number must be put into the regmem field. */
5945 if (i.tm.extension_opcode != None)
5946 {
5947 i.rm.regmem = i.op[op].regs->reg_num;
5948 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5949 i.rex |= REX_B;
5950 }
5951 else
5952 {
5953 i.rm.reg = i.op[op].regs->reg_num;
5954 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5955 i.rex |= REX_R;
5956 }
5957 }
5958
5959 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5960 must set it to 3 to indicate this is a register operand
5961 in the regmem field. */
5962 if (!i.mem_operands)
5963 i.rm.mode = 3;
5964 }
5965
5966 /* Fill in i.rm.reg field with extension opcode (if any). */
5967 if (i.tm.extension_opcode != None)
5968 i.rm.reg = i.tm.extension_opcode;
5969 }
5970 return default_seg;
5971 }
5972
/* Output a relaxable (conditional or unconditional) branch.  The
   displacement is always symbolic at this point, so we end the current
   frag and set up a machine-dependent variant frag for the relaxation
   pass (md_estimate_size_before_relax / md_convert_frag) to resolve.
   Reads the global instruction template `i'; emits any data/segment/REX
   prefixes before the opcode byte.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit .d32 disp encoding forces the BIG (non-relaxable) form.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The operand-size prefix toggles 16 vs 32 bit displacement.  */
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* Any prefixes we did not consume above cannot be represented in the
     relaxed branch encoding.  */
  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol;  End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    /* Pre-386 CPUs have no 16/32-bit conditional jump form; relax via
       a jump-around sequence instead.  */
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
6050
/* Output a non-relaxable jump with a fixed-size displacement: either a
   one-byte form (loop/jecxz family, opcode_modifier.jumpbyte) or a
   16/32-bit pc-relative jump.  A fixup is emitted for the displacement
   expression rather than relaxing as output_branch does.  */

static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
        {
          FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
          i.prefixes -= 1;
        }
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
          || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
        {
          FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
          i.prefixes--;
        }
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
        code16 = CODE16;

      if (i.prefix[DATA_PREFIX] != 0)
        {
          FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
          i.prefixes -= 1;
          /* Operand-size prefix toggles the displacement width.  */
          code16 ^= CODE16;
        }

      size = 4;
      if (code16)
        size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      /* High opcode byte first.  */
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
                      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6125
/* Output a direct inter-segment (far) jump or call: opcode, then the
   16/32-bit offset (operand 1), then the 2-byte segment selector
   (operand 0).  The segment must be an absolute constant; the offset
   may be a constant or an expression needing a fixup.  */

static void
output_interseg_jump (void)
{
  char *p;
  int size;
  int prefix;
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* Operand-size prefix toggles the offset width.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* A 16-bit offset must fit in a word, signed or unsigned.  */
      if (size == 2
          && !fits_in_unsigned_word (n)
          && !fits_in_signed_word (n))
        {
          as_bad (_("16-bit jump out of range"));
          return;
        }
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
                 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
            i.tm.name);
  /* The selector always occupies the 2 bytes after the offset.  */
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6189
/* Emit the assembled instruction in the global `i' into the current
   frag.  Jumps are dispatched to the specialized emitters above; all
   other instructions are written here in order: legacy or VEX prefixes,
   opcode bytes (high byte first), ModRM and SIB, then displacement and
   immediate operands via output_disp/output_imm.  */

static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
           || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Since the VEX prefix contains the implicit prefix, we don't
         need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex)
        {
          switch (i.tm.opcode_length)
            {
            case 3:
              /* The top opcode byte of a 3-byte opcode may really be a
                 mandatory prefix (e.g. 0x66/0xf2/0xf3) to emit first.  */
              if (i.tm.base_opcode & 0xff000000)
                {
                  prefix = (i.tm.base_opcode >> 24) & 0xff;
                  goto check_prefix;
                }
              break;
            case 2:
              if ((i.tm.base_opcode & 0xff0000) != 0)
                {
                  prefix = (i.tm.base_opcode >> 16) & 0xff;
                  if (i.tm.cpu_flags.bitfield.cpupadlock)
                    {
check_prefix:
                      /* PadLock insns use 0xf3 as part of the opcode;
                         don't emit it twice if the user already gave a
                         rep prefix.  */
                      if (prefix != REPE_PREFIX_OPCODE
                          || (i.prefix[REP_PREFIX]
                              != REPE_PREFIX_OPCODE))
                        add_prefix (prefix);
                    }
                  else
                    add_prefix (prefix);
                }
              break;
            case 1:
              break;
            default:
              abort ();
            }

          /* The prefix bytes.  */
          for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
            if (*q)
              FRAG_APPEND_1_CHAR (*q);
        }

      if (i.tm.opcode_modifier.vex)
        {
          for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
            if (*q)
              switch (j)
                {
                case REX_PREFIX:
                  /* REX byte is encoded in VEX prefix.  */
                  break;
                case SEG_PREFIX:
                case ADDR_PREFIX:
                  FRAG_APPEND_1_CHAR (*q);
                  break;
                default:
                  /* There should be no other prefixes for instructions
                     with VEX prefix.  */
                  abort ();
                }

          /* Now the VEX prefix.  */
          p = frag_more (i.vex.length);
          for (j = 0; j < i.vex.length; j++)
            p[j] = i.vex.bytes[j];
        }

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
        {
          FRAG_APPEND_1_CHAR (i.tm.base_opcode);
        }
      else
        {
          switch (i.tm.opcode_length)
            {
            case 3:
              p = frag_more (3);
              *p++ = (i.tm.base_opcode >> 16) & 0xff;
              break;
            case 2:
              p = frag_more (2);
              break;
            default:
              abort ();
              break;
            }

          /* Put out high byte first: can't use md_number_to_chars!  */
          *p++ = (i.tm.base_opcode >> 8) & 0xff;
          *p = i.tm.base_opcode & 0xff;
        }

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
        {
          FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
                               | i.rm.reg << 3
                               | i.rm.mode << 6));
          /* If i.rm.regmem == ESP (4)
             && i.rm.mode != (Register mode)
             && not 16 bit
             ==> need second modrm byte.  */
          if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
              && i.rm.mode != 3
              && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
            FRAG_APPEND_1_CHAR ((i.sib.base << 0
                                 | i.sib.index << 3
                                 | i.sib.scale << 6));
        }

      if (i.disp_operands)
        output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
        output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
6344
6345 /* Return the size of the displacement operand N. */
6346
6347 static int
6348 disp_size (unsigned int n)
6349 {
6350 int size = 4;
6351 if (i.types[n].bitfield.disp64)
6352 size = 8;
6353 else if (i.types[n].bitfield.disp8)
6354 size = 1;
6355 else if (i.types[n].bitfield.disp16)
6356 size = 2;
6357 return size;
6358 }
6359
6360 /* Return the size of the immediate operand N. */
6361
6362 static int
6363 imm_size (unsigned int n)
6364 {
6365 int size = 4;
6366 if (i.types[n].bitfield.imm64)
6367 size = 8;
6368 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6369 size = 1;
6370 else if (i.types[n].bitfield.imm16)
6371 size = 2;
6372 return size;
6373 }
6374
6375 static void
6376 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6377 {
6378 char *p;
6379 unsigned int n;
6380
6381 for (n = 0; n < i.operands; n++)
6382 {
6383 if (operand_type_check (i.types[n], disp))
6384 {
6385 if (i.op[n].disps->X_op == O_constant)
6386 {
6387 int size = disp_size (n);
6388 offsetT val;
6389
6390 val = offset_in_range (i.op[n].disps->X_add_number,
6391 size);
6392 p = frag_more (size);
6393 md_number_to_chars (p, val, size);
6394 }
6395 else
6396 {
6397 enum bfd_reloc_code_real reloc_type;
6398 int size = disp_size (n);
6399 int sign = i.types[n].bitfield.disp32s;
6400 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6401
6402 /* We can't have 8 bit displacement here. */
6403 gas_assert (!i.types[n].bitfield.disp8);
6404
6405 /* The PC relative address is computed relative
6406 to the instruction boundary, so in case immediate
6407 fields follows, we need to adjust the value. */
6408 if (pcrel && i.imm_operands)
6409 {
6410 unsigned int n1;
6411 int sz = 0;
6412
6413 for (n1 = 0; n1 < i.operands; n1++)
6414 if (operand_type_check (i.types[n1], imm))
6415 {
6416 /* Only one immediate is allowed for PC
6417 relative address. */
6418 gas_assert (sz == 0);
6419 sz = imm_size (n1);
6420 i.op[n].disps->X_add_number -= sz;
6421 }
6422 /* We should find the immediate. */
6423 gas_assert (sz != 0);
6424 }
6425
6426 p = frag_more (size);
6427 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6428 if (GOT_symbol
6429 && GOT_symbol == i.op[n].disps->X_add_symbol
6430 && (((reloc_type == BFD_RELOC_32
6431 || reloc_type == BFD_RELOC_X86_64_32S
6432 || (reloc_type == BFD_RELOC_64
6433 && object_64bit))
6434 && (i.op[n].disps->X_op == O_symbol
6435 || (i.op[n].disps->X_op == O_add
6436 && ((symbol_get_value_expression
6437 (i.op[n].disps->X_op_symbol)->X_op)
6438 == O_subtract))))
6439 || reloc_type == BFD_RELOC_32_PCREL))
6440 {
6441 offsetT add;
6442
6443 if (insn_start_frag == frag_now)
6444 add = (p - frag_now->fr_literal) - insn_start_off;
6445 else
6446 {
6447 fragS *fr;
6448
6449 add = insn_start_frag->fr_fix - insn_start_off;
6450 for (fr = insn_start_frag->fr_next;
6451 fr && fr != frag_now; fr = fr->fr_next)
6452 add += fr->fr_fix;
6453 add += p - frag_now->fr_literal;
6454 }
6455
6456 if (!object_64bit)
6457 {
6458 reloc_type = BFD_RELOC_386_GOTPC;
6459 i.op[n].imms->X_add_number += add;
6460 }
6461 else if (reloc_type == BFD_RELOC_64)
6462 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6463 else
6464 /* Don't do the adjustment for x86-64, as there
6465 the pcrel addressing is relative to the _next_
6466 insn, and that is taken care of in other code. */
6467 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6468 }
6469 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6470 i.op[n].disps, pcrel, reloc_type);
6471 }
6472 }
6473 }
6474 }
6475
/* Emit the immediate operand(s) of the current instruction.  Constant
   immediates are written directly; symbolic ones get a fixup, with the
   _GLOBAL_OFFSET_TABLE_ special case converted to a GOTPC relocation
   whose addend is adjusted by the field's offset from the start of the
   instruction (see the long comment below).  INSN_START_FRAG and
   INSN_START_OFF locate the instruction start for that adjustment.  */

static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], imm))
        {
          if (i.op[n].imms->X_op == O_constant)
            {
              int size = imm_size (n);
              offsetT val;

              val = offset_in_range (i.op[n].imms->X_add_number,
                                     size);
              p = frag_more (size);
              md_number_to_chars (p, val, size);
            }
          else
            {
              /* Not absolute_section.
                 Need a 32-bit fixup (don't support 8bit
                 non-absolute imms).  Try to support other
                 sizes ...  */
              enum bfd_reloc_code_real reloc_type;
              int size = imm_size (n);
              int sign;

              /* imm32s with a quad-size insn means the value must be
                 sign-extended to 64 bits by the CPU.  */
              if (i.types[n].bitfield.imm32s
                  && (i.suffix == QWORD_MNEM_SUFFIX
                      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
                sign = 1;
              else
                sign = 0;

              p = frag_more (size);
              reloc_type = reloc (size, 0, sign, i.reloc[n]);

              /* This is tough to explain.  We end up with this one if we
               * have operands that look like
               * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
               * obtain the absolute address of the GOT, and it is strongly
               * preferable from a performance point of view to avoid using
               * a runtime relocation for this.  The actual sequence of
               * instructions often look something like:
               *
               *	call	.L66
               * .L66:
               *	popl	%ebx
               *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
               *
               * The call and pop essentially return the absolute address
               * of the label .L66 and store it in %ebx.  The linker itself
               * will ultimately change the first operand of the addl so
               * that %ebx points to the GOT, but to keep things simple, the
               * .o file must have this operand set so that it generates not
               * the absolute address of .L66, but the absolute address of
               * itself.  This allows the linker itself simply treat a GOTPC
               * relocation as asking for a pcrel offset to the GOT to be
               * added in, and the addend of the relocation is stored in the
               * operand field for the instruction itself.
               *
               * Our job here is to fix the operand so that it would add
               * the correct offset so that %ebx would point to itself.  The
               * thing that is tricky is that .-.L66 will point to the
               * beginning of the instruction, so we need to further modify
               * the operand so that it will point to itself.  There are
               * other cases where you have something like:
               *
               *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
               *
               * and here no correction would be required.  Internally in
               * the assembler we treat operands of this form as not being
               * pcrel since the '.' is explicitly mentioned, and I wonder
               * whether it would simplify matters to do it this way.  Who
               * knows.  In earlier versions of the PIC patches, the
               * pcrel_adjust field was used to store the correction, but
               * since the expression is not pcrel, I felt it would be
               * confusing to do it this way.  */

              if ((reloc_type == BFD_RELOC_32
                   || reloc_type == BFD_RELOC_X86_64_32S
                   || reloc_type == BFD_RELOC_64)
                  && GOT_symbol
                  && GOT_symbol == i.op[n].imms->X_add_symbol
                  && (i.op[n].imms->X_op == O_symbol
                      || (i.op[n].imms->X_op == O_add
                          && ((symbol_get_value_expression
                               (i.op[n].imms->X_op_symbol)->X_op)
                              == O_subtract))))
                {
                  offsetT add;

                  /* Offset of this field from the instruction start,
                     accumulated across frags if necessary.  */
                  if (insn_start_frag == frag_now)
                    add = (p - frag_now->fr_literal) - insn_start_off;
                  else
                    {
                      fragS *fr;

                      add = insn_start_frag->fr_fix - insn_start_off;
                      for (fr = insn_start_frag->fr_next;
                           fr && fr != frag_now; fr = fr->fr_next)
                        add += fr->fr_fix;
                      add += p - frag_now->fr_literal;
                    }

                  if (!object_64bit)
                    reloc_type = BFD_RELOC_386_GOTPC;
                  else if (size == 4)
                    reloc_type = BFD_RELOC_X86_64_GOTPC32;
                  else if (size == 8)
                    reloc_type = BFD_RELOC_X86_64_GOTPC64;
                  i.op[n].imms->X_add_number += add;
                }
              fix_new_exp (frag_now, p - frag_now->fr_literal, size,
                           i.op[n].imms, 0, reloc_type);
            }
        }
    }
}
6598 \f
6599 /* x86_cons_fix_new is called via the expression parsing code when a
6600 reloc is needed. We use this hook to get the correct .got reloc. */
6601 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6602 static int cons_sign = -1;
6603
6604 void
6605 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6606 expressionS *exp)
6607 {
6608 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6609
6610 got_reloc = NO_RELOC;
6611
6612 #ifdef TE_PE
6613 if (exp->X_op == O_secrel)
6614 {
6615 exp->X_op = O_symbol;
6616 r = BFD_RELOC_32_SECREL;
6617 }
6618 #endif
6619
6620 fix_new_exp (frag, off, len, exp, 0, r);
6621 }
6622
6623 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6624 || defined (LEX_AT)
6625 # define lex_got(reloc, adjust, types) NULL
6626 #else
6627 /* Parse operands of the form
6628 <symbol>@GOTOFF+<nnn>
6629 and similar .plt or .got references.
6630
6631 If we find one, set up the correct relocation in RELOC and copy the
6632 input string, minus the `@GOTOFF' into a malloc'd buffer for
6633 parsing by the calling routine. Return this buffer, and if ADJUST
6634 is non-null set it to the length of the string we removed from the
6635 input line. Otherwise return NULL. */
6636 static char *
6637 lex_got (enum bfd_reloc_code_real *rel,
6638 int *adjust,
6639 i386_operand_type *types)
6640 {
6641 /* Some of the relocations depend on the size of what field is to
6642 be relocated. But in our callers i386_immediate and i386_displacement
6643 we don't yet know the operand size (this will be set by insn
6644 matching). Hence we record the word32 relocation here,
6645 and adjust the reloc according to the real size in reloc(). */
6646 static const struct {
6647 const char *str;
6648 int len;
6649 const enum bfd_reloc_code_real rel[2];
6650 const i386_operand_type types64;
6651 } gotrel[] = {
6652 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6653 BFD_RELOC_X86_64_PLTOFF64 },
6654 OPERAND_TYPE_IMM64 },
6655 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6656 BFD_RELOC_X86_64_PLT32 },
6657 OPERAND_TYPE_IMM32_32S_DISP32 },
6658 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6659 BFD_RELOC_X86_64_GOTPLT64 },
6660 OPERAND_TYPE_IMM64_DISP64 },
6661 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6662 BFD_RELOC_X86_64_GOTOFF64 },
6663 OPERAND_TYPE_IMM64_DISP64 },
6664 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6665 BFD_RELOC_X86_64_GOTPCREL },
6666 OPERAND_TYPE_IMM32_32S_DISP32 },
6667 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6668 BFD_RELOC_X86_64_TLSGD },
6669 OPERAND_TYPE_IMM32_32S_DISP32 },
6670 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6671 _dummy_first_bfd_reloc_code_real },
6672 OPERAND_TYPE_NONE },
6673 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6674 BFD_RELOC_X86_64_TLSLD },
6675 OPERAND_TYPE_IMM32_32S_DISP32 },
6676 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6677 BFD_RELOC_X86_64_GOTTPOFF },
6678 OPERAND_TYPE_IMM32_32S_DISP32 },
6679 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6680 BFD_RELOC_X86_64_TPOFF32 },
6681 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6682 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6683 _dummy_first_bfd_reloc_code_real },
6684 OPERAND_TYPE_NONE },
6685 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6686 BFD_RELOC_X86_64_DTPOFF32 },
6687 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6688 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6689 _dummy_first_bfd_reloc_code_real },
6690 OPERAND_TYPE_NONE },
6691 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6692 _dummy_first_bfd_reloc_code_real },
6693 OPERAND_TYPE_NONE },
6694 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6695 BFD_RELOC_X86_64_GOT32 },
6696 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6697 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6698 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6699 OPERAND_TYPE_IMM32_32S_DISP32 },
6700 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6701 BFD_RELOC_X86_64_TLSDESC_CALL },
6702 OPERAND_TYPE_IMM32_32S_DISP32 },
6703 };
6704 char *cp;
6705 unsigned int j;
6706
6707 #if defined (OBJ_MAYBE_ELF)
6708 if (!IS_ELF)
6709 return NULL;
6710 #endif
6711
6712 for (cp = input_line_pointer; *cp != '@'; cp++)
6713 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6714 return NULL;
6715
6716 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6717 {
6718 int len = gotrel[j].len;
6719 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6720 {
6721 if (gotrel[j].rel[object_64bit] != 0)
6722 {
6723 int first, second;
6724 char *tmpbuf, *past_reloc;
6725
6726 *rel = gotrel[j].rel[object_64bit];
6727 if (adjust)
6728 *adjust = len;
6729
6730 if (types)
6731 {
6732 if (flag_code != CODE_64BIT)
6733 {
6734 types->bitfield.imm32 = 1;
6735 types->bitfield.disp32 = 1;
6736 }
6737 else
6738 *types = gotrel[j].types64;
6739 }
6740
6741 if (GOT_symbol == NULL)
6742 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6743
6744 /* The length of the first part of our input line. */
6745 first = cp - input_line_pointer;
6746
6747 /* The second part goes from after the reloc token until
6748 (and including) an end_of_line char or comma. */
6749 past_reloc = cp + 1 + len;
6750 cp = past_reloc;
6751 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6752 ++cp;
6753 second = cp + 1 - past_reloc;
6754
6755 /* Allocate and copy string. The trailing NUL shouldn't
6756 be necessary, but be safe. */
6757 tmpbuf = (char *) xmalloc (first + second + 2);
6758 memcpy (tmpbuf, input_line_pointer, first);
6759 if (second != 0 && *past_reloc != ' ')
6760 /* Replace the relocation token with ' ', so that
6761 errors like foo@GOTOFF1 will be detected. */
6762 tmpbuf[first++] = ' ';
6763 memcpy (tmpbuf + first, past_reloc, second);
6764 tmpbuf[first + second] = '\0';
6765 return tmpbuf;
6766 }
6767
6768 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6769 gotrel[j].str, 1 << (5 + object_64bit));
6770 return NULL;
6771 }
6772 }
6773
6774 /* Might be a symbol version string. Don't as_bad here. */
6775 return NULL;
6776 }
6777 #endif
6778
/* Parse one expression for a data directive (e.g. .long/.quad) into
   EXP, for a datum of SIZE bytes.  For 4-byte data (and 8-byte data
   in 64-bit objects) this also handles @GOTOFF-style relocation
   specifiers via lex_got.  NOTE(review): presumably installed as the
   TC_PARSE_CONS_EXPRESSION hook — confirm in tc-i386.h.  */
void
x86_cons (expressionS *exp, int size)
{
  /* Negate intel_syntax while the expression is parsed; restored
     below.  NOTE(review): a negative value evidently tells the
     expression parser to defer Intel-specific handling until the
     i386_intel_simplify call at the end — confirm against the other
     users of intel_syntax.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      /* lex_got returns a copy of the line with the reloc specifier
	 stripped (or NULL if none), recording the reloc in got_reloc
	 and the specifier length in adjust.  */
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  /* With a reloc specifier present, only a genuine symbolic
	     expression makes sense; reject everything else.  */
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      /* Temporarily NUL-terminate the line so the diagnostic
		 shows only the offending expression.  */
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  /* Restore intel_syntax to its original sign.  */
  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);
}
6829
6830 static void
6831 signed_cons (int size)
6832 {
6833 if (flag_code == CODE_64BIT)
6834 cons_sign = 1;
6835 cons (size);
6836 cons_sign = -1;
6837 }
6838
6839 #ifdef TE_PE
6840 static void
6841 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6842 {
6843 expressionS exp;
6844
6845 do
6846 {
6847 expression (&exp);
6848 if (exp.X_op == O_symbol)
6849 exp.X_op = O_secrel;
6850
6851 emit_expr (&exp, 4);
6852 }
6853 while (*input_line_pointer++ == ',');
6854
6855 input_line_pointer--;
6856 demand_empty_rest_of_line ();
6857 }
6858 #endif
6859
/* Parse an immediate operand beginning at IMM_START (the text after
   the '$' prefix in AT&T syntax — see i386_att_operand).  The parsed
   expression is stored in the global insn template `i' for the
   current operand.  Returns 1 on success, 0 on failure (an error has
   already been reported).  */
static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start by allowing every operand type; lex_got may narrow this.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Parse the operand text as if it were the input line.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* Strip any @GOTOFF-style specifier: the reloc is recorded in
     i.reloc[] and a copy of the line without the specifier is
     returned (NULL if no specifier was present).  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant or register makes no sense together with a
	 reloc specifier; mark the expression invalid so the
	 finalization step reports it.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
6908
/* Validate and classify a parsed immediate expression EXP (from
   segment EXP_SEG) against the permissible operand TYPES.  IMM_START
   is used only for diagnostics and may be NULL to suppress them.
   Returns 1 on success, 0 on failure.  */
static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  The mask test checks that the
	 value fits in 32 bits before folding it into the signed
	 32-bit range.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      /* a.out cannot represent relocations against arbitrary
	 sections here.  */
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp->X_op == O_register)
    {
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
6965
6966 static char *
6967 i386_scale (char *scale)
6968 {
6969 offsetT val;
6970 char *save = input_line_pointer;
6971
6972 input_line_pointer = scale;
6973 val = get_absolute_expression ();
6974
6975 switch (val)
6976 {
6977 case 1:
6978 i.log2_scale_factor = 0;
6979 break;
6980 case 2:
6981 i.log2_scale_factor = 1;
6982 break;
6983 case 4:
6984 i.log2_scale_factor = 2;
6985 break;
6986 case 8:
6987 i.log2_scale_factor = 3;
6988 break;
6989 default:
6990 {
6991 char sep = *input_line_pointer;
6992
6993 *input_line_pointer = '\0';
6994 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6995 scale);
6996 *input_line_pointer = sep;
6997 input_line_pointer = save;
6998 return NULL;
6999 }
7000 }
7001 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7002 {
7003 as_warn (_("scale factor of %d without an index register"),
7004 1 << i.log2_scale_factor);
7005 i.log2_scale_factor = 0;
7006 }
7007 scale = input_line_pointer;
7008 input_line_pointer = save;
7009 return scale;
7010 }
7011
/* Parse a memory displacement — the text between DISP_START and
   DISP_END — into the global insn template for the current operand,
   and record which Disp* widths are permissible.  Returns 1 on
   success, 0 on failure (an error has been reported).  */
static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Not a PC-relative branch: the displacement width follows the
	 address size (possibly flipped by an address-size prefix).  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  /* Parse the displacement text as if it were the input line,
     NUL-terminated at DISP_END (restored before returning).  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip any @GOTOFF-style specifier, recording the reloc in
     i.reloc[] and narrowing the allowed operand types.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant or register cannot carry a reloc specifier;
	 mark the expression invalid.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
7161
7162 static int
7163 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7164 i386_operand_type types, const char *disp_start)
7165 {
7166 i386_operand_type bigdisp;
7167 int ret = 1;
7168
7169 /* We do this to make sure that the section symbol is in
7170 the symbol table. We will ultimately change the relocation
7171 to be relative to the beginning of the section. */
7172 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7173 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7174 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7175 {
7176 if (exp->X_op != O_symbol)
7177 goto inv_disp;
7178
7179 if (S_IS_LOCAL (exp->X_add_symbol)
7180 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7181 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7182 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7183 exp->X_op = O_subtract;
7184 exp->X_op_symbol = GOT_symbol;
7185 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7186 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7187 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7188 i.reloc[this_operand] = BFD_RELOC_64;
7189 else
7190 i.reloc[this_operand] = BFD_RELOC_32;
7191 }
7192
7193 else if (exp->X_op == O_absent
7194 || exp->X_op == O_illegal
7195 || exp->X_op == O_big)
7196 {
7197 inv_disp:
7198 as_bad (_("missing or invalid displacement expression `%s'"),
7199 disp_start);
7200 ret = 0;
7201 }
7202
7203 else if (flag_code == CODE_64BIT
7204 && !i.prefix[ADDR_PREFIX]
7205 && exp->X_op == O_constant)
7206 {
7207 /* Since displacement is signed extended to 64bit, don't allow
7208 disp32 and turn off disp32s if they are out of range. */
7209 i.types[this_operand].bitfield.disp32 = 0;
7210 if (!fits_in_signed_long (exp->X_add_number))
7211 {
7212 i.types[this_operand].bitfield.disp32s = 0;
7213 if (i.types[this_operand].bitfield.baseindex)
7214 {
7215 as_bad (_("0x%lx out range of signed 32bit displacement"),
7216 (long) exp->X_add_number);
7217 ret = 0;
7218 }
7219 }
7220 }
7221
7222 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7223 else if (exp->X_op != O_constant
7224 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7225 && exp_seg != absolute_section
7226 && exp_seg != text_section
7227 && exp_seg != data_section
7228 && exp_seg != bss_section
7229 && exp_seg != undefined_section
7230 && !bfd_is_com_section (exp_seg))
7231 {
7232 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7233 ret = 0;
7234 }
7235 #endif
7236
7237 /* Check if this is a displacement only operand. */
7238 bigdisp = i.types[this_operand];
7239 bigdisp.bitfield.disp8 = 0;
7240 bigdisp.bitfield.disp16 = 0;
7241 bigdisp.bitfield.disp32 = 0;
7242 bigdisp.bitfield.disp32s = 0;
7243 bigdisp.bitfield.disp64 = 0;
7244 if (operand_type_all_zero (&bigdisp))
7245 i.types[this_operand] = operand_type_and (i.types[this_operand],
7246 types);
7247
7248 return ret;
7249 }
7250
/* Make sure the memory operand we've been dealt is valid.
   Return 1 on success, 0 on a failure.

   Internally, OK takes three values while checking string-insn
   operands: 1 (valid), 0 (invalid base/index combination) and -1
   (syntactically fine but not the register the string insn requires,
   which only draws a warning).  With INFER_ADDR_PREFIX, an invalid
   combination is retried once with an inferred address-size prefix
   before an error is reported.  */
static int
i386_index_check (const char *operand_string)
{
  int ok;
  const char *kind = "base/index";
#if INFER_ADDR_PREFIX
  int fudged = 0;

 tryprefix:
#endif
  ok = 1;
  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      unsigned int expected;

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  /* reg_num 7 is rDI, 6 is rSI.  */
	  expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
	}
      else
	expected = 3 /* rBX */;

      /* Exactly one base register, no index and no displacement.  */
      if (!i.base_reg || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	ok = -1;
      /* The base register's width must match the effective address
	 size for the current mode (and any address-size prefix).  */
      else if (!(flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg16))
	ok = 0;
      else if (i.base_reg->reg_num != expected)
	ok = -1;

      if (ok < 0)
	{
	  unsigned int j;

	  /* Find the register of the right width with the expected
	     number, purely so the warning can name it.  */
	  for (j = 0; j < i386_regtab_size; ++j)
	    if ((flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg16)
		&& i386_regtab[j].reg_num == expected)
	      break;
	  gas_assert (j < i386_regtab_size);
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   i386_regtab[j].reg_name,
		   intel_syntax ? ']' : ')');
	  /* Warn only: treat the operand as valid.  */
	  ok = 1;
	}
    }
  else if (flag_code == CODE_64BIT)
    {
      /* 64-bit checks: base must be a 64-bit register (32-bit with an
	 address-size prefix), with %rip/%eip allowed only without an
	 index; the index may also be %riz/%eiz or a vector register
	 (for VSIB).  */
      if ((i.base_reg
	   && ((i.prefix[ADDR_PREFIX] == 0
		&& !i.base_reg->reg_type.bitfield.reg64)
	       || (i.prefix[ADDR_PREFIX]
		   && !i.base_reg->reg_type.bitfield.reg32))
	   && (i.index_reg
	       || i.base_reg->reg_num !=
		  (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
	  || (i.index_reg
	      && !(i.index_reg->reg_type.bitfield.regxmm
		   || i.index_reg->reg_type.bitfield.regymm)
	      && (!i.index_reg->reg_type.bitfield.baseindex
		  || (i.prefix[ADDR_PREFIX] == 0
		      && i.index_reg->reg_num != RegRiz
		      && !i.index_reg->reg_type.bitfield.reg64
		      )
		  || (i.prefix[ADDR_PREFIX]
		      && i.index_reg->reg_num != RegEiz
		      && !i.index_reg->reg_type.bitfield.reg32))))
	ok = 0;
    }
  else
    {
      if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
	{
	  /* 16bit checks.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    ok = 0;
	}
      else
	{
	  /* 32bit checks.  */
	  if ((i.base_reg
	       && !i.base_reg->reg_type.bitfield.reg32)
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && ((!i.index_reg->reg_type.bitfield.reg32
		       && i.index_reg->reg_num != RegEiz)
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    ok = 0;
	}
    }
  if (!ok)
    {
#if INFER_ADDR_PREFIX
      if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
	{
	  /* Retry once with an inferred address-size prefix.  */
	  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
	  i.prefixes += 1;
	  /* Change the size of any displacement too.  At most one of
	     Disp16 or Disp32 is set.
	     FIXME.  There doesn't seem to be any real need for separate
	     Disp16 and Disp32 flags.  The same goes for Imm16 and Imm32.
	     Removing them would probably clean up the code quite a lot.  */
	  if (flag_code != CODE_64BIT
	      && (i.types[this_operand].bitfield.disp16
		  || i.types[this_operand].bitfield.disp32))
	    i.types[this_operand]
	      = operand_type_xor (i.types[this_operand], disp16_32);
	  fudged = 1;
	  goto tryprefix;
	}
      if (fudged)
	as_bad (_("`%s' is not a valid %s expression"),
		operand_string,
		kind);
      else
#endif
	as_bad (_("`%s' is not a valid %s-bit %s expression"),
		operand_string,
		flag_code_names[i.prefix[ADDR_PREFIX]
				? flag_code == CODE_32BIT
				  ? CODE_16BIT
				  : CODE_32BIT
				: flag_code],
		kind);
    }
  return ok;
}
7419
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.

   Handles, in AT&T syntax: an optional absolute prefix ('*'), plain
   register operands, immediate operands ('$' prefix), and memory
   references with optional segment override, displacement and
   base/index/scale components.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* Record which segment register overrides this memory
	     operand.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}
      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* A plain register operand.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;

    do_memory_reference:
      /* String insns may take two memory operands; everything else at
	 most one.  */
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      /* The displacement is whatever preceded the '('.  */
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
7718 \f
7719 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7720 that an rs_machine_dependent frag may reach. */
7721
7722 unsigned int
7723 i386_frag_max_var (fragS *frag)
7724 {
7725 /* The only relaxable frags are for jumps.
7726 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7727 gas_assert (frag->fr_type == rs_machine_dependent);
7728 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7729 }
7730
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Convert the
	 frag to its largest form immediately and emit a fix.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var may carry an explicit reloc type from the operand's
	 @-specifier; otherwise pick a plain PC-relative reloc of the
	 right width.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  /* Pre-386 CPUs have no 16-bit conditional jump with a word
	     displacement; synthesize one from an inverted short
	     conditional jump over an unconditional jump.  */
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte displacement and
		 emit a signed 8-bit PC-relative fix instead.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is fully resolved; stop relaxing it.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
7857
7858 /* Called after relax() is finished.
7859
7860 In: Address of frag.
7861 fr_type == rs_machine_dependent.
7862 fr_subtype is what the address relaxed to.
7863
7864 Out: Any fixSs and constants are set up.
7865 Caller will turn frag into a ".space 0". */
7866
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
                 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Short form: the opcode byte is already correct; only a one
         byte displacement follows it.  Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      /* The branch must be grown.  Warn if the user asked us (via
         no_cond_jump_promotion) not to grow conditional jumps.  */
      if (no_cond_jump_promotion
          && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
        as_warn_where (fragP->fr_file, fragP->fr_line,
                       _("long jump required"));

      switch (fragP->fr_subtype)
        {
        case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
          extension = 4;		/* 1 opcode + 4 displacement  */
          opcode[0] = 0xe9;		/* jmp with 4-byte displacement  */
          where_to_put_displacement = &opcode[1];
          break;

        case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
          extension = 2;		/* 1 opcode + 2 displacement  */
          opcode[0] = 0xe9;		/* jmp with 2-byte displacement  */
          where_to_put_displacement = &opcode[1];
          break;

        case ENCODE_RELAX_STATE (COND_JUMP, BIG):
        case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
          /* Rewrite the one-byte Jcc as the two-byte 0x0f,0x8N form
             with a 4-byte displacement (same transform as in
             md_estimate_size_before_relax).  */
          extension = 5;		/* 2 opcode + 4 displacement  */
          opcode[1] = opcode[0] + 0x10;
          opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
          where_to_put_displacement = &opcode[2];
          break;

        case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
          extension = 3;		/* 2 opcode + 2 displacement  */
          opcode[1] = opcode[0] + 0x10;
          opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
          where_to_put_displacement = &opcode[2];
          break;

        case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
          /* Negate the condition and branch past an unconditional
             jump which carries the word displacement.  */
          extension = 4;
          opcode[0] ^= 1;		/* Invert the condition.  */
          opcode[1] = 3;		/* Skip the 3-byte jmp below.  */
          opcode[2] = 0xe9;		/* jmp with 2-byte displacement  */
          where_to_put_displacement = &opcode[3];
          break;

        default:
          BAD_CASE (fragP->fr_subtype);
          break;
        }
    }

  /* If size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
                      + ((addressT) 1 << 31))
          > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
                    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
                      (valueT) (displacement_from_opcode_start - extension),
                      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
7965 \f
7966 /* Apply a fixup (fixP) to segment data, once it has been determined
7967 by our caller that we have all the info we need to fix it up.
7968
7969 Parameter valP is the pointer to the value of the bits.
7970
7971 On the 386, immediates, displacements, and data pointers are all in
7972 the same (little-endian) format, so we don't need to care about which
7973 we are handling. */
7974
void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Raw bytes in the frag that this fixup patches.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  if (fixP->fx_pcrel)
    {
      /* The fixup turned out to be PC-relative: switch plain data
         relocs to their PC-relative counterparts.  */
      switch (fixP->fx_r_type)
        {
        default:
          break;

        case BFD_RELOC_64:
          fixP->fx_r_type = BFD_RELOC_64_PCREL;
          break;
        case BFD_RELOC_32:
        case BFD_RELOC_X86_64_32S:
          fixP->fx_r_type = BFD_RELOC_32_PCREL;
          break;
        case BFD_RELOC_16:
          fixP->fx_r_type = BFD_RELOC_16_PCREL;
          break;
        case BFD_RELOC_8:
          fixP->fx_r_type = BFD_RELOC_8_PCREL;
          break;
        }
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
          || fixP->fx_r_type == BFD_RELOC_64_PCREL
          || fixP->fx_r_type == BFD_RELOC_16_PCREL
          || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
         This covers for the fact that bfd_install_relocation will
         subtract the current location (for partial_inplace, PC relative
         relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
          || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
          )
        value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
        {
          segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

          if ((sym_seg == seg
               || (symbol_section_p (fixP->fx_addsy)
                   && sym_seg != absolute_section))
              && !generic_force_reloc (fixP))
            {
              /* Yes, we add the values in twice.  This is because
                 bfd_install_relocation subtracts them out again.  I think
                 bfd_install_relocation is broken, but I don't dare change
                 it.  FIXME.  */
              value += fixP->fx_where + fixP->fx_frag->fr_address;
            }
        }
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
         section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
          || S_IS_WEAK (fixP->fx_addsy))
        value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      /* Weak symbols are resolved by the linker; back out the symbol
         value folded in by the generic code.  */
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
        /* Make the jump instruction point to the address of the operand.  At
           runtime we merely add the offset to the actual PLT entry.  */
        value = -4;
        break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
        value = 0; /* Fully resolved at runtime.  No addend.  */
        /* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
        S_SET_THREAD_LOCAL (fixP->fx_addsy);
        break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
        value = 0; /* Fully resolved at runtime.  No addend.  */
        S_SET_THREAD_LOCAL (fixP->fx_addsy);
        fixP->fx_done = 0;
        return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
        value = 0; /* Fully resolved at runtime.  No addend.  */
        break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
        /* Always emitted as a relocation; nothing to patch here.  */
        fixP->fx_done = 0;
        return;

      default:
        break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA relocations the addend lives in the reloc, not in
         the section contents: stash it and write zero bytes.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
8139 \f
char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Convert a floating point literal of kind TYPE into its target
     representation at LITP, storing the byte count in *SIZEP.  The
     final FALSE argument requests little-endian LITTLENUM order from
     ieee_md_atof, matching the little-endian x86.  (The previous
     comment here claimed the 386 was big-endian; it is not.)  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
8147 \f
/* Shared scratch buffer for output_invalid: big enough for either a
   quoted character or "(0xNN)" plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the invalid input character C for a diagnostic message.
   Printable characters come back quoted, anything else as a hex
   byte.  Returns a pointer to the static buffer above, so the result
   is only valid until the next call.  */

static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
              "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
              "'%c'", c);
  return output_invalid_buf;
}
8161
/* Parse a register out of REG_STRING, which starts *before* the
   REGISTER_PREFIX.  On success return its reg_entry and set *END_OP
   to the first character past the register; return NULL when the text
   does not name a register that is usable with the current
   cpu/arch/syntax settings.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, mapping each character through
     register_chars[]; a zero mapping terminates the name.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
        return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
        ++s;
      if (*s == '(')
        {
          ++s;
          if (is_space_char (*s))
            ++s;
          if (*s >= '0' && *s <= '7')
            {
              int fpr = *s - '0';
              ++s;
              if (is_space_char (*s))
                ++s;
              if (*s == ')')
                {
                  *end_op = s + 1;
                  /* %st(i) lives i entries past the st(0) entry.  */
                  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
                  know (r);
                  return r + fpr;
                }
            }
          /* We have "%st(" then garbage.  */
          return (const reg_entry *) NULL;
        }
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit, segment (sreg3), control, debug and test registers all
     require at least an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* Floating point registers need some flavour of x87.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* REX-extended and 64-bit registers are rejected outside 64-bit
     mode, except that control registers are allowed when cpulm is
     set.  NOTE(review): presumably this exempts %cr8 on long-mode
     capable CPUs running 32-bit code — confirm against the opcode
     table.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
          || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The `flat' pseudo segment register is Intel syntax only.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
8272
/* REG_STRING starts *before* REGISTER_PREFIX.  Like
   parse_real_register, but additionally recognizes symbols that were
   equated to a register (their segment is reg_section).  */

static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      /* Temporarily point the scanner at REG_STRING so that
         get_symbol_end can NUL-terminate the symbol name.  */
      input_line_pointer = reg_string;
      c = get_symbol_end ();
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
        {
          /* An equated register: its value expression indexes
             i386_regtab.  */
          const expressionS *e = symbol_get_value_expression (symbolP);

          know (e->X_op == O_register);
          know (e->X_add_number >= 0
                && (valueT) e->X_add_number < i386_regtab_size);
          r = i386_regtab + e->X_add_number;
          *end_op = input_line_pointer;
        }
      /* Undo the NUL termination and restore the scan position.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
8308
/* Target hook invoked by the expression parser for a name it does not
   recognize.  Handle register names (and, under Intel syntax,
   Intel-specific operands).  Return nonzero and fill in E when NAME
   was consumed.  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Put back the character that terminated NAME so the register
     parser can see what follows it (e.g. the "(1)" of "%st(1)").  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      /* Re-terminate at the new scan position.  */
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
8329
/* Parse operand syntax the generic expression code does not know
   about: a REGISTER_PREFIX register reference, or a bracketed index
   expression under Intel syntax.  E is left alone when neither
   matches.  */

void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
        {
          e->X_op = O_register;
          e->X_add_number = r - i386_regtab;
          input_line_pointer = end;
        }
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
        {
          ++input_line_pointer;
          /* Wrap the bracketed expression in an O_index node.  */
          e->X_op_symbol = make_expr_symbol (e);
          e->X_add_symbol = NULL;
          e->X_add_number = 0;
          e->X_op = O_index;
        }
      else
        {
          /* No closing `]': report nothing and rewind.  */
          e->X_op = O_absent;
          input_line_pointer = end;
        }
      break;
    }
}
8368
8369 \f
/* Short command line options.  The extra SVR4-compatibility letters
   are only accepted for ELF configurations.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Values identifying the long options below; offsets from
   OPTION_MD_BASE keep them clear of the generic option values.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
#define OPTION_X32 (OPTION_MD_BASE + 13)

/* Long command line options; dispatched in md_parse_option.  --64 and
   --x32 only exist for output formats with 64-bit support.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
8415
/* Handle one machine-dependent command line option C with argument
   ARG.  Return nonzero when the option was recognized.  */

int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
         should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
         .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
        const char **list, **l;

        /* Accept --64 only if some 64-bit x86 target vector was
           compiled into this assembler.  */
        list = bfd_target_list ();
        for (l = list; *l != NULL; l++)
          if (CONST_STRNEQ (*l, "elf64-x86-64")
              || strcmp (*l, "coff-x86-64") == 0
              || strcmp (*l, "pe-x86-64") == 0
              || strcmp (*l, "pei-x86-64") == 0
              || strcmp (*l, "mach-o-x86-64") == 0)
            {
              default_arch = "x86_64";
              break;
            }
        if (*l == NULL)
          as_fatal (_("no compiled in support for x86_64"));
        free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
        {
          /* x32 (ILP32 on x86-64) needs the elf32-x86-64 target.  */
          const char **list, **l;

          list = bfd_target_list ();
          for (l = list; *l != NULL; l++)
            if (CONST_STRNEQ (*l, "elf32-x86-64"))
              {
                default_arch = "x86_64:32";
                break;
              }
          if (*l == NULL)
            as_fatal (_("no compiled in support for 32bit x86_64"));
          free (list);
        }
      else
        as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
        /* --divide: strip '/' from the comment characters so it can
           be used as the division operator.  */
        char *n, *t;
        const char *s;

        n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
        t = n;
        for (s = i386_comment_chars; *s != '\0'; s++)
          if (*s != '/')
            *t++ = *s;
        *t = '\0';
        i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* -march=CPU[,+EXTENSION...]: walk the '+'-separated list.
         NOTE(review): the xstrdup'ed copy is never freed — a one-shot
         leak during option parsing.  */
      arch = xstrdup (arg);
      do
        {
          if (*arch == '.')
            as_fatal (_("invalid -march= option: `%s'"), arg);
          next = strchr (arch, '+');
          if (next)
            *next++ = '\0';
          for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
            {
              if (strcmp (arch, cpu_arch [j].name) == 0)
                {
                  /* Processor.  */
                  if (! cpu_arch[j].flags.bitfield.cpui386)
                    continue;

                  cpu_arch_name = cpu_arch[j].name;
                  cpu_sub_arch_name = NULL;
                  cpu_arch_flags = cpu_arch[j].flags;
                  cpu_arch_isa = cpu_arch[j].type;
                  cpu_arch_isa_flags = cpu_arch[j].flags;
                  if (!cpu_arch_tune_set)
                    {
                      cpu_arch_tune = cpu_arch_isa;
                      cpu_arch_tune_flags = cpu_arch_isa_flags;
                    }
                  break;
                }
              else if (*cpu_arch [j].name == '.'
                       && strcmp (arch, cpu_arch [j].name + 1) == 0)
                {
                  /* ISA extension.  */
                  i386_cpu_flags flags;

                  if (!cpu_arch[j].negated)
                    flags = cpu_flags_or (cpu_arch_flags,
                                          cpu_arch[j].flags);
                  else
                    flags = cpu_flags_and_not (cpu_arch_flags,
                                               cpu_arch[j].flags);
                  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
                    {
                      /* Only record the extension when it actually
                         changed the flag set.  */
                      if (cpu_sub_arch_name)
                        {
                          char *name = cpu_sub_arch_name;
                          cpu_sub_arch_name = concat (name,
                                                      cpu_arch[j].name,
                                                      (const char *) NULL);
                          free (name);
                        }
                      else
                        cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
                      cpu_arch_flags = flags;
                      cpu_arch_isa_flags = flags;
                    }
                  break;
                }
            }

          if (j >= ARRAY_SIZE (cpu_arch))
            as_fatal (_("invalid -march= option: `%s'"), arg);

          arch = next;
        }
      while (next != NULL);
      break;

    case OPTION_MTUNE:
      /* Extensions (names starting with '.') are not valid here.  */
      if (*arg == '.')
        as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
        {
          if (strcmp (arg, cpu_arch [j].name) == 0)
            {
              cpu_arch_tune_set = 1;
              cpu_arch_tune = cpu_arch [j].type;
              cpu_arch_tune_flags = cpu_arch[j].flags;
              break;
            }
        }
      if (j >= ARRAY_SIZE (cpu_arch))
        as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
        intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
        intel_mnemonic = 1;
      else
        as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
        intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
        intel_syntax = 1;
      else
        as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
        sse_check = sse_check_error;
      else if (strcasecmp (arg, "warning") == 0)
        sse_check = sse_check_warning;
      else if (strcasecmp (arg, "none") == 0)
        sse_check = sse_check_none;
      else
        as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
        avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
        avxscalar = vex256;
      else
        as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
8663
8664 #define MESSAGE_TEMPLATE \
8665 " "
8666
/* Print the usable cpu_arch[] names to STREAM, wrapped to the width
   of MESSAGE_TEMPLATE.  With EXT nonzero list ISA extensions (entries
   whose name starts with '.') instead of processors; with CHECK
   nonzero suppress processors unusable for -march= (no cpui386
   flag).  */

static void
show_arch (FILE *stream, int ext, int check)
{
  /* Static line buffer reused across calls.  */
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;	/* Column where names begin.  */
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
        continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
        {
          /* It is an extension.  Skip if we aren't asked to show it.  */
          if (ext)
            {
              /* Drop the leading '.' when printing.  */
              name++;
              len--;
            }
          else
            continue;
        }
      else if (ext)
        {
          /* It is a processor.  Skip if we show only extensions.  */
          continue;
        }
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
        {
          /* It is an impossible processor - skip.  */
          continue;
        }

      /* Reserve 2 spaces for ", " or ",\0" */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
        {
          if (p != start)
            {
              *p++ = ',';
              *p++ = ' ';
            }
          p = mempcpy (p, name, len);
        }
      else
        {
          /* Output the current message now and start a new one.  */
          *p++ = ',';
          *p = '\0';
          fprintf (stream, "%s\n", message);
          p = start;
          left = size - (start - message) - len - 2;

          gas_assert (left >= 0);

          p = mempcpy (p, name, len);
        }
    }

  /* Flush the final, partially filled line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
8742
/* Print a description of the machine-dependent command line options
   to STREAM (called for --help).  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
}
8800
8801 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8802 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8803 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8804
8805 /* Pick the target format to use. */
8806
8807 const char *
8808 i386_target_format (void)
8809 {
8810 if (!strncmp (default_arch, "x86_64", 6))
8811 {
8812 update_code_flag (CODE_64BIT, 1);
8813 if (default_arch[6] == '\0')
8814 x86_elf_abi = X86_64_ABI;
8815 else
8816 x86_elf_abi = X86_64_X32_ABI;
8817 }
8818 else if (!strcmp (default_arch, "i386"))
8819 update_code_flag (CODE_32BIT, 1);
8820 else
8821 as_fatal (_("unknown architecture"));
8822
8823 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8824 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8825 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8826 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8827
8828 switch (OUTPUT_FLAVOR)
8829 {
8830 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8831 case bfd_target_aout_flavour:
8832 return AOUT_TARGET_FORMAT;
8833 #endif
8834 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8835 # if defined (TE_PE) || defined (TE_PEP)
8836 case bfd_target_coff_flavour:
8837 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8838 # elif defined (TE_GO32)
8839 case bfd_target_coff_flavour:
8840 return "coff-go32";
8841 # else
8842 case bfd_target_coff_flavour:
8843 return "coff-i386";
8844 # endif
8845 #endif
8846 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8847 case bfd_target_elf_flavour:
8848 {
8849 const char *format;
8850
8851 switch (x86_elf_abi)
8852 {
8853 default:
8854 format = ELF_TARGET_FORMAT;
8855 break;
8856 case X86_64_ABI:
8857 use_rela_relocations = 1;
8858 object_64bit = 1;
8859 format = ELF_TARGET_FORMAT64;
8860 break;
8861 case X86_64_X32_ABI:
8862 use_rela_relocations = 1;
8863 object_64bit = 1;
8864 disallow_64bit_reloc = 1;
8865 format = ELF_TARGET_FORMAT32;
8866 break;
8867 }
8868 if (cpu_arch_isa == PROCESSOR_L1OM)
8869 {
8870 if (x86_elf_abi != X86_64_ABI)
8871 as_fatal (_("Intel L1OM is 64bit only"));
8872 return ELF_TARGET_L1OM_FORMAT;
8873 }
8874 if (cpu_arch_isa == PROCESSOR_K1OM)
8875 {
8876 if (x86_elf_abi != X86_64_ABI)
8877 as_fatal (_("Intel K1OM is 64bit only"));
8878 return ELF_TARGET_K1OM_FORMAT;
8879 }
8880 else
8881 return format;
8882 }
8883 #endif
8884 #if defined (OBJ_MACH_O)
8885 case bfd_target_mach_o_flavour:
8886 if (flag_code == CODE_64BIT)
8887 {
8888 use_rela_relocations = 1;
8889 object_64bit = 1;
8890 return "mach-o-x86-64";
8891 }
8892 else
8893 return "mach-o-i386";
8894 #endif
8895 default:
8896 abort ();
8897 return NULL;
8898 }
8899 }
8900
8901 #endif /* OBJ_MAYBE_ more than one */
8902
8903 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a .note section recording the architecture name (note type
   NT_ARCH, name = cpu_arch_name, empty descriptor).  No-op unless the
   output is ELF and an architecture was selected.  */

void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      /* Remember the current (sub)section so we can restore it.  */
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
                             note_secp,
                             SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      /* namesz covers the arch string plus its terminating NUL; there
         is no descriptor.  */
      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad the note out (frag_align takes a log2 alignment).  */
      frag_align (2, 0, 0);

      /* Return to the section we were in on entry.  */
      subseg_set (seg, subseg);
    }
}
8944 \f
8945 symbolS *
8946 md_undefined_symbol (char *name)
8947 {
8948 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8949 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8950 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8951 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8952 {
8953 if (!GOT_symbol)
8954 {
8955 if (symbol_find (name))
8956 as_bad (_("GOT already in symbol table"));
8957 GOT_symbol = symbol_new (name, undefined_section,
8958 (valueT) 0, &zero_address_frag);
8959 };
8960 return GOT_symbol;
8961 }
8962 return 0;
8963 }
8964
8965 /* Round up a section size to the appropriate boundary. */
8966
8967 valueT
8968 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8969 {
8970 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8971 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8972 {
8973 /* For a.out, force the section size to be aligned. If we don't do
8974 this, BFD will align it for us, but it will not write out the
8975 final bytes of the section. This may be a bug in BFD, but it is
8976 easier to fix it here since that is how the other a.out targets
8977 work. */
8978 int align;
8979
8980 align = bfd_get_section_alignment (stdoutput, segment);
8981 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8982 }
8983 #endif
8984
8985 return size;
8986 }
8987
8988 /* On the i386, PC-relative offsets are relative to the start of the
8989 next instruction. That is, the address of the offset, plus its
8990 size, since the offset is always the last part of the insn. */
8991
8992 long
8993 md_pcrel_from (fixS *fixP)
8994 {
8995 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8996 }
8997
8998 #ifndef I386COFF
8999
9000 static void
9001 s_bss (int ignore ATTRIBUTE_UNUSED)
9002 {
9003 int temp;
9004
9005 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9006 if (IS_ELF)
9007 obj_elf_section_change_hook ();
9008 #endif
9009 temp = get_absolute_expression ();
9010 subseg_set (bss_section, (subsegT) temp);
9011 demand_empty_rest_of_line ();
9012 }
9013
9014 #endif
9015
9016 void
9017 i386_validate_fix (fixS *fixp)
9018 {
9019 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9020 {
9021 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9022 {
9023 if (!object_64bit)
9024 abort ();
9025 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9026 }
9027 else
9028 {
9029 if (!object_64bit)
9030 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9031 else
9032 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9033 }
9034 fixp->fx_subsy = 0;
9035 }
9036 }
9037
/* Translate the internal fixup FIXP into a bfd relocation (arelent)
   for the object file writer.  Always returns a malloc'd arelent; on
   unrepresentable relocations a diagnostic is emitted and a fallback
   relocation is substituted so that assembly can continue.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
      /* All of these relocation types pass through to bfd unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through: a pc-relative 32S fixup is handled by size like
	 any other pc-relative fixup.  */
    default:
      if (fixp->fx_pcrel)
	{
	  /* Pick the generic pc-relative reloc matching the width of
	     the field being patched.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  /* Absolute reloc, again selected by field width.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A plain 32-bit (or 32S) reference to the GOT symbol itself becomes
     a GOT-pc relocation.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  /* Likewise for 64-bit references to the GOT symbol.  */
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry these 64-bit-only relocation types;
	 diagnose but keep going.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	    /* NOTE(review): for these pc-relative GOT/PLT/TLS relocs the
	       addend is the fixup offset biased by the field size —
	       presumably bfd applies the pc adjustment itself for them;
	       confirm against the x86-64 howto table.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    /* Back out the pc-relative adjustment that md_pcrel_from
	       introduced, relative to the section base.  */
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
9231
9232 #include "tc-i386-intel.c"
9233
/* Parse a register name for a .cfi_* directive and turn EXP into an
   O_constant holding the corresponding DWARF2 register number, or
   O_illegal if the operand is not a known register.  */

void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  /* Temporarily accept register names without the '%' prefix, let '.'
     count as a register-name character, and allow pseudo registers;
     the globals are restored below in reverse order of modification.  */
  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  exp->X_op = O_constant;
	  /* flag_code >> 1 selects between the register table's two
	     DWARF numbering columns — presumably 16/32-bit vs 64-bit;
	     confirm against the flag_code enum ordering.  */
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	/* Register index out of table range: reject.  */
	exp->X_op = O_illegal;
    }
}
9262
/* Emit the initial CFI instructions for a function: define the CFA in
   terms of the stack pointer and record where the return address lives
   relative to it.  */

void
tc_x86_frame_initial_instructions (void)
{
  /* DWARF register number of the stack pointer, computed lazily and
     cached per numbering mode (indexed by flag_code >> 1).  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Reuse the register parser by pointing the input stream at a
	 literal stack-pointer name, then restore it.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
9284
/* Return the width in bytes of an address in DWARF2 debug info.  */

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* x32 (ILP32 on x86-64) uses 32-bit addresses even though the
     architecture is 64-bit.  */
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
9294
9295 int
9296 i386_elf_section_type (const char *str, size_t len)
9297 {
9298 if (flag_code == CODE_64BIT
9299 && len == sizeof ("unwind") - 1
9300 && strncmp (str, "unwind", 6) == 0)
9301 return SHT_X86_64_UNWIND;
9302
9303 return -1;
9304 }
9305
9306 #ifdef TE_SOLARIS
/* Solaris-specific hook: tag SEC (the .eh_frame section) with the
   SHT_X86_64_UNWIND section type when assembling 64-bit code.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
9313 #endif
9314
9315 #ifdef TE_PE
9316 void
9317 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9318 {
9319 expressionS exp;
9320
9321 exp.X_op = O_secrel;
9322 exp.X_add_symbol = symbol;
9323 exp.X_add_number = 0;
9324 emit_expr (&exp, size);
9325 }
9326 #endif
9327
9328 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9329 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9330
9331 bfd_vma
9332 x86_64_section_letter (int letter, char **ptr_msg)
9333 {
9334 if (flag_code == CODE_64BIT)
9335 {
9336 if (letter == 'l')
9337 return SHF_X86_64_LARGE;
9338
9339 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9340 }
9341 else
9342 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9343 return -1;
9344 }
9345
9346 bfd_vma
9347 x86_64_section_word (char *str, size_t len)
9348 {
9349 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9350 return SHF_X86_64_LARGE;
9351
9352 return -1;
9353 }
9354
/* Handle the .largecomm pseudo-op: like .comm, but the symbol goes
   into the large common section (.lbss for local symbols).  Outside
   64-bit mode this degrades to a plain .comm with a warning.  */
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      /* Create .lbss lazily, the first time .largecomm is seen.  */
      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched the current section; switch back.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily redirect the common and bss sections at their
	 "large" counterparts, run the generic .comm parser, then
	 restore the saved pointers.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
9394 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.23049 seconds and 5 git commands to generate.