Remove x32 addend overflow for BFD_RELOC_64
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start; /* First template for this mnemonic.  */
104 const insn_template *end; /* One past the last template (END is exclusive).  */
105 }
106 templates;
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_sse_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
201 /* 'md_assemble ()' gathers together information and puts it into a
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 unsupported_vector_index_register
225 };
226
227 struct _i386_insn
228 {
229 /* TM holds the template for the insn were currently assembling. */
230 insn_template tm;
231
232 /* SUFFIX holds the instruction size suffix for byte, word, dword
233 or qword, if given. */
234 char suffix;
235
236 /* OPERANDS gives the number of given operands. */
237 unsigned int operands;
238
239 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
240 of given register, displacement, memory operands and immediate
241 operands. */
242 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
243
244 /* TYPES [i] is the type (see above #defines) which tells us how to
245 use OP[i] for the corresponding operand. */
246 i386_operand_type types[MAX_OPERANDS];
247
248 /* Displacement expression, immediate expression, or register for each
249 operand. */
250 union i386_op op[MAX_OPERANDS];
251
252 /* Flags for operands. */
253 unsigned int flags[MAX_OPERANDS];
254 #define Operand_PCrel 1
255
256 /* Relocation type for operand */
257 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
258
259 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
260 the base index byte below. */
261 const reg_entry *base_reg;
262 const reg_entry *index_reg;
263 unsigned int log2_scale_factor;
264
265 /* SEG gives the seg_entries of this insn. They are zero unless
266 explicit segment overrides are given. */
267 const seg_entry *seg[2];
268
269 /* PREFIX holds all the given prefix opcodes (usually null).
270 PREFIXES is the number of prefix opcodes. */
271 unsigned int prefixes;
272 unsigned char prefix[MAX_PREFIXES];
273
274 /* RM and SIB are the modrm byte and the sib byte where the
275 addressing modes of this insn are encoded. */
276 modrm_byte rm;
277 rex_byte rex;
278 sib_byte sib;
279 vex_prefix vex;
280
281 /* Swap operand in encoding. */
282 unsigned int swap_operand;
283
284 /* Prefer 8bit or 32bit displacement in encoding. */
285 enum
286 {
287 disp_encoding_default = 0,
288 disp_encoding_8bit,
289 disp_encoding_32bit
290 } disp_encoding;
291
292 /* Have HLE prefix. */
293 unsigned int have_hle;
294
295 /* Error message. */
296 enum i386_error error;
297 };
298
299 typedef struct _i386_insn i386_insn;
300
301 /* List of chars besides those in app.c:symbol_chars that can start an
302 operand. Used to prevent the scrubber eating vital white-space. */
303 const char extra_symbol_chars[] = "*%-(["
304 #ifdef LEX_AT
305 "@"
306 #endif
307 #ifdef LEX_QM
308 "?"
309 #endif
310 ;
311
312 #if (defined (TE_I386AIX) \
313 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
314 && !defined (TE_GNU) \
315 && !defined (TE_LINUX) \
316 && !defined (TE_NACL) \
317 && !defined (TE_NETWARE) \
318 && !defined (TE_FreeBSD) \
319 && !defined (TE_DragonFly) \
320 && !defined (TE_NetBSD)))
321 /* This array holds the chars that always start a comment. If the
322 pre-processor is disabled, these aren't very useful. The option
323 --divide will remove '/' from this list. */
324 const char *i386_comment_chars = "#/";
325 #define SVR4_COMMENT_CHARS 1
326 #define PREFIX_SEPARATOR '\\'
327
328 #else
329 const char *i386_comment_chars = "#";
330 #define PREFIX_SEPARATOR '/'
331 #endif
332
333 /* This array holds the chars that only start a comment at the beginning of
334 a line. If the line seems to have the form '# 123 filename'
335 .line and .file directives will appear in the pre-processed output.
336 Note that input_file.c hand checks for '#' at the beginning of the
337 first line of the input file. This is because the compiler outputs
338 #NO_APP at the beginning of its output.
339 Also note that comments started like this one will always work if
340 '/' isn't otherwise defined. */
341 const char line_comment_chars[] = "#/";
342
343 const char line_separator_chars[] = ";";
344
345 /* Chars that can be used to separate mant from exp in floating point
346 nums. */
347 const char EXP_CHARS[] = "eE";
348
349 /* Chars that mean this number is a floating point constant
350 As in 0f12.456
351 or 0d1.2345e12. */
352 const char FLT_CHARS[] = "fFdDxX";
353
354 /* Tables for lexical analysis. */
355 static char mnemonic_chars[256];
356 static char register_chars[256];
357 static char operand_chars[256];
358 static char identifier_chars[256];
359 static char digit_chars[256];
360
361 /* Lexical macros. */
362 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
363 #define is_operand_char(x) (operand_chars[(unsigned char) x])
364 #define is_register_char(x) (register_chars[(unsigned char) x])
365 #define is_space_char(x) ((x) == ' ')
366 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
367 #define is_digit_char(x) (digit_chars[(unsigned char) x])
368
369 /* All non-digit non-letter characters that may occur in an operand. */
370 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
371
372 /* md_assemble() always leaves the strings it's passed unaltered. To
373 effect this we maintain a stack of saved characters that we've smashed
374 with '\0's (indicating end of strings for various sub-fields of the
375 assembler instruction). */
376 static char save_stack[32];
377 static char *save_stack_p;
378 #define END_STRING_AND_SAVE(s) \
379 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
380 #define RESTORE_END_STRING(s) \
381 do { *(s) = *--save_stack_p; } while (0)
382
383 /* The instruction we're assembling. */
384 static i386_insn i;
385
386 /* Possible templates for current insn. */
387 static const templates *current_templates;
388
389 /* Per instruction expressionS buffers: max displacements & immediates. */
390 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
391 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
392
393 /* Current operand we are working on. */
394 static int this_operand = -1;
395
396 /* We support four different modes. FLAG_CODE variable is used to distinguish
397 these. */
398
399 enum flag_code {
400 CODE_32BIT,
401 CODE_16BIT,
402 CODE_64BIT };
403
404 static enum flag_code flag_code;
405 static unsigned int object_64bit;
406 static unsigned int disallow_64bit_reloc;
407 static int use_rela_relocations = 0;
408
409 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
410 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
411 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
412
413 /* The ELF ABI to use. */
414 enum x86_elf_abi
415 {
416 I386_ABI,
417 X86_64_ABI,
418 X86_64_X32_ABI
419 };
420
421 static enum x86_elf_abi x86_elf_abi = I386_ABI;
422 #endif
423
424 /* The names used to print error messages. */
425 static const char *flag_code_names[] =
426 {
427 "32",
428 "16",
429 "64"
430 };
431
432 /* 1 for intel syntax,
433 0 if att syntax. */
434 static int intel_syntax = 0;
435
436 /* 1 for intel mnemonic,
437 0 if att mnemonic. */
438 static int intel_mnemonic = !SYSV386_COMPAT;
439
440 /* 1 if support old (<= 2.8.1) versions of gcc. */
441 static int old_gcc = OLDGCC_COMPAT;
442
443 /* 1 if pseudo registers are permitted. */
444 static int allow_pseudo_reg = 0;
445
446 /* 1 if register prefix % not required. */
447 static int allow_naked_reg = 0;
448
449 /* 1 if pseudo index register, eiz/riz, is allowed . */
450 static int allow_index_reg = 0;
451
452 static enum
453 {
454 sse_check_none = 0,
455 sse_check_warning,
456 sse_check_error
457 }
458 sse_check;
459
460 /* Register prefix used for error message. */
461 static const char *register_prefix = "%";
462
463 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
464 leave, push, and pop instructions so that gcc has the same stack
465 frame as in 32 bit mode. */
466 static char stackop_size = '\0';
467
468 /* Non-zero to optimize code alignment. */
469 int optimize_align_code = 1;
470
471 /* Non-zero to quieten some warnings. */
472 static int quiet_warnings = 0;
473
474 /* CPU name. */
475 static const char *cpu_arch_name = NULL;
476 static char *cpu_sub_arch_name = NULL;
477
478 /* CPU feature flags. */
479 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
480
481 /* If we have selected a cpu we are generating instructions for. */
482 static int cpu_arch_tune_set = 0;
483
484 /* Cpu we are generating instructions for. */
485 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
486
487 /* CPU feature flags of cpu we are generating instructions for. */
488 static i386_cpu_flags cpu_arch_tune_flags;
489
490 /* CPU instruction set architecture used. */
491 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
492
493 /* CPU feature flags of instruction set architecture used. */
494 i386_cpu_flags cpu_arch_isa_flags;
495
496 /* If set, conditional jumps are not automatically promoted to handle
497 larger than a byte offset. */
498 static unsigned int no_cond_jump_promotion = 0;
499
500 /* Encode SSE instructions with VEX prefix. */
501 static unsigned int sse2avx;
502
503 /* Encode scalar AVX instructions with specific vector length. */
504 static enum
505 {
506 vex128 = 0,
507 vex256
508 } avxscalar;
509
510 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
511 static symbolS *GOT_symbol;
512
513 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
514 unsigned int x86_dwarf2_return_column;
515
516 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
517 int x86_cie_data_alignment;
518
519 /* Interface to relax_segment.
520 There are 3 major relax states for 386 jump insns because the
521 different types of jumps add different sizes to frags when we're
522 figuring out what sort of jump to choose to reach a given label. */
523
524 /* Types. */
525 #define UNCOND_JUMP 0
526 #define COND_JUMP 1
527 #define COND_JUMP86 2
528
529 /* Sizes. */
530 #define CODE16 1
531 #define SMALL 0
532 #define SMALL16 (SMALL | CODE16)
533 #define BIG 2
534 #define BIG16 (BIG | CODE16)
535
536 #ifndef INLINE
537 #ifdef __GNUC__
538 #define INLINE __inline__
539 #else
540 #define INLINE
541 #endif
542 #endif
543
544 #define ENCODE_RELAX_STATE(type, size) \
545 ((relax_substateT) (((type) << 2) | (size)))
546 #define TYPE_FROM_RELAX_STATE(s) \
547 ((s) >> 2)
548 #define DISP_SIZE_FROM_RELAX_STATE(s) \
549 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
550
551 /* This table is used by relax_frag to promote short jumps to long
552 ones where necessary. SMALL (short) jumps may be promoted to BIG
553 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
554 don't allow a short jump in a 32 bit code segment to be promoted to
555 a 16 bit offset jump because it's slower (requires data size
556 prefix), and doesn't work, unless the destination is in the bottom
557 64k of the code segment (The top 16 bits of eip are zeroed). */
558
559 const relax_typeS md_relax_table[] =
560 {
561 /* The fields are:
562 1) most positive reach of this state,
563 2) most negative reach of this state,
564 3) how many bytes this mode will have in the variable part of the frag
565 4) which index into the table to try if we can't fit into this one. */
566
567 /* UNCOND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
570 /* dword jmp adds 4 bytes to frag:
571 0 extra opcode bytes, 4 displacement bytes. */
572 {0, 0, 4, 0},
573 /* word jmp adds 2 byte2 to frag:
574 0 extra opcode bytes, 2 displacement bytes. */
575 {0, 0, 2, 0},
576
577 /* COND_JUMP states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
580 /* dword conditionals adds 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 3 bytes to frag:
584 1 extra opcode byte, 2 displacement bytes. */
585 {0, 0, 3, 0},
586
587 /* COND_JUMP86 states. */
588 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
590 /* dword conditionals adds 5 bytes to frag:
591 1 extra opcode byte, 4 displacement bytes. */
592 {0, 0, 5, 0},
593 /* word conditionals add 4 bytes to frag:
594 1 displacement byte and a 3 byte long branch insn. */
595 {0, 0, 4, 0}
596 };
597
598 static const arch_entry cpu_arch[] =
599 {
600 /* Do not replace the first two entries - i386_target_format()
601 relies on them being there in this order. */
602 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
603 CPU_GENERIC32_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
605 CPU_GENERIC64_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
607 CPU_NONE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
609 CPU_I186_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
611 CPU_I286_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
613 CPU_I386_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
615 CPU_I486_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
619 CPU_I686_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
623 CPU_PENTIUMPRO_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
625 CPU_P2_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
627 CPU_P3_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
629 CPU_P4_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
631 CPU_CORE_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
633 CPU_NOCONA_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
635 CPU_CORE_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
637 CPU_CORE_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
639 CPU_CORE2_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
641 CPU_CORE2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
643 CPU_COREI7_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
645 CPU_L1OM_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
647 CPU_K1OM_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
649 CPU_K6_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
651 CPU_K6_2_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
653 CPU_ATHLON_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 1, 0 },
656 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
657 CPU_K8_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
661 CPU_AMDFAM10_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
663 CPU_BDVER1_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
665 CPU_BDVER2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
667 CPU_8087_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
669 CPU_287_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
671 CPU_387_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
673 CPU_ANY87_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
675 CPU_MMX_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
677 CPU_3DNOWA_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
679 CPU_SSE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
681 CPU_SSE2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
683 CPU_SSE3_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
685 CPU_SSSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_1_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_2_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
693 CPU_ANY_SSE_FLAGS, 0, 1 },
694 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
695 CPU_AVX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
697 CPU_AVX2_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
699 CPU_ANY_AVX_FLAGS, 0, 1 },
700 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
701 CPU_VMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
703 CPU_VMFUNC_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
705 CPU_SMX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
707 CPU_XSAVE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
709 CPU_XSAVEOPT_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
711 CPU_AES_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
713 CPU_PCLMUL_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 1, 0 },
716 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
717 CPU_FSGSBASE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
719 CPU_RDRND_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
721 CPU_F16C_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
723 CPU_BMI2_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
725 CPU_FMA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
727 CPU_FMA4_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
729 CPU_XOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
731 CPU_LWP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
733 CPU_MOVBE_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
735 CPU_EPT_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
737 CPU_LZCNT_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
739 CPU_HLE_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
741 CPU_RTM_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
743 CPU_INVPCID_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
745 CPU_CLFLUSH_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
747 CPU_NOP_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
749 CPU_SYSCALL_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
751 CPU_RDTSCP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
753 CPU_3DNOW_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
755 CPU_3DNOWA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
757 CPU_PADLOCK_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
759 CPU_SVME_FLAGS, 1, 0 },
760 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
761 CPU_SVME_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
763 CPU_SSE4A_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
765 CPU_ABM_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
767 CPU_BMI_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
769 CPU_TBM_FLAGS, 0, 0 },
770 };
771
772 #ifdef I386COFF
773 /* Like s_lcomm_internal in gas/read.c but the alignment string
774 is allowed to be optional. */
775
776 static symbolS *
777 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
778 {
779 addressT align = 0;
780
781 SKIP_WHITESPACE ();
782
783 if (needs_align
784 && *input_line_pointer == ',')
785 {
786 align = parse_align (needs_align - 1);
787
788 if (align == (addressT) -1)
789 return NULL;
790 }
791 else
792 {
793 if (size >= 8)
794 align = 3;
795 else if (size >= 4)
796 align = 2;
797 else if (size >= 2)
798 align = 1;
799 else
800 align = 0;
801 }
802
803 bss_alloc (symbolP, size, align);
804 return symbolP;
805 }
806
807 static void
808 pe_lcomm (int needs_align)
809 {
810 s_comm_internal (needs_align * 2, pe_lcomm_internal);
811 }
812 #endif
813
/* i386-specific pseudo-op table: directive name, handler, handler argument.  */
814 const pseudo_typeS md_pseudo_table[] =
815 {
816 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
817 {"align", s_align_bytes, 0},
818 #else
819 {"align", s_align_ptwo, 0},
820 #endif
821 {"arch", set_cpu_arch, 0},
822 #ifndef I386COFF
823 {"bss", s_bss, 0},
824 #else
825 {"lcomm", pe_lcomm, 1},
826 #endif
827 {"ffloat", float_cons, 'f'}, /* Single precision float constants.  */
828 {"dfloat", float_cons, 'd'}, /* Double precision float constants.  */
829 {"tfloat", float_cons, 'x'}, /* Extended precision float constants.  */
830 {"value", cons, 2}, /* .value emits 2-byte integers.  */
831 {"slong", signed_cons, 4}, /* .slong emits signed 4-byte integers.  */
832 {"noopt", s_ignore, 0},
833 {"optim", s_ignore, 0},
834 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
835 {"code16", set_code_flag, CODE_16BIT},
836 {"code32", set_code_flag, CODE_32BIT},
837 {"code64", set_code_flag, CODE_64BIT},
838 {"intel_syntax", set_intel_syntax, 1},
839 {"att_syntax", set_intel_syntax, 0},
840 {"intel_mnemonic", set_intel_mnemonic, 1},
841 {"att_mnemonic", set_intel_mnemonic, 0},
842 {"allow_index_reg", set_allow_index_reg, 1},
843 {"disallow_index_reg", set_allow_index_reg, 0},
844 {"sse_check", set_sse_check, 0},
845 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
846 {"largecomm", handle_large_common, 0},
847 #else
848 {"file", (void (*) (int)) dwarf2_directive_file, 0},
849 {"loc", dwarf2_directive_loc, 0},
850 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
851 #endif
852 #ifdef TE_PE
853 {"secrel32", pe_directive_secrel, 0},
854 #endif
855 {0, 0, 0} /* Sentinel: terminates the table.  */
856 };
857
858 /* For interface with expression (). */
859 extern char *input_line_pointer;
860
861 /* Hash table for instruction mnemonic lookup. */
862 static struct hash_control *op_hash;
863
864 /* Hash table for register lookup. */
865 static struct hash_control *reg_hash;
866 \f
867 void
868 i386_align_code (fragS *fragP, int count)
869 {
870 /* Various efficient no-op patterns for aligning code labels.
871 Note: Don't try to assemble the instructions in the comments.
872 0L and 0w are not legal. */
873 static const char f32_1[] =
874 {0x90}; /* nop */
875 static const char f32_2[] =
876 {0x66,0x90}; /* xchg %ax,%ax */
877 static const char f32_3[] =
878 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
879 static const char f32_4[] =
880 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
881 static const char f32_5[] =
882 {0x90, /* nop */
883 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
884 static const char f32_6[] =
885 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
886 static const char f32_7[] =
887 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
888 static const char f32_8[] =
889 {0x90, /* nop */
890 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
891 static const char f32_9[] =
892 {0x89,0xf6, /* movl %esi,%esi */
893 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
894 static const char f32_10[] =
895 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
896 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
897 static const char f32_11[] =
898 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f32_12[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
902 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
903 static const char f32_13[] =
904 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_14[] =
907 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
908 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
909 static const char f16_3[] =
910 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
911 static const char f16_4[] =
912 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
913 static const char f16_5[] =
914 {0x90, /* nop */
915 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
916 static const char f16_6[] =
917 {0x89,0xf6, /* mov %si,%si */
918 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
919 static const char f16_7[] =
920 {0x8d,0x74,0x00, /* lea 0(%si),%si */
921 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
922 static const char f16_8[] =
923 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
924 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
925 static const char jump_31[] =
926 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
927 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
928 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
929 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
930 static const char *const f32_patt[] = {
931 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
932 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
933 };
934 static const char *const f16_patt[] = {
935 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
936 };
937 /* nopl (%[re]ax) */
938 static const char alt_3[] =
939 {0x0f,0x1f,0x00};
940 /* nopl 0(%[re]ax) */
941 static const char alt_4[] =
942 {0x0f,0x1f,0x40,0x00};
943 /* nopl 0(%[re]ax,%[re]ax,1) */
944 static const char alt_5[] =
945 {0x0f,0x1f,0x44,0x00,0x00};
946 /* nopw 0(%[re]ax,%[re]ax,1) */
947 static const char alt_6[] =
948 {0x66,0x0f,0x1f,0x44,0x00,0x00};
949 /* nopl 0L(%[re]ax) */
950 static const char alt_7[] =
951 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
952 /* nopl 0L(%[re]ax,%[re]ax,1) */
953 static const char alt_8[] =
954 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
955 /* nopw 0L(%[re]ax,%[re]ax,1) */
956 static const char alt_9[] =
957 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
959 static const char alt_10[] =
960 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
961 /* data16
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_11[] =
964 {0x66,
965 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
966 /* data16
967 data16
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_12[] =
970 {0x66,
971 0x66,
972 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* data16
974 data16
975 data16
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_13[] =
978 {0x66,
979 0x66,
980 0x66,
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 /* data16
983 data16
984 data16
985 data16
986 nopw %cs:0L(%[re]ax,%[re]ax,1) */
987 static const char alt_long_14[] =
988 {0x66,
989 0x66,
990 0x66,
991 0x66,
992 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
993 /* data16
994 data16
995 data16
996 data16
997 data16
998 nopw %cs:0L(%[re]ax,%[re]ax,1) */
999 static const char alt_long_15[] =
1000 {0x66,
1001 0x66,
1002 0x66,
1003 0x66,
1004 0x66,
1005 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1006 /* nopl 0(%[re]ax,%[re]ax,1)
1007 nopw 0(%[re]ax,%[re]ax,1) */
1008 static const char alt_short_11[] =
1009 {0x0f,0x1f,0x44,0x00,0x00,
1010 0x66,0x0f,0x1f,0x44,0x00,0x00};
1011 /* nopw 0(%[re]ax,%[re]ax,1)
1012 nopw 0(%[re]ax,%[re]ax,1) */
1013 static const char alt_short_12[] =
1014 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1015 0x66,0x0f,0x1f,0x44,0x00,0x00};
1016 /* nopw 0(%[re]ax,%[re]ax,1)
1017 nopl 0L(%[re]ax) */
1018 static const char alt_short_13[] =
1019 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1020 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1021 /* nopl 0L(%[re]ax)
1022 nopl 0L(%[re]ax) */
1023 static const char alt_short_14[] =
1024 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1025 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1026 /* nopl 0L(%[re]ax)
1027 nopl 0L(%[re]ax,%[re]ax,1) */
1028 static const char alt_short_15[] =
1029 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1030 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1031 static const char *const alt_short_patt[] = {
1032 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1033 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1034 alt_short_14, alt_short_15
1035 };
1036 static const char *const alt_long_patt[] = {
1037 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1038 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1039 alt_long_14, alt_long_15
1040 };
1041
1042 /* Only align for at least a positive non-zero boundary. */
1043 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1044 return;
1045
1046 /* We need to decide which NOP sequence to use for 32bit and
1047 64bit. When -mtune= is used:
1048
1049 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1050 PROCESSOR_GENERIC32, f32_patt will be used.
1051 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1052 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1053 PROCESSOR_GENERIC64, alt_long_patt will be used.
1054 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1055 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1056 will be used.
1057
1058 When -mtune= isn't used, alt_long_patt will be used if
1059 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1060 be used.
1061
1062 When -march= or .arch is used, we can't use anything beyond
1063 cpu_arch_isa_flags. */
1064
1065 if (flag_code == CODE_16BIT)
1066 {
1067 if (count > 8)
1068 {
1069 memcpy (fragP->fr_literal + fragP->fr_fix,
1070 jump_31, count);
1071 /* Adjust jump offset. */
1072 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1073 }
1074 else
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1076 f16_patt[count - 1], count);
1077 }
1078 else
1079 {
1080 const char *const *patt = NULL;
1081
1082 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1083 {
1084 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1085 switch (cpu_arch_tune)
1086 {
1087 case PROCESSOR_UNKNOWN:
1088 /* We use cpu_arch_isa_flags to check if we SHOULD
1089 optimize with nops. */
1090 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1091 patt = alt_long_patt;
1092 else
1093 patt = f32_patt;
1094 break;
1095 case PROCESSOR_PENTIUM4:
1096 case PROCESSOR_NOCONA:
1097 case PROCESSOR_CORE:
1098 case PROCESSOR_CORE2:
1099 case PROCESSOR_COREI7:
1100 case PROCESSOR_L1OM:
1101 case PROCESSOR_K1OM:
1102 case PROCESSOR_GENERIC64:
1103 patt = alt_long_patt;
1104 break;
1105 case PROCESSOR_K6:
1106 case PROCESSOR_ATHLON:
1107 case PROCESSOR_K8:
1108 case PROCESSOR_AMDFAM10:
1109 case PROCESSOR_BD:
1110 patt = alt_short_patt;
1111 break;
1112 case PROCESSOR_I386:
1113 case PROCESSOR_I486:
1114 case PROCESSOR_PENTIUM:
1115 case PROCESSOR_PENTIUMPRO:
1116 case PROCESSOR_GENERIC32:
1117 patt = f32_patt;
1118 break;
1119 }
1120 }
1121 else
1122 {
1123 switch (fragP->tc_frag_data.tune)
1124 {
1125 case PROCESSOR_UNKNOWN:
1126 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1127 PROCESSOR_UNKNOWN. */
1128 abort ();
1129 break;
1130
1131 case PROCESSOR_I386:
1132 case PROCESSOR_I486:
1133 case PROCESSOR_PENTIUM:
1134 case PROCESSOR_K6:
1135 case PROCESSOR_ATHLON:
1136 case PROCESSOR_K8:
1137 case PROCESSOR_AMDFAM10:
1138 case PROCESSOR_BD:
1139 case PROCESSOR_GENERIC32:
1140 /* We use cpu_arch_isa_flags to check if we CAN optimize
1141 with nops. */
1142 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1143 patt = alt_short_patt;
1144 else
1145 patt = f32_patt;
1146 break;
1147 case PROCESSOR_PENTIUMPRO:
1148 case PROCESSOR_PENTIUM4:
1149 case PROCESSOR_NOCONA:
1150 case PROCESSOR_CORE:
1151 case PROCESSOR_CORE2:
1152 case PROCESSOR_COREI7:
1153 case PROCESSOR_L1OM:
1154 case PROCESSOR_K1OM:
1155 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1156 patt = alt_long_patt;
1157 else
1158 patt = f32_patt;
1159 break;
1160 case PROCESSOR_GENERIC64:
1161 patt = alt_long_patt;
1162 break;
1163 }
1164 }
1165
1166 if (patt == f32_patt)
1167 {
1168 /* If the padding is less than 15 bytes, we use the normal
1169 ones. Otherwise, we use a jump instruction and adjust
1170 its offset. */
1171 int limit;
1172
1173 /* For 64bit, the limit is 3 bytes. */
1174 if (flag_code == CODE_64BIT
1175 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1176 limit = 3;
1177 else
1178 limit = 15;
1179 if (count < limit)
1180 memcpy (fragP->fr_literal + fragP->fr_fix,
1181 patt[count - 1], count);
1182 else
1183 {
1184 memcpy (fragP->fr_literal + fragP->fr_fix,
1185 jump_31, count);
1186 /* Adjust jump offset. */
1187 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1188 }
1189 }
1190 else
1191 {
1192 /* Maximum length of an instruction is 15 byte. If the
1193 padding is greater than 15 bytes and we don't use jump,
1194 we have to break it into smaller pieces. */
1195 int padding = count;
1196 while (padding > 15)
1197 {
1198 padding -= 15;
1199 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1200 patt [14], 15);
1201 }
1202
1203 if (padding)
1204 memcpy (fragP->fr_literal + fragP->fr_fix,
1205 patt [padding - 1], padding);
1206 }
1207 }
1208 fragP->fr_var = count;
1209 }
1210
1211 static INLINE int
1212 operand_type_all_zero (const union i386_operand_type *x)
1213 {
1214 switch (ARRAY_SIZE(x->array))
1215 {
1216 case 3:
1217 if (x->array[2])
1218 return 0;
1219 case 2:
1220 if (x->array[1])
1221 return 0;
1222 case 1:
1223 return !x->array[0];
1224 default:
1225 abort ();
1226 }
1227 }
1228
1229 static INLINE void
1230 operand_type_set (union i386_operand_type *x, unsigned int v)
1231 {
1232 switch (ARRAY_SIZE(x->array))
1233 {
1234 case 3:
1235 x->array[2] = v;
1236 case 2:
1237 x->array[1] = v;
1238 case 1:
1239 x->array[0] = v;
1240 break;
1241 default:
1242 abort ();
1243 }
1244 }
1245
1246 static INLINE int
1247 operand_type_equal (const union i386_operand_type *x,
1248 const union i386_operand_type *y)
1249 {
1250 switch (ARRAY_SIZE(x->array))
1251 {
1252 case 3:
1253 if (x->array[2] != y->array[2])
1254 return 0;
1255 case 2:
1256 if (x->array[1] != y->array[1])
1257 return 0;
1258 case 1:
1259 return x->array[0] == y->array[0];
1260 break;
1261 default:
1262 abort ();
1263 }
1264 }
1265
1266 static INLINE int
1267 cpu_flags_all_zero (const union i386_cpu_flags *x)
1268 {
1269 switch (ARRAY_SIZE(x->array))
1270 {
1271 case 3:
1272 if (x->array[2])
1273 return 0;
1274 case 2:
1275 if (x->array[1])
1276 return 0;
1277 case 1:
1278 return !x->array[0];
1279 default:
1280 abort ();
1281 }
1282 }
1283
1284 static INLINE void
1285 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1286 {
1287 switch (ARRAY_SIZE(x->array))
1288 {
1289 case 3:
1290 x->array[2] = v;
1291 case 2:
1292 x->array[1] = v;
1293 case 1:
1294 x->array[0] = v;
1295 break;
1296 default:
1297 abort ();
1298 }
1299 }
1300
1301 static INLINE int
1302 cpu_flags_equal (const union i386_cpu_flags *x,
1303 const union i386_cpu_flags *y)
1304 {
1305 switch (ARRAY_SIZE(x->array))
1306 {
1307 case 3:
1308 if (x->array[2] != y->array[2])
1309 return 0;
1310 case 2:
1311 if (x->array[1] != y->array[1])
1312 return 0;
1313 case 1:
1314 return x->array[0] == y->array[0];
1315 break;
1316 default:
1317 abort ();
1318 }
1319 }
1320
1321 static INLINE int
1322 cpu_flags_check_cpu64 (i386_cpu_flags f)
1323 {
1324 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1325 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1326 }
1327
1328 static INLINE i386_cpu_flags
1329 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1330 {
1331 switch (ARRAY_SIZE (x.array))
1332 {
1333 case 3:
1334 x.array [2] &= y.array [2];
1335 case 2:
1336 x.array [1] &= y.array [1];
1337 case 1:
1338 x.array [0] &= y.array [0];
1339 break;
1340 default:
1341 abort ();
1342 }
1343 return x;
1344 }
1345
1346 static INLINE i386_cpu_flags
1347 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1348 {
1349 switch (ARRAY_SIZE (x.array))
1350 {
1351 case 3:
1352 x.array [2] |= y.array [2];
1353 case 2:
1354 x.array [1] |= y.array [1];
1355 case 1:
1356 x.array [0] |= y.array [0];
1357 break;
1358 default:
1359 abort ();
1360 }
1361 return x;
1362 }
1363
1364 static INLINE i386_cpu_flags
1365 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1366 {
1367 switch (ARRAY_SIZE (x.array))
1368 {
1369 case 3:
1370 x.array [2] &= ~y.array [2];
1371 case 2:
1372 x.array [1] &= ~y.array [1];
1373 case 1:
1374 x.array [0] &= ~y.array [0];
1375 break;
1376 default:
1377 abort ();
1378 }
1379 return x;
1380 }
1381
/* Bit values returned by cpu_flags_match () below to describe how
   well an instruction template matches the active architecture.  */
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
#define CPU_FLAGS_AES_MATCH 0x4
#define CPU_FLAGS_PCLMUL_MATCH 0x8
#define CPU_FLAGS_AVX_MATCH 0x10

/* All the bits needed for a full match ignoring 64-bit mode.  */
#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
/* A 32-bit match plus 64-bit mode compatibility.  */
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1393
/* Return CPU flags match bits. */

/* Compare template T's CPU requirements against cpu_arch_flags and
   return a combination of the CPU_FLAGS_*_MATCH bits defined above.
   CPU_FLAGS_64BIT_MATCH is set iff T is usable in the current code
   size; the AVX/AES/PCLMUL bits are refined separately so callers can
   distinguish partial matches.  */
static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* 64-bit markers were consumed above; drop them before comparing
     feature bits.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* CPU now holds the required features that the active arch
	 actually provides.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1448
1449 static INLINE i386_operand_type
1450 operand_type_and (i386_operand_type x, i386_operand_type y)
1451 {
1452 switch (ARRAY_SIZE (x.array))
1453 {
1454 case 3:
1455 x.array [2] &= y.array [2];
1456 case 2:
1457 x.array [1] &= y.array [1];
1458 case 1:
1459 x.array [0] &= y.array [0];
1460 break;
1461 default:
1462 abort ();
1463 }
1464 return x;
1465 }
1466
1467 static INLINE i386_operand_type
1468 operand_type_or (i386_operand_type x, i386_operand_type y)
1469 {
1470 switch (ARRAY_SIZE (x.array))
1471 {
1472 case 3:
1473 x.array [2] |= y.array [2];
1474 case 2:
1475 x.array [1] |= y.array [1];
1476 case 1:
1477 x.array [0] |= y.array [0];
1478 break;
1479 default:
1480 abort ();
1481 }
1482 return x;
1483 }
1484
1485 static INLINE i386_operand_type
1486 operand_type_xor (i386_operand_type x, i386_operand_type y)
1487 {
1488 switch (ARRAY_SIZE (x.array))
1489 {
1490 case 3:
1491 x.array [2] ^= y.array [2];
1492 case 2:
1493 x.array [1] ^= y.array [1];
1494 case 1:
1495 x.array [0] ^= y.array [0];
1496 break;
1497 default:
1498 abort ();
1499 }
1500 return x;
1501 }
1502
/* Frequently used operand type masks, built from the OPERAND_TYPE_*
   initializers provided by opcodes/i386-init.h.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1528
/* Broad operand classes tested by operand_type_check () below.  */
enum operand_type
{
  reg,		/* any of reg8/reg16/reg32/reg64 */
  imm,		/* any immediate width, signed or unsigned */
  disp,		/* any displacement width */
  anymem	/* any displacement, or a base/index expression */
};
1536
1537 static INLINE int
1538 operand_type_check (i386_operand_type t, enum operand_type c)
1539 {
1540 switch (c)
1541 {
1542 case reg:
1543 return (t.bitfield.reg8
1544 || t.bitfield.reg16
1545 || t.bitfield.reg32
1546 || t.bitfield.reg64);
1547
1548 case imm:
1549 return (t.bitfield.imm8
1550 || t.bitfield.imm8s
1551 || t.bitfield.imm16
1552 || t.bitfield.imm32
1553 || t.bitfield.imm32s
1554 || t.bitfield.imm64);
1555
1556 case disp:
1557 return (t.bitfield.disp8
1558 || t.bitfield.disp16
1559 || t.bitfield.disp32
1560 || t.bitfield.disp32s
1561 || t.bitfield.disp64);
1562
1563 case anymem:
1564 return (t.bitfield.disp8
1565 || t.bitfield.disp16
1566 || t.bitfield.disp32
1567 || t.bitfield.disp32s
1568 || t.bitfield.disp64
1569 || t.bitfield.baseindex);
1570
1571 default:
1572 abort ();
1573 }
1574
1575 return 0;
1576 }
1577
1578 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1579 operand J for instruction template T. */
1580
1581 static INLINE int
1582 match_reg_size (const insn_template *t, unsigned int j)
1583 {
1584 return !((i.types[j].bitfield.byte
1585 && !t->operand_types[j].bitfield.byte)
1586 || (i.types[j].bitfield.word
1587 && !t->operand_types[j].bitfield.word)
1588 || (i.types[j].bitfield.dword
1589 && !t->operand_types[j].bitfield.dword)
1590 || (i.types[j].bitfield.qword
1591 && !t->operand_types[j].bitfield.qword));
1592 }
1593
1594 /* Return 1 if there is no conflict in any size on operand J for
1595 instruction template T. */
1596
1597 static INLINE int
1598 match_mem_size (const insn_template *t, unsigned int j)
1599 {
1600 return (match_reg_size (t, j)
1601 && !((i.types[j].bitfield.unspecified
1602 && !t->operand_types[j].bitfield.unspecified)
1603 || (i.types[j].bitfield.fword
1604 && !t->operand_types[j].bitfield.fword)
1605 || (i.types[j].bitfield.tbyte
1606 && !t->operand_types[j].bitfield.tbyte)
1607 || (i.types[j].bitfield.xmmword
1608 && !t->operand_types[j].bitfield.xmmword)
1609 || (i.types[j].bitfield.ymmword
1610 && !t->operand_types[j].bitfield.ymmword)));
1611 }
1612
/* Return 1 if there is no size conflict on any operands for
   instruction template T.  On failure sets i.error to
   operand_size_mismatch and returns 0.  For templates with the D
   (reversible) or FloatD modifier, the operands are also tried in
   swapped order before reporting a mismatch.  */

static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* AnySize template operands accept every size.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* Non-reversible template: the straight match failed, so this
	 is a hard mismatch.  The label is also the target for the
	 reversed check below.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      /* Compare given operand J against template operand 1-J.  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1674
/* Return 1 if OVERLAP (the AND of the given operand type and the
   template operand type) still describes a usable operand of type
   GIVEN.  Size and modifier bits are ignored; only the remaining
   class bits plus the baseindex/jumpabsolute attributes must agree.
   On failure sets i.error to operand_type_mismatch and returns 0.  */

static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Clear attributes that do not discriminate between operand
     classes, then require that something is left of the overlap.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1702
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.
   M0/M1 are the overlap masks, G0/G1 the given operand types and
   T0/T1 the template operand types for the two operands.  Returns 1
   on compatibility; otherwise sets i.error to register_type_mismatch
   and returns 0.  */

static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Only pairs of register operands need checking.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Identical register sizes always match.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An accumulator overlap widens the template to all register
     sizes.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* If the templates share no register size at all, the mismatch is
     irrelevant (the overlap is null).  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1753
1754 static INLINE unsigned int
1755 mode_from_disp_size (i386_operand_type t)
1756 {
1757 if (t.bitfield.disp8)
1758 return 1;
1759 else if (t.bitfield.disp16
1760 || t.bitfield.disp32
1761 || t.bitfield.disp32s)
1762 return 2;
1763 else
1764 return 0;
1765 }
1766
1767 static INLINE int
1768 fits_in_signed_byte (offsetT num)
1769 {
1770 return (num >= -128) && (num <= 127);
1771 }
1772
1773 static INLINE int
1774 fits_in_unsigned_byte (offsetT num)
1775 {
1776 return (num & 0xff) == num;
1777 }
1778
1779 static INLINE int
1780 fits_in_unsigned_word (offsetT num)
1781 {
1782 return (num & 0xffff) == num;
1783 }
1784
1785 static INLINE int
1786 fits_in_signed_word (offsetT num)
1787 {
1788 return (-32768 <= num) && (num <= 32767);
1789 }
1790
1791 static INLINE int
1792 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1793 {
1794 #ifndef BFD64
1795 return 1;
1796 #else
1797 return (!(((offsetT) -1 << 31) & num)
1798 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1799 #endif
1800 } /* fits_in_signed_long() */
1801
1802 static INLINE int
1803 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1804 {
1805 #ifndef BFD64
1806 return 1;
1807 #else
1808 return (num & (((offsetT) 2 << 31) - 1)) == num;
1809 #endif
1810 } /* fits_in_unsigned_long() */
1811
1812 static INLINE int
1813 fits_in_imm4 (offsetT num)
1814 {
1815 return (num & 0xf) == num;
1816 }
1817
/* Return an operand type with every immediate size bit set that can
   represent NUM.  Imm64 is always permitted; narrower widths are
   added from the smallest representation upward.  */

static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      /* Unsigned byte only: Imm8S would sign-extend incorrectly.  */
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1871
/* Truncate VAL to SIZE bytes (1, 2, 4 or, with BFD64, 8), warning if
   significant bits are lost.  Returns the masked value.  */

static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      /* Sign-extend bit 31 into the upper half via the xor/sub
	 trick.  */
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the discarded high bits are all zero or all one
     (i.e. a plain sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
1906
/* Result classification for add_prefix () below.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* a prefix of the same class already exists */
  PREFIX_LOCK,		/* a lock prefix was added */
  PREFIX_REP,		/* a rep/repne prefix was added */
  PREFIX_OTHER		/* some other prefix was added */
};
1914
/* Returns
   a. PREFIX_EXIST if attempting to add a prefix where one from the
   same class already exists.
   b. PREFIX_LOCK if lock prefix is added.
   c. PREFIX_REP if rep/repne prefix is added.
   d. PREFIX_OTHER if other prefix is added.
 */

/* Record PREFIX in the per-insn prefix slots (i.prefix).  REX
   prefixes are OR-combined; all other prefixes occupy one fixed slot
   per class and conflict if the slot is already used.  */
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes (0x40..0x4f) are only meaningful in 64-bit mode.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      /* Duplicate REX.W, or combining R/X/B bits when some are
	 already present, counts as a conflict.  */
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* ret != PREFIX_EXIST (0): record the prefix; REX bits accumulate
     via OR.  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
1992
/* Switch the assembler to 16/32/64-bit code (VALUE is an enum
   flag_code value) and keep cpu_arch_flags' 64-bit markers in sync.
   If the selected arch cannot support the requested mode, diagnose:
   fatally when CHECK is nonzero, as an ordinary error otherwise.  */

static void
update_code_flag (int value, int check)
{
  /* Local function pointer so the diagnostic below can be either
     as_fatal or as_bad depending on CHECK.  */
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
2029
/* Set the code size to VALUE; incompatibilities are reported with
   as_bad rather than as_fatal (check == 0 in update_code_flag).  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2035
/* Switch to 16-bit code while defaulting stack operations to 32-bit
   size (NOTE(review): presumably the .code16gcc handler — confirm
   against the md_pseudo_table entry).  Only CODE_16BIT is a valid
   argument; anything else aborts.  */

static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  /* Default stack operations to 32-bit operand size.  */
  stackop_size = LONG_MNEM_SUFFIX;
}
2046
/* Directive handler toggling Intel vs. AT&T syntax.  An optional
   "prefix"/"noprefix" argument controls whether registers require a
   '%' prefix; with no argument the default depends on the target's
   symbol leading char.  Also adjusts the identifier character table
   and the register_prefix used in diagnostics.  */

static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      /* Restore the character get_symbol_end replaced with NUL.  */
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2083
/* Directive handler: record whether Intel mnemonics are in effect.  */

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2089
/* Directive handler: record whether index registers are allowed.  */

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2095
/* Directive handler for SSE checking: parses a mandatory argument of
   "none", "warning" or "error" and sets the global sse_check
   accordingly.  */

static void
set_sse_check (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
	sse_check = sse_check_none;
      else if (strcmp (string, "warning") == 0)
	sse_check = sse_check_warning;
      else if (strcmp (string, "error") == 0)
	sse_check = sse_check_error;
      else
	as_bad (_("bad argument to sse_check directive."));
      /* Restore the character get_symbol_end replaced with NUL.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing argument for sse_check directive"));

  demand_empty_rest_of_line ();
}
2121
/* Check that the arch/sub-arch selection NAME with flag set NEW_FLAG
   is compatible with the ELF target machine (L1OM/K1OM are ELF-only
   64-bit targets).  No-op for non-ELF output.  */

static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  /* Cached on first use.  */
  static const char *arch;

  /* Intel LIOM is only supported on ELF. */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
	 use default_arch. */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel L1OM, we must enable it. */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it. */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  /* NOTE(review): this diagnostic looks unreachable — the machine
     code cannot be EM_L1OM and EM_K1OM at once, so one of the two
     early returns above always fires.  Verify the intended
     semantics before restructuring.  */
  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2155
/* Handler for the .arch directive.  Parses an architecture or
   sub-architecture name (sub-arch names start with '.'), updates the
   cpu_arch_* globals accordingly, and then parses an optional
   ",jumps"/",nojumps" modifier controlling conditional jump
   promotion.  */

static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      /* A full architecture name (no leading '.') replaces
		 the current arch selection entirely.  */
	      if (*string != '.')
		{
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  /* Keep the 64-bit markers consistent with the
		     current code size.  */
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* Sub-arch: add the feature flags, or remove them for
		 a negated (".no*") entry.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Something changed; append the sub-arch name to
		     cpu_sub_arch_name for later identification.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  /* Optional ",nojumps"/",jumps" modifier.  */
  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2252
2253 enum bfd_architecture
2254 i386_arch (void)
2255 {
2256 if (cpu_arch_isa == PROCESSOR_L1OM)
2257 {
2258 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2259 || flag_code != CODE_64BIT)
2260 as_fatal (_("Intel L1OM is 64bit ELF only"));
2261 return bfd_arch_l1om;
2262 }
2263 else if (cpu_arch_isa == PROCESSOR_K1OM)
2264 {
2265 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2266 || flag_code != CODE_64BIT)
2267 as_fatal (_("Intel K1OM is 64bit ELF only"));
2268 return bfd_arch_k1om;
2269 }
2270 else
2271 return bfd_arch_i386;
2272 }
2273
2274 unsigned long
2275 i386_mach (void)
2276 {
2277 if (!strncmp (default_arch, "x86_64", 6))
2278 {
2279 if (cpu_arch_isa == PROCESSOR_L1OM)
2280 {
2281 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2282 || default_arch[6] != '\0')
2283 as_fatal (_("Intel L1OM is 64bit ELF only"));
2284 return bfd_mach_l1om;
2285 }
2286 else if (cpu_arch_isa == PROCESSOR_K1OM)
2287 {
2288 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2289 || default_arch[6] != '\0')
2290 as_fatal (_("Intel K1OM is 64bit ELF only"));
2291 return bfd_mach_k1om;
2292 }
2293 else if (default_arch[6] == '\0')
2294 return bfd_mach_x86_64;
2295 else
2296 return bfd_mach_x64_32;
2297 }
2298 else if (!strcmp (default_arch, "i386"))
2299 return bfd_mach_i386_i386;
2300 else
2301 as_fatal (_("unknown architecture"));
2302 }
2303 \f
/* Target-specific one-time initialization, called by GAS at startup:
   build the opcode and register hash tables, fill the lexical
   classification tables used by the parser, record minimum section
   alignments for ELF, and choose DWARF CFI parameters according to
   the code size.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table. */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop. */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* i386_optab holds runs of templates sharing the same mnemonic;
       each run becomes one hash entry spanning [start, end).  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew. */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("internal Error: Can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table. */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("internal Error: Can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    /* Each table maps a character allowed in that context to itself
       (upper-case letters map to lower-case in mnemonic/register
       names); a zero entry means "not allowed".  */
    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow non-ASCII bytes in identifiers and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    {
      record_alignment (text_section, 2);
      record_alignment (data_section, 2);
      record_alignment (bss_section, 2);
    }
#endif

  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2442
/* Print hash-table usage statistics for the opcode and register
   tables to FILE.  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2449 \f
2450 #ifdef DEBUG386
2451
2452 /* Debugging routines for md_assemble. */
2453 static void pte (insn_template *);
2454 static void pt (i386_operand_type);
2455 static void pe (expressionS *);
2456 static void ps (symbolS *);
2457
/* Debug helper: dump the parsed instruction X to stdout, prefixed by
   LINE -- its template, base/index addressing, ModRM and SIB fields,
   REX bits, and every operand's type (plus register name, immediate
   or displacement expression where applicable).  */
static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s index %s scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib: base %x index %x scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, " #%d: ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Print the register name for any register-class operand.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2502
2503 static void
2504 pte (insn_template *t)
2505 {
2506 unsigned int j;
2507 fprintf (stdout, " %d operands ", t->operands);
2508 fprintf (stdout, "opcode %x ", t->base_opcode);
2509 if (t->extension_opcode != None)
2510 fprintf (stdout, "ext %x ", t->extension_opcode);
2511 if (t->opcode_modifier.d)
2512 fprintf (stdout, "D");
2513 if (t->opcode_modifier.w)
2514 fprintf (stdout, "W");
2515 fprintf (stdout, "\n");
2516 for (j = 0; j < t->operands; j++)
2517 {
2518 fprintf (stdout, " #%d type ", j + 1);
2519 pt (t->operand_types[j]);
2520 fprintf (stdout, "\n");
2521 }
2522 }
2523
2524 static void
2525 pe (expressionS *e)
2526 {
2527 fprintf (stdout, " operation %d\n", e->X_op);
2528 fprintf (stdout, " add_number %ld (%lx)\n",
2529 (long) e->X_add_number, (long) e->X_add_number);
2530 if (e->X_add_symbol)
2531 {
2532 fprintf (stdout, " add_symbol ");
2533 ps (e->X_add_symbol);
2534 fprintf (stdout, "\n");
2535 }
2536 if (e->X_op_symbol)
2537 {
2538 fprintf (stdout, " op_symbol ");
2539 ps (e->X_op_symbol);
2540 fprintf (stdout, "\n");
2541 }
2542 }
2543
2544 static void
2545 ps (symbolS *s)
2546 {
2547 fprintf (stdout, "%s type %s%s",
2548 S_GET_NAME (s),
2549 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2550 segment_name (S_GET_SEGMENT (s)));
2551 }
2552
2553 static struct type_name
2554 {
2555 i386_operand_type mask;
2556 const char *name;
2557 }
2558 const type_names[] =
2559 {
2560 { OPERAND_TYPE_REG8, "r8" },
2561 { OPERAND_TYPE_REG16, "r16" },
2562 { OPERAND_TYPE_REG32, "r32" },
2563 { OPERAND_TYPE_REG64, "r64" },
2564 { OPERAND_TYPE_IMM8, "i8" },
2565 { OPERAND_TYPE_IMM8, "i8s" },
2566 { OPERAND_TYPE_IMM16, "i16" },
2567 { OPERAND_TYPE_IMM32, "i32" },
2568 { OPERAND_TYPE_IMM32S, "i32s" },
2569 { OPERAND_TYPE_IMM64, "i64" },
2570 { OPERAND_TYPE_IMM1, "i1" },
2571 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2572 { OPERAND_TYPE_DISP8, "d8" },
2573 { OPERAND_TYPE_DISP16, "d16" },
2574 { OPERAND_TYPE_DISP32, "d32" },
2575 { OPERAND_TYPE_DISP32S, "d32s" },
2576 { OPERAND_TYPE_DISP64, "d64" },
2577 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2578 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2579 { OPERAND_TYPE_CONTROL, "control reg" },
2580 { OPERAND_TYPE_TEST, "test reg" },
2581 { OPERAND_TYPE_DEBUG, "debug reg" },
2582 { OPERAND_TYPE_FLOATREG, "FReg" },
2583 { OPERAND_TYPE_FLOATACC, "FAcc" },
2584 { OPERAND_TYPE_SREG2, "SReg2" },
2585 { OPERAND_TYPE_SREG3, "SReg3" },
2586 { OPERAND_TYPE_ACC, "Acc" },
2587 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2588 { OPERAND_TYPE_REGMMX, "rMMX" },
2589 { OPERAND_TYPE_REGXMM, "rXMM" },
2590 { OPERAND_TYPE_REGYMM, "rYMM" },
2591 { OPERAND_TYPE_ESSEG, "es" },
2592 };
2593
2594 static void
2595 pt (i386_operand_type t)
2596 {
2597 unsigned int j;
2598 i386_operand_type a;
2599
2600 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2601 {
2602 a = operand_type_and (t, type_names[j].mask);
2603 if (!operand_type_all_zero (&a))
2604 fprintf (stdout, "%s, ", type_names[j].name);
2605 }
2606 fflush (stdout);
2607 }
2608
2609 #endif /* DEBUG386 */
2610 \f
2611 static bfd_reloc_code_real_type
2612 reloc (unsigned int size,
2613 int pcrel,
2614 int sign,
2615 bfd_reloc_code_real_type other)
2616 {
2617 if (other != NO_RELOC)
2618 {
2619 reloc_howto_type *rel;
2620
2621 if (size == 8)
2622 switch (other)
2623 {
2624 case BFD_RELOC_X86_64_GOT32:
2625 return BFD_RELOC_X86_64_GOT64;
2626 break;
2627 case BFD_RELOC_X86_64_PLTOFF64:
2628 return BFD_RELOC_X86_64_PLTOFF64;
2629 break;
2630 case BFD_RELOC_X86_64_GOTPC32:
2631 other = BFD_RELOC_X86_64_GOTPC64;
2632 break;
2633 case BFD_RELOC_X86_64_GOTPCREL:
2634 other = BFD_RELOC_X86_64_GOTPCREL64;
2635 break;
2636 case BFD_RELOC_X86_64_TPOFF32:
2637 other = BFD_RELOC_X86_64_TPOFF64;
2638 break;
2639 case BFD_RELOC_X86_64_DTPOFF32:
2640 other = BFD_RELOC_X86_64_DTPOFF64;
2641 break;
2642 default:
2643 break;
2644 }
2645
2646 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2647 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2648 sign = -1;
2649
2650 rel = bfd_reloc_type_lookup (stdoutput, other);
2651 if (!rel)
2652 as_bad (_("unknown relocation (%u)"), other);
2653 else if (size != bfd_get_reloc_size (rel))
2654 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2655 bfd_get_reloc_size (rel),
2656 size);
2657 else if (pcrel && !rel->pc_relative)
2658 as_bad (_("non-pc-relative relocation for pc-relative field"));
2659 else if ((rel->complain_on_overflow == complain_overflow_signed
2660 && !sign)
2661 || (rel->complain_on_overflow == complain_overflow_unsigned
2662 && sign > 0))
2663 as_bad (_("relocated field and relocation type differ in signedness"));
2664 else
2665 return other;
2666 return NO_RELOC;
2667 }
2668
2669 if (pcrel)
2670 {
2671 if (!sign)
2672 as_bad (_("there are no unsigned pc-relative relocations"));
2673 switch (size)
2674 {
2675 case 1: return BFD_RELOC_8_PCREL;
2676 case 2: return BFD_RELOC_16_PCREL;
2677 case 4: return BFD_RELOC_32_PCREL;
2678 case 8: return BFD_RELOC_64_PCREL;
2679 }
2680 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2681 }
2682 else
2683 {
2684 if (sign > 0)
2685 switch (size)
2686 {
2687 case 4: return BFD_RELOC_X86_64_32S;
2688 }
2689 else
2690 switch (size)
2691 {
2692 case 1: return BFD_RELOC_8;
2693 case 2: return BFD_RELOC_16;
2694 case 4: return BFD_RELOC_32;
2695 case 8: return BFD_RELOC_64;
2696 }
2697 as_bad (_("cannot do %s %u byte relocation"),
2698 sign > 0 ? "signed" : "unsigned", size);
2699 }
2700
2701 return NO_RELOC;
2702 }
2703
2704 /* Here we decide which fixups can be adjusted to make them relative to
2705 the beginning of the section instead of the symbol. Basically we need
2706 to make sure that the dynamic relocations are done correctly, so in
2707 some cases we force the original symbol to be used. */
2708
2709 int
2710 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2711 {
2712 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2713 if (!IS_ELF)
2714 return 1;
2715
2716 /* Don't adjust pc-relative references to merge sections in 64-bit
2717 mode. */
2718 if (use_rela_relocations
2719 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2720 && fixP->fx_pcrel)
2721 return 0;
2722
2723 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2724 and changed later by validate_fix. */
2725 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2726 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2727 return 0;
2728
2729 /* adjust_reloc_syms doesn't know about the GOT. */
2730 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2731 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2732 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2734 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2735 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2736 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2737 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2738 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2739 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2740 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2741 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2742 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2747 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2748 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2749 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2750 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2751 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2752 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2753 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2754 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2755 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2756 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2757 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2758 return 0;
2759 #endif
2760 return 1;
2761 }
2762
/* Classify MNEMONIC for Intel-syntax operand size handling.
   Returns 0 for non-FPU mnemonics (and fxsave/fxrstor), 2 for
   integer FPU ops (fi*), 3 for control/state ops (fldcw, fldenv,
   fnsave etc., frstor, fsave, fstcw, fstenv, ...), and 1 for other
   FPU math ops.  The result is only meaningful for opcodes with
   (memory) operands; opcodes without operands never reach here.  */
static int
intel_float_operand (const char *mnemonic)
{
  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
     the fs segment override prefix are not handled here because no
     call path can make opcodes without operands get here.  */
  if (mnemonic[1] == 'i')
    return 2;			/* integer op */

  if (mnemonic[1] == 'l')
    {
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3;		/* fldcw/fldenv */
    }
  else if (mnemonic[1] == 'n')
    {
      if (mnemonic[2] != 'o')	/* fnop is the lone exception */
	return 3;		/* non-waiting control op */
    }
  else if (mnemonic[1] == 'r')
    {
      if (mnemonic[2] == 's')
	return 3;		/* frstor/frstpm */
    }
  else if (mnemonic[1] == 's')
    {
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't'
	  && (mnemonic[3] == 'c'	/* fstcw */
	      || mnemonic[3] == 'd'	/* fstdw */
	      || mnemonic[3] == 'e'	/* fstenv */
	      || mnemonic[3] == 's'))	/* fsts[gw] */
	return 3;
    }
  else if (mnemonic[1] == 'x')
    {
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;		/* fxsave/fxrstor are not really math ops */
    }

  return 1;
}
2815
2816 /* Build the VEX prefix. */
2817
static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  The vvvv field holds the one's
     complement of the register number.  */
  if (i.vex.register_specifier)
    {
      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand (possible when the template is marked `s' and only
     REX.B is set -- swapping moves the extension bit to REX.R,
     which the 2-byte form can encode).  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn (the direction-swapped template).  */
      i.tm = t[1];
    }

  /* Vector length: for scalar templates use the --mavxscalar
     setting, otherwise 1 for 256-bit, 0 for 128-bit.  */
  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The legacy 66/F3/F2 prefix is folded into the VEX `pp' field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible (0F map, VEX.W0, and no
     REX.W/X/B bits to carry).  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix: C5 R.vvvv.L.pp.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit (stored inverted in VEX).  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix: C4/8F RXB.mmmmm W.vvvv.L.pp.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the opcode map (and C4 vs 8F escape for XOP).  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit; a template-forced VEX.W1 must not
	 combine with an operand-derived REX.W.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	{
	  if (w)
	    abort ();

	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    w = 1;
	}

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
2965
/* Handle templates with the ImmExt modifier: turn the opcode suffix
   stored in tm.extension_opcode into a faked 8-bit immediate operand
   (dropping SSE3 fixed register operands first), then clear
   tm.extension_opcode.  */
static void
process_immext (void)
{
  expressionS *exp;

  if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
    {
      /* SSE3 Instructions have the fixed operands with an opcode
	 suffix which is coded in the same place as an 8-bit immediate
	 field would be.  Here we check those operands and remove them
	 afterwards.  */
      unsigned int x;

      /* Operand X must be register number X (e.g. %eax, %ecx, ...).  */
      for (x = 0; x < i.operands; x++)
	if (i.op[x].regs->reg_num != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands == 0
	      && (i.operands <= 2
		  || (i.tm.opcode_modifier.vex
		      && i.operands <= 4)));

  /* Append the faked immediate as the last operand.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3009
3010
/* Check that the HLE prefix (xacquire/xrelease) on the current
   instruction is legal, according to the template's HLEPrefixOk
   setting.  Returns 1 if OK, 0 after issuing a diagnostic.  */
static int
check_hle (void)
{
  switch (i.tm.opcode_modifier.hleprefixok)
    {
    default:
      abort ();
    case HLEPrefixNone:
      /* The instruction never takes an HLE prefix.  */
      if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
	as_bad (_("invalid instruction `%s' after `xacquire'"),
		i.tm.name);
      else
	as_bad (_("invalid instruction `%s' after `xrelease'"),
		i.tm.name);
      return 0;
    case HLEPrefixLock:
      /* HLE is only valid together with a lock prefix.  */
      if (i.prefix[LOCK_PREFIX])
	return 1;
      if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
	as_bad (_("missing `lock' with `xacquire'"));
      else
	as_bad (_("missing `lock' with `xrelease'"));
      return 0;
    case HLEPrefixAny:
      return 1;
    case HLEPrefixRelease:
      /* Only xrelease is allowed, and only with a memory
	 destination.  */
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	{
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
		  i.tm.name);
	  return 0;
	}
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	{
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
	  return 0;
	}
      return 1;
    }
}
3053
3054 /* This is the guts of the machine-dependent assembler. LINE points to a
3055 machine dependent instruction. This function is supposed to emit
3056 the frags/bytes it assembles to. */
3057
void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  `i' is the per-instruction scratch state
     shared by all the helpers called below.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions.  */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Optionally warn (or error) about legacy SSE instructions when
     --sse-check is in effect; AVX-exempt templates are skipped.  */
  if (sse_check != sse_check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == sse_check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.have_hle && !check_hle ())
    return;

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex)
    build_vex_prefix (t);

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3297
/* Parse any instruction prefixes and the mnemonic at the start of LINE,
   copying the bare mnemonic into MNEMONIC (a buffer of MAX_MNEM_SIZE).
   On success the matching templates are left in the global
   current_templates, prefixes found are recorded via add_prefix, and a
   pointer just past the mnemonic (at the operands) is returned.  On any
   error, an error is reported with as_bad and NULL is returned.  */
static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Non-zero if we found a prefix only acceptable with string insns.  */
  const char *expecting_string_instruction = NULL;

  /* Loop once per prefix, then once more for the mnemonic proper.  */
  while (1)
    {
      /* Copy one token into MNEMONIC, remembering the position of the
	 last '.' seen (possible ".s"/".d8"/".d32" encoding pseudo
	 suffix, handled below).  */
      mnem_p = mnemonic;
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      /* In AT&T syntax a prefix separator or ',' may legally follow.  */
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* Treat the token as a prefix only if more input follows it and
	 the template says it is one; otherwise it is the mnemonic.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* rep/repne as an HLE hint (xacquire/xrelease) is fine on
		 its own; a plain rep must be followed by a string insn,
		 which is verified at the end of this function.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.have_hle = 1;
	      else
		expecting_string_instruction = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  The ".s", ".d8" and ".d32" pseudo suffixes are
	 stripped before retrying the lookup.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* NOTE: the following case labels sit inside the else arm on
	       purpose -- BYTE/QWORD suffixes, and WORD when the intel
	       float special case above does not apply, all share the
	       trim-and-retry code below.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      /* ",pt" is encoded as a DS segment override prefix.  */
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      /* ",pn" is encoded as a CS segment override prefix.  */
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  /* Check for rep/repne without a string instruction.  */
  if (expecting_string_instruction)
    {
      /* Narrow current_templates to just the string-insn forms;
	 OVERRIDE is static because current_templates keeps pointing at
	 it after this function returns.  */
      static templates override;

      for (t = current_templates->start; t < current_templates->end; ++t)
	if (t->opcode_modifier.isstring)
	  break;
      if (t >= current_templates->end)
	{
	  as_bad (_("expecting string instruction after `%s'"),
		  expecting_string_instruction);
	  return NULL;
	}
      for (override.start = t; t < current_templates->end; ++t)
	if (!t->opcode_modifier.isstring)
	  break;
      override.end = t;
      current_templates = &override;
    }

  return l;
}
3554
/* Parse the comma-separated operand list starting at L for MNEMONIC,
   recording each operand into the global insn state `i' via
   i386_intel_operand / i386_att_operand.  Returns a pointer to the end
   of the operand list, or NULL after reporting an error.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan to the ',' terminating this operand; a ',' inside
	 unbalanced parens/brackets is part of the operand (e.g. a
	 base,index,scale memory expression).  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T syntax groups with parens, Intel with brackets.  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  */
	  /* Temporarily NUL-terminate the operand text for the
	     syntax-specific parser, then restore the saved byte.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty operand: diagnose according to context.  */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3673
3674 static void
3675 swap_2_operands (int xchg1, int xchg2)
3676 {
3677 union i386_op temp_op;
3678 i386_operand_type temp_type;
3679 enum bfd_reloc_code_real temp_reloc;
3680
3681 temp_type = i.types[xchg2];
3682 i.types[xchg2] = i.types[xchg1];
3683 i.types[xchg1] = temp_type;
3684 temp_op = i.op[xchg2];
3685 i.op[xchg2] = i.op[xchg1];
3686 i.op[xchg1] = temp_op;
3687 temp_reloc = i.reloc[xchg2];
3688 i.reloc[xchg2] = i.reloc[xchg1];
3689 i.reloc[xchg1] = temp_reloc;
3690 }
3691
3692 static void
3693 swap_operands (void)
3694 {
3695 switch (i.operands)
3696 {
3697 case 5:
3698 case 4:
3699 swap_2_operands (1, i.operands - 2);
3700 case 3:
3701 case 2:
3702 swap_2_operands (0, i.operands - 1);
3703 break;
3704 default:
3705 abort ();
3706 }
3707
3708 if (i.mem_operands == 2)
3709 {
3710 const seg_entry *temp_seg;
3711 temp_seg = i.seg[0];
3712 i.seg[0] = i.seg[1];
3713 i.seg[1] = temp_seg;
3714 }
3715 }
3716
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens/narrows the acceptable immediate types in
   i.types[] based on the (possibly guessed) operand-size suffix, and
   sign-extends small constants so that e.g. a 16-bit $0xffe0 can match
   an Imm8S template.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No suffix and no registers: the operand size is the mode's
       default (word in 16-bit code, possibly flipped by a data-size
       prefix).  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		/* (v ^ 0x8000) - 0x8000 sign-extends the low 16 bits.  */
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		/* Likewise, sign-extend the low 32 bits.  */
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* ALLOWED collects every immediate size any candidate
		 template accepts for this operand.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied sizes when at least
		 one template actually accepts such a size.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
3863
/* Try to use the smallest displacement type too.  Narrows the disp
   size bits in i.types[] for constant displacements (sign-extending
   them as needed), drops zero displacements with a base/index, and
   strips Disp64 for anything that is not a plain constant.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		/* NOTE(review): the subtrahend is cast to addressT while
		   the XOR uses offsetT -- presumably intentional so the
		   subtraction is done unsigned; confirm both types have
		   the same width on all hosts.  */
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can be
		   dropped entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With a 64-bit address-size override the displacement
		   is zero-extended, so unsigned 32-bit values fit.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor call marker: emit the fixup here and drop
	       the displacement from the operand entirely.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
3939
3940 /* Check if operands are valid for the instruction. */
3941
3942 static int
3943 check_VecOperands (const insn_template *t)
3944 {
3945 /* Without VSIB byte, we can't have a vector register for index. */
3946 if (!t->opcode_modifier.vecsib
3947 && i.index_reg
3948 && (i.index_reg->reg_type.bitfield.regxmm
3949 || i.index_reg->reg_type.bitfield.regymm))
3950 {
3951 i.error = unsupported_vector_index_register;
3952 return 1;
3953 }
3954
3955 /* For VSIB byte, we need a vector register for index and no PC
3956 relative addressing is allowed. */
3957 if (t->opcode_modifier.vecsib
3958 && (!i.index_reg
3959 || !((t->opcode_modifier.vecsib == VecSIB128
3960 && i.index_reg->reg_type.bitfield.regxmm)
3961 || (t->opcode_modifier.vecsib == VecSIB256
3962 && i.index_reg->reg_type.bitfield.regymm))
3963 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3964 {
3965 i.error = invalid_vsib_address;
3966 return 1;
3967 }
3968
3969 return 0;
3970 }
3971
3972 /* Check if operands are valid for the instruction. Update VEX
3973 operand types. */
3974
3975 static int
3976 VEX_check_operands (const insn_template *t)
3977 {
3978 if (!t->opcode_modifier.vex)
3979 return 0;
3980
3981 /* Only check VEX_Imm4, which must be the first operand. */
3982 if (t->operand_types[0].bitfield.vec_imm4)
3983 {
3984 if (i.op[0].imms->X_op != O_constant
3985 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3986 {
3987 i.error = bad_imm4;
3988 return 1;
3989 }
3990
3991 /* Turn off Imm8 so that update_imm won't complain. */
3992 i.types[0] = vec_imm4;
3993 }
3994
3995 return 0;
3996 }
3997
3998 static const insn_template *
3999 match_template (void)
4000 {
4001 /* Points to template once we've found it. */
4002 const insn_template *t;
4003 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4004 i386_operand_type overlap4;
4005 unsigned int found_reverse_match;
4006 i386_opcode_modifier suffix_check;
4007 i386_operand_type operand_types [MAX_OPERANDS];
4008 int addr_prefix_disp;
4009 unsigned int j;
4010 unsigned int found_cpu_match;
4011 unsigned int check_register;
4012
4013 #if MAX_OPERANDS != 5
4014 # error "MAX_OPERANDS must be 5."
4015 #endif
4016
4017 found_reverse_match = 0;
4018 addr_prefix_disp = -1;
4019
4020 memset (&suffix_check, 0, sizeof (suffix_check));
4021 if (i.suffix == BYTE_MNEM_SUFFIX)
4022 suffix_check.no_bsuf = 1;
4023 else if (i.suffix == WORD_MNEM_SUFFIX)
4024 suffix_check.no_wsuf = 1;
4025 else if (i.suffix == SHORT_MNEM_SUFFIX)
4026 suffix_check.no_ssuf = 1;
4027 else if (i.suffix == LONG_MNEM_SUFFIX)
4028 suffix_check.no_lsuf = 1;
4029 else if (i.suffix == QWORD_MNEM_SUFFIX)
4030 suffix_check.no_qsuf = 1;
4031 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4032 suffix_check.no_ldsuf = 1;
4033
4034 /* Must have right number of operands. */
4035 i.error = number_of_operands_mismatch;
4036
4037 for (t = current_templates->start; t < current_templates->end; t++)
4038 {
4039 addr_prefix_disp = -1;
4040
4041 if (i.operands != t->operands)
4042 continue;
4043
4044 /* Check processor support. */
4045 i.error = unsupported;
4046 found_cpu_match = (cpu_flags_match (t)
4047 == CPU_FLAGS_PERFECT_MATCH);
4048 if (!found_cpu_match)
4049 continue;
4050
4051 /* Check old gcc support. */
4052 i.error = old_gcc_only;
4053 if (!old_gcc && t->opcode_modifier.oldgcc)
4054 continue;
4055
4056 /* Check AT&T mnemonic. */
4057 i.error = unsupported_with_intel_mnemonic;
4058 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4059 continue;
4060
4061 /* Check AT&T/Intel syntax. */
4062 i.error = unsupported_syntax;
4063 if ((intel_syntax && t->opcode_modifier.attsyntax)
4064 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4065 continue;
4066
4067 /* Check the suffix, except for some instructions in intel mode. */
4068 i.error = invalid_instruction_suffix;
4069 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4070 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4071 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4072 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4073 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4074 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4075 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4076 continue;
4077
4078 if (!operand_size_match (t))
4079 continue;
4080
4081 for (j = 0; j < MAX_OPERANDS; j++)
4082 operand_types[j] = t->operand_types[j];
4083
4084 /* In general, don't allow 64-bit operands in 32-bit mode. */
4085 if (i.suffix == QWORD_MNEM_SUFFIX
4086 && flag_code != CODE_64BIT
4087 && (intel_syntax
4088 ? (!t->opcode_modifier.ignoresize
4089 && !intel_float_operand (t->name))
4090 : intel_float_operand (t->name) != 2)
4091 && ((!operand_types[0].bitfield.regmmx
4092 && !operand_types[0].bitfield.regxmm
4093 && !operand_types[0].bitfield.regymm)
4094 || (!operand_types[t->operands > 1].bitfield.regmmx
4095 && !!operand_types[t->operands > 1].bitfield.regxmm
4096 && !!operand_types[t->operands > 1].bitfield.regymm))
4097 && (t->base_opcode != 0x0fc7
4098 || t->extension_opcode != 1 /* cmpxchg8b */))
4099 continue;
4100
4101 /* In general, don't allow 32-bit operands on pre-386. */
4102 else if (i.suffix == LONG_MNEM_SUFFIX
4103 && !cpu_arch_flags.bitfield.cpui386
4104 && (intel_syntax
4105 ? (!t->opcode_modifier.ignoresize
4106 && !intel_float_operand (t->name))
4107 : intel_float_operand (t->name) != 2)
4108 && ((!operand_types[0].bitfield.regmmx
4109 && !operand_types[0].bitfield.regxmm)
4110 || (!operand_types[t->operands > 1].bitfield.regmmx
4111 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4112 continue;
4113
4114 /* Do not verify operands when there are none. */
4115 else
4116 {
4117 if (!t->operands)
4118 /* We've found a match; break out of loop. */
4119 break;
4120 }
4121
4122 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4123 into Disp32/Disp16/Disp32 operand. */
4124 if (i.prefix[ADDR_PREFIX] != 0)
4125 {
4126 /* There should be only one Disp operand. */
4127 switch (flag_code)
4128 {
4129 case CODE_16BIT:
4130 for (j = 0; j < MAX_OPERANDS; j++)
4131 {
4132 if (operand_types[j].bitfield.disp16)
4133 {
4134 addr_prefix_disp = j;
4135 operand_types[j].bitfield.disp32 = 1;
4136 operand_types[j].bitfield.disp16 = 0;
4137 break;
4138 }
4139 }
4140 break;
4141 case CODE_32BIT:
4142 for (j = 0; j < MAX_OPERANDS; j++)
4143 {
4144 if (operand_types[j].bitfield.disp32)
4145 {
4146 addr_prefix_disp = j;
4147 operand_types[j].bitfield.disp32 = 0;
4148 operand_types[j].bitfield.disp16 = 1;
4149 break;
4150 }
4151 }
4152 break;
4153 case CODE_64BIT:
4154 for (j = 0; j < MAX_OPERANDS; j++)
4155 {
4156 if (operand_types[j].bitfield.disp64)
4157 {
4158 addr_prefix_disp = j;
4159 operand_types[j].bitfield.disp64 = 0;
4160 operand_types[j].bitfield.disp32 = 1;
4161 break;
4162 }
4163 }
4164 break;
4165 }
4166 }
4167
4168 /* We check register size if needed. */
4169 check_register = t->opcode_modifier.checkregsize;
4170 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4171 switch (t->operands)
4172 {
4173 case 1:
4174 if (!operand_type_match (overlap0, i.types[0]))
4175 continue;
4176 break;
4177 case 2:
4178 /* xchg %eax, %eax is a special case. It is an aliase for nop
4179 only in 32bit mode and we can use opcode 0x90. In 64bit
4180 mode, we can't use 0x90 for xchg %eax, %eax since it should
4181 zero-extend %eax to %rax. */
4182 if (flag_code == CODE_64BIT
4183 && t->base_opcode == 0x90
4184 && operand_type_equal (&i.types [0], &acc32)
4185 && operand_type_equal (&i.types [1], &acc32))
4186 continue;
4187 if (i.swap_operand)
4188 {
4189 /* If we swap operand in encoding, we either match
4190 the next one or reverse direction of operands. */
4191 if (t->opcode_modifier.s)
4192 continue;
4193 else if (t->opcode_modifier.d)
4194 goto check_reverse;
4195 }
4196
4197 case 3:
4198 /* If we swap operand in encoding, we match the next one. */
4199 if (i.swap_operand && t->opcode_modifier.s)
4200 continue;
4201 case 4:
4202 case 5:
4203 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4204 if (!operand_type_match (overlap0, i.types[0])
4205 || !operand_type_match (overlap1, i.types[1])
4206 || (check_register
4207 && !operand_type_register_match (overlap0, i.types[0],
4208 operand_types[0],
4209 overlap1, i.types[1],
4210 operand_types[1])))
4211 {
4212 /* Check if other direction is valid ... */
4213 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4214 continue;
4215
4216 check_reverse:
4217 /* Try reversing direction of operands. */
4218 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4219 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4220 if (!operand_type_match (overlap0, i.types[0])
4221 || !operand_type_match (overlap1, i.types[1])
4222 || (check_register
4223 && !operand_type_register_match (overlap0,
4224 i.types[0],
4225 operand_types[1],
4226 overlap1,
4227 i.types[1],
4228 operand_types[0])))
4229 {
4230 /* Does not match either direction. */
4231 continue;
4232 }
4233 /* found_reverse_match holds which of D or FloatDR
4234 we've found. */
4235 if (t->opcode_modifier.d)
4236 found_reverse_match = Opcode_D;
4237 else if (t->opcode_modifier.floatd)
4238 found_reverse_match = Opcode_FloatD;
4239 else
4240 found_reverse_match = 0;
4241 if (t->opcode_modifier.floatr)
4242 found_reverse_match |= Opcode_FloatR;
4243 }
4244 else
4245 {
4246 /* Found a forward 2 operand match here. */
4247 switch (t->operands)
4248 {
4249 case 5:
4250 overlap4 = operand_type_and (i.types[4],
4251 operand_types[4]);
4252 case 4:
4253 overlap3 = operand_type_and (i.types[3],
4254 operand_types[3]);
4255 case 3:
4256 overlap2 = operand_type_and (i.types[2],
4257 operand_types[2]);
4258 break;
4259 }
4260
4261 switch (t->operands)
4262 {
4263 case 5:
4264 if (!operand_type_match (overlap4, i.types[4])
4265 || !operand_type_register_match (overlap3,
4266 i.types[3],
4267 operand_types[3],
4268 overlap4,
4269 i.types[4],
4270 operand_types[4]))
4271 continue;
4272 case 4:
4273 if (!operand_type_match (overlap3, i.types[3])
4274 || (check_register
4275 && !operand_type_register_match (overlap2,
4276 i.types[2],
4277 operand_types[2],
4278 overlap3,
4279 i.types[3],
4280 operand_types[3])))
4281 continue;
4282 case 3:
4283 /* Here we make use of the fact that there are no
4284 reverse match 3 operand instructions, and all 3
4285 operand instructions only need to be checked for
4286 register consistency between operands 2 and 3. */
4287 if (!operand_type_match (overlap2, i.types[2])
4288 || (check_register
4289 && !operand_type_register_match (overlap1,
4290 i.types[1],
4291 operand_types[1],
4292 overlap2,
4293 i.types[2],
4294 operand_types[2])))
4295 continue;
4296 break;
4297 }
4298 }
4299 /* Found either forward/reverse 2, 3 or 4 operand match here:
4300 slip through to break. */
4301 }
4302 if (!found_cpu_match)
4303 {
4304 found_reverse_match = 0;
4305 continue;
4306 }
4307
4308 /* Check if vector operands are valid. */
4309 if (check_VecOperands (t))
4310 continue;
4311
4312 /* Check if VEX operands are valid. */
4313 if (VEX_check_operands (t))
4314 continue;
4315
4316 /* We've found a match; break out of loop. */
4317 break;
4318 }
4319
4320 if (t == current_templates->end)
4321 {
4322 /* We found no match. */
4323 const char *err_msg;
4324 switch (i.error)
4325 {
4326 default:
4327 abort ();
4328 case operand_size_mismatch:
4329 err_msg = _("operand size mismatch");
4330 break;
4331 case operand_type_mismatch:
4332 err_msg = _("operand type mismatch");
4333 break;
4334 case register_type_mismatch:
4335 err_msg = _("register type mismatch");
4336 break;
4337 case number_of_operands_mismatch:
4338 err_msg = _("number of operands mismatch");
4339 break;
4340 case invalid_instruction_suffix:
4341 err_msg = _("invalid instruction suffix");
4342 break;
4343 case bad_imm4:
4344 err_msg = _("Imm4 isn't the first operand");
4345 break;
4346 case old_gcc_only:
4347 err_msg = _("only supported with old gcc");
4348 break;
4349 case unsupported_with_intel_mnemonic:
4350 err_msg = _("unsupported with Intel mnemonic");
4351 break;
4352 case unsupported_syntax:
4353 err_msg = _("unsupported syntax");
4354 break;
4355 case unsupported:
4356 as_bad (_("unsupported instruction `%s'"),
4357 current_templates->start->name);
4358 return NULL;
4359 case invalid_vsib_address:
4360 err_msg = _("invalid VSIB address");
4361 break;
4362 case unsupported_vector_index_register:
4363 err_msg = _("unsupported vector index register");
4364 break;
4365 }
4366 as_bad (_("%s for `%s'"), err_msg,
4367 current_templates->start->name);
4368 return NULL;
4369 }
4370
4371 if (!quiet_warnings)
4372 {
4373 if (!intel_syntax
4374 && (i.types[0].bitfield.jumpabsolute
4375 != operand_types[0].bitfield.jumpabsolute))
4376 {
4377 as_warn (_("indirect %s without `*'"), t->name);
4378 }
4379
4380 if (t->opcode_modifier.isprefix
4381 && t->opcode_modifier.ignoresize)
4382 {
4383 /* Warn them that a data or address size prefix doesn't
4384 affect assembly of the next line of code. */
4385 as_warn (_("stand-alone `%s' prefix"), t->name);
4386 }
4387 }
4388
4389 /* Copy the template we found. */
4390 i.tm = *t;
4391
4392 if (addr_prefix_disp != -1)
4393 i.tm.operand_types[addr_prefix_disp]
4394 = operand_types[addr_prefix_disp];
4395
4396 if (found_reverse_match)
4397 {
4398 /* If we found a reverse match we must alter the opcode
4399 direction bit. found_reverse_match holds bits to change
4400 (different for int & float insns). */
4401
4402 i.tm.base_opcode ^= found_reverse_match;
4403
4404 i.tm.operand_types[0] = operand_types[1];
4405 i.tm.operand_types[1] = operand_types[0];
4406 }
4407
4408 return t;
4409 }
4410
4411 static int
4412 check_string (void)
4413 {
4414 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4415 if (i.tm.operand_types[mem_op].bitfield.esseg)
4416 {
4417 if (i.seg[0] != NULL && i.seg[0] != &es)
4418 {
4419 as_bad (_("`%s' operand %d must use `%ses' segment"),
4420 i.tm.name,
4421 mem_op + 1,
4422 register_prefix);
4423 return 0;
4424 }
4425 /* There's only ever one segment override allowed per instruction.
4426 This instruction possibly has a legal segment override on the
4427 second operand, so copy the segment to where non-string
4428 instructions store it, allowing common code. */
4429 i.seg[0] = i.seg[1];
4430 }
4431 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4432 {
4433 if (i.seg[1] != NULL && i.seg[1] != &es)
4434 {
4435 as_bad (_("`%s' operand %d must use `%ses' segment"),
4436 i.tm.name,
4437 mem_op + 2,
4438 register_prefix);
4439 return 0;
4440 }
4441 }
4442 return 1;
4443 }
4444
/* Determine and validate the instruction mnemonic suffix (b/w/l/q/x/y)
   for the matched template, inventing one from the register operands
   when none was given, then adjust the encoding accordingly: set the
   W bit for non-byte forms, add operand/address size prefixes where
   needed, request REX.W for 64-bit operands, and resize float insns.
   Returns 1 on success, 0 after reporting an error with as_bad.  */

static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)	/* crc32 with reg source */
	    {
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)  /* crc32 byte form */
	    {
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan register operands from last to first; the first
		 non-port register found decides the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
	       || i.suffix == YMMWORD_MNEM_SUFFIX)
	{
	  /* Skip if the instruction has x/y suffix.  match_template
	     should check if it is a valid suffix.  */
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Pick the default suffix for a suffix-less jump/descriptor-table
	 insn from the current code size.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  /* Bit mask of the suffixes this template accepts; more than
	     one bit set means the operand size is ambiguous.  */
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* There are more than suffix matches.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  cmpxchg8b is also a special case.  */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
4717
/* Validate register operands against a `b' (byte) insn suffix.
   8 bit registers always pass; wider general registers with
   reg_num < 4 are accepted (warning outside 64-bit mode, error in
   64-bit mode unless the template marks the operand InOutPortReg);
   any other register class is rejected.  Returns 1 if OK, 0 after
   reporting an error.  */

static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4)
	{
	  /* Prohibit these changes in the 64bit mode, since the
	     lowering is more complicated.  */
	  if (flag_code == CODE_64BIT
	      && !i.tm.operand_types[op].bitfield.inoutportreg)
	    {
	      as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		      register_prefix, i.op[op].regs->reg_name,
		      i.suffix);
	      return 0;
	    }
#if REGISTER_WARNINGS
	  /* Point at the corresponding byte register by offsetting
	     into the register table from the 16 or 32 bit name.  */
	  if (!quiet_warnings
	      && !i.tm.operand_types[op].bitfield.inoutportreg)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
4789
/* Validate register operands against an `l' (32 bit) insn suffix.
   Byte registers are rejected unless the template requires them; a
   16 bit register where the template wants 32 bit is an error in
   64-bit mode, otherwise a warning; a 64 bit register is accepted
   only via the Intel-syntax ToQword conversion (which upgrades the
   suffix to `q'), otherwise it is an error.  Returns 1 if OK, 0
   after reporting an error.  */

static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	else
	  as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		   register_prefix,
		   (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		   register_prefix,
		   i.op[op].regs->reg_name,
		   i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
4857
4858 static int
4859 check_qword_reg (void)
4860 {
4861 int op;
4862
4863 for (op = i.operands; --op >= 0; )
4864 /* Reject eight bit registers, except where the template requires
4865 them. (eg. movzb) */
4866 if (i.types[op].bitfield.reg8
4867 && (i.tm.operand_types[op].bitfield.reg16
4868 || i.tm.operand_types[op].bitfield.reg32
4869 || i.tm.operand_types[op].bitfield.acc))
4870 {
4871 as_bad (_("`%s%s' not allowed with `%s%c'"),
4872 register_prefix,
4873 i.op[op].regs->reg_name,
4874 i.tm.name,
4875 i.suffix);
4876 return 0;
4877 }
4878 /* Warn if the e prefix on a general reg is missing. */
4879 else if ((i.types[op].bitfield.reg16
4880 || i.types[op].bitfield.reg32)
4881 && (i.tm.operand_types[op].bitfield.reg32
4882 || i.tm.operand_types[op].bitfield.acc))
4883 {
4884 /* Prohibit these changes in the 64bit mode, since the
4885 lowering is more complicated. */
4886 if (intel_syntax
4887 && i.tm.opcode_modifier.todword
4888 && !i.types[0].bitfield.regxmm)
4889 {
4890 /* Convert to DWORD. We don't want REX byte. */
4891 i.suffix = LONG_MNEM_SUFFIX;
4892 }
4893 else
4894 {
4895 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4896 register_prefix, i.op[op].regs->reg_name,
4897 i.suffix);
4898 return 0;
4899 }
4900 }
4901 return 1;
4902 }
4903
4904 static int
4905 check_word_reg (void)
4906 {
4907 int op;
4908 for (op = i.operands; --op >= 0;)
4909 /* Reject eight bit registers, except where the template requires
4910 them. (eg. movzb) */
4911 if (i.types[op].bitfield.reg8
4912 && (i.tm.operand_types[op].bitfield.reg16
4913 || i.tm.operand_types[op].bitfield.reg32
4914 || i.tm.operand_types[op].bitfield.acc))
4915 {
4916 as_bad (_("`%s%s' not allowed with `%s%c'"),
4917 register_prefix,
4918 i.op[op].regs->reg_name,
4919 i.tm.name,
4920 i.suffix);
4921 return 0;
4922 }
4923 /* Warn if the e prefix on a general reg is present. */
4924 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4925 && i.types[op].bitfield.reg32
4926 && (i.tm.operand_types[op].bitfield.reg16
4927 || i.tm.operand_types[op].bitfield.acc))
4928 {
4929 /* Prohibit these changes in the 64bit mode, since the
4930 lowering is more complicated. */
4931 if (flag_code == CODE_64BIT)
4932 {
4933 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4934 register_prefix, i.op[op].regs->reg_name,
4935 i.suffix);
4936 return 0;
4937 }
4938 else
4939 #if REGISTER_WARNINGS
4940 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4941 register_prefix,
4942 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4943 register_prefix,
4944 i.op[op].regs->reg_name,
4945 i.suffix);
4946 #endif
4947 }
4948 return 1;
4949 }
4950
4951 static int
4952 update_imm (unsigned int j)
4953 {
4954 i386_operand_type overlap = i.types[j];
4955 if ((overlap.bitfield.imm8
4956 || overlap.bitfield.imm8s
4957 || overlap.bitfield.imm16
4958 || overlap.bitfield.imm32
4959 || overlap.bitfield.imm32s
4960 || overlap.bitfield.imm64)
4961 && !operand_type_equal (&overlap, &imm8)
4962 && !operand_type_equal (&overlap, &imm8s)
4963 && !operand_type_equal (&overlap, &imm16)
4964 && !operand_type_equal (&overlap, &imm32)
4965 && !operand_type_equal (&overlap, &imm32s)
4966 && !operand_type_equal (&overlap, &imm64))
4967 {
4968 if (i.suffix)
4969 {
4970 i386_operand_type temp;
4971
4972 operand_type_set (&temp, 0);
4973 if (i.suffix == BYTE_MNEM_SUFFIX)
4974 {
4975 temp.bitfield.imm8 = overlap.bitfield.imm8;
4976 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4977 }
4978 else if (i.suffix == WORD_MNEM_SUFFIX)
4979 temp.bitfield.imm16 = overlap.bitfield.imm16;
4980 else if (i.suffix == QWORD_MNEM_SUFFIX)
4981 {
4982 temp.bitfield.imm64 = overlap.bitfield.imm64;
4983 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4984 }
4985 else
4986 temp.bitfield.imm32 = overlap.bitfield.imm32;
4987 overlap = temp;
4988 }
4989 else if (operand_type_equal (&overlap, &imm16_32_32s)
4990 || operand_type_equal (&overlap, &imm16_32)
4991 || operand_type_equal (&overlap, &imm16_32s))
4992 {
4993 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4994 overlap = imm16;
4995 else
4996 overlap = imm32s;
4997 }
4998 if (!operand_type_equal (&overlap, &imm8)
4999 && !operand_type_equal (&overlap, &imm8s)
5000 && !operand_type_equal (&overlap, &imm16)
5001 && !operand_type_equal (&overlap, &imm32)
5002 && !operand_type_equal (&overlap, &imm32s)
5003 && !operand_type_equal (&overlap, &imm64))
5004 {
5005 as_bad (_("no instruction mnemonic suffix given; "
5006 "can't determine immediate size"));
5007 return 0;
5008 }
5009 }
5010 i.types[j] = overlap;
5011
5012 return 1;
5013 }
5014
5015 static int
5016 finalize_imm (void)
5017 {
5018 unsigned int j, n;
5019
5020 /* Update the first 2 immediate operands. */
5021 n = i.operands > 2 ? 2 : i.operands;
5022 if (n)
5023 {
5024 for (j = 0; j < n; j++)
5025 if (update_imm (j) == 0)
5026 return 0;
5027
5028 /* The 3rd operand can't be immediate operand. */
5029 gas_assert (operand_type_check (i.types[2], imm) == 0);
5030 }
5031
5032 return 1;
5033 }
5034
5035 static int
5036 bad_implicit_operand (int xmm)
5037 {
5038 const char *ireg = xmm ? "xmm0" : "ymm0";
5039
5040 if (intel_syntax)
5041 as_bad (_("the last operand of `%s' must be `%s%s'"),
5042 i.tm.name, register_prefix, ireg);
5043 else
5044 as_bad (_("the first operand of `%s' must be `%s%s'"),
5045 i.tm.name, register_prefix, ireg);
5046 return 0;
5047 }
5048
/* Massage the collected operands into their final encoded form:
   duplicate or insert/remove implicit xmm0/ymm0 operands for SSE2AVX
   and FirstXmm0 templates, synthesize the extra register operand for
   RegKludge insns, fold short-form register operands into the base
   opcode, build the ModRM/SIB bytes where the template needs them,
   and emit a segment override prefix when the explicit segment is
   not the default.  Returns 1 on success, 0 after reporting an
   error.  */

static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (i.op[0].regs->reg_num != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* One slot for the inserted xmm0, one for the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
	duplicate:
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)));
      if (i.op[0].regs->reg_num != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand by shifting the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  /* Segment register number goes in bits 3-5 of the opcode.  */
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5270
5271 static const seg_entry *
5272 build_modrm_byte (void)
5273 {
5274 const seg_entry *default_seg = 0;
5275 unsigned int source, dest;
5276 int vex_3_sources;
5277
5278 /* The first operand of instructions with VEX prefix and 3 sources
5279 must be VEX_Imm4. */
5280 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5281 if (vex_3_sources)
5282 {
5283 unsigned int nds, reg_slot;
5284 expressionS *exp;
5285
5286 if (i.tm.opcode_modifier.veximmext
5287 && i.tm.opcode_modifier.immext)
5288 {
5289 dest = i.operands - 2;
5290 gas_assert (dest == 3);
5291 }
5292 else
5293 dest = i.operands - 1;
5294 nds = dest - 1;
5295
5296 /* There are 2 kinds of instructions:
5297 1. 5 operands: 4 register operands or 3 register operands
5298 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5299 VexW0 or VexW1. The destination must be either XMM or YMM
5300 register.
5301 2. 4 operands: 4 register operands or 3 register operands
5302 plus 1 memory operand, VexXDS, and VexImmExt */
5303 gas_assert ((i.reg_operands == 4
5304 || (i.reg_operands == 3 && i.mem_operands == 1))
5305 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5306 && (i.tm.opcode_modifier.veximmext
5307 || (i.imm_operands == 1
5308 && i.types[0].bitfield.vec_imm4
5309 && (i.tm.opcode_modifier.vexw == VEXW0
5310 || i.tm.opcode_modifier.vexw == VEXW1)
5311 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5312 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5313
5314 if (i.imm_operands == 0)
5315 {
5316 /* When there is no immediate operand, generate an 8bit
5317 immediate operand to encode the first operand. */
5318 exp = &im_expressions[i.imm_operands++];
5319 i.op[i.operands].imms = exp;
5320 i.types[i.operands] = imm8;
5321 i.operands++;
5322 /* If VexW1 is set, the first operand is the source and
5323 the second operand is encoded in the immediate operand. */
5324 if (i.tm.opcode_modifier.vexw == VEXW1)
5325 {
5326 source = 0;
5327 reg_slot = 1;
5328 }
5329 else
5330 {
5331 source = 1;
5332 reg_slot = 0;
5333 }
5334
5335 /* FMA swaps REG and NDS. */
5336 if (i.tm.cpu_flags.bitfield.cpufma)
5337 {
5338 unsigned int tmp;
5339 tmp = reg_slot;
5340 reg_slot = nds;
5341 nds = tmp;
5342 }
5343
5344 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5345 &regxmm)
5346 || operand_type_equal (&i.tm.operand_types[reg_slot],
5347 &regymm));
5348 exp->X_op = O_constant;
5349 exp->X_add_number
5350 = ((i.op[reg_slot].regs->reg_num
5351 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5352 << 4);
5353 }
5354 else
5355 {
5356 unsigned int imm_slot;
5357
5358 if (i.tm.opcode_modifier.vexw == VEXW0)
5359 {
5360 /* If VexW0 is set, the third operand is the source and
5361 the second operand is encoded in the immediate
5362 operand. */
5363 source = 2;
5364 reg_slot = 1;
5365 }
5366 else
5367 {
5368 /* VexW1 is set, the second operand is the source and
5369 the third operand is encoded in the immediate
5370 operand. */
5371 source = 1;
5372 reg_slot = 2;
5373 }
5374
5375 if (i.tm.opcode_modifier.immext)
5376 {
5377 /* When ImmExt is set, the immdiate byte is the last
5378 operand. */
5379 imm_slot = i.operands - 1;
5380 source--;
5381 reg_slot--;
5382 }
5383 else
5384 {
5385 imm_slot = 0;
5386
5387 /* Turn on Imm8 so that output_imm will generate it. */
5388 i.types[imm_slot].bitfield.imm8 = 1;
5389 }
5390
5391 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5392 &regxmm)
5393 || operand_type_equal (&i.tm.operand_types[reg_slot],
5394 &regymm));
5395 i.op[imm_slot].imms->X_add_number
5396 |= ((i.op[reg_slot].regs->reg_num
5397 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5398 << 4);
5399 }
5400
5401 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5402 || operand_type_equal (&i.tm.operand_types[nds],
5403 &regymm));
5404 i.vex.register_specifier = i.op[nds].regs;
5405 }
5406 else
5407 source = dest = 0;
5408
5409 /* i.reg_operands MUST be the number of real register operands;
5410 implicit registers do not count. If there are 3 register
5411 operands, it must be a instruction with VexNDS. For a
5412 instruction with VexNDD, the destination register is encoded
5413 in VEX prefix. If there are 4 register operands, it must be
5414 a instruction with VEX prefix and 3 sources. */
5415 if (i.mem_operands == 0
5416 && ((i.reg_operands == 2
5417 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5418 || (i.reg_operands == 3
5419 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5420 || (i.reg_operands == 4 && vex_3_sources)))
5421 {
5422 switch (i.operands)
5423 {
5424 case 2:
5425 source = 0;
5426 break;
5427 case 3:
5428 /* When there are 3 operands, one of them may be immediate,
5429 which may be the first or the last operand. Otherwise,
5430 the first operand must be shift count register (cl) or it
5431 is an instruction with VexNDS. */
5432 gas_assert (i.imm_operands == 1
5433 || (i.imm_operands == 0
5434 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5435 || i.types[0].bitfield.shiftcount)));
5436 if (operand_type_check (i.types[0], imm)
5437 || i.types[0].bitfield.shiftcount)
5438 source = 1;
5439 else
5440 source = 0;
5441 break;
5442 case 4:
5443 /* When there are 4 operands, the first two must be 8bit
5444 immediate operands. The source operand will be the 3rd
5445 one.
5446
5447 For instructions with VexNDS, if the first operand
5448 an imm8, the source operand is the 2nd one. If the last
5449 operand is imm8, the source operand is the first one. */
5450 gas_assert ((i.imm_operands == 2
5451 && i.types[0].bitfield.imm8
5452 && i.types[1].bitfield.imm8)
5453 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5454 && i.imm_operands == 1
5455 && (i.types[0].bitfield.imm8
5456 || i.types[i.operands - 1].bitfield.imm8)));
5457 if (i.imm_operands == 2)
5458 source = 2;
5459 else
5460 {
5461 if (i.types[0].bitfield.imm8)
5462 source = 1;
5463 else
5464 source = 0;
5465 }
5466 break;
5467 case 5:
5468 break;
5469 default:
5470 abort ();
5471 }
5472
5473 if (!vex_3_sources)
5474 {
5475 dest = source + 1;
5476
5477 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5478 {
5479 /* For instructions with VexNDS, the register-only
5480 source operand must be 32/64bit integer, XMM or
5481 YMM register. It is encoded in VEX prefix. We
5482 need to clear RegMem bit before calling
5483 operand_type_equal. */
5484
5485 i386_operand_type op;
5486 unsigned int vvvv;
5487
5488 /* Check register-only source operand when two source
5489 operands are swapped. */
5490 if (!i.tm.operand_types[source].bitfield.baseindex
5491 && i.tm.operand_types[dest].bitfield.baseindex)
5492 {
5493 vvvv = source;
5494 source = dest;
5495 }
5496 else
5497 vvvv = dest;
5498
5499 op = i.tm.operand_types[vvvv];
5500 op.bitfield.regmem = 0;
5501 if ((dest + 1) >= i.operands
5502 || (op.bitfield.reg32 != 1
5503 && !op.bitfield.reg64 != 1
5504 && !operand_type_equal (&op, &regxmm)
5505 && !operand_type_equal (&op, &regymm)))
5506 abort ();
5507 i.vex.register_specifier = i.op[vvvv].regs;
5508 dest++;
5509 }
5510 }
5511
5512 i.rm.mode = 3;
5513 /* One of the register operands will be encoded in the i.tm.reg
5514 field, the other in the combined i.tm.mode and i.tm.regmem
5515 fields. If no form of this instruction supports a memory
5516 destination operand, then we assume the source operand may
5517 sometimes be a memory operand and so we need to store the
5518 destination in the i.rm.reg field. */
5519 if (!i.tm.operand_types[dest].bitfield.regmem
5520 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5521 {
5522 i.rm.reg = i.op[dest].regs->reg_num;
5523 i.rm.regmem = i.op[source].regs->reg_num;
5524 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5525 i.rex |= REX_R;
5526 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5527 i.rex |= REX_B;
5528 }
5529 else
5530 {
5531 i.rm.reg = i.op[source].regs->reg_num;
5532 i.rm.regmem = i.op[dest].regs->reg_num;
5533 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5534 i.rex |= REX_B;
5535 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5536 i.rex |= REX_R;
5537 }
5538 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5539 {
5540 if (!i.types[0].bitfield.control
5541 && !i.types[1].bitfield.control)
5542 abort ();
5543 i.rex &= ~(REX_R | REX_B);
5544 add_prefix (LOCK_PREFIX_OPCODE);
5545 }
5546 }
5547 else
5548 { /* If it's not 2 reg operands... */
5549 unsigned int mem;
5550
5551 if (i.mem_operands)
5552 {
5553 unsigned int fake_zero_displacement = 0;
5554 unsigned int op;
5555
5556 for (op = 0; op < i.operands; op++)
5557 if (operand_type_check (i.types[op], anymem))
5558 break;
5559 gas_assert (op < i.operands);
5560
5561 if (i.tm.opcode_modifier.vecsib)
5562 {
5563 if (i.index_reg->reg_num == RegEiz
5564 || i.index_reg->reg_num == RegRiz)
5565 abort ();
5566
5567 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5568 if (!i.base_reg)
5569 {
5570 i.sib.base = NO_BASE_REGISTER;
5571 i.sib.scale = i.log2_scale_factor;
5572 i.types[op].bitfield.disp8 = 0;
5573 i.types[op].bitfield.disp16 = 0;
5574 i.types[op].bitfield.disp64 = 0;
5575 if (flag_code != CODE_64BIT)
5576 {
5577 /* Must be 32 bit */
5578 i.types[op].bitfield.disp32 = 1;
5579 i.types[op].bitfield.disp32s = 0;
5580 }
5581 else
5582 {
5583 i.types[op].bitfield.disp32 = 0;
5584 i.types[op].bitfield.disp32s = 1;
5585 }
5586 }
5587 i.sib.index = i.index_reg->reg_num;
5588 if ((i.index_reg->reg_flags & RegRex) != 0)
5589 i.rex |= REX_X;
5590 }
5591
5592 default_seg = &ds;
5593
5594 if (i.base_reg == 0)
5595 {
5596 i.rm.mode = 0;
5597 if (!i.disp_operands)
5598 {
5599 fake_zero_displacement = 1;
5600 /* Instructions with VSIB byte need 32bit displacement
5601 if there is no base register. */
5602 if (i.tm.opcode_modifier.vecsib)
5603 i.types[op].bitfield.disp32 = 1;
5604 }
5605 if (i.index_reg == 0)
5606 {
5607 gas_assert (!i.tm.opcode_modifier.vecsib);
5608 /* Operand is just <disp> */
5609 if (flag_code == CODE_64BIT)
5610 {
5611 /* 64bit mode overwrites the 32bit absolute
5612 addressing by RIP relative addressing and
5613 absolute addressing is encoded by one of the
5614 redundant SIB forms. */
5615 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5616 i.sib.base = NO_BASE_REGISTER;
5617 i.sib.index = NO_INDEX_REGISTER;
5618 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5619 ? disp32s : disp32);
5620 }
5621 else if ((flag_code == CODE_16BIT)
5622 ^ (i.prefix[ADDR_PREFIX] != 0))
5623 {
5624 i.rm.regmem = NO_BASE_REGISTER_16;
5625 i.types[op] = disp16;
5626 }
5627 else
5628 {
5629 i.rm.regmem = NO_BASE_REGISTER;
5630 i.types[op] = disp32;
5631 }
5632 }
5633 else if (!i.tm.opcode_modifier.vecsib)
5634 {
5635 /* !i.base_reg && i.index_reg */
5636 if (i.index_reg->reg_num == RegEiz
5637 || i.index_reg->reg_num == RegRiz)
5638 i.sib.index = NO_INDEX_REGISTER;
5639 else
5640 i.sib.index = i.index_reg->reg_num;
5641 i.sib.base = NO_BASE_REGISTER;
5642 i.sib.scale = i.log2_scale_factor;
5643 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5644 i.types[op].bitfield.disp8 = 0;
5645 i.types[op].bitfield.disp16 = 0;
5646 i.types[op].bitfield.disp64 = 0;
5647 if (flag_code != CODE_64BIT)
5648 {
5649 /* Must be 32 bit */
5650 i.types[op].bitfield.disp32 = 1;
5651 i.types[op].bitfield.disp32s = 0;
5652 }
5653 else
5654 {
5655 i.types[op].bitfield.disp32 = 0;
5656 i.types[op].bitfield.disp32s = 1;
5657 }
5658 if ((i.index_reg->reg_flags & RegRex) != 0)
5659 i.rex |= REX_X;
5660 }
5661 }
5662 /* RIP addressing for 64bit mode. */
5663 else if (i.base_reg->reg_num == RegRip ||
5664 i.base_reg->reg_num == RegEip)
5665 {
5666 gas_assert (!i.tm.opcode_modifier.vecsib);
5667 i.rm.regmem = NO_BASE_REGISTER;
5668 i.types[op].bitfield.disp8 = 0;
5669 i.types[op].bitfield.disp16 = 0;
5670 i.types[op].bitfield.disp32 = 0;
5671 i.types[op].bitfield.disp32s = 1;
5672 i.types[op].bitfield.disp64 = 0;
5673 i.flags[op] |= Operand_PCrel;
5674 if (! i.disp_operands)
5675 fake_zero_displacement = 1;
5676 }
5677 else if (i.base_reg->reg_type.bitfield.reg16)
5678 {
5679 gas_assert (!i.tm.opcode_modifier.vecsib);
5680 switch (i.base_reg->reg_num)
5681 {
5682 case 3: /* (%bx) */
5683 if (i.index_reg == 0)
5684 i.rm.regmem = 7;
5685 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5686 i.rm.regmem = i.index_reg->reg_num - 6;
5687 break;
5688 case 5: /* (%bp) */
5689 default_seg = &ss;
5690 if (i.index_reg == 0)
5691 {
5692 i.rm.regmem = 6;
5693 if (operand_type_check (i.types[op], disp) == 0)
5694 {
5695 /* fake (%bp) into 0(%bp) */
5696 i.types[op].bitfield.disp8 = 1;
5697 fake_zero_displacement = 1;
5698 }
5699 }
5700 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5701 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5702 break;
5703 default: /* (%si) -> 4 or (%di) -> 5 */
5704 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5705 }
5706 i.rm.mode = mode_from_disp_size (i.types[op]);
5707 }
5708 else /* i.base_reg and 32/64 bit mode */
5709 {
5710 if (flag_code == CODE_64BIT
5711 && operand_type_check (i.types[op], disp))
5712 {
5713 i386_operand_type temp;
5714 operand_type_set (&temp, 0);
5715 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5716 i.types[op] = temp;
5717 if (i.prefix[ADDR_PREFIX] == 0)
5718 i.types[op].bitfield.disp32s = 1;
5719 else
5720 i.types[op].bitfield.disp32 = 1;
5721 }
5722
5723 if (!i.tm.opcode_modifier.vecsib)
5724 i.rm.regmem = i.base_reg->reg_num;
5725 if ((i.base_reg->reg_flags & RegRex) != 0)
5726 i.rex |= REX_B;
5727 i.sib.base = i.base_reg->reg_num;
5728 /* x86-64 ignores REX prefix bit here to avoid decoder
5729 complications. */
5730 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5731 {
5732 default_seg = &ss;
5733 if (i.disp_operands == 0)
5734 {
5735 fake_zero_displacement = 1;
5736 i.types[op].bitfield.disp8 = 1;
5737 }
5738 }
5739 else if (i.base_reg->reg_num == ESP_REG_NUM)
5740 {
5741 default_seg = &ss;
5742 }
5743 i.sib.scale = i.log2_scale_factor;
5744 if (i.index_reg == 0)
5745 {
5746 gas_assert (!i.tm.opcode_modifier.vecsib);
5747 /* <disp>(%esp) becomes two byte modrm with no index
5748 register. We've already stored the code for esp
5749 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5750 Any base register besides %esp will not use the
5751 extra modrm byte. */
5752 i.sib.index = NO_INDEX_REGISTER;
5753 }
5754 else if (!i.tm.opcode_modifier.vecsib)
5755 {
5756 if (i.index_reg->reg_num == RegEiz
5757 || i.index_reg->reg_num == RegRiz)
5758 i.sib.index = NO_INDEX_REGISTER;
5759 else
5760 i.sib.index = i.index_reg->reg_num;
5761 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5762 if ((i.index_reg->reg_flags & RegRex) != 0)
5763 i.rex |= REX_X;
5764 }
5765
5766 if (i.disp_operands
5767 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5768 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5769 i.rm.mode = 0;
5770 else
5771 {
5772 if (!fake_zero_displacement
5773 && !i.disp_operands
5774 && i.disp_encoding)
5775 {
5776 fake_zero_displacement = 1;
5777 if (i.disp_encoding == disp_encoding_8bit)
5778 i.types[op].bitfield.disp8 = 1;
5779 else
5780 i.types[op].bitfield.disp32 = 1;
5781 }
5782 i.rm.mode = mode_from_disp_size (i.types[op]);
5783 }
5784 }
5785
5786 if (fake_zero_displacement)
5787 {
5788 /* Fakes a zero displacement assuming that i.types[op]
5789 holds the correct displacement size. */
5790 expressionS *exp;
5791
5792 gas_assert (i.op[op].disps == 0);
5793 exp = &disp_expressions[i.disp_operands++];
5794 i.op[op].disps = exp;
5795 exp->X_op = O_constant;
5796 exp->X_add_number = 0;
5797 exp->X_add_symbol = (symbolS *) 0;
5798 exp->X_op_symbol = (symbolS *) 0;
5799 }
5800
5801 mem = op;
5802 }
5803 else
5804 mem = ~0;
5805
5806 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5807 {
5808 if (operand_type_check (i.types[0], imm))
5809 i.vex.register_specifier = NULL;
5810 else
5811 {
5812 /* VEX.vvvv encodes one of the sources when the first
5813 operand is not an immediate. */
5814 if (i.tm.opcode_modifier.vexw == VEXW0)
5815 i.vex.register_specifier = i.op[0].regs;
5816 else
5817 i.vex.register_specifier = i.op[1].regs;
5818 }
5819
5820 /* Destination is a XMM register encoded in the ModRM.reg
5821 and VEX.R bit. */
5822 i.rm.reg = i.op[2].regs->reg_num;
5823 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5824 i.rex |= REX_R;
5825
5826 /* ModRM.rm and VEX.B encodes the other source. */
5827 if (!i.mem_operands)
5828 {
5829 i.rm.mode = 3;
5830
5831 if (i.tm.opcode_modifier.vexw == VEXW0)
5832 i.rm.regmem = i.op[1].regs->reg_num;
5833 else
5834 i.rm.regmem = i.op[0].regs->reg_num;
5835
5836 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5837 i.rex |= REX_B;
5838 }
5839 }
5840 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5841 {
5842 i.vex.register_specifier = i.op[2].regs;
5843 if (!i.mem_operands)
5844 {
5845 i.rm.mode = 3;
5846 i.rm.regmem = i.op[1].regs->reg_num;
5847 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5848 i.rex |= REX_B;
5849 }
5850 }
5851 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5852 (if any) based on i.tm.extension_opcode. Again, we must be
5853 careful to make sure that segment/control/debug/test/MMX
5854 registers are coded into the i.rm.reg field. */
5855 else if (i.reg_operands)
5856 {
5857 unsigned int op;
5858 unsigned int vex_reg = ~0;
5859
5860 for (op = 0; op < i.operands; op++)
5861 if (i.types[op].bitfield.reg8
5862 || i.types[op].bitfield.reg16
5863 || i.types[op].bitfield.reg32
5864 || i.types[op].bitfield.reg64
5865 || i.types[op].bitfield.regmmx
5866 || i.types[op].bitfield.regxmm
5867 || i.types[op].bitfield.regymm
5868 || i.types[op].bitfield.sreg2
5869 || i.types[op].bitfield.sreg3
5870 || i.types[op].bitfield.control
5871 || i.types[op].bitfield.debug
5872 || i.types[op].bitfield.test)
5873 break;
5874
5875 if (vex_3_sources)
5876 op = dest;
5877 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5878 {
5879 /* For instructions with VexNDS, the register-only
5880 source operand is encoded in VEX prefix. */
5881 gas_assert (mem != (unsigned int) ~0);
5882
5883 if (op > mem)
5884 {
5885 vex_reg = op++;
5886 gas_assert (op < i.operands);
5887 }
5888 else
5889 {
5890 /* Check register-only source operand when two source
5891 operands are swapped. */
5892 if (!i.tm.operand_types[op].bitfield.baseindex
5893 && i.tm.operand_types[op + 1].bitfield.baseindex)
5894 {
5895 vex_reg = op;
5896 op += 2;
5897 gas_assert (mem == (vex_reg + 1)
5898 && op < i.operands);
5899 }
5900 else
5901 {
5902 vex_reg = op + 1;
5903 gas_assert (vex_reg < i.operands);
5904 }
5905 }
5906 }
5907 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5908 {
5909 /* For instructions with VexNDD, the register destination
5910 is encoded in VEX prefix. */
5911 if (i.mem_operands == 0)
5912 {
5913 /* There is no memory operand. */
5914 gas_assert ((op + 2) == i.operands);
5915 vex_reg = op + 1;
5916 }
5917 else
5918 {
5919 /* There are only 2 operands. */
5920 gas_assert (op < 2 && i.operands == 2);
5921 vex_reg = 1;
5922 }
5923 }
5924 else
5925 gas_assert (op < i.operands);
5926
5927 if (vex_reg != (unsigned int) ~0)
5928 {
5929 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5930
5931 if (type->bitfield.reg32 != 1
5932 && type->bitfield.reg64 != 1
5933 && !operand_type_equal (type, &regxmm)
5934 && !operand_type_equal (type, &regymm))
5935 abort ();
5936
5937 i.vex.register_specifier = i.op[vex_reg].regs;
5938 }
5939
5940 /* Don't set OP operand twice. */
5941 if (vex_reg != op)
5942 {
5943 /* If there is an extension opcode to put here, the
5944 register number must be put into the regmem field. */
5945 if (i.tm.extension_opcode != None)
5946 {
5947 i.rm.regmem = i.op[op].regs->reg_num;
5948 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5949 i.rex |= REX_B;
5950 }
5951 else
5952 {
5953 i.rm.reg = i.op[op].regs->reg_num;
5954 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5955 i.rex |= REX_R;
5956 }
5957 }
5958
5959 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5960 must set it to 3 to indicate this is a register operand
5961 in the regmem field. */
5962 if (!i.mem_operands)
5963 i.rm.mode = 3;
5964 }
5965
5966 /* Fill in i.rm.reg field with extension opcode (if any). */
5967 if (i.tm.extension_opcode != None)
5968 i.rm.reg = i.tm.extension_opcode;
5969 }
5970 return default_seg;
5971 }
5972
/* Output a relaxable branch (Jcc / JMP with a symbolic target).  The
   prefixes and first opcode byte go into the fixed part of the
   current frag; the frag is then closed with frag_var so the
   relaxation machinery (md_estimate_size_before_relax and
   md_convert_frag) can later choose the shortest displacement form.  */
static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit {disp32}-style encoding request forces the BIG form
     immediately; otherwise start SMALL and let relaxation grow it.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The operand size prefix flips the displacement width.  */
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol; End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Select the relax subtype: unconditional jump, conditional jump
     on i386+, or the pre-i386 conditional-jump variant.  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
6050
/* Output a jump with a fixed-size displacement (no relaxation):
   loop/jecxz style insns (always a 1 byte displacement) and the
   jumpdword templates.  Emits prefixes, the opcode, and a pc-relative
   fixup for the displacement.  */
static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  /* The operand size prefix toggles the 16/32 bit
	     displacement width.  */
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6125
6126 static void
6127 output_interseg_jump (void)
6128 {
6129 char *p;
6130 int size;
6131 int prefix;
6132 int code16;
6133
6134 code16 = 0;
6135 if (flag_code == CODE_16BIT)
6136 code16 = CODE16;
6137
6138 prefix = 0;
6139 if (i.prefix[DATA_PREFIX] != 0)
6140 {
6141 prefix = 1;
6142 i.prefixes -= 1;
6143 code16 ^= CODE16;
6144 }
6145 if (i.prefix[REX_PREFIX] != 0)
6146 {
6147 prefix++;
6148 i.prefixes -= 1;
6149 }
6150
6151 size = 4;
6152 if (code16)
6153 size = 2;
6154
6155 if (i.prefixes != 0 && !intel_syntax)
6156 as_warn (_("skipping prefixes on this instruction"));
6157
6158 /* 1 opcode; 2 segment; offset */
6159 p = frag_more (prefix + 1 + 2 + size);
6160
6161 if (i.prefix[DATA_PREFIX] != 0)
6162 *p++ = DATA_PREFIX_OPCODE;
6163
6164 if (i.prefix[REX_PREFIX] != 0)
6165 *p++ = i.prefix[REX_PREFIX];
6166
6167 *p++ = i.tm.base_opcode;
6168 if (i.op[1].imms->X_op == O_constant)
6169 {
6170 offsetT n = i.op[1].imms->X_add_number;
6171
6172 if (size == 2
6173 && !fits_in_unsigned_word (n)
6174 && !fits_in_signed_word (n))
6175 {
6176 as_bad (_("16-bit jump out of range"));
6177 return;
6178 }
6179 md_number_to_chars (p, n, size);
6180 }
6181 else
6182 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6183 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6184 if (i.op[0].imms->X_op != O_constant)
6185 as_bad (_("can't handle non absolute segment in `%s'"),
6186 i.tm.name);
6187 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6188 }
6189
/* Assemble the fully-matched instruction described by `i' into the
   current frag.  Branches are dispatched to the dedicated writers;
   everything else gets its prefixes, optional VEX prefix, opcode
   bytes, ModRM/SIB, and finally displacement and immediate fields.  */
static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Since the VEX prefix contains the implicit prefix, we don't
	 need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex)
	{
	  /* A prefix stored in the byte of base_opcode above the
	     actual opcode bytes is peeled off and emitted as a real
	     prefix byte.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* For PadLock insns, don't add the template's
			 REPE prefix again when the user already
			 supplied an explicit rep prefix.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
6343
6344 /* Return the size of the displacement operand N. */
6345
6346 static int
6347 disp_size (unsigned int n)
6348 {
6349 int size = 4;
6350 if (i.types[n].bitfield.disp64)
6351 size = 8;
6352 else if (i.types[n].bitfield.disp8)
6353 size = 1;
6354 else if (i.types[n].bitfield.disp16)
6355 size = 2;
6356 return size;
6357 }
6358
6359 /* Return the size of the immediate operand N. */
6360
6361 static int
6362 imm_size (unsigned int n)
6363 {
6364 int size = 4;
6365 if (i.types[n].bitfield.imm64)
6366 size = 8;
6367 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6368 size = 1;
6369 else if (i.types[n].bitfield.imm16)
6370 size = 2;
6371 return size;
6372 }
6373
6374 static void
6375 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6376 {
6377 char *p;
6378 unsigned int n;
6379
6380 for (n = 0; n < i.operands; n++)
6381 {
6382 if (operand_type_check (i.types[n], disp))
6383 {
6384 if (i.op[n].disps->X_op == O_constant)
6385 {
6386 int size = disp_size (n);
6387 offsetT val;
6388
6389 val = offset_in_range (i.op[n].disps->X_add_number,
6390 size);
6391 p = frag_more (size);
6392 md_number_to_chars (p, val, size);
6393 }
6394 else
6395 {
6396 enum bfd_reloc_code_real reloc_type;
6397 int size = disp_size (n);
6398 int sign = i.types[n].bitfield.disp32s;
6399 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6400
6401 /* We can't have 8 bit displacement here. */
6402 gas_assert (!i.types[n].bitfield.disp8);
6403
6404 /* The PC relative address is computed relative
6405 to the instruction boundary, so in case immediate
6406 fields follows, we need to adjust the value. */
6407 if (pcrel && i.imm_operands)
6408 {
6409 unsigned int n1;
6410 int sz = 0;
6411
6412 for (n1 = 0; n1 < i.operands; n1++)
6413 if (operand_type_check (i.types[n1], imm))
6414 {
6415 /* Only one immediate is allowed for PC
6416 relative address. */
6417 gas_assert (sz == 0);
6418 sz = imm_size (n1);
6419 i.op[n].disps->X_add_number -= sz;
6420 }
6421 /* We should find the immediate. */
6422 gas_assert (sz != 0);
6423 }
6424
6425 p = frag_more (size);
6426 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6427 if (GOT_symbol
6428 && GOT_symbol == i.op[n].disps->X_add_symbol
6429 && (((reloc_type == BFD_RELOC_32
6430 || reloc_type == BFD_RELOC_X86_64_32S
6431 || (reloc_type == BFD_RELOC_64
6432 && object_64bit))
6433 && (i.op[n].disps->X_op == O_symbol
6434 || (i.op[n].disps->X_op == O_add
6435 && ((symbol_get_value_expression
6436 (i.op[n].disps->X_op_symbol)->X_op)
6437 == O_subtract))))
6438 || reloc_type == BFD_RELOC_32_PCREL))
6439 {
6440 offsetT add;
6441
6442 if (insn_start_frag == frag_now)
6443 add = (p - frag_now->fr_literal) - insn_start_off;
6444 else
6445 {
6446 fragS *fr;
6447
6448 add = insn_start_frag->fr_fix - insn_start_off;
6449 for (fr = insn_start_frag->fr_next;
6450 fr && fr != frag_now; fr = fr->fr_next)
6451 add += fr->fr_fix;
6452 add += p - frag_now->fr_literal;
6453 }
6454
6455 if (!object_64bit)
6456 {
6457 reloc_type = BFD_RELOC_386_GOTPC;
6458 i.op[n].imms->X_add_number += add;
6459 }
6460 else if (reloc_type == BFD_RELOC_64)
6461 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6462 else
6463 /* Don't do the adjustment for x86-64, as there
6464 the pcrel addressing is relative to the _next_
6465 insn, and that is taken care of in other code. */
6466 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6467 }
6468 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6469 i.op[n].disps, pcrel, reloc_type);
6470 }
6471 }
6472 }
6473 }
6474
/* Emit the immediate operand field(s): absolute constants are
   written out directly, anything symbolic becomes a fixup.  As with
   output_disp, INSN_START_FRAG/INSN_START_OFF locate the start of
   the insn so _GLOBAL_OFFSET_TABLE_+[.-label] expressions can be
   rewritten as GOTPC relocs with the addend measured from the
   immediate field itself (see the long comment below).  */
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* Request a signed reloc for Imm32S immediates used
		 with a quad-sized operation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /* This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       * 	call	.L66
	       * .L66:
	       * 	popl	%ebx
	       * 	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       * The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       * Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       * 	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* Distance from the start of the insn to this
		     immediate field, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
6597 \f
/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.
   GOT_RELOC records the relocation selected when an @-suffix was
   parsed (see lex_got); it is one-shot state consumed by
   x86_cons_fix_new.  CONS_SIGN is a signedness hint passed through
   to reloc (); it starts out as -1 (semantics are defined by
   reloc (), which is not visible in this chunk).  */
static enum bfd_reloc_code_real got_reloc = NO_RELOC;
static int cons_sign = -1;
6602
/* Create the fixup for a data directive (.long etc.) of LEN bytes at
   FRAG/OFF holding expression EXP, honouring any pending @-suffix
   relocation recorded in got_reloc.  */
void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
		  expressionS *exp)
{
  /* Map the data size and the pending reloc request to the final
     relocation type.  */
  enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);

  /* Consume the one-shot reloc so it can't leak into the next cons.  */
  got_reloc = NO_RELOC;

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      r = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, off, len, exp, 0, r);
}
6621
6622 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6623 || defined (LEX_AT)
6624 # define lex_got(reloc, adjust, types) NULL
6625 #else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
    /* NB: rel[0] is the 32-bit relocation, rel[1] the 64-bit one; a
       _dummy_first_bfd_reloc_code_real entry marks the suffix as
       unsupported for that object size.  Matching below is by
       prefix, so a name that is a prefix of another (PLT/PLTOFF,
       GOT/GOTOFF, ...) must appear after the longer names.  */
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Look for the '@' that would start a reloc suffix; stop at the
     end of the operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      if (GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
6776 #endif
6777
/* Target hook for the data directives (.long, .quad, ...): parse one
   expression of SIZE bytes into EXP, handling x86-specific @-suffixed
   relocation operators (@GOTOFF, @PLT, ...) via lex_got.  Any reloc
   found is recorded in the file-scope `got_reloc'.  */

void
x86_cons (expressionS *exp, int size)
{
  /* Temporarily negate intel_syntax for the duration of the parse;
     presumably helpers reached from expression () test the sign to
     detect that they are called from a data directive rather than an
     instruction operand -- TODO(review): confirm against the rest of
     the file.  Restored (and re-tested) below.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  /* Reloc operators only make sense for 32-bit data, or 64-bit data
     when producing a 64-bit object.  */
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      /* lex_got returns a freshly allocated copy of the line with the
	 @-operator stripped out (or NULL if none was present); ADJUST
	 receives the number of characters removed.  */
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  /* With a reloc operator present, only a plain symbolic
	     expression is meaningful; anything else is an error.  */
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      /* Temporarily NUL-terminate for the diagnostic.  */
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  /* Undo the sign flip from above.  */
  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);
}
6828
6829 static void
6830 signed_cons (int size)
6831 {
6832 if (flag_code == CODE_64BIT)
6833 cons_sign = 1;
6834 cons (size);
6835 cons_sign = -1;
6836 }
6837
#ifdef TE_PE
/* Handle the PE-specific .secrel32 directive: emit each expression of
   a comma-separated list as a 4-byte section-relative value.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  for (;;)
    {
      expression (&exp);
      /* Symbolic operands become section-relative.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);

      if (*input_line_pointer != ',')
	break;
      ++input_line_pointer;	/* Consume the separator and loop.  */
    }

  demand_empty_rest_of_line ();
}
#endif
6858
/* Parse the immediate operand starting at IMM_START for the current
   instruction operand (`this_operand'), recording the expression and
   any @-operator reloc in the global insn template `i'.  Returns 1 on
   success, 0 after issuing a diagnostic.  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start with all operand-type bits set; lex_got narrows this down
     to what the chosen reloc permits.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  /* Claim the next immediate-expression slot.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Point the expression parser at the operand text.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* Strip an @-reloc operator, if any; on success lex_got hands back
     an allocated copy of the line without the operator.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc operator requires a symbolic expression; reject plain
	 constants and registers.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
6907
/* Validate the parsed immediate EXP and set the operand-type bits for
   `this_operand'.  TYPES carries the sizes permitted by any reloc
   operator; IMM_START (may be NULL) is only used in diagnostics.
   Returns 1 on success, 0 on error.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  The mask test checks that the
	 value fits in 32 bits; the xor/subtract pair then sign-extends
	 bit 31 through the whole addressT.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp->X_op == O_register)
    {
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      /* Narrow to what the reloc (if any) allows.  */
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
6964
6965 static char *
6966 i386_scale (char *scale)
6967 {
6968 offsetT val;
6969 char *save = input_line_pointer;
6970
6971 input_line_pointer = scale;
6972 val = get_absolute_expression ();
6973
6974 switch (val)
6975 {
6976 case 1:
6977 i.log2_scale_factor = 0;
6978 break;
6979 case 2:
6980 i.log2_scale_factor = 1;
6981 break;
6982 case 4:
6983 i.log2_scale_factor = 2;
6984 break;
6985 case 8:
6986 i.log2_scale_factor = 3;
6987 break;
6988 default:
6989 {
6990 char sep = *input_line_pointer;
6991
6992 *input_line_pointer = '\0';
6993 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6994 scale);
6995 *input_line_pointer = sep;
6996 input_line_pointer = save;
6997 return NULL;
6998 }
6999 }
7000 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7001 {
7002 as_warn (_("scale factor of %d without an index register"),
7003 1 << i.log2_scale_factor);
7004 i.log2_scale_factor = 0;
7005 }
7006 scale = input_line_pointer;
7007 input_line_pointer = save;
7008 return scale;
7009 }
7010
/* Parse the displacement text [DISP_START, DISP_END) for the current
   operand, deciding which displacement widths (disp8/16/32/32s/64) are
   permitted from the instruction kind, mode and prefixes, then handing
   the parsed expression to i386_finalize_displacement.  Returns 1 on
   success, 0 on error.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  /* Non-branch operands (and absolute-indirect jumps) use address-size
     rules; direct branches below use data-size rules instead.  */
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  /* Claim the next displacement-expression slot.  */
  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  /* NUL-terminate the operand text for the expression parser; the
     original byte is restored below.  */
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip an @-reloc operator, if any (see lex_got).  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc operator requires a symbolic expression.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
7160
7161 static int
7162 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7163 i386_operand_type types, const char *disp_start)
7164 {
7165 i386_operand_type bigdisp;
7166 int ret = 1;
7167
7168 /* We do this to make sure that the section symbol is in
7169 the symbol table. We will ultimately change the relocation
7170 to be relative to the beginning of the section. */
7171 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7172 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7173 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7174 {
7175 if (exp->X_op != O_symbol)
7176 goto inv_disp;
7177
7178 if (S_IS_LOCAL (exp->X_add_symbol)
7179 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7180 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7181 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7182 exp->X_op = O_subtract;
7183 exp->X_op_symbol = GOT_symbol;
7184 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7185 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7186 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7187 i.reloc[this_operand] = BFD_RELOC_64;
7188 else
7189 i.reloc[this_operand] = BFD_RELOC_32;
7190 }
7191
7192 else if (exp->X_op == O_absent
7193 || exp->X_op == O_illegal
7194 || exp->X_op == O_big)
7195 {
7196 inv_disp:
7197 as_bad (_("missing or invalid displacement expression `%s'"),
7198 disp_start);
7199 ret = 0;
7200 }
7201
7202 else if (flag_code == CODE_64BIT
7203 && !i.prefix[ADDR_PREFIX]
7204 && exp->X_op == O_constant)
7205 {
7206 /* Since displacement is signed extended to 64bit, don't allow
7207 disp32 and turn off disp32s if they are out of range. */
7208 i.types[this_operand].bitfield.disp32 = 0;
7209 if (!fits_in_signed_long (exp->X_add_number))
7210 {
7211 i.types[this_operand].bitfield.disp32s = 0;
7212 if (i.types[this_operand].bitfield.baseindex)
7213 {
7214 as_bad (_("0x%lx out range of signed 32bit displacement"),
7215 (long) exp->X_add_number);
7216 ret = 0;
7217 }
7218 }
7219 }
7220
7221 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7222 else if (exp->X_op != O_constant
7223 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7224 && exp_seg != absolute_section
7225 && exp_seg != text_section
7226 && exp_seg != data_section
7227 && exp_seg != bss_section
7228 && exp_seg != undefined_section
7229 && !bfd_is_com_section (exp_seg))
7230 {
7231 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7232 ret = 0;
7233 }
7234 #endif
7235
7236 /* Check if this is a displacement only operand. */
7237 bigdisp = i.types[this_operand];
7238 bigdisp.bitfield.disp8 = 0;
7239 bigdisp.bitfield.disp16 = 0;
7240 bigdisp.bitfield.disp32 = 0;
7241 bigdisp.bitfield.disp32s = 0;
7242 bigdisp.bitfield.disp64 = 0;
7243 if (operand_type_all_zero (&bigdisp))
7244 i.types[this_operand] = operand_type_and (i.types[this_operand],
7245 types);
7246
7247 return ret;
7248 }
7249
7250 /* Make sure the memory operand we've been dealt is valid.
7251 Return 1 on success, 0 on a failure. */
7252
static int
i386_index_check (const char *operand_string)
{
  int ok;
  const char *kind = "base/index";
#if INFER_ADDR_PREFIX
  int fudged = 0;

 tryprefix:
#endif
  ok = 1;
  /* String instructions (movs, cmps, ...) restrict the memory operand
     to a fixed register; handled first, separately from the generic
     base/index validation below.  */
  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      unsigned int expected;

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
	}
      else
	expected = 3 /* rBX */;

      /* ok == -1 means "wrong register, but correctable with a
	 warning"; ok == 0 means a hard size/mode mismatch.  */
      if (!i.base_reg || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	ok = -1;
      else if (!(flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg16))
	ok = 0;
      else if (i.base_reg->reg_num != expected)
	ok = -1;

      if (ok < 0)
	{
	  unsigned int j;

	  /* Look up the name of the register we expected, for the
	     warning message.  */
	  for (j = 0; j < i386_regtab_size; ++j)
	    if ((flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg16)
		&& i386_regtab[j].reg_num == expected)
	      break;
	  gas_assert (j < i386_regtab_size);
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   i386_regtab[j].reg_name,
		   intel_syntax ? ']' : ')');
	  /* Only a warning; accept the operand.  */
	  ok = 1;
	}
    }
  else if (flag_code == CODE_64BIT)
    {
      /* 64-bit checks: base must be a 64-bit reg (32-bit with address
	 prefix) or RIP/EIP without index; index must be xmm/ymm (VSIB)
	 or a base/index-capable GPR of matching width (RIZ/EIZ OK).  */
      if ((i.base_reg
	   && ((i.prefix[ADDR_PREFIX] == 0
		&& !i.base_reg->reg_type.bitfield.reg64)
	       || (i.prefix[ADDR_PREFIX]
		   && !i.base_reg->reg_type.bitfield.reg32))
	   && (i.index_reg
	       || i.base_reg->reg_num !=
		  (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
	  || (i.index_reg
	      && !(i.index_reg->reg_type.bitfield.regxmm
		   || i.index_reg->reg_type.bitfield.regymm)
	      && (!i.index_reg->reg_type.bitfield.baseindex
		  || (i.prefix[ADDR_PREFIX] == 0
		      && i.index_reg->reg_num != RegRiz
		      && !i.index_reg->reg_type.bitfield.reg64
		      )
		  || (i.prefix[ADDR_PREFIX]
		      && i.index_reg->reg_num != RegEiz
		      && !i.index_reg->reg_type.bitfield.reg32))))
	ok = 0;
    }
  else
    {
      if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
	{
	  /* 16bit checks.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    ok = 0;
	}
      else
	{
	  /* 32bit checks.  */
	  if ((i.base_reg
	       && !i.base_reg->reg_type.bitfield.reg32)
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && ((!i.index_reg->reg_type.bitfield.reg32
		       && i.index_reg->reg_num != RegEiz)
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    ok = 0;
	}
    }
  if (!ok)
    {
#if INFER_ADDR_PREFIX
      /* Before giving up, retry once with an inferred address-size
	 prefix -- the registers may simply be of the "other" width.  */
      if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
	{
	  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
	  i.prefixes += 1;
	  /* Change the size of any displacement too.  At most one of
	     Disp16 or Disp32 is set.
	     FIXME.  There doesn't seem to be any real need for separate
	     Disp16 and Disp32 flags.  The same goes for Imm16 and Imm32.
	     Removing them would probably clean up the code quite a lot.  */
	  if (flag_code != CODE_64BIT
	      && (i.types[this_operand].bitfield.disp16
		  || i.types[this_operand].bitfield.disp32))
	    i.types[this_operand]
	      = operand_type_xor (i.types[this_operand], disp16_32);
	  fudged = 1;
	  goto tryprefix;
	}
      if (fudged)
	as_bad (_("`%s' is not a valid %s expression"),
		operand_string,
		kind);
      else
#endif
	as_bad (_("`%s' is not a valid %s-bit %s expression"),
		operand_string,
		flag_code_names[i.prefix[ADDR_PREFIX]
				? flag_code == CODE_32BIT
				  ? CODE_16BIT
				  : CODE_32BIT
				: flag_code],
		kind);
    }
  return ok;
}
7418
7419 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7420 on error. */
7421
/* Parse one AT&T-syntax operand in OPERAND_STRING into the global
   insn template `i' for `this_operand': register (with optional
   segment override), immediate ($...), or memory reference
   disp(base,index,scale).  Returns 1 on success, 0 after a
   diagnostic.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* Record the override; reg_num encodes es/cs/ss/ds/fs/gs.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}
      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Plain register operand: record it, clearing BaseIndex since
	 the register is the operand itself, not an address part.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      /* Confirmed base/index form: the displacement (if any)
		 ends at the '('.  */
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
7717 \f
7718 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7719 that an rs_machine_dependent frag may reach. */
7720
7721 unsigned int
7722 i386_frag_max_var (fragS *frag)
7723 {
7724 /* The only relaxable frags are for jumps.
7725 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7726 gas_assert (frag->fr_type == rs_machine_dependent);
7727 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7728 }
7729
7730 /* md_estimate_size_before_relax()
7731
7732 Called just before relax() for rs_machine_dependent frags. The x86
7733 assembler uses these frags to handle variable size jump
7734 instructions.
7735
7736 Any symbol that is now undefined will not become defined.
7737 Return the correct fr_subtype in the frag.
7738 Return the initial "guess for variable size of frag" to caller.
7739 The guess is actually the growth beyond the fixed part. Whatever
7740 we do to grow the fixed or variable part contributes to our
7741 returned value. */
7742
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Widen the jump
	 to its largest form now and emit a fixup instead of relaxing.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var may carry an explicit reloc from the instruction.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  /* Pre-386 CPUs have no 16-bit conditional jump with a word
	     displacement; synthesize one from jNcc over a jmp.  */
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte displacement and
		 emit a (signed) 8-bit pc-relative fixup.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is now fully fixed; stop it from relaxing further.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
7856
/* Called after relax() is finished.

   In:	Address of frag.
	fr_type == rs_machine_dependent.
	fr_subtype is what the address relaxed to.

   Out:	Any fixSs and constants are set up.
	Caller will turn frag into a ".space 0".  */

void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  /* Byte-displacement Jcc 0x7N becomes 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  extension = 4;
	  /* 8086 has no 16-bit conditional jump with a large
	     displacement: negate the condition and branch over an
	     unconditional jump to the real target.  */
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
7964 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location in the frag's literal pool where the value is patched.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Convert plain data relocs into their PC-relative counterparts when
     the fixup itself is PC-relative.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA relocs the addend goes into the relocation, not the
	 section contents; stash it for tc_gen_reloc and emit zero.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
8138 \f
/* Turn a floating point literal of type TYPE into LITTLENUMs stored at
   LITP, setting *SIZEP to the number of bytes emitted.  Returns an
   error message, or NULL on success.  */

char *
md_atof (int type, char *litP, int *sizeP)
{
  /* FALSE requests little-endian LITTLENUM output, matching the
     byte order of the 386.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
8146 \f
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Format the invalid character C for a diagnostic: printable characters
   come out quoted ('c'), everything else as a hex escape ((0xNN)).
   Returns a pointer to a static buffer, overwritten on each call.  */

static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    {
      snprintf (output_invalid_buf, sizeof (output_invalid_buf),
		"(0x%x)", (unsigned char) c);
      return output_invalid_buf;
    }

  snprintf (output_invalid_buf, sizeof (output_invalid_buf), "'%c'", c);
  return output_invalid_buf;
}
8160
/* REG_STRING starts *before* REGISTER_PREFIX.  Parse a register name
   from REG_STRING, validate it against the current cpu/arch flags, and
   return its reg_entry (setting *END_OP to just past the consumed
   text), or NULL if it is not an acceptable register.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the (translated) register name into a local buffer, bailing
     out if it is too long to be any known register.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  /* %st(i) is represented as the i-th entry after
		     st(0) in the register table.  */
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* Reject registers that require at least an i386 when assembling for
     an older cpu.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FPU registers need some form of x87 support.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* 64-bit-only registers (REX-accessed or 64-bit wide) are invalid
     outside 64-bit mode, with a carve-out for control registers on
     cpus with long mode.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The pseudo "flat" segment register is Intel-syntax only.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
8271
8272 /* REG_STRING starts *before* REGISTER_PREFIX. */
8273
8274 static const reg_entry *
8275 parse_register (char *reg_string, char **end_op)
8276 {
8277 const reg_entry *r;
8278
8279 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8280 r = parse_real_register (reg_string, end_op);
8281 else
8282 r = NULL;
8283 if (!r)
8284 {
8285 char *save = input_line_pointer;
8286 char c;
8287 symbolS *symbolP;
8288
8289 input_line_pointer = reg_string;
8290 c = get_symbol_end ();
8291 symbolP = symbol_find (reg_string);
8292 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8293 {
8294 const expressionS *e = symbol_get_value_expression (symbolP);
8295
8296 know (e->X_op == O_register);
8297 know (e->X_add_number >= 0
8298 && (valueT) e->X_add_number < i386_regtab_size);
8299 r = i386_regtab + e->X_add_number;
8300 *end_op = input_line_pointer;
8301 }
8302 *input_line_pointer = c;
8303 input_line_pointer = save;
8304 }
8305 return r;
8306 }
8307
/* Hook called by the expression parser for NAME followed by *NEXTCHARP.
   If NAME is a register, fill in E as an O_register expression and
   return 1.  Otherwise, for Intel syntax, fall back to the
   Intel-specific name parser; return 0 if nothing matched.  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Temporarily restore the character the caller replaced with NUL so
     parse_register can see the full input.  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      /* Hand the new terminator back to the caller and NUL-terminate
	 the consumed text.  */
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  /* Not a register: put the scanner and the NUL terminator back.  */
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
8328
/* Target hook for operands the generic expression parser cannot handle:
   register names introduced by REGISTER_PREFIX and, in Intel syntax,
   bracketed expressions.  Fills in E on success; otherwise leaves it
   alone (or sets O_absent for an unterminated bracket).  */

void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  /* Wrap the bracketed expression in an O_index node.  */
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* No closing bracket: report absence and rewind.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
8367
8368 \f
8369 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8370 const char *md_shortopts = "kVQ:sqn";
8371 #else
8372 const char *md_shortopts = "qn";
8373 #endif
8374
8375 #define OPTION_32 (OPTION_MD_BASE + 0)
8376 #define OPTION_64 (OPTION_MD_BASE + 1)
8377 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8378 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8379 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8380 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8381 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8382 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8383 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8384 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8385 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8386 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8387 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8388 #define OPTION_X32 (OPTION_MD_BASE + 13)
8389
8390 struct option md_longopts[] =
8391 {
8392 {"32", no_argument, NULL, OPTION_32},
8393 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8394 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8395 {"64", no_argument, NULL, OPTION_64},
8396 #endif
8397 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8398 {"x32", no_argument, NULL, OPTION_X32},
8399 #endif
8400 {"divide", no_argument, NULL, OPTION_DIVIDE},
8401 {"march", required_argument, NULL, OPTION_MARCH},
8402 {"mtune", required_argument, NULL, OPTION_MTUNE},
8403 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8404 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8405 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8406 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8407 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8408 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8409 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8410 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8411 {NULL, no_argument, NULL, 0}
8412 };
8413 size_t md_longopts_size = sizeof (md_longopts);
8414
/* Handle the machine-dependent command line option C with argument ARG
   (NULL for options that take none).  Returns nonzero if the option
   was recognized, zero otherwise.  */

int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      /* -n: do not use optimal nop sequences for code alignment.  */
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	const char **list, **l;

	/* Only accept --64 when a 64-bit output target was compiled
	   in; scan the BFD target list for one.  */
	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  /* Likewise, --x32 requires the elf32-x86-64 target.  */
	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
	char *n, *t;
	const char *s;

	/* Rebuild the comment-character set without '/' so that
	   division expressions work.  */
	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* ARG is "CPU" optionally followed by "+EXT" extension clauses;
	 process each '+'-separated piece in turn.  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Record the extension in the sub-arch name,
			 accumulating across multiple clauses.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL );
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = sse_check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = sse_check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = sse_check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
8662
8663 #define MESSAGE_TEMPLATE \
8664 " "
8665
/* Print the comma-separated list of known cpu/arch names to STREAM,
   wrapping lines at the width of MESSAGE_TEMPLATE.  If EXT is nonzero,
   show only ISA extensions (the ".xxx" table entries, minus the dot);
   otherwise show only processors.  If CHECK is nonzero, omit
   processors that lack i386 support and so cannot be selected.  */

static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  /* Continuation lines start at column 27, under the usage text.  */
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is an processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0" */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush the final, partially filled line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
8741
/* Print the i386-specific command line options to STREAM for --help.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
}
8799
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Decodes default_arch ("i386",
   "x86_64", or "x86_64:32" for x32), sets the global code/ABI flags,
   and returns the BFD target name for the selected output flavour.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" is the LP64 ABI; any suffix (":32") selects x32.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* Supply default ISA/tune flags if none were given on the command
     line; entry 1 of cpu_arch is used for 64-bit, entry 0 otherwise.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects cannot carry 64-bit relocations.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* L1OM/K1OM use their own ELF target formats and exist only
	   in the 64-bit ABI.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one  */
8901
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit an ELF .note section of type NT_ARCH recording the selected
   cpu architecture name.  No-op unless the output is ELF and a cpu
   name was chosen.  */
void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      /* Remember the current section so it can be restored after
	 writing the note.  */
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      /* Emit the note header fields followed by the name payload.  */
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      frag_align (2, 0, 0);

      subseg_set (seg, subseg);
    }
}
#endif
8943 \f
8944 symbolS *
8945 md_undefined_symbol (char *name)
8946 {
8947 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8948 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8949 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8950 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8951 {
8952 if (!GOT_symbol)
8953 {
8954 if (symbol_find (name))
8955 as_bad (_("GOT already in symbol table"));
8956 GOT_symbol = symbol_new (name, undefined_section,
8957 (valueT) 0, &zero_address_frag);
8958 };
8959 return GOT_symbol;
8960 }
8961 return 0;
8962 }
8963
8964 /* Round up a section size to the appropriate boundary. */
8965
8966 valueT
8967 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8968 {
8969 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8970 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8971 {
8972 /* For a.out, force the section size to be aligned. If we don't do
8973 this, BFD will align it for us, but it will not write out the
8974 final bytes of the section. This may be a bug in BFD, but it is
8975 easier to fix it here since that is how the other a.out targets
8976 work. */
8977 int align;
8978
8979 align = bfd_get_section_alignment (stdoutput, segment);
8980 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8981 }
8982 #endif
8983
8984 return size;
8985 }
8986
/* On the i386, PC-relative offsets are relative to the start of the
   next instruction.  That is, the address of the offset, plus its
   size, since the offset is always the last part of the insn.  */

long
md_pcrel_from (fixS *fixP)
{
  /* fr_address + fx_where is the address of the offset field itself;
     adding fx_size yields the address of the following instruction.  */
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
}
8996
8997 #ifndef I386COFF
8998
8999 static void
9000 s_bss (int ignore ATTRIBUTE_UNUSED)
9001 {
9002 int temp;
9003
9004 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9005 if (IS_ELF)
9006 obj_elf_section_change_hook ();
9007 #endif
9008 temp = get_absolute_expression ();
9009 subseg_set (bss_section, (subsegT) temp);
9010 demand_empty_rest_of_line ();
9011 }
9012
9013 #endif
9014
9015 void
9016 i386_validate_fix (fixS *fixp)
9017 {
9018 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9019 {
9020 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9021 {
9022 if (!object_64bit)
9023 abort ();
9024 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9025 }
9026 else
9027 {
9028 if (!object_64bit)
9029 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9030 else
9031 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9032 }
9033 fixp->fx_subsy = 0;
9034 }
9035 }
9036
/* Translate the internal fixup FIXP into a BFD relocation entry for
   the object writer.  Returns a freshly xmalloc'd arelent; ownership
   passes to the caller (BFD frees it with the output).  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
      /* These relocation types map one-to-one onto BFD codes and pass
	 through unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* PC-relative 32S: deliberately fall through and pick a code
	 purely by fixup size, like any unhandled type.  */
    default:
      if (fixp->fx_pcrel)
	{
	  /* Choose a generic PC-relative relocation by field width.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  /* Choose a generic absolute relocation by field width.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* References to _GLOBAL_OFFSET_TABLE_ itself become GOTPC-style
     relocations of the matching width.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry 64-bit relocations; diagnose rather
	 than emit something the linker cannot represent.  Note that
	 plain BFD_RELOC_64 is intentionally absent from this list.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	    /* For these PC-relative GOT/PLT/TLS relocations the addend
	       is the offset adjusted back by the field size.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    /* Reconstruct the addend from the PC of the next insn;
	       presumably this undoes the PC-relative adjustment done
	       during fixup processing — see md_pcrel_from above.  */
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
9230
9231 #include "tc-i386-intel.c"
9232
9233 void
9234 tc_x86_parse_to_dw2regnum (expressionS *exp)
9235 {
9236 int saved_naked_reg;
9237 char saved_register_dot;
9238
9239 saved_naked_reg = allow_naked_reg;
9240 allow_naked_reg = 1;
9241 saved_register_dot = register_chars['.'];
9242 register_chars['.'] = '.';
9243 allow_pseudo_reg = 1;
9244 expression_and_evaluate (exp);
9245 allow_pseudo_reg = 0;
9246 register_chars['.'] = saved_register_dot;
9247 allow_naked_reg = saved_naked_reg;
9248
9249 if (exp->X_op == O_register && exp->X_add_number >= 0)
9250 {
9251 if ((addressT) exp->X_add_number < i386_regtab_size)
9252 {
9253 exp->X_op = O_constant;
9254 exp->X_add_number = i386_regtab[exp->X_add_number]
9255 .dw2_regnum[flag_code >> 1];
9256 }
9257 else
9258 exp->X_op = O_illegal;
9259 }
9260 }
9261
9262 void
9263 tc_x86_frame_initial_instructions (void)
9264 {
9265 static unsigned int sp_regno[2];
9266
9267 if (!sp_regno[flag_code >> 1])
9268 {
9269 char *saved_input = input_line_pointer;
9270 char sp[][4] = {"esp", "rsp"};
9271 expressionS exp;
9272
9273 input_line_pointer = sp[flag_code >> 1];
9274 tc_x86_parse_to_dw2regnum (&exp);
9275 gas_assert (exp.X_op == O_constant);
9276 sp_regno[flag_code >> 1] = exp.X_add_number;
9277 input_line_pointer = saved_input;
9278 }
9279
9280 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9281 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9282 }
9283
/* Return the size in bytes of an address as emitted in DWARF2 debug
   information.  */
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* The x32 ABI uses 32-bit addresses even though the architecture
     reports 64 bits per address.  */
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
9293
9294 int
9295 i386_elf_section_type (const char *str, size_t len)
9296 {
9297 if (flag_code == CODE_64BIT
9298 && len == sizeof ("unwind") - 1
9299 && strncmp (str, "unwind", 6) == 0)
9300 return SHT_X86_64_UNWIND;
9301
9302 return -1;
9303 }
9304
#ifdef TE_SOLARIS
/* On Solaris, give the .eh_frame section the x86-64 unwind section
   type when assembling 64-bit code.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif
9313
9314 #ifdef TE_PE
9315 void
9316 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9317 {
9318 expressionS exp;
9319
9320 exp.X_op = O_secrel;
9321 exp.X_add_symbol = symbol;
9322 exp.X_add_number = 0;
9323 emit_expr (&exp, size);
9324 }
9325 #endif
9326
9327 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9328 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9329
9330 bfd_vma
9331 x86_64_section_letter (int letter, char **ptr_msg)
9332 {
9333 if (flag_code == CODE_64BIT)
9334 {
9335 if (letter == 'l')
9336 return SHF_X86_64_LARGE;
9337
9338 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9339 }
9340 else
9341 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9342 return -1;
9343 }
9344
9345 bfd_vma
9346 x86_64_section_word (char *str, size_t len)
9347 {
9348 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9349 return SHF_X86_64_LARGE;
9350
9351 return -1;
9352 }
9353
/* Handle the .largecomm pseudo-op.  In 32-bit mode it degrades to an
   ordinary .comm (with a warning).  In 64-bit mode it parses the
   common symbol with the large-common section and a .lbss-backed BSS
   temporarily substituted for the normal ones.  */
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Created once on first use and reused thereafter.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched sections; switch back to where the
	     directive appeared.  */
	  subseg_set (seg, subseg);
	}

      /* Swap in the large-common machinery around the shared common
	 parser, then restore the originals.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
9393 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.279556 seconds and 5 git commands to generate.