1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
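/* For example, an insn carrying both a segment override and a REX
   prefix has the segment byte emitted before the REX byte, since
   SEG_PREFIX (1) indexes i.prefix[] ahead of REX_PREFIX (6).  */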
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
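/* The scale field holds the SIB encoding, i.e. log2 of the scale
   factor; e.g. (%eax,%ebx,4) is encoded with scale == 2.  */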
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136 }
137 arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 static void handle_quad (int);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 or 3 bytes. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into an
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling. */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Force 32bit displacement in encoding. */
284 unsigned int disp32_encoding;
285
286 /* Error message. */
287 enum i386_error error;
288 };
289
290 typedef struct _i386_insn i386_insn;
291
292 /* List of chars besides those in app.c:symbol_chars that can start an
293 operand. Used to prevent the scrubber eating vital white-space. */
294 const char extra_symbol_chars[] = "*%-(["
295 #ifdef LEX_AT
296 "@"
297 #endif
298 #ifdef LEX_QM
299 "?"
300 #endif
301 ;
302
303 #if (defined (TE_I386AIX) \
304 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
305 && !defined (TE_GNU) \
306 && !defined (TE_LINUX) \
307 && !defined (TE_NETWARE) \
308 && !defined (TE_FreeBSD) \
309 && !defined (TE_DragonFly) \
310 && !defined (TE_NetBSD)))
311 /* This array holds the chars that always start a comment. If the
312 pre-processor is disabled, these aren't very useful. The option
313 --divide will remove '/' from this list. */
314 const char *i386_comment_chars = "#/";
315 #define SVR4_COMMENT_CHARS 1
316 #define PREFIX_SEPARATOR '\\'
317
318 #else
319 const char *i386_comment_chars = "#";
320 #define PREFIX_SEPARATOR '/'
321 #endif
322
323 /* This array holds the chars that only start a comment at the beginning of
324 a line. If the line seems to have the form '# 123 filename'
325 .line and .file directives will appear in the pre-processed output.
326 Note that input_file.c hand checks for '#' at the beginning of the
327 first line of the input file. This is because the compiler outputs
328 #NO_APP at the beginning of its output.
329 Also note that comments started like this one will always work if
330 '/' isn't otherwise defined. */
331 const char line_comment_chars[] = "#/";
332
333 const char line_separator_chars[] = ";";
334
335 /* Chars that can be used to separate mant from exp in floating point
336 nums. */
337 const char EXP_CHARS[] = "eE";
338
339 /* Chars that mean this number is a floating point constant
340 As in 0f12.456
341 or 0d1.2345e12. */
342 const char FLT_CHARS[] = "fFdDxX";
343
344 /* Tables for lexical analysis. */
345 static char mnemonic_chars[256];
346 static char register_chars[256];
347 static char operand_chars[256];
348 static char identifier_chars[256];
349 static char digit_chars[256];
350
351 /* Lexical macros. */
352 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
353 #define is_operand_char(x) (operand_chars[(unsigned char) x])
354 #define is_register_char(x) (register_chars[(unsigned char) x])
355 #define is_space_char(x) ((x) == ' ')
356 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
357 #define is_digit_char(x) (digit_chars[(unsigned char) x])
358
359 /* All non-digit non-letter characters that may occur in an operand. */
360 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
361
362 /* md_assemble() always leaves the strings it's passed unaltered. To
363 effect this we maintain a stack of saved characters that we've smashed
364 with '\0's (indicating end of strings for various sub-fields of the
365 assembler instruction). */
366 static char save_stack[32];
367 static char *save_stack_p;
368 #define END_STRING_AND_SAVE(s) \
369 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
370 #define RESTORE_END_STRING(s) \
371 do { *(s) = *--save_stack_p; } while (0)
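/* Illustrative use: temporarily NUL-terminate a sub-string, parse it,
   then put the saved character back:
     END_STRING_AND_SAVE (l);
     ... parse the NUL-terminated piece ...
     RESTORE_END_STRING (l);
   Saves and restores must nest (last saved, first restored), since
   they share the one save_stack.  */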
372
373 /* The instruction we're assembling. */
374 static i386_insn i;
375
376 /* Possible templates for current insn. */
377 static const templates *current_templates;
378
379 /* Per instruction expressionS buffers: max displacements & immediates. */
380 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
381 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
382
383 /* Current operand we are working on. */
384 static int this_operand = -1;
385
386 /* We support three different modes. FLAG_CODE variable is used to distinguish
387 these. */
388
389 enum flag_code {
390 CODE_32BIT,
391 CODE_16BIT,
392 CODE_64BIT };
393
394 static enum flag_code flag_code;
395 static unsigned int object_64bit;
396 static unsigned int disallow_64bit_reloc;
397 static int use_rela_relocations = 0;
398
399 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
400 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
401 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
402
403 /* The ELF ABI to use. */
404 enum x86_elf_abi
405 {
406 I386_ABI,
407 X86_64_ABI,
408 X86_64_X32_ABI
409 };
410
411 static enum x86_elf_abi x86_elf_abi = I386_ABI;
412 #endif
413
414 /* The names used to print error messages. */
415 static const char *flag_code_names[] =
416 {
417 "32",
418 "16",
419 "64"
420 };
421
422 /* 1 for intel syntax,
423 0 if att syntax. */
424 static int intel_syntax = 0;
425
426 /* 1 for intel mnemonic,
427 0 if att mnemonic. */
428 static int intel_mnemonic = !SYSV386_COMPAT;
429
430 /* 1 if support old (<= 2.8.1) versions of gcc. */
431 static int old_gcc = OLDGCC_COMPAT;
432
433 /* 1 if pseudo registers are permitted. */
434 static int allow_pseudo_reg = 0;
435
436 /* 1 if register prefix % not required. */
437 static int allow_naked_reg = 0;
438
439 /* 1 if pseudo index register, eiz/riz, is allowed. */
440 static int allow_index_reg = 0;
441
442 static enum
443 {
444 sse_check_none = 0,
445 sse_check_warning,
446 sse_check_error
447 }
448 sse_check;
449
450 /* Register prefix used for error message. */
451 static const char *register_prefix = "%";
452
453 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
454 leave, push, and pop instructions so that gcc has the same stack
455 frame as in 32 bit mode. */
456 static char stackop_size = '\0';
457
458 /* Non-zero to optimize code alignment. */
459 int optimize_align_code = 1;
460
461 /* Non-zero to quieten some warnings. */
462 static int quiet_warnings = 0;
463
464 /* CPU name. */
465 static const char *cpu_arch_name = NULL;
466 static char *cpu_sub_arch_name = NULL;
467
468 /* CPU feature flags. */
469 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
470
471 /* If we have selected a cpu we are generating instructions for. */
472 static int cpu_arch_tune_set = 0;
473
474 /* Cpu we are generating instructions for. */
475 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
476
477 /* CPU feature flags of cpu we are generating instructions for. */
478 static i386_cpu_flags cpu_arch_tune_flags;
479
480 /* CPU instruction set architecture used. */
481 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
482
483 /* CPU feature flags of instruction set architecture used. */
484 i386_cpu_flags cpu_arch_isa_flags;
485
486 /* If set, conditional jumps are not automatically promoted to handle
487 larger than a byte offset. */
488 static unsigned int no_cond_jump_promotion = 0;
489
490 /* Encode SSE instructions with VEX prefix. */
491 static unsigned int sse2avx;
492
493 /* Encode scalar AVX instructions with specific vector length. */
494 static enum
495 {
496 vex128 = 0,
497 vex256
498 } avxscalar;
499
500 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
501 static symbolS *GOT_symbol;
502
503 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
504 unsigned int x86_dwarf2_return_column;
505
506 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
507 int x86_cie_data_alignment;
508
509 /* Interface to relax_segment.
510 There are 3 major relax states for 386 jump insns because the
511 different types of jumps add different sizes to frags when we're
512 figuring out what sort of jump to choose to reach a given label. */
513
514 /* Types. */
515 #define UNCOND_JUMP 0
516 #define COND_JUMP 1
517 #define COND_JUMP86 2
518
519 /* Sizes. */
520 #define CODE16 1
521 #define SMALL 0
522 #define SMALL16 (SMALL | CODE16)
523 #define BIG 2
524 #define BIG16 (BIG | CODE16)
525
526 #ifndef INLINE
527 #ifdef __GNUC__
528 #define INLINE __inline__
529 #else
530 #define INLINE
531 #endif
532 #endif
533
534 #define ENCODE_RELAX_STATE(type, size) \
535 ((relax_substateT) (((type) << 2) | (size)))
536 #define TYPE_FROM_RELAX_STATE(s) \
537 ((s) >> 2)
538 #define DISP_SIZE_FROM_RELAX_STATE(s) \
539 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
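/* For example, ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2 = 6,
   which is also the index of the matching md_relax_table entry below;
   DISP_SIZE_FROM_RELAX_STATE then gives 4 displacement bytes for BIG,
   2 for BIG16 and 1 for SMALL/SMALL16.  */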
540
541 /* This table is used by relax_frag to promote short jumps to long
542 ones where necessary. SMALL (short) jumps may be promoted to BIG
543 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
544 don't allow a short jump in a 32 bit code segment to be promoted to
545 a 16 bit offset jump because it's slower (requires data size
546 prefix), and doesn't work, unless the destination is in the bottom
547 64k of the code segment (The top 16 bits of eip are zeroed). */
548
549 const relax_typeS md_relax_table[] =
550 {
551 /* The fields are:
552 1) most positive reach of this state,
553 2) most negative reach of this state,
554 3) how many bytes this mode will have in the variable part of the frag
555 4) which index into the table to try if we can't fit into this one. */
556
557 /* UNCOND_JUMP states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
560 /* dword jmp adds 4 bytes to frag:
561 0 extra opcode bytes, 4 displacement bytes. */
562 {0, 0, 4, 0},
563 /* word jmp adds 2 bytes to frag:
564 0 extra opcode bytes, 2 displacement bytes. */
565 {0, 0, 2, 0},
566
567 /* COND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
570 /* dword conditionals add 5 bytes to frag:
571 1 extra opcode byte, 4 displacement bytes. */
572 {0, 0, 5, 0},
573 /* word conditionals add 3 bytes to frag:
574 1 extra opcode byte, 2 displacement bytes. */
575 {0, 0, 3, 0},
576
577 /* COND_JUMP86 states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 4 bytes to frag:
584 1 displacement byte and a 3 byte long branch insn. */
585 {0, 0, 4, 0}
586 };
587
588 static const arch_entry cpu_arch[] =
589 {
590 /* Do not replace the first two entries - i386_target_format()
591 relies on them being there in this order. */
592 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
593 CPU_GENERIC32_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
595 CPU_GENERIC64_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
597 CPU_NONE_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
599 CPU_I186_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
601 CPU_I286_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
603 CPU_I386_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
605 CPU_I486_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
609 CPU_I686_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
611 CPU_I586_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
613 CPU_PENTIUMPRO_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
615 CPU_P2_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
617 CPU_P3_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
619 CPU_P4_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
621 CPU_CORE_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
623 CPU_NOCONA_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
625 CPU_CORE_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
629 CPU_CORE2_FLAGS, 1, 0 },
630 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
631 CPU_CORE2_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
633 CPU_COREI7_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
635 CPU_L1OM_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
637 CPU_K1OM_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
639 CPU_K6_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
641 CPU_K6_2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
643 CPU_ATHLON_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
645 CPU_K8_FLAGS, 1, 0 },
646 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
647 CPU_K8_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
649 CPU_K8_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
651 CPU_AMDFAM10_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
653 CPU_BDVER1_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
655 CPU_BDVER2_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
657 CPU_8087_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
659 CPU_287_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
661 CPU_387_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
663 CPU_ANY87_FLAGS, 0, 1 },
664 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
665 CPU_MMX_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
667 CPU_3DNOWA_FLAGS, 0, 1 },
668 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
669 CPU_SSE_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
671 CPU_SSE2_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
673 CPU_SSE3_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
675 CPU_SSSE3_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
677 CPU_SSE4_1_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
679 CPU_SSE4_2_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
681 CPU_SSE4_2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
683 CPU_ANY_SSE_FLAGS, 0, 1 },
684 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
685 CPU_AVX_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
687 CPU_AVX2_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
689 CPU_ANY_AVX_FLAGS, 0, 1 },
690 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
691 CPU_VMX_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
693 CPU_SMX_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
695 CPU_XSAVE_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
697 CPU_XSAVEOPT_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
699 CPU_AES_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
701 CPU_PCLMUL_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
703 CPU_PCLMUL_FLAGS, 1, 0 },
704 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
705 CPU_FSGSBASE_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
707 CPU_RDRND_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
709 CPU_F16C_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
711 CPU_BMI2_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
713 CPU_FMA_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
715 CPU_FMA4_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
717 CPU_XOP_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
719 CPU_LWP_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
721 CPU_MOVBE_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
723 CPU_EPT_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
725 CPU_LZCNT_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
727 CPU_INVPCID_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
729 CPU_CLFLUSH_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
731 CPU_NOP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
733 CPU_SYSCALL_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
735 CPU_RDTSCP_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
737 CPU_3DNOW_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
739 CPU_3DNOWA_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
741 CPU_PADLOCK_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
743 CPU_SVME_FLAGS, 1, 0 },
744 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
745 CPU_SVME_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
747 CPU_SSE4A_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
749 CPU_ABM_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
751 CPU_BMI_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
753 CPU_TBM_FLAGS, 0, 0 },
754 };
755
756 #ifdef I386COFF
757 /* Like s_lcomm_internal in gas/read.c but the alignment string
758 is allowed to be optional. */
759
760 static symbolS *
761 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
762 {
763 addressT align = 0;
764
765 SKIP_WHITESPACE ();
766
767 if (needs_align
768 && *input_line_pointer == ',')
769 {
770 align = parse_align (needs_align - 1);
771
772 if (align == (addressT) -1)
773 return NULL;
774 }
775 else
776 {
777 if (size >= 8)
778 align = 3;
779 else if (size >= 4)
780 align = 2;
781 else if (size >= 2)
782 align = 1;
783 else
784 align = 0;
785 }
786
787 bss_alloc (symbolP, size, align);
788 return symbolP;
789 }
790
791 static void
792 pe_lcomm (int needs_align)
793 {
794 s_comm_internal (needs_align * 2, pe_lcomm_internal);
795 }
796 #endif
797
798 const pseudo_typeS md_pseudo_table[] =
799 {
800 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
801 {"align", s_align_bytes, 0},
802 #else
803 {"align", s_align_ptwo, 0},
804 #endif
805 {"arch", set_cpu_arch, 0},
806 #ifndef I386COFF
807 {"bss", s_bss, 0},
808 #else
809 {"lcomm", pe_lcomm, 1},
810 #endif
811 {"ffloat", float_cons, 'f'},
812 {"dfloat", float_cons, 'd'},
813 {"tfloat", float_cons, 'x'},
814 {"value", cons, 2},
815 {"slong", signed_cons, 4},
816 {"noopt", s_ignore, 0},
817 {"optim", s_ignore, 0},
818 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
819 {"code16", set_code_flag, CODE_16BIT},
820 {"code32", set_code_flag, CODE_32BIT},
821 {"code64", set_code_flag, CODE_64BIT},
822 {"intel_syntax", set_intel_syntax, 1},
823 {"att_syntax", set_intel_syntax, 0},
824 {"intel_mnemonic", set_intel_mnemonic, 1},
825 {"att_mnemonic", set_intel_mnemonic, 0},
826 {"allow_index_reg", set_allow_index_reg, 1},
827 {"disallow_index_reg", set_allow_index_reg, 0},
828 {"sse_check", set_sse_check, 0},
829 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
830 {"largecomm", handle_large_common, 0},
831 {"quad", handle_quad, 8},
832 #else
833 {"file", (void (*) (int)) dwarf2_directive_file, 0},
834 {"loc", dwarf2_directive_loc, 0},
835 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
836 #endif
837 #ifdef TE_PE
838 {"secrel32", pe_directive_secrel, 0},
839 #endif
840 {0, 0, 0}
841 };
842
843 /* For interface with expression (). */
844 extern char *input_line_pointer;
845
846 /* Hash table for instruction mnemonic lookup. */
847 static struct hash_control *op_hash;
848
849 /* Hash table for register lookup. */
850 static struct hash_control *reg_hash;
851 \f
852 void
853 i386_align_code (fragS *fragP, int count)
854 {
855 /* Various efficient no-op patterns for aligning code labels.
856 Note: Don't try to assemble the instructions in the comments.
857 0L and 0w are not legal. */
858 static const char f32_1[] =
859 {0x90}; /* nop */
860 static const char f32_2[] =
861 {0x66,0x90}; /* xchg %ax,%ax */
862 static const char f32_3[] =
863 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
864 static const char f32_4[] =
865 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
866 static const char f32_5[] =
867 {0x90, /* nop */
868 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
869 static const char f32_6[] =
870 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
871 static const char f32_7[] =
872 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
873 static const char f32_8[] =
874 {0x90, /* nop */
875 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
876 static const char f32_9[] =
877 {0x89,0xf6, /* movl %esi,%esi */
878 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
879 static const char f32_10[] =
880 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
881 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
882 static const char f32_11[] =
883 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
884 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
885 static const char f32_12[] =
886 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
887 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
888 static const char f32_13[] =
889 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
890 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
891 static const char f32_14[] =
892 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
893 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
894 static const char f16_3[] =
895 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
896 static const char f16_4[] =
897 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
898 static const char f16_5[] =
899 {0x90, /* nop */
900 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
901 static const char f16_6[] =
902 {0x89,0xf6, /* mov %si,%si */
903 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
904 static const char f16_7[] =
905 {0x8d,0x74,0x00, /* lea 0(%si),%si */
906 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
907 static const char f16_8[] =
908 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
909 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
910 static const char jump_31[] =
911 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
912 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
913 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
914 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
915 static const char *const f32_patt[] = {
916 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
917 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
918 };
919 static const char *const f16_patt[] = {
920 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
921 };
922 /* nopl (%[re]ax) */
923 static const char alt_3[] =
924 {0x0f,0x1f,0x00};
925 /* nopl 0(%[re]ax) */
926 static const char alt_4[] =
927 {0x0f,0x1f,0x40,0x00};
928 /* nopl 0(%[re]ax,%[re]ax,1) */
929 static const char alt_5[] =
930 {0x0f,0x1f,0x44,0x00,0x00};
931 /* nopw 0(%[re]ax,%[re]ax,1) */
932 static const char alt_6[] =
933 {0x66,0x0f,0x1f,0x44,0x00,0x00};
934 /* nopl 0L(%[re]ax) */
935 static const char alt_7[] =
936 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
937 /* nopl 0L(%[re]ax,%[re]ax,1) */
938 static const char alt_8[] =
939 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
940 /* nopw 0L(%[re]ax,%[re]ax,1) */
941 static const char alt_9[] =
942 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
943 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
944 static const char alt_10[] =
945 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
946 /* data16
947 nopw %cs:0L(%[re]ax,%[re]ax,1) */
948 static const char alt_long_11[] =
949 {0x66,
950 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
951 /* data16
952 data16
953 nopw %cs:0L(%[re]ax,%[re]ax,1) */
954 static const char alt_long_12[] =
955 {0x66,
956 0x66,
957 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* data16
959 data16
960 data16
961 nopw %cs:0L(%[re]ax,%[re]ax,1) */
962 static const char alt_long_13[] =
963 {0x66,
964 0x66,
965 0x66,
966 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
967 /* data16
968 data16
969 data16
970 data16
971 nopw %cs:0L(%[re]ax,%[re]ax,1) */
972 static const char alt_long_14[] =
973 {0x66,
974 0x66,
975 0x66,
976 0x66,
977 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
978 /* data16
979 data16
980 data16
981 data16
982 data16
983 nopw %cs:0L(%[re]ax,%[re]ax,1) */
984 static const char alt_long_15[] =
985 {0x66,
986 0x66,
987 0x66,
988 0x66,
989 0x66,
990 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
991 /* nopl 0(%[re]ax,%[re]ax,1)
992 nopw 0(%[re]ax,%[re]ax,1) */
993 static const char alt_short_11[] =
994 {0x0f,0x1f,0x44,0x00,0x00,
995 0x66,0x0f,0x1f,0x44,0x00,0x00};
996 /* nopw 0(%[re]ax,%[re]ax,1)
997 nopw 0(%[re]ax,%[re]ax,1) */
998 static const char alt_short_12[] =
999 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1000 0x66,0x0f,0x1f,0x44,0x00,0x00};
1001 /* nopw 0(%[re]ax,%[re]ax,1)
1002 nopl 0L(%[re]ax) */
1003 static const char alt_short_13[] =
1004 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1005 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1006 /* nopl 0L(%[re]ax)
1007 nopl 0L(%[re]ax) */
1008 static const char alt_short_14[] =
1009 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1010 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1011 /* nopl 0L(%[re]ax)
1012 nopl 0L(%[re]ax,%[re]ax,1) */
1013 static const char alt_short_15[] =
1014 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1015 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1016 static const char *const alt_short_patt[] = {
1017 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1018 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1019 alt_short_14, alt_short_15
1020 };
1021 static const char *const alt_long_patt[] = {
1022 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1023 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1024 alt_long_14, alt_long_15
1025 };
1026
1027 /* Only align if the requested padding is positive and no larger than we can handle. */
1028 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1029 return;
1030
1031 /* We need to decide which NOP sequence to use for 32bit and
1032 64bit. When -mtune= is used:
1033
1034 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1035 PROCESSOR_GENERIC32, f32_patt will be used.
1036 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1037 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1038 PROCESSOR_GENERIC64, alt_long_patt will be used.
1039 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8,
1040 PROCESSOR_AMDFAM10 and PROCESSOR_BD, alt_short_patt
1041 will be used.
1042
1043 When -mtune= isn't used, alt_long_patt will be used if
1044 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1045 be used.
1046
1047 When -march= or .arch is used, we can't use anything beyond
1048 cpu_arch_isa_flags. */
1049
1050 if (flag_code == CODE_16BIT)
1051 {
1052 if (count > 8)
1053 {
1054 memcpy (fragP->fr_literal + fragP->fr_fix,
1055 jump_31, count);
1056 /* Adjust jump offset. */
1057 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1058 }
1059 else
1060 memcpy (fragP->fr_literal + fragP->fr_fix,
1061 f16_patt[count - 1], count);
1062 }
1063 else
1064 {
1065 const char *const *patt = NULL;
1066
1067 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1068 {
1069 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1070 switch (cpu_arch_tune)
1071 {
1072 case PROCESSOR_UNKNOWN:
1073 /* We use cpu_arch_isa_flags to check if we SHOULD
1074 optimize with nops. */
1075 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1076 patt = alt_long_patt;
1077 else
1078 patt = f32_patt;
1079 break;
1080 case PROCESSOR_PENTIUM4:
1081 case PROCESSOR_NOCONA:
1082 case PROCESSOR_CORE:
1083 case PROCESSOR_CORE2:
1084 case PROCESSOR_COREI7:
1085 case PROCESSOR_L1OM:
1086 case PROCESSOR_K1OM:
1087 case PROCESSOR_GENERIC64:
1088 patt = alt_long_patt;
1089 break;
1090 case PROCESSOR_K6:
1091 case PROCESSOR_ATHLON:
1092 case PROCESSOR_K8:
1093 case PROCESSOR_AMDFAM10:
1094 case PROCESSOR_BD:
1095 patt = alt_short_patt;
1096 break;
1097 case PROCESSOR_I386:
1098 case PROCESSOR_I486:
1099 case PROCESSOR_PENTIUM:
1100 case PROCESSOR_PENTIUMPRO:
1101 case PROCESSOR_GENERIC32:
1102 patt = f32_patt;
1103 break;
1104 }
1105 }
1106 else
1107 {
1108 switch (fragP->tc_frag_data.tune)
1109 {
1110 case PROCESSOR_UNKNOWN:
1111 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1112 PROCESSOR_UNKNOWN. */
1113 abort ();
1114 break;
1115
1116 case PROCESSOR_I386:
1117 case PROCESSOR_I486:
1118 case PROCESSOR_PENTIUM:
1119 case PROCESSOR_K6:
1120 case PROCESSOR_ATHLON:
1121 case PROCESSOR_K8:
1122 case PROCESSOR_AMDFAM10:
1123 case PROCESSOR_BD:
1124 case PROCESSOR_GENERIC32:
1125 /* We use cpu_arch_isa_flags to check if we CAN optimize
1126 with nops. */
1127 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1128 patt = alt_short_patt;
1129 else
1130 patt = f32_patt;
1131 break;
1132 case PROCESSOR_PENTIUMPRO:
1133 case PROCESSOR_PENTIUM4:
1134 case PROCESSOR_NOCONA:
1135 case PROCESSOR_CORE:
1136 case PROCESSOR_CORE2:
1137 case PROCESSOR_COREI7:
1138 case PROCESSOR_L1OM:
1139 case PROCESSOR_K1OM:
1140 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1141 patt = alt_long_patt;
1142 else
1143 patt = f32_patt;
1144 break;
1145 case PROCESSOR_GENERIC64:
1146 patt = alt_long_patt;
1147 break;
1148 }
1149 }
1150
1151 if (patt == f32_patt)
1152 {
1153 /* If the padding is less than the limit computed below, we use
1154 the normal nop patterns. Otherwise, we use a jump instruction and adjust
1155 its offset. */
1156 int limit;
1157
1158 /* For 64bit, the limit is 3 bytes. */
1159 if (flag_code == CODE_64BIT
1160 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1161 limit = 3;
1162 else
1163 limit = 15;
1164 if (count < limit)
1165 memcpy (fragP->fr_literal + fragP->fr_fix,
1166 patt[count - 1], count);
1167 else
1168 {
1169 memcpy (fragP->fr_literal + fragP->fr_fix,
1170 jump_31, count);
1171 /* Adjust jump offset. */
1172 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1173 }
1174 }
1175 else
1176 {
1177 /* Maximum length of an instruction is 15 bytes. If the
1178 padding is greater than 15 bytes and we don't use a jump,
1179 we have to break it into smaller pieces. */
1180 int padding = count;
1181 while (padding > 15)
1182 {
1183 padding -= 15;
1184 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1185 patt [14], 15);
1186 }
1187
1188 if (padding)
1189 memcpy (fragP->fr_literal + fragP->fr_fix,
1190 patt [padding - 1], padding);
1191 }
1192 }
1193 fragP->fr_var = count;
1194 }
1195
1196 static INLINE int
1197 operand_type_all_zero (const union i386_operand_type *x)
1198 {
1199 switch (ARRAY_SIZE(x->array))
1200 {
1201 case 3:
1202 if (x->array[2])
1203 return 0;
1204 case 2:
1205 if (x->array[1])
1206 return 0;
1207 case 1:
1208 return !x->array[0];
1209 default:
1210 abort ();
1211 }
1212 }
1213
1214 static INLINE void
1215 operand_type_set (union i386_operand_type *x, unsigned int v)
1216 {
1217 switch (ARRAY_SIZE(x->array))
1218 {
1219 case 3:
1220 x->array[2] = v;
1221 case 2:
1222 x->array[1] = v;
1223 case 1:
1224 x->array[0] = v;
1225 break;
1226 default:
1227 abort ();
1228 }
1229 }
1230
1231 static INLINE int
1232 operand_type_equal (const union i386_operand_type *x,
1233 const union i386_operand_type *y)
1234 {
1235 switch (ARRAY_SIZE(x->array))
1236 {
1237 case 3:
1238 if (x->array[2] != y->array[2])
1239 return 0;
1240 case 2:
1241 if (x->array[1] != y->array[1])
1242 return 0;
1243 case 1:
1244 return x->array[0] == y->array[0];
1245 break;
1246 default:
1247 abort ();
1248 }
1249 }
1250
1251 static INLINE int
1252 cpu_flags_all_zero (const union i386_cpu_flags *x)
1253 {
1254 switch (ARRAY_SIZE(x->array))
1255 {
1256 case 3:
1257 if (x->array[2])
1258 return 0;
1259 case 2:
1260 if (x->array[1])
1261 return 0;
1262 case 1:
1263 return !x->array[0];
1264 default:
1265 abort ();
1266 }
1267 }
1268
1269 static INLINE void
1270 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1271 {
1272 switch (ARRAY_SIZE(x->array))
1273 {
1274 case 3:
1275 x->array[2] = v;
1276 case 2:
1277 x->array[1] = v;
1278 case 1:
1279 x->array[0] = v;
1280 break;
1281 default:
1282 abort ();
1283 }
1284 }
1285
1286 static INLINE int
1287 cpu_flags_equal (const union i386_cpu_flags *x,
1288 const union i386_cpu_flags *y)
1289 {
1290 switch (ARRAY_SIZE(x->array))
1291 {
1292 case 3:
1293 if (x->array[2] != y->array[2])
1294 return 0;
1295 case 2:
1296 if (x->array[1] != y->array[1])
1297 return 0;
1298 case 1:
1299 return x->array[0] == y->array[0];
1300 break;
1301 default:
1302 abort ();
1303 }
1304 }
1305
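/* Return 1 unless the template's 64-bit markers conflict with the
   current mode: a Cpu64 template only matches in 64-bit mode, and a
   CpuNo64 template only matches outside it.  */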
1306 static INLINE int
1307 cpu_flags_check_cpu64 (i386_cpu_flags f)
1308 {
1309 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1310 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1311 }
1312
1313 static INLINE i386_cpu_flags
1314 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1315 {
1316 switch (ARRAY_SIZE (x.array))
1317 {
1318 case 3:
1319 x.array [2] &= y.array [2];
1320 case 2:
1321 x.array [1] &= y.array [1];
1322 case 1:
1323 x.array [0] &= y.array [0];
1324 break;
1325 default:
1326 abort ();
1327 }
1328 return x;
1329 }
1330
1331 static INLINE i386_cpu_flags
1332 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1333 {
1334 switch (ARRAY_SIZE (x.array))
1335 {
1336 case 3:
1337 x.array [2] |= y.array [2];
1338 case 2:
1339 x.array [1] |= y.array [1];
1340 case 1:
1341 x.array [0] |= y.array [0];
1342 break;
1343 default:
1344 abort ();
1345 }
1346 return x;
1347 }
1348
1349 static INLINE i386_cpu_flags
1350 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1351 {
1352 switch (ARRAY_SIZE (x.array))
1353 {
1354 case 3:
1355 x.array [2] &= ~y.array [2];
1356 case 2:
1357 x.array [1] &= ~y.array [1];
1358 case 1:
1359 x.array [0] &= ~y.array [0];
1360 break;
1361 default:
1362 abort ();
1363 }
1364 return x;
1365 }
1366
1367 #define CPU_FLAGS_ARCH_MATCH 0x1
1368 #define CPU_FLAGS_64BIT_MATCH 0x2
1369 #define CPU_FLAGS_AES_MATCH 0x4
1370 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1371 #define CPU_FLAGS_AVX_MATCH 0x10
1372
1373 #define CPU_FLAGS_32BIT_MATCH \
1374 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1375 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1376 #define CPU_FLAGS_PERFECT_MATCH \
1377 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1378
1379 /* Return CPU flags match bits. */
1380
1381 static int
1382 cpu_flags_match (const insn_template *t)
1383 {
1384 i386_cpu_flags x = t->cpu_flags;
1385 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1386
1387 x.bitfield.cpu64 = 0;
1388 x.bitfield.cpuno64 = 0;
1389
1390 if (cpu_flags_all_zero (&x))
1391 {
1392 /* This instruction is available on all archs. */
1393 match |= CPU_FLAGS_32BIT_MATCH;
1394 }
1395 else
1396 {
1397 /* This instruction is available only on some archs. */
1398 i386_cpu_flags cpu = cpu_arch_flags;
1399
1400 cpu.bitfield.cpu64 = 0;
1401 cpu.bitfield.cpuno64 = 0;
1402 cpu = cpu_flags_and (x, cpu);
1403 if (!cpu_flags_all_zero (&cpu))
1404 {
1405 if (x.bitfield.cpuavx)
1406 {
1407 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1408 if (cpu.bitfield.cpuavx)
1409 {
1410 /* Check SSE2AVX. */
1411 if (!t->opcode_modifier.sse2avx || sse2avx)
1412 {
1413 match |= (CPU_FLAGS_ARCH_MATCH
1414 | CPU_FLAGS_AVX_MATCH);
1415 /* Check AES. */
1416 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1417 match |= CPU_FLAGS_AES_MATCH;
1418 /* Check PCLMUL. */
1419 if (!x.bitfield.cpupclmul
1420 || cpu.bitfield.cpupclmul)
1421 match |= CPU_FLAGS_PCLMUL_MATCH;
1422 }
1423 }
1424 else
1425 match |= CPU_FLAGS_ARCH_MATCH;
1426 }
1427 else
1428 match |= CPU_FLAGS_32BIT_MATCH;
1429 }
1430 }
1431 return match;
1432 }
1433
1434 static INLINE i386_operand_type
1435 operand_type_and (i386_operand_type x, i386_operand_type y)
1436 {
1437 switch (ARRAY_SIZE (x.array))
1438 {
1439 case 3:
1440 x.array [2] &= y.array [2];
1441 case 2:
1442 x.array [1] &= y.array [1];
1443 case 1:
1444 x.array [0] &= y.array [0];
1445 break;
1446 default:
1447 abort ();
1448 }
1449 return x;
1450 }
1451
1452 static INLINE i386_operand_type
1453 operand_type_or (i386_operand_type x, i386_operand_type y)
1454 {
1455 switch (ARRAY_SIZE (x.array))
1456 {
1457 case 3:
1458 x.array [2] |= y.array [2];
1459 case 2:
1460 x.array [1] |= y.array [1];
1461 case 1:
1462 x.array [0] |= y.array [0];
1463 break;
1464 default:
1465 abort ();
1466 }
1467 return x;
1468 }
1469
1470 static INLINE i386_operand_type
1471 operand_type_xor (i386_operand_type x, i386_operand_type y)
1472 {
1473 switch (ARRAY_SIZE (x.array))
1474 {
1475 case 3:
1476 x.array [2] ^= y.array [2];
1477 case 2:
1478 x.array [1] ^= y.array [1];
1479 case 1:
1480 x.array [0] ^= y.array [0];
1481 break;
1482 default:
1483 abort ();
1484 }
1485 return x;
1486 }
1487
1488 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1489 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1490 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1491 static const i386_operand_type inoutportreg
1492 = OPERAND_TYPE_INOUTPORTREG;
1493 static const i386_operand_type reg16_inoutportreg
1494 = OPERAND_TYPE_REG16_INOUTPORTREG;
1495 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1496 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1497 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1498 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1499 static const i386_operand_type anydisp
1500 = OPERAND_TYPE_ANYDISP;
1501 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1502 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1503 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1504 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1505 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1506 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1507 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1508 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1509 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1510 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1511 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1512 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1513
1514 enum operand_type
1515 {
1516 reg,
1517 imm,
1518 disp,
1519 anymem
1520 };
1521
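/* Return non-zero if operand type T has any bit of class C set; e.g.
   operand_type_check (t, anymem) is non-zero for any displacement size
   or a base/index memory form.  */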
1522 static INLINE int
1523 operand_type_check (i386_operand_type t, enum operand_type c)
1524 {
1525 switch (c)
1526 {
1527 case reg:
1528 return (t.bitfield.reg8
1529 || t.bitfield.reg16
1530 || t.bitfield.reg32
1531 || t.bitfield.reg64);
1532
1533 case imm:
1534 return (t.bitfield.imm8
1535 || t.bitfield.imm8s
1536 || t.bitfield.imm16
1537 || t.bitfield.imm32
1538 || t.bitfield.imm32s
1539 || t.bitfield.imm64);
1540
1541 case disp:
1542 return (t.bitfield.disp8
1543 || t.bitfield.disp16
1544 || t.bitfield.disp32
1545 || t.bitfield.disp32s
1546 || t.bitfield.disp64);
1547
1548 case anymem:
1549 return (t.bitfield.disp8
1550 || t.bitfield.disp16
1551 || t.bitfield.disp32
1552 || t.bitfield.disp32s
1553 || t.bitfield.disp64
1554 || t.bitfield.baseindex);
1555
1556 default:
1557 abort ();
1558 }
1559
1560 return 0;
1561 }
1562
1563 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1564 operand J for instruction template T. */
1565
1566 static INLINE int
1567 match_reg_size (const insn_template *t, unsigned int j)
1568 {
1569 return !((i.types[j].bitfield.byte
1570 && !t->operand_types[j].bitfield.byte)
1571 || (i.types[j].bitfield.word
1572 && !t->operand_types[j].bitfield.word)
1573 || (i.types[j].bitfield.dword
1574 && !t->operand_types[j].bitfield.dword)
1575 || (i.types[j].bitfield.qword
1576 && !t->operand_types[j].bitfield.qword));
1577 }
1578
1579 /* Return 1 if there is no conflict in any size on operand J for
1580 instruction template T. */
1581
1582 static INLINE int
1583 match_mem_size (const insn_template *t, unsigned int j)
1584 {
1585 return (match_reg_size (t, j)
1586 && !((i.types[j].bitfield.unspecified
1587 && !t->operand_types[j].bitfield.unspecified)
1588 || (i.types[j].bitfield.fword
1589 && !t->operand_types[j].bitfield.fword)
1590 || (i.types[j].bitfield.tbyte
1591 && !t->operand_types[j].bitfield.tbyte)
1592 || (i.types[j].bitfield.xmmword
1593 && !t->operand_types[j].bitfield.xmmword)
1594 || (i.types[j].bitfield.ymmword
1595 && !t->operand_types[j].bitfield.ymmword)));
1596 }
1597
1598 /* Return 1 if there is no size conflict on any operands for
1599 instruction template T. */
1600
1601 static INLINE int
1602 operand_size_match (const insn_template *t)
1603 {
1604 unsigned int j;
1605 int match = 1;
1606
1607 /* Don't check jump instructions. */
1608 if (t->opcode_modifier.jump
1609 || t->opcode_modifier.jumpbyte
1610 || t->opcode_modifier.jumpdword
1611 || t->opcode_modifier.jumpintersegment)
1612 return match;
1613
1614 /* Check memory and accumulator operand size. */
1615 for (j = 0; j < i.operands; j++)
1616 {
1617 if (t->operand_types[j].bitfield.anysize)
1618 continue;
1619
1620 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1621 {
1622 match = 0;
1623 break;
1624 }
1625
1626 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1627 {
1628 match = 0;
1629 break;
1630 }
1631 }
1632
1633 if (match)
1634 return match;
1635 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1636 {
1637 mismatch:
1638 i.error = operand_size_mismatch;
1639 return 0;
1640 }
1641
1642 /* Check reverse. */
1643 gas_assert (i.operands == 2);
1644
1645 match = 1;
1646 for (j = 0; j < 2; j++)
1647 {
1648 if (t->operand_types[j].bitfield.acc
1649 && !match_reg_size (t, j ? 0 : 1))
1650 goto mismatch;
1651
1652 if (i.types[j].bitfield.mem
1653 && !match_mem_size (t, j ? 0 : 1))
1654 goto mismatch;
1655 }
1656
1657 return match;
1658 }
1659
1660 static INLINE int
1661 operand_type_match (i386_operand_type overlap,
1662 i386_operand_type given)
1663 {
1664 i386_operand_type temp = overlap;
1665
1666 temp.bitfield.jumpabsolute = 0;
1667 temp.bitfield.unspecified = 0;
1668 temp.bitfield.byte = 0;
1669 temp.bitfield.word = 0;
1670 temp.bitfield.dword = 0;
1671 temp.bitfield.fword = 0;
1672 temp.bitfield.qword = 0;
1673 temp.bitfield.tbyte = 0;
1674 temp.bitfield.xmmword = 0;
1675 temp.bitfield.ymmword = 0;
1676 if (operand_type_all_zero (&temp))
1677 goto mismatch;
1678
1679 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1680 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1681 return 1;
1682
1683 mismatch:
1684 i.error = operand_type_mismatch;
1685 return 0;
1686 }
1687
1688 /* If given types g0 and g1 are registers they must be of the same type
1689 unless the expected operand type register overlap is null.
1690 Note that Acc in a template matches every size of reg. */
1691
1692 static INLINE int
1693 operand_type_register_match (i386_operand_type m0,
1694 i386_operand_type g0,
1695 i386_operand_type t0,
1696 i386_operand_type m1,
1697 i386_operand_type g1,
1698 i386_operand_type t1)
1699 {
1700 if (!operand_type_check (g0, reg))
1701 return 1;
1702
1703 if (!operand_type_check (g1, reg))
1704 return 1;
1705
1706 if (g0.bitfield.reg8 == g1.bitfield.reg8
1707 && g0.bitfield.reg16 == g1.bitfield.reg16
1708 && g0.bitfield.reg32 == g1.bitfield.reg32
1709 && g0.bitfield.reg64 == g1.bitfield.reg64)
1710 return 1;
1711
1712 if (m0.bitfield.acc)
1713 {
1714 t0.bitfield.reg8 = 1;
1715 t0.bitfield.reg16 = 1;
1716 t0.bitfield.reg32 = 1;
1717 t0.bitfield.reg64 = 1;
1718 }
1719
1720 if (m1.bitfield.acc)
1721 {
1722 t1.bitfield.reg8 = 1;
1723 t1.bitfield.reg16 = 1;
1724 t1.bitfield.reg32 = 1;
1725 t1.bitfield.reg64 = 1;
1726 }
1727
1728 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1729 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1730 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1731 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1732 return 1;
1733
1734 i.error = register_type_mismatch;
1735
1736 return 0;
1737 }
1738
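/* Return the ModRM "mod" field encoding implied by displacement type T:
   mod 1 for an 8-bit displacement, mod 2 for a 16- or 32-bit one, and
   0 when no displacement type is set.  */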
1739 static INLINE unsigned int
1740 mode_from_disp_size (i386_operand_type t)
1741 {
1742 if (t.bitfield.disp8)
1743 return 1;
1744 else if (t.bitfield.disp16
1745 || t.bitfield.disp32
1746 || t.bitfield.disp32s)
1747 return 2;
1748 else
1749 return 0;
1750 }
1751
1752 static INLINE int
1753 fits_in_signed_byte (offsetT num)
1754 {
1755 return (num >= -128) && (num <= 127);
1756 }
1757
1758 static INLINE int
1759 fits_in_unsigned_byte (offsetT num)
1760 {
1761 return (num & 0xff) == num;
1762 }
1763
1764 static INLINE int
1765 fits_in_unsigned_word (offsetT num)
1766 {
1767 return (num & 0xffff) == num;
1768 }
1769
1770 static INLINE int
1771 fits_in_signed_word (offsetT num)
1772 {
1773 return (-32768 <= num) && (num <= 32767);
1774 }
1775
1776 static INLINE int
1777 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1778 {
1779 #ifndef BFD64
1780 return 1;
1781 #else
1782 return (!(((offsetT) -1 << 31) & num)
1783 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1784 #endif
1785 } /* fits_in_signed_long() */
1786
1787 static INLINE int
1788 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1789 {
1790 #ifndef BFD64
1791 return 1;
1792 #else
1793 return (num & (((offsetT) 2 << 31) - 1)) == num;
1794 #endif
1795 } /* fits_in_unsigned_long() */
1796
1797 static INLINE int
1798 fits_in_imm4 (offsetT num)
1799 {
1800 return (num & 0xf) == num;
1801 }
1802
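/* Return the set of immediate operand types NUM fits in, from the
   smallest upwards; e.g. for NUM == 300 this is Imm16, Imm32, Imm32S
   and Imm64, since 300 fits in a signed word but not in a byte.  */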
1803 static i386_operand_type
1804 smallest_imm_type (offsetT num)
1805 {
1806 i386_operand_type t;
1807
1808 operand_type_set (&t, 0);
1809 t.bitfield.imm64 = 1;
1810
1811 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1812 {
1813 /* This code is disabled on the 486 because all the Imm1 forms
1814 in the opcode table are slower on the i486. They're the
1815 versions with the implicitly specified single-position
1816 displacement, which has another syntax if you really want to
1817 use that form. */
1818 t.bitfield.imm1 = 1;
1819 t.bitfield.imm8 = 1;
1820 t.bitfield.imm8s = 1;
1821 t.bitfield.imm16 = 1;
1822 t.bitfield.imm32 = 1;
1823 t.bitfield.imm32s = 1;
1824 }
1825 else if (fits_in_signed_byte (num))
1826 {
1827 t.bitfield.imm8 = 1;
1828 t.bitfield.imm8s = 1;
1829 t.bitfield.imm16 = 1;
1830 t.bitfield.imm32 = 1;
1831 t.bitfield.imm32s = 1;
1832 }
1833 else if (fits_in_unsigned_byte (num))
1834 {
1835 t.bitfield.imm8 = 1;
1836 t.bitfield.imm16 = 1;
1837 t.bitfield.imm32 = 1;
1838 t.bitfield.imm32s = 1;
1839 }
1840 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1841 {
1842 t.bitfield.imm16 = 1;
1843 t.bitfield.imm32 = 1;
1844 t.bitfield.imm32s = 1;
1845 }
1846 else if (fits_in_signed_long (num))
1847 {
1848 t.bitfield.imm32 = 1;
1849 t.bitfield.imm32s = 1;
1850 }
1851 else if (fits_in_unsigned_long (num))
1852 t.bitfield.imm32 = 1;
1853
1854 return t;
1855 }
1856
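/* Mask VAL down to SIZE bytes, warning when significant bits are lost;
   e.g. offset_in_range (0x1234, 1) warns that the value was shortened
   and returns 0x34.  */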
1857 static offsetT
1858 offset_in_range (offsetT val, int size)
1859 {
1860 addressT mask;
1861
1862 switch (size)
1863 {
1864 case 1: mask = ((addressT) 1 << 8) - 1; break;
1865 case 2: mask = ((addressT) 1 << 16) - 1; break;
1866 case 4: mask = ((addressT) 2 << 31) - 1; break;
1867 #ifdef BFD64
1868 case 8: mask = ((addressT) 2 << 63) - 1; break;
1869 #endif
1870 default: abort ();
1871 }
1872
1873 #ifdef BFD64
1874 /* If BFD64, sign extend val for 32bit address mode. */
1875 if (flag_code != CODE_64BIT
1876 || i.prefix[ADDR_PREFIX])
1877 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1878 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1879 #endif
1880
1881 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1882 {
1883 char buf1[40], buf2[40];
1884
1885 sprint_value (buf1, val);
1886 sprint_value (buf2, val & mask);
1887 as_warn (_("%s shortened to %s"), buf1, buf2);
1888 }
1889 return val & mask;
1890 }
1891
1892 enum PREFIX_GROUP
1893 {
1894 PREFIX_EXIST = 0,
1895 PREFIX_LOCK,
1896 PREFIX_REP,
1897 PREFIX_OTHER
1898 };
1899
1900 /* Returns
1901 a. PREFIX_EXIST if attempting to add a prefix where one from the
1902 same class already exists.
1903 b. PREFIX_LOCK if lock prefix is added.
1904 c. PREFIX_REP if rep/repne prefix is added.
1905 d. PREFIX_OTHER if other prefix is added.
1906 */
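/* E.g. add_prefix (LOCK_PREFIX_OPCODE) stores the byte in
   i.prefix[LOCK_PREFIX], bumps i.prefixes and returns PREFIX_LOCK; a
   second identical call returns PREFIX_EXIST and reports "same type of
   prefix used twice".  */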
1907
1908 static enum PREFIX_GROUP
1909 add_prefix (unsigned int prefix)
1910 {
1911 enum PREFIX_GROUP ret = PREFIX_OTHER;
1912 unsigned int q;
1913
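  /* REX prefixes occupy opcodes 0x40-0x4f and encode four bits,
     0100WRXB.  Two REX prefixes can be merged only if they do not both
     set W and do not both touch the R/X/B group; anything else is
     reported as a duplicate prefix.  */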
1914 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1915 && flag_code == CODE_64BIT)
1916 {
1917 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1918 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1919 && (prefix & (REX_R | REX_X | REX_B))))
1920 ret = PREFIX_EXIST;
1921 q = REX_PREFIX;
1922 }
1923 else
1924 {
1925 switch (prefix)
1926 {
1927 default:
1928 abort ();
1929
1930 case CS_PREFIX_OPCODE:
1931 case DS_PREFIX_OPCODE:
1932 case ES_PREFIX_OPCODE:
1933 case FS_PREFIX_OPCODE:
1934 case GS_PREFIX_OPCODE:
1935 case SS_PREFIX_OPCODE:
1936 q = SEG_PREFIX;
1937 break;
1938
1939 case REPNE_PREFIX_OPCODE:
1940 case REPE_PREFIX_OPCODE:
1941 q = REP_PREFIX;
1942 ret = PREFIX_REP;
1943 break;
1944
1945 case LOCK_PREFIX_OPCODE:
1946 q = LOCK_PREFIX;
1947 ret = PREFIX_LOCK;
1948 break;
1949
1950 case FWAIT_OPCODE:
1951 q = WAIT_PREFIX;
1952 break;
1953
1954 case ADDR_PREFIX_OPCODE:
1955 q = ADDR_PREFIX;
1956 break;
1957
1958 case DATA_PREFIX_OPCODE:
1959 q = DATA_PREFIX;
1960 break;
1961 }
1962 if (i.prefix[q] != 0)
1963 ret = PREFIX_EXIST;
1964 }
1965
1966 if (ret)
1967 {
1968 if (!i.prefix[q])
1969 ++i.prefixes;
1970 i.prefix[q] |= prefix;
1971 }
1972 else
1973 as_bad (_("same type of prefix used twice"));
1974
1975 return ret;
1976 }
1977
1978 static void
1979 update_code_flag (int value, int check)
1980 {
1981 PRINTF_LIKE ((*as_error));
1982
1983 flag_code = (enum flag_code) value;
1984 if (flag_code == CODE_64BIT)
1985 {
1986 cpu_arch_flags.bitfield.cpu64 = 1;
1987 cpu_arch_flags.bitfield.cpuno64 = 0;
1988 }
1989 else
1990 {
1991 cpu_arch_flags.bitfield.cpu64 = 0;
1992 cpu_arch_flags.bitfield.cpuno64 = 1;
1993 }
1996 	  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1995 {
1996 if (check)
1997 as_error = as_fatal;
1998 else
1999 as_error = as_bad;
2000 (*as_error) (_("64bit mode not supported on `%s'."),
2001 cpu_arch_name ? cpu_arch_name : default_arch);
2002 }
2003 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2004 {
2005 if (check)
2006 as_error = as_fatal;
2007 else
2008 as_error = as_bad;
2009 (*as_error) (_("32bit mode not supported on `%s'."),
2010 cpu_arch_name ? cpu_arch_name : default_arch);
2011 }
2012 stackop_size = '\0';
2013 }
2014
2015 static void
2016 set_code_flag (int value)
2017 {
2018 update_code_flag (value, 0);
2019 }
2020
2021 static void
2022 set_16bit_gcc_code_flag (int new_code_flag)
2023 {
2024 flag_code = (enum flag_code) new_code_flag;
2025 if (flag_code != CODE_16BIT)
2026 abort ();
2027 cpu_arch_flags.bitfield.cpu64 = 0;
2028 cpu_arch_flags.bitfield.cpuno64 = 1;
2029 stackop_size = LONG_MNEM_SUFFIX;
2030 }
2031
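/* Handle the .intel_syntax and .att_syntax directives.  An optional
   "prefix" or "noprefix" argument says whether registers must be
   written with a register prefix; with no argument the default for
   Intel syntax depends on the target's symbol leading character.  */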
2032 static void
2033 set_intel_syntax (int syntax_flag)
2034 {
2035 /* Find out if register prefixing is specified. */
2036 int ask_naked_reg = 0;
2037
2038 SKIP_WHITESPACE ();
2039 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2040 {
2041 char *string = input_line_pointer;
2042 int e = get_symbol_end ();
2043
2044 if (strcmp (string, "prefix") == 0)
2045 ask_naked_reg = 1;
2046 else if (strcmp (string, "noprefix") == 0)
2047 ask_naked_reg = -1;
2048 else
2049 as_bad (_("bad argument to syntax directive."));
2050 *input_line_pointer = e;
2051 }
2052 demand_empty_rest_of_line ();
2053
2054 intel_syntax = syntax_flag;
2055
2056 if (ask_naked_reg == 0)
2057 allow_naked_reg = (intel_syntax
2058 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2059 else
2060 allow_naked_reg = (ask_naked_reg < 0);
2061
2062 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2063
2064 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2065 identifier_chars['$'] = intel_syntax ? '$' : 0;
2066 register_prefix = allow_naked_reg ? "" : "%";
2067 }
2068
2069 static void
2070 set_intel_mnemonic (int mnemonic_flag)
2071 {
2072 intel_mnemonic = mnemonic_flag;
2073 }
2074
2075 static void
2076 set_allow_index_reg (int flag)
2077 {
2078 allow_index_reg = flag;
2079 }
2080
2081 static void
2082 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2083 {
2084 SKIP_WHITESPACE ();
2085
2086 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2087 {
2088 char *string = input_line_pointer;
2089 int e = get_symbol_end ();
2090
2091 if (strcmp (string, "none") == 0)
2092 sse_check = sse_check_none;
2093 else if (strcmp (string, "warning") == 0)
2094 sse_check = sse_check_warning;
2095 else if (strcmp (string, "error") == 0)
2096 sse_check = sse_check_error;
2097 else
2098 as_bad (_("bad argument to sse_check directive."));
2099 *input_line_pointer = e;
2100 }
2101 else
2102 as_bad (_("missing argument for sse_check directive"));
2103
2104 demand_empty_rest_of_line ();
2105 }
2106
2107 static void
2108 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2109 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2110 {
2111 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2112 static const char *arch;
2113
2114 /* Intel L1OM is only supported on ELF. */
2115 if (!IS_ELF)
2116 return;
2117
2118 if (!arch)
2119 {
2120 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2121 use default_arch. */
2122 arch = cpu_arch_name;
2123 if (!arch)
2124 arch = default_arch;
2125 }
2126
2127 /* If we are targeting Intel L1OM, we must enable it. */
2128 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2129 || new_flag.bitfield.cpul1om)
2130 return;
2131
2132 /* If we are targeting Intel K1OM, we must enable it. */
2133 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2134 || new_flag.bitfield.cpuk1om)
2135 return;
2136
2137 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2138 #endif
2139 }
2140
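/* Handle the .arch directive: a base architecture name such as "i686"
   resets the enabled feature flags, while a name starting with '.'
   (e.g. ".sse4.1", or a negated ".no..." entry) enables or disables a
   single extension on top of the current set.  An optional ",jumps" or
   ",nojumps" controls promotion of out-of-range conditional jumps.  */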
2141 static void
2142 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2143 {
2144 SKIP_WHITESPACE ();
2145
2146 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2147 {
2148 char *string = input_line_pointer;
2149 int e = get_symbol_end ();
2150 unsigned int j;
2151 i386_cpu_flags flags;
2152
2153 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2154 {
2155 if (strcmp (string, cpu_arch[j].name) == 0)
2156 {
2157 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2158
2159 if (*string != '.')
2160 {
2161 cpu_arch_name = cpu_arch[j].name;
2162 cpu_sub_arch_name = NULL;
2163 cpu_arch_flags = cpu_arch[j].flags;
2164 if (flag_code == CODE_64BIT)
2165 {
2166 cpu_arch_flags.bitfield.cpu64 = 1;
2167 cpu_arch_flags.bitfield.cpuno64 = 0;
2168 }
2169 else
2170 {
2171 cpu_arch_flags.bitfield.cpu64 = 0;
2172 cpu_arch_flags.bitfield.cpuno64 = 1;
2173 }
2174 cpu_arch_isa = cpu_arch[j].type;
2175 cpu_arch_isa_flags = cpu_arch[j].flags;
2176 if (!cpu_arch_tune_set)
2177 {
2178 cpu_arch_tune = cpu_arch_isa;
2179 cpu_arch_tune_flags = cpu_arch_isa_flags;
2180 }
2181 break;
2182 }
2183
2184 if (!cpu_arch[j].negated)
2185 flags = cpu_flags_or (cpu_arch_flags,
2186 cpu_arch[j].flags);
2187 else
2188 flags = cpu_flags_and_not (cpu_arch_flags,
2189 cpu_arch[j].flags);
2190 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2191 {
2192 if (cpu_sub_arch_name)
2193 {
2194 char *name = cpu_sub_arch_name;
2195 cpu_sub_arch_name = concat (name,
2196 cpu_arch[j].name,
2197 (const char *) NULL);
2198 free (name);
2199 }
2200 else
2201 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2202 cpu_arch_flags = flags;
2203 cpu_arch_isa_flags = flags;
2204 }
2205 *input_line_pointer = e;
2206 demand_empty_rest_of_line ();
2207 return;
2208 }
2209 }
2210 if (j >= ARRAY_SIZE (cpu_arch))
2211 as_bad (_("no such architecture: `%s'"), string);
2212
2213 *input_line_pointer = e;
2214 }
2215 else
2216 as_bad (_("missing cpu architecture"));
2217
2218 no_cond_jump_promotion = 0;
2219 if (*input_line_pointer == ','
2220 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2221 {
2222 char *string = ++input_line_pointer;
2223 int e = get_symbol_end ();
2224
2225 if (strcmp (string, "nojumps") == 0)
2226 no_cond_jump_promotion = 1;
2227 else if (strcmp (string, "jumps") == 0)
2228 ;
2229 else
2230 as_bad (_("no such architecture modifier: `%s'"), string);
2231
2232 *input_line_pointer = e;
2233 }
2234
2235 demand_empty_rest_of_line ();
2236 }
2237
2238 enum bfd_architecture
2239 i386_arch (void)
2240 {
2241 if (cpu_arch_isa == PROCESSOR_L1OM)
2242 {
2243 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2244 || flag_code != CODE_64BIT)
2245 as_fatal (_("Intel L1OM is 64bit ELF only"));
2246 return bfd_arch_l1om;
2247 }
2248 else if (cpu_arch_isa == PROCESSOR_K1OM)
2249 {
2250 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2251 || flag_code != CODE_64BIT)
2252 as_fatal (_("Intel K1OM is 64bit ELF only"));
2253 return bfd_arch_k1om;
2254 }
2255 else
2256 return bfd_arch_i386;
2257 }
2258
2259 unsigned long
2260 i386_mach (void)
2261 {
2262 if (!strncmp (default_arch, "x86_64", 6))
2263 {
2264 if (cpu_arch_isa == PROCESSOR_L1OM)
2265 {
2266 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2267 || default_arch[6] != '\0')
2268 as_fatal (_("Intel L1OM is 64bit ELF only"));
2269 return bfd_mach_l1om;
2270 }
2271 else if (cpu_arch_isa == PROCESSOR_K1OM)
2272 {
2273 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2274 || default_arch[6] != '\0')
2275 as_fatal (_("Intel K1OM is 64bit ELF only"));
2276 return bfd_mach_k1om;
2277 }
2278 else if (default_arch[6] == '\0')
2279 return bfd_mach_x86_64;
2280 else
2281 return bfd_mach_x64_32;
2282 }
2283 else if (!strcmp (default_arch, "i386"))
2284 return bfd_mach_i386_i386;
2285 else
2286 as_fatal (_("unknown architecture"));
2287 }
2288 \f
2289 void
2290 md_begin (void)
2291 {
2292 const char *hash_err;
2293
2294 /* Initialize op_hash hash table. */
2295 op_hash = hash_new ();
2296
2297 {
2298 const insn_template *optab;
2299 templates *core_optab;
2300
2301 /* Setup for loop. */
2302 optab = i386_optab;
2303 core_optab = (templates *) xmalloc (sizeof (templates));
2304 core_optab->start = optab;
2305
2306 while (1)
2307 {
2308 ++optab;
2309 if (optab->name == NULL
2310 || strcmp (optab->name, (optab - 1)->name) != 0)
2311 {
2312 /* different name --> ship out current template list;
2313 add to hash table; & begin anew. */
2314 core_optab->end = optab;
2315 hash_err = hash_insert (op_hash,
2316 (optab - 1)->name,
2317 (void *) core_optab);
2318 if (hash_err)
2319 {
2320 as_fatal (_("internal error: can't hash %s: %s"),
2321 (optab - 1)->name,
2322 hash_err);
2323 }
2324 if (optab->name == NULL)
2325 break;
2326 core_optab = (templates *) xmalloc (sizeof (templates));
2327 core_optab->start = optab;
2328 }
2329 }
2330 }
2331
2332 /* Initialize reg_hash hash table. */
2333 reg_hash = hash_new ();
2334 {
2335 const reg_entry *regtab;
2336 unsigned int regtab_size = i386_regtab_size;
2337
2338 for (regtab = i386_regtab; regtab_size--; regtab++)
2339 {
2340 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2341 if (hash_err)
2342 as_fatal (_("internal error: can't hash %s: %s"),
2343 regtab->reg_name,
2344 hash_err);
2345 }
2346 }
2347
2348 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2349 {
2350 int c;
2351 char *p;
2352
2353 for (c = 0; c < 256; c++)
2354 {
2355 if (ISDIGIT (c))
2356 {
2357 digit_chars[c] = c;
2358 mnemonic_chars[c] = c;
2359 register_chars[c] = c;
2360 operand_chars[c] = c;
2361 }
2362 else if (ISLOWER (c))
2363 {
2364 mnemonic_chars[c] = c;
2365 register_chars[c] = c;
2366 operand_chars[c] = c;
2367 }
2368 else if (ISUPPER (c))
2369 {
2370 mnemonic_chars[c] = TOLOWER (c);
2371 register_chars[c] = mnemonic_chars[c];
2372 operand_chars[c] = c;
2373 }
2374
2375 if (ISALPHA (c) || ISDIGIT (c))
2376 identifier_chars[c] = c;
2377 else if (c >= 128)
2378 {
2379 identifier_chars[c] = c;
2380 operand_chars[c] = c;
2381 }
2382 }
2383
2384 #ifdef LEX_AT
2385 identifier_chars['@'] = '@';
2386 #endif
2387 #ifdef LEX_QM
2388 identifier_chars['?'] = '?';
2389 operand_chars['?'] = '?';
2390 #endif
2391 digit_chars['-'] = '-';
2392 mnemonic_chars['_'] = '_';
2393 mnemonic_chars['-'] = '-';
2394 mnemonic_chars['.'] = '.';
2395 identifier_chars['_'] = '_';
2396 identifier_chars['.'] = '.';
2397
2398 for (p = operand_special_chars; *p != '\0'; p++)
2399 operand_chars[(unsigned char) *p] = *p;
2400 }
2401
2402 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2403 if (IS_ELF)
2404 {
2405 record_alignment (text_section, 2);
2406 record_alignment (data_section, 2);
2407 record_alignment (bss_section, 2);
2408 }
2409 #endif
2410
2411 if (flag_code == CODE_64BIT)
2412 {
2413 #if defined (OBJ_COFF) && defined (TE_PE)
2414 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2415 ? 32 : 16);
2416 #else
2417 x86_dwarf2_return_column = 16;
2418 #endif
2419 x86_cie_data_alignment = -8;
2420 }
2421 else
2422 {
2423 x86_dwarf2_return_column = 8;
2424 x86_cie_data_alignment = -4;
2425 }
2426 }
2427
2428 void
2429 i386_print_statistics (FILE *file)
2430 {
2431 hash_print_statistics (file, "i386 opcode", op_hash);
2432 hash_print_statistics (file, "i386 register", reg_hash);
2433 }
2434 \f
2435 #ifdef DEBUG386
2436
2437 /* Debugging routines for md_assemble. */
2438 static void pte (insn_template *);
2439 static void pt (i386_operand_type);
2440 static void pe (expressionS *);
2441 static void ps (symbolS *);
2442
2443 static void
2444 pi (char *line, i386_insn *x)
2445 {
2446 unsigned int j;
2447
2448 fprintf (stdout, "%s: template ", line);
2449 pte (&x->tm);
2450 fprintf (stdout, " address: base %s index %s scale %x\n",
2451 x->base_reg ? x->base_reg->reg_name : "none",
2452 x->index_reg ? x->index_reg->reg_name : "none",
2453 x->log2_scale_factor);
2454 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2455 x->rm.mode, x->rm.reg, x->rm.regmem);
2456 fprintf (stdout, " sib: base %x index %x scale %x\n",
2457 x->sib.base, x->sib.index, x->sib.scale);
2458 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2459 (x->rex & REX_W) != 0,
2460 (x->rex & REX_R) != 0,
2461 (x->rex & REX_X) != 0,
2462 (x->rex & REX_B) != 0);
2463 for (j = 0; j < x->operands; j++)
2464 {
2465 fprintf (stdout, " #%d: ", j + 1);
2466 pt (x->types[j]);
2467 fprintf (stdout, "\n");
2468 if (x->types[j].bitfield.reg8
2469 || x->types[j].bitfield.reg16
2470 || x->types[j].bitfield.reg32
2471 || x->types[j].bitfield.reg64
2472 || x->types[j].bitfield.regmmx
2473 || x->types[j].bitfield.regxmm
2474 || x->types[j].bitfield.regymm
2475 || x->types[j].bitfield.sreg2
2476 || x->types[j].bitfield.sreg3
2477 || x->types[j].bitfield.control
2478 || x->types[j].bitfield.debug
2479 || x->types[j].bitfield.test)
2480 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2481 if (operand_type_check (x->types[j], imm))
2482 pe (x->op[j].imms);
2483 if (operand_type_check (x->types[j], disp))
2484 pe (x->op[j].disps);
2485 }
2486 }
2487
2488 static void
2489 pte (insn_template *t)
2490 {
2491 unsigned int j;
2492 fprintf (stdout, " %d operands ", t->operands);
2493 fprintf (stdout, "opcode %x ", t->base_opcode);
2494 if (t->extension_opcode != None)
2495 fprintf (stdout, "ext %x ", t->extension_opcode);
2496 if (t->opcode_modifier.d)
2497 fprintf (stdout, "D");
2498 if (t->opcode_modifier.w)
2499 fprintf (stdout, "W");
2500 fprintf (stdout, "\n");
2501 for (j = 0; j < t->operands; j++)
2502 {
2503 fprintf (stdout, " #%d type ", j + 1);
2504 pt (t->operand_types[j]);
2505 fprintf (stdout, "\n");
2506 }
2507 }
2508
2509 static void
2510 pe (expressionS *e)
2511 {
2512 fprintf (stdout, " operation %d\n", e->X_op);
2513 fprintf (stdout, " add_number %ld (%lx)\n",
2514 (long) e->X_add_number, (long) e->X_add_number);
2515 if (e->X_add_symbol)
2516 {
2517 fprintf (stdout, " add_symbol ");
2518 ps (e->X_add_symbol);
2519 fprintf (stdout, "\n");
2520 }
2521 if (e->X_op_symbol)
2522 {
2523 fprintf (stdout, " op_symbol ");
2524 ps (e->X_op_symbol);
2525 fprintf (stdout, "\n");
2526 }
2527 }
2528
2529 static void
2530 ps (symbolS *s)
2531 {
2532 fprintf (stdout, "%s type %s%s",
2533 S_GET_NAME (s),
2534 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2535 segment_name (S_GET_SEGMENT (s)));
2536 }
2537
2538 static struct type_name
2539 {
2540 i386_operand_type mask;
2541 const char *name;
2542 }
2543 const type_names[] =
2544 {
2545 { OPERAND_TYPE_REG8, "r8" },
2546 { OPERAND_TYPE_REG16, "r16" },
2547 { OPERAND_TYPE_REG32, "r32" },
2548 { OPERAND_TYPE_REG64, "r64" },
2549 { OPERAND_TYPE_IMM8, "i8" },
2550 { OPERAND_TYPE_IMM8S, "i8s" },
2551 { OPERAND_TYPE_IMM16, "i16" },
2552 { OPERAND_TYPE_IMM32, "i32" },
2553 { OPERAND_TYPE_IMM32S, "i32s" },
2554 { OPERAND_TYPE_IMM64, "i64" },
2555 { OPERAND_TYPE_IMM1, "i1" },
2556 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2557 { OPERAND_TYPE_DISP8, "d8" },
2558 { OPERAND_TYPE_DISP16, "d16" },
2559 { OPERAND_TYPE_DISP32, "d32" },
2560 { OPERAND_TYPE_DISP32S, "d32s" },
2561 { OPERAND_TYPE_DISP64, "d64" },
2562 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2563 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2564 { OPERAND_TYPE_CONTROL, "control reg" },
2565 { OPERAND_TYPE_TEST, "test reg" },
2566 { OPERAND_TYPE_DEBUG, "debug reg" },
2567 { OPERAND_TYPE_FLOATREG, "FReg" },
2568 { OPERAND_TYPE_FLOATACC, "FAcc" },
2569 { OPERAND_TYPE_SREG2, "SReg2" },
2570 { OPERAND_TYPE_SREG3, "SReg3" },
2571 { OPERAND_TYPE_ACC, "Acc" },
2572 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2573 { OPERAND_TYPE_REGMMX, "rMMX" },
2574 { OPERAND_TYPE_REGXMM, "rXMM" },
2575 { OPERAND_TYPE_REGYMM, "rYMM" },
2576 { OPERAND_TYPE_ESSEG, "es" },
2577 };
2578
2579 static void
2580 pt (i386_operand_type t)
2581 {
2582 unsigned int j;
2583 i386_operand_type a;
2584
2585 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2586 {
2587 a = operand_type_and (t, type_names[j].mask);
2588 if (!operand_type_all_zero (&a))
2589 fprintf (stdout, "%s, ", type_names[j].name);
2590 }
2591 fflush (stdout);
2592 }
2593
2594 #endif /* DEBUG386 */
2595 \f
2596 static bfd_reloc_code_real_type
2597 reloc (unsigned int size,
2598 int pcrel,
2599 int sign,
2600 bfd_reloc_code_real_type other)
2601 {
2602 if (other != NO_RELOC)
2603 {
2604 reloc_howto_type *rel;
2605
2606 if (size == 8)
2607 switch (other)
2608 {
2609 case BFD_RELOC_X86_64_GOT32:
2610 return BFD_RELOC_X86_64_GOT64;
2611 break;
2612 case BFD_RELOC_X86_64_PLTOFF64:
2613 return BFD_RELOC_X86_64_PLTOFF64;
2614 break;
2615 case BFD_RELOC_X86_64_GOTPC32:
2616 other = BFD_RELOC_X86_64_GOTPC64;
2617 break;
2618 case BFD_RELOC_X86_64_GOTPCREL:
2619 other = BFD_RELOC_X86_64_GOTPCREL64;
2620 break;
2621 case BFD_RELOC_X86_64_TPOFF32:
2622 other = BFD_RELOC_X86_64_TPOFF64;
2623 break;
2624 case BFD_RELOC_X86_64_DTPOFF32:
2625 other = BFD_RELOC_X86_64_DTPOFF64;
2626 break;
2627 default:
2628 break;
2629 }
2630
2631 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2632 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2633 sign = -1;
2634
2635 rel = bfd_reloc_type_lookup (stdoutput, other);
2636 if (!rel)
2637 as_bad (_("unknown relocation (%u)"), other);
2638 else if (size != bfd_get_reloc_size (rel))
2639 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2640 bfd_get_reloc_size (rel),
2641 size);
2642 else if (pcrel && !rel->pc_relative)
2643 as_bad (_("non-pc-relative relocation for pc-relative field"));
2644 else if ((rel->complain_on_overflow == complain_overflow_signed
2645 && !sign)
2646 || (rel->complain_on_overflow == complain_overflow_unsigned
2647 && sign > 0))
2648 as_bad (_("relocated field and relocation type differ in signedness"));
2649 else
2650 return other;
2651 return NO_RELOC;
2652 }
2653
2654 if (pcrel)
2655 {
2656 if (!sign)
2657 as_bad (_("there are no unsigned pc-relative relocations"));
2658 switch (size)
2659 {
2660 case 1: return BFD_RELOC_8_PCREL;
2661 case 2: return BFD_RELOC_16_PCREL;
2662 case 4: return BFD_RELOC_32_PCREL;
2663 case 8: return BFD_RELOC_64_PCREL;
2664 }
2665 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2666 }
2667 else
2668 {
2669 if (sign > 0)
2670 switch (size)
2671 {
2672 case 4: return BFD_RELOC_X86_64_32S;
2673 }
2674 else
2675 switch (size)
2676 {
2677 case 1: return BFD_RELOC_8;
2678 case 2: return BFD_RELOC_16;
2679 case 4: return BFD_RELOC_32;
2680 case 8: return BFD_RELOC_64;
2681 }
2682 as_bad (_("cannot do %s %u byte relocation"),
2683 sign > 0 ? "signed" : "unsigned", size);
2684 }
2685
2686 return NO_RELOC;
2687 }
2688
2689 /* Here we decide which fixups can be adjusted to make them relative to
2690 the beginning of the section instead of the symbol. Basically we need
2691 to make sure that the dynamic relocations are done correctly, so in
2692 some cases we force the original symbol to be used. */
2693
2694 int
2695 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2696 {
2697 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2698 if (!IS_ELF)
2699 return 1;
2700
2701 /* Don't adjust pc-relative references to merge sections in 64-bit
2702 mode. */
2703 if (use_rela_relocations
2704 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2705 && fixP->fx_pcrel)
2706 return 0;
2707
2708 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2709 and changed later by validate_fix. */
2710 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2711 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2712 return 0;
2713
2714 /* adjust_reloc_syms doesn't know about the GOT. */
2715 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2716 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2717 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2718 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2719 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2720 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2721 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2722 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2723 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2724 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2725 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2726 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2727 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2728 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2729 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2730 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2731 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2732 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2733 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2734 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2735 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2736 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2737 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2738 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2739 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2740 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2741 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2742 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2743 return 0;
2744 #endif
2745 return 1;
2746 }
2747
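/* Classify an FPU mnemonic for Intel syntax operand sizing.  Returns
   0 for non-FPU mnemonics (and fxsave/fxrstor), 1 for ordinary
   floating point operations, 2 for operations on integer operands
   (fiadd, fild, ...), and 3 for control/state operations (fldcw,
   fnsave, fstenv, frstor, ...).  */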
2748 static int
2749 intel_float_operand (const char *mnemonic)
2750 {
2751 /* Note that the value returned is meaningful only for opcodes with (memory)
2752 operands, hence the code here is free to improperly handle opcodes that
2753 have no operands (for better performance and smaller code). */
2754
2755 if (mnemonic[0] != 'f')
2756 return 0; /* non-math */
2757
2758 switch (mnemonic[1])
2759 {
2760 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2761 the fs segment override prefix are not currently handled because no
2762 call path can make opcodes without operands get here. */
2763 case 'i':
2764 return 2 /* integer op */;
2765 case 'l':
2766 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2767 return 3; /* fldcw/fldenv */
2768 break;
2769 case 'n':
2770 if (mnemonic[2] != 'o' /* fnop */)
2771 return 3; /* non-waiting control op */
2772 break;
2773 case 'r':
2774 if (mnemonic[2] == 's')
2775 return 3; /* frstor/frstpm */
2776 break;
2777 case 's':
2778 if (mnemonic[2] == 'a')
2779 return 3; /* fsave */
2780 if (mnemonic[2] == 't')
2781 {
2782 switch (mnemonic[3])
2783 {
2784 case 'c': /* fstcw */
2785 case 'd': /* fstdw */
2786 case 'e': /* fstenv */
2787 case 's': /* fsts[gw] */
2788 return 3;
2789 }
2790 }
2791 break;
2792 case 'x':
2793 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2794 return 0; /* fxsave/fxrstor are not really math ops */
2795 break;
2796 }
2797
2798 return 1;
2799 }
2800
2801 /* Build the VEX prefix. */
2802
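/* Encoding reminder:
     2-byte VEX:  0xC5  [R vvvv L pp]
     3-byte VEX:  0xC4  [R X B mmmmm]  [W vvvv L pp]
   R/X/B/W are the inverted REX bits, vvvv is the inverted extra
   register specifier, L selects 128- vs 256-bit vectors, and pp
   encodes the implied 66/F3/F2 prefix.  XOP uses 0x8F instead of
   0xC4.  */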
2803 static void
2804 build_vex_prefix (const insn_template *t)
2805 {
2806 unsigned int register_specifier;
2807 unsigned int implied_prefix;
2808 unsigned int vector_length;
2809
2810 /* Check register specifier. */
2811 if (i.vex.register_specifier)
2812 {
2813 register_specifier = i.vex.register_specifier->reg_num;
2814 if ((i.vex.register_specifier->reg_flags & RegRex))
2815 register_specifier += 8;
2816 register_specifier = ~register_specifier & 0xf;
2817 }
2818 else
2819 register_specifier = 0xf;
2820
2821 /* Use 2-byte VEX prefix by swapping destination and source
2822 operands. */
2823 if (!i.swap_operand
2824 && i.operands == i.reg_operands
2825 && i.tm.opcode_modifier.vexopcode == VEX0F
2826 && i.tm.opcode_modifier.s
2827 && i.rex == REX_B)
2828 {
2829 unsigned int xchg = i.operands - 1;
2830 union i386_op temp_op;
2831 i386_operand_type temp_type;
2832
2833 temp_type = i.types[xchg];
2834 i.types[xchg] = i.types[0];
2835 i.types[0] = temp_type;
2836 temp_op = i.op[xchg];
2837 i.op[xchg] = i.op[0];
2838 i.op[0] = temp_op;
2839
2840 gas_assert (i.rm.mode == 3);
2841
2842 i.rex = REX_R;
2843 xchg = i.rm.regmem;
2844 i.rm.regmem = i.rm.reg;
2845 i.rm.reg = xchg;
2846
2847 /* Use the next insn. */
2848 i.tm = t[1];
2849 }
2850
2851 if (i.tm.opcode_modifier.vex == VEXScalar)
2852 vector_length = avxscalar;
2853 else
2854 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2855
2856 switch ((i.tm.base_opcode >> 8) & 0xff)
2857 {
2858 case 0:
2859 implied_prefix = 0;
2860 break;
2861 case DATA_PREFIX_OPCODE:
2862 implied_prefix = 1;
2863 break;
2864 case REPE_PREFIX_OPCODE:
2865 implied_prefix = 2;
2866 break;
2867 case REPNE_PREFIX_OPCODE:
2868 implied_prefix = 3;
2869 break;
2870 default:
2871 abort ();
2872 }
2873
2874 /* Use 2-byte VEX prefix if possible. */
2875 if (i.tm.opcode_modifier.vexopcode == VEX0F
2876 && i.tm.opcode_modifier.vexw != VEXW1
2877 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2878 {
2879 /* 2-byte VEX prefix. */
2880 unsigned int r;
2881
2882 i.vex.length = 2;
2883 i.vex.bytes[0] = 0xc5;
2884
2885 /* Check the REX.R bit. */
2886 r = (i.rex & REX_R) ? 0 : 1;
2887 i.vex.bytes[1] = (r << 7
2888 | register_specifier << 3
2889 | vector_length << 2
2890 | implied_prefix);
2891 }
2892 else
2893 {
2894 /* 3-byte VEX prefix. */
2895 unsigned int m, w;
2896
2897 i.vex.length = 3;
2898
2899 switch (i.tm.opcode_modifier.vexopcode)
2900 {
2901 case VEX0F:
2902 m = 0x1;
2903 i.vex.bytes[0] = 0xc4;
2904 break;
2905 case VEX0F38:
2906 m = 0x2;
2907 i.vex.bytes[0] = 0xc4;
2908 break;
2909 case VEX0F3A:
2910 m = 0x3;
2911 i.vex.bytes[0] = 0xc4;
2912 break;
2913 case XOP08:
2914 m = 0x8;
2915 i.vex.bytes[0] = 0x8f;
2916 break;
2917 case XOP09:
2918 m = 0x9;
2919 i.vex.bytes[0] = 0x8f;
2920 break;
2921 case XOP0A:
2922 m = 0xa;
2923 i.vex.bytes[0] = 0x8f;
2924 break;
2925 default:
2926 abort ();
2927 }
2928
2929 /* The high 3 bits of the second VEX byte are the one's complement
2930 of the RXB bits from REX. */
2931 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2932
2933 /* Check the REX.W bit. */
2934 w = (i.rex & REX_W) ? 1 : 0;
2935 if (i.tm.opcode_modifier.vexw)
2936 {
2937 if (w)
2938 abort ();
2939
2940 if (i.tm.opcode_modifier.vexw == VEXW1)
2941 w = 1;
2942 }
2943
2944 i.vex.bytes[2] = (w << 7
2945 | register_specifier << 3
2946 | vector_length << 2
2947 | implied_prefix);
2948 }
2949 }
2950
2951 static void
2952 process_immext (void)
2953 {
2954 expressionS *exp;
2955
2956 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2957 {
2958 /* SSE3 Instructions have the fixed operands with an opcode
2959 suffix which is coded in the same place as an 8-bit immediate
2960 field would be. Here we check those operands and remove them
2961 afterwards. */
2962 unsigned int x;
2963
2964 for (x = 0; x < i.operands; x++)
2965 if (i.op[x].regs->reg_num != x)
2966 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2967 register_prefix, i.op[x].regs->reg_name, x + 1,
2968 i.tm.name);
2969
2970 i.operands = 0;
2971 }
2972
2973 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2974 which is coded in the same place as an 8-bit immediate field
2975 would be. Here we fake an 8-bit immediate operand from the
2976 opcode suffix stored in tm.extension_opcode.
2977
2978 AVX instructions also use this encoding for some of the
2979 3-argument instructions. */
2980
2981 gas_assert (i.imm_operands == 0
2982 && (i.operands <= 2
2983 || (i.tm.opcode_modifier.vex
2984 && i.operands <= 4)));
2985
2986 exp = &im_expressions[i.imm_operands++];
2987 i.op[i.operands].imms = exp;
2988 i.types[i.operands] = imm8;
2989 i.operands++;
2990 exp->X_op = O_constant;
2991 exp->X_add_number = i.tm.extension_opcode;
2992 i.tm.extension_opcode = None;
2993 }
2994
2995 /* This is the guts of the machine-dependent assembler. LINE points to a
2996 machine dependent instruction. This function is supposed to emit
2997 the frags/bytes it assembles to. */
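/* The overall flow: parse_insn () strips prefixes and the mnemonic,
   parse_operands () fills in the operands, optimize_imm () and
   optimize_disp () narrow constant operands, match_template () picks
   an insn_template, the suffix, operands and prefixes are finalized,
   and output_insn () emits the frags/bytes.  */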
2998
2999 void
3000 md_assemble (char *line)
3001 {
3002 unsigned int j;
3003 char mnemonic[MAX_MNEM_SIZE];
3004 const insn_template *t;
3005
3006 /* Initialize globals. */
3007 memset (&i, '\0', sizeof (i));
3008 for (j = 0; j < MAX_OPERANDS; j++)
3009 i.reloc[j] = NO_RELOC;
3010 memset (disp_expressions, '\0', sizeof (disp_expressions));
3011 memset (im_expressions, '\0', sizeof (im_expressions));
3012 save_stack_p = save_stack;
3013
3014 /* First parse an instruction mnemonic & call i386_operand for the operands.
3015 We assume that the scrubber has arranged it so that line[0] is the valid
3016 start of a (possibly prefixed) mnemonic. */
3017
3018 line = parse_insn (line, mnemonic);
3019 if (line == NULL)
3020 return;
3021
3022 line = parse_operands (line, mnemonic);
3023 this_operand = -1;
3024 if (line == NULL)
3025 return;
3026
3027 /* Now we've parsed the mnemonic into a set of templates, and have the
3028 operands at hand. */
3029
3030 /* All intel opcodes have reversed operands except for "bound" and
3031 "enter". We also don't reverse intersegment "jmp" and "call"
3032 instructions with 2 immediate operands so that the immediate segment
3033 precedes the offset, as it does when in AT&T mode. */
3034 if (intel_syntax
3035 && i.operands > 1
3036 && (strcmp (mnemonic, "bound") != 0)
3037 && (strcmp (mnemonic, "invlpga") != 0)
3038 && !(operand_type_check (i.types[0], imm)
3039 && operand_type_check (i.types[1], imm)))
3040 swap_operands ();
3041
3042 /* The order of the immediates should be reversed for the
3043 two-immediate extrq and insertq instructions. */
3044 if (i.imm_operands == 2
3045 && (strcmp (mnemonic, "extrq") == 0
3046 || strcmp (mnemonic, "insertq") == 0))
3047 swap_2_operands (0, 1);
3048
3049 if (i.imm_operands)
3050 optimize_imm ();
3051
3052 /* Don't optimize displacement for movabs since it only takes 64bit
3053 displacement. */
3054 if (i.disp_operands
3055 && !i.disp32_encoding
3056 && (flag_code != CODE_64BIT
3057 || strcmp (mnemonic, "movabs") != 0))
3058 optimize_disp ();
3059
3060 /* Next, we find a template that matches the given insn,
3061 making sure the overlap of the given operands types is consistent
3062 with the template operand types. */
3063
3064 if (!(t = match_template ()))
3065 return;
3066
3067 if (sse_check != sse_check_none
3068 && !i.tm.opcode_modifier.noavx
3069 && (i.tm.cpu_flags.bitfield.cpusse
3070 || i.tm.cpu_flags.bitfield.cpusse2
3071 || i.tm.cpu_flags.bitfield.cpusse3
3072 || i.tm.cpu_flags.bitfield.cpussse3
3073 || i.tm.cpu_flags.bitfield.cpusse4_1
3074 || i.tm.cpu_flags.bitfield.cpusse4_2))
3075 {
3076 (sse_check == sse_check_warning
3077 ? as_warn
3078 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3079 }
3080
3081 /* Zap movzx and movsx suffix. The suffix has been set from
3082 "word ptr" or "byte ptr" on the source operand in Intel syntax
3083 or extracted from mnemonic in AT&T syntax. But we'll use
3084 the destination register to choose the suffix for encoding. */
3085 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3086 {
3087 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3088 there is no suffix, the default will be byte extension. */
3089 if (i.reg_operands != 2
3090 && !i.suffix
3091 && intel_syntax)
3092 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3093
3094 i.suffix = 0;
3095 }
3096
3097 if (i.tm.opcode_modifier.fwait)
3098 if (!add_prefix (FWAIT_OPCODE))
3099 return;
3100
3101 /* Check for lock without a lockable instruction. Destination operand
3102 must be memory unless it is xchg (0x86). */
3103 if (i.prefix[LOCK_PREFIX]
3104 && (!i.tm.opcode_modifier.islockable
3105 || i.mem_operands == 0
3106 || (i.tm.base_opcode != 0x86
3107 && !operand_type_check (i.types[i.operands - 1], anymem))))
3108 {
3109 as_bad (_("expecting lockable instruction after `lock'"));
3110 return;
3111 }
3112
3113 /* Check string instruction segment overrides. */
3114 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3115 {
3116 if (!check_string ())
3117 return;
3118 i.disp_operands = 0;
3119 }
3120
3121 if (!process_suffix ())
3122 return;
3123
3124 /* Update operand types. */
3125 for (j = 0; j < i.operands; j++)
3126 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3127
3128 /* Make still unresolved immediate matches conform to size of immediate
3129 given in i.suffix. */
3130 if (!finalize_imm ())
3131 return;
3132
3133 if (i.types[0].bitfield.imm1)
3134 i.imm_operands = 0; /* kludge for shift insns. */
3135
3136 /* We only need to check those implicit registers for instructions
3137 with 3 operands or less. */
3138 if (i.operands <= 3)
3139 for (j = 0; j < i.operands; j++)
3140 if (i.types[j].bitfield.inoutportreg
3141 || i.types[j].bitfield.shiftcount
3142 || i.types[j].bitfield.acc
3143 || i.types[j].bitfield.floatacc)
3144 i.reg_operands--;
3145
3146 /* ImmExt should be processed after SSE2AVX. */
3147 if (!i.tm.opcode_modifier.sse2avx
3148 && i.tm.opcode_modifier.immext)
3149 process_immext ();
3150
3151 /* For insns with operands there are more diddles to do to the opcode. */
3152 if (i.operands)
3153 {
3154 if (!process_operands ())
3155 return;
3156 }
3157 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3158 {
3159 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3160 as_warn (_("translating to `%sp'"), i.tm.name);
3161 }
3162
3163 if (i.tm.opcode_modifier.vex)
3164 build_vex_prefix (t);
3165
3166 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3167 instructions may define INT_OPCODE as well, so avoid this corner
3168 case for those instructions that use MODRM. */
3169 if (i.tm.base_opcode == INT_OPCODE
3170 && !i.tm.opcode_modifier.modrm
3171 && i.op[0].imms->X_add_number == 3)
3172 {
3173 i.tm.base_opcode = INT3_OPCODE;
3174 i.imm_operands = 0;
3175 }
3176
3177 if ((i.tm.opcode_modifier.jump
3178 || i.tm.opcode_modifier.jumpbyte
3179 || i.tm.opcode_modifier.jumpdword)
3180 && i.op[0].disps->X_op == O_constant)
3181 {
3182 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3183 the absolute address given by the constant. Since ix86 jumps and
3184 calls are pc relative, we need to generate a reloc. */
3185 i.op[0].disps->X_add_symbol = &abs_symbol;
3186 i.op[0].disps->X_op = O_symbol;
3187 }
3188
3189 if (i.tm.opcode_modifier.rex64)
3190 i.rex |= REX_W;
3191
3192 /* For 8 bit registers we need an empty rex prefix. Also if the
3193 instruction already has a prefix, we need to convert old
3194 registers to new ones. */
3195
3196 if ((i.types[0].bitfield.reg8
3197 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3198 || (i.types[1].bitfield.reg8
3199 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3200 || ((i.types[0].bitfield.reg8
3201 || i.types[1].bitfield.reg8)
3202 && i.rex != 0))
3203 {
3204 int x;
3205
3206 i.rex |= REX_OPCODE;
3207 for (x = 0; x < 2; x++)
3208 {
3209 /* Look for 8 bit operand that uses old registers. */
3210 if (i.types[x].bitfield.reg8
3211 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3212 {
3213 /* In case it is "hi" register, give up. */
3214 if (i.op[x].regs->reg_num > 3)
3215 as_bad (_("can't encode register '%s%s' in an "
3216 "instruction requiring REX prefix."),
3217 register_prefix, i.op[x].regs->reg_name);
3218
3219 /* Otherwise it is equivalent to the extended register.
3220 Since the encoding doesn't change this is merely
3221 cosmetic cleanup for debug output. */
3222
3223 i.op[x].regs = i.op[x].regs + 8;
3224 }
3225 }
3226 }
3227
3228 if (i.rex != 0)
3229 add_prefix (REX_OPCODE | i.rex);
3230
3231 /* We are ready to output the insn. */
3232 output_insn ();
3233 }
3234
3235 static char *
3236 parse_insn (char *line, char *mnemonic)
3237 {
3238 char *l = line;
3239 char *token_start = l;
3240 char *mnem_p;
3241 int supported;
3242 const insn_template *t;
3243 char *dot_p = NULL;
3244
3245 /* Non-zero if we found a prefix only acceptable with string insns. */
3246 const char *expecting_string_instruction = NULL;
3247
3248 while (1)
3249 {
3250 mnem_p = mnemonic;
3251 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3252 {
3253 if (*mnem_p == '.')
3254 dot_p = mnem_p;
3255 mnem_p++;
3256 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3257 {
3258 as_bad (_("no such instruction: `%s'"), token_start);
3259 return NULL;
3260 }
3261 l++;
3262 }
3263 if (!is_space_char (*l)
3264 && *l != END_OF_INSN
3265 && (intel_syntax
3266 || (*l != PREFIX_SEPARATOR
3267 && *l != ',')))
3268 {
3269 as_bad (_("invalid character %s in mnemonic"),
3270 output_invalid (*l));
3271 return NULL;
3272 }
3273 if (token_start == l)
3274 {
3275 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3276 as_bad (_("expecting prefix; got nothing"));
3277 else
3278 as_bad (_("expecting mnemonic; got nothing"));
3279 return NULL;
3280 }
3281
3282 /* Look up instruction (or prefix) via hash table. */
3283 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3284
3285 if (*l != END_OF_INSN
3286 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3287 && current_templates
3288 && current_templates->start->opcode_modifier.isprefix)
3289 {
3290 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3291 {
3292 as_bad ((flag_code != CODE_64BIT
3293 ? _("`%s' is only supported in 64-bit mode")
3294 : _("`%s' is not supported in 64-bit mode")),
3295 current_templates->start->name);
3296 return NULL;
3297 }
3298 /* If we are in 16-bit mode, do not allow addr16 or data16.
3299 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3300 if ((current_templates->start->opcode_modifier.size16
3301 || current_templates->start->opcode_modifier.size32)
3302 && flag_code != CODE_64BIT
3303 && (current_templates->start->opcode_modifier.size32
3304 ^ (flag_code == CODE_16BIT)))
3305 {
3306 as_bad (_("redundant %s prefix"),
3307 current_templates->start->name);
3308 return NULL;
3309 }
3310 /* Add prefix, checking for repeated prefixes. */
3311 switch (add_prefix (current_templates->start->base_opcode))
3312 {
3313 case PREFIX_EXIST:
3314 return NULL;
3315 case PREFIX_REP:
3316 expecting_string_instruction = current_templates->start->name;
3317 break;
3318 default:
3319 break;
3320 }
3321 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3322 token_start = ++l;
3323 }
3324 else
3325 break;
3326 }
3327
3328 if (!current_templates)
3329 {
3330 /* Check if we should swap operand or force 32bit displacement in
3331 encoding. */
3332 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3333 i.swap_operand = 1;
3334 else if (mnem_p - 4 == dot_p
3335 && dot_p[1] == 'd'
3336 && dot_p[2] == '3'
3337 && dot_p[3] == '2')
3338 i.disp32_encoding = 1;
3339 else
3340 goto check_suffix;
3341 mnem_p = dot_p;
3342 *dot_p = '\0';
3343 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3344 }
3345
3346 if (!current_templates)
3347 {
3348 check_suffix:
3349 /* See if we can get a match by trimming off a suffix. */
3350 switch (mnem_p[-1])
3351 {
3352 case WORD_MNEM_SUFFIX:
3353 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3354 i.suffix = SHORT_MNEM_SUFFIX;
3355 else
3356 case BYTE_MNEM_SUFFIX:
3357 case QWORD_MNEM_SUFFIX:
3358 i.suffix = mnem_p[-1];
3359 mnem_p[-1] = '\0';
3360 current_templates = (const templates *) hash_find (op_hash,
3361 mnemonic);
3362 break;
3363 case SHORT_MNEM_SUFFIX:
3364 case LONG_MNEM_SUFFIX:
3365 if (!intel_syntax)
3366 {
3367 i.suffix = mnem_p[-1];
3368 mnem_p[-1] = '\0';
3369 current_templates = (const templates *) hash_find (op_hash,
3370 mnemonic);
3371 }
3372 break;
3373
3374 /* Intel Syntax. */
3375 case 'd':
3376 if (intel_syntax)
3377 {
3378 if (intel_float_operand (mnemonic) == 1)
3379 i.suffix = SHORT_MNEM_SUFFIX;
3380 else
3381 i.suffix = LONG_MNEM_SUFFIX;
3382 mnem_p[-1] = '\0';
3383 current_templates = (const templates *) hash_find (op_hash,
3384 mnemonic);
3385 }
3386 break;
3387 }
3388 if (!current_templates)
3389 {
3390 as_bad (_("no such instruction: `%s'"), token_start);
3391 return NULL;
3392 }
3393 }
3394
3395 if (current_templates->start->opcode_modifier.jump
3396 || current_templates->start->opcode_modifier.jumpbyte)
3397 {
3398 /* Check for a branch hint. We allow ",pt" and ",pn" for
3399 predict taken and predict not taken respectively.
3400 I'm not sure that branch hints actually do anything on loop
3401 and jcxz insns (JumpByte) for current Pentium4 chips. They
3402 may work in the future and it doesn't hurt to accept them
3403 now. */
3404 if (l[0] == ',' && l[1] == 'p')
3405 {
3406 if (l[2] == 't')
3407 {
3408 if (!add_prefix (DS_PREFIX_OPCODE))
3409 return NULL;
3410 l += 3;
3411 }
3412 else if (l[2] == 'n')
3413 {
3414 if (!add_prefix (CS_PREFIX_OPCODE))
3415 return NULL;
3416 l += 3;
3417 }
3418 }
3419 }
3420 /* Any other comma loses. */
3421 if (*l == ',')
3422 {
3423 as_bad (_("invalid character %s in mnemonic"),
3424 output_invalid (*l));
3425 return NULL;
3426 }
3427
3428 /* Check if instruction is supported on specified architecture. */
3429 supported = 0;
3430 for (t = current_templates->start; t < current_templates->end; ++t)
3431 {
3432 supported |= cpu_flags_match (t);
3433 if (supported == CPU_FLAGS_PERFECT_MATCH)
3434 goto skip;
3435 }
3436
3437 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3438 {
3439 as_bad (flag_code == CODE_64BIT
3440 ? _("`%s' is not supported in 64-bit mode")
3441 : _("`%s' is only supported in 64-bit mode"),
3442 current_templates->start->name);
3443 return NULL;
3444 }
3445 if (supported != CPU_FLAGS_PERFECT_MATCH)
3446 {
3447 as_bad (_("`%s' is not supported on `%s%s'"),
3448 current_templates->start->name,
3449 cpu_arch_name ? cpu_arch_name : default_arch,
3450 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3451 return NULL;
3452 }
3453
3454 skip:
3455 if (!cpu_arch_flags.bitfield.cpui386
3456 && (flag_code != CODE_16BIT))
3457 {
3458 as_warn (_("use .code16 to ensure correct addressing mode"));
3459 }
3460
3461 /* Check for rep/repne without a string instruction. */
3462 if (expecting_string_instruction)
3463 {
3464 static templates override;
3465
3466 for (t = current_templates->start; t < current_templates->end; ++t)
3467 if (t->opcode_modifier.isstring)
3468 break;
3469 if (t >= current_templates->end)
3470 {
3471 as_bad (_("expecting string instruction after `%s'"),
3472 expecting_string_instruction);
3473 return NULL;
3474 }
3475 for (override.start = t; t < current_templates->end; ++t)
3476 if (!t->opcode_modifier.isstring)
3477 break;
3478 override.end = t;
3479 current_templates = &override;
3480 }
3481
3482 return l;
3483 }
3484
3485 static char *
3486 parse_operands (char *l, const char *mnemonic)
3487 {
3488 char *token_start;
3489
3490 /* 1 if operand is pending after ','. */
3491 unsigned int expecting_operand = 0;
3492
3493 /* Non-zero if operand parens not balanced. */
3494 unsigned int paren_not_balanced;
3495
3496 while (*l != END_OF_INSN)
3497 {
3498 /* Skip optional white space before operand. */
3499 if (is_space_char (*l))
3500 ++l;
3501 if (!is_operand_char (*l) && *l != END_OF_INSN)
3502 {
3503 as_bad (_("invalid character %s before operand %d"),
3504 output_invalid (*l),
3505 i.operands + 1);
3506 return NULL;
3507 }
3508 token_start = l; /* after white space */
3509 paren_not_balanced = 0;
3510 while (paren_not_balanced || *l != ',')
3511 {
3512 if (*l == END_OF_INSN)
3513 {
3514 if (paren_not_balanced)
3515 {
3516 if (!intel_syntax)
3517 as_bad (_("unbalanced parenthesis in operand %d."),
3518 i.operands + 1);
3519 else
3520 as_bad (_("unbalanced brackets in operand %d."),
3521 i.operands + 1);
3522 return NULL;
3523 }
3524 else
3525 break; /* we are done */
3526 }
3527 else if (!is_operand_char (*l) && !is_space_char (*l))
3528 {
3529 as_bad (_("invalid character %s in operand %d"),
3530 output_invalid (*l),
3531 i.operands + 1);
3532 return NULL;
3533 }
3534 if (!intel_syntax)
3535 {
3536 if (*l == '(')
3537 ++paren_not_balanced;
3538 if (*l == ')')
3539 --paren_not_balanced;
3540 }
3541 else
3542 {
3543 if (*l == '[')
3544 ++paren_not_balanced;
3545 if (*l == ']')
3546 --paren_not_balanced;
3547 }
3548 l++;
3549 }
3550 if (l != token_start)
3551 { /* Yes, we've read in another operand. */
3552 unsigned int operand_ok;
3553 this_operand = i.operands++;
3554 i.types[this_operand].bitfield.unspecified = 1;
3555 if (i.operands > MAX_OPERANDS)
3556 {
3557 as_bad (_("spurious operands; (%d operands/instruction max)"),
3558 MAX_OPERANDS);
3559 return NULL;
3560 }
3561 /* Now parse operand adding info to 'i' as we go along. */
3562 END_STRING_AND_SAVE (l);
3563
3564 if (intel_syntax)
3565 operand_ok =
3566 i386_intel_operand (token_start,
3567 intel_float_operand (mnemonic));
3568 else
3569 operand_ok = i386_att_operand (token_start);
3570
3571 RESTORE_END_STRING (l);
3572 if (!operand_ok)
3573 return NULL;
3574 }
3575 else
3576 {
3577 if (expecting_operand)
3578 {
3579 expecting_operand_after_comma:
3580 as_bad (_("expecting operand after ','; got nothing"));
3581 return NULL;
3582 }
3583 if (*l == ',')
3584 {
3585 as_bad (_("expecting operand before ','; got nothing"));
3586 return NULL;
3587 }
3588 }
3589
3590 /* Now *l must be either ',' or END_OF_INSN. */
3591 if (*l == ',')
3592 {
3593 if (*++l == END_OF_INSN)
3594 {
3595 /* A trailing comma means the operand is missing; complain. */
3596 goto expecting_operand_after_comma;
3597 }
3598 expecting_operand = 1;
3599 }
3600 }
3601 return l;
3602 }
3603
3604 static void
3605 swap_2_operands (int xchg1, int xchg2)
3606 {
3607 union i386_op temp_op;
3608 i386_operand_type temp_type;
3609 enum bfd_reloc_code_real temp_reloc;
3610
3611 temp_type = i.types[xchg2];
3612 i.types[xchg2] = i.types[xchg1];
3613 i.types[xchg1] = temp_type;
3614 temp_op = i.op[xchg2];
3615 i.op[xchg2] = i.op[xchg1];
3616 i.op[xchg1] = temp_op;
3617 temp_reloc = i.reloc[xchg2];
3618 i.reloc[xchg2] = i.reloc[xchg1];
3619 i.reloc[xchg1] = temp_reloc;
3620 }
3621
3622 static void
3623 swap_operands (void)
3624 {
3625 switch (i.operands)
3626 {
3627 case 5:
3628 case 4:
3629 swap_2_operands (1, i.operands - 2);
3630 case 3:
3631 case 2:
3632 swap_2_operands (0, i.operands - 1);
3633 break;
3634 default:
3635 abort ();
3636 }
3637
3638 if (i.mem_operands == 2)
3639 {
3640 const seg_entry *temp_seg;
3641 temp_seg = i.seg[0];
3642 i.seg[0] = i.seg[1];
3643 i.seg[1] = temp_seg;
3644 }
3645 }
3646
3647 /* Try to ensure constant immediates are represented in the smallest
3648 opcode possible. */
3649 static void
3650 optimize_imm (void)
3651 {
3652 char guess_suffix = 0;
3653 int op;
3654
3655 if (i.suffix)
3656 guess_suffix = i.suffix;
3657 else if (i.reg_operands)
3658 {
3659 /* Figure out a suffix from the last register operand specified.
3660 We can't do this properly yet, i.e. excluding InOutPortReg,
3661 but the following works for instructions with immediates.
3662 In any case, we can't set i.suffix yet. */
3663 for (op = i.operands; --op >= 0;)
3664 if (i.types[op].bitfield.reg8)
3665 {
3666 guess_suffix = BYTE_MNEM_SUFFIX;
3667 break;
3668 }
3669 else if (i.types[op].bitfield.reg16)
3670 {
3671 guess_suffix = WORD_MNEM_SUFFIX;
3672 break;
3673 }
3674 else if (i.types[op].bitfield.reg32)
3675 {
3676 guess_suffix = LONG_MNEM_SUFFIX;
3677 break;
3678 }
3679 else if (i.types[op].bitfield.reg64)
3680 {
3681 guess_suffix = QWORD_MNEM_SUFFIX;
3682 break;
3683 }
3684 }
3685 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3686 guess_suffix = WORD_MNEM_SUFFIX;
3687
3688 for (op = i.operands; --op >= 0;)
3689 if (operand_type_check (i.types[op], imm))
3690 {
3691 switch (i.op[op].imms->X_op)
3692 {
3693 case O_constant:
3694 /* If a suffix is given, this operand may be shortened. */
3695 switch (guess_suffix)
3696 {
3697 case LONG_MNEM_SUFFIX:
3698 i.types[op].bitfield.imm32 = 1;
3699 i.types[op].bitfield.imm64 = 1;
3700 break;
3701 case WORD_MNEM_SUFFIX:
3702 i.types[op].bitfield.imm16 = 1;
3703 i.types[op].bitfield.imm32 = 1;
3704 i.types[op].bitfield.imm32s = 1;
3705 i.types[op].bitfield.imm64 = 1;
3706 break;
3707 case BYTE_MNEM_SUFFIX:
3708 i.types[op].bitfield.imm8 = 1;
3709 i.types[op].bitfield.imm8s = 1;
3710 i.types[op].bitfield.imm16 = 1;
3711 i.types[op].bitfield.imm32 = 1;
3712 i.types[op].bitfield.imm32s = 1;
3713 i.types[op].bitfield.imm64 = 1;
3714 break;
3715 }
3716
3717 /* If this operand is at most 16 bits, convert it
3718 to a signed 16 bit number before trying to see
3719 whether it will fit in an even smaller size.
3720 This allows a 16-bit operand such as $0xffe0 to
3721 be recognised as within Imm8S range. */
3722 if ((i.types[op].bitfield.imm16)
3723 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3724 {
3725 i.op[op].imms->X_add_number =
3726 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3727 }
3728 if ((i.types[op].bitfield.imm32)
3729 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3730 == 0))
3731 {
3732 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3733 ^ ((offsetT) 1 << 31))
3734 - ((offsetT) 1 << 31));
3735 }
3736 i.types[op]
3737 = operand_type_or (i.types[op],
3738 smallest_imm_type (i.op[op].imms->X_add_number));
3739
3740 /* We must avoid matching of Imm32 templates when 64bit
3741 only immediate is available. */
3742 if (guess_suffix == QWORD_MNEM_SUFFIX)
3743 i.types[op].bitfield.imm32 = 0;
3744 break;
3745
3746 case O_absent:
3747 case O_register:
3748 abort ();
3749
3750 /* Symbols and expressions. */
3751 default:
3752 /* Convert symbolic operand to proper sizes for matching, but don't
3753 prevent matching a set of insns that only supports sizes other
3754 than those matching the insn suffix. */
3755 {
3756 i386_operand_type mask, allowed;
3757 const insn_template *t;
3758
3759 operand_type_set (&mask, 0);
3760 operand_type_set (&allowed, 0);
3761
3762 for (t = current_templates->start;
3763 t < current_templates->end;
3764 ++t)
3765 allowed = operand_type_or (allowed,
3766 t->operand_types[op]);
3767 switch (guess_suffix)
3768 {
3769 case QWORD_MNEM_SUFFIX:
3770 mask.bitfield.imm64 = 1;
3771 mask.bitfield.imm32s = 1;
3772 break;
3773 case LONG_MNEM_SUFFIX:
3774 mask.bitfield.imm32 = 1;
3775 break;
3776 case WORD_MNEM_SUFFIX:
3777 mask.bitfield.imm16 = 1;
3778 break;
3779 case BYTE_MNEM_SUFFIX:
3780 mask.bitfield.imm8 = 1;
3781 break;
3782 default:
3783 break;
3784 }
3785 allowed = operand_type_and (mask, allowed);
3786 if (!operand_type_all_zero (&allowed))
3787 i.types[op] = operand_type_and (i.types[op], mask);
3788 }
3789 break;
3790 }
3791 }
3792 }
3793
3794 /* Try to use the smallest displacement type too. */
3795 static void
3796 optimize_disp (void)
3797 {
3798 int op;
3799
3800 for (op = i.operands; --op >= 0;)
3801 if (operand_type_check (i.types[op], disp))
3802 {
3803 if (i.op[op].disps->X_op == O_constant)
3804 {
3805 offsetT op_disp = i.op[op].disps->X_add_number;
3806
3807 if (i.types[op].bitfield.disp16
3808 && (op_disp & ~(offsetT) 0xffff) == 0)
3809 {
3810 /* If this operand is at most 16 bits, convert
3811 to a signed 16 bit number and don't use 64bit
3812 displacement. */
3813 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3814 i.types[op].bitfield.disp64 = 0;
3815 }
3816 if (i.types[op].bitfield.disp32
3817 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3818 {
3819 /* If this operand is at most 32 bits, convert
3820 to a signed 32 bit number and don't use 64bit
3821 displacement. */
3822 op_disp &= (((offsetT) 2 << 31) - 1);
3823 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3824 i.types[op].bitfield.disp64 = 0;
3825 }
3826 if (!op_disp && i.types[op].bitfield.baseindex)
3827 {
3828 i.types[op].bitfield.disp8 = 0;
3829 i.types[op].bitfield.disp16 = 0;
3830 i.types[op].bitfield.disp32 = 0;
3831 i.types[op].bitfield.disp32s = 0;
3832 i.types[op].bitfield.disp64 = 0;
3833 i.op[op].disps = 0;
3834 i.disp_operands--;
3835 }
3836 else if (flag_code == CODE_64BIT)
3837 {
3838 if (fits_in_signed_long (op_disp))
3839 {
3840 i.types[op].bitfield.disp64 = 0;
3841 i.types[op].bitfield.disp32s = 1;
3842 }
3843 if (i.prefix[ADDR_PREFIX]
3844 && fits_in_unsigned_long (op_disp))
3845 i.types[op].bitfield.disp32 = 1;
3846 }
3847 if ((i.types[op].bitfield.disp32
3848 || i.types[op].bitfield.disp32s
3849 || i.types[op].bitfield.disp16)
3850 && fits_in_signed_byte (op_disp))
3851 i.types[op].bitfield.disp8 = 1;
3852 }
3853 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3854 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3855 {
3856 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3857 i.op[op].disps, 0, i.reloc[op]);
3858 i.types[op].bitfield.disp8 = 0;
3859 i.types[op].bitfield.disp16 = 0;
3860 i.types[op].bitfield.disp32 = 0;
3861 i.types[op].bitfield.disp32s = 0;
3862 i.types[op].bitfield.disp64 = 0;
3863 }
3864 else
3865 /* We only support 64bit displacement on constants. */
3866 i.types[op].bitfield.disp64 = 0;
3867 }
3868 }
3869
3870 /* Check if operands are valid for the instruction. */
3871
3872 static int
3873 check_VecOperands (const insn_template *t)
3874 {
3875 /* Without VSIB byte, we can't have a vector register for index. */
3876 if (!t->opcode_modifier.vecsib
3877 && i.index_reg
3878 && (i.index_reg->reg_type.bitfield.regxmm
3879 || i.index_reg->reg_type.bitfield.regymm))
3880 {
3881 i.error = unsupported_vector_index_register;
3882 return 1;
3883 }
3884
3885 /* For VSIB byte, we need a vector register for index and no PC
3886 relative addressing is allowed. */
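  /* For example, the AVX2 gather instructions are marked VecSIB128 or
     VecSIB256, so their index must be an XMM or YMM register
     respectively, and a RIP-relative base is rejected.  */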
3887 if (t->opcode_modifier.vecsib
3888 && (!i.index_reg
3889 || !((t->opcode_modifier.vecsib == VecSIB128
3890 && i.index_reg->reg_type.bitfield.regxmm)
3891 || (t->opcode_modifier.vecsib == VecSIB256
3892 && i.index_reg->reg_type.bitfield.regymm))
3893 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3894 {
3895 i.error = invalid_vsib_address;
3896 return 1;
3897 }
3898
3899 return 0;
3900 }
3901
3902 /* Check if operands are valid for the instruction. Update VEX
3903 operand types. */
3904
3905 static int
3906 VEX_check_operands (const insn_template *t)
3907 {
3908 if (!t->opcode_modifier.vex)
3909 return 0;
3910
3911 /* Only check VEX_Imm4, which must be the first operand. */
3912 if (t->operand_types[0].bitfield.vec_imm4)
3913 {
3914 if (i.op[0].imms->X_op != O_constant
3915 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3916 {
3917 i.error = bad_imm4;
3918 return 1;
3919 }
3920
3921 /* Turn off Imm8 so that update_imm won't complain. */
3922 i.types[0] = vec_imm4;
3923 }
3924
3925 return 0;
3926 }
3927
3928 static const insn_template *
3929 match_template (void)
3930 {
3931 /* Points to template once we've found it. */
3932 const insn_template *t;
3933 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3934 i386_operand_type overlap4;
3935 unsigned int found_reverse_match;
3936 i386_opcode_modifier suffix_check;
3937 i386_operand_type operand_types [MAX_OPERANDS];
3938 int addr_prefix_disp;
3939 unsigned int j;
3940 unsigned int found_cpu_match;
3941 unsigned int check_register;
3942
3943 #if MAX_OPERANDS != 5
3944 # error "MAX_OPERANDS must be 5."
3945 #endif
3946
3947 found_reverse_match = 0;
3948 addr_prefix_disp = -1;
3949
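  /* Remember which suffix, if any, was given; a template that
     explicitly forbids that suffix (its no_?suf modifier set) is
     skipped in the matching loop below, unless we are in Intel syntax
     and the template ignores operand size.  */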
3950 memset (&suffix_check, 0, sizeof (suffix_check));
3951 if (i.suffix == BYTE_MNEM_SUFFIX)
3952 suffix_check.no_bsuf = 1;
3953 else if (i.suffix == WORD_MNEM_SUFFIX)
3954 suffix_check.no_wsuf = 1;
3955 else if (i.suffix == SHORT_MNEM_SUFFIX)
3956 suffix_check.no_ssuf = 1;
3957 else if (i.suffix == LONG_MNEM_SUFFIX)
3958 suffix_check.no_lsuf = 1;
3959 else if (i.suffix == QWORD_MNEM_SUFFIX)
3960 suffix_check.no_qsuf = 1;
3961 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3962 suffix_check.no_ldsuf = 1;
3963
3964 /* Must have right number of operands. */
3965 i.error = number_of_operands_mismatch;
3966
3967 for (t = current_templates->start; t < current_templates->end; t++)
3968 {
3969 addr_prefix_disp = -1;
3970
3971 if (i.operands != t->operands)
3972 continue;
3973
3974 /* Check processor support. */
3975 i.error = unsupported;
3976 found_cpu_match = (cpu_flags_match (t)
3977 == CPU_FLAGS_PERFECT_MATCH);
3978 if (!found_cpu_match)
3979 continue;
3980
3981 /* Check old gcc support. */
3982 i.error = old_gcc_only;
3983 if (!old_gcc && t->opcode_modifier.oldgcc)
3984 continue;
3985
3986 /* Check AT&T mnemonic. */
3987 i.error = unsupported_with_intel_mnemonic;
3988 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3989 continue;
3990
3991 /* Check AT&T/Intel syntax. */
3992 i.error = unsupported_syntax;
3993 if ((intel_syntax && t->opcode_modifier.attsyntax)
3994 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3995 continue;
3996
3997 /* Check the suffix, except for some instructions in intel mode. */
3998 i.error = invalid_instruction_suffix;
3999 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4000 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4001 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4002 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4003 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4004 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4005 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4006 continue;
4007
4008 if (!operand_size_match (t))
4009 continue;
4010
4011 for (j = 0; j < MAX_OPERANDS; j++)
4012 operand_types[j] = t->operand_types[j];
4013
4014 /* In general, don't allow 64-bit operands in 32-bit mode. */
4015 if (i.suffix == QWORD_MNEM_SUFFIX
4016 && flag_code != CODE_64BIT
4017 && (intel_syntax
4018 ? (!t->opcode_modifier.ignoresize
4019 && !intel_float_operand (t->name))
4020 : intel_float_operand (t->name) != 2)
4021 && ((!operand_types[0].bitfield.regmmx
4022 && !operand_types[0].bitfield.regxmm
4023 && !operand_types[0].bitfield.regymm)
4024 || (!operand_types[t->operands > 1].bitfield.regmmx
4025 && !operand_types[t->operands > 1].bitfield.regxmm
4026 && !operand_types[t->operands > 1].bitfield.regymm))
4027 && (t->base_opcode != 0x0fc7
4028 || t->extension_opcode != 1 /* cmpxchg8b */))
4029 continue;
4030
4031 /* In general, don't allow 32-bit operands on pre-386. */
4032 else if (i.suffix == LONG_MNEM_SUFFIX
4033 && !cpu_arch_flags.bitfield.cpui386
4034 && (intel_syntax
4035 ? (!t->opcode_modifier.ignoresize
4036 && !intel_float_operand (t->name))
4037 : intel_float_operand (t->name) != 2)
4038 && ((!operand_types[0].bitfield.regmmx
4039 && !operand_types[0].bitfield.regxmm)
4040 || (!operand_types[t->operands > 1].bitfield.regmmx
4041 && !operand_types[t->operands > 1].bitfield.regxmm)))
4042 continue;
4043
4044 /* Do not verify operands when there are none. */
4045 else
4046 {
4047 if (!t->operands)
4048 /* We've found a match; break out of loop. */
4049 break;
4050 }
4051
4052 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4053 into Disp32/Disp16/Disp32 operand. */
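   /* e.g. an explicit `addr32' prefix in 16-bit code means a Disp16
      template operand must actually be matched as Disp32, and an
      `addr16' prefix in 32-bit code means the reverse.  */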
4054 if (i.prefix[ADDR_PREFIX] != 0)
4055 {
4056 /* There should be only one Disp operand. */
4057 switch (flag_code)
4058 {
4059 case CODE_16BIT:
4060 for (j = 0; j < MAX_OPERANDS; j++)
4061 {
4062 if (operand_types[j].bitfield.disp16)
4063 {
4064 addr_prefix_disp = j;
4065 operand_types[j].bitfield.disp32 = 1;
4066 operand_types[j].bitfield.disp16 = 0;
4067 break;
4068 }
4069 }
4070 break;
4071 case CODE_32BIT:
4072 for (j = 0; j < MAX_OPERANDS; j++)
4073 {
4074 if (operand_types[j].bitfield.disp32)
4075 {
4076 addr_prefix_disp = j;
4077 operand_types[j].bitfield.disp32 = 0;
4078 operand_types[j].bitfield.disp16 = 1;
4079 break;
4080 }
4081 }
4082 break;
4083 case CODE_64BIT:
4084 for (j = 0; j < MAX_OPERANDS; j++)
4085 {
4086 if (operand_types[j].bitfield.disp64)
4087 {
4088 addr_prefix_disp = j;
4089 operand_types[j].bitfield.disp64 = 0;
4090 operand_types[j].bitfield.disp32 = 1;
4091 break;
4092 }
4093 }
4094 break;
4095 }
4096 }
4097
4098 /* We check register size if needed. */
4099 check_register = t->opcode_modifier.checkregsize;
4100 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4101 switch (t->operands)
4102 {
4103 case 1:
4104 if (!operand_type_match (overlap0, i.types[0]))
4105 continue;
4106 break;
4107 case 2:
4108 /* xchg %eax, %eax is a special case. It is an alias for nop
4109 only in 32bit mode and we can use opcode 0x90. In 64bit
4110 mode, we can't use 0x90 for xchg %eax, %eax since it should
4111 zero-extend %eax to %rax. */
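	  /* e.g. in 64-bit mode `xchg %eax,%eax' is therefore emitted via
	     the ModRM form (opcode 0x87) instead, so the upper half of
	     %rax is cleared as the architecture requires.  */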
4112 if (flag_code == CODE_64BIT
4113 && t->base_opcode == 0x90
4114 && operand_type_equal (&i.types [0], &acc32)
4115 && operand_type_equal (&i.types [1], &acc32))
4116 continue;
4117 if (i.swap_operand)
4118 {
4119 /* If we swap the operands in the encoding, we either match
4120    the next template or reverse the direction of the operands. */
4121 if (t->opcode_modifier.s)
4122 continue;
4123 else if (t->opcode_modifier.d)
4124 goto check_reverse;
4125 }
4126
4127 case 3:
4128 /* If we swap the operands in the encoding, we match the next template. */
4129 if (i.swap_operand && t->opcode_modifier.s)
4130 continue;
4131 case 4:
4132 case 5:
4133 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4134 if (!operand_type_match (overlap0, i.types[0])
4135 || !operand_type_match (overlap1, i.types[1])
4136 || (check_register
4137 && !operand_type_register_match (overlap0, i.types[0],
4138 operand_types[0],
4139 overlap1, i.types[1],
4140 operand_types[1])))
4141 {
4142 /* Check if other direction is valid ... */
4143 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4144 continue;
4145
4146 check_reverse:
4147 /* Try reversing direction of operands. */
4148 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4149 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4150 if (!operand_type_match (overlap0, i.types[0])
4151 || !operand_type_match (overlap1, i.types[1])
4152 || (check_register
4153 && !operand_type_register_match (overlap0,
4154 i.types[0],
4155 operand_types[1],
4156 overlap1,
4157 i.types[1],
4158 operand_types[0])))
4159 {
4160 /* Does not match either direction. */
4161 continue;
4162 }
4163 /* found_reverse_match holds which of D or FloatDR
4164 we've found. */
4165 if (t->opcode_modifier.d)
4166 found_reverse_match = Opcode_D;
4167 else if (t->opcode_modifier.floatd)
4168 found_reverse_match = Opcode_FloatD;
4169 else
4170 found_reverse_match = 0;
4171 if (t->opcode_modifier.floatr)
4172 found_reverse_match |= Opcode_FloatR;
4173 }
4174 else
4175 {
4176 /* Found a forward 2 operand match here. */
4177 switch (t->operands)
4178 {
4179 case 5:
4180 overlap4 = operand_type_and (i.types[4],
4181 operand_types[4]);
4182 case 4:
4183 overlap3 = operand_type_and (i.types[3],
4184 operand_types[3]);
4185 case 3:
4186 overlap2 = operand_type_and (i.types[2],
4187 operand_types[2]);
4188 break;
4189 }
4190
4191 switch (t->operands)
4192 {
4193 case 5:
4194 if (!operand_type_match (overlap4, i.types[4])
4195 || !operand_type_register_match (overlap3,
4196 i.types[3],
4197 operand_types[3],
4198 overlap4,
4199 i.types[4],
4200 operand_types[4]))
4201 continue;
4202 case 4:
4203 if (!operand_type_match (overlap3, i.types[3])
4204 || (check_register
4205 && !operand_type_register_match (overlap2,
4206 i.types[2],
4207 operand_types[2],
4208 overlap3,
4209 i.types[3],
4210 operand_types[3])))
4211 continue;
4212 case 3:
4213 /* Here we make use of the fact that there are no
4214 reverse match 3 operand instructions, and all 3
4215 operand instructions only need to be checked for
4216 register consistency between operands 2 and 3. */
4217 if (!operand_type_match (overlap2, i.types[2])
4218 || (check_register
4219 && !operand_type_register_match (overlap1,
4220 i.types[1],
4221 operand_types[1],
4222 overlap2,
4223 i.types[2],
4224 operand_types[2])))
4225 continue;
4226 break;
4227 }
4228 }
4229 /* Found either forward/reverse 2, 3 or 4 operand match here:
4230    fall through to break. */
4231 }
4232 if (!found_cpu_match)
4233 {
4234 found_reverse_match = 0;
4235 continue;
4236 }
4237
4238 /* Check if vector operands are valid. */
4239 if (check_VecOperands (t))
4240 continue;
4241
4242 /* Check if VEX operands are valid. */
4243 if (VEX_check_operands (t))
4244 continue;
4245
4246 /* We've found a match; break out of loop. */
4247 break;
4248 }
4249
4250 if (t == current_templates->end)
4251 {
4252 /* We found no match. */
4253 const char *err_msg;
4254 switch (i.error)
4255 {
4256 default:
4257 abort ();
4258 case operand_size_mismatch:
4259 err_msg = _("operand size mismatch");
4260 break;
4261 case operand_type_mismatch:
4262 err_msg = _("operand type mismatch");
4263 break;
4264 case register_type_mismatch:
4265 err_msg = _("register type mismatch");
4266 break;
4267 case number_of_operands_mismatch:
4268 err_msg = _("number of operands mismatch");
4269 break;
4270 case invalid_instruction_suffix:
4271 err_msg = _("invalid instruction suffix");
4272 break;
4273 case bad_imm4:
4274 err_msg = _("Imm4 isn't the first operand");
4275 break;
4276 case old_gcc_only:
4277 err_msg = _("only supported with old gcc");
4278 break;
4279 case unsupported_with_intel_mnemonic:
4280 err_msg = _("unsupported with Intel mnemonic");
4281 break;
4282 case unsupported_syntax:
4283 err_msg = _("unsupported syntax");
4284 break;
4285 case unsupported:
4286 err_msg = _("unsupported");
4287 break;
4288 case invalid_vsib_address:
4289 err_msg = _("invalid VSIB address");
4290 break;
4291 case unsupported_vector_index_register:
4292 err_msg = _("unsupported vector index register");
4293 break;
4294 }
4295 as_bad (_("%s for `%s'"), err_msg,
4296 current_templates->start->name);
4297 return NULL;
4298 }
4299
4300 if (!quiet_warnings)
4301 {
4302 if (!intel_syntax
4303 && (i.types[0].bitfield.jumpabsolute
4304 != operand_types[0].bitfield.jumpabsolute))
4305 {
4306 as_warn (_("indirect %s without `*'"), t->name);
4307 }
4308
4309 if (t->opcode_modifier.isprefix
4310 && t->opcode_modifier.ignoresize)
4311 {
4312 /* Warn them that a data or address size prefix doesn't
4313 affect assembly of the next line of code. */
4314 as_warn (_("stand-alone `%s' prefix"), t->name);
4315 }
4316 }
4317
4318 /* Copy the template we found. */
4319 i.tm = *t;
4320
4321 if (addr_prefix_disp != -1)
4322 i.tm.operand_types[addr_prefix_disp]
4323 = operand_types[addr_prefix_disp];
4324
4325 if (found_reverse_match)
4326 {
4327 /* If we found a reverse match we must alter the opcode
4328 direction bit. found_reverse_match holds bits to change
4329 (different for int & float insns). */
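     /* e.g. for an integer `mov' this toggles between the 0x88/0x89
	(reg -> r/m) and 0x8a/0x8b (r/m -> reg) opcode forms.  */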
4330
4331 i.tm.base_opcode ^= found_reverse_match;
4332
4333 i.tm.operand_types[0] = operand_types[1];
4334 i.tm.operand_types[1] = operand_types[0];
4335 }
4336
4337 return t;
4338 }
4339
4340 static int
4341 check_string (void)
4342 {
4343 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4344 if (i.tm.operand_types[mem_op].bitfield.esseg)
4345 {
4346 if (i.seg[0] != NULL && i.seg[0] != &es)
4347 {
4348 as_bad (_("`%s' operand %d must use `%ses' segment"),
4349 i.tm.name,
4350 mem_op + 1,
4351 register_prefix);
4352 return 0;
4353 }
4354 /* There's only ever one segment override allowed per instruction.
4355 This instruction possibly has a legal segment override on the
4356 second operand, so copy the segment to where non-string
4357 instructions store it, allowing common code. */
4358 i.seg[0] = i.seg[1];
4359 }
4360 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4361 {
4362 if (i.seg[1] != NULL && i.seg[1] != &es)
4363 {
4364 as_bad (_("`%s' operand %d must use `%ses' segment"),
4365 i.tm.name,
4366 mem_op + 2,
4367 register_prefix);
4368 return 0;
4369 }
4370 }
4371 return 1;
4372 }
4373
4374 static int
4375 process_suffix (void)
4376 {
4377 /* If matched instruction specifies an explicit instruction mnemonic
4378 suffix, use it. */
4379 if (i.tm.opcode_modifier.size16)
4380 i.suffix = WORD_MNEM_SUFFIX;
4381 else if (i.tm.opcode_modifier.size32)
4382 i.suffix = LONG_MNEM_SUFFIX;
4383 else if (i.tm.opcode_modifier.size64)
4384 i.suffix = QWORD_MNEM_SUFFIX;
4385 else if (i.reg_operands)
4386 {
4387 /* If there's no instruction mnemonic suffix we try to invent one
4388 based on register operands. */
4389 if (!i.suffix)
4390 {
4391 /* We take i.suffix from the last register operand specified.
4392    The destination register type is more significant than the
4393    source register type; crc32 in SSE4.2 prefers the source
4394    register type. */
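	  /* e.g. `crc32 %cx,%eax' takes its `w' suffix from the %cx
	     source operand rather than from the 32-bit destination.  */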
4395 if (i.tm.base_opcode == 0xf20f38f1)
4396 {
4397 if (i.types[0].bitfield.reg16)
4398 i.suffix = WORD_MNEM_SUFFIX;
4399 else if (i.types[0].bitfield.reg32)
4400 i.suffix = LONG_MNEM_SUFFIX;
4401 else if (i.types[0].bitfield.reg64)
4402 i.suffix = QWORD_MNEM_SUFFIX;
4403 }
4404 else if (i.tm.base_opcode == 0xf20f38f0)
4405 {
4406 if (i.types[0].bitfield.reg8)
4407 i.suffix = BYTE_MNEM_SUFFIX;
4408 }
4409
4410 if (!i.suffix)
4411 {
4412 int op;
4413
4414 if (i.tm.base_opcode == 0xf20f38f1
4415 || i.tm.base_opcode == 0xf20f38f0)
4416 {
4417 /* We have to know the operand size for crc32. */
4418 as_bad (_("ambiguous memory operand size for `%s'"),
4419 i.tm.name);
4420 return 0;
4421 }
4422
4423 for (op = i.operands; --op >= 0;)
4424 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4425 {
4426 if (i.types[op].bitfield.reg8)
4427 {
4428 i.suffix = BYTE_MNEM_SUFFIX;
4429 break;
4430 }
4431 else if (i.types[op].bitfield.reg16)
4432 {
4433 i.suffix = WORD_MNEM_SUFFIX;
4434 break;
4435 }
4436 else if (i.types[op].bitfield.reg32)
4437 {
4438 i.suffix = LONG_MNEM_SUFFIX;
4439 break;
4440 }
4441 else if (i.types[op].bitfield.reg64)
4442 {
4443 i.suffix = QWORD_MNEM_SUFFIX;
4444 break;
4445 }
4446 }
4447 }
4448 }
4449 else if (i.suffix == BYTE_MNEM_SUFFIX)
4450 {
4451 if (intel_syntax
4452 && i.tm.opcode_modifier.ignoresize
4453 && i.tm.opcode_modifier.no_bsuf)
4454 i.suffix = 0;
4455 else if (!check_byte_reg ())
4456 return 0;
4457 }
4458 else if (i.suffix == LONG_MNEM_SUFFIX)
4459 {
4460 if (intel_syntax
4461 && i.tm.opcode_modifier.ignoresize
4462 && i.tm.opcode_modifier.no_lsuf)
4463 i.suffix = 0;
4464 else if (!check_long_reg ())
4465 return 0;
4466 }
4467 else if (i.suffix == QWORD_MNEM_SUFFIX)
4468 {
4469 if (intel_syntax
4470 && i.tm.opcode_modifier.ignoresize
4471 && i.tm.opcode_modifier.no_qsuf)
4472 i.suffix = 0;
4473 else if (!check_qword_reg ())
4474 return 0;
4475 }
4476 else if (i.suffix == WORD_MNEM_SUFFIX)
4477 {
4478 if (intel_syntax
4479 && i.tm.opcode_modifier.ignoresize
4480 && i.tm.opcode_modifier.no_wsuf)
4481 i.suffix = 0;
4482 else if (!check_word_reg ())
4483 return 0;
4484 }
4485 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4486 || i.suffix == YMMWORD_MNEM_SUFFIX)
4487 {
4488 /* Skip if the instruction has x/y suffix. match_template
4489 should check if it is a valid suffix. */
4490 }
4491 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4492 /* Do nothing if the instruction is going to ignore the prefix. */
4493 ;
4494 else
4495 abort ();
4496 }
4497 else if (i.tm.opcode_modifier.defaultsize
4498 && !i.suffix
4499 /* exclude fldenv/frstor/fsave/fstenv */
4500 && i.tm.opcode_modifier.no_ssuf)
4501 {
4502 i.suffix = stackop_size;
4503 }
4504 else if (intel_syntax
4505 && !i.suffix
4506 && (i.tm.operand_types[0].bitfield.jumpabsolute
4507 || i.tm.opcode_modifier.jumpbyte
4508 || i.tm.opcode_modifier.jumpintersegment
4509 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4510 && i.tm.extension_opcode <= 3)))
4511 {
4512 switch (flag_code)
4513 {
4514 case CODE_64BIT:
4515 if (!i.tm.opcode_modifier.no_qsuf)
4516 {
4517 i.suffix = QWORD_MNEM_SUFFIX;
4518 break;
4519 }
4520 case CODE_32BIT:
4521 if (!i.tm.opcode_modifier.no_lsuf)
4522 i.suffix = LONG_MNEM_SUFFIX;
4523 break;
4524 case CODE_16BIT:
4525 if (!i.tm.opcode_modifier.no_wsuf)
4526 i.suffix = WORD_MNEM_SUFFIX;
4527 break;
4528 }
4529 }
4530
4531 if (!i.suffix)
4532 {
4533 if (!intel_syntax)
4534 {
4535 if (i.tm.opcode_modifier.w)
4536 {
4537 as_bad (_("no instruction mnemonic suffix given and "
4538 "no register operands; can't size instruction"));
4539 return 0;
4540 }
4541 }
4542 else
4543 {
4544 unsigned int suffixes;
4545
4546 suffixes = !i.tm.opcode_modifier.no_bsuf;
4547 if (!i.tm.opcode_modifier.no_wsuf)
4548 suffixes |= 1 << 1;
4549 if (!i.tm.opcode_modifier.no_lsuf)
4550 suffixes |= 1 << 2;
4551 if (!i.tm.opcode_modifier.no_ldsuf)
4552 suffixes |= 1 << 3;
4553 if (!i.tm.opcode_modifier.no_ssuf)
4554 suffixes |= 1 << 4;
4555 if (!i.tm.opcode_modifier.no_qsuf)
4556 suffixes |= 1 << 5;
4557
4558 /* There is more than one possible suffix. */
4559 if (i.tm.opcode_modifier.w
4560 || ((suffixes & (suffixes - 1))
4561 && !i.tm.opcode_modifier.defaultsize
4562 && !i.tm.opcode_modifier.ignoresize))
4563 {
4564 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4565 return 0;
4566 }
4567 }
4568 }
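  /* e.g. Intel-mode `mov [eax], 1' is rejected by the check above,
     since byte, word and dword forms would all be possible.  */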
4569
4570 /* Change the opcode based on the operand size given by i.suffix;
4571 We don't need to change things for byte insns. */
4572
4573 if (i.suffix
4574 && i.suffix != BYTE_MNEM_SUFFIX
4575 && i.suffix != XMMWORD_MNEM_SUFFIX
4576 && i.suffix != YMMWORD_MNEM_SUFFIX)
4577 {
4578 /* It's not a byte, select word/dword operation. */
4579 if (i.tm.opcode_modifier.w)
4580 {
4581 if (i.tm.opcode_modifier.shortform)
4582 i.tm.base_opcode |= 8;
4583 else
4584 i.tm.base_opcode |= 1;
4585 }
4586
4587 /* Now select between word & dword operations via the operand
4588 size prefix, except for instructions that will ignore this
4589 prefix anyway. */
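      /* e.g. `movw %ax,%bx' assembled in 32-bit mode needs the 0x66
	 operand-size prefix; jcxz/loop style insns instead take the
	 0x67 address-size prefix (handled below).  */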
4590 if (i.tm.opcode_modifier.addrprefixop0)
4591 {
4592 /* The address size override prefix changes the size of the
4593 first operand. */
4594 if ((flag_code == CODE_32BIT
4595 && i.op->regs[0].reg_type.bitfield.reg16)
4596 || (flag_code != CODE_32BIT
4597 && i.op->regs[0].reg_type.bitfield.reg32))
4598 if (!add_prefix (ADDR_PREFIX_OPCODE))
4599 return 0;
4600 }
4601 else if (i.suffix != QWORD_MNEM_SUFFIX
4602 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4603 && !i.tm.opcode_modifier.ignoresize
4604 && !i.tm.opcode_modifier.floatmf
4605 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4606 || (flag_code == CODE_64BIT
4607 && i.tm.opcode_modifier.jumpbyte)))
4608 {
4609 unsigned int prefix = DATA_PREFIX_OPCODE;
4610
4611 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4612 prefix = ADDR_PREFIX_OPCODE;
4613
4614 if (!add_prefix (prefix))
4615 return 0;
4616 }
4617
4618 /* Set mode64 for an operand. */
4619 if (i.suffix == QWORD_MNEM_SUFFIX
4620 && flag_code == CODE_64BIT
4621 && !i.tm.opcode_modifier.norex64)
4622 {
4623 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4624 need rex64. cmpxchg8b is also a special case. */
4625 if (! (i.operands == 2
4626 && i.tm.base_opcode == 0x90
4627 && i.tm.extension_opcode == None
4628 && operand_type_equal (&i.types [0], &acc64)
4629 && operand_type_equal (&i.types [1], &acc64))
4630 && ! (i.operands == 1
4631 && i.tm.base_opcode == 0xfc7
4632 && i.tm.extension_opcode == 1
4633 && !operand_type_check (i.types [0], reg)
4634 && operand_type_check (i.types [0], anymem)))
4635 i.rex |= REX_W;
4636 }
4637
4638 /* Size floating point instruction. */
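      /* e.g. with an `l' suffix `fld' becomes `fldl': opcode 0xd9 is
	 turned into 0xdd for the 64-bit memory form.  */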
4639 if (i.suffix == LONG_MNEM_SUFFIX)
4640 if (i.tm.opcode_modifier.floatmf)
4641 i.tm.base_opcode ^= 4;
4642 }
4643
4644 return 1;
4645 }
4646
4647 static int
4648 check_byte_reg (void)
4649 {
4650 int op;
4651
4652 for (op = i.operands; --op >= 0;)
4653 {
4654 /* If this is an eight bit register, it's OK. If it's the 16 or
4655 32 bit version of an eight bit register, we will just use the
4656 low portion, and that's OK too. */
4657 if (i.types[op].bitfield.reg8)
4658 continue;
4659
4660 /* crc32 doesn't generate this warning. */
4661 if (i.tm.base_opcode == 0xf20f38f0)
4662 continue;
4663
4664 if ((i.types[op].bitfield.reg16
4665 || i.types[op].bitfield.reg32
4666 || i.types[op].bitfield.reg64)
4667 && i.op[op].regs->reg_num < 4)
4668 {
4669 /* Prohibit these changes in the 64bit mode, since the
4670 lowering is more complicated. */
4671 if (flag_code == CODE_64BIT
4672 && !i.tm.operand_types[op].bitfield.inoutportreg)
4673 {
4674 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4675 register_prefix, i.op[op].regs->reg_name,
4676 i.suffix);
4677 return 0;
4678 }
4679 #if REGISTER_WARNINGS
4680 if (!quiet_warnings
4681 && !i.tm.operand_types[op].bitfield.inoutportreg)
4682 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4683 register_prefix,
4684 (i.op[op].regs + (i.types[op].bitfield.reg16
4685 ? REGNAM_AL - REGNAM_AX
4686 : REGNAM_AL - REGNAM_EAX))->reg_name,
4687 register_prefix,
4688 i.op[op].regs->reg_name,
4689 i.suffix);
4690 #endif
4691 continue;
4692 }
4693 /* Any other register is bad. */
4694 if (i.types[op].bitfield.reg16
4695 || i.types[op].bitfield.reg32
4696 || i.types[op].bitfield.reg64
4697 || i.types[op].bitfield.regmmx
4698 || i.types[op].bitfield.regxmm
4699 || i.types[op].bitfield.regymm
4700 || i.types[op].bitfield.sreg2
4701 || i.types[op].bitfield.sreg3
4702 || i.types[op].bitfield.control
4703 || i.types[op].bitfield.debug
4704 || i.types[op].bitfield.test
4705 || i.types[op].bitfield.floatreg
4706 || i.types[op].bitfield.floatacc)
4707 {
4708 as_bad (_("`%s%s' not allowed with `%s%c'"),
4709 register_prefix,
4710 i.op[op].regs->reg_name,
4711 i.tm.name,
4712 i.suffix);
4713 return 0;
4714 }
4715 }
4716 return 1;
4717 }
4718
4719 static int
4720 check_long_reg (void)
4721 {
4722 int op;
4723
4724 for (op = i.operands; --op >= 0;)
4725 /* Reject eight bit registers, except where the template requires
4726 them. (eg. movzb) */
4727 if (i.types[op].bitfield.reg8
4728 && (i.tm.operand_types[op].bitfield.reg16
4729 || i.tm.operand_types[op].bitfield.reg32
4730 || i.tm.operand_types[op].bitfield.acc))
4731 {
4732 as_bad (_("`%s%s' not allowed with `%s%c'"),
4733 register_prefix,
4734 i.op[op].regs->reg_name,
4735 i.tm.name,
4736 i.suffix);
4737 return 0;
4738 }
4739 /* Warn if the e prefix on a general reg is missing. */
4740 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4741 && i.types[op].bitfield.reg16
4742 && (i.tm.operand_types[op].bitfield.reg32
4743 || i.tm.operand_types[op].bitfield.acc))
4744 {
4745 /* Prohibit these changes in the 64bit mode, since the
4746 lowering is more complicated. */
4747 if (flag_code == CODE_64BIT)
4748 {
4749 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4750 register_prefix, i.op[op].regs->reg_name,
4751 i.suffix);
4752 return 0;
4753 }
4754 #if REGISTER_WARNINGS
4755 else
4756 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4757 register_prefix,
4758 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4759 register_prefix,
4760 i.op[op].regs->reg_name,
4761 i.suffix);
4762 #endif
4763 }
4764 /* Warn if the r prefix on a general reg is missing. */
4765 else if (i.types[op].bitfield.reg64
4766 && (i.tm.operand_types[op].bitfield.reg32
4767 || i.tm.operand_types[op].bitfield.acc))
4768 {
4769 if (intel_syntax
4770 && i.tm.opcode_modifier.toqword
4771 && !i.types[0].bitfield.regxmm)
4772 {
4773 /* Convert to QWORD. We want REX byte. */
4774 i.suffix = QWORD_MNEM_SUFFIX;
4775 }
4776 else
4777 {
4778 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4779 register_prefix, i.op[op].regs->reg_name,
4780 i.suffix);
4781 return 0;
4782 }
4783 }
4784 return 1;
4785 }
4786
4787 static int
4788 check_qword_reg (void)
4789 {
4790 int op;
4791
4792 for (op = i.operands; --op >= 0; )
4793 /* Reject eight bit registers, except where the template requires
4794 them. (eg. movzb) */
4795 if (i.types[op].bitfield.reg8
4796 && (i.tm.operand_types[op].bitfield.reg16
4797 || i.tm.operand_types[op].bitfield.reg32
4798 || i.tm.operand_types[op].bitfield.acc))
4799 {
4800 as_bad (_("`%s%s' not allowed with `%s%c'"),
4801 register_prefix,
4802 i.op[op].regs->reg_name,
4803 i.tm.name,
4804 i.suffix);
4805 return 0;
4806 }
4807 /* Warn if the e prefix on a general reg is missing. */
4808 else if ((i.types[op].bitfield.reg16
4809 || i.types[op].bitfield.reg32)
4810 && (i.tm.operand_types[op].bitfield.reg32
4811 || i.tm.operand_types[op].bitfield.acc))
4812 {
4813 /* Prohibit these changes in the 64bit mode, since the
4814 lowering is more complicated. */
4815 if (intel_syntax
4816 && i.tm.opcode_modifier.todword
4817 && !i.types[0].bitfield.regxmm)
4818 {
4819 /* Convert to DWORD. We don't want REX byte. */
4820 i.suffix = LONG_MNEM_SUFFIX;
4821 }
4822 else
4823 {
4824 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4825 register_prefix, i.op[op].regs->reg_name,
4826 i.suffix);
4827 return 0;
4828 }
4829 }
4830 return 1;
4831 }
4832
4833 static int
4834 check_word_reg (void)
4835 {
4836 int op;
4837 for (op = i.operands; --op >= 0;)
4838 /* Reject eight bit registers, except where the template requires
4839 them. (eg. movzb) */
4840 if (i.types[op].bitfield.reg8
4841 && (i.tm.operand_types[op].bitfield.reg16
4842 || i.tm.operand_types[op].bitfield.reg32
4843 || i.tm.operand_types[op].bitfield.acc))
4844 {
4845 as_bad (_("`%s%s' not allowed with `%s%c'"),
4846 register_prefix,
4847 i.op[op].regs->reg_name,
4848 i.tm.name,
4849 i.suffix);
4850 return 0;
4851 }
4852 /* Warn if the e prefix on a general reg is present. */
4853 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4854 && i.types[op].bitfield.reg32
4855 && (i.tm.operand_types[op].bitfield.reg16
4856 || i.tm.operand_types[op].bitfield.acc))
4857 {
4858 /* Prohibit these changes in the 64bit mode, since the
4859 lowering is more complicated. */
4860 if (flag_code == CODE_64BIT)
4861 {
4862 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4863 register_prefix, i.op[op].regs->reg_name,
4864 i.suffix);
4865 return 0;
4866 }
4867 #if REGISTER_WARNINGS
4868 	else
4869 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4870 register_prefix,
4871 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4872 register_prefix,
4873 i.op[op].regs->reg_name,
4874 i.suffix);
4875 #endif
4876 }
4877 return 1;
4878 }
4879
4880 static int
4881 update_imm (unsigned int j)
4882 {
4883 i386_operand_type overlap = i.types[j];
4884 if ((overlap.bitfield.imm8
4885 || overlap.bitfield.imm8s
4886 || overlap.bitfield.imm16
4887 || overlap.bitfield.imm32
4888 || overlap.bitfield.imm32s
4889 || overlap.bitfield.imm64)
4890 && !operand_type_equal (&overlap, &imm8)
4891 && !operand_type_equal (&overlap, &imm8s)
4892 && !operand_type_equal (&overlap, &imm16)
4893 && !operand_type_equal (&overlap, &imm32)
4894 && !operand_type_equal (&overlap, &imm32s)
4895 && !operand_type_equal (&overlap, &imm64))
4896 {
4897 if (i.suffix)
4898 {
4899 i386_operand_type temp;
4900
4901 operand_type_set (&temp, 0);
4902 if (i.suffix == BYTE_MNEM_SUFFIX)
4903 {
4904 temp.bitfield.imm8 = overlap.bitfield.imm8;
4905 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4906 }
4907 else if (i.suffix == WORD_MNEM_SUFFIX)
4908 temp.bitfield.imm16 = overlap.bitfield.imm16;
4909 else if (i.suffix == QWORD_MNEM_SUFFIX)
4910 {
4911 temp.bitfield.imm64 = overlap.bitfield.imm64;
4912 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4913 }
4914 else
4915 temp.bitfield.imm32 = overlap.bitfield.imm32;
4916 overlap = temp;
4917 }
4918 else if (operand_type_equal (&overlap, &imm16_32_32s)
4919 || operand_type_equal (&overlap, &imm16_32)
4920 || operand_type_equal (&overlap, &imm16_32s))
4921 {
4922 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4923 overlap = imm16;
4924 else
4925 overlap = imm32s;
4926 }
4927 if (!operand_type_equal (&overlap, &imm8)
4928 && !operand_type_equal (&overlap, &imm8s)
4929 && !operand_type_equal (&overlap, &imm16)
4930 && !operand_type_equal (&overlap, &imm32)
4931 && !operand_type_equal (&overlap, &imm32s)
4932 && !operand_type_equal (&overlap, &imm64))
4933 {
4934 as_bad (_("no instruction mnemonic suffix given; "
4935 "can't determine immediate size"));
4936 return 0;
4937 }
4938 }
4939 i.types[j] = overlap;
4940
4941 return 1;
4942 }
4943
4944 static int
4945 finalize_imm (void)
4946 {
4947 unsigned int j, n;
4948
4949 /* Update the first 2 immediate operands. */
4950 n = i.operands > 2 ? 2 : i.operands;
4951 if (n)
4952 {
4953 for (j = 0; j < n; j++)
4954 if (update_imm (j) == 0)
4955 return 0;
4956
4957 /* The 3rd operand can't be an immediate operand. */
4958 gas_assert (operand_type_check (i.types[2], imm) == 0);
4959 }
4960
4961 return 1;
4962 }
4963
4964 static int
4965 bad_implicit_operand (int xmm)
4966 {
4967 const char *ireg = xmm ? "xmm0" : "ymm0";
4968
4969 if (intel_syntax)
4970 as_bad (_("the last operand of `%s' must be `%s%s'"),
4971 i.tm.name, register_prefix, ireg);
4972 else
4973 as_bad (_("the first operand of `%s' must be `%s%s'"),
4974 i.tm.name, register_prefix, ireg);
4975 return 0;
4976 }
4977
4978 static int
4979 process_operands (void)
4980 {
4981 /* Default segment register this instruction will use for memory
4982 accesses. 0 means unknown. This is only for optimizing out
4983 unnecessary segment overrides. */
4984 const seg_entry *default_seg = 0;
4985
4986 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4987 {
4988 unsigned int dupl = i.operands;
4989 unsigned int dest = dupl - 1;
4990 unsigned int j;
4991
4992 /* The destination must be an xmm register. */
4993 gas_assert (i.reg_operands
4994 && MAX_OPERANDS > dupl
4995 && operand_type_equal (&i.types[dest], &regxmm));
4996
4997 if (i.tm.opcode_modifier.firstxmm0)
4998 {
4999 /* The first operand is implicit and must be xmm0. */
5000 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5001 if (i.op[0].regs->reg_num != 0)
5002 return bad_implicit_operand (1);
5003
5004 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5005 {
5006 /* Keep xmm0 for instructions with VEX prefix and 3
5007 sources. */
5008 goto duplicate;
5009 }
5010 else
5011 {
5012 /* We remove the first xmm0 and keep the number of
5013 operands unchanged, which in fact duplicates the
5014 destination. */
5015 for (j = 1; j < i.operands; j++)
5016 {
5017 i.op[j - 1] = i.op[j];
5018 i.types[j - 1] = i.types[j];
5019 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5020 }
5021 }
5022 }
5023 else if (i.tm.opcode_modifier.implicit1stxmm0)
5024 {
5025 gas_assert ((MAX_OPERANDS - 1) > dupl
5026 && (i.tm.opcode_modifier.vexsources
5027 == VEX3SOURCES));
5028
5029 /* Add the implicit xmm0 for instructions with VEX prefix
5030 and 3 sources. */
5031 for (j = i.operands; j > 0; j--)
5032 {
5033 i.op[j] = i.op[j - 1];
5034 i.types[j] = i.types[j - 1];
5035 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5036 }
5037 i.op[0].regs
5038 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5039 i.types[0] = regxmm;
5040 i.tm.operand_types[0] = regxmm;
5041
5042 i.operands += 2;
5043 i.reg_operands += 2;
5044 i.tm.operands += 2;
5045
5046 dupl++;
5047 dest++;
5048 i.op[dupl] = i.op[dest];
5049 i.types[dupl] = i.types[dest];
5050 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5051 }
5052 else
5053 {
5054 duplicate:
5055 i.operands++;
5056 i.reg_operands++;
5057 i.tm.operands++;
5058
5059 i.op[dupl] = i.op[dest];
5060 i.types[dupl] = i.types[dest];
5061 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5062 }
5063
5064 if (i.tm.opcode_modifier.immext)
5065 process_immext ();
5066 }
5067 else if (i.tm.opcode_modifier.firstxmm0)
5068 {
5069 unsigned int j;
5070
5071 /* The first operand is implicit and must be xmm0/ymm0. */
5072 gas_assert (i.reg_operands
5073 && (operand_type_equal (&i.types[0], &regxmm)
5074 || operand_type_equal (&i.types[0], &regymm)));
5075 if (i.op[0].regs->reg_num != 0)
5076 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5077
5078 for (j = 1; j < i.operands; j++)
5079 {
5080 i.op[j - 1] = i.op[j];
5081 i.types[j - 1] = i.types[j];
5082
5083 /* We need to adjust fields in i.tm since they are used by
5084 build_modrm_byte. */
5085 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5086 }
5087
5088 i.operands--;
5089 i.reg_operands--;
5090 i.tm.operands--;
5091 }
5092 else if (i.tm.opcode_modifier.regkludge)
5093 {
5094 /* The imul $imm, %reg instruction is converted into
5095 imul $imm, %reg, %reg, and the clr %reg instruction
5096 is converted into xor %reg, %reg. */
5097
5098 unsigned int first_reg_op;
5099
5100 if (operand_type_check (i.types[0], reg))
5101 first_reg_op = 0;
5102 else
5103 first_reg_op = 1;
5104 /* Pretend we saw the extra register operand. */
5105 gas_assert (i.reg_operands == 1
5106 && i.op[first_reg_op + 1].regs == 0);
5107 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5108 i.types[first_reg_op + 1] = i.types[first_reg_op];
5109 i.operands++;
5110 i.reg_operands++;
5111 }
5112
5113 if (i.tm.opcode_modifier.shortform)
5114 {
5115 if (i.types[0].bitfield.sreg2
5116 || i.types[0].bitfield.sreg3)
5117 {
5118 if (i.tm.base_opcode == POP_SEG_SHORT
5119 && i.op[0].regs->reg_num == 1)
5120 {
5121 as_bad (_("you can't `pop %scs'"), register_prefix);
5122 return 0;
5123 }
5124 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5125 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5126 i.rex |= REX_B;
5127 }
5128 else
5129 {
5130 /* The register or float register operand is in operand
5131 0 or 1. */
5132 unsigned int op;
5133
5134 if (i.types[0].bitfield.floatreg
5135 || operand_type_check (i.types[0], reg))
5136 op = 0;
5137 else
5138 op = 1;
5139 /* Register goes in low 3 bits of opcode. */
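	  /* e.g. `push %rbx' becomes 0x50 | 3 = 0x53; registers %r8-%r15
	     additionally set REX.B below.  */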
5140 i.tm.base_opcode |= i.op[op].regs->reg_num;
5141 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5142 i.rex |= REX_B;
5143 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5144 {
5145 /* Warn about some common errors, but press on regardless.
5146 The first case can be generated by gcc (<= 2.8.1). */
5147 if (i.operands == 2)
5148 {
5149 /* Reversed arguments on faddp, fsubp, etc. */
5150 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5151 register_prefix, i.op[!intel_syntax].regs->reg_name,
5152 register_prefix, i.op[intel_syntax].regs->reg_name);
5153 }
5154 else
5155 {
5156 /* Extraneous `l' suffix on fp insn. */
5157 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5158 register_prefix, i.op[0].regs->reg_name);
5159 }
5160 }
5161 }
5162 }
5163 else if (i.tm.opcode_modifier.modrm)
5164 {
5165 /* The opcode is completed (modulo i.tm.extension_opcode which
5166 must be put into the modrm byte). Now, we make the modrm and
5167 index base bytes based on all the info we've collected. */
5168
5169 default_seg = build_modrm_byte ();
5170 }
5171 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5172 {
5173 default_seg = &ds;
5174 }
5175 else if (i.tm.opcode_modifier.isstring)
5176 {
5177 /* For the string instructions that allow a segment override
5178 on one of their operands, the default segment is ds. */
5179 default_seg = &ds;
5180 }
5181
5182 if (i.tm.base_opcode == 0x8d /* lea */
5183 && i.seg[0]
5184 && !quiet_warnings)
5185 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5186
5187 /* If a segment was explicitly specified, and the specified segment
5188 is not the default, use an opcode prefix to select it. If we
5189 never figured out what the default segment is, then default_seg
5190 will be zero at this point, and the specified segment prefix will
5191 always be used. */
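  /* e.g. `movl %fs:(%eax),%ebx' gets a 0x64 prefix here, while an
     explicit %ds override on a plain (%eax) access is dropped as
     redundant.  */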
5192 if ((i.seg[0]) && (i.seg[0] != default_seg))
5193 {
5194 if (!add_prefix (i.seg[0]->seg_prefix))
5195 return 0;
5196 }
5197 return 1;
5198 }
5199
5200 static const seg_entry *
5201 build_modrm_byte (void)
5202 {
5203 const seg_entry *default_seg = 0;
5204 unsigned int source, dest;
5205 int vex_3_sources;
5206
5207 /* The first operand of instructions with VEX prefix and 3 sources
5208 must be VEX_Imm4. */
5209 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5210 if (vex_3_sources)
5211 {
5212 unsigned int nds, reg_slot;
5213 expressionS *exp;
5214
5215 if (i.tm.opcode_modifier.veximmext
5216 && i.tm.opcode_modifier.immext)
5217 {
5218 dest = i.operands - 2;
5219 gas_assert (dest == 3);
5220 }
5221 else
5222 dest = i.operands - 1;
5223 nds = dest - 1;
5224
5225 /* There are 2 kinds of instructions:
5226 1. 5 operands: 4 register operands or 3 register operands
5227 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5228 VexW0 or VexW1. The destination must be either XMM or YMM
5229 register.
5230 2. 4 operands: 4 register operands or 3 register operands
5231 plus 1 memory operand, VexXDS, and VexImmExt */
5232 gas_assert ((i.reg_operands == 4
5233 || (i.reg_operands == 3 && i.mem_operands == 1))
5234 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5235 && (i.tm.opcode_modifier.veximmext
5236 || (i.imm_operands == 1
5237 && i.types[0].bitfield.vec_imm4
5238 && (i.tm.opcode_modifier.vexw == VEXW0
5239 || i.tm.opcode_modifier.vexw == VEXW1)
5240 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5241 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5242
5243 if (i.imm_operands == 0)
5244 {
5245 /* When there is no immediate operand, generate an 8bit
5246 immediate operand to encode the first operand. */
5247 exp = &im_expressions[i.imm_operands++];
5248 i.op[i.operands].imms = exp;
5249 i.types[i.operands] = imm8;
5250 i.operands++;
5251 /* If VexW1 is set, the first operand is the source and
5252 the second operand is encoded in the immediate operand. */
5253 if (i.tm.opcode_modifier.vexw == VEXW1)
5254 {
5255 source = 0;
5256 reg_slot = 1;
5257 }
5258 else
5259 {
5260 source = 1;
5261 reg_slot = 0;
5262 }
5263
5264 /* FMA swaps REG and NDS. */
5265 if (i.tm.cpu_flags.bitfield.cpufma)
5266 {
5267 unsigned int tmp;
5268 tmp = reg_slot;
5269 reg_slot = nds;
5270 nds = tmp;
5271 }
5272
5273 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5274 &regxmm)
5275 || operand_type_equal (&i.tm.operand_types[reg_slot],
5276 &regymm));
5277 exp->X_op = O_constant;
5278 exp->X_add_number
5279 = ((i.op[reg_slot].regs->reg_num
5280 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5281 << 4);
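	  /* e.g. %xmm9 (reg_num 1 with RegRex set) is encoded as 0x90
	     in this immediate byte.  */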
5282 }
5283 else
5284 {
5285 unsigned int imm_slot;
5286
5287 if (i.tm.opcode_modifier.vexw == VEXW0)
5288 {
5289 /* If VexW0 is set, the third operand is the source and
5290 the second operand is encoded in the immediate
5291 operand. */
5292 source = 2;
5293 reg_slot = 1;
5294 }
5295 else
5296 {
5297 /* VexW1 is set, the second operand is the source and
5298 the third operand is encoded in the immediate
5299 operand. */
5300 source = 1;
5301 reg_slot = 2;
5302 }
5303
5304 if (i.tm.opcode_modifier.immext)
5305 {
5306 /* When ImmExt is set, the immediate byte is the last
5307 operand. */
5308 imm_slot = i.operands - 1;
5309 source--;
5310 reg_slot--;
5311 }
5312 else
5313 {
5314 imm_slot = 0;
5315
5316 /* Turn on Imm8 so that output_imm will generate it. */
5317 i.types[imm_slot].bitfield.imm8 = 1;
5318 }
5319
5320 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5321 &regxmm)
5322 || operand_type_equal (&i.tm.operand_types[reg_slot],
5323 &regymm));
5324 i.op[imm_slot].imms->X_add_number
5325 |= ((i.op[reg_slot].regs->reg_num
5326 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5327 << 4);
5328 }
5329
5330 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5331 || operand_type_equal (&i.tm.operand_types[nds],
5332 &regymm));
5333 i.vex.register_specifier = i.op[nds].regs;
5334 }
5335 else
5336 source = dest = 0;
5337
5338 /* i.reg_operands MUST be the number of real register operands;
5339 implicit registers do not count. If there are 3 register
5340    operands, it must be an instruction with VexNDS.  For an
5341    instruction with VexNDD, the destination register is encoded
5342    in the VEX prefix.  If there are 4 register operands, it must be
5343    an instruction with a VEX prefix and 3 sources. */
5344 if (i.mem_operands == 0
5345 && ((i.reg_operands == 2
5346 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5347 || (i.reg_operands == 3
5348 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5349 || (i.reg_operands == 4 && vex_3_sources)))
5350 {
5351 switch (i.operands)
5352 {
5353 case 2:
5354 source = 0;
5355 break;
5356 case 3:
5357 /* When there are 3 operands, one of them may be immediate,
5358 which may be the first or the last operand. Otherwise,
5359    the first operand must be the shift count register (cl) or it
5360 is an instruction with VexNDS. */
5361 gas_assert (i.imm_operands == 1
5362 || (i.imm_operands == 0
5363 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5364 || i.types[0].bitfield.shiftcount)));
5365 if (operand_type_check (i.types[0], imm)
5366 || i.types[0].bitfield.shiftcount)
5367 source = 1;
5368 else
5369 source = 0;
5370 break;
5371 case 4:
5372 /* When there are 4 operands, the first two must be 8bit
5373 immediate operands. The source operand will be the 3rd
5374 one.
5375
5376    For instructions with VexNDS, if the first operand is
5377    an imm8, the source operand is the 2nd one.  If the last
5378 operand is imm8, the source operand is the first one. */
5379 gas_assert ((i.imm_operands == 2
5380 && i.types[0].bitfield.imm8
5381 && i.types[1].bitfield.imm8)
5382 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5383 && i.imm_operands == 1
5384 && (i.types[0].bitfield.imm8
5385 || i.types[i.operands - 1].bitfield.imm8)));
5386 if (i.imm_operands == 2)
5387 source = 2;
5388 else
5389 {
5390 if (i.types[0].bitfield.imm8)
5391 source = 1;
5392 else
5393 source = 0;
5394 }
5395 break;
5396 case 5:
5397 break;
5398 default:
5399 abort ();
5400 }
5401
5402 if (!vex_3_sources)
5403 {
5404 dest = source + 1;
5405
5406 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5407 {
5408 /* For instructions with VexNDS, the register-only
5409 source operand must be 32/64bit integer, XMM or
5410 YMM register. It is encoded in VEX prefix. We
5411 need to clear RegMem bit before calling
5412 operand_type_equal. */
5413
5414 i386_operand_type op;
5415 unsigned int vvvv;
5416
5417 /* Check register-only source operand when two source
5418 operands are swapped. */
5419 if (!i.tm.operand_types[source].bitfield.baseindex
5420 && i.tm.operand_types[dest].bitfield.baseindex)
5421 {
5422 vvvv = source;
5423 source = dest;
5424 }
5425 else
5426 vvvv = dest;
5427
5428 op = i.tm.operand_types[vvvv];
5429 op.bitfield.regmem = 0;
5430 if ((dest + 1) >= i.operands
5431 || (op.bitfield.reg32 != 1
5432 && op.bitfield.reg64 != 1
5433 && !operand_type_equal (&op, &regxmm)
5434 && !operand_type_equal (&op, &regymm)))
5435 abort ();
5436 i.vex.register_specifier = i.op[vvvv].regs;
5437 dest++;
5438 }
5439 }
5440
5441 i.rm.mode = 3;
5442 /* One of the register operands will be encoded in the i.tm.reg
5443 field, the other in the combined i.tm.mode and i.tm.regmem
5444 fields. If no form of this instruction supports a memory
5445 destination operand, then we assume the source operand may
5446 sometimes be a memory operand and so we need to store the
5447 destination in the i.rm.reg field. */
5448 if (!i.tm.operand_types[dest].bitfield.regmem
5449 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5450 {
5451 i.rm.reg = i.op[dest].regs->reg_num;
5452 i.rm.regmem = i.op[source].regs->reg_num;
5453 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5454 i.rex |= REX_R;
5455 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5456 i.rex |= REX_B;
5457 }
5458 else
5459 {
5460 i.rm.reg = i.op[source].regs->reg_num;
5461 i.rm.regmem = i.op[dest].regs->reg_num;
5462 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5463 i.rex |= REX_B;
5464 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5465 i.rex |= REX_R;
5466 }
5467 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5468 {
5469 if (!i.types[0].bitfield.control
5470 && !i.types[1].bitfield.control)
5471 abort ();
5472 i.rex &= ~(REX_R | REX_B);
5473 add_prefix (LOCK_PREFIX_OPCODE);
5474 }
5475 }
5476 else
5477 { /* If it's not 2 reg operands... */
5478 unsigned int mem;
5479
5480 if (i.mem_operands)
5481 {
5482 unsigned int fake_zero_displacement = 0;
5483 unsigned int op;
5484
5485 for (op = 0; op < i.operands; op++)
5486 if (operand_type_check (i.types[op], anymem))
5487 break;
5488 gas_assert (op < i.operands);
5489
5490 if (i.tm.opcode_modifier.vecsib)
5491 {
5492 if (i.index_reg->reg_num == RegEiz
5493 || i.index_reg->reg_num == RegRiz)
5494 abort ();
5495
5496 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5497 if (!i.base_reg)
5498 {
5499 i.sib.base = NO_BASE_REGISTER;
5500 i.sib.scale = i.log2_scale_factor;
5501 i.types[op].bitfield.disp8 = 0;
5502 i.types[op].bitfield.disp16 = 0;
5503 i.types[op].bitfield.disp64 = 0;
5504 if (flag_code != CODE_64BIT)
5505 {
5506 /* Must be 32 bit */
5507 i.types[op].bitfield.disp32 = 1;
5508 i.types[op].bitfield.disp32s = 0;
5509 }
5510 else
5511 {
5512 i.types[op].bitfield.disp32 = 0;
5513 i.types[op].bitfield.disp32s = 1;
5514 }
5515 }
5516 i.sib.index = i.index_reg->reg_num;
5517 if ((i.index_reg->reg_flags & RegRex) != 0)
5518 i.rex |= REX_X;
5519 }
5520
5521 default_seg = &ds;
5522
5523 if (i.base_reg == 0)
5524 {
5525 i.rm.mode = 0;
5526 if (!i.disp_operands)
5527 {
5528 fake_zero_displacement = 1;
5529 /* Instructions with VSIB byte need 32bit displacement
5530 if there is no base register. */
5531 if (i.tm.opcode_modifier.vecsib)
5532 i.types[op].bitfield.disp32 = 1;
5533 }
5534 if (i.index_reg == 0)
5535 {
5536 gas_assert (!i.tm.opcode_modifier.vecsib);
5537 /* Operand is just <disp> */
5538 if (flag_code == CODE_64BIT)
5539 {
5540 /* 64bit mode overwrites the 32bit absolute
5541 addressing by RIP relative addressing and
5542 absolute addressing is encoded by one of the
5543 redundant SIB forms. */
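		      /* e.g. `movl foo,%eax' with neither base nor index
			 is encoded with ModRM.rm = 4 and SIB byte 0x25
			 (no base, no index, disp32), since rm = 5 would
			 mean RIP-relative here.  */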
5544 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5545 i.sib.base = NO_BASE_REGISTER;
5546 i.sib.index = NO_INDEX_REGISTER;
5547 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5548 ? disp32s : disp32);
5549 }
5550 else if ((flag_code == CODE_16BIT)
5551 ^ (i.prefix[ADDR_PREFIX] != 0))
5552 {
5553 i.rm.regmem = NO_BASE_REGISTER_16;
5554 i.types[op] = disp16;
5555 }
5556 else
5557 {
5558 i.rm.regmem = NO_BASE_REGISTER;
5559 i.types[op] = disp32;
5560 }
5561 }
5562 else if (!i.tm.opcode_modifier.vecsib)
5563 {
5564 /* !i.base_reg && i.index_reg */
5565 if (i.index_reg->reg_num == RegEiz
5566 || i.index_reg->reg_num == RegRiz)
5567 i.sib.index = NO_INDEX_REGISTER;
5568 else
5569 i.sib.index = i.index_reg->reg_num;
5570 i.sib.base = NO_BASE_REGISTER;
5571 i.sib.scale = i.log2_scale_factor;
5572 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5573 i.types[op].bitfield.disp8 = 0;
5574 i.types[op].bitfield.disp16 = 0;
5575 i.types[op].bitfield.disp64 = 0;
5576 if (flag_code != CODE_64BIT)
5577 {
5578 /* Must be 32 bit */
5579 i.types[op].bitfield.disp32 = 1;
5580 i.types[op].bitfield.disp32s = 0;
5581 }
5582 else
5583 {
5584 i.types[op].bitfield.disp32 = 0;
5585 i.types[op].bitfield.disp32s = 1;
5586 }
5587 if ((i.index_reg->reg_flags & RegRex) != 0)
5588 i.rex |= REX_X;
5589 }
5590 }
5591 /* RIP addressing for 64bit mode. */
5592 else if (i.base_reg->reg_num == RegRip ||
5593 i.base_reg->reg_num == RegEip)
5594 {
5595 gas_assert (!i.tm.opcode_modifier.vecsib);
5596 i.rm.regmem = NO_BASE_REGISTER;
5597 i.types[op].bitfield.disp8 = 0;
5598 i.types[op].bitfield.disp16 = 0;
5599 i.types[op].bitfield.disp32 = 0;
5600 i.types[op].bitfield.disp32s = 1;
5601 i.types[op].bitfield.disp64 = 0;
5602 i.flags[op] |= Operand_PCrel;
5603 if (! i.disp_operands)
5604 fake_zero_displacement = 1;
5605 }
5606 else if (i.base_reg->reg_type.bitfield.reg16)
5607 {
5608 gas_assert (!i.tm.opcode_modifier.vecsib);
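	      /* 16-bit ModRM r/m encodings: (%bx,%si)=0 (%bx,%di)=1
		 (%bp,%si)=2 (%bp,%di)=3 (%si)=4 (%di)=5 (%bp)=6 (%bx)=7.  */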
5609 switch (i.base_reg->reg_num)
5610 {
5611 case 3: /* (%bx) */
5612 if (i.index_reg == 0)
5613 i.rm.regmem = 7;
5614 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5615 i.rm.regmem = i.index_reg->reg_num - 6;
5616 break;
5617 case 5: /* (%bp) */
5618 default_seg = &ss;
5619 if (i.index_reg == 0)
5620 {
5621 i.rm.regmem = 6;
5622 if (operand_type_check (i.types[op], disp) == 0)
5623 {
5624 /* fake (%bp) into 0(%bp) */
5625 i.types[op].bitfield.disp8 = 1;
5626 fake_zero_displacement = 1;
5627 }
5628 }
5629 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5630 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5631 break;
5632 default: /* (%si) -> 4 or (%di) -> 5 */
5633 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5634 }
5635 i.rm.mode = mode_from_disp_size (i.types[op]);
5636 }
5637 else /* i.base_reg and 32/64 bit mode */
5638 {
5639 if (flag_code == CODE_64BIT
5640 && operand_type_check (i.types[op], disp))
5641 {
5642 i386_operand_type temp;
5643 operand_type_set (&temp, 0);
5644 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5645 i.types[op] = temp;
5646 if (i.prefix[ADDR_PREFIX] == 0)
5647 i.types[op].bitfield.disp32s = 1;
5648 else
5649 i.types[op].bitfield.disp32 = 1;
5650 }
5651
5652 if (!i.tm.opcode_modifier.vecsib)
5653 i.rm.regmem = i.base_reg->reg_num;
5654 if ((i.base_reg->reg_flags & RegRex) != 0)
5655 i.rex |= REX_B;
5656 i.sib.base = i.base_reg->reg_num;
5657 /* x86-64 ignores REX prefix bit here to avoid decoder
5658 complications. */
5659 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5660 {
5661 default_seg = &ss;
5662 if (i.disp_operands == 0)
5663 {
5664 fake_zero_displacement = 1;
5665 i.types[op].bitfield.disp8 = 1;
5666 }
5667 }
5668 else if (i.base_reg->reg_num == ESP_REG_NUM)
5669 {
5670 default_seg = &ss;
5671 }
5672 i.sib.scale = i.log2_scale_factor;
5673 if (i.index_reg == 0)
5674 {
5675 gas_assert (!i.tm.opcode_modifier.vecsib);
5676 /* <disp>(%esp) becomes two byte modrm with no index
5677 register. We've already stored the code for esp
5678    in i.rm.regmem i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5679 Any base register besides %esp will not use the
5680 extra modrm byte. */
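	      /* e.g. `movl 8(%esp),%eax' assembles to 8b 44 24 08:
		 ModRM 0x44 selects the SIB form and SIB 0x24 encodes
		 %esp as the base with no index.  */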
5681 i.sib.index = NO_INDEX_REGISTER;
5682 }
5683 else if (!i.tm.opcode_modifier.vecsib)
5684 {
5685 if (i.index_reg->reg_num == RegEiz
5686 || i.index_reg->reg_num == RegRiz)
5687 i.sib.index = NO_INDEX_REGISTER;
5688 else
5689 i.sib.index = i.index_reg->reg_num;
5690 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5691 if ((i.index_reg->reg_flags & RegRex) != 0)
5692 i.rex |= REX_X;
5693 }
5694
5695 if (i.disp_operands
5696 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5697 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5698 i.rm.mode = 0;
5699 else
5700 i.rm.mode = mode_from_disp_size (i.types[op]);
5701 }
5702
5703 if (fake_zero_displacement)
5704 {
5705 /* Fakes a zero displacement assuming that i.types[op]
5706 holds the correct displacement size. */
5707 expressionS *exp;
5708
5709 gas_assert (i.op[op].disps == 0);
5710 exp = &disp_expressions[i.disp_operands++];
5711 i.op[op].disps = exp;
5712 exp->X_op = O_constant;
5713 exp->X_add_number = 0;
5714 exp->X_add_symbol = (symbolS *) 0;
5715 exp->X_op_symbol = (symbolS *) 0;
5716 }
5717
5718 mem = op;
5719 }
5720 else
5721 mem = ~0;
5722
5723 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5724 {
5725 if (operand_type_check (i.types[0], imm))
5726 i.vex.register_specifier = NULL;
5727 else
5728 {
5729 /* VEX.vvvv encodes one of the sources when the first
5730 operand is not an immediate. */
5731 if (i.tm.opcode_modifier.vexw == VEXW0)
5732 i.vex.register_specifier = i.op[0].regs;
5733 else
5734 i.vex.register_specifier = i.op[1].regs;
5735 }
5736
5737 /* Destination is a XMM register encoded in the ModRM.reg
5738 and VEX.R bit. */
5739 i.rm.reg = i.op[2].regs->reg_num;
5740 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5741 i.rex |= REX_R;
5742
5743 /* ModRM.rm and VEX.B encodes the other source. */
5744 if (!i.mem_operands)
5745 {
5746 i.rm.mode = 3;
5747
5748 if (i.tm.opcode_modifier.vexw == VEXW0)
5749 i.rm.regmem = i.op[1].regs->reg_num;
5750 else
5751 i.rm.regmem = i.op[0].regs->reg_num;
5752
5753 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5754 i.rex |= REX_B;
5755 }
5756 }
5757 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5758 {
5759 i.vex.register_specifier = i.op[2].regs;
5760 if (!i.mem_operands)
5761 {
5762 i.rm.mode = 3;
5763 i.rm.regmem = i.op[1].regs->reg_num;
5764 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5765 i.rex |= REX_B;
5766 }
5767 }
5768 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5769 (if any) based on i.tm.extension_opcode. Again, we must be
5770 careful to make sure that segment/control/debug/test/MMX
5771 registers are coded into the i.rm.reg field. */
5772 else if (i.reg_operands)
5773 {
5774 unsigned int op;
5775 unsigned int vex_reg = ~0;
5776
5777 for (op = 0; op < i.operands; op++)
5778 if (i.types[op].bitfield.reg8
5779 || i.types[op].bitfield.reg16
5780 || i.types[op].bitfield.reg32
5781 || i.types[op].bitfield.reg64
5782 || i.types[op].bitfield.regmmx
5783 || i.types[op].bitfield.regxmm
5784 || i.types[op].bitfield.regymm
5785 || i.types[op].bitfield.sreg2
5786 || i.types[op].bitfield.sreg3
5787 || i.types[op].bitfield.control
5788 || i.types[op].bitfield.debug
5789 || i.types[op].bitfield.test)
5790 break;
5791
5792 if (vex_3_sources)
5793 op = dest;
5794 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5795 {
5796 /* For instructions with VexNDS, the register-only
5797 source operand is encoded in VEX prefix. */
5798 gas_assert (mem != (unsigned int) ~0);
5799
5800 if (op > mem)
5801 {
5802 vex_reg = op++;
5803 gas_assert (op < i.operands);
5804 }
5805 else
5806 {
5807 /* Check register-only source operand when two source
5808 operands are swapped. */
5809 if (!i.tm.operand_types[op].bitfield.baseindex
5810 && i.tm.operand_types[op + 1].bitfield.baseindex)
5811 {
5812 vex_reg = op;
5813 op += 2;
5814 gas_assert (mem == (vex_reg + 1)
5815 && op < i.operands);
5816 }
5817 else
5818 {
5819 vex_reg = op + 1;
5820 gas_assert (vex_reg < i.operands);
5821 }
5822 }
5823 }
5824 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5825 {
5826 /* For instructions with VexNDD, the register destination
5827 is encoded in VEX prefix. */
5828 if (i.mem_operands == 0)
5829 {
5830 /* There is no memory operand. */
5831 gas_assert ((op + 2) == i.operands);
5832 vex_reg = op + 1;
5833 }
5834 else
5835 {
5836 /* There are only 2 operands. */
5837 gas_assert (op < 2 && i.operands == 2);
5838 vex_reg = 1;
5839 }
5840 }
5841 else
5842 gas_assert (op < i.operands);
5843
5844 if (vex_reg != (unsigned int) ~0)
5845 {
5846 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5847
5848 if (type->bitfield.reg32 != 1
5849 && type->bitfield.reg64 != 1
5850 && !operand_type_equal (type, &regxmm)
5851 && !operand_type_equal (type, &regymm))
5852 abort ();
5853
5854 i.vex.register_specifier = i.op[vex_reg].regs;
5855 }
5856
5857 /* Don't set OP operand twice. */
5858 if (vex_reg != op)
5859 {
5860 /* If there is an extension opcode to put here, the
5861 register number must be put into the regmem field. */
5862 if (i.tm.extension_opcode != None)
5863 {
5864 i.rm.regmem = i.op[op].regs->reg_num;
5865 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5866 i.rex |= REX_B;
5867 }
5868 else
5869 {
5870 i.rm.reg = i.op[op].regs->reg_num;
5871 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5872 i.rex |= REX_R;
5873 }
5874 }
5875
5876 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5877 must set it to 3 to indicate this is a register operand
5878 in the regmem field. */
5879 if (!i.mem_operands)
5880 i.rm.mode = 3;
5881 }
5882
5883 /* Fill in i.rm.reg field with extension opcode (if any). */
5884 if (i.tm.extension_opcode != None)
5885 i.rm.reg = i.tm.extension_opcode;
5886 }
5887 return default_seg;
5888 }
5889
5890 static void
5891 output_branch (void)
5892 {
5893 char *p;
5894 int size;
5895 int code16;
5896 int prefix;
5897 relax_substateT subtype;
5898 symbolS *sym;
5899 offsetT off;
5900
5901 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5902 size = i.disp32_encoding ? BIG : SMALL;
5903
5904 prefix = 0;
5905 if (i.prefix[DATA_PREFIX] != 0)
5906 {
5907 prefix = 1;
5908 i.prefixes -= 1;
5909 code16 ^= CODE16;
5910 }
5911 /* Pentium4 branch hints. */
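  /* 0x2e (CS override) hints "not taken", 0x3e (DS override) hints
     "taken" on the Pentium 4.  */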
5912 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5913 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5914 {
5915 prefix++;
5916 i.prefixes--;
5917 }
5918 if (i.prefix[REX_PREFIX] != 0)
5919 {
5920 prefix++;
5921 i.prefixes--;
5922 }
5923
5924 if (i.prefixes != 0 && !intel_syntax)
5925 as_warn (_("skipping prefixes on this instruction"));
5926
5927 /* It's always a symbol; End frag & setup for relax.
5928 Make sure there is enough room in this frag for the largest
5929 instruction we may generate in md_convert_frag. This is 2
5930 bytes for the opcode and room for the prefix and largest
5931 displacement. */
5932 frag_grow (prefix + 2 + 4);
5933 /* Prefix and 1 opcode byte go in fr_fix. */
5934 p = frag_more (prefix + 1);
5935 if (i.prefix[DATA_PREFIX] != 0)
5936 *p++ = DATA_PREFIX_OPCODE;
5937 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5938 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5939 *p++ = i.prefix[SEG_PREFIX];
5940 if (i.prefix[REX_PREFIX] != 0)
5941 *p++ = i.prefix[REX_PREFIX];
5942 *p = i.tm.base_opcode;
5943
5944 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5945 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5946 else if (cpu_arch_flags.bitfield.cpui386)
5947 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5948 else
5949 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5950 subtype |= code16;
5951
5952 sym = i.op[0].disps->X_add_symbol;
5953 off = i.op[0].disps->X_add_number;
5954
5955 if (i.op[0].disps->X_op != O_constant
5956 && i.op[0].disps->X_op != O_symbol)
5957 {
5958 /* Handle complex expressions. */
5959 sym = make_expr_symbol (i.op[0].disps);
5960 off = 0;
5961 }
5962
5963 /* 1 possible extra opcode + 4 byte displacement go in var part.
5964 Pass reloc in fr_var. */
5965 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5966 }
5967
5968 static void
5969 output_jump (void)
5970 {
5971 char *p;
5972 int size;
5973 fixS *fixP;
5974
5975 if (i.tm.opcode_modifier.jumpbyte)
5976 {
5977 /* This is a loop or jecxz type instruction. */
5978 size = 1;
5979 if (i.prefix[ADDR_PREFIX] != 0)
5980 {
5981 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5982 i.prefixes -= 1;
5983 }
5984 /* Pentium4 branch hints. */
5985 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5986 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5987 {
5988 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5989 i.prefixes--;
5990 }
5991 }
5992 else
5993 {
5994 int code16;
5995
5996 code16 = 0;
5997 if (flag_code == CODE_16BIT)
5998 code16 = CODE16;
5999
6000 if (i.prefix[DATA_PREFIX] != 0)
6001 {
6002 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6003 i.prefixes -= 1;
6004 code16 ^= CODE16;
6005 }
6006
6007 size = 4;
6008 if (code16)
6009 size = 2;
6010 }
6011
6012 if (i.prefix[REX_PREFIX] != 0)
6013 {
6014 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6015 i.prefixes -= 1;
6016 }
6017
6018 if (i.prefixes != 0 && !intel_syntax)
6019 as_warn (_("skipping prefixes on this instruction"));
6020
6021 p = frag_more (1 + size);
6022 *p++ = i.tm.base_opcode;
6023
6024 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6025 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6026
6027 /* All jumps handled here are signed, but don't use a signed limit
6028 check for 32 and 16 bit jumps as we want to allow wrap around at
6029 4G and 64k respectively. */
6030 if (size == 1)
6031 fixP->fx_signed = 1;
6032 }
6033
6034 static void
6035 output_interseg_jump (void)
6036 {
6037 char *p;
6038 int size;
6039 int prefix;
6040 int code16;
6041
6042 code16 = 0;
6043 if (flag_code == CODE_16BIT)
6044 code16 = CODE16;
6045
6046 prefix = 0;
6047 if (i.prefix[DATA_PREFIX] != 0)
6048 {
6049 prefix = 1;
6050 i.prefixes -= 1;
6051 code16 ^= CODE16;
6052 }
6053 if (i.prefix[REX_PREFIX] != 0)
6054 {
6055 prefix++;
6056 i.prefixes -= 1;
6057 }
6058
6059 size = 4;
6060 if (code16)
6061 size = 2;
6062
6063 if (i.prefixes != 0 && !intel_syntax)
6064 as_warn (_("skipping prefixes on this instruction"));
6065
6066 /* 1 opcode; 2 segment; offset */
6067 p = frag_more (prefix + 1 + 2 + size);
6068
6069 if (i.prefix[DATA_PREFIX] != 0)
6070 *p++ = DATA_PREFIX_OPCODE;
6071
6072 if (i.prefix[REX_PREFIX] != 0)
6073 *p++ = i.prefix[REX_PREFIX];
6074
6075 *p++ = i.tm.base_opcode;
6076 if (i.op[1].imms->X_op == O_constant)
6077 {
6078 offsetT n = i.op[1].imms->X_add_number;
6079
6080 if (size == 2
6081 && !fits_in_unsigned_word (n)
6082 && !fits_in_signed_word (n))
6083 {
6084 as_bad (_("16-bit jump out of range"));
6085 return;
6086 }
6087 md_number_to_chars (p, n, size);
6088 }
6089 else
6090 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6091 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6092 if (i.op[0].imms->X_op != O_constant)
6093 as_bad (_("can't handle non absolute segment in `%s'"),
6094 i.tm.name);
6095 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6096 }
6097
6098 static void
6099 output_insn (void)
6100 {
6101 fragS *insn_start_frag;
6102 offsetT insn_start_off;
6103
6104 /* Tie dwarf2 debug info to the address at the start of the insn.
6105 We can't do this after the insn has been output as the current
6106 frag may have been closed off, e.g. by frag_var. */
6107 dwarf2_emit_insn (0);
6108
6109 insn_start_frag = frag_now;
6110 insn_start_off = frag_now_fix ();
6111
6112 /* Output jumps. */
6113 if (i.tm.opcode_modifier.jump)
6114 output_branch ();
6115 else if (i.tm.opcode_modifier.jumpbyte
6116 || i.tm.opcode_modifier.jumpdword)
6117 output_jump ();
6118 else if (i.tm.opcode_modifier.jumpintersegment)
6119 output_interseg_jump ();
6120 else
6121 {
6122 /* Output normal instructions here. */
6123 char *p;
6124 unsigned char *q;
6125 unsigned int j;
6126 unsigned int prefix;
6127
6128 /* Since the VEX prefix contains the implicit prefix, we don't
6129 need the explicit prefix. */
6130 if (!i.tm.opcode_modifier.vex)
6131 {
6132 switch (i.tm.opcode_length)
6133 {
6134 case 3:
6135 if (i.tm.base_opcode & 0xff000000)
6136 {
6137 prefix = (i.tm.base_opcode >> 24) & 0xff;
6138 goto check_prefix;
6139 }
6140 break;
6141 case 2:
6142 if ((i.tm.base_opcode & 0xff0000) != 0)
6143 {
6144 prefix = (i.tm.base_opcode >> 16) & 0xff;
6145 if (i.tm.cpu_flags.bitfield.cpupadlock)
6146 {
6147 check_prefix:
6148 if (prefix != REPE_PREFIX_OPCODE
6149 || (i.prefix[REP_PREFIX]
6150 != REPE_PREFIX_OPCODE))
6151 add_prefix (prefix);
6152 }
6153 else
6154 add_prefix (prefix);
6155 }
6156 break;
6157 case 1:
6158 break;
6159 default:
6160 abort ();
6161 }
6162
6163 /* The prefix bytes. */
6164 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6165 if (*q)
6166 FRAG_APPEND_1_CHAR (*q);
6167 }
6168
6169 if (i.tm.opcode_modifier.vex)
6170 {
6171 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6172 if (*q)
6173 switch (j)
6174 {
6175 case REX_PREFIX:
6176 /* REX byte is encoded in VEX prefix. */
6177 break;
6178 case SEG_PREFIX:
6179 case ADDR_PREFIX:
6180 FRAG_APPEND_1_CHAR (*q);
6181 break;
6182 default:
6183 /* There should be no other prefixes for instructions
6184 with VEX prefix. */
6185 abort ();
6186 }
6187
6188 /* Now the VEX prefix. */
6189 p = frag_more (i.vex.length);
6190 for (j = 0; j < i.vex.length; j++)
6191 p[j] = i.vex.bytes[j];
6192 }
6193
6194 /* Now the opcode; be careful about word order here! */
6195 if (i.tm.opcode_length == 1)
6196 {
6197 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6198 }
6199 else
6200 {
6201 switch (i.tm.opcode_length)
6202 {
6203 case 3:
6204 p = frag_more (3);
6205 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6206 break;
6207 case 2:
6208 p = frag_more (2);
6209 break;
6210 default:
6211 abort ();
6212 break;
6213 }
6214
6215 /* Put out high byte first: can't use md_number_to_chars! */
6216 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6217 *p = i.tm.base_opcode & 0xff;
6218 }
6219
6220 /* Now the modrm byte and sib byte (if present). */
6221 if (i.tm.opcode_modifier.modrm)
6222 {
6223 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6224 | i.rm.reg << 3
6225 | i.rm.mode << 6));
6226 /* If i.rm.regmem == ESP (4)
6227 && i.rm.mode != (Register mode)
6228 && not 16 bit
6229 ==> need a SIB byte. */
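	  /* For example, `movl %eax,(%esp)' assembles to 0x89 0x04 0x24:
	     ModRM 0x04 (mod=00, reg=%eax, rm=100, the ESP escape) followed by
	     SIB 0x24 (scale=1, no index, base=%esp).  */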
6230 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6231 && i.rm.mode != 3
6232 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6233 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6234 | i.sib.index << 3
6235 | i.sib.scale << 6));
6236 }
6237
6238 if (i.disp_operands)
6239 output_disp (insn_start_frag, insn_start_off);
6240
6241 if (i.imm_operands)
6242 output_imm (insn_start_frag, insn_start_off);
6243 }
6244
6245 #ifdef DEBUG386
6246 if (flag_debug)
6247 {
6248 pi ("" /*line*/, &i);
6249 }
6250 #endif /* DEBUG386 */
6251 }
6252
6253 /* Return the size of the displacement operand N. */
6254
6255 static int
6256 disp_size (unsigned int n)
6257 {
6258 int size = 4;
6259 if (i.types[n].bitfield.disp64)
6260 size = 8;
6261 else if (i.types[n].bitfield.disp8)
6262 size = 1;
6263 else if (i.types[n].bitfield.disp16)
6264 size = 2;
6265 return size;
6266 }
6267
6268 /* Return the size of the immediate operand N. */
6269
6270 static int
6271 imm_size (unsigned int n)
6272 {
6273 int size = 4;
6274 if (i.types[n].bitfield.imm64)
6275 size = 8;
6276 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6277 size = 1;
6278 else if (i.types[n].bitfield.imm16)
6279 size = 2;
6280 return size;
6281 }
6282
6283 static void
6284 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6285 {
6286 char *p;
6287 unsigned int n;
6288
6289 for (n = 0; n < i.operands; n++)
6290 {
6291 if (operand_type_check (i.types[n], disp))
6292 {
6293 if (i.op[n].disps->X_op == O_constant)
6294 {
6295 int size = disp_size (n);
6296 offsetT val;
6297
6298 val = offset_in_range (i.op[n].disps->X_add_number,
6299 size);
6300 p = frag_more (size);
6301 md_number_to_chars (p, val, size);
6302 }
6303 else
6304 {
6305 enum bfd_reloc_code_real reloc_type;
6306 int size = disp_size (n);
6307 int sign = i.types[n].bitfield.disp32s;
6308 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6309
6310 /* We can't have 8 bit displacement here. */
6311 gas_assert (!i.types[n].bitfield.disp8);
6312
6313 /* The PC relative address is computed relative
6314 to the instruction boundary, so if an immediate
6315 field follows, we need to adjust the value. */
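		  /* Added illustration: with `testl $0x12345678, sym(%rip)'
		     the 4-byte immediate follows the displacement, so 4 is
		     subtracted from the displacement addend below to keep the
		     %rip-relative value correct.  */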
6316 if (pcrel && i.imm_operands)
6317 {
6318 unsigned int n1;
6319 int sz = 0;
6320
6321 for (n1 = 0; n1 < i.operands; n1++)
6322 if (operand_type_check (i.types[n1], imm))
6323 {
6324 /* Only one immediate is allowed for PC
6325 relative address. */
6326 gas_assert (sz == 0);
6327 sz = imm_size (n1);
6328 i.op[n].disps->X_add_number -= sz;
6329 }
6330 /* We should find the immediate. */
6331 gas_assert (sz != 0);
6332 }
6333
6334 p = frag_more (size);
6335 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6336 if (GOT_symbol
6337 && GOT_symbol == i.op[n].disps->X_add_symbol
6338 && (((reloc_type == BFD_RELOC_32
6339 || reloc_type == BFD_RELOC_X86_64_32S
6340 || (reloc_type == BFD_RELOC_64
6341 && object_64bit))
6342 && (i.op[n].disps->X_op == O_symbol
6343 || (i.op[n].disps->X_op == O_add
6344 && ((symbol_get_value_expression
6345 (i.op[n].disps->X_op_symbol)->X_op)
6346 == O_subtract))))
6347 || reloc_type == BFD_RELOC_32_PCREL))
6348 {
6349 offsetT add;
6350
6351 if (insn_start_frag == frag_now)
6352 add = (p - frag_now->fr_literal) - insn_start_off;
6353 else
6354 {
6355 fragS *fr;
6356
6357 add = insn_start_frag->fr_fix - insn_start_off;
6358 for (fr = insn_start_frag->fr_next;
6359 fr && fr != frag_now; fr = fr->fr_next)
6360 add += fr->fr_fix;
6361 add += p - frag_now->fr_literal;
6362 }
6363
6364 if (!object_64bit)
6365 {
6366 reloc_type = BFD_RELOC_386_GOTPC;
6367 i.op[n].imms->X_add_number += add;
6368 }
6369 else if (reloc_type == BFD_RELOC_64)
6370 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6371 else
6372 /* Don't do the adjustment for x86-64, as there
6373 the pcrel addressing is relative to the _next_
6374 insn, and that is taken care of in other code. */
6375 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6376 }
6377 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6378 i.op[n].disps, pcrel, reloc_type);
6379 }
6380 }
6381 }
6382 }
6383
6384 static void
6385 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6386 {
6387 char *p;
6388 unsigned int n;
6389
6390 for (n = 0; n < i.operands; n++)
6391 {
6392 if (operand_type_check (i.types[n], imm))
6393 {
6394 if (i.op[n].imms->X_op == O_constant)
6395 {
6396 int size = imm_size (n);
6397 offsetT val;
6398
6399 val = offset_in_range (i.op[n].imms->X_add_number,
6400 size);
6401 p = frag_more (size);
6402 md_number_to_chars (p, val, size);
6403 }
6404 else
6405 {
6406 /* Not absolute_section.
6407 Need a 32-bit fixup (don't support 8bit
6408 non-absolute imms). Try to support other
6409 sizes ... */
6410 enum bfd_reloc_code_real reloc_type;
6411 int size = imm_size (n);
6412 int sign;
6413
6414 if (i.types[n].bitfield.imm32s
6415 && (i.suffix == QWORD_MNEM_SUFFIX
6416 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6417 sign = 1;
6418 else
6419 sign = 0;
6420
6421 p = frag_more (size);
6422 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6423
6424 /* This is tough to explain. We end up with this one if we
6425 * have operands that look like
6426 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6427 * obtain the absolute address of the GOT, and it is strongly
6428 * preferable from a performance point of view to avoid using
6429 * a runtime relocation for this. The actual sequence of
6430 * instructions often looks something like:
6431 *
6432 * call .L66
6433 * .L66:
6434 * popl %ebx
6435 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6436 *
6437 * The call and pop essentially return the absolute address
6438 * of the label .L66 and store it in %ebx. The linker itself
6439 * will ultimately change the first operand of the addl so
6440 * that %ebx points to the GOT, but to keep things simple, the
6441 * .o file must have this operand set so that it generates not
6442 * the absolute address of .L66, but the absolute address of
6443 * itself. This allows the linker itself to simply treat a GOTPC
6444 * relocation as asking for a pcrel offset to the GOT to be
6445 * added in, and the addend of the relocation is stored in the
6446 * operand field for the instruction itself.
6447 *
6448 * Our job here is to fix the operand so that it would add
6449 * the correct offset so that %ebx would point to itself. The
6450 * thing that is tricky is that .-.L66 will point to the
6451 * beginning of the instruction, so we need to further modify
6452 * the operand so that it will point to itself. There are
6453 * other cases where you have something like:
6454 *
6455 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6456 *
6457 * and here no correction would be required. Internally in
6458 * the assembler we treat operands of this form as not being
6459 * pcrel since the '.' is explicitly mentioned, and I wonder
6460 * whether it would simplify matters to do it this way. Who
6461 * knows. In earlier versions of the PIC patches, the
6462 * pcrel_adjust field was used to store the correction, but
6463 * since the expression is not pcrel, I felt it would be
6464 * confusing to do it this way. */
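	      /* Added illustration: `add' below is the offset of this
		 immediate field from the start of the instruction.  For the
		 `addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx' above (0x81,
		 ModRM 0xc3, then the 32-bit immediate) it is 2, and adding it
		 to the operand makes the GOTPC addend refer to the immediate
		 field itself rather than to the start of the insn.  */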
6465
6466 if ((reloc_type == BFD_RELOC_32
6467 || reloc_type == BFD_RELOC_X86_64_32S
6468 || reloc_type == BFD_RELOC_64)
6469 && GOT_symbol
6470 && GOT_symbol == i.op[n].imms->X_add_symbol
6471 && (i.op[n].imms->X_op == O_symbol
6472 || (i.op[n].imms->X_op == O_add
6473 && ((symbol_get_value_expression
6474 (i.op[n].imms->X_op_symbol)->X_op)
6475 == O_subtract))))
6476 {
6477 offsetT add;
6478
6479 if (insn_start_frag == frag_now)
6480 add = (p - frag_now->fr_literal) - insn_start_off;
6481 else
6482 {
6483 fragS *fr;
6484
6485 add = insn_start_frag->fr_fix - insn_start_off;
6486 for (fr = insn_start_frag->fr_next;
6487 fr && fr != frag_now; fr = fr->fr_next)
6488 add += fr->fr_fix;
6489 add += p - frag_now->fr_literal;
6490 }
6491
6492 if (!object_64bit)
6493 reloc_type = BFD_RELOC_386_GOTPC;
6494 else if (size == 4)
6495 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6496 else if (size == 8)
6497 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6498 i.op[n].imms->X_add_number += add;
6499 }
6500 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6501 i.op[n].imms, 0, reloc_type);
6502 }
6503 }
6504 }
6505 }
6506 \f
6507 /* x86_cons_fix_new is called via the expression parsing code when a
6508 reloc is needed. We use this hook to get the correct .got reloc. */
6509 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6510 static int cons_sign = -1;
6511
6512 void
6513 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6514 expressionS *exp)
6515 {
6516 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6517
6518 got_reloc = NO_RELOC;
6519
6520 #ifdef TE_PE
6521 if (exp->X_op == O_secrel)
6522 {
6523 exp->X_op = O_symbol;
6524 r = BFD_RELOC_32_SECREL;
6525 }
6526 #endif
6527
6528 fix_new_exp (frag, off, len, exp, 0, r);
6529 }
6530
6531 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6532 # define lex_got(reloc, adjust, types) NULL
6533 #else
6534 /* Parse operands of the form
6535 <symbol>@GOTOFF+<nnn>
6536 and similar .plt or .got references.
6537
6538 If we find one, set up the correct relocation in RELOC and copy the
6539 input string, minus the `@GOTOFF' into a malloc'd buffer for
6540 parsing by the calling routine. Return this buffer, and if ADJUST
6541 is non-null set it to the length of the string we removed from the
6542 input line. Otherwise return NULL. */
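/* For example (illustrative, not part of the original comment): given
   `foo@GOTOFF+4' in 32-bit mode, *REL is set to BFD_RELOC_386_GOTOFF and the
   returned buffer holds `foo +4' - the reloc token is replaced by a space so
   that junk such as `foo@GOTOFF1' is still diagnosed.  */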
6543 static char *
6544 lex_got (enum bfd_reloc_code_real *rel,
6545 int *adjust,
6546 i386_operand_type *types)
6547 {
6548 /* Some of the relocations depend on the size of the field that is to
6549 be relocated. But in our callers i386_immediate and i386_displacement
6550 we don't yet know the operand size (this will be set by insn
6551 matching). Hence we record the word32 relocation here,
6552 and adjust the reloc according to the real size in reloc(). */
6553 static const struct {
6554 const char *str;
6555 int len;
6556 const enum bfd_reloc_code_real rel[2];
6557 const i386_operand_type types64;
6558 } gotrel[] = {
6559 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6560 BFD_RELOC_X86_64_PLTOFF64 },
6561 OPERAND_TYPE_IMM64 },
6562 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6563 BFD_RELOC_X86_64_PLT32 },
6564 OPERAND_TYPE_IMM32_32S_DISP32 },
6565 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6566 BFD_RELOC_X86_64_GOTPLT64 },
6567 OPERAND_TYPE_IMM64_DISP64 },
6568 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6569 BFD_RELOC_X86_64_GOTOFF64 },
6570 OPERAND_TYPE_IMM64_DISP64 },
6571 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6572 BFD_RELOC_X86_64_GOTPCREL },
6573 OPERAND_TYPE_IMM32_32S_DISP32 },
6574 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6575 BFD_RELOC_X86_64_TLSGD },
6576 OPERAND_TYPE_IMM32_32S_DISP32 },
6577 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6578 _dummy_first_bfd_reloc_code_real },
6579 OPERAND_TYPE_NONE },
6580 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6581 BFD_RELOC_X86_64_TLSLD },
6582 OPERAND_TYPE_IMM32_32S_DISP32 },
6583 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6584 BFD_RELOC_X86_64_GOTTPOFF },
6585 OPERAND_TYPE_IMM32_32S_DISP32 },
6586 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6587 BFD_RELOC_X86_64_TPOFF32 },
6588 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6589 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6590 _dummy_first_bfd_reloc_code_real },
6591 OPERAND_TYPE_NONE },
6592 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6593 BFD_RELOC_X86_64_DTPOFF32 },
6594 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6595 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6596 _dummy_first_bfd_reloc_code_real },
6597 OPERAND_TYPE_NONE },
6598 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6599 _dummy_first_bfd_reloc_code_real },
6600 OPERAND_TYPE_NONE },
6601 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6602 BFD_RELOC_X86_64_GOT32 },
6603 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6604 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6605 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6606 OPERAND_TYPE_IMM32_32S_DISP32 },
6607 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6608 BFD_RELOC_X86_64_TLSDESC_CALL },
6609 OPERAND_TYPE_IMM32_32S_DISP32 },
6610 };
6611 char *cp;
6612 unsigned int j;
6613
6614 if (!IS_ELF)
6615 return NULL;
6616
6617 for (cp = input_line_pointer; *cp != '@'; cp++)
6618 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6619 return NULL;
6620
6621 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6622 {
6623 int len = gotrel[j].len;
6624 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6625 {
6626 if (gotrel[j].rel[object_64bit] != 0)
6627 {
6628 int first, second;
6629 char *tmpbuf, *past_reloc;
6630
6631 *rel = gotrel[j].rel[object_64bit];
6632 if (adjust)
6633 *adjust = len;
6634
6635 if (types)
6636 {
6637 if (flag_code != CODE_64BIT)
6638 {
6639 types->bitfield.imm32 = 1;
6640 types->bitfield.disp32 = 1;
6641 }
6642 else
6643 *types = gotrel[j].types64;
6644 }
6645
6646 if (GOT_symbol == NULL)
6647 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6648
6649 /* The length of the first part of our input line. */
6650 first = cp - input_line_pointer;
6651
6652 /* The second part goes from after the reloc token until
6653 (and including) an end_of_line char or comma. */
6654 past_reloc = cp + 1 + len;
6655 cp = past_reloc;
6656 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6657 ++cp;
6658 second = cp + 1 - past_reloc;
6659
6660 /* Allocate and copy string. The trailing NUL shouldn't
6661 be necessary, but be safe. */
6662 tmpbuf = (char *) xmalloc (first + second + 2);
6663 memcpy (tmpbuf, input_line_pointer, first);
6664 if (second != 0 && *past_reloc != ' ')
6665 /* Replace the relocation token with ' ', so that
6666 errors like foo@GOTOFF1 will be detected. */
6667 tmpbuf[first++] = ' ';
6668 memcpy (tmpbuf + first, past_reloc, second);
6669 tmpbuf[first + second] = '\0';
6670 return tmpbuf;
6671 }
6672
6673 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6674 gotrel[j].str, 1 << (5 + object_64bit));
6675 return NULL;
6676 }
6677 }
6678
6679 /* Might be a symbol version string. Don't as_bad here. */
6680 return NULL;
6681 }
6682 #endif
6683
6684 void
6685 x86_cons (expressionS *exp, int size)
6686 {
6687 intel_syntax = -intel_syntax;
6688
6689 exp->X_md = 0;
6690 if (size == 4 || (object_64bit && size == 8))
6691 {
6692 /* Handle @GOTOFF and the like in an expression. */
6693 char *save;
6694 char *gotfree_input_line;
6695 int adjust = 0;
6696
6697 save = input_line_pointer;
6698 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6699 if (gotfree_input_line)
6700 input_line_pointer = gotfree_input_line;
6701
6702 expression (exp);
6703
6704 if (gotfree_input_line)
6705 {
6706 /* expression () has merrily parsed up to the end of line,
6707 or a comma - in the wrong buffer. Transfer how far
6708 input_line_pointer has moved to the right buffer. */
6709 input_line_pointer = (save
6710 + (input_line_pointer - gotfree_input_line)
6711 + adjust);
6712 free (gotfree_input_line);
6713 if (exp->X_op == O_constant
6714 || exp->X_op == O_absent
6715 || exp->X_op == O_illegal
6716 || exp->X_op == O_register
6717 || exp->X_op == O_big)
6718 {
6719 char c = *input_line_pointer;
6720 *input_line_pointer = 0;
6721 as_bad (_("missing or invalid expression `%s'"), save);
6722 *input_line_pointer = c;
6723 }
6724 }
6725 }
6726 else
6727 expression (exp);
6728
6729 intel_syntax = -intel_syntax;
6730
6731 if (intel_syntax)
6732 i386_intel_simplify (exp);
6733 }
6734
6735 static void
6736 signed_cons (int size)
6737 {
6738 if (flag_code == CODE_64BIT)
6739 cons_sign = 1;
6740 cons (size);
6741 cons_sign = -1;
6742 }
6743
6744 #ifdef TE_PE
6745 static void
6746 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6747 {
6748 expressionS exp;
6749
6750 do
6751 {
6752 expression (&exp);
6753 if (exp.X_op == O_symbol)
6754 exp.X_op = O_secrel;
6755
6756 emit_expr (&exp, 4);
6757 }
6758 while (*input_line_pointer++ == ',');
6759
6760 input_line_pointer--;
6761 demand_empty_rest_of_line ();
6762 }
6763 #endif
6764
6765 static int
6766 i386_immediate (char *imm_start)
6767 {
6768 char *save_input_line_pointer;
6769 char *gotfree_input_line;
6770 segT exp_seg = 0;
6771 expressionS *exp;
6772 i386_operand_type types;
6773
6774 operand_type_set (&types, ~0);
6775
6776 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6777 {
6778 as_bad (_("at most %d immediate operands are allowed"),
6779 MAX_IMMEDIATE_OPERANDS);
6780 return 0;
6781 }
6782
6783 exp = &im_expressions[i.imm_operands++];
6784 i.op[this_operand].imms = exp;
6785
6786 if (is_space_char (*imm_start))
6787 ++imm_start;
6788
6789 save_input_line_pointer = input_line_pointer;
6790 input_line_pointer = imm_start;
6791
6792 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6793 if (gotfree_input_line)
6794 input_line_pointer = gotfree_input_line;
6795
6796 exp_seg = expression (exp);
6797
6798 SKIP_WHITESPACE ();
6799 if (*input_line_pointer)
6800 as_bad (_("junk `%s' after expression"), input_line_pointer);
6801
6802 input_line_pointer = save_input_line_pointer;
6803 if (gotfree_input_line)
6804 {
6805 free (gotfree_input_line);
6806
6807 if (exp->X_op == O_constant || exp->X_op == O_register)
6808 exp->X_op = O_illegal;
6809 }
6810
6811 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6812 }
6813
6814 static int
6815 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6816 i386_operand_type types, const char *imm_start)
6817 {
6818 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6819 {
6820 if (imm_start)
6821 as_bad (_("missing or invalid immediate expression `%s'"),
6822 imm_start);
6823 return 0;
6824 }
6825 else if (exp->X_op == O_constant)
6826 {
6827 /* Size it properly later. */
6828 i.types[this_operand].bitfield.imm64 = 1;
6829 /* If not 64bit, sign extend val. */
6830 if (flag_code != CODE_64BIT
6831 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6832 exp->X_add_number
6833 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6834 }
6835 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6836 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6837 && exp_seg != absolute_section
6838 && exp_seg != text_section
6839 && exp_seg != data_section
6840 && exp_seg != bss_section
6841 && exp_seg != undefined_section
6842 && !bfd_is_com_section (exp_seg))
6843 {
6844 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6845 return 0;
6846 }
6847 #endif
6848 else if (!intel_syntax && exp->X_op == O_register)
6849 {
6850 if (imm_start)
6851 as_bad (_("illegal immediate register operand %s"), imm_start);
6852 return 0;
6853 }
6854 else
6855 {
6856 /* This is an address. The size of the address will be
6857 determined later, depending on destination register,
6858 suffix, or the default for the section. */
6859 i.types[this_operand].bitfield.imm8 = 1;
6860 i.types[this_operand].bitfield.imm16 = 1;
6861 i.types[this_operand].bitfield.imm32 = 1;
6862 i.types[this_operand].bitfield.imm32s = 1;
6863 i.types[this_operand].bitfield.imm64 = 1;
6864 i.types[this_operand] = operand_type_and (i.types[this_operand],
6865 types);
6866 }
6867
6868 return 1;
6869 }
6870
6871 static char *
6872 i386_scale (char *scale)
6873 {
6874 offsetT val;
6875 char *save = input_line_pointer;
6876
6877 input_line_pointer = scale;
6878 val = get_absolute_expression ();
6879
6880 switch (val)
6881 {
6882 case 1:
6883 i.log2_scale_factor = 0;
6884 break;
6885 case 2:
6886 i.log2_scale_factor = 1;
6887 break;
6888 case 4:
6889 i.log2_scale_factor = 2;
6890 break;
6891 case 8:
6892 i.log2_scale_factor = 3;
6893 break;
6894 default:
6895 {
6896 char sep = *input_line_pointer;
6897
6898 *input_line_pointer = '\0';
6899 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6900 scale);
6901 *input_line_pointer = sep;
6902 input_line_pointer = save;
6903 return NULL;
6904 }
6905 }
6906 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6907 {
6908 as_warn (_("scale factor of %d without an index register"),
6909 1 << i.log2_scale_factor);
6910 i.log2_scale_factor = 0;
6911 }
6912 scale = input_line_pointer;
6913 input_line_pointer = save;
6914 return scale;
6915 }
6916
6917 static int
6918 i386_displacement (char *disp_start, char *disp_end)
6919 {
6920 expressionS *exp;
6921 segT exp_seg = 0;
6922 char *save_input_line_pointer;
6923 char *gotfree_input_line;
6924 int override;
6925 i386_operand_type bigdisp, types = anydisp;
6926 int ret;
6927
6928 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6929 {
6930 as_bad (_("at most %d displacement operands are allowed"),
6931 MAX_MEMORY_OPERANDS);
6932 return 0;
6933 }
6934
6935 operand_type_set (&bigdisp, 0);
6936 if ((i.types[this_operand].bitfield.jumpabsolute)
6937 || (!current_templates->start->opcode_modifier.jump
6938 && !current_templates->start->opcode_modifier.jumpdword))
6939 {
6940 bigdisp.bitfield.disp32 = 1;
6941 override = (i.prefix[ADDR_PREFIX] != 0);
6942 if (flag_code == CODE_64BIT)
6943 {
6944 if (!override)
6945 {
6946 bigdisp.bitfield.disp32s = 1;
6947 bigdisp.bitfield.disp64 = 1;
6948 }
6949 }
6950 else if ((flag_code == CODE_16BIT) ^ override)
6951 {
6952 bigdisp.bitfield.disp32 = 0;
6953 bigdisp.bitfield.disp16 = 1;
6954 }
6955 }
6956 else
6957 {
6958 /* For PC-relative branches, the width of the displacement
6959 is dependent upon data size, not address size. */
6960 override = (i.prefix[DATA_PREFIX] != 0);
6961 if (flag_code == CODE_64BIT)
6962 {
6963 if (override || i.suffix == WORD_MNEM_SUFFIX)
6964 bigdisp.bitfield.disp16 = 1;
6965 else
6966 {
6967 bigdisp.bitfield.disp32 = 1;
6968 bigdisp.bitfield.disp32s = 1;
6969 }
6970 }
6971 else
6972 {
6973 if (!override)
6974 override = (i.suffix == (flag_code != CODE_16BIT
6975 ? WORD_MNEM_SUFFIX
6976 : LONG_MNEM_SUFFIX));
6977 bigdisp.bitfield.disp32 = 1;
6978 if ((flag_code == CODE_16BIT) ^ override)
6979 {
6980 bigdisp.bitfield.disp32 = 0;
6981 bigdisp.bitfield.disp16 = 1;
6982 }
6983 }
6984 }
6985 i.types[this_operand] = operand_type_or (i.types[this_operand],
6986 bigdisp);
6987
6988 exp = &disp_expressions[i.disp_operands];
6989 i.op[this_operand].disps = exp;
6990 i.disp_operands++;
6991 save_input_line_pointer = input_line_pointer;
6992 input_line_pointer = disp_start;
6993 END_STRING_AND_SAVE (disp_end);
6994
6995 #ifndef GCC_ASM_O_HACK
6996 #define GCC_ASM_O_HACK 0
6997 #endif
6998 #if GCC_ASM_O_HACK
6999 END_STRING_AND_SAVE (disp_end + 1);
7000 if (i.types[this_operand].bitfield.baseIndex
7001 && displacement_string_end[-1] == '+')
7002 {
7003 /* This hack is to avoid a warning when using the "o"
7004 constraint within gcc asm statements.
7005 For instance:
7006
7007 #define _set_tssldt_desc(n,addr,limit,type) \
7008 __asm__ __volatile__ ( \
7009 "movw %w2,%0\n\t" \
7010 "movw %w1,2+%0\n\t" \
7011 "rorl $16,%1\n\t" \
7012 "movb %b1,4+%0\n\t" \
7013 "movb %4,5+%0\n\t" \
7014 "movb $0,6+%0\n\t" \
7015 "movb %h1,7+%0\n\t" \
7016 "rorl $16,%1" \
7017 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7018
7019 This works great except that the output assembly ends
7020 up looking a bit weird if it turns out that there is
7021 no offset. You end up producing code that looks like:
7022
7023 #APP
7024 movw $235,(%eax)
7025 movw %dx,2+(%eax)
7026 rorl $16,%edx
7027 movb %dl,4+(%eax)
7028 movb $137,5+(%eax)
7029 movb $0,6+(%eax)
7030 movb %dh,7+(%eax)
7031 rorl $16,%edx
7032 #NO_APP
7033
7034 So here we provide the missing zero. */
7035
7036 *displacement_string_end = '0';
7037 }
7038 #endif
7039 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7040 if (gotfree_input_line)
7041 input_line_pointer = gotfree_input_line;
7042
7043 exp_seg = expression (exp);
7044
7045 SKIP_WHITESPACE ();
7046 if (*input_line_pointer)
7047 as_bad (_("junk `%s' after expression"), input_line_pointer);
7048 #if GCC_ASM_O_HACK
7049 RESTORE_END_STRING (disp_end + 1);
7050 #endif
7051 input_line_pointer = save_input_line_pointer;
7052 if (gotfree_input_line)
7053 {
7054 free (gotfree_input_line);
7055
7056 if (exp->X_op == O_constant || exp->X_op == O_register)
7057 exp->X_op = O_illegal;
7058 }
7059
7060 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7061
7062 RESTORE_END_STRING (disp_end);
7063
7064 return ret;
7065 }
7066
7067 static int
7068 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7069 i386_operand_type types, const char *disp_start)
7070 {
7071 i386_operand_type bigdisp;
7072 int ret = 1;
7073
7074 /* We do this to make sure that the section symbol is in
7075 the symbol table. We will ultimately change the relocation
7076 to be relative to the beginning of the section. */
7077 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7078 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7079 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7080 {
7081 if (exp->X_op != O_symbol)
7082 goto inv_disp;
7083
7084 if (S_IS_LOCAL (exp->X_add_symbol)
7085 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7086 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7087 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7088 exp->X_op = O_subtract;
7089 exp->X_op_symbol = GOT_symbol;
7090 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7091 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7092 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7093 i.reloc[this_operand] = BFD_RELOC_64;
7094 else
7095 i.reloc[this_operand] = BFD_RELOC_32;
7096 }
7097
7098 else if (exp->X_op == O_absent
7099 || exp->X_op == O_illegal
7100 || exp->X_op == O_big)
7101 {
7102 inv_disp:
7103 as_bad (_("missing or invalid displacement expression `%s'"),
7104 disp_start);
7105 ret = 0;
7106 }
7107
7108 else if (flag_code == CODE_64BIT
7109 && !i.prefix[ADDR_PREFIX]
7110 && exp->X_op == O_constant)
7111 {
7112 /* Since the displacement is sign extended to 64bit, don't allow
7113 disp32 and turn off disp32s if they are out of range. */
7114 i.types[this_operand].bitfield.disp32 = 0;
7115 if (!fits_in_signed_long (exp->X_add_number))
7116 {
7117 i.types[this_operand].bitfield.disp32s = 0;
7118 if (i.types[this_operand].bitfield.baseindex)
7119 {
7120 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7121 (long) exp->X_add_number);
7122 ret = 0;
7123 }
7124 }
7125 }
7126
7127 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7128 else if (exp->X_op != O_constant
7129 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7130 && exp_seg != absolute_section
7131 && exp_seg != text_section
7132 && exp_seg != data_section
7133 && exp_seg != bss_section
7134 && exp_seg != undefined_section
7135 && !bfd_is_com_section (exp_seg))
7136 {
7137 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7138 ret = 0;
7139 }
7140 #endif
7141
7142 /* Check if this is a displacement only operand. */
7143 bigdisp = i.types[this_operand];
7144 bigdisp.bitfield.disp8 = 0;
7145 bigdisp.bitfield.disp16 = 0;
7146 bigdisp.bitfield.disp32 = 0;
7147 bigdisp.bitfield.disp32s = 0;
7148 bigdisp.bitfield.disp64 = 0;
7149 if (operand_type_all_zero (&bigdisp))
7150 i.types[this_operand] = operand_type_and (i.types[this_operand],
7151 types);
7152
7153 return ret;
7154 }
7155
7156 /* Make sure the memory operand we've been dealt is valid.
7157 Return 1 on success, 0 on a failure. */
7158
7159 static int
7160 i386_index_check (const char *operand_string)
7161 {
7162 int ok;
7163 const char *kind = "base/index";
7164 #if INFER_ADDR_PREFIX
7165 int fudged = 0;
7166
7167 tryprefix:
7168 #endif
7169 ok = 1;
7170 if (current_templates->start->opcode_modifier.isstring
7171 && !current_templates->start->opcode_modifier.immext
7172 && (current_templates->end[-1].opcode_modifier.isstring
7173 || i.mem_operands))
7174 {
7175 /* Memory operands of string insns are special in that they only allow
7176 a single register (rDI, rSI, or rBX) as their memory address. */
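      /* For example, `movs' implicitly uses %ds:(rSI) and %es:(rDI), and
	 `xlat' uses (rBX); a displacement or an index register makes the
	 operand invalid here.  */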
7177 unsigned int expected;
7178
7179 kind = "string address";
7180
7181 if (current_templates->start->opcode_modifier.w)
7182 {
7183 i386_operand_type type = current_templates->end[-1].operand_types[0];
7184
7185 if (!type.bitfield.baseindex
7186 || ((!i.mem_operands != !intel_syntax)
7187 && current_templates->end[-1].operand_types[1]
7188 .bitfield.baseindex))
7189 type = current_templates->end[-1].operand_types[1];
7190 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7191 }
7192 else
7193 expected = 3 /* rBX */;
7194
7195 if (!i.base_reg || i.index_reg
7196 || operand_type_check (i.types[this_operand], disp))
7197 ok = -1;
7198 else if (!(flag_code == CODE_64BIT
7199 ? i.prefix[ADDR_PREFIX]
7200 ? i.base_reg->reg_type.bitfield.reg32
7201 : i.base_reg->reg_type.bitfield.reg64
7202 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7203 ? i.base_reg->reg_type.bitfield.reg32
7204 : i.base_reg->reg_type.bitfield.reg16))
7205 ok = 0;
7206 else if (i.base_reg->reg_num != expected)
7207 ok = -1;
7208
7209 if (ok < 0)
7210 {
7211 unsigned int j;
7212
7213 for (j = 0; j < i386_regtab_size; ++j)
7214 if ((flag_code == CODE_64BIT
7215 ? i.prefix[ADDR_PREFIX]
7216 ? i386_regtab[j].reg_type.bitfield.reg32
7217 : i386_regtab[j].reg_type.bitfield.reg64
7218 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7219 ? i386_regtab[j].reg_type.bitfield.reg32
7220 : i386_regtab[j].reg_type.bitfield.reg16)
7221 && i386_regtab[j].reg_num == expected)
7222 break;
7223 gas_assert (j < i386_regtab_size);
7224 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7225 operand_string,
7226 intel_syntax ? '[' : '(',
7227 register_prefix,
7228 i386_regtab[j].reg_name,
7229 intel_syntax ? ']' : ')');
7230 ok = 1;
7231 }
7232 }
7233 else if (flag_code == CODE_64BIT)
7234 {
7235 if ((i.base_reg
7236 && ((i.prefix[ADDR_PREFIX] == 0
7237 && !i.base_reg->reg_type.bitfield.reg64)
7238 || (i.prefix[ADDR_PREFIX]
7239 && !i.base_reg->reg_type.bitfield.reg32))
7240 && (i.index_reg
7241 || i.base_reg->reg_num !=
7242 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7243 || (i.index_reg
7244 && !(i.index_reg->reg_type.bitfield.regxmm
7245 || i.index_reg->reg_type.bitfield.regymm)
7246 && (!i.index_reg->reg_type.bitfield.baseindex
7247 || (i.prefix[ADDR_PREFIX] == 0
7248 && i.index_reg->reg_num != RegRiz
7249 && !i.index_reg->reg_type.bitfield.reg64
7250 )
7251 || (i.prefix[ADDR_PREFIX]
7252 && i.index_reg->reg_num != RegEiz
7253 && !i.index_reg->reg_type.bitfield.reg32))))
7254 ok = 0;
7255 }
7256 else
7257 {
7258 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7259 {
7260 /* 16bit checks. */
7261 if ((i.base_reg
7262 && (!i.base_reg->reg_type.bitfield.reg16
7263 || !i.base_reg->reg_type.bitfield.baseindex))
7264 || (i.index_reg
7265 && (!i.index_reg->reg_type.bitfield.reg16
7266 || !i.index_reg->reg_type.bitfield.baseindex
7267 || !(i.base_reg
7268 && i.base_reg->reg_num < 6
7269 && i.index_reg->reg_num >= 6
7270 && i.log2_scale_factor == 0))))
7271 ok = 0;
7272 }
7273 else
7274 {
7275 /* 32bit checks. */
7276 if ((i.base_reg
7277 && !i.base_reg->reg_type.bitfield.reg32)
7278 || (i.index_reg
7279 && !i.index_reg->reg_type.bitfield.regxmm
7280 && !i.index_reg->reg_type.bitfield.regymm
7281 && ((!i.index_reg->reg_type.bitfield.reg32
7282 && i.index_reg->reg_num != RegEiz)
7283 || !i.index_reg->reg_type.bitfield.baseindex)))
7284 ok = 0;
7285 }
7286 }
7287 if (!ok)
7288 {
7289 #if INFER_ADDR_PREFIX
7290 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7291 {
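	  /* Illustrative note: e.g. `movl (%si),%eax' in 32-bit code fails
	     the 32-bit checks above, so we infer a 0x67 address-size prefix
	     here and jump back to re-check the operand against the 16-bit
	     rules.  */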
7292 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7293 i.prefixes += 1;
7294 /* Change the size of any displacement too. At most one of
7295 Disp16 or Disp32 is set.
7296 FIXME. There doesn't seem to be any real need for separate
7297 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7298 Removing them would probably clean up the code quite a lot. */
7299 if (flag_code != CODE_64BIT
7300 && (i.types[this_operand].bitfield.disp16
7301 || i.types[this_operand].bitfield.disp32))
7302 i.types[this_operand]
7303 = operand_type_xor (i.types[this_operand], disp16_32);
7304 fudged = 1;
7305 goto tryprefix;
7306 }
7307 if (fudged)
7308 as_bad (_("`%s' is not a valid %s expression"),
7309 operand_string,
7310 kind);
7311 else
7312 #endif
7313 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7314 operand_string,
7315 flag_code_names[i.prefix[ADDR_PREFIX]
7316 ? flag_code == CODE_32BIT
7317 ? CODE_16BIT
7318 : CODE_32BIT
7319 : flag_code],
7320 kind);
7321 }
7322 return ok;
7323 }
7324
7325 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7326 on error. */
7327
7328 static int
7329 i386_att_operand (char *operand_string)
7330 {
7331 const reg_entry *r;
7332 char *end_op;
7333 char *op_string = operand_string;
7334
7335 if (is_space_char (*op_string))
7336 ++op_string;
7337
7338 /* We check for an absolute prefix (differentiating,
7339 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7340 if (*op_string == ABSOLUTE_PREFIX)
7341 {
7342 ++op_string;
7343 if (is_space_char (*op_string))
7344 ++op_string;
7345 i.types[this_operand].bitfield.jumpabsolute = 1;
7346 }
7347
7348 /* Check if operand is a register. */
7349 if ((r = parse_register (op_string, &end_op)) != NULL)
7350 {
7351 i386_operand_type temp;
7352
7353 /* Check for a segment override by searching for ':' after a
7354 segment register. */
7355 op_string = end_op;
7356 if (is_space_char (*op_string))
7357 ++op_string;
7358 if (*op_string == ':'
7359 && (r->reg_type.bitfield.sreg2
7360 || r->reg_type.bitfield.sreg3))
7361 {
7362 switch (r->reg_num)
7363 {
7364 case 0:
7365 i.seg[i.mem_operands] = &es;
7366 break;
7367 case 1:
7368 i.seg[i.mem_operands] = &cs;
7369 break;
7370 case 2:
7371 i.seg[i.mem_operands] = &ss;
7372 break;
7373 case 3:
7374 i.seg[i.mem_operands] = &ds;
7375 break;
7376 case 4:
7377 i.seg[i.mem_operands] = &fs;
7378 break;
7379 case 5:
7380 i.seg[i.mem_operands] = &gs;
7381 break;
7382 }
7383
7384 /* Skip the ':' and whitespace. */
7385 ++op_string;
7386 if (is_space_char (*op_string))
7387 ++op_string;
7388
7389 if (!is_digit_char (*op_string)
7390 && !is_identifier_char (*op_string)
7391 && *op_string != '('
7392 && *op_string != ABSOLUTE_PREFIX)
7393 {
7394 as_bad (_("bad memory operand `%s'"), op_string);
7395 return 0;
7396 }
7397 /* Handle case of %es:*foo. */
7398 if (*op_string == ABSOLUTE_PREFIX)
7399 {
7400 ++op_string;
7401 if (is_space_char (*op_string))
7402 ++op_string;
7403 i.types[this_operand].bitfield.jumpabsolute = 1;
7404 }
7405 goto do_memory_reference;
7406 }
7407 if (*op_string)
7408 {
7409 as_bad (_("junk `%s' after register"), op_string);
7410 return 0;
7411 }
7412 temp = r->reg_type;
7413 temp.bitfield.baseindex = 0;
7414 i.types[this_operand] = operand_type_or (i.types[this_operand],
7415 temp);
7416 i.types[this_operand].bitfield.unspecified = 0;
7417 i.op[this_operand].regs = r;
7418 i.reg_operands++;
7419 }
7420 else if (*op_string == REGISTER_PREFIX)
7421 {
7422 as_bad (_("bad register name `%s'"), op_string);
7423 return 0;
7424 }
7425 else if (*op_string == IMMEDIATE_PREFIX)
7426 {
7427 ++op_string;
7428 if (i.types[this_operand].bitfield.jumpabsolute)
7429 {
7430 as_bad (_("immediate operand illegal with absolute jump"));
7431 return 0;
7432 }
7433 if (!i386_immediate (op_string))
7434 return 0;
7435 }
7436 else if (is_digit_char (*op_string)
7437 || is_identifier_char (*op_string)
7438 || *op_string == '(')
7439 {
7440 /* This is a memory reference of some sort. */
7441 char *base_string;
7442
7443 /* Start and end of displacement string expression (if found). */
7444 char *displacement_string_start;
7445 char *displacement_string_end;
7446
7447 do_memory_reference:
7448 if ((i.mem_operands == 1
7449 && !current_templates->start->opcode_modifier.isstring)
7450 || i.mem_operands == 2)
7451 {
7452 as_bad (_("too many memory references for `%s'"),
7453 current_templates->start->name);
7454 return 0;
7455 }
7456
7457 /* Check for base index form. We detect the base index form by
7458 looking for an ')' at the end of the operand, searching
7459 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7460 after the '('. */
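      /* E.g. in `-4(%ebp,%eax,4)' the trailing `)' introduces the base/index
	 part: base register %ebp, index register %eax, scale factor 4, with
	 `-4' left as the displacement expression.  */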
7461 base_string = op_string + strlen (op_string);
7462
7463 --base_string;
7464 if (is_space_char (*base_string))
7465 --base_string;
7466
7467 /* If we only have a displacement, set it up to be parsed later. */
7468 displacement_string_start = op_string;
7469 displacement_string_end = base_string + 1;
7470
7471 if (*base_string == ')')
7472 {
7473 char *temp_string;
7474 unsigned int parens_balanced = 1;
7475 /* We've already checked that the numbers of left & right ()'s are
7476 equal, so this loop will not be infinite. */
7477 do
7478 {
7479 base_string--;
7480 if (*base_string == ')')
7481 parens_balanced++;
7482 if (*base_string == '(')
7483 parens_balanced--;
7484 }
7485 while (parens_balanced);
7486
7487 temp_string = base_string;
7488
7489 /* Skip past '(' and whitespace. */
7490 ++base_string;
7491 if (is_space_char (*base_string))
7492 ++base_string;
7493
7494 if (*base_string == ','
7495 || ((i.base_reg = parse_register (base_string, &end_op))
7496 != NULL))
7497 {
7498 displacement_string_end = temp_string;
7499
7500 i.types[this_operand].bitfield.baseindex = 1;
7501
7502 if (i.base_reg)
7503 {
7504 base_string = end_op;
7505 if (is_space_char (*base_string))
7506 ++base_string;
7507 }
7508
7509 /* There may be an index reg or scale factor here. */
7510 if (*base_string == ',')
7511 {
7512 ++base_string;
7513 if (is_space_char (*base_string))
7514 ++base_string;
7515
7516 if ((i.index_reg = parse_register (base_string, &end_op))
7517 != NULL)
7518 {
7519 base_string = end_op;
7520 if (is_space_char (*base_string))
7521 ++base_string;
7522 if (*base_string == ',')
7523 {
7524 ++base_string;
7525 if (is_space_char (*base_string))
7526 ++base_string;
7527 }
7528 else if (*base_string != ')')
7529 {
7530 as_bad (_("expecting `,' or `)' "
7531 "after index register in `%s'"),
7532 operand_string);
7533 return 0;
7534 }
7535 }
7536 else if (*base_string == REGISTER_PREFIX)
7537 {
7538 as_bad (_("bad register name `%s'"), base_string);
7539 return 0;
7540 }
7541
7542 /* Check for scale factor. */
7543 if (*base_string != ')')
7544 {
7545 char *end_scale = i386_scale (base_string);
7546
7547 if (!end_scale)
7548 return 0;
7549
7550 base_string = end_scale;
7551 if (is_space_char (*base_string))
7552 ++base_string;
7553 if (*base_string != ')')
7554 {
7555 as_bad (_("expecting `)' "
7556 "after scale factor in `%s'"),
7557 operand_string);
7558 return 0;
7559 }
7560 }
7561 else if (!i.index_reg)
7562 {
7563 as_bad (_("expecting index register or scale factor "
7564 "after `,'; got '%c'"),
7565 *base_string);
7566 return 0;
7567 }
7568 }
7569 else if (*base_string != ')')
7570 {
7571 as_bad (_("expecting `,' or `)' "
7572 "after base register in `%s'"),
7573 operand_string);
7574 return 0;
7575 }
7576 }
7577 else if (*base_string == REGISTER_PREFIX)
7578 {
7579 as_bad (_("bad register name `%s'"), base_string);
7580 return 0;
7581 }
7582 }
7583
7584 /* If there's an expression beginning the operand, parse it,
7585 assuming displacement_string_start and
7586 displacement_string_end are meaningful. */
7587 if (displacement_string_start != displacement_string_end)
7588 {
7589 if (!i386_displacement (displacement_string_start,
7590 displacement_string_end))
7591 return 0;
7592 }
7593
7594 /* Special case for (%dx) while doing input/output op. */
7595 if (i.base_reg
7596 && operand_type_equal (&i.base_reg->reg_type,
7597 &reg16_inoutportreg)
7598 && i.index_reg == 0
7599 && i.log2_scale_factor == 0
7600 && i.seg[i.mem_operands] == 0
7601 && !operand_type_check (i.types[this_operand], disp))
7602 {
7603 i.types[this_operand] = inoutportreg;
7604 return 1;
7605 }
7606
7607 if (i386_index_check (operand_string) == 0)
7608 return 0;
7609 i.types[this_operand].bitfield.mem = 1;
7610 i.mem_operands++;
7611 }
7612 else
7613 {
7614 /* It's not a memory operand; argh! */
7615 as_bad (_("invalid char %s beginning operand %d `%s'"),
7616 output_invalid (*op_string),
7617 this_operand + 1,
7618 op_string);
7619 return 0;
7620 }
7621 return 1; /* Normal return. */
7622 }
7623 \f
7624 /* md_estimate_size_before_relax()
7625
7626 Called just before relax() for rs_machine_dependent frags. The x86
7627 assembler uses these frags to handle variable size jump
7628 instructions.
7629
7630 Any symbol that is now undefined will not become defined.
7631 Return the correct fr_subtype in the frag.
7632 Return the initial "guess for variable size of frag" to caller.
7633 The guess is actually the growth beyond the fixed part. Whatever
7634 we do to grow the fixed or variable part contributes to our
7635 returned value. */
7636
7637 int
7638 md_estimate_size_before_relax (fragS *fragP, segT segment)
7639 {
7640 /* We've already got fragP->fr_subtype right; all we have to do is
7641 check for un-relaxable symbols. On an ELF system, we can't relax
7642 an externally visible symbol, because it may be overridden by a
7643 shared library. */
7644 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7645 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7646 || (IS_ELF
7647 && (S_IS_EXTERNAL (fragP->fr_symbol)
7648 || S_IS_WEAK (fragP->fr_symbol)
7649 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7650 & BSF_GNU_INDIRECT_FUNCTION))))
7651 #endif
7652 #if defined (OBJ_COFF) && defined (TE_PE)
7653 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7654 && S_IS_WEAK (fragP->fr_symbol))
7655 #endif
7656 )
7657 {
7658 /* Symbol is undefined in this segment, or we need to keep a
7659 reloc so that weak symbols can be overridden. */
7660 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7661 enum bfd_reloc_code_real reloc_type;
7662 unsigned char *opcode;
7663 int old_fr_fix;
7664
7665 if (fragP->fr_var != NO_RELOC)
7666 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7667 else if (size == 2)
7668 reloc_type = BFD_RELOC_16_PCREL;
7669 else
7670 reloc_type = BFD_RELOC_32_PCREL;
7671
7672 old_fr_fix = fragP->fr_fix;
7673 opcode = (unsigned char *) fragP->fr_opcode;
7674
7675 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7676 {
7677 case UNCOND_JUMP:
7678 /* Make jmp (0xeb) a (d)word displacement jump. */
7679 opcode[0] = 0xe9;
7680 fragP->fr_fix += size;
7681 fix_new (fragP, old_fr_fix, size,
7682 fragP->fr_symbol,
7683 fragP->fr_offset, 1,
7684 reloc_type);
7685 break;
7686
7687 case COND_JUMP86:
7688 if (size == 2
7689 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7690 {
7691 /* Negate the condition, and branch past an
7692 unconditional jump. */
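	      /* E.g. a promoted 16-bit `je target' becomes a byte `jne' over
		 the following three bytes (0x74 -> 0x75, displacement 3)
		 plus `jmp target' (0xe9 with a 2-byte pc-relative fixup).  */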
7693 opcode[0] ^= 1;
7694 opcode[1] = 3;
7695 /* Insert an unconditional jump. */
7696 opcode[2] = 0xe9;
7697 /* We added two extra opcode bytes, and have a two byte
7698 offset. */
7699 fragP->fr_fix += 2 + 2;
7700 fix_new (fragP, old_fr_fix + 2, 2,
7701 fragP->fr_symbol,
7702 fragP->fr_offset, 1,
7703 reloc_type);
7704 break;
7705 }
7706 /* Fall through. */
7707
7708 case COND_JUMP:
7709 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7710 {
7711 fixS *fixP;
7712
7713 fragP->fr_fix += 1;
7714 fixP = fix_new (fragP, old_fr_fix, 1,
7715 fragP->fr_symbol,
7716 fragP->fr_offset, 1,
7717 BFD_RELOC_8_PCREL);
7718 fixP->fx_signed = 1;
7719 break;
7720 }
7721
7722 /* This changes the byte-displacement jump 0x7N
7723 to the (d)word-displacement jump 0x0f,0x8N. */
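	  /* E.g. `jne' goes from 0x75 to 0x0f,0x85 followed by a 2- or 4-byte
	     pc-relative displacement fixup.  */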
7724 opcode[1] = opcode[0] + 0x10;
7725 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7726 /* We've added an opcode byte. */
7727 fragP->fr_fix += 1 + size;
7728 fix_new (fragP, old_fr_fix + 1, size,
7729 fragP->fr_symbol,
7730 fragP->fr_offset, 1,
7731 reloc_type);
7732 break;
7733
7734 default:
7735 BAD_CASE (fragP->fr_subtype);
7736 break;
7737 }
7738 frag_wane (fragP);
7739 return fragP->fr_fix - old_fr_fix;
7740 }
7741
7742 /* Guess size depending on current relax state. Initially the relax
7743 state will correspond to a short jump and we return 1, because
7744 the variable part of the frag (the branch offset) is one byte
7745 long. However, we can relax a section more than once and in that
7746 case we must either set fr_subtype back to the unrelaxed state,
7747 or return the value for the appropriate branch. */
7748 return md_relax_table[fragP->fr_subtype].rlx_length;
7749 }
7750
7751 /* Called after relax() is finished.
7752
7753 In: Address of frag.
7754 fr_type == rs_machine_dependent.
7755 fr_subtype is what the address relaxed to.
7756
7757 Out: Any fixSs and constants are set up.
7758 Caller will turn frag into a ".space 0". */
7759
7760 void
7761 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7762 fragS *fragP)
7763 {
7764 unsigned char *opcode;
7765 unsigned char *where_to_put_displacement = NULL;
7766 offsetT target_address;
7767 offsetT opcode_address;
7768 unsigned int extension = 0;
7769 offsetT displacement_from_opcode_start;
7770
7771 opcode = (unsigned char *) fragP->fr_opcode;
7772
7773 /* Address we want to reach in file space. */
7774 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7775
7776 /* Address opcode resides at in file space. */
7777 opcode_address = fragP->fr_address + fragP->fr_fix;
7778
7779 /* Displacement from opcode start to fill into instruction. */
7780 displacement_from_opcode_start = target_address - opcode_address;
7781
7782 if ((fragP->fr_subtype & BIG) == 0)
7783 {
7784 /* Don't have to change opcode. */
7785 extension = 1; /* 1 opcode + 1 displacement */
7786 where_to_put_displacement = &opcode[1];
7787 }
7788 else
7789 {
7790 if (no_cond_jump_promotion
7791 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7792 as_warn_where (fragP->fr_file, fragP->fr_line,
7793 _("long jump required"));
7794
7795 switch (fragP->fr_subtype)
7796 {
7797 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7798 extension = 4; /* 1 opcode + 4 displacement */
7799 opcode[0] = 0xe9;
7800 where_to_put_displacement = &opcode[1];
7801 break;
7802
7803 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7804 extension = 2; /* 1 opcode + 2 displacement */
7805 opcode[0] = 0xe9;
7806 where_to_put_displacement = &opcode[1];
7807 break;
7808
7809 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7810 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7811 extension = 5; /* 2 opcode + 4 displacement */
7812 opcode[1] = opcode[0] + 0x10;
7813 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7814 where_to_put_displacement = &opcode[2];
7815 break;
7816
7817 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7818 extension = 3; /* 2 opcode + 2 displacement */
7819 opcode[1] = opcode[0] + 0x10;
7820 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7821 where_to_put_displacement = &opcode[2];
7822 break;
7823
7824 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7825 extension = 4;
7826 opcode[0] ^= 1;
7827 opcode[1] = 3;
7828 opcode[2] = 0xe9;
7829 where_to_put_displacement = &opcode[3];
7830 break;
7831
7832 default:
7833 BAD_CASE (fragP->fr_subtype);
7834 break;
7835 }
7836 }
7837
7838 /* If the size is less than four we are sure that the operand fits,
7839 but if it's 4, then it could be that the displacement is larger
7840 than +/- 2GB. */
7841 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7842 && object_64bit
7843 && ((addressT) (displacement_from_opcode_start - extension
7844 + ((addressT) 1 << 31))
7845 > (((addressT) 2 << 31) - 1)))
7846 {
7847 as_bad_where (fragP->fr_file, fragP->fr_line,
7848 _("jump target out of range"));
7849 /* Make us emit 0. */
7850 displacement_from_opcode_start = extension;
7851 }
7852 /* Now put displacement after opcode. */
7853 md_number_to_chars ((char *) where_to_put_displacement,
7854 (valueT) (displacement_from_opcode_start - extension),
7855 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7856 fragP->fr_fix += extension;
7857 }
7858 \f
7859 /* Apply a fixup (fixP) to segment data, once it has been determined
7860 by our caller that we have all the info we need to fix it up.
7861
7862 Parameter valP is the pointer to the value of the bits.
7863
7864 On the 386, immediates, displacements, and data pointers are all in
7865 the same (little-endian) format, so we don't need to care about which
7866 we are handling. */
7867
7868 void
7869 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7870 {
7871 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7872 valueT value = *valP;
7873
7874 #if !defined (TE_Mach)
7875 if (fixP->fx_pcrel)
7876 {
7877 switch (fixP->fx_r_type)
7878 {
7879 default:
7880 break;
7881
7882 case BFD_RELOC_64:
7883 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7884 break;
7885 case BFD_RELOC_32:
7886 case BFD_RELOC_X86_64_32S:
7887 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7888 break;
7889 case BFD_RELOC_16:
7890 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7891 break;
7892 case BFD_RELOC_8:
7893 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7894 break;
7895 }
7896 }
7897
7898 if (fixP->fx_addsy != NULL
7899 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7900 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7901 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7902 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7903 && !use_rela_relocations)
7904 {
7905 /* This is a hack. There should be a better way to handle this.
7906 This covers for the fact that bfd_install_relocation will
7907 subtract the current location (for partial_inplace, PC relative
7908 relocations); see more below. */
7909 #ifndef OBJ_AOUT
7910 if (IS_ELF
7911 #ifdef TE_PE
7912 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7913 #endif
7914 )
7915 value += fixP->fx_where + fixP->fx_frag->fr_address;
7916 #endif
7917 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7918 if (IS_ELF)
7919 {
7920 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7921
7922 if ((sym_seg == seg
7923 || (symbol_section_p (fixP->fx_addsy)
7924 && sym_seg != absolute_section))
7925 && !generic_force_reloc (fixP))
7926 {
7927 /* Yes, we add the values in twice. This is because
7928 bfd_install_relocation subtracts them out again. I think
7929 bfd_install_relocation is broken, but I don't dare change
7930 it. FIXME. */
7931 value += fixP->fx_where + fixP->fx_frag->fr_address;
7932 }
7933 }
7934 #endif
7935 #if defined (OBJ_COFF) && defined (TE_PE)
7936 /* For some reason, the PE format does not store a
7937 section address offset for a PC relative symbol. */
7938 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7939 || S_IS_WEAK (fixP->fx_addsy))
7940 value += md_pcrel_from (fixP);
7941 #endif
7942 }
7943 #if defined (OBJ_COFF) && defined (TE_PE)
7944 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7945 {
7946 value -= S_GET_VALUE (fixP->fx_addsy);
7947 }
7948 #endif
7949
7950 /* Fix a few things - the dynamic linker expects certain values here,
7951 and we must not disappoint it. */
7952 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7953 if (IS_ELF && fixP->fx_addsy)
7954 switch (fixP->fx_r_type)
7955 {
7956 case BFD_RELOC_386_PLT32:
7957 case BFD_RELOC_X86_64_PLT32:
7958 /* Make the jump instruction point to the address of the operand. At
7959 runtime we merely add the offset to the actual PLT entry. */
7960 value = -4;
7961 break;
7962
7963 case BFD_RELOC_386_TLS_GD:
7964 case BFD_RELOC_386_TLS_LDM:
7965 case BFD_RELOC_386_TLS_IE_32:
7966 case BFD_RELOC_386_TLS_IE:
7967 case BFD_RELOC_386_TLS_GOTIE:
7968 case BFD_RELOC_386_TLS_GOTDESC:
7969 case BFD_RELOC_X86_64_TLSGD:
7970 case BFD_RELOC_X86_64_TLSLD:
7971 case BFD_RELOC_X86_64_GOTTPOFF:
7972 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7973 value = 0; /* Fully resolved at runtime. No addend. */
7974 /* Fallthrough */
7975 case BFD_RELOC_386_TLS_LE:
7976 case BFD_RELOC_386_TLS_LDO_32:
7977 case BFD_RELOC_386_TLS_LE_32:
7978 case BFD_RELOC_X86_64_DTPOFF32:
7979 case BFD_RELOC_X86_64_DTPOFF64:
7980 case BFD_RELOC_X86_64_TPOFF32:
7981 case BFD_RELOC_X86_64_TPOFF64:
7982 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7983 break;
7984
7985 case BFD_RELOC_386_TLS_DESC_CALL:
7986 case BFD_RELOC_X86_64_TLSDESC_CALL:
7987 value = 0; /* Fully resolved at runtime. No addend. */
7988 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7989 fixP->fx_done = 0;
7990 return;
7991
7992 case BFD_RELOC_386_GOT32:
7993 case BFD_RELOC_X86_64_GOT32:
7994 value = 0; /* Fully resolved at runtime. No addend. */
7995 break;
7996
7997 case BFD_RELOC_VTABLE_INHERIT:
7998 case BFD_RELOC_VTABLE_ENTRY:
7999 fixP->fx_done = 0;
8000 return;
8001
8002 default:
8003 break;
8004 }
8005 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8006 *valP = value;
8007 #endif /* !defined (TE_Mach) */
8008
8009 /* Are we finished with this relocation now? */
8010 if (fixP->fx_addsy == NULL)
8011 fixP->fx_done = 1;
8012 #if defined (OBJ_COFF) && defined (TE_PE)
8013 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8014 {
8015 fixP->fx_done = 0;
8016 /* Remember value for tc_gen_reloc. */
8017 fixP->fx_addnumber = value;
8018 /* Clear out the frag for now. */
8019 value = 0;
8020 }
8021 #endif
8022 else if (use_rela_relocations)
8023 {
8024 fixP->fx_no_overflow = 1;
8025 /* Remember value for tc_gen_reloc. */
8026 fixP->fx_addnumber = value;
8027 value = 0;
8028 }
8029
8030 md_number_to_chars (p, value, fixP->fx_size);
8031 }
8032 \f
8033 char *
8034 md_atof (int type, char *litP, int *sizeP)
8035 {
8036   /* This outputs the LITTLENUMs in REVERSE order;
8037      in accord with the little-endian 386.  */
8038 return ieee_md_atof (type, litP, sizeP, FALSE);
8039 }
8040 \f
8041 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8042
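/* Return a printable representation of the (invalid) character C for
   use in diagnostics: the character itself if printable, otherwise its
   hex code.  */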
8043 static char *
8044 output_invalid (int c)
8045 {
8046 if (ISPRINT (c))
8047 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8048 "'%c'", c);
8049 else
8050 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8051 "(0x%x)", (unsigned char) c);
8052 return output_invalid_buf;
8053 }
8054
8055 /* REG_STRING starts *before* REGISTER_PREFIX. */
8056
8057 static const reg_entry *
8058 parse_real_register (char *reg_string, char **end_op)
8059 {
8060 char *s = reg_string;
8061 char *p;
8062 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8063 const reg_entry *r;
8064
8065 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8066 if (*s == REGISTER_PREFIX)
8067 ++s;
8068
8069 if (is_space_char (*s))
8070 ++s;
8071
8072 p = reg_name_given;
8073 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8074 {
8075 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8076 return (const reg_entry *) NULL;
8077 s++;
8078 }
8079
8080 /* For naked regs, make sure that we are not dealing with an identifier.
8081 This prevents confusing an identifier like `eax_var' with register
8082 `eax'. */
8083 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8084 return (const reg_entry *) NULL;
8085
8086 *end_op = s;
8087
8088 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8089
8090 /* Handle floating point regs, allowing spaces in the (i) part. */
8091 if (r == i386_regtab /* %st is first entry of table */)
8092 {
8093 if (is_space_char (*s))
8094 ++s;
8095 if (*s == '(')
8096 {
8097 ++s;
8098 if (is_space_char (*s))
8099 ++s;
8100 if (*s >= '0' && *s <= '7')
8101 {
8102 int fpr = *s - '0';
8103 ++s;
8104 if (is_space_char (*s))
8105 ++s;
8106 if (*s == ')')
8107 {
8108 *end_op = s + 1;
8109 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8110 know (r);
8111 return r + fpr;
8112 }
8113 }
8114 /* We have "%st(" then garbage. */
8115 return (const reg_entry *) NULL;
8116 }
8117 }
8118
8119 if (r == NULL || allow_pseudo_reg)
8120 return r;
8121
8122 if (operand_type_all_zero (&r->reg_type))
8123 return (const reg_entry *) NULL;
8124
8125 if ((r->reg_type.bitfield.reg32
8126 || r->reg_type.bitfield.sreg3
8127 || r->reg_type.bitfield.control
8128 || r->reg_type.bitfield.debug
8129 || r->reg_type.bitfield.test)
8130 && !cpu_arch_flags.bitfield.cpui386)
8131 return (const reg_entry *) NULL;
8132
8133 if (r->reg_type.bitfield.floatreg
8134 && !cpu_arch_flags.bitfield.cpu8087
8135 && !cpu_arch_flags.bitfield.cpu287
8136 && !cpu_arch_flags.bitfield.cpu387)
8137 return (const reg_entry *) NULL;
8138
8139 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8140 return (const reg_entry *) NULL;
8141
8142 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8143 return (const reg_entry *) NULL;
8144
8145 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8146 return (const reg_entry *) NULL;
8147
8148   /* Don't allow the fake index registers (%eiz/%riz) unless allow_index_reg is non-zero.  */
8149 if (!allow_index_reg
8150 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8151 return (const reg_entry *) NULL;
8152
8153 if (((r->reg_flags & (RegRex64 | RegRex))
8154 || r->reg_type.bitfield.reg64)
8155 && (!cpu_arch_flags.bitfield.cpulm
8156 || !operand_type_equal (&r->reg_type, &control))
8157 && flag_code != CODE_64BIT)
8158 return (const reg_entry *) NULL;
8159
8160 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8161 return (const reg_entry *) NULL;
8162
8163 return r;
8164 }
8165
8166 /* REG_STRING starts *before* REGISTER_PREFIX. */
8167
8168 static const reg_entry *
8169 parse_register (char *reg_string, char **end_op)
8170 {
8171 const reg_entry *r;
8172
8173 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8174 r = parse_real_register (reg_string, end_op);
8175 else
8176 r = NULL;
8177 if (!r)
8178 {
8179 char *save = input_line_pointer;
8180 char c;
8181 symbolS *symbolP;
8182
8183 input_line_pointer = reg_string;
8184 c = get_symbol_end ();
8185 symbolP = symbol_find (reg_string);
8186 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8187 {
8188 const expressionS *e = symbol_get_value_expression (symbolP);
8189
8190 know (e->X_op == O_register);
8191 know (e->X_add_number >= 0
8192 && (valueT) e->X_add_number < i386_regtab_size);
8193 r = i386_regtab + e->X_add_number;
8194 *end_op = input_line_pointer;
8195 }
8196 *input_line_pointer = c;
8197 input_line_pointer = save;
8198 }
8199 return r;
8200 }
8201
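/* Hook for the generic expression parser.  If NAME (terminated by
   *NEXTCHARP) is a register name, fill in E as an O_register expression
   and consume the register from the input; otherwise restore the input
   and, for Intel syntax, fall back to i386_intel_parse_name.  */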
8202 int
8203 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8204 {
8205 const reg_entry *r;
8206 char *end = input_line_pointer;
8207
8208 *end = *nextcharP;
8209 r = parse_register (name, &input_line_pointer);
8210 if (r && end <= input_line_pointer)
8211 {
8212 *nextcharP = *input_line_pointer;
8213 *input_line_pointer = 0;
8214 e->X_op = O_register;
8215 e->X_add_number = r - i386_regtab;
8216 return 1;
8217 }
8218 input_line_pointer = end;
8219 *end = 0;
8220 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8221 }
8222
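/* Target-specific handling of operands the generic expression code does
   not recognize: a '%'-prefixed register reference, or (Intel syntax
   only) a bracketed expression turned into an O_index operand.  */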
8223 void
8224 md_operand (expressionS *e)
8225 {
8226 char *end;
8227 const reg_entry *r;
8228
8229 switch (*input_line_pointer)
8230 {
8231 case REGISTER_PREFIX:
8232 r = parse_real_register (input_line_pointer, &end);
8233 if (r)
8234 {
8235 e->X_op = O_register;
8236 e->X_add_number = r - i386_regtab;
8237 input_line_pointer = end;
8238 }
8239 break;
8240
8241 case '[':
8242 gas_assert (intel_syntax);
8243 end = input_line_pointer++;
8244 expression (e);
8245 if (*input_line_pointer == ']')
8246 {
8247 ++input_line_pointer;
8248 e->X_op_symbol = make_expr_symbol (e);
8249 e->X_add_symbol = NULL;
8250 e->X_add_number = 0;
8251 e->X_op = O_index;
8252 }
8253 else
8254 {
8255 e->X_op = O_absent;
8256 input_line_pointer = end;
8257 }
8258 break;
8259 }
8260 }
8261
8262 \f
8263 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8264 const char *md_shortopts = "kVQ:sqn";
8265 #else
8266 const char *md_shortopts = "qn";
8267 #endif
8268
8269 #define OPTION_32 (OPTION_MD_BASE + 0)
8270 #define OPTION_64 (OPTION_MD_BASE + 1)
8271 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8272 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8273 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8274 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8275 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8276 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8277 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8278 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8279 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8280 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8281 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8282 #define OPTION_X32 (OPTION_MD_BASE + 13)
8283
8284 struct option md_longopts[] =
8285 {
8286 {"32", no_argument, NULL, OPTION_32},
8287 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8288 || defined (TE_PE) || defined (TE_PEP))
8289 {"64", no_argument, NULL, OPTION_64},
8290 #endif
8291 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8292 {"x32", no_argument, NULL, OPTION_X32},
8293 #endif
8294 {"divide", no_argument, NULL, OPTION_DIVIDE},
8295 {"march", required_argument, NULL, OPTION_MARCH},
8296 {"mtune", required_argument, NULL, OPTION_MTUNE},
8297 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8298 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8299 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8300 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8301 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8302 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8303 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8304 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8305 {NULL, no_argument, NULL, 0}
8306 };
8307 size_t md_longopts_size = sizeof (md_longopts);
8308
8309 int
8310 md_parse_option (int c, char *arg)
8311 {
8312 unsigned int j;
8313 char *arch, *next;
8314
8315 switch (c)
8316 {
8317 case 'n':
8318 optimize_align_code = 0;
8319 break;
8320
8321 case 'q':
8322 quiet_warnings = 1;
8323 break;
8324
8325 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8326 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8327 should be emitted or not. FIXME: Not implemented. */
8328 case 'Q':
8329 break;
8330
8331 /* -V: SVR4 argument to print version ID. */
8332 case 'V':
8333 print_version_id ();
8334 break;
8335
8336 /* -k: Ignore for FreeBSD compatibility. */
8337 case 'k':
8338 break;
8339
8340 case 's':
8341 /* -s: On i386 Solaris, this tells the native assembler to use
8342 .stab instead of .stab.excl. We always use .stab anyhow. */
8343 break;
8344 #endif
8345 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8346 || defined (TE_PE) || defined (TE_PEP))
8347 case OPTION_64:
8348 {
8349 const char **list, **l;
8350
8351 list = bfd_target_list ();
8352 for (l = list; *l != NULL; l++)
8353 if (CONST_STRNEQ (*l, "elf64-x86-64")
8354 || strcmp (*l, "coff-x86-64") == 0
8355 || strcmp (*l, "pe-x86-64") == 0
8356 || strcmp (*l, "pei-x86-64") == 0)
8357 {
8358 default_arch = "x86_64";
8359 break;
8360 }
8361 if (*l == NULL)
8362 as_fatal (_("no compiled in support for x86_64"));
8363 free (list);
8364 }
8365 break;
8366 #endif
8367
8368 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8369 case OPTION_X32:
8370 if (IS_ELF)
8371 {
8372 const char **list, **l;
8373
8374 list = bfd_target_list ();
8375 for (l = list; *l != NULL; l++)
8376 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8377 {
8378 default_arch = "x86_64:32";
8379 break;
8380 }
8381 if (*l == NULL)
8382 as_fatal (_("no compiled in support for 32bit x86_64"));
8383 free (list);
8384 }
8385 else
8386 as_fatal (_("32bit x86_64 is only supported for ELF"));
8387 break;
8388 #endif
8389
8390 case OPTION_32:
8391 default_arch = "i386";
8392 break;
8393
8394 case OPTION_DIVIDE:
8395 #ifdef SVR4_COMMENT_CHARS
8396 {
8397 char *n, *t;
8398 const char *s;
8399
8400 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8401 t = n;
8402 for (s = i386_comment_chars; *s != '\0'; s++)
8403 if (*s != '/')
8404 *t++ = *s;
8405 *t = '\0';
8406 i386_comment_chars = n;
8407 }
8408 #endif
8409 break;
8410
8411 case OPTION_MARCH:
8412 arch = xstrdup (arg);
8413 do
8414 {
8415 if (*arch == '.')
8416 as_fatal (_("invalid -march= option: `%s'"), arg);
8417 next = strchr (arch, '+');
8418 if (next)
8419 *next++ = '\0';
8420 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8421 {
8422 if (strcmp (arch, cpu_arch [j].name) == 0)
8423 {
8424 /* Processor. */
8425 if (! cpu_arch[j].flags.bitfield.cpui386)
8426 continue;
8427
8428 cpu_arch_name = cpu_arch[j].name;
8429 cpu_sub_arch_name = NULL;
8430 cpu_arch_flags = cpu_arch[j].flags;
8431 cpu_arch_isa = cpu_arch[j].type;
8432 cpu_arch_isa_flags = cpu_arch[j].flags;
8433 if (!cpu_arch_tune_set)
8434 {
8435 cpu_arch_tune = cpu_arch_isa;
8436 cpu_arch_tune_flags = cpu_arch_isa_flags;
8437 }
8438 break;
8439 }
8440 else if (*cpu_arch [j].name == '.'
8441 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8442 {
8443              /* ISA extension.  */
8444 i386_cpu_flags flags;
8445
8446 if (!cpu_arch[j].negated)
8447 flags = cpu_flags_or (cpu_arch_flags,
8448 cpu_arch[j].flags);
8449 else
8450 flags = cpu_flags_and_not (cpu_arch_flags,
8451 cpu_arch[j].flags);
8452 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8453 {
8454 if (cpu_sub_arch_name)
8455 {
8456 char *name = cpu_sub_arch_name;
8457 cpu_sub_arch_name = concat (name,
8458 cpu_arch[j].name,
8459 (const char *) NULL);
8460 free (name);
8461 }
8462 else
8463 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8464 cpu_arch_flags = flags;
8465 cpu_arch_isa_flags = flags;
8466 }
8467 break;
8468 }
8469 }
8470
8471 if (j >= ARRAY_SIZE (cpu_arch))
8472 as_fatal (_("invalid -march= option: `%s'"), arg);
8473
8474 arch = next;
8475 }
8476       while (next != NULL);
8477 break;
8478
8479 case OPTION_MTUNE:
8480 if (*arg == '.')
8481 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8482 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8483 {
8484 if (strcmp (arg, cpu_arch [j].name) == 0)
8485 {
8486 cpu_arch_tune_set = 1;
8487 cpu_arch_tune = cpu_arch [j].type;
8488 cpu_arch_tune_flags = cpu_arch[j].flags;
8489 break;
8490 }
8491 }
8492 if (j >= ARRAY_SIZE (cpu_arch))
8493 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8494 break;
8495
8496 case OPTION_MMNEMONIC:
8497 if (strcasecmp (arg, "att") == 0)
8498 intel_mnemonic = 0;
8499 else if (strcasecmp (arg, "intel") == 0)
8500 intel_mnemonic = 1;
8501 else
8502 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8503 break;
8504
8505 case OPTION_MSYNTAX:
8506 if (strcasecmp (arg, "att") == 0)
8507 intel_syntax = 0;
8508 else if (strcasecmp (arg, "intel") == 0)
8509 intel_syntax = 1;
8510 else
8511 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8512 break;
8513
8514 case OPTION_MINDEX_REG:
8515 allow_index_reg = 1;
8516 break;
8517
8518 case OPTION_MNAKED_REG:
8519 allow_naked_reg = 1;
8520 break;
8521
8522 case OPTION_MOLD_GCC:
8523 old_gcc = 1;
8524 break;
8525
8526 case OPTION_MSSE2AVX:
8527 sse2avx = 1;
8528 break;
8529
8530 case OPTION_MSSE_CHECK:
8531 if (strcasecmp (arg, "error") == 0)
8532 sse_check = sse_check_error;
8533 else if (strcasecmp (arg, "warning") == 0)
8534 sse_check = sse_check_warning;
8535 else if (strcasecmp (arg, "none") == 0)
8536 sse_check = sse_check_none;
8537 else
8538 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8539 break;
8540
8541 case OPTION_MAVXSCALAR:
8542 if (strcasecmp (arg, "128") == 0)
8543 avxscalar = vex128;
8544 else if (strcasecmp (arg, "256") == 0)
8545 avxscalar = vex256;
8546 else
8547 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8548 break;
8549
8550 default:
8551 return 0;
8552 }
8553 return 1;
8554 }
8555
8556 #define MESSAGE_TEMPLATE \
8557 " "
8558
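/* Print the known processor names (EXT == 0) or ISA extension names
   (EXT != 0) to STREAM, wrapping lines to the width of MESSAGE_TEMPLATE.
   With CHECK set, processors that cannot run i386 code are omitted.  */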
8559 static void
8560 show_arch (FILE *stream, int ext, int check)
8561 {
8562 static char message[] = MESSAGE_TEMPLATE;
8563 char *start = message + 27;
8564 char *p;
8565 int size = sizeof (MESSAGE_TEMPLATE);
8566 int left;
8567 const char *name;
8568 int len;
8569 unsigned int j;
8570
8571 p = start;
8572 left = size - (start - message);
8573 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8574 {
8575 /* Should it be skipped? */
8576 if (cpu_arch [j].skip)
8577 continue;
8578
8579 name = cpu_arch [j].name;
8580 len = cpu_arch [j].len;
8581 if (*name == '.')
8582 {
8583 /* It is an extension. Skip if we aren't asked to show it. */
8584 if (ext)
8585 {
8586 name++;
8587 len--;
8588 }
8589 else
8590 continue;
8591 }
8592 else if (ext)
8593 {
8594          /* It is a processor.  Skip if we only show extensions.  */
8595 continue;
8596 }
8597 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8598 {
8599 /* It is an impossible processor - skip. */
8600 continue;
8601 }
8602
8603 /* Reserve 2 spaces for ", " or ",\0" */
8604 left -= len + 2;
8605
8606 /* Check if there is any room. */
8607 if (left >= 0)
8608 {
8609 if (p != start)
8610 {
8611 *p++ = ',';
8612 *p++ = ' ';
8613 }
8614 p = mempcpy (p, name, len);
8615 }
8616 else
8617 {
8618 /* Output the current message now and start a new one. */
8619 *p++ = ',';
8620 *p = '\0';
8621 fprintf (stream, "%s\n", message);
8622 p = start;
8623 left = size - (start - message) - len - 2;
8624
8625 gas_assert (left >= 0);
8626
8627 p = mempcpy (p, name, len);
8628 }
8629 }
8630
8631 *p = '\0';
8632 fprintf (stream, "%s\n", message);
8633 }
8634
8635 void
8636 md_show_usage (FILE *stream)
8637 {
8638 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8639 fprintf (stream, _("\
8640 -Q ignored\n\
8641 -V print assembler version number\n\
8642 -k ignored\n"));
8643 #endif
8644 fprintf (stream, _("\
8645 -n Do not optimize code alignment\n\
8646 -q quieten some warnings\n"));
8647 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8648 fprintf (stream, _("\
8649 -s ignored\n"));
8650 #endif
8651 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8652 || defined (TE_PE) || defined (TE_PEP))
8653 fprintf (stream, _("\
8654 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8655 #endif
8656 #ifdef SVR4_COMMENT_CHARS
8657 fprintf (stream, _("\
8658 --divide do not treat `/' as a comment character\n"));
8659 #else
8660 fprintf (stream, _("\
8661 --divide ignored\n"));
8662 #endif
8663 fprintf (stream, _("\
8664 -march=CPU[,+EXTENSION...]\n\
8665 generate code for CPU and EXTENSION, CPU is one of:\n"));
8666 show_arch (stream, 0, 1);
8667 fprintf (stream, _("\
8668 EXTENSION is combination of:\n"));
8669 show_arch (stream, 1, 0);
8670 fprintf (stream, _("\
8671 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8672 show_arch (stream, 0, 0);
8673 fprintf (stream, _("\
8674 -msse2avx encode SSE instructions with VEX prefix\n"));
8675 fprintf (stream, _("\
8676 -msse-check=[none|error|warning]\n\
8677 check SSE instructions\n"));
8678 fprintf (stream, _("\
8679 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8680 length\n"));
8681 fprintf (stream, _("\
8682 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8683 fprintf (stream, _("\
8684 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8685 fprintf (stream, _("\
8686 -mindex-reg support pseudo index registers\n"));
8687 fprintf (stream, _("\
8688 -mnaked-reg don't require `%%' prefix for registers\n"));
8689 fprintf (stream, _("\
8690 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8691 }
8692
8693 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8694 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8695 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8696
8697 /* Pick the target format to use. */
8698
8699 const char *
8700 i386_target_format (void)
8701 {
8702 if (!strncmp (default_arch, "x86_64", 6))
8703 {
8704 update_code_flag (CODE_64BIT, 1);
8705 if (default_arch[6] == '\0')
8706 x86_elf_abi = X86_64_ABI;
8707 else
8708 x86_elf_abi = X86_64_X32_ABI;
8709 }
8710 else if (!strcmp (default_arch, "i386"))
8711 update_code_flag (CODE_32BIT, 1);
8712 else
8713 as_fatal (_("unknown architecture"));
8714
8715 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8716 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8717 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8718 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8719
8720 switch (OUTPUT_FLAVOR)
8721 {
8722 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8723 case bfd_target_aout_flavour:
8724 return AOUT_TARGET_FORMAT;
8725 #endif
8726 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8727 # if defined (TE_PE) || defined (TE_PEP)
8728 case bfd_target_coff_flavour:
8729 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8730 # elif defined (TE_GO32)
8731 case bfd_target_coff_flavour:
8732 return "coff-go32";
8733 # else
8734 case bfd_target_coff_flavour:
8735 return "coff-i386";
8736 # endif
8737 #endif
8738 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8739 case bfd_target_elf_flavour:
8740 {
8741 const char *format;
8742
8743 switch (x86_elf_abi)
8744 {
8745 default:
8746 format = ELF_TARGET_FORMAT;
8747 break;
8748 case X86_64_ABI:
8749 use_rela_relocations = 1;
8750 object_64bit = 1;
8751 format = ELF_TARGET_FORMAT64;
8752 break;
8753 case X86_64_X32_ABI:
8754 use_rela_relocations = 1;
8755 object_64bit = 1;
8756 disallow_64bit_reloc = 1;
8757 format = ELF_TARGET_FORMAT32;
8758 break;
8759 }
8760 if (cpu_arch_isa == PROCESSOR_L1OM)
8761 {
8762 if (x86_elf_abi != X86_64_ABI)
8763 as_fatal (_("Intel L1OM is 64bit only"));
8764 return ELF_TARGET_L1OM_FORMAT;
8765 }
8766 if (cpu_arch_isa == PROCESSOR_K1OM)
8767 {
8768 if (x86_elf_abi != X86_64_ABI)
8769 as_fatal (_("Intel K1OM is 64bit only"));
8770 return ELF_TARGET_K1OM_FORMAT;
8771 }
8772 else
8773 return format;
8774 }
8775 #endif
8776 #if defined (OBJ_MACH_O)
8777 case bfd_target_mach_o_flavour:
8778 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8779 #endif
8780 default:
8781 abort ();
8782 return NULL;
8783 }
8784 }
8785
8786 #endif /* OBJ_MAYBE_ more than one */
8787
8788 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8789 void
8790 i386_elf_emit_arch_note (void)
8791 {
8792 if (IS_ELF && cpu_arch_name != NULL)
8793 {
8794 char *p;
8795 asection *seg = now_seg;
8796 subsegT subseg = now_subseg;
8797 Elf_Internal_Note i_note;
8798 Elf_External_Note e_note;
8799 asection *note_secp;
8800 int len;
8801
8802 /* Create the .note section. */
8803 note_secp = subseg_new (".note", 0);
8804 bfd_set_section_flags (stdoutput,
8805 note_secp,
8806 SEC_HAS_CONTENTS | SEC_READONLY);
8807
8808 /* Process the arch string. */
8809 len = strlen (cpu_arch_name);
8810
8811 i_note.namesz = len + 1;
8812 i_note.descsz = 0;
8813 i_note.type = NT_ARCH;
8814 p = frag_more (sizeof (e_note.namesz));
8815 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8816 p = frag_more (sizeof (e_note.descsz));
8817 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8818 p = frag_more (sizeof (e_note.type));
8819 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8820 p = frag_more (len + 1);
8821 strcpy (p, cpu_arch_name);
8822
8823 frag_align (2, 0, 0);
8824
8825 subseg_set (seg, subseg);
8826 }
8827 }
8828 #endif
8829 \f
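/* Create the _GLOBAL_OFFSET_TABLE_ symbol lazily, on first reference,
   so that GOT-relative relocations can be generated against it.  */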
8830 symbolS *
8831 md_undefined_symbol (char *name)
8832 {
8833 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8834 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8835 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8836 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8837 {
8838 if (!GOT_symbol)
8839 {
8840 if (symbol_find (name))
8841 as_bad (_("GOT already in symbol table"));
8842 GOT_symbol = symbol_new (name, undefined_section,
8843 (valueT) 0, &zero_address_frag);
8844        }
8845 return GOT_symbol;
8846 }
8847 return 0;
8848 }
8849
8850 /* Round up a section size to the appropriate boundary. */
8851
8852 valueT
8853 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8854 {
8855 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8856 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8857 {
8858 /* For a.out, force the section size to be aligned. If we don't do
8859 this, BFD will align it for us, but it will not write out the
8860 final bytes of the section. This may be a bug in BFD, but it is
8861 easier to fix it here since that is how the other a.out targets
8862 work. */
8863 int align;
8864
8865 align = bfd_get_section_alignment (stdoutput, segment);
8866 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8867 }
8868 #endif
8869
8870 return size;
8871 }
8872
8873 /* On the i386, PC-relative offsets are relative to the start of the
8874 next instruction. That is, the address of the offset, plus its
8875 size, since the offset is always the last part of the insn. */
8876
8877 long
8878 md_pcrel_from (fixS *fixP)
8879 {
8880 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8881 }
8882
8883 #ifndef I386COFF
8884
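/* Handle the .bss pseudo-op (for non-COFF targets): switch to the
   subsegment of the bss section given by the absolute expression
   argument.  */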
8885 static void
8886 s_bss (int ignore ATTRIBUTE_UNUSED)
8887 {
8888 int temp;
8889
8890 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8891 if (IS_ELF)
8892 obj_elf_section_change_hook ();
8893 #endif
8894 temp = get_absolute_expression ();
8895 subseg_set (bss_section, (subsegT) temp);
8896 demand_empty_rest_of_line ();
8897 }
8898
8899 #endif
8900
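/* If a fixup subtracts _GLOBAL_OFFSET_TABLE_, rewrite it as the
   corresponding GOT-relative relocation (GOTPCREL when 64-bit and
   PC-relative, otherwise GOTOFF / GOTOFF64) and drop the subtracted
   symbol.  */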
8901 void
8902 i386_validate_fix (fixS *fixp)
8903 {
8904 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8905 {
8906 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8907 {
8908 if (!object_64bit)
8909 abort ();
8910 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8911 }
8912 else
8913 {
8914 if (!object_64bit)
8915 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8916 else
8917 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8918 }
8919 fixp->fx_subsy = 0;
8920 }
8921 }
8922
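/* Translate an internal fixup into a BFD arelent: map generic fixups to
   appropriately sized (PC-relative) relocations, special-case references
   to _GLOBAL_OFFSET_TABLE_, and compute the addend depending on whether
   REL or RELA relocations are in use.  */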
8923 arelent *
8924 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8925 {
8926 arelent *rel;
8927 bfd_reloc_code_real_type code;
8928
8929 switch (fixp->fx_r_type)
8930 {
8931 case BFD_RELOC_X86_64_PLT32:
8932 case BFD_RELOC_X86_64_GOT32:
8933 case BFD_RELOC_X86_64_GOTPCREL:
8934 case BFD_RELOC_386_PLT32:
8935 case BFD_RELOC_386_GOT32:
8936 case BFD_RELOC_386_GOTOFF:
8937 case BFD_RELOC_386_GOTPC:
8938 case BFD_RELOC_386_TLS_GD:
8939 case BFD_RELOC_386_TLS_LDM:
8940 case BFD_RELOC_386_TLS_LDO_32:
8941 case BFD_RELOC_386_TLS_IE_32:
8942 case BFD_RELOC_386_TLS_IE:
8943 case BFD_RELOC_386_TLS_GOTIE:
8944 case BFD_RELOC_386_TLS_LE_32:
8945 case BFD_RELOC_386_TLS_LE:
8946 case BFD_RELOC_386_TLS_GOTDESC:
8947 case BFD_RELOC_386_TLS_DESC_CALL:
8948 case BFD_RELOC_X86_64_TLSGD:
8949 case BFD_RELOC_X86_64_TLSLD:
8950 case BFD_RELOC_X86_64_DTPOFF32:
8951 case BFD_RELOC_X86_64_DTPOFF64:
8952 case BFD_RELOC_X86_64_GOTTPOFF:
8953 case BFD_RELOC_X86_64_TPOFF32:
8954 case BFD_RELOC_X86_64_TPOFF64:
8955 case BFD_RELOC_X86_64_GOTOFF64:
8956 case BFD_RELOC_X86_64_GOTPC32:
8957 case BFD_RELOC_X86_64_GOT64:
8958 case BFD_RELOC_X86_64_GOTPCREL64:
8959 case BFD_RELOC_X86_64_GOTPC64:
8960 case BFD_RELOC_X86_64_GOTPLT64:
8961 case BFD_RELOC_X86_64_PLTOFF64:
8962 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8963 case BFD_RELOC_X86_64_TLSDESC_CALL:
8964 case BFD_RELOC_RVA:
8965 case BFD_RELOC_VTABLE_ENTRY:
8966 case BFD_RELOC_VTABLE_INHERIT:
8967 #ifdef TE_PE
8968 case BFD_RELOC_32_SECREL:
8969 #endif
8970 code = fixp->fx_r_type;
8971 break;
8972 case BFD_RELOC_X86_64_32S:
8973 if (!fixp->fx_pcrel)
8974 {
8975 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8976 code = fixp->fx_r_type;
8977 break;
8978 }
8979 default:
8980 if (fixp->fx_pcrel)
8981 {
8982 switch (fixp->fx_size)
8983 {
8984 default:
8985 as_bad_where (fixp->fx_file, fixp->fx_line,
8986 _("can not do %d byte pc-relative relocation"),
8987 fixp->fx_size);
8988 code = BFD_RELOC_32_PCREL;
8989 break;
8990 case 1: code = BFD_RELOC_8_PCREL; break;
8991 case 2: code = BFD_RELOC_16_PCREL; break;
8992 case 4: code = BFD_RELOC_32_PCREL; break;
8993 #ifdef BFD64
8994 case 8: code = BFD_RELOC_64_PCREL; break;
8995 #endif
8996 }
8997 }
8998 else
8999 {
9000 switch (fixp->fx_size)
9001 {
9002 default:
9003 as_bad_where (fixp->fx_file, fixp->fx_line,
9004 _("can not do %d byte relocation"),
9005 fixp->fx_size);
9006 code = BFD_RELOC_32;
9007 break;
9008 case 1: code = BFD_RELOC_8; break;
9009 case 2: code = BFD_RELOC_16; break;
9010 case 4: code = BFD_RELOC_32; break;
9011 #ifdef BFD64
9012 case 8: code = BFD_RELOC_64; break;
9013 #endif
9014 }
9015 }
9016 break;
9017 }
9018
9019 if ((code == BFD_RELOC_32
9020 || code == BFD_RELOC_32_PCREL
9021 || code == BFD_RELOC_X86_64_32S)
9022 && GOT_symbol
9023 && fixp->fx_addsy == GOT_symbol)
9024 {
9025 if (!object_64bit)
9026 code = BFD_RELOC_386_GOTPC;
9027 else
9028 code = BFD_RELOC_X86_64_GOTPC32;
9029 }
9030 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9031 && GOT_symbol
9032 && fixp->fx_addsy == GOT_symbol)
9033 {
9034 code = BFD_RELOC_X86_64_GOTPC64;
9035 }
9036
9037 rel = (arelent *) xmalloc (sizeof (arelent));
9038 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9039 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9040
9041 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9042
9043 if (!use_rela_relocations)
9044 {
9045 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9046 vtable entry to be used in the relocation's section offset. */
9047 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9048 rel->address = fixp->fx_offset;
9049 #if defined (OBJ_COFF) && defined (TE_PE)
9050 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9051 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9052 else
9053 #endif
9054 rel->addend = 0;
9055 }
9056   /* Use rela relocations in 64-bit mode.  */
9057 else
9058 {
9059 if (disallow_64bit_reloc)
9060 switch (code)
9061 {
9062 case BFD_RELOC_64:
9063 case BFD_RELOC_X86_64_DTPOFF64:
9064 case BFD_RELOC_X86_64_TPOFF64:
9065 case BFD_RELOC_64_PCREL:
9066 case BFD_RELOC_X86_64_GOTOFF64:
9067 case BFD_RELOC_X86_64_GOT64:
9068 case BFD_RELOC_X86_64_GOTPCREL64:
9069 case BFD_RELOC_X86_64_GOTPC64:
9070 case BFD_RELOC_X86_64_GOTPLT64:
9071 case BFD_RELOC_X86_64_PLTOFF64:
9072 as_bad_where (fixp->fx_file, fixp->fx_line,
9073 _("cannot represent relocation type %s in x32 mode"),
9074 bfd_get_reloc_code_name (code));
9075 break;
9076 default:
9077 break;
9078 }
9079
9080 if (!fixp->fx_pcrel)
9081 rel->addend = fixp->fx_offset;
9082 else
9083 switch (code)
9084 {
9085 case BFD_RELOC_X86_64_PLT32:
9086 case BFD_RELOC_X86_64_GOT32:
9087 case BFD_RELOC_X86_64_GOTPCREL:
9088 case BFD_RELOC_X86_64_TLSGD:
9089 case BFD_RELOC_X86_64_TLSLD:
9090 case BFD_RELOC_X86_64_GOTTPOFF:
9091 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9092 case BFD_RELOC_X86_64_TLSDESC_CALL:
9093 rel->addend = fixp->fx_offset - fixp->fx_size;
9094 break;
9095 default:
9096 rel->addend = (section->vma
9097 - fixp->fx_size
9098 + fixp->fx_addnumber
9099 + md_pcrel_from (fixp));
9100 break;
9101 }
9102 }
9103
9104 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9105 if (rel->howto == NULL)
9106 {
9107 as_bad_where (fixp->fx_file, fixp->fx_line,
9108 _("cannot represent relocation type %s"),
9109 bfd_get_reloc_code_name (code));
9110 /* Set howto to a garbage value so that we can keep going. */
9111 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9112 gas_assert (rel->howto != NULL);
9113 }
9114
9115 return rel;
9116 }
9117
9118 #include "tc-i386-intel.c"
9119
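/* Parse a register name from the input (for .cfi_* directives and the
   like), temporarily allowing naked and pseudo registers, and convert it
   to the corresponding DWARF2 register number for the current mode.  */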
9120 void
9121 tc_x86_parse_to_dw2regnum (expressionS *exp)
9122 {
9123 int saved_naked_reg;
9124 char saved_register_dot;
9125
9126 saved_naked_reg = allow_naked_reg;
9127 allow_naked_reg = 1;
9128 saved_register_dot = register_chars['.'];
9129 register_chars['.'] = '.';
9130 allow_pseudo_reg = 1;
9131 expression_and_evaluate (exp);
9132 allow_pseudo_reg = 0;
9133 register_chars['.'] = saved_register_dot;
9134 allow_naked_reg = saved_naked_reg;
9135
9136 if (exp->X_op == O_register && exp->X_add_number >= 0)
9137 {
9138 if ((addressT) exp->X_add_number < i386_regtab_size)
9139 {
9140 exp->X_op = O_constant;
9141 exp->X_add_number = i386_regtab[exp->X_add_number]
9142 .dw2_regnum[flag_code >> 1];
9143 }
9144 else
9145 exp->X_op = O_illegal;
9146 }
9147 }
9148
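/* Emit the initial CFI state for a frame: the CFA is defined relative to
   %esp/%rsp (whose DWARF number is computed once and cached) and the
   return address column is recorded at the CFA.  */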
9149 void
9150 tc_x86_frame_initial_instructions (void)
9151 {
9152 static unsigned int sp_regno[2];
9153
9154 if (!sp_regno[flag_code >> 1])
9155 {
9156 char *saved_input = input_line_pointer;
9157 char sp[][4] = {"esp", "rsp"};
9158 expressionS exp;
9159
9160 input_line_pointer = sp[flag_code >> 1];
9161 tc_x86_parse_to_dw2regnum (&exp);
9162 gas_assert (exp.X_op == O_constant);
9163 sp_regno[flag_code >> 1] = exp.X_add_number;
9164 input_line_pointer = saved_input;
9165 }
9166
9167 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9168 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9169 }
9170
9171 int
9172 i386_elf_section_type (const char *str, size_t len)
9173 {
9174 if (flag_code == CODE_64BIT
9175 && len == sizeof ("unwind") - 1
9176 && strncmp (str, "unwind", 6) == 0)
9177 return SHT_X86_64_UNWIND;
9178
9179 return -1;
9180 }
9181
9182 #ifdef TE_SOLARIS
9183 void
9184 i386_solaris_fix_up_eh_frame (segT sec)
9185 {
9186 if (flag_code == CODE_64BIT)
9187 elf_section_type (sec) = SHT_X86_64_UNWIND;
9188 }
9189 #endif
9190
9191 #ifdef TE_PE
9192 void
9193 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9194 {
9195 expressionS exp;
9196
9197 exp.X_op = O_secrel;
9198 exp.X_add_symbol = symbol;
9199 exp.X_add_number = 0;
9200 emit_expr (&exp, size);
9201 }
9202 #endif
9203
9204 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9205 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9206
9207 bfd_vma
9208 x86_64_section_letter (int letter, char **ptr_msg)
9209 {
9210 if (flag_code == CODE_64BIT)
9211 {
9212 if (letter == 'l')
9213 return SHF_X86_64_LARGE;
9214
9215 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9216 }
9217 else
9218 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9219 return -1;
9220 }
9221
9222 bfd_vma
9223 x86_64_section_word (char *str, size_t len)
9224 {
9225 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9226 return SHF_X86_64_LARGE;
9227
9228 return -1;
9229 }
9230
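/* Implement the .largecomm pseudo-op.  In 64-bit mode the symbol is
   allocated in the large common section (locals go into .lbss); in
   32-bit mode this degrades to a plain .comm with a warning.  */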
9231 static void
9232 handle_large_common (int small ATTRIBUTE_UNUSED)
9233 {
9234 if (flag_code != CODE_64BIT)
9235 {
9236 s_comm_internal (0, elf_common_parse);
9237 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9238 }
9239 else
9240 {
9241 static segT lbss_section;
9242 asection *saved_com_section_ptr = elf_com_section_ptr;
9243 asection *saved_bss_section = bss_section;
9244
9245 if (lbss_section == NULL)
9246 {
9247 flagword applicable;
9248 segT seg = now_seg;
9249 subsegT subseg = now_subseg;
9250
9251 /* The .lbss section is for local .largecomm symbols. */
9252 lbss_section = subseg_new (".lbss", 0);
9253 applicable = bfd_applicable_section_flags (stdoutput);
9254 bfd_set_section_flags (stdoutput, lbss_section,
9255 applicable & SEC_ALLOC);
9256 seg_info (lbss_section)->bss = 1;
9257
9258 subseg_set (seg, subseg);
9259 }
9260
9261 elf_com_section_ptr = &_bfd_elf_large_com_section;
9262 bss_section = lbss_section;
9263
9264 s_comm_internal (0, elf_common_parse);
9265
9266 elf_com_section_ptr = saved_com_section_ptr;
9267 bss_section = saved_bss_section;
9268 }
9269 }
9270
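/* Implement .quad for x32, where 64-bit relocations are not available:
   a non-constant value is emitted as 4 bytes and then zero-extended to
   8 bytes; for other ABIs this simply calls cons with NBYTES.  */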
9271 static void
9272 handle_quad (int nbytes)
9273 {
9274 expressionS exp;
9275
9276 if (x86_elf_abi != X86_64_X32_ABI)
9277 {
9278 cons (nbytes);
9279 return;
9280 }
9281
9282 if (is_it_end_of_statement ())
9283 {
9284 demand_empty_rest_of_line ();
9285 return;
9286 }
9287
9288 do
9289 {
9290 if (*input_line_pointer == '"')
9291 {
9292 as_bad (_("unexpected `\"' in expression"));
9293 ignore_rest_of_line ();
9294 return;
9295 }
9296 x86_cons (&exp, nbytes);
9297 /* Output 4 bytes if not constant. */
9298 if (exp.X_op != O_constant)
9299 nbytes = 4;
9300 emit_expr (&exp, (unsigned int) nbytes);
9301       /* Zero-extend to 8 bytes if not constant.  */
9302 if (nbytes == 4)
9303 {
9304 memset (&exp, '\0', sizeof (exp));
9305 exp.X_op = O_constant;
9306 emit_expr (&exp, nbytes);
9307 }
9308 nbytes = 8;
9309 }
9310 while (*input_line_pointer++ == ',');
9311
9312 input_line_pointer--; /* Put terminator back into stream. */
9313
9314 demand_empty_rest_of_line ();
9315 }
9316 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */