1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
 61    WAIT_PREFIX must be the first prefix since FWAIT is really an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
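/* Illustrative note (added annotation, not from the upstream sources):
   the slot numbers above index i.prefix[].  If an insn carries both a
   %fs segment override (0x64) and LOCK (0xf0), add_prefix () files them
   under SEG_PREFIX and LOCK_PREFIX respectively, so per the ordering
   described above the 0x64 byte is emitted before the 0xf0 byte.  */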
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
 88 /* Intel Syntax.  Use a non-ascii letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
 201 /* 'md_assemble ()' gathers together information and puts it into an
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
226 };
227
228 struct _i386_insn
229 {
 230   /* TM holds the template for the insn we're currently assembling.  */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* Have HLE prefix. */
294 unsigned int have_hle;
295
296 /* Error message. */
297 enum i386_error error;
298 };
299
300 typedef struct _i386_insn i386_insn;
301
302 /* List of chars besides those in app.c:symbol_chars that can start an
 303    operand.  Used to prevent the scrubber from eating vital white-space.  */
304 const char extra_symbol_chars[] = "*%-(["
305 #ifdef LEX_AT
306 "@"
307 #endif
308 #ifdef LEX_QM
309 "?"
310 #endif
311 ;
312
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
328
329 #else
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
332 #endif
333
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
343
344 const char line_separator_chars[] = ";";
345
 346 /* Chars that can be used to separate the mantissa from the exponent in
 347    floating point numbers.  */
348 const char EXP_CHARS[] = "eE";
349
350 /* Chars that mean this number is a floating point constant
351 As in 0f12.456
352 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
354
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
361
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
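/* Usage sketch (annotation only, not from the upstream sources): a parser
   that wants to treat a sub-field of the insn string as its own C string
   brackets the work like this:

     END_STRING_AND_SAVE (end_of_field);    save the char, smash to '\0'
     ... parse the NUL-terminated field ...
     RESTORE_END_STRING (end_of_field);     put the original char back

   which is how md_assemble () keeps its promise of leaving the string
   unaltered.  */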
383
384 /* The instruction we're assembling. */
385 static i386_insn i;
386
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
389
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393
394 /* Current operand we are working on. */
395 static int this_operand = -1;
396
 397 /* We support three code modes: 16, 32 and 64 bit (plus the .code16gcc
 398    flavour of 16 bit mode).  The FLAG_CODE variable is used to distinguish
 399    them.  */
399
400 enum flag_code {
401 CODE_32BIT,
402 CODE_16BIT,
403 CODE_64BIT };
404
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
409
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413
414 /* The ELF ABI to use. */
415 enum x86_elf_abi
416 {
417 I386_ABI,
418 X86_64_ABI,
419 X86_64_X32_ABI
420 };
421
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
423 #endif
424
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
427 {
428 "32",
429 "16",
430 "64"
431 };
432
433 /* 1 for intel syntax,
434 0 if att syntax. */
435 static int intel_syntax = 0;
436
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
440
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
443
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
446
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
449
 450 /* 1 if pseudo index register, eiz/riz, is allowed.  */
451 static int allow_index_reg = 0;
452
453 static enum check_kind
454 {
455 check_none = 0,
456 check_warning,
457 check_error
458 }
459 sse_check, operand_check = check_warning;
460
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
463
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
468
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
471
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
474
475 /* CPU name. */
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
478
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481
 482 /* Non-zero if we have selected a cpu to generate instructions for.  */
483 static int cpu_arch_tune_set = 0;
484
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
490
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
496
497 /* If set, conditional jumps are not automatically promoted to handle
 498    offsets larger than a byte.  */
499 static unsigned int no_cond_jump_promotion = 0;
500
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
503
504 /* Encode scalar AVX instructions with specific vector length. */
505 static enum
506 {
507 vex128 = 0,
508 vex256
509 } avxscalar;
510
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
513
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
516
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
519
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
524
525 /* Types. */
526 #define UNCOND_JUMP 0
527 #define COND_JUMP 1
528 #define COND_JUMP86 2
529
530 /* Sizes. */
531 #define CODE16 1
532 #define SMALL 0
533 #define SMALL16 (SMALL | CODE16)
534 #define BIG 2
535 #define BIG16 (BIG | CODE16)
536
537 #ifndef INLINE
538 #ifdef __GNUC__
539 #define INLINE __inline__
540 #else
541 #define INLINE
542 #endif
543 #endif
544
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
548 ((s) >> 2)
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
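/* Worked example (annotation, using the definitions above):
   ENCODE_RELAX_STATE (COND_JUMP, SMALL16) == (1 << 2) | 1 == 5,
   TYPE_FROM_RELAX_STATE (5) == 5 >> 2 == COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (5) == 1, since (5 & 3) is neither
   BIG (2) nor BIG16 (3), i.e. a one byte displacement.  */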
551
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
 558    prefix) and doesn't work unless the destination is in the bottom
 559    64k of the code segment (the top 16 bits of eip are zeroed).  */
559
560 const relax_typeS md_relax_table[] =
561 {
562 /* The fields are:
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
567
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
573 {0, 0, 4, 0},
 574   /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
576 {0, 0, 2, 0},
577
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
 581   /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
583 {0, 0, 5, 0},
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
586 {0, 0, 3, 0},
587
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
 591   /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
593 {0, 0, 5, 0},
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
596 {0, 0, 4, 0}
597 };
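/* Annotation (not from the upstream sources): the table is indexed by the
   relax substates built above.  E.g. a short conditional jump starts in
   state ENCODE_RELAX_STATE (COND_JUMP, SMALL) == 4, whose entry says it
   can reach -128+1..127+1 bytes with a 1 byte displacement; if the target
   is out of that range, the entry's fourth field sends relaxation to
   state ENCODE_RELAX_STATE (COND_JUMP, BIG) == 6, which grows the frag by
   5 bytes (1 extra opcode byte plus 4 displacement bytes).  */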
598
599 static const arch_entry cpu_arch[] =
600 {
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
668 CPU_BTVER1_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
670 CPU_BTVER2_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
672 CPU_8087_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
674 CPU_287_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
676 CPU_387_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
678 CPU_ANY87_FLAGS, 0, 1 },
679 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
680 CPU_MMX_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
682 CPU_3DNOWA_FLAGS, 0, 1 },
683 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
684 CPU_SSE_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
686 CPU_SSE2_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
688 CPU_SSE3_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
690 CPU_SSSE3_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
692 CPU_SSE4_1_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
694 CPU_SSE4_2_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
696 CPU_SSE4_2_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
698 CPU_ANY_SSE_FLAGS, 0, 1 },
699 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
700 CPU_AVX_FLAGS, 0, 0 },
701 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
702 CPU_AVX2_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
704 CPU_ANY_AVX_FLAGS, 0, 1 },
705 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
706 CPU_VMX_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
708 CPU_VMFUNC_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
710 CPU_SMX_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
712 CPU_XSAVE_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
714 CPU_XSAVEOPT_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
716 CPU_AES_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
718 CPU_PCLMUL_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
720 CPU_PCLMUL_FLAGS, 1, 0 },
721 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
722 CPU_FSGSBASE_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
724 CPU_RDRND_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
726 CPU_F16C_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
728 CPU_BMI2_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
730 CPU_FMA_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
732 CPU_FMA4_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
734 CPU_XOP_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
736 CPU_LWP_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
738 CPU_MOVBE_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
740 CPU_CX16_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
742 CPU_EPT_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
744 CPU_LZCNT_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
746 CPU_HLE_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
748 CPU_RTM_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
750 CPU_INVPCID_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
752 CPU_CLFLUSH_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
754 CPU_NOP_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
756 CPU_SYSCALL_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
758 CPU_RDTSCP_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
760 CPU_3DNOW_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
762 CPU_3DNOWA_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
764 CPU_PADLOCK_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
766 CPU_SVME_FLAGS, 1, 0 },
767 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
768 CPU_SVME_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
770 CPU_SSE4A_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
772 CPU_ABM_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
774 CPU_BMI_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
776 CPU_TBM_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
778 CPU_ADX_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
780 CPU_RDSEED_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
782 CPU_PRFCHW_FLAGS, 0, 0 },
783 };
784
785 #ifdef I386COFF
786 /* Like s_lcomm_internal in gas/read.c but the alignment string
787 is allowed to be optional. */
788
789 static symbolS *
790 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
791 {
792 addressT align = 0;
793
794 SKIP_WHITESPACE ();
795
796 if (needs_align
797 && *input_line_pointer == ',')
798 {
799 align = parse_align (needs_align - 1);
800
801 if (align == (addressT) -1)
802 return NULL;
803 }
804 else
805 {
806 if (size >= 8)
807 align = 3;
808 else if (size >= 4)
809 align = 2;
810 else if (size >= 2)
811 align = 1;
812 else
813 align = 0;
814 }
815
816 bss_alloc (symbolP, size, align);
817 return symbolP;
818 }
819
820 static void
821 pe_lcomm (int needs_align)
822 {
823 s_comm_internal (needs_align * 2, pe_lcomm_internal);
824 }
825 #endif
826
827 const pseudo_typeS md_pseudo_table[] =
828 {
829 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
830 {"align", s_align_bytes, 0},
831 #else
832 {"align", s_align_ptwo, 0},
833 #endif
834 {"arch", set_cpu_arch, 0},
835 #ifndef I386COFF
836 {"bss", s_bss, 0},
837 #else
838 {"lcomm", pe_lcomm, 1},
839 #endif
840 {"ffloat", float_cons, 'f'},
841 {"dfloat", float_cons, 'd'},
842 {"tfloat", float_cons, 'x'},
843 {"value", cons, 2},
844 {"slong", signed_cons, 4},
845 {"noopt", s_ignore, 0},
846 {"optim", s_ignore, 0},
847 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
848 {"code16", set_code_flag, CODE_16BIT},
849 {"code32", set_code_flag, CODE_32BIT},
850 {"code64", set_code_flag, CODE_64BIT},
851 {"intel_syntax", set_intel_syntax, 1},
852 {"att_syntax", set_intel_syntax, 0},
853 {"intel_mnemonic", set_intel_mnemonic, 1},
854 {"att_mnemonic", set_intel_mnemonic, 0},
855 {"allow_index_reg", set_allow_index_reg, 1},
856 {"disallow_index_reg", set_allow_index_reg, 0},
857 {"sse_check", set_check, 0},
858 {"operand_check", set_check, 1},
859 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
860 {"largecomm", handle_large_common, 0},
861 #else
862 {"file", (void (*) (int)) dwarf2_directive_file, 0},
863 {"loc", dwarf2_directive_loc, 0},
864 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
865 #endif
866 #ifdef TE_PE
867 {"secrel32", pe_directive_secrel, 0},
868 #endif
869 {0, 0, 0}
870 };
871
872 /* For interface with expression (). */
873 extern char *input_line_pointer;
874
875 /* Hash table for instruction mnemonic lookup. */
876 static struct hash_control *op_hash;
877
878 /* Hash table for register lookup. */
879 static struct hash_control *reg_hash;
880 \f
881 void
882 i386_align_code (fragS *fragP, int count)
883 {
884 /* Various efficient no-op patterns for aligning code labels.
885 Note: Don't try to assemble the instructions in the comments.
886 0L and 0w are not legal. */
887 static const char f32_1[] =
888 {0x90}; /* nop */
889 static const char f32_2[] =
890 {0x66,0x90}; /* xchg %ax,%ax */
891 static const char f32_3[] =
892 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
893 static const char f32_4[] =
894 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
895 static const char f32_5[] =
896 {0x90, /* nop */
897 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
898 static const char f32_6[] =
899 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
900 static const char f32_7[] =
901 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
902 static const char f32_8[] =
903 {0x90, /* nop */
904 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
905 static const char f32_9[] =
906 {0x89,0xf6, /* movl %esi,%esi */
907 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
908 static const char f32_10[] =
909 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
910 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
911 static const char f32_11[] =
912 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
913 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
914 static const char f32_12[] =
915 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
916 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
917 static const char f32_13[] =
918 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
919 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
920 static const char f32_14[] =
921 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
922 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
923 static const char f16_3[] =
924 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
925 static const char f16_4[] =
926 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
927 static const char f16_5[] =
928 {0x90, /* nop */
929 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
930 static const char f16_6[] =
931 {0x89,0xf6, /* mov %si,%si */
932 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
933 static const char f16_7[] =
934 {0x8d,0x74,0x00, /* lea 0(%si),%si */
935 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
936 static const char f16_8[] =
937 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
938 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
939 static const char jump_31[] =
940 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
941 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
942 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
943 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
944 static const char *const f32_patt[] = {
945 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
946 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
947 };
948 static const char *const f16_patt[] = {
949 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
950 };
951 /* nopl (%[re]ax) */
952 static const char alt_3[] =
953 {0x0f,0x1f,0x00};
954 /* nopl 0(%[re]ax) */
955 static const char alt_4[] =
956 {0x0f,0x1f,0x40,0x00};
957 /* nopl 0(%[re]ax,%[re]ax,1) */
958 static const char alt_5[] =
959 {0x0f,0x1f,0x44,0x00,0x00};
960 /* nopw 0(%[re]ax,%[re]ax,1) */
961 static const char alt_6[] =
962 {0x66,0x0f,0x1f,0x44,0x00,0x00};
963 /* nopl 0L(%[re]ax) */
964 static const char alt_7[] =
965 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
966 /* nopl 0L(%[re]ax,%[re]ax,1) */
967 static const char alt_8[] =
968 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
969 /* nopw 0L(%[re]ax,%[re]ax,1) */
970 static const char alt_9[] =
971 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
972 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
973 static const char alt_10[] =
974 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
975 /* data16
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_11[] =
978 {0x66,
979 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
980 /* data16
981 data16
982 nopw %cs:0L(%[re]ax,%[re]ax,1) */
983 static const char alt_long_12[] =
984 {0x66,
985 0x66,
986 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
987 /* data16
988 data16
989 data16
990 nopw %cs:0L(%[re]ax,%[re]ax,1) */
991 static const char alt_long_13[] =
992 {0x66,
993 0x66,
994 0x66,
995 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
996 /* data16
997 data16
998 data16
999 data16
1000 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1001 static const char alt_long_14[] =
1002 {0x66,
1003 0x66,
1004 0x66,
1005 0x66,
1006 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1007 /* data16
1008 data16
1009 data16
1010 data16
1011 data16
1012 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1013 static const char alt_long_15[] =
1014 {0x66,
1015 0x66,
1016 0x66,
1017 0x66,
1018 0x66,
1019 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1020 /* nopl 0(%[re]ax,%[re]ax,1)
1021 nopw 0(%[re]ax,%[re]ax,1) */
1022 static const char alt_short_11[] =
1023 {0x0f,0x1f,0x44,0x00,0x00,
1024 0x66,0x0f,0x1f,0x44,0x00,0x00};
1025 /* nopw 0(%[re]ax,%[re]ax,1)
1026 nopw 0(%[re]ax,%[re]ax,1) */
1027 static const char alt_short_12[] =
1028 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1029 0x66,0x0f,0x1f,0x44,0x00,0x00};
1030 /* nopw 0(%[re]ax,%[re]ax,1)
1031 nopl 0L(%[re]ax) */
1032 static const char alt_short_13[] =
1033 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1034 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1035 /* nopl 0L(%[re]ax)
1036 nopl 0L(%[re]ax) */
1037 static const char alt_short_14[] =
1038 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1039 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1040 /* nopl 0L(%[re]ax)
1041 nopl 0L(%[re]ax,%[re]ax,1) */
1042 static const char alt_short_15[] =
1043 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1044 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1045 static const char *const alt_short_patt[] = {
1046 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1047 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1048 alt_short_14, alt_short_15
1049 };
1050 static const char *const alt_long_patt[] = {
1051 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1052 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1053 alt_long_14, alt_long_15
1054 };
1055
 1056   /* Only align if COUNT is positive and within MAX_MEM_FOR_RS_ALIGN_CODE.  */
1057 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1058 return;
1059
1060 /* We need to decide which NOP sequence to use for 32bit and
1061 64bit. When -mtune= is used:
1062
1063 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1064 PROCESSOR_GENERIC32, f32_patt will be used.
1065 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1066 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1067 PROCESSOR_GENERIC64, alt_long_patt will be used.
1068 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1069 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1070 will be used.
1071
1072 When -mtune= isn't used, alt_long_patt will be used if
1073 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1074 be used.
1075
1076 When -march= or .arch is used, we can't use anything beyond
1077 cpu_arch_isa_flags. */
1078
1079 if (flag_code == CODE_16BIT)
1080 {
1081 if (count > 8)
1082 {
1083 memcpy (fragP->fr_literal + fragP->fr_fix,
1084 jump_31, count);
1085 /* Adjust jump offset. */
1086 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1087 }
1088 else
1089 memcpy (fragP->fr_literal + fragP->fr_fix,
1090 f16_patt[count - 1], count);
1091 }
1092 else
1093 {
1094 const char *const *patt = NULL;
1095
1096 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1097 {
1098 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1099 switch (cpu_arch_tune)
1100 {
1101 case PROCESSOR_UNKNOWN:
1102 /* We use cpu_arch_isa_flags to check if we SHOULD
1103 optimize with nops. */
1104 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1105 patt = alt_long_patt;
1106 else
1107 patt = f32_patt;
1108 break;
1109 case PROCESSOR_PENTIUM4:
1110 case PROCESSOR_NOCONA:
1111 case PROCESSOR_CORE:
1112 case PROCESSOR_CORE2:
1113 case PROCESSOR_COREI7:
1114 case PROCESSOR_L1OM:
1115 case PROCESSOR_K1OM:
1116 case PROCESSOR_GENERIC64:
1117 patt = alt_long_patt;
1118 break;
1119 case PROCESSOR_K6:
1120 case PROCESSOR_ATHLON:
1121 case PROCESSOR_K8:
1122 case PROCESSOR_AMDFAM10:
1123 case PROCESSOR_BD:
1124 case PROCESSOR_BT:
1125 patt = alt_short_patt;
1126 break;
1127 case PROCESSOR_I386:
1128 case PROCESSOR_I486:
1129 case PROCESSOR_PENTIUM:
1130 case PROCESSOR_PENTIUMPRO:
1131 case PROCESSOR_GENERIC32:
1132 patt = f32_patt;
1133 break;
1134 }
1135 }
1136 else
1137 {
1138 switch (fragP->tc_frag_data.tune)
1139 {
1140 case PROCESSOR_UNKNOWN:
1141 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1142 PROCESSOR_UNKNOWN. */
1143 abort ();
1144 break;
1145
1146 case PROCESSOR_I386:
1147 case PROCESSOR_I486:
1148 case PROCESSOR_PENTIUM:
1149 case PROCESSOR_K6:
1150 case PROCESSOR_ATHLON:
1151 case PROCESSOR_K8:
1152 case PROCESSOR_AMDFAM10:
1153 case PROCESSOR_BD:
1154 case PROCESSOR_BT:
1155 case PROCESSOR_GENERIC32:
1156 /* We use cpu_arch_isa_flags to check if we CAN optimize
1157 with nops. */
1158 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1159 patt = alt_short_patt;
1160 else
1161 patt = f32_patt;
1162 break;
1163 case PROCESSOR_PENTIUMPRO:
1164 case PROCESSOR_PENTIUM4:
1165 case PROCESSOR_NOCONA:
1166 case PROCESSOR_CORE:
1167 case PROCESSOR_CORE2:
1168 case PROCESSOR_COREI7:
1169 case PROCESSOR_L1OM:
1170 case PROCESSOR_K1OM:
1171 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1172 patt = alt_long_patt;
1173 else
1174 patt = f32_patt;
1175 break;
1176 case PROCESSOR_GENERIC64:
1177 patt = alt_long_patt;
1178 break;
1179 }
1180 }
1181
1182 if (patt == f32_patt)
1183 {
 1184 	  /* If the padding is less than the limit computed below, we use
 1185 	     the normal patterns.  Otherwise, we use a jump instruction and
 1186 	     adjust its offset.  */
1187 int limit;
1188
1189 /* For 64bit, the limit is 3 bytes. */
1190 if (flag_code == CODE_64BIT
1191 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1192 limit = 3;
1193 else
1194 limit = 15;
1195 if (count < limit)
1196 memcpy (fragP->fr_literal + fragP->fr_fix,
1197 patt[count - 1], count);
1198 else
1199 {
1200 memcpy (fragP->fr_literal + fragP->fr_fix,
1201 jump_31, count);
1202 /* Adjust jump offset. */
1203 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1204 }
1205 }
1206 else
1207 {
 1208 	  /* Maximum length of an instruction is 15 bytes.  If the
1209 padding is greater than 15 bytes and we don't use jump,
1210 we have to break it into smaller pieces. */
1211 int padding = count;
1212 while (padding > 15)
1213 {
1214 padding -= 15;
1215 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1216 patt [14], 15);
1217 }
1218
1219 if (padding)
1220 memcpy (fragP->fr_literal + fragP->fr_fix,
1221 patt [padding - 1], padding);
1222 }
1223 }
1224 fragP->fr_var = count;
1225 }
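/* Annotation (not from the upstream sources): for the non-f32_patt case
   above, a request for, say, 20 bytes of padding is split as the loop
   describes: one 15 byte no-op (patt[14]) is copied at offset 5 and the
   remaining 5 bytes are filled with patt[4] at offset 0, so no single
   no-op exceeds the 15 byte instruction length limit.  */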
1226
1227 static INLINE int
1228 operand_type_all_zero (const union i386_operand_type *x)
1229 {
1230 switch (ARRAY_SIZE(x->array))
1231 {
1232 case 3:
1233 if (x->array[2])
1234 return 0;
1235 case 2:
1236 if (x->array[1])
1237 return 0;
1238 case 1:
1239 return !x->array[0];
1240 default:
1241 abort ();
1242 }
1243 }
1244
1245 static INLINE void
1246 operand_type_set (union i386_operand_type *x, unsigned int v)
1247 {
1248 switch (ARRAY_SIZE(x->array))
1249 {
1250 case 3:
1251 x->array[2] = v;
1252 case 2:
1253 x->array[1] = v;
1254 case 1:
1255 x->array[0] = v;
1256 break;
1257 default:
1258 abort ();
1259 }
1260 }
1261
1262 static INLINE int
1263 operand_type_equal (const union i386_operand_type *x,
1264 const union i386_operand_type *y)
1265 {
1266 switch (ARRAY_SIZE(x->array))
1267 {
1268 case 3:
1269 if (x->array[2] != y->array[2])
1270 return 0;
1271 case 2:
1272 if (x->array[1] != y->array[1])
1273 return 0;
1274 case 1:
1275 return x->array[0] == y->array[0];
1276 break;
1277 default:
1278 abort ();
1279 }
1280 }
1281
1282 static INLINE int
1283 cpu_flags_all_zero (const union i386_cpu_flags *x)
1284 {
1285 switch (ARRAY_SIZE(x->array))
1286 {
1287 case 3:
1288 if (x->array[2])
1289 return 0;
1290 case 2:
1291 if (x->array[1])
1292 return 0;
1293 case 1:
1294 return !x->array[0];
1295 default:
1296 abort ();
1297 }
1298 }
1299
1300 static INLINE void
1301 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1302 {
1303 switch (ARRAY_SIZE(x->array))
1304 {
1305 case 3:
1306 x->array[2] = v;
1307 case 2:
1308 x->array[1] = v;
1309 case 1:
1310 x->array[0] = v;
1311 break;
1312 default:
1313 abort ();
1314 }
1315 }
1316
1317 static INLINE int
1318 cpu_flags_equal (const union i386_cpu_flags *x,
1319 const union i386_cpu_flags *y)
1320 {
1321 switch (ARRAY_SIZE(x->array))
1322 {
1323 case 3:
1324 if (x->array[2] != y->array[2])
1325 return 0;
1326 case 2:
1327 if (x->array[1] != y->array[1])
1328 return 0;
1329 case 1:
1330 return x->array[0] == y->array[0];
1331 break;
1332 default:
1333 abort ();
1334 }
1335 }
1336
1337 static INLINE int
1338 cpu_flags_check_cpu64 (i386_cpu_flags f)
1339 {
1340 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1341 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1342 }
1343
1344 static INLINE i386_cpu_flags
1345 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1346 {
1347 switch (ARRAY_SIZE (x.array))
1348 {
1349 case 3:
1350 x.array [2] &= y.array [2];
1351 case 2:
1352 x.array [1] &= y.array [1];
1353 case 1:
1354 x.array [0] &= y.array [0];
1355 break;
1356 default:
1357 abort ();
1358 }
1359 return x;
1360 }
1361
1362 static INLINE i386_cpu_flags
1363 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1364 {
1365 switch (ARRAY_SIZE (x.array))
1366 {
1367 case 3:
1368 x.array [2] |= y.array [2];
1369 case 2:
1370 x.array [1] |= y.array [1];
1371 case 1:
1372 x.array [0] |= y.array [0];
1373 break;
1374 default:
1375 abort ();
1376 }
1377 return x;
1378 }
1379
1380 static INLINE i386_cpu_flags
1381 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1382 {
1383 switch (ARRAY_SIZE (x.array))
1384 {
1385 case 3:
1386 x.array [2] &= ~y.array [2];
1387 case 2:
1388 x.array [1] &= ~y.array [1];
1389 case 1:
1390 x.array [0] &= ~y.array [0];
1391 break;
1392 default:
1393 abort ();
1394 }
1395 return x;
1396 }
1397
1398 #define CPU_FLAGS_ARCH_MATCH 0x1
1399 #define CPU_FLAGS_64BIT_MATCH 0x2
1400 #define CPU_FLAGS_AES_MATCH 0x4
1401 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1402 #define CPU_FLAGS_AVX_MATCH 0x10
1403
1404 #define CPU_FLAGS_32BIT_MATCH \
1405 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1406 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1407 #define CPU_FLAGS_PERFECT_MATCH \
1408 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1409
1410 /* Return CPU flags match bits. */
1411
1412 static int
1413 cpu_flags_match (const insn_template *t)
1414 {
1415 i386_cpu_flags x = t->cpu_flags;
1416 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1417
1418 x.bitfield.cpu64 = 0;
1419 x.bitfield.cpuno64 = 0;
1420
1421 if (cpu_flags_all_zero (&x))
1422 {
1423 /* This instruction is available on all archs. */
1424 match |= CPU_FLAGS_32BIT_MATCH;
1425 }
1426 else
1427 {
1428 /* This instruction is available only on some archs. */
1429 i386_cpu_flags cpu = cpu_arch_flags;
1430
1431 cpu.bitfield.cpu64 = 0;
1432 cpu.bitfield.cpuno64 = 0;
1433 cpu = cpu_flags_and (x, cpu);
1434 if (!cpu_flags_all_zero (&cpu))
1435 {
1436 if (x.bitfield.cpuavx)
1437 {
1438 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1439 if (cpu.bitfield.cpuavx)
1440 {
1441 /* Check SSE2AVX. */
 1442 	      if (!t->opcode_modifier.sse2avx || sse2avx)
1443 {
1444 match |= (CPU_FLAGS_ARCH_MATCH
1445 | CPU_FLAGS_AVX_MATCH);
1446 /* Check AES. */
1447 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1448 match |= CPU_FLAGS_AES_MATCH;
1449 /* Check PCLMUL. */
1450 if (!x.bitfield.cpupclmul
1451 || cpu.bitfield.cpupclmul)
1452 match |= CPU_FLAGS_PCLMUL_MATCH;
1453 }
1454 }
1455 else
1456 match |= CPU_FLAGS_ARCH_MATCH;
1457 }
1458 else
1459 match |= CPU_FLAGS_32BIT_MATCH;
1460 }
1461 }
1462 return match;
1463 }
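/* Annotation (not from the upstream sources): with the bit definitions
   above, a template whose cpu_flags are empty (available everywhere) and
   compatible with the current 64 bit setting yields
   CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH == CPU_FLAGS_PERFECT_MATCH
   (0x1f), so callers can test the result against these masks.  */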
1464
1465 static INLINE i386_operand_type
1466 operand_type_and (i386_operand_type x, i386_operand_type y)
1467 {
1468 switch (ARRAY_SIZE (x.array))
1469 {
1470 case 3:
1471 x.array [2] &= y.array [2];
1472 case 2:
1473 x.array [1] &= y.array [1];
1474 case 1:
1475 x.array [0] &= y.array [0];
1476 break;
1477 default:
1478 abort ();
1479 }
1480 return x;
1481 }
1482
1483 static INLINE i386_operand_type
1484 operand_type_or (i386_operand_type x, i386_operand_type y)
1485 {
1486 switch (ARRAY_SIZE (x.array))
1487 {
1488 case 3:
1489 x.array [2] |= y.array [2];
1490 case 2:
1491 x.array [1] |= y.array [1];
1492 case 1:
1493 x.array [0] |= y.array [0];
1494 break;
1495 default:
1496 abort ();
1497 }
1498 return x;
1499 }
1500
1501 static INLINE i386_operand_type
1502 operand_type_xor (i386_operand_type x, i386_operand_type y)
1503 {
1504 switch (ARRAY_SIZE (x.array))
1505 {
1506 case 3:
1507 x.array [2] ^= y.array [2];
1508 case 2:
1509 x.array [1] ^= y.array [1];
1510 case 1:
1511 x.array [0] ^= y.array [0];
1512 break;
1513 default:
1514 abort ();
1515 }
1516 return x;
1517 }
1518
1519 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1520 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1521 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1522 static const i386_operand_type inoutportreg
1523 = OPERAND_TYPE_INOUTPORTREG;
1524 static const i386_operand_type reg16_inoutportreg
1525 = OPERAND_TYPE_REG16_INOUTPORTREG;
1526 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1527 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1528 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1529 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1530 static const i386_operand_type anydisp
1531 = OPERAND_TYPE_ANYDISP;
1532 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1533 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1534 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1535 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1536 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1537 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1538 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1539 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1540 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1541 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1542 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1543 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1544
1545 enum operand_type
1546 {
1547 reg,
1548 imm,
1549 disp,
1550 anymem
1551 };
1552
1553 static INLINE int
1554 operand_type_check (i386_operand_type t, enum operand_type c)
1555 {
1556 switch (c)
1557 {
1558 case reg:
1559 return (t.bitfield.reg8
1560 || t.bitfield.reg16
1561 || t.bitfield.reg32
1562 || t.bitfield.reg64);
1563
1564 case imm:
1565 return (t.bitfield.imm8
1566 || t.bitfield.imm8s
1567 || t.bitfield.imm16
1568 || t.bitfield.imm32
1569 || t.bitfield.imm32s
1570 || t.bitfield.imm64);
1571
1572 case disp:
1573 return (t.bitfield.disp8
1574 || t.bitfield.disp16
1575 || t.bitfield.disp32
1576 || t.bitfield.disp32s
1577 || t.bitfield.disp64);
1578
1579 case anymem:
1580 return (t.bitfield.disp8
1581 || t.bitfield.disp16
1582 || t.bitfield.disp32
1583 || t.bitfield.disp32s
1584 || t.bitfield.disp64
1585 || t.bitfield.baseindex);
1586
1587 default:
1588 abort ();
1589 }
1590
1591 return 0;
1592 }
1593
1594 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1595 operand J for instruction template T. */
1596
1597 static INLINE int
1598 match_reg_size (const insn_template *t, unsigned int j)
1599 {
1600 return !((i.types[j].bitfield.byte
1601 && !t->operand_types[j].bitfield.byte)
1602 || (i.types[j].bitfield.word
1603 && !t->operand_types[j].bitfield.word)
1604 || (i.types[j].bitfield.dword
1605 && !t->operand_types[j].bitfield.dword)
1606 || (i.types[j].bitfield.qword
1607 && !t->operand_types[j].bitfield.qword));
1608 }
1609
1610 /* Return 1 if there is no conflict in any size on operand J for
1611 instruction template T. */
1612
1613 static INLINE int
1614 match_mem_size (const insn_template *t, unsigned int j)
1615 {
1616 return (match_reg_size (t, j)
1617 && !((i.types[j].bitfield.unspecified
1618 && !t->operand_types[j].bitfield.unspecified)
1619 || (i.types[j].bitfield.fword
1620 && !t->operand_types[j].bitfield.fword)
1621 || (i.types[j].bitfield.tbyte
1622 && !t->operand_types[j].bitfield.tbyte)
1623 || (i.types[j].bitfield.xmmword
1624 && !t->operand_types[j].bitfield.xmmword)
1625 || (i.types[j].bitfield.ymmword
1626 && !t->operand_types[j].bitfield.ymmword)));
1627 }
1628
1629 /* Return 1 if there is no size conflict on any operands for
1630 instruction template T. */
1631
1632 static INLINE int
1633 operand_size_match (const insn_template *t)
1634 {
1635 unsigned int j;
1636 int match = 1;
1637
1638 /* Don't check jump instructions. */
1639 if (t->opcode_modifier.jump
1640 || t->opcode_modifier.jumpbyte
1641 || t->opcode_modifier.jumpdword
1642 || t->opcode_modifier.jumpintersegment)
1643 return match;
1644
1645 /* Check memory and accumulator operand size. */
1646 for (j = 0; j < i.operands; j++)
1647 {
1648 if (t->operand_types[j].bitfield.anysize)
1649 continue;
1650
1651 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1652 {
1653 match = 0;
1654 break;
1655 }
1656
1657 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1658 {
1659 match = 0;
1660 break;
1661 }
1662 }
1663
1664 if (match)
1665 return match;
1666 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1667 {
1668 mismatch:
1669 i.error = operand_size_mismatch;
1670 return 0;
1671 }
1672
1673 /* Check reverse. */
1674 gas_assert (i.operands == 2);
1675
1676 match = 1;
1677 for (j = 0; j < 2; j++)
1678 {
1679 if (t->operand_types[j].bitfield.acc
1680 && !match_reg_size (t, j ? 0 : 1))
1681 goto mismatch;
1682
1683 if (i.types[j].bitfield.mem
1684 && !match_mem_size (t, j ? 0 : 1))
1685 goto mismatch;
1686 }
1687
1688 return match;
1689 }
1690
1691 static INLINE int
1692 operand_type_match (i386_operand_type overlap,
1693 i386_operand_type given)
1694 {
1695 i386_operand_type temp = overlap;
1696
1697 temp.bitfield.jumpabsolute = 0;
1698 temp.bitfield.unspecified = 0;
1699 temp.bitfield.byte = 0;
1700 temp.bitfield.word = 0;
1701 temp.bitfield.dword = 0;
1702 temp.bitfield.fword = 0;
1703 temp.bitfield.qword = 0;
1704 temp.bitfield.tbyte = 0;
1705 temp.bitfield.xmmword = 0;
1706 temp.bitfield.ymmword = 0;
1707 if (operand_type_all_zero (&temp))
1708 goto mismatch;
1709
1710 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1711 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1712 return 1;
1713
1714 mismatch:
1715 i.error = operand_type_mismatch;
1716 return 0;
1717 }
1718
 1719 /* If the given types g0 and g1 are registers, they must be of the same type
1720 unless the expected operand type register overlap is null.
1721 Note that Acc in a template matches every size of reg. */
1722
1723 static INLINE int
1724 operand_type_register_match (i386_operand_type m0,
1725 i386_operand_type g0,
1726 i386_operand_type t0,
1727 i386_operand_type m1,
1728 i386_operand_type g1,
1729 i386_operand_type t1)
1730 {
1731 if (!operand_type_check (g0, reg))
1732 return 1;
1733
1734 if (!operand_type_check (g1, reg))
1735 return 1;
1736
1737 if (g0.bitfield.reg8 == g1.bitfield.reg8
1738 && g0.bitfield.reg16 == g1.bitfield.reg16
1739 && g0.bitfield.reg32 == g1.bitfield.reg32
1740 && g0.bitfield.reg64 == g1.bitfield.reg64)
1741 return 1;
1742
1743 if (m0.bitfield.acc)
1744 {
1745 t0.bitfield.reg8 = 1;
1746 t0.bitfield.reg16 = 1;
1747 t0.bitfield.reg32 = 1;
1748 t0.bitfield.reg64 = 1;
1749 }
1750
1751 if (m1.bitfield.acc)
1752 {
1753 t1.bitfield.reg8 = 1;
1754 t1.bitfield.reg16 = 1;
1755 t1.bitfield.reg32 = 1;
1756 t1.bitfield.reg64 = 1;
1757 }
1758
1759 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1760 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1761 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1762 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1763 return 1;
1764
1765 i.error = register_type_mismatch;
1766
1767 return 0;
1768 }
1769
1770 static INLINE unsigned int
1771 register_number (const reg_entry *r)
1772 {
1773 unsigned int nr = r->reg_num;
1774
1775 if (r->reg_flags & RegRex)
1776 nr += 8;
1777
1778 return nr;
1779 }
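/* Example (annotation; assumes the register table gives %r9 reg_num 1
   with RegRex set): register_number () then returns 1 + 8 == 9, the
   hardware encoding used in REX-prefixed instructions.  */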
1780
1781 static INLINE unsigned int
1782 mode_from_disp_size (i386_operand_type t)
1783 {
1784 if (t.bitfield.disp8)
1785 return 1;
1786 else if (t.bitfield.disp16
1787 || t.bitfield.disp32
1788 || t.bitfield.disp32s)
1789 return 2;
1790 else
1791 return 0;
1792 }
1793
1794 static INLINE int
1795 fits_in_signed_byte (offsetT num)
1796 {
1797 return (num >= -128) && (num <= 127);
1798 }
1799
1800 static INLINE int
1801 fits_in_unsigned_byte (offsetT num)
1802 {
1803 return (num & 0xff) == num;
1804 }
1805
1806 static INLINE int
1807 fits_in_unsigned_word (offsetT num)
1808 {
1809 return (num & 0xffff) == num;
1810 }
1811
1812 static INLINE int
1813 fits_in_signed_word (offsetT num)
1814 {
1815 return (-32768 <= num) && (num <= 32767);
1816 }
1817
1818 static INLINE int
1819 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1820 {
1821 #ifndef BFD64
1822 return 1;
1823 #else
1824 return (!(((offsetT) -1 << 31) & num)
1825 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1826 #endif
1827 } /* fits_in_signed_long() */
1828
1829 static INLINE int
1830 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1831 {
1832 #ifndef BFD64
1833 return 1;
1834 #else
1835 return (num & (((offsetT) 2 << 31) - 1)) == num;
1836 #endif
1837 } /* fits_in_unsigned_long() */
1838
1839 static INLINE int
1840 fits_in_imm4 (offsetT num)
1841 {
1842 return (num & 0xf) == num;
1843 }
1844
1845 static i386_operand_type
1846 smallest_imm_type (offsetT num)
1847 {
1848 i386_operand_type t;
1849
1850 operand_type_set (&t, 0);
1851 t.bitfield.imm64 = 1;
1852
1853 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1854 {
1855 /* This code is disabled on the 486 because all the Imm1 forms
1856 in the opcode table are slower on the i486. They're the
1857 versions with the implicitly specified single-position
1858 displacement, which has another syntax if you really want to
1859 use that form. */
1860 t.bitfield.imm1 = 1;
1861 t.bitfield.imm8 = 1;
1862 t.bitfield.imm8s = 1;
1863 t.bitfield.imm16 = 1;
1864 t.bitfield.imm32 = 1;
1865 t.bitfield.imm32s = 1;
1866 }
1867 else if (fits_in_signed_byte (num))
1868 {
1869 t.bitfield.imm8 = 1;
1870 t.bitfield.imm8s = 1;
1871 t.bitfield.imm16 = 1;
1872 t.bitfield.imm32 = 1;
1873 t.bitfield.imm32s = 1;
1874 }
1875 else if (fits_in_unsigned_byte (num))
1876 {
1877 t.bitfield.imm8 = 1;
1878 t.bitfield.imm16 = 1;
1879 t.bitfield.imm32 = 1;
1880 t.bitfield.imm32s = 1;
1881 }
1882 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1883 {
1884 t.bitfield.imm16 = 1;
1885 t.bitfield.imm32 = 1;
1886 t.bitfield.imm32s = 1;
1887 }
1888 else if (fits_in_signed_long (num))
1889 {
1890 t.bitfield.imm32 = 1;
1891 t.bitfield.imm32s = 1;
1892 }
1893 else if (fits_in_unsigned_long (num))
1894 t.bitfield.imm32 = 1;
1895
1896 return t;
1897 }
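/* Worked example (annotation, not from the upstream sources): for
   num == 200, fits_in_signed_byte () fails (200 > 127) but
   fits_in_unsigned_byte () succeeds, so the returned type allows
   Imm8/Imm16/Imm32/Imm32S (Imm64 is always set); for num == 1 the extra
   Imm1 bit is also set, unless tuning for the i486 as the comment in the
   function explains.  */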
1898
1899 static offsetT
1900 offset_in_range (offsetT val, int size)
1901 {
1902 addressT mask;
1903
1904 switch (size)
1905 {
1906 case 1: mask = ((addressT) 1 << 8) - 1; break;
1907 case 2: mask = ((addressT) 1 << 16) - 1; break;
1908 case 4: mask = ((addressT) 2 << 31) - 1; break;
1909 #ifdef BFD64
1910 case 8: mask = ((addressT) 2 << 63) - 1; break;
1911 #endif
1912 default: abort ();
1913 }
1914
1915 #ifdef BFD64
1916 /* If BFD64, sign extend val for 32bit address mode. */
1917 if (flag_code != CODE_64BIT
1918 || i.prefix[ADDR_PREFIX])
1919 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1920 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1921 #endif
1922
1923 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1924 {
1925 char buf1[40], buf2[40];
1926
1927 sprint_value (buf1, val);
1928 sprint_value (buf2, val & mask);
1929 as_warn (_("%s shortened to %s"), buf1, buf2);
1930 }
1931 return val & mask;
1932 }
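/* Example (added, illustrative): offset_in_range (0x1234, 1) masks
the value down to 0x34 and emits the "shortened to" warning, whereas
offset_in_range (-1, 1) silently returns 0xff because every discarded
bit is a copy of the sign bit.  */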
1933
1934 enum PREFIX_GROUP
1935 {
1936 PREFIX_EXIST = 0,
1937 PREFIX_LOCK,
1938 PREFIX_REP,
1939 PREFIX_OTHER
1940 };
1941
1942 /* Returns
1943 a. PREFIX_EXIST if attempting to add a prefix where one from the
1944 same class already exists.
1945 b. PREFIX_LOCK if lock prefix is added.
1946 c. PREFIX_REP if rep/repne prefix is added.
1947 d. PREFIX_OTHER if other prefix is added.
1948 */
1949
1950 static enum PREFIX_GROUP
1951 add_prefix (unsigned int prefix)
1952 {
1953 enum PREFIX_GROUP ret = PREFIX_OTHER;
1954 unsigned int q;
1955
1956 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1957 && flag_code == CODE_64BIT)
1958 {
1959 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1960 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1961 && (prefix & (REX_R | REX_X | REX_B))))
1962 ret = PREFIX_EXIST;
1963 q = REX_PREFIX;
1964 }
1965 else
1966 {
1967 switch (prefix)
1968 {
1969 default:
1970 abort ();
1971
1972 case CS_PREFIX_OPCODE:
1973 case DS_PREFIX_OPCODE:
1974 case ES_PREFIX_OPCODE:
1975 case FS_PREFIX_OPCODE:
1976 case GS_PREFIX_OPCODE:
1977 case SS_PREFIX_OPCODE:
1978 q = SEG_PREFIX;
1979 break;
1980
1981 case REPNE_PREFIX_OPCODE:
1982 case REPE_PREFIX_OPCODE:
1983 q = REP_PREFIX;
1984 ret = PREFIX_REP;
1985 break;
1986
1987 case LOCK_PREFIX_OPCODE:
1988 q = LOCK_PREFIX;
1989 ret = PREFIX_LOCK;
1990 break;
1991
1992 case FWAIT_OPCODE:
1993 q = WAIT_PREFIX;
1994 break;
1995
1996 case ADDR_PREFIX_OPCODE:
1997 q = ADDR_PREFIX;
1998 break;
1999
2000 case DATA_PREFIX_OPCODE:
2001 q = DATA_PREFIX;
2002 break;
2003 }
2004 if (i.prefix[q] != 0)
2005 ret = PREFIX_EXIST;
2006 }
2007
2008 if (ret)
2009 {
2010 if (!i.prefix[q])
2011 ++i.prefixes;
2012 i.prefix[q] |= prefix;
2013 }
2014 else
2015 as_bad (_("same type of prefix used twice"));
2016
2017 return ret;
2018 }
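/* Usage sketch (added): callers pass the raw prefix byte and it is
filed into the i.prefix[] slot for its class.  For instance
add_prefix (LOCK_PREFIX_OPCODE) fills i.prefix[LOCK_PREFIX] and
returns PREFIX_LOCK, while a second LOCK on the same insn comes back
as PREFIX_EXIST alongside the "same type of prefix used twice"
diagnostic.  REX bytes are the exception: their bits are OR-ed into
i.prefix[REX_PREFIX] unless REX_W is repeated or both the old and the
new prefix carry R/X/B bits.  */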
2019
2020 static void
2021 update_code_flag (int value, int check)
2022 {
2023 PRINTF_LIKE ((*as_error));
2024
2025 flag_code = (enum flag_code) value;
2026 if (flag_code == CODE_64BIT)
2027 {
2028 cpu_arch_flags.bitfield.cpu64 = 1;
2029 cpu_arch_flags.bitfield.cpuno64 = 0;
2030 }
2031 else
2032 {
2033 cpu_arch_flags.bitfield.cpu64 = 0;
2034 cpu_arch_flags.bitfield.cpuno64 = 1;
2035 }
2036 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)

2037 {
2038 if (check)
2039 as_error = as_fatal;
2040 else
2041 as_error = as_bad;
2042 (*as_error) (_("64bit mode not supported on `%s'."),
2043 cpu_arch_name ? cpu_arch_name : default_arch);
2044 }
2045 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2046 {
2047 if (check)
2048 as_error = as_fatal;
2049 else
2050 as_error = as_bad;
2051 (*as_error) (_("32bit mode not supported on `%s'."),
2052 cpu_arch_name ? cpu_arch_name : default_arch);
2053 }
2054 stackop_size = '\0';
2055 }
2056
2057 static void
2058 set_code_flag (int value)
2059 {
2060 update_code_flag (value, 0);
2061 }
2062
2063 static void
2064 set_16bit_gcc_code_flag (int new_code_flag)
2065 {
2066 flag_code = (enum flag_code) new_code_flag;
2067 if (flag_code != CODE_16BIT)
2068 abort ();
2069 cpu_arch_flags.bitfield.cpu64 = 0;
2070 cpu_arch_flags.bitfield.cpuno64 = 1;
2071 stackop_size = LONG_MNEM_SUFFIX;
2072 }
2073
2074 static void
2075 set_intel_syntax (int syntax_flag)
2076 {
2077 /* Find out if register prefixing is specified. */
2078 int ask_naked_reg = 0;
2079
2080 SKIP_WHITESPACE ();
2081 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2082 {
2083 char *string = input_line_pointer;
2084 int e = get_symbol_end ();
2085
2086 if (strcmp (string, "prefix") == 0)
2087 ask_naked_reg = 1;
2088 else if (strcmp (string, "noprefix") == 0)
2089 ask_naked_reg = -1;
2090 else
2091 as_bad (_("bad argument to syntax directive."));
2092 *input_line_pointer = e;
2093 }
2094 demand_empty_rest_of_line ();
2095
2096 intel_syntax = syntax_flag;
2097
2098 if (ask_naked_reg == 0)
2099 allow_naked_reg = (intel_syntax
2100 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2101 else
2102 allow_naked_reg = (ask_naked_reg < 0);
2103
2104 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2105
2106 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2107 identifier_chars['$'] = intel_syntax ? '$' : 0;
2108 register_prefix = allow_naked_reg ? "" : "%";
2109 }
2110
2111 static void
2112 set_intel_mnemonic (int mnemonic_flag)
2113 {
2114 intel_mnemonic = mnemonic_flag;
2115 }
2116
2117 static void
2118 set_allow_index_reg (int flag)
2119 {
2120 allow_index_reg = flag;
2121 }
2122
2123 static void
2124 set_check (int what)
2125 {
2126 enum check_kind *kind;
2127 const char *str;
2128
2129 if (what)
2130 {
2131 kind = &operand_check;
2132 str = "operand";
2133 }
2134 else
2135 {
2136 kind = &sse_check;
2137 str = "sse";
2138 }
2139
2140 SKIP_WHITESPACE ();
2141
2142 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2143 {
2144 char *string = input_line_pointer;
2145 int e = get_symbol_end ();
2146
2147 if (strcmp (string, "none") == 0)
2148 *kind = check_none;
2149 else if (strcmp (string, "warning") == 0)
2150 *kind = check_warning;
2151 else if (strcmp (string, "error") == 0)
2152 *kind = check_error;
2153 else
2154 as_bad (_("bad argument to %s_check directive."), str);
2155 *input_line_pointer = e;
2156 }
2157 else
2158 as_bad (_("missing argument for %s_check directive"), str);
2159
2160 demand_empty_rest_of_line ();
2161 }
2162
2163 static void
2164 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2165 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2166 {
2167 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2168 static const char *arch;
2169
2170 /* Intel L1OM and K1OM are only supported on ELF. */
2171 if (!IS_ELF)
2172 return;
2173
2174 if (!arch)
2175 {
2176 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2177 use default_arch. */
2178 arch = cpu_arch_name;
2179 if (!arch)
2180 arch = default_arch;
2181 }
2182
2183 /* If we are targeting Intel L1OM, we must enable it. */
2184 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2185 || new_flag.bitfield.cpul1om)
2186 return;
2187
2188 /* If we are targeting Intel K1OM, we must enable it. */
2189 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2190 || new_flag.bitfield.cpuk1om)
2191 return;
2192
2193 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2194 #endif
2195 }
2196
2197 static void
2198 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2199 {
2200 SKIP_WHITESPACE ();
2201
2202 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2203 {
2204 char *string = input_line_pointer;
2205 int e = get_symbol_end ();
2206 unsigned int j;
2207 i386_cpu_flags flags;
2208
2209 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2210 {
2211 if (strcmp (string, cpu_arch[j].name) == 0)
2212 {
2213 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2214
2215 if (*string != '.')
2216 {
2217 cpu_arch_name = cpu_arch[j].name;
2218 cpu_sub_arch_name = NULL;
2219 cpu_arch_flags = cpu_arch[j].flags;
2220 if (flag_code == CODE_64BIT)
2221 {
2222 cpu_arch_flags.bitfield.cpu64 = 1;
2223 cpu_arch_flags.bitfield.cpuno64 = 0;
2224 }
2225 else
2226 {
2227 cpu_arch_flags.bitfield.cpu64 = 0;
2228 cpu_arch_flags.bitfield.cpuno64 = 1;
2229 }
2230 cpu_arch_isa = cpu_arch[j].type;
2231 cpu_arch_isa_flags = cpu_arch[j].flags;
2232 if (!cpu_arch_tune_set)
2233 {
2234 cpu_arch_tune = cpu_arch_isa;
2235 cpu_arch_tune_flags = cpu_arch_isa_flags;
2236 }
2237 break;
2238 }
2239
2240 if (!cpu_arch[j].negated)
2241 flags = cpu_flags_or (cpu_arch_flags,
2242 cpu_arch[j].flags);
2243 else
2244 flags = cpu_flags_and_not (cpu_arch_flags,
2245 cpu_arch[j].flags);
2246 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2247 {
2248 if (cpu_sub_arch_name)
2249 {
2250 char *name = cpu_sub_arch_name;
2251 cpu_sub_arch_name = concat (name,
2252 cpu_arch[j].name,
2253 (const char *) NULL);
2254 free (name);
2255 }
2256 else
2257 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2258 cpu_arch_flags = flags;
2259 cpu_arch_isa_flags = flags;
2260 }
2261 *input_line_pointer = e;
2262 demand_empty_rest_of_line ();
2263 return;
2264 }
2265 }
2266 if (j >= ARRAY_SIZE (cpu_arch))
2267 as_bad (_("no such architecture: `%s'"), string);
2268
2269 *input_line_pointer = e;
2270 }
2271 else
2272 as_bad (_("missing cpu architecture"));
2273
2274 no_cond_jump_promotion = 0;
2275 if (*input_line_pointer == ','
2276 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2277 {
2278 char *string = ++input_line_pointer;
2279 int e = get_symbol_end ();
2280
2281 if (strcmp (string, "nojumps") == 0)
2282 no_cond_jump_promotion = 1;
2283 else if (strcmp (string, "jumps") == 0)
2284 ;
2285 else
2286 as_bad (_("no such architecture modifier: `%s'"), string);
2287
2288 *input_line_pointer = e;
2289 }
2290
2291 demand_empty_rest_of_line ();
2292 }
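/* Directive examples (added for illustration): the handler above
accepts both full architecture names and '.'-prefixed feature names,
so input such as

	.arch i486, nojumps
	.arch .sse4.1

either switches the base architecture (optionally disabling
conditional jump promotion) or merely ORs an extra feature set into
cpu_arch_flags, assuming those names appear in the cpu_arch[] table
of this build.  */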
2293
2294 enum bfd_architecture
2295 i386_arch (void)
2296 {
2297 if (cpu_arch_isa == PROCESSOR_L1OM)
2298 {
2299 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2300 || flag_code != CODE_64BIT)
2301 as_fatal (_("Intel L1OM is 64bit ELF only"));
2302 return bfd_arch_l1om;
2303 }
2304 else if (cpu_arch_isa == PROCESSOR_K1OM)
2305 {
2306 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2307 || flag_code != CODE_64BIT)
2308 as_fatal (_("Intel K1OM is 64bit ELF only"));
2309 return bfd_arch_k1om;
2310 }
2311 else
2312 return bfd_arch_i386;
2313 }
2314
2315 unsigned long
2316 i386_mach (void)
2317 {
2318 if (!strncmp (default_arch, "x86_64", 6))
2319 {
2320 if (cpu_arch_isa == PROCESSOR_L1OM)
2321 {
2322 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2323 || default_arch[6] != '\0')
2324 as_fatal (_("Intel L1OM is 64bit ELF only"));
2325 return bfd_mach_l1om;
2326 }
2327 else if (cpu_arch_isa == PROCESSOR_K1OM)
2328 {
2329 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2330 || default_arch[6] != '\0')
2331 as_fatal (_("Intel K1OM is 64bit ELF only"));
2332 return bfd_mach_k1om;
2333 }
2334 else if (default_arch[6] == '\0')
2335 return bfd_mach_x86_64;
2336 else
2337 return bfd_mach_x64_32;
2338 }
2339 else if (!strcmp (default_arch, "i386"))
2340 return bfd_mach_i386_i386;
2341 else
2342 as_fatal (_("unknown architecture"));
2343 }
2344 \f
2345 void
2346 md_begin (void)
2347 {
2348 const char *hash_err;
2349
2350 /* Initialize op_hash hash table. */
2351 op_hash = hash_new ();
2352
2353 {
2354 const insn_template *optab;
2355 templates *core_optab;
2356
2357 /* Setup for loop. */
2358 optab = i386_optab;
2359 core_optab = (templates *) xmalloc (sizeof (templates));
2360 core_optab->start = optab;
2361
2362 while (1)
2363 {
2364 ++optab;
2365 if (optab->name == NULL
2366 || strcmp (optab->name, (optab - 1)->name) != 0)
2367 {
2368 /* Different name --> ship out current template list,
2369 add it to the hash table, and begin anew. */
2370 core_optab->end = optab;
2371 hash_err = hash_insert (op_hash,
2372 (optab - 1)->name,
2373 (void *) core_optab);
2374 if (hash_err)
2375 {
2376 as_fatal (_("internal Error: Can't hash %s: %s"),
2377 (optab - 1)->name,
2378 hash_err);
2379 }
2380 if (optab->name == NULL)
2381 break;
2382 core_optab = (templates *) xmalloc (sizeof (templates));
2383 core_optab->start = optab;
2384 }
2385 }
2386 }
2387
2388 /* Initialize reg_hash hash table. */
2389 reg_hash = hash_new ();
2390 {
2391 const reg_entry *regtab;
2392 unsigned int regtab_size = i386_regtab_size;
2393
2394 for (regtab = i386_regtab; regtab_size--; regtab++)
2395 {
2396 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2397 if (hash_err)
2398 as_fatal (_("internal Error: Can't hash %s: %s"),
2399 regtab->reg_name,
2400 hash_err);
2401 }
2402 }
2403
2404 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2405 {
2406 int c;
2407 char *p;
2408
2409 for (c = 0; c < 256; c++)
2410 {
2411 if (ISDIGIT (c))
2412 {
2413 digit_chars[c] = c;
2414 mnemonic_chars[c] = c;
2415 register_chars[c] = c;
2416 operand_chars[c] = c;
2417 }
2418 else if (ISLOWER (c))
2419 {
2420 mnemonic_chars[c] = c;
2421 register_chars[c] = c;
2422 operand_chars[c] = c;
2423 }
2424 else if (ISUPPER (c))
2425 {
2426 mnemonic_chars[c] = TOLOWER (c);
2427 register_chars[c] = mnemonic_chars[c];
2428 operand_chars[c] = c;
2429 }
2430
2431 if (ISALPHA (c) || ISDIGIT (c))
2432 identifier_chars[c] = c;
2433 else if (c >= 128)
2434 {
2435 identifier_chars[c] = c;
2436 operand_chars[c] = c;
2437 }
2438 }
2439
2440 #ifdef LEX_AT
2441 identifier_chars['@'] = '@';
2442 #endif
2443 #ifdef LEX_QM
2444 identifier_chars['?'] = '?';
2445 operand_chars['?'] = '?';
2446 #endif
2447 digit_chars['-'] = '-';
2448 mnemonic_chars['_'] = '_';
2449 mnemonic_chars['-'] = '-';
2450 mnemonic_chars['.'] = '.';
2451 identifier_chars['_'] = '_';
2452 identifier_chars['.'] = '.';
2453
2454 for (p = operand_special_chars; *p != '\0'; p++)
2455 operand_chars[(unsigned char) *p] = *p;
2456 }
2457
2458 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2459 if (IS_ELF)
2460 {
2461 record_alignment (text_section, 2);
2462 record_alignment (data_section, 2);
2463 record_alignment (bss_section, 2);
2464 }
2465 #endif
2466
2467 if (flag_code == CODE_64BIT)
2468 {
2469 #if defined (OBJ_COFF) && defined (TE_PE)
2470 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2471 ? 32 : 16);
2472 #else
2473 x86_dwarf2_return_column = 16;
2474 #endif
2475 x86_cie_data_alignment = -8;
2476 }
2477 else
2478 {
2479 x86_dwarf2_return_column = 8;
2480 x86_cie_data_alignment = -4;
2481 }
2482 }
2483
2484 void
2485 i386_print_statistics (FILE *file)
2486 {
2487 hash_print_statistics (file, "i386 opcode", op_hash);
2488 hash_print_statistics (file, "i386 register", reg_hash);
2489 }
2490 \f
2491 #ifdef DEBUG386
2492
2493 /* Debugging routines for md_assemble. */
2494 static void pte (insn_template *);
2495 static void pt (i386_operand_type);
2496 static void pe (expressionS *);
2497 static void ps (symbolS *);
2498
2499 static void
2500 pi (char *line, i386_insn *x)
2501 {
2502 unsigned int j;
2503
2504 fprintf (stdout, "%s: template ", line);
2505 pte (&x->tm);
2506 fprintf (stdout, " address: base %s index %s scale %x\n",
2507 x->base_reg ? x->base_reg->reg_name : "none",
2508 x->index_reg ? x->index_reg->reg_name : "none",
2509 x->log2_scale_factor);
2510 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2511 x->rm.mode, x->rm.reg, x->rm.regmem);
2512 fprintf (stdout, " sib: base %x index %x scale %x\n",
2513 x->sib.base, x->sib.index, x->sib.scale);
2514 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2515 (x->rex & REX_W) != 0,
2516 (x->rex & REX_R) != 0,
2517 (x->rex & REX_X) != 0,
2518 (x->rex & REX_B) != 0);
2519 for (j = 0; j < x->operands; j++)
2520 {
2521 fprintf (stdout, " #%d: ", j + 1);
2522 pt (x->types[j]);
2523 fprintf (stdout, "\n");
2524 if (x->types[j].bitfield.reg8
2525 || x->types[j].bitfield.reg16
2526 || x->types[j].bitfield.reg32
2527 || x->types[j].bitfield.reg64
2528 || x->types[j].bitfield.regmmx
2529 || x->types[j].bitfield.regxmm
2530 || x->types[j].bitfield.regymm
2531 || x->types[j].bitfield.sreg2
2532 || x->types[j].bitfield.sreg3
2533 || x->types[j].bitfield.control
2534 || x->types[j].bitfield.debug
2535 || x->types[j].bitfield.test)
2536 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2537 if (operand_type_check (x->types[j], imm))
2538 pe (x->op[j].imms);
2539 if (operand_type_check (x->types[j], disp))
2540 pe (x->op[j].disps);
2541 }
2542 }
2543
2544 static void
2545 pte (insn_template *t)
2546 {
2547 unsigned int j;
2548 fprintf (stdout, " %d operands ", t->operands);
2549 fprintf (stdout, "opcode %x ", t->base_opcode);
2550 if (t->extension_opcode != None)
2551 fprintf (stdout, "ext %x ", t->extension_opcode);
2552 if (t->opcode_modifier.d)
2553 fprintf (stdout, "D");
2554 if (t->opcode_modifier.w)
2555 fprintf (stdout, "W");
2556 fprintf (stdout, "\n");
2557 for (j = 0; j < t->operands; j++)
2558 {
2559 fprintf (stdout, " #%d type ", j + 1);
2560 pt (t->operand_types[j]);
2561 fprintf (stdout, "\n");
2562 }
2563 }
2564
2565 static void
2566 pe (expressionS *e)
2567 {
2568 fprintf (stdout, " operation %d\n", e->X_op);
2569 fprintf (stdout, " add_number %ld (%lx)\n",
2570 (long) e->X_add_number, (long) e->X_add_number);
2571 if (e->X_add_symbol)
2572 {
2573 fprintf (stdout, " add_symbol ");
2574 ps (e->X_add_symbol);
2575 fprintf (stdout, "\n");
2576 }
2577 if (e->X_op_symbol)
2578 {
2579 fprintf (stdout, " op_symbol ");
2580 ps (e->X_op_symbol);
2581 fprintf (stdout, "\n");
2582 }
2583 }
2584
2585 static void
2586 ps (symbolS *s)
2587 {
2588 fprintf (stdout, "%s type %s%s",
2589 S_GET_NAME (s),
2590 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2591 segment_name (S_GET_SEGMENT (s)));
2592 }
2593
2594 static struct type_name
2595 {
2596 i386_operand_type mask;
2597 const char *name;
2598 }
2599 const type_names[] =
2600 {
2601 { OPERAND_TYPE_REG8, "r8" },
2602 { OPERAND_TYPE_REG16, "r16" },
2603 { OPERAND_TYPE_REG32, "r32" },
2604 { OPERAND_TYPE_REG64, "r64" },
2605 { OPERAND_TYPE_IMM8, "i8" },
2606 { OPERAND_TYPE_IMM8S, "i8s" },
2607 { OPERAND_TYPE_IMM16, "i16" },
2608 { OPERAND_TYPE_IMM32, "i32" },
2609 { OPERAND_TYPE_IMM32S, "i32s" },
2610 { OPERAND_TYPE_IMM64, "i64" },
2611 { OPERAND_TYPE_IMM1, "i1" },
2612 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2613 { OPERAND_TYPE_DISP8, "d8" },
2614 { OPERAND_TYPE_DISP16, "d16" },
2615 { OPERAND_TYPE_DISP32, "d32" },
2616 { OPERAND_TYPE_DISP32S, "d32s" },
2617 { OPERAND_TYPE_DISP64, "d64" },
2618 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2619 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2620 { OPERAND_TYPE_CONTROL, "control reg" },
2621 { OPERAND_TYPE_TEST, "test reg" },
2622 { OPERAND_TYPE_DEBUG, "debug reg" },
2623 { OPERAND_TYPE_FLOATREG, "FReg" },
2624 { OPERAND_TYPE_FLOATACC, "FAcc" },
2625 { OPERAND_TYPE_SREG2, "SReg2" },
2626 { OPERAND_TYPE_SREG3, "SReg3" },
2627 { OPERAND_TYPE_ACC, "Acc" },
2628 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2629 { OPERAND_TYPE_REGMMX, "rMMX" },
2630 { OPERAND_TYPE_REGXMM, "rXMM" },
2631 { OPERAND_TYPE_REGYMM, "rYMM" },
2632 { OPERAND_TYPE_ESSEG, "es" },
2633 };
2634
2635 static void
2636 pt (i386_operand_type t)
2637 {
2638 unsigned int j;
2639 i386_operand_type a;
2640
2641 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2642 {
2643 a = operand_type_and (t, type_names[j].mask);
2644 if (!operand_type_all_zero (&a))
2645 fprintf (stdout, "%s, ", type_names[j].name);
2646 }
2647 fflush (stdout);
2648 }
2649
2650 #endif /* DEBUG386 */
2651 \f
2652 static bfd_reloc_code_real_type
2653 reloc (unsigned int size,
2654 int pcrel,
2655 int sign,
2656 bfd_reloc_code_real_type other)
2657 {
2658 if (other != NO_RELOC)
2659 {
2660 reloc_howto_type *rel;
2661
2662 if (size == 8)
2663 switch (other)
2664 {
2665 case BFD_RELOC_X86_64_GOT32:
2666 return BFD_RELOC_X86_64_GOT64;
2667 break;
2668 case BFD_RELOC_X86_64_PLTOFF64:
2669 return BFD_RELOC_X86_64_PLTOFF64;
2670 break;
2671 case BFD_RELOC_X86_64_GOTPC32:
2672 other = BFD_RELOC_X86_64_GOTPC64;
2673 break;
2674 case BFD_RELOC_X86_64_GOTPCREL:
2675 other = BFD_RELOC_X86_64_GOTPCREL64;
2676 break;
2677 case BFD_RELOC_X86_64_TPOFF32:
2678 other = BFD_RELOC_X86_64_TPOFF64;
2679 break;
2680 case BFD_RELOC_X86_64_DTPOFF32:
2681 other = BFD_RELOC_X86_64_DTPOFF64;
2682 break;
2683 default:
2684 break;
2685 }
2686
2687 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2688 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2689 sign = -1;
2690
2691 rel = bfd_reloc_type_lookup (stdoutput, other);
2692 if (!rel)
2693 as_bad (_("unknown relocation (%u)"), other);
2694 else if (size != bfd_get_reloc_size (rel))
2695 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2696 bfd_get_reloc_size (rel),
2697 size);
2698 else if (pcrel && !rel->pc_relative)
2699 as_bad (_("non-pc-relative relocation for pc-relative field"));
2700 else if ((rel->complain_on_overflow == complain_overflow_signed
2701 && !sign)
2702 || (rel->complain_on_overflow == complain_overflow_unsigned
2703 && sign > 0))
2704 as_bad (_("relocated field and relocation type differ in signedness"));
2705 else
2706 return other;
2707 return NO_RELOC;
2708 }
2709
2710 if (pcrel)
2711 {
2712 if (!sign)
2713 as_bad (_("there are no unsigned pc-relative relocations"));
2714 switch (size)
2715 {
2716 case 1: return BFD_RELOC_8_PCREL;
2717 case 2: return BFD_RELOC_16_PCREL;
2718 case 4: return BFD_RELOC_32_PCREL;
2719 case 8: return BFD_RELOC_64_PCREL;
2720 }
2721 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2722 }
2723 else
2724 {
2725 if (sign > 0)
2726 switch (size)
2727 {
2728 case 4: return BFD_RELOC_X86_64_32S;
2729 }
2730 else
2731 switch (size)
2732 {
2733 case 1: return BFD_RELOC_8;
2734 case 2: return BFD_RELOC_16;
2735 case 4: return BFD_RELOC_32;
2736 case 8: return BFD_RELOC_64;
2737 }
2738 as_bad (_("cannot do %s %u byte relocation"),
2739 sign > 0 ? "signed" : "unsigned", size);
2740 }
2741
2742 return NO_RELOC;
2743 }
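/* Behaviour sketch (added): with OTHER == NO_RELOC the size/pcrel/sign
triple alone selects the relocation, e.g. reloc (4, 1, 1, NO_RELOC)
yields BFD_RELOC_32_PCREL and reloc (4, 0, 1, NO_RELOC) yields
BFD_RELOC_X86_64_32S.  When an explicit relocation operator was
parsed, OTHER is validated against the field size and signedness
instead, and 32-bit GOT/TLS relocations are widened to their 64-bit
counterparts for 8-byte fields.  */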
2744
2745 /* Here we decide which fixups can be adjusted to make them relative to
2746 the beginning of the section instead of the symbol. Basically we need
2747 to make sure that the dynamic relocations are done correctly, so in
2748 some cases we force the original symbol to be used. */
2749
2750 int
2751 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2752 {
2753 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2754 if (!IS_ELF)
2755 return 1;
2756
2757 /* Don't adjust pc-relative references to merge sections in 64-bit
2758 mode. */
2759 if (use_rela_relocations
2760 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2761 && fixP->fx_pcrel)
2762 return 0;
2763
2764 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2765 and changed later by validate_fix. */
2766 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2767 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2768 return 0;
2769
2770 /* adjust_reloc_syms doesn't know about the GOT. */
2771 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2772 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2773 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2774 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2775 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2776 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2777 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2778 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2779 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2780 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2781 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2782 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2783 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2784 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2785 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2789 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2790 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2791 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2792 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2793 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2794 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2795 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2796 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2797 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2798 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2799 return 0;
2800 #endif
2801 return 1;
2802 }
2803
2804 static int
2805 intel_float_operand (const char *mnemonic)
2806 {
2807 /* Note that the value returned is meaningful only for opcodes with (memory)
2808 operands, hence the code here is free to improperly handle opcodes that
2809 have no operands (for better performance and smaller code). */
2810
2811 if (mnemonic[0] != 'f')
2812 return 0; /* non-math */
2813
2814 switch (mnemonic[1])
2815 {
2816 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2817 the fs segment override prefix are not currently handled, because
2818 no call path can make opcodes without operands get here. */
2819 case 'i':
2820 return 2 /* integer op */;
2821 case 'l':
2822 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2823 return 3; /* fldcw/fldenv */
2824 break;
2825 case 'n':
2826 if (mnemonic[2] != 'o' /* fnop */)
2827 return 3; /* non-waiting control op */
2828 break;
2829 case 'r':
2830 if (mnemonic[2] == 's')
2831 return 3; /* frstor/frstpm */
2832 break;
2833 case 's':
2834 if (mnemonic[2] == 'a')
2835 return 3; /* fsave */
2836 if (mnemonic[2] == 't')
2837 {
2838 switch (mnemonic[3])
2839 {
2840 case 'c': /* fstcw */
2841 case 'd': /* fstdw */
2842 case 'e': /* fstenv */
2843 case 's': /* fsts[gw] */
2844 return 3;
2845 }
2846 }
2847 break;
2848 case 'x':
2849 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2850 return 0; /* fxsave/fxrstor are not really math ops */
2851 break;
2852 }
2853
2854 return 1;
2855 }
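/* Return value key with examples (added): 0 - not an FPU operation,
or one whose memory operand is not FPU data (e.g. fxsave); 1 - plain
floating point op (fadd); 2 - integer form (fiadd, fild); 3 -
control/state op with an untyped operand (fldcw, fnstenv, fsave).
As noted above, the result is only meaningful when the insn actually
has a memory operand.  */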
2856
2857 /* Build the VEX prefix. */
2858
2859 static void
2860 build_vex_prefix (const insn_template *t)
2861 {
2862 unsigned int register_specifier;
2863 unsigned int implied_prefix;
2864 unsigned int vector_length;
2865
2866 /* Check register specifier. */
2867 if (i.vex.register_specifier)
2868 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2869 else
2870 register_specifier = 0xf;
2871
2872 /* Use 2-byte VEX prefix by swapping destination and source
2873 operands. */
2874 if (!i.swap_operand
2875 && i.operands == i.reg_operands
2876 && i.tm.opcode_modifier.vexopcode == VEX0F
2877 && i.tm.opcode_modifier.s
2878 && i.rex == REX_B)
2879 {
2880 unsigned int xchg = i.operands - 1;
2881 union i386_op temp_op;
2882 i386_operand_type temp_type;
2883
2884 temp_type = i.types[xchg];
2885 i.types[xchg] = i.types[0];
2886 i.types[0] = temp_type;
2887 temp_op = i.op[xchg];
2888 i.op[xchg] = i.op[0];
2889 i.op[0] = temp_op;
2890
2891 gas_assert (i.rm.mode == 3);
2892
2893 i.rex = REX_R;
2894 xchg = i.rm.regmem;
2895 i.rm.regmem = i.rm.reg;
2896 i.rm.reg = xchg;
2897
2898 /* Use the next insn. */
2899 i.tm = t[1];
2900 }
2901
2902 if (i.tm.opcode_modifier.vex == VEXScalar)
2903 vector_length = avxscalar;
2904 else
2905 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2906
2907 switch ((i.tm.base_opcode >> 8) & 0xff)
2908 {
2909 case 0:
2910 implied_prefix = 0;
2911 break;
2912 case DATA_PREFIX_OPCODE:
2913 implied_prefix = 1;
2914 break;
2915 case REPE_PREFIX_OPCODE:
2916 implied_prefix = 2;
2917 break;
2918 case REPNE_PREFIX_OPCODE:
2919 implied_prefix = 3;
2920 break;
2921 default:
2922 abort ();
2923 }
2924
2925 /* Use 2-byte VEX prefix if possible. */
2926 if (i.tm.opcode_modifier.vexopcode == VEX0F
2927 && i.tm.opcode_modifier.vexw != VEXW1
2928 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2929 {
2930 /* 2-byte VEX prefix. */
2931 unsigned int r;
2932
2933 i.vex.length = 2;
2934 i.vex.bytes[0] = 0xc5;
2935
2936 /* Check the REX.R bit. */
2937 r = (i.rex & REX_R) ? 0 : 1;
2938 i.vex.bytes[1] = (r << 7
2939 | register_specifier << 3
2940 | vector_length << 2
2941 | implied_prefix);
2942 }
2943 else
2944 {
2945 /* 3-byte VEX prefix. */
2946 unsigned int m, w;
2947
2948 i.vex.length = 3;
2949
2950 switch (i.tm.opcode_modifier.vexopcode)
2951 {
2952 case VEX0F:
2953 m = 0x1;
2954 i.vex.bytes[0] = 0xc4;
2955 break;
2956 case VEX0F38:
2957 m = 0x2;
2958 i.vex.bytes[0] = 0xc4;
2959 break;
2960 case VEX0F3A:
2961 m = 0x3;
2962 i.vex.bytes[0] = 0xc4;
2963 break;
2964 case XOP08:
2965 m = 0x8;
2966 i.vex.bytes[0] = 0x8f;
2967 break;
2968 case XOP09:
2969 m = 0x9;
2970 i.vex.bytes[0] = 0x8f;
2971 break;
2972 case XOP0A:
2973 m = 0xa;
2974 i.vex.bytes[0] = 0x8f;
2975 break;
2976 default:
2977 abort ();
2978 }
2979
2980 /* The high 3 bits of the second VEX byte are the one's complement
2981 of the RXB bits from REX. */
2982 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2983
2984 /* Check the REX.W bit. */
2985 w = (i.rex & REX_W) ? 1 : 0;
2986 if (i.tm.opcode_modifier.vexw)
2987 {
2988 if (w)
2989 abort ();
2990
2991 if (i.tm.opcode_modifier.vexw == VEXW1)
2992 w = 1;
2993 }
2994
2995 i.vex.bytes[2] = (w << 7
2996 | register_specifier << 3
2997 | vector_length << 2
2998 | implied_prefix);
2999 }
3000 }
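/* Encoding sketch (added for illustration).  The 2-byte form built in
the first branch is laid out as

	0xC5  |  R' vvvv L pp

where R' is the complemented REX.R bit, vvvv the complemented register
specifier, L the 128/256-bit vector length and pp the implied
66/F3/F2 prefix.  The 3-byte form starts with 0xC4 (or 0x8F for XOP),
carries complemented R/X/B plus the mmmmm opcode-map field in its
second byte, and W/vvvv/L/pp in the third, matching the shifts used
above.  */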
3001
3002 static void
3003 process_immext (void)
3004 {
3005 expressionS *exp;
3006
3007 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3008 && i.operands > 0)
3009 {
3010 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3011 with an opcode suffix which is coded in the same place as an
3012 8-bit immediate field would be.
3013 Here we check those operands and remove them afterwards. */
3014 unsigned int x;
3015
3016 for (x = 0; x < i.operands; x++)
3017 if (register_number (i.op[x].regs) != x)
3018 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3019 register_prefix, i.op[x].regs->reg_name, x + 1,
3020 i.tm.name);
3021
3022 i.operands = 0;
3023 }
3024
3025 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3026 which is coded in the same place as an 8-bit immediate field
3027 would be. Here we fake an 8-bit immediate operand from the
3028 opcode suffix stored in tm.extension_opcode.
3029
3030 AVX instructions also use this encoding for some of the
3031 3-argument instructions. */
3032
3033 gas_assert (i.imm_operands == 0
3034 && (i.operands <= 2
3035 || (i.tm.opcode_modifier.vex
3036 && i.operands <= 4)));
3037
3038 exp = &im_expressions[i.imm_operands++];
3039 i.op[i.operands].imms = exp;
3040 i.types[i.operands] = imm8;
3041 i.operands++;
3042 exp->X_op = O_constant;
3043 exp->X_add_number = i.tm.extension_opcode;
3044 i.tm.extension_opcode = None;
3045 }
3046
3047
3048 static int
3049 check_hle (void)
3050 {
3051 switch (i.tm.opcode_modifier.hleprefixok)
3052 {
3053 default:
3054 abort ();
3055 case HLEPrefixNone:
3056 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3057 as_bad (_("invalid instruction `%s' after `xacquire'"),
3058 i.tm.name);
3059 else
3060 as_bad (_("invalid instruction `%s' after `xrelease'"),
3061 i.tm.name);
3062 return 0;
3063 case HLEPrefixLock:
3064 if (i.prefix[LOCK_PREFIX])
3065 return 1;
3066 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3067 as_bad (_("missing `lock' with `xacquire'"));
3068 else
3069 as_bad (_("missing `lock' with `xrelease'"));
3070 return 0;
3071 case HLEPrefixAny:
3072 return 1;
3073 case HLEPrefixRelease:
3074 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3075 {
3076 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3077 i.tm.name);
3078 return 0;
3079 }
3080 if (i.mem_operands == 0
3081 || !operand_type_check (i.types[i.operands - 1], anymem))
3082 {
3083 as_bad (_("memory destination needed for instruction `%s'"
3084 " after `xrelease'"), i.tm.name);
3085 return 0;
3086 }
3087 return 1;
3088 }
3089 }
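/* Usage examples (added, illustrative): "xacquire lock addl $1, (%rax)"
passes the HLEPrefixLock case because LOCK is present, while
"xacquire addl $1, (%rax)" triggers the "missing `lock' with
`xacquire'" error.  An insn the opcode table marks HLEPrefixRelease
(a plain store, for example) is additionally rejected after
`xacquire' and requires a memory destination.  */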
3090
3091 /* This is the guts of the machine-dependent assembler. LINE points to a
3092 machine dependent instruction. This function is supposed to emit
3093 the frags/bytes it assembles to. */
3094
3095 void
3096 md_assemble (char *line)
3097 {
3098 unsigned int j;
3099 char mnemonic[MAX_MNEM_SIZE];
3100 const insn_template *t;
3101
3102 /* Initialize globals. */
3103 memset (&i, '\0', sizeof (i));
3104 for (j = 0; j < MAX_OPERANDS; j++)
3105 i.reloc[j] = NO_RELOC;
3106 memset (disp_expressions, '\0', sizeof (disp_expressions));
3107 memset (im_expressions, '\0', sizeof (im_expressions));
3108 save_stack_p = save_stack;
3109
3110 /* First parse an instruction mnemonic & call i386_operand for the operands.
3111 We assume that the scrubber has arranged it so that line[0] is the valid
3112 start of a (possibly prefixed) mnemonic. */
3113
3114 line = parse_insn (line, mnemonic);
3115 if (line == NULL)
3116 return;
3117
3118 line = parse_operands (line, mnemonic);
3119 this_operand = -1;
3120 if (line == NULL)
3121 return;
3122
3123 /* Now we've parsed the mnemonic into a set of templates, and have the
3124 operands at hand. */
3125
3126 /* All intel opcodes have reversed operands except for "bound" and
3127 "enter". We also don't reverse intersegment "jmp" and "call"
3128 instructions with 2 immediate operands so that the immediate segment
3129 precedes the offset, as it does when in AT&T mode. */
3130 if (intel_syntax
3131 && i.operands > 1
3132 && (strcmp (mnemonic, "bound") != 0)
3133 && (strcmp (mnemonic, "invlpga") != 0)
3134 && !(operand_type_check (i.types[0], imm)
3135 && operand_type_check (i.types[1], imm)))
3136 swap_operands ();
3137
3138 /* The order of the immediates should be reversed for the
3139 two-immediate extrq and insertq instructions. */
3140 if (i.imm_operands == 2
3141 && (strcmp (mnemonic, "extrq") == 0
3142 || strcmp (mnemonic, "insertq") == 0))
3143 swap_2_operands (0, 1);
3144
3145 if (i.imm_operands)
3146 optimize_imm ();
3147
3148 /* Don't optimize displacement for movabs since it only takes 64bit
3149 displacement. */
3150 if (i.disp_operands
3151 && i.disp_encoding != disp_encoding_32bit
3152 && (flag_code != CODE_64BIT
3153 || strcmp (mnemonic, "movabs") != 0))
3154 optimize_disp ();
3155
3156 /* Next, we find a template that matches the given insn,
3157 making sure the overlap of the given operands types is consistent
3158 with the template operand types. */
3159
3160 if (!(t = match_template ()))
3161 return;
3162
3163 if (sse_check != check_none
3164 && !i.tm.opcode_modifier.noavx
3165 && (i.tm.cpu_flags.bitfield.cpusse
3166 || i.tm.cpu_flags.bitfield.cpusse2
3167 || i.tm.cpu_flags.bitfield.cpusse3
3168 || i.tm.cpu_flags.bitfield.cpussse3
3169 || i.tm.cpu_flags.bitfield.cpusse4_1
3170 || i.tm.cpu_flags.bitfield.cpusse4_2))
3171 {
3172 (sse_check == check_warning
3173 ? as_warn
3174 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3175 }
3176
3177 /* Zap movzx and movsx suffix. The suffix has been set from
3178 "word ptr" or "byte ptr" on the source operand in Intel syntax
3179 or extracted from mnemonic in AT&T syntax. But we'll use
3180 the destination register to choose the suffix for encoding. */
3181 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3182 {
3183 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3184 there is no suffix, the default will be byte extension. */
3185 if (i.reg_operands != 2
3186 && !i.suffix
3187 && intel_syntax)
3188 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3189
3190 i.suffix = 0;
3191 }
3192
3193 if (i.tm.opcode_modifier.fwait)
3194 if (!add_prefix (FWAIT_OPCODE))
3195 return;
3196
3197 /* Check for lock without a lockable instruction. Destination operand
3198 must be memory unless it is xchg (0x86). */
3199 if (i.prefix[LOCK_PREFIX]
3200 && (!i.tm.opcode_modifier.islockable
3201 || i.mem_operands == 0
3202 || (i.tm.base_opcode != 0x86
3203 && !operand_type_check (i.types[i.operands - 1], anymem))))
3204 {
3205 as_bad (_("expecting lockable instruction after `lock'"));
3206 return;
3207 }
3208
3209 /* Check if HLE prefix is OK. */
3210 if (i.have_hle && !check_hle ())
3211 return;
3212
3213 /* Check string instruction segment overrides. */
3214 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3215 {
3216 if (!check_string ())
3217 return;
3218 i.disp_operands = 0;
3219 }
3220
3221 if (!process_suffix ())
3222 return;
3223
3224 /* Update operand types. */
3225 for (j = 0; j < i.operands; j++)
3226 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3227
3228 /* Make still unresolved immediate matches conform to size of immediate
3229 given in i.suffix. */
3230 if (!finalize_imm ())
3231 return;
3232
3233 if (i.types[0].bitfield.imm1)
3234 i.imm_operands = 0; /* kludge for shift insns. */
3235
3236 /* We only need to check those implicit registers for instructions
3237 with 3 operands or fewer. */
3238 if (i.operands <= 3)
3239 for (j = 0; j < i.operands; j++)
3240 if (i.types[j].bitfield.inoutportreg
3241 || i.types[j].bitfield.shiftcount
3242 || i.types[j].bitfield.acc
3243 || i.types[j].bitfield.floatacc)
3244 i.reg_operands--;
3245
3246 /* ImmExt should be processed after SSE2AVX. */
3247 if (!i.tm.opcode_modifier.sse2avx
3248 && i.tm.opcode_modifier.immext)
3249 process_immext ();
3250
3251 /* For insns with operands there are more diddles to do to the opcode. */
3252 if (i.operands)
3253 {
3254 if (!process_operands ())
3255 return;
3256 }
3257 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3258 {
3259 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3260 as_warn (_("translating to `%sp'"), i.tm.name);
3261 }
3262
3263 if (i.tm.opcode_modifier.vex)
3264 build_vex_prefix (t);
3265
3266 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3267 instructions may define INT_OPCODE as well, so avoid this corner
3268 case for those instructions that use MODRM. */
3269 if (i.tm.base_opcode == INT_OPCODE
3270 && !i.tm.opcode_modifier.modrm
3271 && i.op[0].imms->X_add_number == 3)
3272 {
3273 i.tm.base_opcode = INT3_OPCODE;
3274 i.imm_operands = 0;
3275 }
3276
3277 if ((i.tm.opcode_modifier.jump
3278 || i.tm.opcode_modifier.jumpbyte
3279 || i.tm.opcode_modifier.jumpdword)
3280 && i.op[0].disps->X_op == O_constant)
3281 {
3282 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3283 the absolute address given by the constant. Since ix86 jumps and
3284 calls are pc relative, we need to generate a reloc. */
3285 i.op[0].disps->X_add_symbol = &abs_symbol;
3286 i.op[0].disps->X_op = O_symbol;
3287 }
3288
3289 if (i.tm.opcode_modifier.rex64)
3290 i.rex |= REX_W;
3291
3292 /* For 8-bit registers we need an empty REX prefix. Also, if the
3293 instruction already needs a REX prefix, we need to convert old
3294 register names to their REX-form equivalents. */
3295
3296 if ((i.types[0].bitfield.reg8
3297 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3298 || (i.types[1].bitfield.reg8
3299 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3300 || ((i.types[0].bitfield.reg8
3301 || i.types[1].bitfield.reg8)
3302 && i.rex != 0))
3303 {
3304 int x;
3305
3306 i.rex |= REX_OPCODE;
3307 for (x = 0; x < 2; x++)
3308 {
3309 /* Look for 8 bit operand that uses old registers. */
3310 if (i.types[x].bitfield.reg8
3311 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3312 {
3313 /* In case it is "hi" register, give up. */
3314 if (i.op[x].regs->reg_num > 3)
3315 as_bad (_("can't encode register '%s%s' in an "
3316 "instruction requiring REX prefix."),
3317 register_prefix, i.op[x].regs->reg_name);
3318
3319 /* Otherwise it is equivalent to the extended register.
3320 Since the encoding doesn't change this is merely
3321 cosmetic cleanup for debug output. */
3322
3323 i.op[x].regs = i.op[x].regs + 8;
3324 }
3325 }
3326 }
3327
3328 if (i.rex != 0)
3329 add_prefix (REX_OPCODE | i.rex);
3330
3331 /* We are ready to output the insn. */
3332 output_insn ();
3333 }
3334
3335 static char *
3336 parse_insn (char *line, char *mnemonic)
3337 {
3338 char *l = line;
3339 char *token_start = l;
3340 char *mnem_p;
3341 int supported;
3342 const insn_template *t;
3343 char *dot_p = NULL;
3344
3345 /* Non-zero if we found a prefix only acceptable with string insns. */
3346 const char *expecting_string_instruction = NULL;
3347
3348 while (1)
3349 {
3350 mnem_p = mnemonic;
3351 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3352 {
3353 if (*mnem_p == '.')
3354 dot_p = mnem_p;
3355 mnem_p++;
3356 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3357 {
3358 as_bad (_("no such instruction: `%s'"), token_start);
3359 return NULL;
3360 }
3361 l++;
3362 }
3363 if (!is_space_char (*l)
3364 && *l != END_OF_INSN
3365 && (intel_syntax
3366 || (*l != PREFIX_SEPARATOR
3367 && *l != ',')))
3368 {
3369 as_bad (_("invalid character %s in mnemonic"),
3370 output_invalid (*l));
3371 return NULL;
3372 }
3373 if (token_start == l)
3374 {
3375 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3376 as_bad (_("expecting prefix; got nothing"));
3377 else
3378 as_bad (_("expecting mnemonic; got nothing"));
3379 return NULL;
3380 }
3381
3382 /* Look up instruction (or prefix) via hash table. */
3383 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3384
3385 if (*l != END_OF_INSN
3386 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3387 && current_templates
3388 && current_templates->start->opcode_modifier.isprefix)
3389 {
3390 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3391 {
3392 as_bad ((flag_code != CODE_64BIT
3393 ? _("`%s' is only supported in 64-bit mode")
3394 : _("`%s' is not supported in 64-bit mode")),
3395 current_templates->start->name);
3396 return NULL;
3397 }
3398 /* If we are in 16-bit mode, do not allow addr16 or data16.
3399 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3400 if ((current_templates->start->opcode_modifier.size16
3401 || current_templates->start->opcode_modifier.size32)
3402 && flag_code != CODE_64BIT
3403 && (current_templates->start->opcode_modifier.size32
3404 ^ (flag_code == CODE_16BIT)))
3405 {
3406 as_bad (_("redundant %s prefix"),
3407 current_templates->start->name);
3408 return NULL;
3409 }
3410 /* Add prefix, checking for repeated prefixes. */
3411 switch (add_prefix (current_templates->start->base_opcode))
3412 {
3413 case PREFIX_EXIST:
3414 return NULL;
3415 case PREFIX_REP:
3416 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3417 i.have_hle = 1;
3418 else
3419 expecting_string_instruction = current_templates->start->name;
3420 break;
3421 default:
3422 break;
3423 }
3424 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3425 token_start = ++l;
3426 }
3427 else
3428 break;
3429 }
3430
3431 if (!current_templates)
3432 {
3433 /* Check if we should swap operands or force a 32-bit displacement
3434 in the encoding. */
3435 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3436 i.swap_operand = 1;
3437 else if (mnem_p - 3 == dot_p
3438 && dot_p[1] == 'd'
3439 && dot_p[2] == '8')
3440 i.disp_encoding = disp_encoding_8bit;
3441 else if (mnem_p - 4 == dot_p
3442 && dot_p[1] == 'd'
3443 && dot_p[2] == '3'
3444 && dot_p[3] == '2')
3445 i.disp_encoding = disp_encoding_32bit;
3446 else
3447 goto check_suffix;
3448 mnem_p = dot_p;
3449 *dot_p = '\0';
3450 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3451 }
3452
3453 if (!current_templates)
3454 {
3455 check_suffix:
3456 /* See if we can get a match by trimming off a suffix. */
3457 switch (mnem_p[-1])
3458 {
3459 case WORD_MNEM_SUFFIX:
3460 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3461 i.suffix = SHORT_MNEM_SUFFIX;
3462 else
3463 case BYTE_MNEM_SUFFIX:
3464 case QWORD_MNEM_SUFFIX:
3465 i.suffix = mnem_p[-1];
3466 mnem_p[-1] = '\0';
3467 current_templates = (const templates *) hash_find (op_hash,
3468 mnemonic);
3469 break;
3470 case SHORT_MNEM_SUFFIX:
3471 case LONG_MNEM_SUFFIX:
3472 if (!intel_syntax)
3473 {
3474 i.suffix = mnem_p[-1];
3475 mnem_p[-1] = '\0';
3476 current_templates = (const templates *) hash_find (op_hash,
3477 mnemonic);
3478 }
3479 break;
3480
3481 /* Intel Syntax. */
3482 case 'd':
3483 if (intel_syntax)
3484 {
3485 if (intel_float_operand (mnemonic) == 1)
3486 i.suffix = SHORT_MNEM_SUFFIX;
3487 else
3488 i.suffix = LONG_MNEM_SUFFIX;
3489 mnem_p[-1] = '\0';
3490 current_templates = (const templates *) hash_find (op_hash,
3491 mnemonic);
3492 }
3493 break;
3494 }
3495 if (!current_templates)
3496 {
3497 as_bad (_("no such instruction: `%s'"), token_start);
3498 return NULL;
3499 }
3500 }
3501
3502 if (current_templates->start->opcode_modifier.jump
3503 || current_templates->start->opcode_modifier.jumpbyte)
3504 {
3505 /* Check for a branch hint. We allow ",pt" and ",pn" for
3506 predict taken and predict not taken respectively.
3507 I'm not sure that branch hints actually do anything on loop
3508 and jcxz insns (JumpByte) for current Pentium4 chips. They
3509 may work in the future and it doesn't hurt to accept them
3510 now. */
3511 if (l[0] == ',' && l[1] == 'p')
3512 {
3513 if (l[2] == 't')
3514 {
3515 if (!add_prefix (DS_PREFIX_OPCODE))
3516 return NULL;
3517 l += 3;
3518 }
3519 else if (l[2] == 'n')
3520 {
3521 if (!add_prefix (CS_PREFIX_OPCODE))
3522 return NULL;
3523 l += 3;
3524 }
3525 }
3526 }
3527 /* Any other comma loses. */
3528 if (*l == ',')
3529 {
3530 as_bad (_("invalid character %s in mnemonic"),
3531 output_invalid (*l));
3532 return NULL;
3533 }
3534
3535 /* Check if instruction is supported on specified architecture. */
3536 supported = 0;
3537 for (t = current_templates->start; t < current_templates->end; ++t)
3538 {
3539 supported |= cpu_flags_match (t);
3540 if (supported == CPU_FLAGS_PERFECT_MATCH)
3541 goto skip;
3542 }
3543
3544 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3545 {
3546 as_bad (flag_code == CODE_64BIT
3547 ? _("`%s' is not supported in 64-bit mode")
3548 : _("`%s' is only supported in 64-bit mode"),
3549 current_templates->start->name);
3550 return NULL;
3551 }
3552 if (supported != CPU_FLAGS_PERFECT_MATCH)
3553 {
3554 as_bad (_("`%s' is not supported on `%s%s'"),
3555 current_templates->start->name,
3556 cpu_arch_name ? cpu_arch_name : default_arch,
3557 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3558 return NULL;
3559 }
3560
3561 skip:
3562 if (!cpu_arch_flags.bitfield.cpui386
3563 && (flag_code != CODE_16BIT))
3564 {
3565 as_warn (_("use .code16 to ensure correct addressing mode"));
3566 }
3567
3568 /* Check for rep/repne without a string (or other allowed) instruction. */
3569 if (expecting_string_instruction)
3570 {
3571 static templates override;
3572
3573 for (t = current_templates->start; t < current_templates->end; ++t)
3574 if (t->opcode_modifier.repprefixok)
3575 break;
3576 if (t >= current_templates->end)
3577 {
3578 as_bad (_("expecting string instruction after `%s'"),
3579 expecting_string_instruction);
3580 return NULL;
3581 }
3582 for (override.start = t; t < current_templates->end; ++t)
3583 if (!t->opcode_modifier.repprefixok)
3584 break;
3585 override.end = t;
3586 current_templates = &override;
3587 }
3588
3589 return l;
3590 }
3591
3592 static char *
3593 parse_operands (char *l, const char *mnemonic)
3594 {
3595 char *token_start;
3596
3597 /* 1 if operand is pending after ','. */
3598 unsigned int expecting_operand = 0;
3599
3600 /* Non-zero if operand parens not balanced. */
3601 unsigned int paren_not_balanced;
3602
3603 while (*l != END_OF_INSN)
3604 {
3605 /* Skip optional white space before operand. */
3606 if (is_space_char (*l))
3607 ++l;
3608 if (!is_operand_char (*l) && *l != END_OF_INSN)
3609 {
3610 as_bad (_("invalid character %s before operand %d"),
3611 output_invalid (*l),
3612 i.operands + 1);
3613 return NULL;
3614 }
3615 token_start = l; /* after white space */
3616 paren_not_balanced = 0;
3617 while (paren_not_balanced || *l != ',')
3618 {
3619 if (*l == END_OF_INSN)
3620 {
3621 if (paren_not_balanced)
3622 {
3623 if (!intel_syntax)
3624 as_bad (_("unbalanced parenthesis in operand %d."),
3625 i.operands + 1);
3626 else
3627 as_bad (_("unbalanced brackets in operand %d."),
3628 i.operands + 1);
3629 return NULL;
3630 }
3631 else
3632 break; /* we are done */
3633 }
3634 else if (!is_operand_char (*l) && !is_space_char (*l))
3635 {
3636 as_bad (_("invalid character %s in operand %d"),
3637 output_invalid (*l),
3638 i.operands + 1);
3639 return NULL;
3640 }
3641 if (!intel_syntax)
3642 {
3643 if (*l == '(')
3644 ++paren_not_balanced;
3645 if (*l == ')')
3646 --paren_not_balanced;
3647 }
3648 else
3649 {
3650 if (*l == '[')
3651 ++paren_not_balanced;
3652 if (*l == ']')
3653 --paren_not_balanced;
3654 }
3655 l++;
3656 }
3657 if (l != token_start)
3658 { /* Yes, we've read in another operand. */
3659 unsigned int operand_ok;
3660 this_operand = i.operands++;
3661 i.types[this_operand].bitfield.unspecified = 1;
3662 if (i.operands > MAX_OPERANDS)
3663 {
3664 as_bad (_("spurious operands; (%d operands/instruction max)"),
3665 MAX_OPERANDS);
3666 return NULL;
3667 }
3668 /* Now parse operand adding info to 'i' as we go along. */
3669 END_STRING_AND_SAVE (l);
3670
3671 if (intel_syntax)
3672 operand_ok =
3673 i386_intel_operand (token_start,
3674 intel_float_operand (mnemonic));
3675 else
3676 operand_ok = i386_att_operand (token_start);
3677
3678 RESTORE_END_STRING (l);
3679 if (!operand_ok)
3680 return NULL;
3681 }
3682 else
3683 {
3684 if (expecting_operand)
3685 {
3686 expecting_operand_after_comma:
3687 as_bad (_("expecting operand after ','; got nothing"));
3688 return NULL;
3689 }
3690 if (*l == ',')
3691 {
3692 as_bad (_("expecting operand before ','; got nothing"));
3693 return NULL;
3694 }
3695 }
3696
3697 /* Now *l must be either ',' or END_OF_INSN. */
3698 if (*l == ',')
3699 {
3700 if (*++l == END_OF_INSN)
3701 {
3702 /* Just skip it; if it's \n, complain. */
3703 goto expecting_operand_after_comma;
3704 }
3705 expecting_operand = 1;
3706 }
3707 }
3708 return l;
3709 }
3710
3711 static void
3712 swap_2_operands (int xchg1, int xchg2)
3713 {
3714 union i386_op temp_op;
3715 i386_operand_type temp_type;
3716 enum bfd_reloc_code_real temp_reloc;
3717
3718 temp_type = i.types[xchg2];
3719 i.types[xchg2] = i.types[xchg1];
3720 i.types[xchg1] = temp_type;
3721 temp_op = i.op[xchg2];
3722 i.op[xchg2] = i.op[xchg1];
3723 i.op[xchg1] = temp_op;
3724 temp_reloc = i.reloc[xchg2];
3725 i.reloc[xchg2] = i.reloc[xchg1];
3726 i.reloc[xchg1] = temp_reloc;
3727 }
3728
3729 static void
3730 swap_operands (void)
3731 {
3732 switch (i.operands)
3733 {
3734 case 5:
3735 case 4:
3736 swap_2_operands (1, i.operands - 2);
3737 case 3:
3738 case 2:
3739 swap_2_operands (0, i.operands - 1);
3740 break;
3741 default:
3742 abort ();
3743 }
3744
3745 if (i.mem_operands == 2)
3746 {
3747 const seg_entry *temp_seg;
3748 temp_seg = i.seg[0];
3749 i.seg[0] = i.seg[1];
3750 i.seg[1] = temp_seg;
3751 }
3752 }
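/* Example (added): for Intel-syntax "add eax, ebx" the parser records
the destination operand first, so md_assemble () calls swap_operands ()
to restore the AT&T order the opcode templates expect; when two memory
operands are involved the segment overrides in i.seg[] travel with
them, as handled above.  */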
3753
3754 /* Try to ensure constant immediates are represented in the smallest
3755 opcode possible. */
3756 static void
3757 optimize_imm (void)
3758 {
3759 char guess_suffix = 0;
3760 int op;
3761
3762 if (i.suffix)
3763 guess_suffix = i.suffix;
3764 else if (i.reg_operands)
3765 {
3766 /* Figure out a suffix from the last register operand specified.
3767 We can't do this properly yet, i.e. excluding InOutPortReg,
3768 but the following works for instructions with immediates.
3769 In any case, we can't set i.suffix yet. */
3770 for (op = i.operands; --op >= 0;)
3771 if (i.types[op].bitfield.reg8)
3772 {
3773 guess_suffix = BYTE_MNEM_SUFFIX;
3774 break;
3775 }
3776 else if (i.types[op].bitfield.reg16)
3777 {
3778 guess_suffix = WORD_MNEM_SUFFIX;
3779 break;
3780 }
3781 else if (i.types[op].bitfield.reg32)
3782 {
3783 guess_suffix = LONG_MNEM_SUFFIX;
3784 break;
3785 }
3786 else if (i.types[op].bitfield.reg64)
3787 {
3788 guess_suffix = QWORD_MNEM_SUFFIX;
3789 break;
3790 }
3791 }
3792 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3793 guess_suffix = WORD_MNEM_SUFFIX;
3794
3795 for (op = i.operands; --op >= 0;)
3796 if (operand_type_check (i.types[op], imm))
3797 {
3798 switch (i.op[op].imms->X_op)
3799 {
3800 case O_constant:
3801 /* If a suffix is given, this operand may be shortened. */
3802 switch (guess_suffix)
3803 {
3804 case LONG_MNEM_SUFFIX:
3805 i.types[op].bitfield.imm32 = 1;
3806 i.types[op].bitfield.imm64 = 1;
3807 break;
3808 case WORD_MNEM_SUFFIX:
3809 i.types[op].bitfield.imm16 = 1;
3810 i.types[op].bitfield.imm32 = 1;
3811 i.types[op].bitfield.imm32s = 1;
3812 i.types[op].bitfield.imm64 = 1;
3813 break;
3814 case BYTE_MNEM_SUFFIX:
3815 i.types[op].bitfield.imm8 = 1;
3816 i.types[op].bitfield.imm8s = 1;
3817 i.types[op].bitfield.imm16 = 1;
3818 i.types[op].bitfield.imm32 = 1;
3819 i.types[op].bitfield.imm32s = 1;
3820 i.types[op].bitfield.imm64 = 1;
3821 break;
3822 }
3823
3824 /* If this operand is at most 16 bits, convert it
3825 to a signed 16 bit number before trying to see
3826 whether it will fit in an even smaller size.
3827 This allows a 16-bit operand such as $0xffe0 to
3828 be recognised as within Imm8S range. */
3829 if ((i.types[op].bitfield.imm16)
3830 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3831 {
3832 i.op[op].imms->X_add_number =
3833 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3834 }
3835 if ((i.types[op].bitfield.imm32)
3836 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3837 == 0))
3838 {
3839 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3840 ^ ((offsetT) 1 << 31))
3841 - ((offsetT) 1 << 31));
3842 }
3843 i.types[op]
3844 = operand_type_or (i.types[op],
3845 smallest_imm_type (i.op[op].imms->X_add_number));
3846
3847 /* We must avoid matching Imm32 templates when only a 64-bit
3848 immediate is available. */
3849 if (guess_suffix == QWORD_MNEM_SUFFIX)
3850 i.types[op].bitfield.imm32 = 0;
3851 break;
3852
3853 case O_absent:
3854 case O_register:
3855 abort ();
3856
3857 /* Symbols and expressions. */
3858 default:
3859 /* Convert symbolic operand to proper sizes for matching, but don't
3860 prevent matching a set of insns that only supports sizes other
3861 than those matching the insn suffix. */
3862 {
3863 i386_operand_type mask, allowed;
3864 const insn_template *t;
3865
3866 operand_type_set (&mask, 0);
3867 operand_type_set (&allowed, 0);
3868
3869 for (t = current_templates->start;
3870 t < current_templates->end;
3871 ++t)
3872 allowed = operand_type_or (allowed,
3873 t->operand_types[op]);
3874 switch (guess_suffix)
3875 {
3876 case QWORD_MNEM_SUFFIX:
3877 mask.bitfield.imm64 = 1;
3878 mask.bitfield.imm32s = 1;
3879 break;
3880 case LONG_MNEM_SUFFIX:
3881 mask.bitfield.imm32 = 1;
3882 break;
3883 case WORD_MNEM_SUFFIX:
3884 mask.bitfield.imm16 = 1;
3885 break;
3886 case BYTE_MNEM_SUFFIX:
3887 mask.bitfield.imm8 = 1;
3888 break;
3889 default:
3890 break;
3891 }
3892 allowed = operand_type_and (mask, allowed);
3893 if (!operand_type_all_zero (&allowed))
3894 i.types[op] = operand_type_and (i.types[op], mask);
3895 }
3896 break;
3897 }
3898 }
3899 }
3900
3901 /* Try to use the smallest displacement type too. */
3902 static void
3903 optimize_disp (void)
3904 {
3905 int op;
3906
3907 for (op = i.operands; --op >= 0;)
3908 if (operand_type_check (i.types[op], disp))
3909 {
3910 if (i.op[op].disps->X_op == O_constant)
3911 {
3912 offsetT op_disp = i.op[op].disps->X_add_number;
3913
3914 if (i.types[op].bitfield.disp16
3915 && (op_disp & ~(offsetT) 0xffff) == 0)
3916 {
3917 /* If this operand is at most 16 bits, convert
3918 to a signed 16 bit number and don't use 64bit
3919 displacement. */
3920 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3921 i.types[op].bitfield.disp64 = 0;
3922 }
3923 if (i.types[op].bitfield.disp32
3924 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3925 {
3926 /* If this operand is at most 32 bits, convert
3927 to a signed 32 bit number and don't use 64bit
3928 displacement. */
3929 op_disp &= (((offsetT) 2 << 31) - 1);
3930 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3931 i.types[op].bitfield.disp64 = 0;
3932 }
3933 if (!op_disp && i.types[op].bitfield.baseindex)
3934 {
3935 i.types[op].bitfield.disp8 = 0;
3936 i.types[op].bitfield.disp16 = 0;
3937 i.types[op].bitfield.disp32 = 0;
3938 i.types[op].bitfield.disp32s = 0;
3939 i.types[op].bitfield.disp64 = 0;
3940 i.op[op].disps = 0;
3941 i.disp_operands--;
3942 }
3943 else if (flag_code == CODE_64BIT)
3944 {
3945 if (fits_in_signed_long (op_disp))
3946 {
3947 i.types[op].bitfield.disp64 = 0;
3948 i.types[op].bitfield.disp32s = 1;
3949 }
3950 if (i.prefix[ADDR_PREFIX]
3951 && fits_in_unsigned_long (op_disp))
3952 i.types[op].bitfield.disp32 = 1;
3953 }
3954 if ((i.types[op].bitfield.disp32
3955 || i.types[op].bitfield.disp32s
3956 || i.types[op].bitfield.disp16)
3957 && fits_in_signed_byte (op_disp))
3958 i.types[op].bitfield.disp8 = 1;
3959 }
3960 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3961 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3962 {
3963 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3964 i.op[op].disps, 0, i.reloc[op]);
3965 i.types[op].bitfield.disp8 = 0;
3966 i.types[op].bitfield.disp16 = 0;
3967 i.types[op].bitfield.disp32 = 0;
3968 i.types[op].bitfield.disp32s = 0;
3969 i.types[op].bitfield.disp64 = 0;
3970 }
3971 else
3972 /* We only support 64-bit displacements for constants. */
3973 i.types[op].bitfield.disp64 = 0;
3974 }
3975 }
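/* Example (added for illustration): for "movl %eax, 0x40(%ebx)" the
constant displacement fits in a signed byte, so disp8 is set here and
the shorter ModRM mod = 1 form can be chosen later; a displacement of
exactly zero together with a base register drops the displacement
operand altogether.  */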
3976
3977 /* Check if operands are valid for the instruction. */
3978
3979 static int
3980 check_VecOperands (const insn_template *t)
3981 {
3982 /* Without VSIB byte, we can't have a vector register for index. */
3983 if (!t->opcode_modifier.vecsib
3984 && i.index_reg
3985 && (i.index_reg->reg_type.bitfield.regxmm
3986 || i.index_reg->reg_type.bitfield.regymm))
3987 {
3988 i.error = unsupported_vector_index_register;
3989 return 1;
3990 }
3991
3992 /* For VSIB byte, we need a vector register for index, and all vector
3993 registers must be distinct. */
3994 if (t->opcode_modifier.vecsib)
3995 {
3996 if (!i.index_reg
3997 || !((t->opcode_modifier.vecsib == VecSIB128
3998 && i.index_reg->reg_type.bitfield.regxmm)
3999 || (t->opcode_modifier.vecsib == VecSIB256
4000 && i.index_reg->reg_type.bitfield.regymm)))
4001 {
4002 i.error = invalid_vsib_address;
4003 return 1;
4004 }
4005
4006 gas_assert (i.reg_operands == 2);
4007 gas_assert (i.types[0].bitfield.regxmm
4008 || i.types[0].bitfield.regymm);
4009 gas_assert (i.types[2].bitfield.regxmm
4010 || i.types[2].bitfield.regymm);
4011
4012 if (operand_check == check_none)
4013 return 0;
4014 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4015 && register_number (i.op[2].regs) != register_number (i.index_reg)
4016 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4017 return 0;
4018 if (operand_check == check_error)
4019 {
4020 i.error = invalid_vector_register_set;
4021 return 1;
4022 }
4023 as_warn (_("mask, index, and destination registers should be distinct"));
4024 }
4025
4026 return 0;
4027 }
4028
4029 /* Check if operands are valid for the instruction. Update VEX
4030 operand types. */
4031
4032 static int
4033 VEX_check_operands (const insn_template *t)
4034 {
4035 if (!t->opcode_modifier.vex)
4036 return 0;
4037
4038 /* Only check VEX_Imm4, which must be the first operand. */
4039 if (t->operand_types[0].bitfield.vec_imm4)
4040 {
4041 if (i.op[0].imms->X_op != O_constant
4042 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4043 {
4044 i.error = bad_imm4;
4045 return 1;
4046 }
4047
4048 /* Turn off Imm8 so that update_imm won't complain. */
4049 i.types[0] = vec_imm4;
4050 }
4051
4052 return 0;
4053 }
4054
4055 static const insn_template *
4056 match_template (void)
4057 {
4058 /* Points to template once we've found it. */
4059 const insn_template *t;
4060 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4061 i386_operand_type overlap4;
4062 unsigned int found_reverse_match;
4063 i386_opcode_modifier suffix_check;
4064 i386_operand_type operand_types [MAX_OPERANDS];
4065 int addr_prefix_disp;
4066 unsigned int j;
4067 unsigned int found_cpu_match;
4068 unsigned int check_register;
4069 enum i386_error specific_error = 0;
4070
4071 #if MAX_OPERANDS != 5
4072 # error "MAX_OPERANDS must be 5."
4073 #endif
4074
4075 found_reverse_match = 0;
4076 addr_prefix_disp = -1;
4077
4078 memset (&suffix_check, 0, sizeof (suffix_check));
4079 if (i.suffix == BYTE_MNEM_SUFFIX)
4080 suffix_check.no_bsuf = 1;
4081 else if (i.suffix == WORD_MNEM_SUFFIX)
4082 suffix_check.no_wsuf = 1;
4083 else if (i.suffix == SHORT_MNEM_SUFFIX)
4084 suffix_check.no_ssuf = 1;
4085 else if (i.suffix == LONG_MNEM_SUFFIX)
4086 suffix_check.no_lsuf = 1;
4087 else if (i.suffix == QWORD_MNEM_SUFFIX)
4088 suffix_check.no_qsuf = 1;
4089 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4090 suffix_check.no_ldsuf = 1;
4091
4092 /* Must have right number of operands. */
4093 i.error = number_of_operands_mismatch;
4094
4095 for (t = current_templates->start; t < current_templates->end; t++)
4096 {
4097 addr_prefix_disp = -1;
4098
4099 if (i.operands != t->operands)
4100 continue;
4101
4102 /* Check processor support. */
4103 i.error = unsupported;
4104 found_cpu_match = (cpu_flags_match (t)
4105 == CPU_FLAGS_PERFECT_MATCH);
4106 if (!found_cpu_match)
4107 continue;
4108
4109 /* Check old gcc support. */
4110 i.error = old_gcc_only;
4111 if (!old_gcc && t->opcode_modifier.oldgcc)
4112 continue;
4113
4114 /* Check AT&T mnemonic. */
4115 i.error = unsupported_with_intel_mnemonic;
4116 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4117 continue;
4118
4119 /* Check AT&T/Intel syntax. */
4120 i.error = unsupported_syntax;
4121 if ((intel_syntax && t->opcode_modifier.attsyntax)
4122 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4123 continue;
4124
4125 /* Check the suffix, except for some instructions in intel mode. */
4126 i.error = invalid_instruction_suffix;
4127 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4128 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4129 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4130 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4131 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4132 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4133 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4134 continue;
4135
4136 if (!operand_size_match (t))
4137 continue;
4138
4139 for (j = 0; j < MAX_OPERANDS; j++)
4140 operand_types[j] = t->operand_types[j];
4141
4142 /* In general, don't allow 64-bit operands in 32-bit mode. */
4143 if (i.suffix == QWORD_MNEM_SUFFIX
4144 && flag_code != CODE_64BIT
4145 && (intel_syntax
4146 ? (!t->opcode_modifier.ignoresize
4147 && !intel_float_operand (t->name))
4148 : intel_float_operand (t->name) != 2)
4149 && ((!operand_types[0].bitfield.regmmx
4150 && !operand_types[0].bitfield.regxmm
4151 && !operand_types[0].bitfield.regymm)
4152 || (!operand_types[t->operands > 1].bitfield.regmmx
4153 && !operand_types[t->operands > 1].bitfield.regxmm
4154 && !operand_types[t->operands > 1].bitfield.regymm))
4155 && (t->base_opcode != 0x0fc7
4156 || t->extension_opcode != 1 /* cmpxchg8b */))
4157 continue;
4158
4159 /* In general, don't allow 32-bit operands on pre-386. */
4160 else if (i.suffix == LONG_MNEM_SUFFIX
4161 && !cpu_arch_flags.bitfield.cpui386
4162 && (intel_syntax
4163 ? (!t->opcode_modifier.ignoresize
4164 && !intel_float_operand (t->name))
4165 : intel_float_operand (t->name) != 2)
4166 && ((!operand_types[0].bitfield.regmmx
4167 && !operand_types[0].bitfield.regxmm)
4168 || (!operand_types[t->operands > 1].bitfield.regmmx
4169 && !operand_types[t->operands > 1].bitfield.regxmm)))
4170 continue;
4171
4172 /* Do not verify operands when there are none. */
4173 else
4174 {
4175 if (!t->operands)
4176 /* We've found a match; break out of loop. */
4177 break;
4178 }
4179
4180 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4181 into Disp32/Disp16/Disp32 operand. */
4182 if (i.prefix[ADDR_PREFIX] != 0)
4183 {
4184 /* There should be only one Disp operand. */
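/* Illustrative example: in .code16, `mov %ax, (%esi)' carries a
   0x67 address size prefix, so a template's Disp16 operand is
   treated as Disp32 below; the 32-bit and 64-bit cases are the
   corresponding mirror-image conversions.  */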
4185 switch (flag_code)
4186 {
4187 case CODE_16BIT:
4188 for (j = 0; j < MAX_OPERANDS; j++)
4189 {
4190 if (operand_types[j].bitfield.disp16)
4191 {
4192 addr_prefix_disp = j;
4193 operand_types[j].bitfield.disp32 = 1;
4194 operand_types[j].bitfield.disp16 = 0;
4195 break;
4196 }
4197 }
4198 break;
4199 case CODE_32BIT:
4200 for (j = 0; j < MAX_OPERANDS; j++)
4201 {
4202 if (operand_types[j].bitfield.disp32)
4203 {
4204 addr_prefix_disp = j;
4205 operand_types[j].bitfield.disp32 = 0;
4206 operand_types[j].bitfield.disp16 = 1;
4207 break;
4208 }
4209 }
4210 break;
4211 case CODE_64BIT:
4212 for (j = 0; j < MAX_OPERANDS; j++)
4213 {
4214 if (operand_types[j].bitfield.disp64)
4215 {
4216 addr_prefix_disp = j;
4217 operand_types[j].bitfield.disp64 = 0;
4218 operand_types[j].bitfield.disp32 = 1;
4219 break;
4220 }
4221 }
4222 break;
4223 }
4224 }
4225
4226 /* We check register size if needed. */
4227 check_register = t->opcode_modifier.checkregsize;
4228 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4229 switch (t->operands)
4230 {
4231 case 1:
4232 if (!operand_type_match (overlap0, i.types[0]))
4233 continue;
4234 break;
4235 case 2:
4236 /* xchg %eax, %eax is a special case. It is an alias for nop
4237 only in 32bit mode and we can use opcode 0x90. In 64bit
4238 mode, we can't use 0x90 for xchg %eax, %eax since it should
4239 zero-extend %eax to %rax. */
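/* (For illustration: in 64-bit mode GAS therefore moves on to the
   register-form template, encoding `xchg %eax, %eax' as 87 c0,
   which does zero-extend into %rax.)  */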
4240 if (flag_code == CODE_64BIT
4241 && t->base_opcode == 0x90
4242 && operand_type_equal (&i.types [0], &acc32)
4243 && operand_type_equal (&i.types [1], &acc32))
4244 continue;
4245 if (i.swap_operand)
4246 {
4247 /* If we swap operands in encoding, we either match
4248 the next template or reverse the direction of operands. */
4249 if (t->opcode_modifier.s)
4250 continue;
4251 else if (t->opcode_modifier.d)
4252 goto check_reverse;
4253 }
4254
4255 case 3:
4256 /* If we swap operands in encoding, we match the next template. */
4257 if (i.swap_operand && t->opcode_modifier.s)
4258 continue;
4259 case 4:
4260 case 5:
4261 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4262 if (!operand_type_match (overlap0, i.types[0])
4263 || !operand_type_match (overlap1, i.types[1])
4264 || (check_register
4265 && !operand_type_register_match (overlap0, i.types[0],
4266 operand_types[0],
4267 overlap1, i.types[1],
4268 operand_types[1])))
4269 {
4270 /* Check if other direction is valid ... */
4271 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4272 continue;
4273
4274 check_reverse:
4275 /* Try reversing direction of operands. */
4276 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4277 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4278 if (!operand_type_match (overlap0, i.types[0])
4279 || !operand_type_match (overlap1, i.types[1])
4280 || (check_register
4281 && !operand_type_register_match (overlap0,
4282 i.types[0],
4283 operand_types[1],
4284 overlap1,
4285 i.types[1],
4286 operand_types[0])))
4287 {
4288 /* Does not match either direction. */
4289 continue;
4290 }
4291 /* found_reverse_match holds which of D or FloatDR
4292 we've found. */
4293 if (t->opcode_modifier.d)
4294 found_reverse_match = Opcode_D;
4295 else if (t->opcode_modifier.floatd)
4296 found_reverse_match = Opcode_FloatD;
4297 else
4298 found_reverse_match = 0;
4299 if (t->opcode_modifier.floatr)
4300 found_reverse_match |= Opcode_FloatR;
4301 }
4302 else
4303 {
4304 /* Found a forward 2 operand match here. */
4305 switch (t->operands)
4306 {
4307 case 5:
4308 overlap4 = operand_type_and (i.types[4],
4309 operand_types[4]);
4310 case 4:
4311 overlap3 = operand_type_and (i.types[3],
4312 operand_types[3]);
4313 case 3:
4314 overlap2 = operand_type_and (i.types[2],
4315 operand_types[2]);
4316 break;
4317 }
4318
4319 switch (t->operands)
4320 {
4321 case 5:
4322 if (!operand_type_match (overlap4, i.types[4])
4323 || !operand_type_register_match (overlap3,
4324 i.types[3],
4325 operand_types[3],
4326 overlap4,
4327 i.types[4],
4328 operand_types[4]))
4329 continue;
4330 case 4:
4331 if (!operand_type_match (overlap3, i.types[3])
4332 || (check_register
4333 && !operand_type_register_match (overlap2,
4334 i.types[2],
4335 operand_types[2],
4336 overlap3,
4337 i.types[3],
4338 operand_types[3])))
4339 continue;
4340 case 3:
4341 /* Here we make use of the fact that there are no
4342 reverse match 3 operand instructions, and all 3
4343 operand instructions only need to be checked for
4344 register consistency between operands 2 and 3. */
4345 if (!operand_type_match (overlap2, i.types[2])
4346 || (check_register
4347 && !operand_type_register_match (overlap1,
4348 i.types[1],
4349 operand_types[1],
4350 overlap2,
4351 i.types[2],
4352 operand_types[2])))
4353 continue;
4354 break;
4355 }
4356 }
4357 /* Found either forward/reverse 2, 3 or 4 operand match here:
4358 slip through to break. */
4359 }
4360 if (!found_cpu_match)
4361 {
4362 found_reverse_match = 0;
4363 continue;
4364 }
4365
4366 /* Check if vector and VEX operands are valid. */
4367 if (check_VecOperands (t) || VEX_check_operands (t))
4368 {
4369 specific_error = i.error;
4370 continue;
4371 }
4372
4373 /* We've found a match; break out of loop. */
4374 break;
4375 }
4376
4377 if (t == current_templates->end)
4378 {
4379 /* We found no match. */
4380 const char *err_msg;
4381 switch (specific_error ? specific_error : i.error)
4382 {
4383 default:
4384 abort ();
4385 case operand_size_mismatch:
4386 err_msg = _("operand size mismatch");
4387 break;
4388 case operand_type_mismatch:
4389 err_msg = _("operand type mismatch");
4390 break;
4391 case register_type_mismatch:
4392 err_msg = _("register type mismatch");
4393 break;
4394 case number_of_operands_mismatch:
4395 err_msg = _("number of operands mismatch");
4396 break;
4397 case invalid_instruction_suffix:
4398 err_msg = _("invalid instruction suffix");
4399 break;
4400 case bad_imm4:
4401 err_msg = _("constant doesn't fit in 4 bits");
4402 break;
4403 case old_gcc_only:
4404 err_msg = _("only supported with old gcc");
4405 break;
4406 case unsupported_with_intel_mnemonic:
4407 err_msg = _("unsupported with Intel mnemonic");
4408 break;
4409 case unsupported_syntax:
4410 err_msg = _("unsupported syntax");
4411 break;
4412 case unsupported:
4413 as_bad (_("unsupported instruction `%s'"),
4414 current_templates->start->name);
4415 return NULL;
4416 case invalid_vsib_address:
4417 err_msg = _("invalid VSIB address");
4418 break;
4419 case invalid_vector_register_set:
4420 err_msg = _("mask, index, and destination registers must be distinct");
4421 break;
4422 case unsupported_vector_index_register:
4423 err_msg = _("unsupported vector index register");
4424 break;
4425 }
4426 as_bad (_("%s for `%s'"), err_msg,
4427 current_templates->start->name);
4428 return NULL;
4429 }
4430
4431 if (!quiet_warnings)
4432 {
4433 if (!intel_syntax
4434 && (i.types[0].bitfield.jumpabsolute
4435 != operand_types[0].bitfield.jumpabsolute))
4436 {
4437 as_warn (_("indirect %s without `*'"), t->name);
4438 }
4439
4440 if (t->opcode_modifier.isprefix
4441 && t->opcode_modifier.ignoresize)
4442 {
4443 /* Warn them that a data or address size prefix doesn't
4444 affect assembly of the next line of code. */
4445 as_warn (_("stand-alone `%s' prefix"), t->name);
4446 }
4447 }
4448
4449 /* Copy the template we found. */
4450 i.tm = *t;
4451
4452 if (addr_prefix_disp != -1)
4453 i.tm.operand_types[addr_prefix_disp]
4454 = operand_types[addr_prefix_disp];
4455
4456 if (found_reverse_match)
4457 {
4458 /* If we found a reverse match we must alter the opcode
4459 direction bit. found_reverse_match holds bits to change
4460 (different for int & float insns). */
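/* E.g. for ordinary integer ops, Opcode_D is the 0x02 direction
   bit, so reversing `add %ebx, (%eax)' vs. `add (%eax), %ebx'
   toggles opcode 0x01 <-> 0x03.  (Illustrative; the exact bits
   come from the opcode table.)  */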
4461
4462 i.tm.base_opcode ^= found_reverse_match;
4463
4464 i.tm.operand_types[0] = operand_types[1];
4465 i.tm.operand_types[1] = operand_types[0];
4466 }
4467
4468 return t;
4469 }
4470
4471 static int
4472 check_string (void)
4473 {
4474 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
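/* Illustrative examples: `scas' hard-wires its memory operand to
   %es, so `scasw %ds:(%di)' is rejected below, while `movs' only
   pins the destination segment and allows an override on the
   source.  */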
4475 if (i.tm.operand_types[mem_op].bitfield.esseg)
4476 {
4477 if (i.seg[0] != NULL && i.seg[0] != &es)
4478 {
4479 as_bad (_("`%s' operand %d must use `%ses' segment"),
4480 i.tm.name,
4481 mem_op + 1,
4482 register_prefix);
4483 return 0;
4484 }
4485 /* There's only ever one segment override allowed per instruction.
4486 This instruction possibly has a legal segment override on the
4487 second operand, so copy the segment to where non-string
4488 instructions store it, allowing common code. */
4489 i.seg[0] = i.seg[1];
4490 }
4491 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4492 {
4493 if (i.seg[1] != NULL && i.seg[1] != &es)
4494 {
4495 as_bad (_("`%s' operand %d must use `%ses' segment"),
4496 i.tm.name,
4497 mem_op + 2,
4498 register_prefix);
4499 return 0;
4500 }
4501 }
4502 return 1;
4503 }
4504
4505 static int
4506 process_suffix (void)
4507 {
4508 /* If matched instruction specifies an explicit instruction mnemonic
4509 suffix, use it. */
4510 if (i.tm.opcode_modifier.size16)
4511 i.suffix = WORD_MNEM_SUFFIX;
4512 else if (i.tm.opcode_modifier.size32)
4513 i.suffix = LONG_MNEM_SUFFIX;
4514 else if (i.tm.opcode_modifier.size64)
4515 i.suffix = QWORD_MNEM_SUFFIX;
4516 else if (i.reg_operands)
4517 {
4518 /* If there's no instruction mnemonic suffix we try to invent one
4519 based on register operands. */
4520 if (!i.suffix)
4521 {
4522 /* We take i.suffix from the last register operand specified.
4523 The destination register type is more significant than the source
4524 register type. crc32 in SSE4.2 prefers source register
4525 type. */
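/* E.g. `crc32 %cl, %ebx' takes its size from the byte source
   register, so the byte-form opcode is selected even though the
   destination is 32 bits wide.  (Illustrative.)  */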
4526 if (i.tm.base_opcode == 0xf20f38f1)
4527 {
4528 if (i.types[0].bitfield.reg16)
4529 i.suffix = WORD_MNEM_SUFFIX;
4530 else if (i.types[0].bitfield.reg32)
4531 i.suffix = LONG_MNEM_SUFFIX;
4532 else if (i.types[0].bitfield.reg64)
4533 i.suffix = QWORD_MNEM_SUFFIX;
4534 }
4535 else if (i.tm.base_opcode == 0xf20f38f0)
4536 {
4537 if (i.types[0].bitfield.reg8)
4538 i.suffix = BYTE_MNEM_SUFFIX;
4539 }
4540
4541 if (!i.suffix)
4542 {
4543 int op;
4544
4545 if (i.tm.base_opcode == 0xf20f38f1
4546 || i.tm.base_opcode == 0xf20f38f0)
4547 {
4548 /* We have to know the operand size for crc32. */
4549 as_bad (_("ambiguous memory operand size for `%s'"),
4550 i.tm.name);
4551 return 0;
4552 }
4553
4554 for (op = i.operands; --op >= 0;)
4555 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4556 {
4557 if (i.types[op].bitfield.reg8)
4558 {
4559 i.suffix = BYTE_MNEM_SUFFIX;
4560 break;
4561 }
4562 else if (i.types[op].bitfield.reg16)
4563 {
4564 i.suffix = WORD_MNEM_SUFFIX;
4565 break;
4566 }
4567 else if (i.types[op].bitfield.reg32)
4568 {
4569 i.suffix = LONG_MNEM_SUFFIX;
4570 break;
4571 }
4572 else if (i.types[op].bitfield.reg64)
4573 {
4574 i.suffix = QWORD_MNEM_SUFFIX;
4575 break;
4576 }
4577 }
4578 }
4579 }
4580 else if (i.suffix == BYTE_MNEM_SUFFIX)
4581 {
4582 if (intel_syntax
4583 && i.tm.opcode_modifier.ignoresize
4584 && i.tm.opcode_modifier.no_bsuf)
4585 i.suffix = 0;
4586 else if (!check_byte_reg ())
4587 return 0;
4588 }
4589 else if (i.suffix == LONG_MNEM_SUFFIX)
4590 {
4591 if (intel_syntax
4592 && i.tm.opcode_modifier.ignoresize
4593 && i.tm.opcode_modifier.no_lsuf)
4594 i.suffix = 0;
4595 else if (!check_long_reg ())
4596 return 0;
4597 }
4598 else if (i.suffix == QWORD_MNEM_SUFFIX)
4599 {
4600 if (intel_syntax
4601 && i.tm.opcode_modifier.ignoresize
4602 && i.tm.opcode_modifier.no_qsuf)
4603 i.suffix = 0;
4604 else if (!check_qword_reg ())
4605 return 0;
4606 }
4607 else if (i.suffix == WORD_MNEM_SUFFIX)
4608 {
4609 if (intel_syntax
4610 && i.tm.opcode_modifier.ignoresize
4611 && i.tm.opcode_modifier.no_wsuf)
4612 i.suffix = 0;
4613 else if (!check_word_reg ())
4614 return 0;
4615 }
4616 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4617 || i.suffix == YMMWORD_MNEM_SUFFIX)
4618 {
4619 /* Skip if the instruction has x/y suffix. match_template
4620 should check if it is a valid suffix. */
4621 }
4622 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4623 /* Do nothing if the instruction is going to ignore the prefix. */
4624 ;
4625 else
4626 abort ();
4627 }
4628 else if (i.tm.opcode_modifier.defaultsize
4629 && !i.suffix
4630 /* exclude fldenv/frstor/fsave/fstenv */
4631 && i.tm.opcode_modifier.no_ssuf)
4632 {
4633 i.suffix = stackop_size;
4634 }
4635 else if (intel_syntax
4636 && !i.suffix
4637 && (i.tm.operand_types[0].bitfield.jumpabsolute
4638 || i.tm.opcode_modifier.jumpbyte
4639 || i.tm.opcode_modifier.jumpintersegment
4640 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4641 && i.tm.extension_opcode <= 3)))
4642 {
4643 switch (flag_code)
4644 {
4645 case CODE_64BIT:
4646 if (!i.tm.opcode_modifier.no_qsuf)
4647 {
4648 i.suffix = QWORD_MNEM_SUFFIX;
4649 break;
4650 }
4651 case CODE_32BIT:
4652 if (!i.tm.opcode_modifier.no_lsuf)
4653 i.suffix = LONG_MNEM_SUFFIX;
4654 break;
4655 case CODE_16BIT:
4656 if (!i.tm.opcode_modifier.no_wsuf)
4657 i.suffix = WORD_MNEM_SUFFIX;
4658 break;
4659 }
4660 }
4661
4662 if (!i.suffix)
4663 {
4664 if (!intel_syntax)
4665 {
4666 if (i.tm.opcode_modifier.w)
4667 {
4668 as_bad (_("no instruction mnemonic suffix given and "
4669 "no register operands; can't size instruction"));
4670 return 0;
4671 }
4672 }
4673 else
4674 {
4675 unsigned int suffixes;
4676
4677 suffixes = !i.tm.opcode_modifier.no_bsuf;
4678 if (!i.tm.opcode_modifier.no_wsuf)
4679 suffixes |= 1 << 1;
4680 if (!i.tm.opcode_modifier.no_lsuf)
4681 suffixes |= 1 << 2;
4682 if (!i.tm.opcode_modifier.no_ldsuf)
4683 suffixes |= 1 << 3;
4684 if (!i.tm.opcode_modifier.no_ssuf)
4685 suffixes |= 1 << 4;
4686 if (!i.tm.opcode_modifier.no_qsuf)
4687 suffixes |= 1 << 5;
4688
4689 /* There is more than one suffix match. */
4690 if (i.tm.opcode_modifier.w
4691 || ((suffixes & (suffixes - 1))
4692 && !i.tm.opcode_modifier.defaultsize
4693 && !i.tm.opcode_modifier.ignoresize))
4694 {
4695 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4696 return 0;
4697 }
4698 }
4699 }
4700
4701 /* Change the opcode based on the operand size given by i.suffix;
4702 We don't need to change things for byte insns. */
4703
4704 if (i.suffix
4705 && i.suffix != BYTE_MNEM_SUFFIX
4706 && i.suffix != XMMWORD_MNEM_SUFFIX
4707 && i.suffix != YMMWORD_MNEM_SUFFIX)
4708 {
4709 /* It's not a byte, select word/dword operation. */
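/* E.g. the byte form of `mov' is opcode 0x88; or-ing in the W bit
   below yields 0x89 for the word/dword/qword forms.
   (Illustrative.)  */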
4710 if (i.tm.opcode_modifier.w)
4711 {
4712 if (i.tm.opcode_modifier.shortform)
4713 i.tm.base_opcode |= 8;
4714 else
4715 i.tm.base_opcode |= 1;
4716 }
4717
4718 /* Now select between word & dword operations via the operand
4719 size prefix, except for instructions that will ignore this
4720 prefix anyway. */
4721 if (i.tm.opcode_modifier.addrprefixop0)
4722 {
4723 /* The address size override prefix changes the size of the
4724 first operand. */
4725 if ((flag_code == CODE_32BIT
4726 && i.op->regs[0].reg_type.bitfield.reg16)
4727 || (flag_code != CODE_32BIT
4728 && i.op->regs[0].reg_type.bitfield.reg32))
4729 if (!add_prefix (ADDR_PREFIX_OPCODE))
4730 return 0;
4731 }
4732 else if (i.suffix != QWORD_MNEM_SUFFIX
4733 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4734 && !i.tm.opcode_modifier.ignoresize
4735 && !i.tm.opcode_modifier.floatmf
4736 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4737 || (flag_code == CODE_64BIT
4738 && i.tm.opcode_modifier.jumpbyte)))
4739 {
4740 unsigned int prefix = DATA_PREFIX_OPCODE;
4741
4742 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4743 prefix = ADDR_PREFIX_OPCODE;
4744
4745 if (!add_prefix (prefix))
4746 return 0;
4747 }
4748
4749 /* Set mode64 for an operand. */
4750 if (i.suffix == QWORD_MNEM_SUFFIX
4751 && flag_code == CODE_64BIT
4752 && !i.tm.opcode_modifier.norex64)
4753 {
4754 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4755 need rex64. cmpxchg8b is also a special case. */
4756 if (! (i.operands == 2
4757 && i.tm.base_opcode == 0x90
4758 && i.tm.extension_opcode == None
4759 && operand_type_equal (&i.types [0], &acc64)
4760 && operand_type_equal (&i.types [1], &acc64))
4761 && ! (i.operands == 1
4762 && i.tm.base_opcode == 0xfc7
4763 && i.tm.extension_opcode == 1
4764 && !operand_type_check (i.types [0], reg)
4765 && operand_type_check (i.types [0], anymem)))
4766 i.rex |= REX_W;
4767 }
4768
4769 /* Size floating point instruction. */
4770 if (i.suffix == LONG_MNEM_SUFFIX)
4771 if (i.tm.opcode_modifier.floatmf)
4772 i.tm.base_opcode ^= 4;
4773 }
4774
4775 return 1;
4776 }
4777
4778 static int
4779 check_byte_reg (void)
4780 {
4781 int op;
4782
4783 for (op = i.operands; --op >= 0;)
4784 {
4785 /* If this is an eight bit register, it's OK. If it's the 16 or
4786 32 bit version of an eight bit register, we will just use the
4787 low portion, and that's OK too. */
4788 if (i.types[op].bitfield.reg8)
4789 continue;
4790
4791 /* I/O port address operands are OK too. */
4792 if (i.tm.operand_types[op].bitfield.inoutportreg)
4793 continue;
4794
4795 /* crc32 doesn't generate this warning. */
4796 if (i.tm.base_opcode == 0xf20f38f0)
4797 continue;
4798
4799 if ((i.types[op].bitfield.reg16
4800 || i.types[op].bitfield.reg32
4801 || i.types[op].bitfield.reg64)
4802 && i.op[op].regs->reg_num < 4
4803 /* Prohibit these changes in 64bit mode, since the lowering
4804 would be more complicated. */
4805 && flag_code != CODE_64BIT)
4806 {
4807 #if REGISTER_WARNINGS
4808 if (!quiet_warnings)
4809 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4810 register_prefix,
4811 (i.op[op].regs + (i.types[op].bitfield.reg16
4812 ? REGNAM_AL - REGNAM_AX
4813 : REGNAM_AL - REGNAM_EAX))->reg_name,
4814 register_prefix,
4815 i.op[op].regs->reg_name,
4816 i.suffix);
4817 #endif
4818 continue;
4819 }
4820 /* Any other register is bad. */
4821 if (i.types[op].bitfield.reg16
4822 || i.types[op].bitfield.reg32
4823 || i.types[op].bitfield.reg64
4824 || i.types[op].bitfield.regmmx
4825 || i.types[op].bitfield.regxmm
4826 || i.types[op].bitfield.regymm
4827 || i.types[op].bitfield.sreg2
4828 || i.types[op].bitfield.sreg3
4829 || i.types[op].bitfield.control
4830 || i.types[op].bitfield.debug
4831 || i.types[op].bitfield.test
4832 || i.types[op].bitfield.floatreg
4833 || i.types[op].bitfield.floatacc)
4834 {
4835 as_bad (_("`%s%s' not allowed with `%s%c'"),
4836 register_prefix,
4837 i.op[op].regs->reg_name,
4838 i.tm.name,
4839 i.suffix);
4840 return 0;
4841 }
4842 }
4843 return 1;
4844 }
4845
4846 static int
4847 check_long_reg (void)
4848 {
4849 int op;
4850
4851 for (op = i.operands; --op >= 0;)
4852 /* Reject eight bit registers, except where the template requires
4853 them. (eg. movzb) */
4854 if (i.types[op].bitfield.reg8
4855 && (i.tm.operand_types[op].bitfield.reg16
4856 || i.tm.operand_types[op].bitfield.reg32
4857 || i.tm.operand_types[op].bitfield.acc))
4858 {
4859 as_bad (_("`%s%s' not allowed with `%s%c'"),
4860 register_prefix,
4861 i.op[op].regs->reg_name,
4862 i.tm.name,
4863 i.suffix);
4864 return 0;
4865 }
4866 /* Warn if the e prefix on a general reg is missing. */
4867 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4868 && i.types[op].bitfield.reg16
4869 && (i.tm.operand_types[op].bitfield.reg32
4870 || i.tm.operand_types[op].bitfield.acc))
4871 {
4872 /* Prohibit these changes in the 64bit mode, since the
4873 lowering is more complicated. */
4874 if (flag_code == CODE_64BIT)
4875 {
4876 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4877 register_prefix, i.op[op].regs->reg_name,
4878 i.suffix);
4879 return 0;
4880 }
4881 #if REGISTER_WARNINGS
4882 else
4883 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4884 register_prefix,
4885 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4886 register_prefix,
4887 i.op[op].regs->reg_name,
4888 i.suffix);
4889 #endif
4890 }
4891 /* Warn if the r prefix on a general reg is present. */
4892 else if (i.types[op].bitfield.reg64
4893 && (i.tm.operand_types[op].bitfield.reg32
4894 || i.tm.operand_types[op].bitfield.acc))
4895 {
4896 if (intel_syntax
4897 && i.tm.opcode_modifier.toqword
4898 && !i.types[0].bitfield.regxmm)
4899 {
4900 /* Convert to QWORD. We want REX byte. */
4901 i.suffix = QWORD_MNEM_SUFFIX;
4902 }
4903 else
4904 {
4905 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4906 register_prefix, i.op[op].regs->reg_name,
4907 i.suffix);
4908 return 0;
4909 }
4910 }
4911 return 1;
4912 }
4913
4914 static int
4915 check_qword_reg (void)
4916 {
4917 int op;
4918
4919 for (op = i.operands; --op >= 0; )
4920 /* Reject eight bit registers, except where the template requires
4921 them. (eg. movzb) */
4922 if (i.types[op].bitfield.reg8
4923 && (i.tm.operand_types[op].bitfield.reg16
4924 || i.tm.operand_types[op].bitfield.reg32
4925 || i.tm.operand_types[op].bitfield.acc))
4926 {
4927 as_bad (_("`%s%s' not allowed with `%s%c'"),
4928 register_prefix,
4929 i.op[op].regs->reg_name,
4930 i.tm.name,
4931 i.suffix);
4932 return 0;
4933 }
4934 /* Warn if the r prefix on a general reg is missing. */
4935 else if ((i.types[op].bitfield.reg16
4936 || i.types[op].bitfield.reg32)
4937 && (i.tm.operand_types[op].bitfield.reg32
4938 || i.tm.operand_types[op].bitfield.acc))
4939 {
4940 /* Prohibit these changes in the 64bit mode, since the
4941 lowering is more complicated. */
4942 if (intel_syntax
4943 && i.tm.opcode_modifier.todword
4944 && !i.types[0].bitfield.regxmm)
4945 {
4946 /* Convert to DWORD. We don't want REX byte. */
4947 i.suffix = LONG_MNEM_SUFFIX;
4948 }
4949 else
4950 {
4951 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4952 register_prefix, i.op[op].regs->reg_name,
4953 i.suffix);
4954 return 0;
4955 }
4956 }
4957 return 1;
4958 }
4959
4960 static int
4961 check_word_reg (void)
4962 {
4963 int op;
4964 for (op = i.operands; --op >= 0;)
4965 /* Reject eight bit registers, except where the template requires
4966 them. (eg. movzb) */
4967 if (i.types[op].bitfield.reg8
4968 && (i.tm.operand_types[op].bitfield.reg16
4969 || i.tm.operand_types[op].bitfield.reg32
4970 || i.tm.operand_types[op].bitfield.acc))
4971 {
4972 as_bad (_("`%s%s' not allowed with `%s%c'"),
4973 register_prefix,
4974 i.op[op].regs->reg_name,
4975 i.tm.name,
4976 i.suffix);
4977 return 0;
4978 }
4979 /* Warn if the e prefix on a general reg is present. */
4980 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4981 && i.types[op].bitfield.reg32
4982 && (i.tm.operand_types[op].bitfield.reg16
4983 || i.tm.operand_types[op].bitfield.acc))
4984 {
4985 /* Prohibit these changes in the 64bit mode, since the
4986 lowering is more complicated. */
4987 if (flag_code == CODE_64BIT)
4988 {
4989 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4990 register_prefix, i.op[op].regs->reg_name,
4991 i.suffix);
4992 return 0;
4993 }
4994 else
4995 #if REGISTER_WARNINGS
4996 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4997 register_prefix,
4998 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4999 register_prefix,
5000 i.op[op].regs->reg_name,
5001 i.suffix);
5002 #endif
5003 }
5004 return 1;
5005 }
5006
5007 static int
5008 update_imm (unsigned int j)
5009 {
5010 i386_operand_type overlap = i.types[j];
5011 if ((overlap.bitfield.imm8
5012 || overlap.bitfield.imm8s
5013 || overlap.bitfield.imm16
5014 || overlap.bitfield.imm32
5015 || overlap.bitfield.imm32s
5016 || overlap.bitfield.imm64)
5017 && !operand_type_equal (&overlap, &imm8)
5018 && !operand_type_equal (&overlap, &imm8s)
5019 && !operand_type_equal (&overlap, &imm16)
5020 && !operand_type_equal (&overlap, &imm32)
5021 && !operand_type_equal (&overlap, &imm32s)
5022 && !operand_type_equal (&overlap, &imm64))
5023 {
5024 if (i.suffix)
5025 {
5026 i386_operand_type temp;
5027
5028 operand_type_set (&temp, 0);
5029 if (i.suffix == BYTE_MNEM_SUFFIX)
5030 {
5031 temp.bitfield.imm8 = overlap.bitfield.imm8;
5032 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5033 }
5034 else if (i.suffix == WORD_MNEM_SUFFIX)
5035 temp.bitfield.imm16 = overlap.bitfield.imm16;
5036 else if (i.suffix == QWORD_MNEM_SUFFIX)
5037 {
5038 temp.bitfield.imm64 = overlap.bitfield.imm64;
5039 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5040 }
5041 else
5042 temp.bitfield.imm32 = overlap.bitfield.imm32;
5043 overlap = temp;
5044 }
5045 else if (operand_type_equal (&overlap, &imm16_32_32s)
5046 || operand_type_equal (&overlap, &imm16_32)
5047 || operand_type_equal (&overlap, &imm16_32s))
5048 {
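/* The XOR below selects imm16 exactly when the effective operand
   size is 16 bits: e.g. `addw $0x1234, %ax' in 32-bit code carries
   a data size prefix and so encodes a 16-bit immediate, as does the
   same insn without the prefix in 16-bit code.  (Illustrative.)  */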
5049 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5050 overlap = imm16;
5051 else
5052 overlap = imm32s;
5053 }
5054 if (!operand_type_equal (&overlap, &imm8)
5055 && !operand_type_equal (&overlap, &imm8s)
5056 && !operand_type_equal (&overlap, &imm16)
5057 && !operand_type_equal (&overlap, &imm32)
5058 && !operand_type_equal (&overlap, &imm32s)
5059 && !operand_type_equal (&overlap, &imm64))
5060 {
5061 as_bad (_("no instruction mnemonic suffix given; "
5062 "can't determine immediate size"));
5063 return 0;
5064 }
5065 }
5066 i.types[j] = overlap;
5067
5068 return 1;
5069 }
5070
5071 static int
5072 finalize_imm (void)
5073 {
5074 unsigned int j, n;
5075
5076 /* Update the first 2 immediate operands. */
5077 n = i.operands > 2 ? 2 : i.operands;
5078 if (n)
5079 {
5080 for (j = 0; j < n; j++)
5081 if (update_imm (j) == 0)
5082 return 0;
5083
5084 /* The 3rd operand can't be an immediate operand. */
5085 gas_assert (operand_type_check (i.types[2], imm) == 0);
5086 }
5087
5088 return 1;
5089 }
5090
5091 static int
5092 bad_implicit_operand (int xmm)
5093 {
5094 const char *ireg = xmm ? "xmm0" : "ymm0";
5095
5096 if (intel_syntax)
5097 as_bad (_("the last operand of `%s' must be `%s%s'"),
5098 i.tm.name, register_prefix, ireg);
5099 else
5100 as_bad (_("the first operand of `%s' must be `%s%s'"),
5101 i.tm.name, register_prefix, ireg);
5102 return 0;
5103 }
5104
5105 static int
5106 process_operands (void)
5107 {
5108 /* Default segment register this instruction will use for memory
5109 accesses. 0 means unknown. This is only for optimizing out
5110 unnecessary segment overrides. */
5111 const seg_entry *default_seg = 0;
5112
5113 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5114 {
5115 unsigned int dupl = i.operands;
5116 unsigned int dest = dupl - 1;
5117 unsigned int j;
5118
5119 /* The destination must be an xmm register. */
5120 gas_assert (i.reg_operands
5121 && MAX_OPERANDS > dupl
5122 && operand_type_equal (&i.types[dest], &regxmm));
5123
5124 if (i.tm.opcode_modifier.firstxmm0)
5125 {
5126 /* The first operand is implicit and must be xmm0. */
5127 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5128 if (register_number (i.op[0].regs) != 0)
5129 return bad_implicit_operand (1);
5130
5131 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5132 {
5133 /* Keep xmm0 for instructions with VEX prefix and 3
5134 sources. */
5135 goto duplicate;
5136 }
5137 else
5138 {
5139 /* We remove the first xmm0 and keep the number of
5140 operands unchanged, which in fact duplicates the
5141 destination. */
5142 for (j = 1; j < i.operands; j++)
5143 {
5144 i.op[j - 1] = i.op[j];
5145 i.types[j - 1] = i.types[j];
5146 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5147 }
5148 }
5149 }
5150 else if (i.tm.opcode_modifier.implicit1stxmm0)
5151 {
5152 gas_assert ((MAX_OPERANDS - 1) > dupl
5153 && (i.tm.opcode_modifier.vexsources
5154 == VEX3SOURCES));
5155
5156 /* Add the implicit xmm0 for instructions with VEX prefix
5157 and 3 sources. */
5158 for (j = i.operands; j > 0; j--)
5159 {
5160 i.op[j] = i.op[j - 1];
5161 i.types[j] = i.types[j - 1];
5162 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5163 }
5164 i.op[0].regs
5165 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5166 i.types[0] = regxmm;
5167 i.tm.operand_types[0] = regxmm;
5168
5169 i.operands += 2;
5170 i.reg_operands += 2;
5171 i.tm.operands += 2;
5172
5173 dupl++;
5174 dest++;
5175 i.op[dupl] = i.op[dest];
5176 i.types[dupl] = i.types[dest];
5177 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5178 }
5179 else
5180 {
5181 duplicate:
5182 i.operands++;
5183 i.reg_operands++;
5184 i.tm.operands++;
5185
5186 i.op[dupl] = i.op[dest];
5187 i.types[dupl] = i.types[dest];
5188 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5189 }
5190
5191 if (i.tm.opcode_modifier.immext)
5192 process_immext ();
5193 }
5194 else if (i.tm.opcode_modifier.firstxmm0)
5195 {
5196 unsigned int j;
5197
5198 /* The first operand is implicit and must be xmm0/ymm0. */
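/* E.g. the SSE4.1 form `blendvps %xmm0, %xmm1, %xmm2' reaches this
   point; the explicit %xmm0 is dropped below because the opcode
   implies it.  (Illustrative.)  */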
5199 gas_assert (i.reg_operands
5200 && (operand_type_equal (&i.types[0], &regxmm)
5201 || operand_type_equal (&i.types[0], &regymm)));
5202 if (register_number (i.op[0].regs) != 0)
5203 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5204
5205 for (j = 1; j < i.operands; j++)
5206 {
5207 i.op[j - 1] = i.op[j];
5208 i.types[j - 1] = i.types[j];
5209
5210 /* We need to adjust fields in i.tm since they are used by
5211 build_modrm_byte. */
5212 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5213 }
5214
5215 i.operands--;
5216 i.reg_operands--;
5217 i.tm.operands--;
5218 }
5219 else if (i.tm.opcode_modifier.regkludge)
5220 {
5221 /* The imul $imm, %reg instruction is converted into
5222 imul $imm, %reg, %reg, and the clr %reg instruction
5223 is converted into xor %reg, %reg. */
5224
5225 unsigned int first_reg_op;
5226
5227 if (operand_type_check (i.types[0], reg))
5228 first_reg_op = 0;
5229 else
5230 first_reg_op = 1;
5231 /* Pretend we saw the extra register operand. */
5232 gas_assert (i.reg_operands == 1
5233 && i.op[first_reg_op + 1].regs == 0);
5234 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5235 i.types[first_reg_op + 1] = i.types[first_reg_op];
5236 i.operands++;
5237 i.reg_operands++;
5238 }
5239
5240 if (i.tm.opcode_modifier.shortform)
5241 {
5242 if (i.types[0].bitfield.sreg2
5243 || i.types[0].bitfield.sreg3)
5244 {
5245 if (i.tm.base_opcode == POP_SEG_SHORT
5246 && i.op[0].regs->reg_num == 1)
5247 {
5248 as_bad (_("you can't `pop %scs'"), register_prefix);
5249 return 0;
5250 }
5251 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5252 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5253 i.rex |= REX_B;
5254 }
5255 else
5256 {
5257 /* The register or float register operand is in operand
5258 0 or 1. */
5259 unsigned int op;
5260
5261 if (i.types[0].bitfield.floatreg
5262 || operand_type_check (i.types[0], reg))
5263 op = 0;
5264 else
5265 op = 1;
5266 /* Register goes in low 3 bits of opcode. */
5267 i.tm.base_opcode |= i.op[op].regs->reg_num;
5268 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5269 i.rex |= REX_B;
5270 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5271 {
5272 /* Warn about some common errors, but press on regardless.
5273 The first case can be generated by gcc (<= 2.8.1). */
5274 if (i.operands == 2)
5275 {
5276 /* Reversed arguments on faddp, fsubp, etc. */
5277 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5278 register_prefix, i.op[!intel_syntax].regs->reg_name,
5279 register_prefix, i.op[intel_syntax].regs->reg_name);
5280 }
5281 else
5282 {
5283 /* Extraneous `l' suffix on fp insn. */
5284 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5285 register_prefix, i.op[0].regs->reg_name);
5286 }
5287 }
5288 }
5289 }
5290 else if (i.tm.opcode_modifier.modrm)
5291 {
5292 /* The opcode is completed (modulo i.tm.extension_opcode which
5293 must be put into the modrm byte). Now, we make the modrm and
5294 index base bytes based on all the info we've collected. */
5295
5296 default_seg = build_modrm_byte ();
5297 }
5298 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5299 {
5300 default_seg = &ds;
5301 }
5302 else if (i.tm.opcode_modifier.isstring)
5303 {
5304 /* For the string instructions that allow a segment override
5305 on one of their operands, the default segment is ds. */
5306 default_seg = &ds;
5307 }
5308
5309 if (i.tm.base_opcode == 0x8d /* lea */
5310 && i.seg[0]
5311 && !quiet_warnings)
5312 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5313
5314 /* If a segment was explicitly specified, and the specified segment
5315 is not the default, use an opcode prefix to select it. If we
5316 never figured out what the default segment is, then default_seg
5317 will be zero at this point, and the specified segment prefix will
5318 always be used. */
5319 if ((i.seg[0]) && (i.seg[0] != default_seg))
5320 {
5321 if (!add_prefix (i.seg[0]->seg_prefix))
5322 return 0;
5323 }
5324 return 1;
5325 }
5326
5327 static const seg_entry *
5328 build_modrm_byte (void)
5329 {
5330 const seg_entry *default_seg = 0;
5331 unsigned int source, dest;
5332 int vex_3_sources;
5333
5334 /* The first operand of instructions with VEX prefix and 3 sources
5335 must be VEX_Imm4. */
5336 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5337 if (vex_3_sources)
5338 {
5339 unsigned int nds, reg_slot;
5340 expressionS *exp;
5341
5342 if (i.tm.opcode_modifier.veximmext
5343 && i.tm.opcode_modifier.immext)
5344 {
5345 dest = i.operands - 2;
5346 gas_assert (dest == 3);
5347 }
5348 else
5349 dest = i.operands - 1;
5350 nds = dest - 1;
5351
5352 /* There are 2 kinds of instructions:
5353 1. 5 operands: 4 register operands or 3 register operands
5354 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5355 VexW0 or VexW1. The destination must be either XMM or YMM
5356 register.
5357 2. 4 operands: 4 register operands or 3 register operands
5358 plus 1 memory operand, VexXDS, and VexImmExt */
5359 gas_assert ((i.reg_operands == 4
5360 || (i.reg_operands == 3 && i.mem_operands == 1))
5361 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5362 && (i.tm.opcode_modifier.veximmext
5363 || (i.imm_operands == 1
5364 && i.types[0].bitfield.vec_imm4
5365 && (i.tm.opcode_modifier.vexw == VEXW0
5366 || i.tm.opcode_modifier.vexw == VEXW1)
5367 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5368 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5369
5370 if (i.imm_operands == 0)
5371 {
5372 /* When there is no immediate operand, generate an 8bit
5373 immediate operand to encode the first operand. */
5374 exp = &im_expressions[i.imm_operands++];
5375 i.op[i.operands].imms = exp;
5376 i.types[i.operands] = imm8;
5377 i.operands++;
5378 /* If VexW1 is set, the first operand is the source and
5379 the second operand is encoded in the immediate operand. */
5380 if (i.tm.opcode_modifier.vexw == VEXW1)
5381 {
5382 source = 0;
5383 reg_slot = 1;
5384 }
5385 else
5386 {
5387 source = 1;
5388 reg_slot = 0;
5389 }
5390
5391 /* FMA swaps REG and NDS. */
5392 if (i.tm.cpu_flags.bitfield.cpufma)
5393 {
5394 unsigned int tmp;
5395 tmp = reg_slot;
5396 reg_slot = nds;
5397 nds = tmp;
5398 }
5399
5400 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5401 &regxmm)
5402 || operand_type_equal (&i.tm.operand_types[reg_slot],
5403 &regymm));
5404 exp->X_op = O_constant;
5405 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
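/* Illustrative: the register number lands in bits 7:4 of the /is4
   immediate byte, so encoding %xmm4 there yields an imm8 of 0x40.  */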
5406 }
5407 else
5408 {
5409 unsigned int imm_slot;
5410
5411 if (i.tm.opcode_modifier.vexw == VEXW0)
5412 {
5413 /* If VexW0 is set, the third operand is the source and
5414 the second operand is encoded in the immediate
5415 operand. */
5416 source = 2;
5417 reg_slot = 1;
5418 }
5419 else
5420 {
5421 /* VexW1 is set, the second operand is the source and
5422 the third operand is encoded in the immediate
5423 operand. */
5424 source = 1;
5425 reg_slot = 2;
5426 }
5427
5428 if (i.tm.opcode_modifier.immext)
5429 {
5430 /* When ImmExt is set, the immediate byte is the last
5431 operand. */
5432 imm_slot = i.operands - 1;
5433 source--;
5434 reg_slot--;
5435 }
5436 else
5437 {
5438 imm_slot = 0;
5439
5440 /* Turn on Imm8 so that output_imm will generate it. */
5441 i.types[imm_slot].bitfield.imm8 = 1;
5442 }
5443
5444 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5445 &regxmm)
5446 || operand_type_equal (&i.tm.operand_types[reg_slot],
5447 &regymm));
5448 i.op[imm_slot].imms->X_add_number
5449 |= register_number (i.op[reg_slot].regs) << 4;
5450 }
5451
5452 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5453 || operand_type_equal (&i.tm.operand_types[nds],
5454 &regymm));
5455 i.vex.register_specifier = i.op[nds].regs;
5456 }
5457 else
5458 source = dest = 0;
5459
5460 /* i.reg_operands MUST be the number of real register operands;
5461 implicit registers do not count. If there are 3 register
5462 operands, it must be an instruction with VexNDS. For an
5463 instruction with VexNDD, the destination register is encoded
5464 in the VEX prefix. If there are 4 register operands, it must be
5465 an instruction with VEX prefix and 3 sources. */
5466 if (i.mem_operands == 0
5467 && ((i.reg_operands == 2
5468 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5469 || (i.reg_operands == 3
5470 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5471 || (i.reg_operands == 4 && vex_3_sources)))
5472 {
5473 switch (i.operands)
5474 {
5475 case 2:
5476 source = 0;
5477 break;
5478 case 3:
5479 /* When there are 3 operands, one of them may be an immediate,
5480 which may be the first or the last operand. Otherwise,
5481 the first operand must be the shift count register (%cl) or it
5482 is an instruction with VexNDS. */
5483 gas_assert (i.imm_operands == 1
5484 || (i.imm_operands == 0
5485 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5486 || i.types[0].bitfield.shiftcount)));
5487 if (operand_type_check (i.types[0], imm)
5488 || i.types[0].bitfield.shiftcount)
5489 source = 1;
5490 else
5491 source = 0;
5492 break;
5493 case 4:
5494 /* When there are 4 operands, the first two must be 8bit
5495 immediate operands. The source operand will be the 3rd
5496 one.
5497
5498 For instructions with VexNDS, if the first operand is
5499 an imm8, the source operand is the 2nd one. If the last
5500 operand is imm8, the source operand is the first one. */
5501 gas_assert ((i.imm_operands == 2
5502 && i.types[0].bitfield.imm8
5503 && i.types[1].bitfield.imm8)
5504 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5505 && i.imm_operands == 1
5506 && (i.types[0].bitfield.imm8
5507 || i.types[i.operands - 1].bitfield.imm8)));
5508 if (i.imm_operands == 2)
5509 source = 2;
5510 else
5511 {
5512 if (i.types[0].bitfield.imm8)
5513 source = 1;
5514 else
5515 source = 0;
5516 }
5517 break;
5518 case 5:
5519 break;
5520 default:
5521 abort ();
5522 }
5523
5524 if (!vex_3_sources)
5525 {
5526 dest = source + 1;
5527
5528 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5529 {
5530 /* For instructions with VexNDS, the register-only
5531 source operand must be 32/64bit integer, XMM or
5532 YMM register. It is encoded in VEX prefix. We
5533 need to clear RegMem bit before calling
5534 operand_type_equal. */
5535
5536 i386_operand_type op;
5537 unsigned int vvvv;
5538
5539 /* Check register-only source operand when two source
5540 operands are swapped. */
5541 if (!i.tm.operand_types[source].bitfield.baseindex
5542 && i.tm.operand_types[dest].bitfield.baseindex)
5543 {
5544 vvvv = source;
5545 source = dest;
5546 }
5547 else
5548 vvvv = dest;
5549
5550 op = i.tm.operand_types[vvvv];
5551 op.bitfield.regmem = 0;
5552 if ((dest + 1) >= i.operands
5553 || (op.bitfield.reg32 != 1
5554 && op.bitfield.reg64 != 1
5555 && !operand_type_equal (&op, &regxmm)
5556 && !operand_type_equal (&op, &regymm)))
5557 abort ();
5558 i.vex.register_specifier = i.op[vvvv].regs;
5559 dest++;
5560 }
5561 }
5562
5563 i.rm.mode = 3;
5564 /* One of the register operands will be encoded in the i.rm.reg
5565 field, the other in the combined i.rm.mode and i.rm.regmem
5566 fields. If no form of this instruction supports a memory
5567 destination operand, then we assume the source operand may
5568 sometimes be a memory operand and so we need to store the
5569 destination in the i.rm.reg field. */
5570 if (!i.tm.operand_types[dest].bitfield.regmem
5571 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5572 {
5573 i.rm.reg = i.op[dest].regs->reg_num;
5574 i.rm.regmem = i.op[source].regs->reg_num;
5575 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5576 i.rex |= REX_R;
5577 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5578 i.rex |= REX_B;
5579 }
5580 else
5581 {
5582 i.rm.reg = i.op[source].regs->reg_num;
5583 i.rm.regmem = i.op[dest].regs->reg_num;
5584 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5585 i.rex |= REX_B;
5586 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5587 i.rex |= REX_R;
5588 }
5589 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5590 {
5591 if (!i.types[0].bitfield.control
5592 && !i.types[1].bitfield.control)
5593 abort ();
5594 i.rex &= ~(REX_R | REX_B);
5595 add_prefix (LOCK_PREFIX_OPCODE);
5596 }
5597 }
5598 else
5599 { /* If it's not 2 reg operands... */
5600 unsigned int mem;
5601
5602 if (i.mem_operands)
5603 {
5604 unsigned int fake_zero_displacement = 0;
5605 unsigned int op;
5606
5607 for (op = 0; op < i.operands; op++)
5608 if (operand_type_check (i.types[op], anymem))
5609 break;
5610 gas_assert (op < i.operands);
5611
5612 if (i.tm.opcode_modifier.vecsib)
5613 {
5614 if (i.index_reg->reg_num == RegEiz
5615 || i.index_reg->reg_num == RegRiz)
5616 abort ();
5617
5618 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5619 if (!i.base_reg)
5620 {
5621 i.sib.base = NO_BASE_REGISTER;
5622 i.sib.scale = i.log2_scale_factor;
5623 i.types[op].bitfield.disp8 = 0;
5624 i.types[op].bitfield.disp16 = 0;
5625 i.types[op].bitfield.disp64 = 0;
5626 if (flag_code != CODE_64BIT)
5627 {
5628 /* Must be 32 bit */
5629 i.types[op].bitfield.disp32 = 1;
5630 i.types[op].bitfield.disp32s = 0;
5631 }
5632 else
5633 {
5634 i.types[op].bitfield.disp32 = 0;
5635 i.types[op].bitfield.disp32s = 1;
5636 }
5637 }
5638 i.sib.index = i.index_reg->reg_num;
5639 if ((i.index_reg->reg_flags & RegRex) != 0)
5640 i.rex |= REX_X;
5641 }
5642
5643 default_seg = &ds;
5644
5645 if (i.base_reg == 0)
5646 {
5647 i.rm.mode = 0;
5648 if (!i.disp_operands)
5649 {
5650 fake_zero_displacement = 1;
5651 /* Instructions with VSIB byte need 32bit displacement
5652 if there is no base register. */
5653 if (i.tm.opcode_modifier.vecsib)
5654 i.types[op].bitfield.disp32 = 1;
5655 }
5656 if (i.index_reg == 0)
5657 {
5658 gas_assert (!i.tm.opcode_modifier.vecsib);
5659 /* Operand is just <disp> */
5660 if (flag_code == CODE_64BIT)
5661 {
5662 /* 64bit mode overwrites the 32bit absolute
5663 addressing by RIP relative addressing and
5664 absolute addressing is encoded by one of the
5665 redundant SIB forms. */
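/* E.g. `movl 0x1234, %eax' in 64-bit code becomes 8b 04 25 34 12
   00 00: ModRM selects a SIB byte whose base and index both say
   "none", giving a plain disp32 absolute address.  (Illustrative.)  */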
5666 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5667 i.sib.base = NO_BASE_REGISTER;
5668 i.sib.index = NO_INDEX_REGISTER;
5669 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5670 ? disp32s : disp32);
5671 }
5672 else if ((flag_code == CODE_16BIT)
5673 ^ (i.prefix[ADDR_PREFIX] != 0))
5674 {
5675 i.rm.regmem = NO_BASE_REGISTER_16;
5676 i.types[op] = disp16;
5677 }
5678 else
5679 {
5680 i.rm.regmem = NO_BASE_REGISTER;
5681 i.types[op] = disp32;
5682 }
5683 }
5684 else if (!i.tm.opcode_modifier.vecsib)
5685 {
5686 /* !i.base_reg && i.index_reg */
5687 if (i.index_reg->reg_num == RegEiz
5688 || i.index_reg->reg_num == RegRiz)
5689 i.sib.index = NO_INDEX_REGISTER;
5690 else
5691 i.sib.index = i.index_reg->reg_num;
5692 i.sib.base = NO_BASE_REGISTER;
5693 i.sib.scale = i.log2_scale_factor;
5694 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5695 i.types[op].bitfield.disp8 = 0;
5696 i.types[op].bitfield.disp16 = 0;
5697 i.types[op].bitfield.disp64 = 0;
5698 if (flag_code != CODE_64BIT)
5699 {
5700 /* Must be 32 bit */
5701 i.types[op].bitfield.disp32 = 1;
5702 i.types[op].bitfield.disp32s = 0;
5703 }
5704 else
5705 {
5706 i.types[op].bitfield.disp32 = 0;
5707 i.types[op].bitfield.disp32s = 1;
5708 }
5709 if ((i.index_reg->reg_flags & RegRex) != 0)
5710 i.rex |= REX_X;
5711 }
5712 }
5713 /* RIP addressing for 64bit mode. */
5714 else if (i.base_reg->reg_num == RegRip ||
5715 i.base_reg->reg_num == RegEip)
5716 {
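/* E.g. `mov foo(%rip), %eax': no SIB byte, and the operand is
   forced to a 32-bit PC-relative displacement below.
   (Illustrative.)  */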
5717 gas_assert (!i.tm.opcode_modifier.vecsib);
5718 i.rm.regmem = NO_BASE_REGISTER;
5719 i.types[op].bitfield.disp8 = 0;
5720 i.types[op].bitfield.disp16 = 0;
5721 i.types[op].bitfield.disp32 = 0;
5722 i.types[op].bitfield.disp32s = 1;
5723 i.types[op].bitfield.disp64 = 0;
5724 i.flags[op] |= Operand_PCrel;
5725 if (! i.disp_operands)
5726 fake_zero_displacement = 1;
5727 }
5728 else if (i.base_reg->reg_type.bitfield.reg16)
5729 {
5730 gas_assert (!i.tm.opcode_modifier.vecsib);
5731 switch (i.base_reg->reg_num)
5732 {
5733 case 3: /* (%bx) */
5734 if (i.index_reg == 0)
5735 i.rm.regmem = 7;
5736 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5737 i.rm.regmem = i.index_reg->reg_num - 6;
5738 break;
5739 case 5: /* (%bp) */
5740 default_seg = &ss;
5741 if (i.index_reg == 0)
5742 {
5743 i.rm.regmem = 6;
5744 if (operand_type_check (i.types[op], disp) == 0)
5745 {
5746 /* fake (%bp) into 0(%bp) */
5747 i.types[op].bitfield.disp8 = 1;
5748 fake_zero_displacement = 1;
5749 }
5750 }
5751 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5752 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5753 break;
5754 default: /* (%si) -> 4 or (%di) -> 5 */
5755 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5756 }
5757 i.rm.mode = mode_from_disp_size (i.types[op]);
5758 }
5759 else /* i.base_reg and 32/64 bit mode */
5760 {
5761 if (flag_code == CODE_64BIT
5762 && operand_type_check (i.types[op], disp))
5763 {
5764 i386_operand_type temp;
5765 operand_type_set (&temp, 0);
5766 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5767 i.types[op] = temp;
5768 if (i.prefix[ADDR_PREFIX] == 0)
5769 i.types[op].bitfield.disp32s = 1;
5770 else
5771 i.types[op].bitfield.disp32 = 1;
5772 }
5773
5774 if (!i.tm.opcode_modifier.vecsib)
5775 i.rm.regmem = i.base_reg->reg_num;
5776 if ((i.base_reg->reg_flags & RegRex) != 0)
5777 i.rex |= REX_B;
5778 i.sib.base = i.base_reg->reg_num;
5779 /* x86-64 ignores REX prefix bit here to avoid decoder
5780 complications. */
5781 if (!(i.base_reg->reg_flags & RegRex)
5782 && (i.base_reg->reg_num == EBP_REG_NUM
5783 || i.base_reg->reg_num == ESP_REG_NUM))
5784 default_seg = &ss;
5785 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5786 {
5787 fake_zero_displacement = 1;
5788 i.types[op].bitfield.disp8 = 1;
5789 }
5790 i.sib.scale = i.log2_scale_factor;
5791 if (i.index_reg == 0)
5792 {
5793 gas_assert (!i.tm.opcode_modifier.vecsib);
5794 /* <disp>(%esp) becomes a two-byte modrm with no index
5795 register. We've already stored the code for esp
5796 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5797 Any base register besides %esp will not use the
5798 extra modrm byte. */
5799 i.sib.index = NO_INDEX_REGISTER;
5800 }
5801 else if (!i.tm.opcode_modifier.vecsib)
5802 {
5803 if (i.index_reg->reg_num == RegEiz
5804 || i.index_reg->reg_num == RegRiz)
5805 i.sib.index = NO_INDEX_REGISTER;
5806 else
5807 i.sib.index = i.index_reg->reg_num;
5808 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5809 if ((i.index_reg->reg_flags & RegRex) != 0)
5810 i.rex |= REX_X;
5811 }
5812
5813 if (i.disp_operands
5814 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5815 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5816 i.rm.mode = 0;
5817 else
5818 {
5819 if (!fake_zero_displacement
5820 && !i.disp_operands
5821 && i.disp_encoding)
5822 {
5823 fake_zero_displacement = 1;
5824 if (i.disp_encoding == disp_encoding_8bit)
5825 i.types[op].bitfield.disp8 = 1;
5826 else
5827 i.types[op].bitfield.disp32 = 1;
5828 }
5829 i.rm.mode = mode_from_disp_size (i.types[op]);
5830 }
5831 }
5832
5833 if (fake_zero_displacement)
5834 {
5835 /* Fakes a zero displacement assuming that i.types[op]
5836 holds the correct displacement size. */
5837 expressionS *exp;
5838
5839 gas_assert (i.op[op].disps == 0);
5840 exp = &disp_expressions[i.disp_operands++];
5841 i.op[op].disps = exp;
5842 exp->X_op = O_constant;
5843 exp->X_add_number = 0;
5844 exp->X_add_symbol = (symbolS *) 0;
5845 exp->X_op_symbol = (symbolS *) 0;
5846 }
5847
5848 mem = op;
5849 }
5850 else
5851 mem = ~0;
5852
5853 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5854 {
5855 if (operand_type_check (i.types[0], imm))
5856 i.vex.register_specifier = NULL;
5857 else
5858 {
5859 /* VEX.vvvv encodes one of the sources when the first
5860 operand is not an immediate. */
5861 if (i.tm.opcode_modifier.vexw == VEXW0)
5862 i.vex.register_specifier = i.op[0].regs;
5863 else
5864 i.vex.register_specifier = i.op[1].regs;
5865 }
5866
5867 /* Destination is an XMM register encoded in the ModRM.reg
5868 and VEX.R bit. */
5869 i.rm.reg = i.op[2].regs->reg_num;
5870 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5871 i.rex |= REX_R;
5872
5873 /* ModRM.rm and VEX.B encodes the other source. */
5874 if (!i.mem_operands)
5875 {
5876 i.rm.mode = 3;
5877
5878 if (i.tm.opcode_modifier.vexw == VEXW0)
5879 i.rm.regmem = i.op[1].regs->reg_num;
5880 else
5881 i.rm.regmem = i.op[0].regs->reg_num;
5882
5883 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5884 i.rex |= REX_B;
5885 }
5886 }
5887 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5888 {
5889 i.vex.register_specifier = i.op[2].regs;
5890 if (!i.mem_operands)
5891 {
5892 i.rm.mode = 3;
5893 i.rm.regmem = i.op[1].regs->reg_num;
5894 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5895 i.rex |= REX_B;
5896 }
5897 }
5898 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5899 (if any) based on i.tm.extension_opcode. Again, we must be
5900 careful to make sure that segment/control/debug/test/MMX
5901 registers are coded into the i.rm.reg field. */
5902 else if (i.reg_operands)
5903 {
5904 unsigned int op;
5905 unsigned int vex_reg = ~0;
5906
5907 for (op = 0; op < i.operands; op++)
5908 if (i.types[op].bitfield.reg8
5909 || i.types[op].bitfield.reg16
5910 || i.types[op].bitfield.reg32
5911 || i.types[op].bitfield.reg64
5912 || i.types[op].bitfield.regmmx
5913 || i.types[op].bitfield.regxmm
5914 || i.types[op].bitfield.regymm
5915 || i.types[op].bitfield.sreg2
5916 || i.types[op].bitfield.sreg3
5917 || i.types[op].bitfield.control
5918 || i.types[op].bitfield.debug
5919 || i.types[op].bitfield.test)
5920 break;
5921
5922 if (vex_3_sources)
5923 op = dest;
5924 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5925 {
5926 /* For instructions with VexNDS, the register-only
5927 source operand is encoded in VEX prefix. */
5928 gas_assert (mem != (unsigned int) ~0);
5929
5930 if (op > mem)
5931 {
5932 vex_reg = op++;
5933 gas_assert (op < i.operands);
5934 }
5935 else
5936 {
5937 /* Check register-only source operand when two source
5938 operands are swapped. */
5939 if (!i.tm.operand_types[op].bitfield.baseindex
5940 && i.tm.operand_types[op + 1].bitfield.baseindex)
5941 {
5942 vex_reg = op;
5943 op += 2;
5944 gas_assert (mem == (vex_reg + 1)
5945 && op < i.operands);
5946 }
5947 else
5948 {
5949 vex_reg = op + 1;
5950 gas_assert (vex_reg < i.operands);
5951 }
5952 }
5953 }
5954 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5955 {
5956 /* For instructions with VexNDD, the register destination
5957 is encoded in VEX prefix. */
5958 if (i.mem_operands == 0)
5959 {
5960 /* There is no memory operand. */
5961 gas_assert ((op + 2) == i.operands);
5962 vex_reg = op + 1;
5963 }
5964 else
5965 {
5966 /* There are only 2 operands. */
5967 gas_assert (op < 2 && i.operands == 2);
5968 vex_reg = 1;
5969 }
5970 }
5971 else
5972 gas_assert (op < i.operands);
5973
5974 if (vex_reg != (unsigned int) ~0)
5975 {
5976 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5977
5978 if (type->bitfield.reg32 != 1
5979 && type->bitfield.reg64 != 1
5980 && !operand_type_equal (type, &regxmm)
5981 && !operand_type_equal (type, &regymm))
5982 abort ();
5983
5984 i.vex.register_specifier = i.op[vex_reg].regs;
5985 }
5986
5987 /* Don't set OP operand twice. */
5988 if (vex_reg != op)
5989 {
5990 /* If there is an extension opcode to put here, the
5991 register number must be put into the regmem field. */
5992 if (i.tm.extension_opcode != None)
5993 {
5994 i.rm.regmem = i.op[op].regs->reg_num;
5995 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5996 i.rex |= REX_B;
5997 }
5998 else
5999 {
6000 i.rm.reg = i.op[op].regs->reg_num;
6001 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6002 i.rex |= REX_R;
6003 }
6004 }
6005
6006 /* If no memory operand has set i.rm.mode to 0, 1, or 2, we
6007 must set it to 3 to indicate that this is a register operand
6008 in the regmem field. */
6009 if (!i.mem_operands)
6010 i.rm.mode = 3;
6011 }
6012
6013 /* Fill in i.rm.reg field with extension opcode (if any). */
6014 if (i.tm.extension_opcode != None)
6015 i.rm.reg = i.tm.extension_opcode;
6016 }
6017 return default_seg;
6018 }
6019
6020 static void
6021 output_branch (void)
6022 {
6023 char *p;
6024 int size;
6025 int code16;
6026 int prefix;
6027 relax_substateT subtype;
6028 symbolS *sym;
6029 offsetT off;
6030
6031 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6032 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6033
6034 prefix = 0;
6035 if (i.prefix[DATA_PREFIX] != 0)
6036 {
6037 prefix = 1;
6038 i.prefixes -= 1;
6039 code16 ^= CODE16;
6040 }
6041 /* Pentium4 branch hints. */
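/* Note (explanatory): 0x2e (the %cs override) hints the branch is not
taken, 0x3e (the %ds override) hints it is taken. */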
6042 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6043 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6044 {
6045 prefix++;
6046 i.prefixes--;
6047 }
6048 if (i.prefix[REX_PREFIX] != 0)
6049 {
6050 prefix++;
6051 i.prefixes--;
6052 }
6053
6054 if (i.prefixes != 0 && !intel_syntax)
6055 as_warn (_("skipping prefixes on this instruction"));
6056
6057 /* The branch target is always a symbol; end the frag and set up for relaxation.
6058 Make sure there is enough room in this frag for the largest
6059 instruction we may generate in md_convert_frag. This is 2
6060 bytes for the opcode and room for the prefix and largest
6061 displacement. */
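/* For example (illustrative), the largest form of a conditional jump
produced by md_convert_frag is 0x0f 0x8N plus a 4-byte displacement,
which is the 2 + 4 bytes requested below on top of any prefixes. */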
6062 frag_grow (prefix + 2 + 4);
6063 /* Prefix and 1 opcode byte go in fr_fix. */
6064 p = frag_more (prefix + 1);
6065 if (i.prefix[DATA_PREFIX] != 0)
6066 *p++ = DATA_PREFIX_OPCODE;
6067 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6068 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6069 *p++ = i.prefix[SEG_PREFIX];
6070 if (i.prefix[REX_PREFIX] != 0)
6071 *p++ = i.prefix[REX_PREFIX];
6072 *p = i.tm.base_opcode;
6073
6074 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6075 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6076 else if (cpu_arch_flags.bitfield.cpui386)
6077 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6078 else
6079 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6080 subtype |= code16;
6081
6082 sym = i.op[0].disps->X_add_symbol;
6083 off = i.op[0].disps->X_add_number;
6084
6085 if (i.op[0].disps->X_op != O_constant
6086 && i.op[0].disps->X_op != O_symbol)
6087 {
6088 /* Handle complex expressions. */
6089 sym = make_expr_symbol (i.op[0].disps);
6090 off = 0;
6091 }
6092
6093 /* 1 possible extra opcode + 4 byte displacement go in var part.
6094 Pass reloc in fr_var. */
6095 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6096 }
6097
6098 static void
6099 output_jump (void)
6100 {
6101 char *p;
6102 int size;
6103 fixS *fixP;
6104
6105 if (i.tm.opcode_modifier.jumpbyte)
6106 {
6107 /* This is a loop or jecxz type instruction. */
6108 size = 1;
6109 if (i.prefix[ADDR_PREFIX] != 0)
6110 {
6111 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6112 i.prefixes -= 1;
6113 }
6114 /* Pentium4 branch hints. */
6115 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6116 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6117 {
6118 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6119 i.prefixes--;
6120 }
6121 }
6122 else
6123 {
6124 int code16;
6125
6126 code16 = 0;
6127 if (flag_code == CODE_16BIT)
6128 code16 = CODE16;
6129
6130 if (i.prefix[DATA_PREFIX] != 0)
6131 {
6132 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6133 i.prefixes -= 1;
6134 code16 ^= CODE16;
6135 }
6136
6137 size = 4;
6138 if (code16)
6139 size = 2;
6140 }
6141
6142 if (i.prefix[REX_PREFIX] != 0)
6143 {
6144 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6145 i.prefixes -= 1;
6146 }
6147
6148 if (i.prefixes != 0 && !intel_syntax)
6149 as_warn (_("skipping prefixes on this instruction"));
6150
6151 p = frag_more (i.tm.opcode_length + size);
6152 switch (i.tm.opcode_length)
6153 {
6154 case 2:
6155 *p++ = i.tm.base_opcode >> 8;
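/* Fall through to emit the low opcode byte. */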
6156 case 1:
6157 *p++ = i.tm.base_opcode;
6158 break;
6159 default:
6160 abort ();
6161 }
6162
6163 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6164 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6165
6166 /* All jumps handled here are signed, but don't use a signed limit
6167 check for 32 and 16 bit jumps as we want to allow wrap around at
6168 4G and 64k respectively. */
6169 if (size == 1)
6170 fixP->fx_signed = 1;
6171 }
6172
6173 static void
6174 output_interseg_jump (void)
6175 {
6176 char *p;
6177 int size;
6178 int prefix;
6179 int code16;
6180
6181 code16 = 0;
6182 if (flag_code == CODE_16BIT)
6183 code16 = CODE16;
6184
6185 prefix = 0;
6186 if (i.prefix[DATA_PREFIX] != 0)
6187 {
6188 prefix = 1;
6189 i.prefixes -= 1;
6190 code16 ^= CODE16;
6191 }
6192 if (i.prefix[REX_PREFIX] != 0)
6193 {
6194 prefix++;
6195 i.prefixes -= 1;
6196 }
6197
6198 size = 4;
6199 if (code16)
6200 size = 2;
6201
6202 if (i.prefixes != 0 && !intel_syntax)
6203 as_warn (_("skipping prefixes on this instruction"));
6204
6205 /* 1 opcode; 2 segment; offset */
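/* For example (illustrative), in 32-bit code `ljmp $0x10,$target' emits
0xea, then the 4-byte offset of target, then the 2-byte selector 0x0010. */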
6206 p = frag_more (prefix + 1 + 2 + size);
6207
6208 if (i.prefix[DATA_PREFIX] != 0)
6209 *p++ = DATA_PREFIX_OPCODE;
6210
6211 if (i.prefix[REX_PREFIX] != 0)
6212 *p++ = i.prefix[REX_PREFIX];
6213
6214 *p++ = i.tm.base_opcode;
6215 if (i.op[1].imms->X_op == O_constant)
6216 {
6217 offsetT n = i.op[1].imms->X_add_number;
6218
6219 if (size == 2
6220 && !fits_in_unsigned_word (n)
6221 && !fits_in_signed_word (n))
6222 {
6223 as_bad (_("16-bit jump out of range"));
6224 return;
6225 }
6226 md_number_to_chars (p, n, size);
6227 }
6228 else
6229 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6230 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6231 if (i.op[0].imms->X_op != O_constant)
6232 as_bad (_("can't handle non absolute segment in `%s'"),
6233 i.tm.name);
6234 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6235 }
6236
6237 static void
6238 output_insn (void)
6239 {
6240 fragS *insn_start_frag;
6241 offsetT insn_start_off;
6242
6243 /* Tie dwarf2 debug info to the address at the start of the insn.
6244 We can't do this after the insn has been output as the current
6245 frag may have been closed off, e.g. by frag_var. */
6246 dwarf2_emit_insn (0);
6247
6248 insn_start_frag = frag_now;
6249 insn_start_off = frag_now_fix ();
6250
6251 /* Output jumps. */
6252 if (i.tm.opcode_modifier.jump)
6253 output_branch ();
6254 else if (i.tm.opcode_modifier.jumpbyte
6255 || i.tm.opcode_modifier.jumpdword)
6256 output_jump ();
6257 else if (i.tm.opcode_modifier.jumpintersegment)
6258 output_interseg_jump ();
6259 else
6260 {
6261 /* Output normal instructions here. */
6262 char *p;
6263 unsigned char *q;
6264 unsigned int j;
6265 unsigned int prefix;
6266
6267 /* Since the VEX prefix contains the implicit prefix, we don't
6268 need the explicit prefix. */
6269 if (!i.tm.opcode_modifier.vex)
6270 {
6271 switch (i.tm.opcode_length)
6272 {
6273 case 3:
6274 if (i.tm.base_opcode & 0xff000000)
6275 {
6276 prefix = (i.tm.base_opcode >> 24) & 0xff;
6277 goto check_prefix;
6278 }
6279 break;
6280 case 2:
6281 if ((i.tm.base_opcode & 0xff0000) != 0)
6282 {
6283 prefix = (i.tm.base_opcode >> 16) & 0xff;
6284 if (i.tm.cpu_flags.bitfield.cpupadlock)
6285 {
6286 check_prefix:
6287 if (prefix != REPE_PREFIX_OPCODE
6288 || (i.prefix[REP_PREFIX]
6289 != REPE_PREFIX_OPCODE))
6290 add_prefix (prefix);
6291 }
6292 else
6293 add_prefix (prefix);
6294 }
6295 break;
6296 case 1:
6297 break;
6298 default:
6299 abort ();
6300 }
6301
6302 /* The prefix bytes. */
6303 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6304 if (*q)
6305 FRAG_APPEND_1_CHAR (*q);
6306 }
6307 else
6308 {
6309 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6310 if (*q)
6311 switch (j)
6312 {
6313 case REX_PREFIX:
6314 /* REX byte is encoded in VEX prefix. */
6315 break;
6316 case SEG_PREFIX:
6317 case ADDR_PREFIX:
6318 FRAG_APPEND_1_CHAR (*q);
6319 break;
6320 default:
6321 /* There should be no other prefixes for instructions
6322 with VEX prefix. */
6323 abort ();
6324 }
6325
6326 /* Now the VEX prefix. */
6327 p = frag_more (i.vex.length);
6328 for (j = 0; j < i.vex.length; j++)
6329 p[j] = i.vex.bytes[j];
6330 }
6331
6332 /* Now the opcode; be careful about word order here! */
6333 if (i.tm.opcode_length == 1)
6334 {
6335 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6336 }
6337 else
6338 {
6339 switch (i.tm.opcode_length)
6340 {
6341 case 3:
6342 p = frag_more (3);
6343 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6344 break;
6345 case 2:
6346 p = frag_more (2);
6347 break;
6348 default:
6349 abort ();
6350 break;
6351 }
6352
6353 /* Put out high byte first: can't use md_number_to_chars! */
6354 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6355 *p = i.tm.base_opcode & 0xff;
6356 }
6357
6358 /* Now the modrm byte and sib byte (if present). */
6359 if (i.tm.opcode_modifier.modrm)
6360 {
6361 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6362 | i.rm.reg << 3
6363 | i.rm.mode << 6));
6364 /* If i.rm.regmem == ESP (4)
6365 && i.rm.mode != (Register mode)
6366 && not 16 bit
6367 ==> need second modrm byte. */
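/* For example (illustrative), `movl %eax,(%ebx,%ecx,4)' is encoded as
89 04 8b: ModRM 0x04 = mode 0, reg 0 (%eax), regmem 4 (SIB follows);
SIB 0x8b = scale 2 (log2 of 4), index 1 (%ecx), base 3 (%ebx). */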
6368 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6369 && i.rm.mode != 3
6370 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6371 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6372 | i.sib.index << 3
6373 | i.sib.scale << 6));
6374 }
6375
6376 if (i.disp_operands)
6377 output_disp (insn_start_frag, insn_start_off);
6378
6379 if (i.imm_operands)
6380 output_imm (insn_start_frag, insn_start_off);
6381 }
6382
6383 #ifdef DEBUG386
6384 if (flag_debug)
6385 {
6386 pi ("" /*line*/, &i);
6387 }
6388 #endif /* DEBUG386 */
6389 }
6390
6391 /* Return the size of the displacement operand N. */
6392
6393 static int
6394 disp_size (unsigned int n)
6395 {
6396 int size = 4;
6397 if (i.types[n].bitfield.disp64)
6398 size = 8;
6399 else if (i.types[n].bitfield.disp8)
6400 size = 1;
6401 else if (i.types[n].bitfield.disp16)
6402 size = 2;
6403 return size;
6404 }
6405
6406 /* Return the size of the immediate operand N. */
6407
6408 static int
6409 imm_size (unsigned int n)
6410 {
6411 int size = 4;
6412 if (i.types[n].bitfield.imm64)
6413 size = 8;
6414 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6415 size = 1;
6416 else if (i.types[n].bitfield.imm16)
6417 size = 2;
6418 return size;
6419 }
6420
6421 static void
6422 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6423 {
6424 char *p;
6425 unsigned int n;
6426
6427 for (n = 0; n < i.operands; n++)
6428 {
6429 if (operand_type_check (i.types[n], disp))
6430 {
6431 if (i.op[n].disps->X_op == O_constant)
6432 {
6433 int size = disp_size (n);
6434 offsetT val;
6435
6436 val = offset_in_range (i.op[n].disps->X_add_number,
6437 size);
6438 p = frag_more (size);
6439 md_number_to_chars (p, val, size);
6440 }
6441 else
6442 {
6443 enum bfd_reloc_code_real reloc_type;
6444 int size = disp_size (n);
6445 int sign = i.types[n].bitfield.disp32s;
6446 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6447
6448 /* We can't have 8 bit displacement here. */
6449 gas_assert (!i.types[n].bitfield.disp8);
6450
6451 /* The PC-relative address is computed relative
6452 to the instruction boundary, so if immediate
6453 fields follow, we need to adjust the value. */
6454 if (pcrel && i.imm_operands)
6455 {
6456 unsigned int n1;
6457 int sz = 0;
6458
6459 for (n1 = 0; n1 < i.operands; n1++)
6460 if (operand_type_check (i.types[n1], imm))
6461 {
6462 /* Only one immediate is allowed for a
6463 PC-relative address. */
6464 gas_assert (sz == 0);
6465 sz = imm_size (n1);
6466 i.op[n].disps->X_add_number -= sz;
6467 }
6468 /* We should find the immediate. */
6469 gas_assert (sz != 0);
6470 }
6471
6472 p = frag_more (size);
6473 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6474 if (GOT_symbol
6475 && GOT_symbol == i.op[n].disps->X_add_symbol
6476 && (((reloc_type == BFD_RELOC_32
6477 || reloc_type == BFD_RELOC_X86_64_32S
6478 || (reloc_type == BFD_RELOC_64
6479 && object_64bit))
6480 && (i.op[n].disps->X_op == O_symbol
6481 || (i.op[n].disps->X_op == O_add
6482 && ((symbol_get_value_expression
6483 (i.op[n].disps->X_op_symbol)->X_op)
6484 == O_subtract))))
6485 || reloc_type == BFD_RELOC_32_PCREL))
6486 {
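/* See the long comment in output_imm below for why the addend of a
GOTPC-style reloc is adjusted by the distance from the start of the
insn to this field. */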
6487 offsetT add;
6488
6489 if (insn_start_frag == frag_now)
6490 add = (p - frag_now->fr_literal) - insn_start_off;
6491 else
6492 {
6493 fragS *fr;
6494
6495 add = insn_start_frag->fr_fix - insn_start_off;
6496 for (fr = insn_start_frag->fr_next;
6497 fr && fr != frag_now; fr = fr->fr_next)
6498 add += fr->fr_fix;
6499 add += p - frag_now->fr_literal;
6500 }
6501
6502 if (!object_64bit)
6503 {
6504 reloc_type = BFD_RELOC_386_GOTPC;
6505 i.op[n].imms->X_add_number += add;
6506 }
6507 else if (reloc_type == BFD_RELOC_64)
6508 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6509 else
6510 /* Don't do the adjustment for x86-64, as there
6511 the pcrel addressing is relative to the _next_
6512 insn, and that is taken care of in other code. */
6513 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6514 }
6515 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6516 i.op[n].disps, pcrel, reloc_type);
6517 }
6518 }
6519 }
6520 }
6521
6522 static void
6523 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6524 {
6525 char *p;
6526 unsigned int n;
6527
6528 for (n = 0; n < i.operands; n++)
6529 {
6530 if (operand_type_check (i.types[n], imm))
6531 {
6532 if (i.op[n].imms->X_op == O_constant)
6533 {
6534 int size = imm_size (n);
6535 offsetT val;
6536
6537 val = offset_in_range (i.op[n].imms->X_add_number,
6538 size);
6539 p = frag_more (size);
6540 md_number_to_chars (p, val, size);
6541 }
6542 else
6543 {
6544 /* Not absolute_section.
6545 Need a 32-bit fixup (don't support 8bit
6546 non-absolute imms). Try to support other
6547 sizes ... */
6548 enum bfd_reloc_code_real reloc_type;
6549 int size = imm_size (n);
6550 int sign;
6551
6552 if (i.types[n].bitfield.imm32s
6553 && (i.suffix == QWORD_MNEM_SUFFIX
6554 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6555 sign = 1;
6556 else
6557 sign = 0;
6558
6559 p = frag_more (size);
6560 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6561
6562 /* This is tough to explain. We end up with this one if we
6563 * have operands that look like
6564 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6565 * obtain the absolute address of the GOT, and it is strongly
6566 * preferable from a performance point of view to avoid using
6567 * a runtime relocation for this. The actual sequence of
6568 * instructions often look something like:
6569 *
6570 * call .L66
6571 * .L66:
6572 * popl %ebx
6573 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6574 *
6575 * The call and pop essentially return the absolute address
6576 * of the label .L66 and store it in %ebx. The linker itself
6577 * will ultimately change the first operand of the addl so
6578 * that %ebx points to the GOT, but to keep things simple, the
6579 * .o file must have this operand set so that it generates not
6580 * the absolute address of .L66, but the absolute address of
6581 * itself. This allows the linker to simply treat a GOTPC
6582 * relocation as asking for a pcrel offset to the GOT to be
6583 * added in, and the addend of the relocation is stored in the
6584 * operand field for the instruction itself.
6585 *
6586 * Our job here is to fix the operand so that it would add
6587 * the correct offset so that %ebx would point to itself. The
6588 * thing that is tricky is that .-.L66 will point to the
6589 * beginning of the instruction, so we need to further modify
6590 * the operand so that it will point to itself. There are
6591 * other cases where you have something like:
6592 *
6593 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6594 *
6595 * and here no correction would be required. Internally in
6596 * the assembler we treat operands of this form as not being
6597 * pcrel since the '.' is explicitly mentioned, and I wonder
6598 * whether it would simplify matters to do it this way. Who
6599 * knows. In earlier versions of the PIC patches, the
6600 * pcrel_adjust field was used to store the correction, but
6601 * since the expression is not pcrel, I felt it would be
6602 * confusing to do it this way. */
6603
6604 if ((reloc_type == BFD_RELOC_32
6605 || reloc_type == BFD_RELOC_X86_64_32S
6606 || reloc_type == BFD_RELOC_64)
6607 && GOT_symbol
6608 && GOT_symbol == i.op[n].imms->X_add_symbol
6609 && (i.op[n].imms->X_op == O_symbol
6610 || (i.op[n].imms->X_op == O_add
6611 && ((symbol_get_value_expression
6612 (i.op[n].imms->X_op_symbol)->X_op)
6613 == O_subtract))))
6614 {
6615 offsetT add;
6616
6617 if (insn_start_frag == frag_now)
6618 add = (p - frag_now->fr_literal) - insn_start_off;
6619 else
6620 {
6621 fragS *fr;
6622
6623 add = insn_start_frag->fr_fix - insn_start_off;
6624 for (fr = insn_start_frag->fr_next;
6625 fr && fr != frag_now; fr = fr->fr_next)
6626 add += fr->fr_fix;
6627 add += p - frag_now->fr_literal;
6628 }
6629
6630 if (!object_64bit)
6631 reloc_type = BFD_RELOC_386_GOTPC;
6632 else if (size == 4)
6633 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6634 else if (size == 8)
6635 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6636 i.op[n].imms->X_add_number += add;
6637 }
6638 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6639 i.op[n].imms, 0, reloc_type);
6640 }
6641 }
6642 }
6643 }
6644 \f
6645 /* x86_cons_fix_new is called via the expression parsing code when a
6646 reloc is needed. We use this hook to get the correct .got reloc. */
6647 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6648 static int cons_sign = -1;
6649
6650 void
6651 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6652 expressionS *exp)
6653 {
6654 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6655
6656 got_reloc = NO_RELOC;
6657
6658 #ifdef TE_PE
6659 if (exp->X_op == O_secrel)
6660 {
6661 exp->X_op = O_symbol;
6662 r = BFD_RELOC_32_SECREL;
6663 }
6664 #endif
6665
6666 fix_new_exp (frag, off, len, exp, 0, r);
6667 }
6668
6669 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6670 purpose of the `.dc.a' internal pseudo-op. */
6671
6672 int
6673 x86_address_bytes (void)
6674 {
6675 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6676 return 4;
6677 return stdoutput->arch_info->bits_per_address / 8;
6678 }
6679
6680 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6681 || defined (LEX_AT)
6682 # define lex_got(reloc, adjust, types) NULL
6683 #else
6684 /* Parse operands of the form
6685 <symbol>@GOTOFF+<nnn>
6686 and similar .plt or .got references.
6687
6688 If we find one, set up the correct relocation in RELOC and copy the
6689 input string, minus the `@GOTOFF' into a malloc'd buffer for
6690 parsing by the calling routine. Return this buffer, and if ADJUST
6691 is non-null set it to the length of the string we removed from the
6692 input line. Otherwise return NULL. */
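/* For example (illustrative): given the operand text `foo@GOTOFF+4(%ebx)',
we set *rel to BFD_RELOC_386_GOTOFF (BFD_RELOC_X86_64_GOTOFF64 for 64-bit
output) and the returned buffer holds `foo +4(%ebx)'; the reloc token is
replaced by a space so that trailing junk such as `foo@GOTOFF1' is still
diagnosed. */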
6693 static char *
6694 lex_got (enum bfd_reloc_code_real *rel,
6695 int *adjust,
6696 i386_operand_type *types)
6697 {
6698 /* Some of the relocations depend on the size of the field being
6699 relocated. But in our callers i386_immediate and i386_displacement
6700 we don't yet know the operand size (this will be set by insn
6701 matching). Hence we record the word32 relocation here,
6702 and adjust the reloc according to the real size in reloc(). */
6703 static const struct {
6704 const char *str;
6705 int len;
6706 const enum bfd_reloc_code_real rel[2];
6707 const i386_operand_type types64;
6708 } gotrel[] = {
6709 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6710 BFD_RELOC_X86_64_PLTOFF64 },
6711 OPERAND_TYPE_IMM64 },
6712 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6713 BFD_RELOC_X86_64_PLT32 },
6714 OPERAND_TYPE_IMM32_32S_DISP32 },
6715 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6716 BFD_RELOC_X86_64_GOTPLT64 },
6717 OPERAND_TYPE_IMM64_DISP64 },
6718 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6719 BFD_RELOC_X86_64_GOTOFF64 },
6720 OPERAND_TYPE_IMM64_DISP64 },
6721 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6722 BFD_RELOC_X86_64_GOTPCREL },
6723 OPERAND_TYPE_IMM32_32S_DISP32 },
6724 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6725 BFD_RELOC_X86_64_TLSGD },
6726 OPERAND_TYPE_IMM32_32S_DISP32 },
6727 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6728 _dummy_first_bfd_reloc_code_real },
6729 OPERAND_TYPE_NONE },
6730 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6731 BFD_RELOC_X86_64_TLSLD },
6732 OPERAND_TYPE_IMM32_32S_DISP32 },
6733 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6734 BFD_RELOC_X86_64_GOTTPOFF },
6735 OPERAND_TYPE_IMM32_32S_DISP32 },
6736 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6737 BFD_RELOC_X86_64_TPOFF32 },
6738 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6739 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6740 _dummy_first_bfd_reloc_code_real },
6741 OPERAND_TYPE_NONE },
6742 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6743 BFD_RELOC_X86_64_DTPOFF32 },
6744 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6745 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6746 _dummy_first_bfd_reloc_code_real },
6747 OPERAND_TYPE_NONE },
6748 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6749 _dummy_first_bfd_reloc_code_real },
6750 OPERAND_TYPE_NONE },
6751 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6752 BFD_RELOC_X86_64_GOT32 },
6753 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6754 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6755 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6756 OPERAND_TYPE_IMM32_32S_DISP32 },
6757 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6758 BFD_RELOC_X86_64_TLSDESC_CALL },
6759 OPERAND_TYPE_IMM32_32S_DISP32 },
6760 };
6761 char *cp;
6762 unsigned int j;
6763
6764 #if defined (OBJ_MAYBE_ELF)
6765 if (!IS_ELF)
6766 return NULL;
6767 #endif
6768
6769 for (cp = input_line_pointer; *cp != '@'; cp++)
6770 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6771 return NULL;
6772
6773 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6774 {
6775 int len = gotrel[j].len;
6776 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6777 {
6778 if (gotrel[j].rel[object_64bit] != 0)
6779 {
6780 int first, second;
6781 char *tmpbuf, *past_reloc;
6782
6783 *rel = gotrel[j].rel[object_64bit];
6784 if (adjust)
6785 *adjust = len;
6786
6787 if (types)
6788 {
6789 if (flag_code != CODE_64BIT)
6790 {
6791 types->bitfield.imm32 = 1;
6792 types->bitfield.disp32 = 1;
6793 }
6794 else
6795 *types = gotrel[j].types64;
6796 }
6797
6798 if (GOT_symbol == NULL)
6799 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6800
6801 /* The length of the first part of our input line. */
6802 first = cp - input_line_pointer;
6803
6804 /* The second part goes from after the reloc token until
6805 (and including) an end_of_line char or comma. */
6806 past_reloc = cp + 1 + len;
6807 cp = past_reloc;
6808 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6809 ++cp;
6810 second = cp + 1 - past_reloc;
6811
6812 /* Allocate and copy string. The trailing NUL shouldn't
6813 be necessary, but be safe. */
6814 tmpbuf = (char *) xmalloc (first + second + 2);
6815 memcpy (tmpbuf, input_line_pointer, first);
6816 if (second != 0 && *past_reloc != ' ')
6817 /* Replace the relocation token with ' ', so that
6818 errors like foo@GOTOFF1 will be detected. */
6819 tmpbuf[first++] = ' ';
6820 memcpy (tmpbuf + first, past_reloc, second);
6821 tmpbuf[first + second] = '\0';
6822 return tmpbuf;
6823 }
6824
6825 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6826 gotrel[j].str, 1 << (5 + object_64bit));
6827 return NULL;
6828 }
6829 }
6830
6831 /* Might be a symbol version string. Don't as_bad here. */
6832 return NULL;
6833 }
6834 #endif
6835
6836 #ifdef TE_PE
6837 #ifdef lex_got
6838 #undef lex_got
6839 #endif
6840 /* Parse operands of the form
6841 <symbol>@SECREL32+<nnn>
6842
6843 If we find one, set up the correct relocation in RELOC and copy the
6844 input string, minus the `@SECREL32' into a malloc'd buffer for
6845 parsing by the calling routine. Return this buffer, and if ADJUST
6846 is non-null set it to the length of the string we removed from the
6847 input line. Otherwise return NULL.
6848
6849 This function is copied from the ELF version above, adjusted for PE targets. */
6850
6851 static char *
6852 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6853 int *adjust ATTRIBUTE_UNUSED,
6854 i386_operand_type *types ATTRIBUTE_UNUSED)
6855 {
6856 static const struct
6857 {
6858 const char *str;
6859 int len;
6860 const enum bfd_reloc_code_real rel[2];
6861 const i386_operand_type types64;
6862 }
6863 gotrel[] =
6864 {
6865 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6866 BFD_RELOC_32_SECREL },
6867 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6868 };
6869
6870 char *cp;
6871 unsigned j;
6872
6873 for (cp = input_line_pointer; *cp != '@'; cp++)
6874 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6875 return NULL;
6876
6877 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6878 {
6879 int len = gotrel[j].len;
6880
6881 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6882 {
6883 if (gotrel[j].rel[object_64bit] != 0)
6884 {
6885 int first, second;
6886 char *tmpbuf, *past_reloc;
6887
6888 *rel = gotrel[j].rel[object_64bit];
6889 if (adjust)
6890 *adjust = len;
6891
6892 if (types)
6893 {
6894 if (flag_code != CODE_64BIT)
6895 {
6896 types->bitfield.imm32 = 1;
6897 types->bitfield.disp32 = 1;
6898 }
6899 else
6900 *types = gotrel[j].types64;
6901 }
6902
6903 /* The length of the first part of our input line. */
6904 first = cp - input_line_pointer;
6905
6906 /* The second part goes from after the reloc token until
6907 (and including) an end_of_line char or comma. */
6908 past_reloc = cp + 1 + len;
6909 cp = past_reloc;
6910 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6911 ++cp;
6912 second = cp + 1 - past_reloc;
6913
6914 /* Allocate and copy string. The trailing NUL shouldn't
6915 be necessary, but be safe. */
6916 tmpbuf = (char *) xmalloc (first + second + 2);
6917 memcpy (tmpbuf, input_line_pointer, first);
6918 if (second != 0 && *past_reloc != ' ')
6919 /* Replace the relocation token with ' ', so that
6920 errors like foo@SECREL321 will be detected. */
6921 tmpbuf[first++] = ' ';
6922 memcpy (tmpbuf + first, past_reloc, second);
6923 tmpbuf[first + second] = '\0';
6924 return tmpbuf;
6925 }
6926
6927 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6928 gotrel[j].str, 1 << (5 + object_64bit));
6929 return NULL;
6930 }
6931 }
6932
6933 /* Might be a symbol version string. Don't as_bad here. */
6934 return NULL;
6935 }
6936
6937 #endif /* TE_PE */
6938
6939 void
6940 x86_cons (expressionS *exp, int size)
6941 {
6942 intel_syntax = -intel_syntax;
6943
6944 exp->X_md = 0;
6945 if (size == 4 || (object_64bit && size == 8))
6946 {
6947 /* Handle @GOTOFF and the like in an expression. */
6948 char *save;
6949 char *gotfree_input_line;
6950 int adjust = 0;
6951
6952 save = input_line_pointer;
6953 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6954 if (gotfree_input_line)
6955 input_line_pointer = gotfree_input_line;
6956
6957 expression (exp);
6958
6959 if (gotfree_input_line)
6960 {
6961 /* expression () has merrily parsed up to the end of line,
6962 or a comma - in the wrong buffer. Transfer how far
6963 input_line_pointer has moved to the right buffer. */
6964 input_line_pointer = (save
6965 + (input_line_pointer - gotfree_input_line)
6966 + adjust);
6967 free (gotfree_input_line);
6968 if (exp->X_op == O_constant
6969 || exp->X_op == O_absent
6970 || exp->X_op == O_illegal
6971 || exp->X_op == O_register
6972 || exp->X_op == O_big)
6973 {
6974 char c = *input_line_pointer;
6975 *input_line_pointer = 0;
6976 as_bad (_("missing or invalid expression `%s'"), save);
6977 *input_line_pointer = c;
6978 }
6979 }
6980 }
6981 else
6982 expression (exp);
6983
6984 intel_syntax = -intel_syntax;
6985
6986 if (intel_syntax)
6987 i386_intel_simplify (exp);
6988 }
6989
6990 static void
6991 signed_cons (int size)
6992 {
6993 if (flag_code == CODE_64BIT)
6994 cons_sign = 1;
6995 cons (size);
6996 cons_sign = -1;
6997 }
6998
6999 #ifdef TE_PE
7000 static void
7001 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7002 {
7003 expressionS exp;
7004
7005 do
7006 {
7007 expression (&exp);
7008 if (exp.X_op == O_symbol)
7009 exp.X_op = O_secrel;
7010
7011 emit_expr (&exp, 4);
7012 }
7013 while (*input_line_pointer++ == ',');
7014
7015 input_line_pointer--;
7016 demand_empty_rest_of_line ();
7017 }
7018 #endif
7019
7020 static int
7021 i386_immediate (char *imm_start)
7022 {
7023 char *save_input_line_pointer;
7024 char *gotfree_input_line;
7025 segT exp_seg = 0;
7026 expressionS *exp;
7027 i386_operand_type types;
7028
7029 operand_type_set (&types, ~0);
7030
7031 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7032 {
7033 as_bad (_("at most %d immediate operands are allowed"),
7034 MAX_IMMEDIATE_OPERANDS);
7035 return 0;
7036 }
7037
7038 exp = &im_expressions[i.imm_operands++];
7039 i.op[this_operand].imms = exp;
7040
7041 if (is_space_char (*imm_start))
7042 ++imm_start;
7043
7044 save_input_line_pointer = input_line_pointer;
7045 input_line_pointer = imm_start;
7046
7047 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7048 if (gotfree_input_line)
7049 input_line_pointer = gotfree_input_line;
7050
7051 exp_seg = expression (exp);
7052
7053 SKIP_WHITESPACE ();
7054 if (*input_line_pointer)
7055 as_bad (_("junk `%s' after expression"), input_line_pointer);
7056
7057 input_line_pointer = save_input_line_pointer;
7058 if (gotfree_input_line)
7059 {
7060 free (gotfree_input_line);
7061
7062 if (exp->X_op == O_constant || exp->X_op == O_register)
7063 exp->X_op = O_illegal;
7064 }
7065
7066 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7067 }
7068
7069 static int
7070 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7071 i386_operand_type types, const char *imm_start)
7072 {
7073 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7074 {
7075 if (imm_start)
7076 as_bad (_("missing or invalid immediate expression `%s'"),
7077 imm_start);
7078 return 0;
7079 }
7080 else if (exp->X_op == O_constant)
7081 {
7082 /* Size it properly later. */
7083 i.types[this_operand].bitfield.imm64 = 1;
7084 /* If not 64bit, sign extend val. */
7085 if (flag_code != CODE_64BIT
7086 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7087 exp->X_add_number
7088 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
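/* E.g. (illustrative) for 0xfffffff0: XOR with 1<<31 gives 0x7ffffff0,
and subtracting 1<<31 yields -16, i.e. the 32-bit value sign-extended
to the full width of X_add_number. */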
7089 }
7090 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7091 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7092 && exp_seg != absolute_section
7093 && exp_seg != text_section
7094 && exp_seg != data_section
7095 && exp_seg != bss_section
7096 && exp_seg != undefined_section
7097 && !bfd_is_com_section (exp_seg))
7098 {
7099 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7100 return 0;
7101 }
7102 #endif
7103 else if (!intel_syntax && exp->X_op == O_register)
7104 {
7105 if (imm_start)
7106 as_bad (_("illegal immediate register operand %s"), imm_start);
7107 return 0;
7108 }
7109 else
7110 {
7111 /* This is an address. The size of the address will be
7112 determined later, depending on destination register,
7113 suffix, or the default for the section. */
7114 i.types[this_operand].bitfield.imm8 = 1;
7115 i.types[this_operand].bitfield.imm16 = 1;
7116 i.types[this_operand].bitfield.imm32 = 1;
7117 i.types[this_operand].bitfield.imm32s = 1;
7118 i.types[this_operand].bitfield.imm64 = 1;
7119 i.types[this_operand] = operand_type_and (i.types[this_operand],
7120 types);
7121 }
7122
7123 return 1;
7124 }
7125
7126 static char *
7127 i386_scale (char *scale)
7128 {
7129 offsetT val;
7130 char *save = input_line_pointer;
7131
7132 input_line_pointer = scale;
7133 val = get_absolute_expression ();
7134
7135 switch (val)
7136 {
7137 case 1:
7138 i.log2_scale_factor = 0;
7139 break;
7140 case 2:
7141 i.log2_scale_factor = 1;
7142 break;
7143 case 4:
7144 i.log2_scale_factor = 2;
7145 break;
7146 case 8:
7147 i.log2_scale_factor = 3;
7148 break;
7149 default:
7150 {
7151 char sep = *input_line_pointer;
7152
7153 *input_line_pointer = '\0';
7154 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7155 scale);
7156 *input_line_pointer = sep;
7157 input_line_pointer = save;
7158 return NULL;
7159 }
7160 }
7161 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7162 {
7163 as_warn (_("scale factor of %d without an index register"),
7164 1 << i.log2_scale_factor);
7165 i.log2_scale_factor = 0;
7166 }
7167 scale = input_line_pointer;
7168 input_line_pointer = save;
7169 return scale;
7170 }
7171
7172 static int
7173 i386_displacement (char *disp_start, char *disp_end)
7174 {
7175 expressionS *exp;
7176 segT exp_seg = 0;
7177 char *save_input_line_pointer;
7178 char *gotfree_input_line;
7179 int override;
7180 i386_operand_type bigdisp, types = anydisp;
7181 int ret;
7182
7183 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7184 {
7185 as_bad (_("at most %d displacement operands are allowed"),
7186 MAX_MEMORY_OPERANDS);
7187 return 0;
7188 }
7189
7190 operand_type_set (&bigdisp, 0);
7191 if ((i.types[this_operand].bitfield.jumpabsolute)
7192 || (!current_templates->start->opcode_modifier.jump
7193 && !current_templates->start->opcode_modifier.jumpdword))
7194 {
7195 bigdisp.bitfield.disp32 = 1;
7196 override = (i.prefix[ADDR_PREFIX] != 0);
7197 if (flag_code == CODE_64BIT)
7198 {
7199 if (!override)
7200 {
7201 bigdisp.bitfield.disp32s = 1;
7202 bigdisp.bitfield.disp64 = 1;
7203 }
7204 }
7205 else if ((flag_code == CODE_16BIT) ^ override)
7206 {
7207 bigdisp.bitfield.disp32 = 0;
7208 bigdisp.bitfield.disp16 = 1;
7209 }
7210 }
7211 else
7212 {
7213 /* For PC-relative branches, the width of the displacement
7214 is dependent upon data size, not address size. */
7215 override = (i.prefix[DATA_PREFIX] != 0);
7216 if (flag_code == CODE_64BIT)
7217 {
7218 if (override || i.suffix == WORD_MNEM_SUFFIX)
7219 bigdisp.bitfield.disp16 = 1;
7220 else
7221 {
7222 bigdisp.bitfield.disp32 = 1;
7223 bigdisp.bitfield.disp32s = 1;
7224 }
7225 }
7226 else
7227 {
7228 if (!override)
7229 override = (i.suffix == (flag_code != CODE_16BIT
7230 ? WORD_MNEM_SUFFIX
7231 : LONG_MNEM_SUFFIX));
7232 bigdisp.bitfield.disp32 = 1;
7233 if ((flag_code == CODE_16BIT) ^ override)
7234 {
7235 bigdisp.bitfield.disp32 = 0;
7236 bigdisp.bitfield.disp16 = 1;
7237 }
7238 }
7239 }
7240 i.types[this_operand] = operand_type_or (i.types[this_operand],
7241 bigdisp);
7242
7243 exp = &disp_expressions[i.disp_operands];
7244 i.op[this_operand].disps = exp;
7245 i.disp_operands++;
7246 save_input_line_pointer = input_line_pointer;
7247 input_line_pointer = disp_start;
7248 END_STRING_AND_SAVE (disp_end);
7249
7250 #ifndef GCC_ASM_O_HACK
7251 #define GCC_ASM_O_HACK 0
7252 #endif
7253 #if GCC_ASM_O_HACK
7254 END_STRING_AND_SAVE (disp_end + 1);
7255 if (i.types[this_operand].bitfield.baseIndex
7256 && displacement_string_end[-1] == '+')
7257 {
7258 /* This hack is to avoid a warning when using the "o"
7259 constraint within gcc asm statements.
7260 For instance:
7261
7262 #define _set_tssldt_desc(n,addr,limit,type) \
7263 __asm__ __volatile__ ( \
7264 "movw %w2,%0\n\t" \
7265 "movw %w1,2+%0\n\t" \
7266 "rorl $16,%1\n\t" \
7267 "movb %b1,4+%0\n\t" \
7268 "movb %4,5+%0\n\t" \
7269 "movb $0,6+%0\n\t" \
7270 "movb %h1,7+%0\n\t" \
7271 "rorl $16,%1" \
7272 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7273
7274 This works great except that the output assembler ends
7275 up looking a bit weird if it turns out that there is
7276 no offset. You end up producing code that looks like:
7277
7278 #APP
7279 movw $235,(%eax)
7280 movw %dx,2+(%eax)
7281 rorl $16,%edx
7282 movb %dl,4+(%eax)
7283 movb $137,5+(%eax)
7284 movb $0,6+(%eax)
7285 movb %dh,7+(%eax)
7286 rorl $16,%edx
7287 #NO_APP
7288
7289 So here we provide the missing zero. */
7290
7291 *displacement_string_end = '0';
7292 }
7293 #endif
7294 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7295 if (gotfree_input_line)
7296 input_line_pointer = gotfree_input_line;
7297
7298 exp_seg = expression (exp);
7299
7300 SKIP_WHITESPACE ();
7301 if (*input_line_pointer)
7302 as_bad (_("junk `%s' after expression"), input_line_pointer);
7303 #if GCC_ASM_O_HACK
7304 RESTORE_END_STRING (disp_end + 1);
7305 #endif
7306 input_line_pointer = save_input_line_pointer;
7307 if (gotfree_input_line)
7308 {
7309 free (gotfree_input_line);
7310
7311 if (exp->X_op == O_constant || exp->X_op == O_register)
7312 exp->X_op = O_illegal;
7313 }
7314
7315 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7316
7317 RESTORE_END_STRING (disp_end);
7318
7319 return ret;
7320 }
7321
7322 static int
7323 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7324 i386_operand_type types, const char *disp_start)
7325 {
7326 i386_operand_type bigdisp;
7327 int ret = 1;
7328
7329 /* We do this to make sure that the section symbol is in
7330 the symbol table. We will ultimately change the relocation
7331 to be relative to the beginning of the section. */
7332 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7333 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7334 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7335 {
7336 if (exp->X_op != O_symbol)
7337 goto inv_disp;
7338
7339 if (S_IS_LOCAL (exp->X_add_symbol)
7340 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7341 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7342 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7343 exp->X_op = O_subtract;
7344 exp->X_op_symbol = GOT_symbol;
7345 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7346 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7347 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7348 i.reloc[this_operand] = BFD_RELOC_64;
7349 else
7350 i.reloc[this_operand] = BFD_RELOC_32;
7351 }
7352
7353 else if (exp->X_op == O_absent
7354 || exp->X_op == O_illegal
7355 || exp->X_op == O_big)
7356 {
7357 inv_disp:
7358 as_bad (_("missing or invalid displacement expression `%s'"),
7359 disp_start);
7360 ret = 0;
7361 }
7362
7363 else if (flag_code == CODE_64BIT
7364 && !i.prefix[ADDR_PREFIX]
7365 && exp->X_op == O_constant)
7366 {
7367 /* Since the displacement is sign-extended to 64 bits, don't allow
7368 disp32, and turn off disp32s if the value is out of range. */
7369 i.types[this_operand].bitfield.disp32 = 0;
7370 if (!fits_in_signed_long (exp->X_add_number))
7371 {
7372 i.types[this_operand].bitfield.disp32s = 0;
7373 if (i.types[this_operand].bitfield.baseindex)
7374 {
7375 as_bad (_("0x%lx out range of signed 32bit displacement"),
7376 (long) exp->X_add_number);
7377 ret = 0;
7378 }
7379 }
7380 }
7381
7382 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7383 else if (exp->X_op != O_constant
7384 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7385 && exp_seg != absolute_section
7386 && exp_seg != text_section
7387 && exp_seg != data_section
7388 && exp_seg != bss_section
7389 && exp_seg != undefined_section
7390 && !bfd_is_com_section (exp_seg))
7391 {
7392 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7393 ret = 0;
7394 }
7395 #endif
7396
7397 /* Check if this is a displacement only operand. */
7398 bigdisp = i.types[this_operand];
7399 bigdisp.bitfield.disp8 = 0;
7400 bigdisp.bitfield.disp16 = 0;
7401 bigdisp.bitfield.disp32 = 0;
7402 bigdisp.bitfield.disp32s = 0;
7403 bigdisp.bitfield.disp64 = 0;
7404 if (operand_type_all_zero (&bigdisp))
7405 i.types[this_operand] = operand_type_and (i.types[this_operand],
7406 types);
7407
7408 return ret;
7409 }
7410
7411 /* Make sure the memory operand we've been dealt is valid.
7412 Return 1 on success, 0 on a failure. */
7413
7414 static int
7415 i386_index_check (const char *operand_string)
7416 {
7417 int ok;
7418 const char *kind = "base/index";
7419 #if INFER_ADDR_PREFIX
7420 int fudged = 0;
7421
7422 tryprefix:
7423 #endif
7424 ok = 1;
7425 if (current_templates->start->opcode_modifier.isstring
7426 && !current_templates->start->opcode_modifier.immext
7427 && (current_templates->end[-1].opcode_modifier.isstring
7428 || i.mem_operands))
7429 {
7430 /* Memory operands of string insns are special in that they only allow
7431 a single register (rDI, rSI, or rBX) as their memory address. */
7432 unsigned int expected;
7433
7434 kind = "string address";
7435
7436 if (current_templates->start->opcode_modifier.w)
7437 {
7438 i386_operand_type type = current_templates->end[-1].operand_types[0];
7439
7440 if (!type.bitfield.baseindex
7441 || ((!i.mem_operands != !intel_syntax)
7442 && current_templates->end[-1].operand_types[1]
7443 .bitfield.baseindex))
7444 type = current_templates->end[-1].operand_types[1];
7445 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7446 }
7447 else
7448 expected = 3 /* rBX */;
7449
7450 if (!i.base_reg || i.index_reg
7451 || operand_type_check (i.types[this_operand], disp))
7452 ok = -1;
7453 else if (!(flag_code == CODE_64BIT
7454 ? i.prefix[ADDR_PREFIX]
7455 ? i.base_reg->reg_type.bitfield.reg32
7456 : i.base_reg->reg_type.bitfield.reg64
7457 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7458 ? i.base_reg->reg_type.bitfield.reg32
7459 : i.base_reg->reg_type.bitfield.reg16))
7460 ok = 0;
7461 else if (register_number (i.base_reg) != expected)
7462 ok = -1;
7463
7464 if (ok < 0)
7465 {
7466 unsigned int j;
7467
7468 for (j = 0; j < i386_regtab_size; ++j)
7469 if ((flag_code == CODE_64BIT
7470 ? i.prefix[ADDR_PREFIX]
7471 ? i386_regtab[j].reg_type.bitfield.reg32
7472 : i386_regtab[j].reg_type.bitfield.reg64
7473 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7474 ? i386_regtab[j].reg_type.bitfield.reg32
7475 : i386_regtab[j].reg_type.bitfield.reg16)
7476 && register_number(i386_regtab + j) == expected)
7477 break;
7478 gas_assert (j < i386_regtab_size);
7479 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7480 operand_string,
7481 intel_syntax ? '[' : '(',
7482 register_prefix,
7483 i386_regtab[j].reg_name,
7484 intel_syntax ? ']' : ')');
7485 ok = 1;
7486 }
7487 }
7488 else if (flag_code == CODE_64BIT)
7489 {
7490 if ((i.base_reg
7491 && ((i.prefix[ADDR_PREFIX] == 0
7492 && !i.base_reg->reg_type.bitfield.reg64)
7493 || (i.prefix[ADDR_PREFIX]
7494 && !i.base_reg->reg_type.bitfield.reg32))
7495 && (i.index_reg
7496 || i.base_reg->reg_num !=
7497 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7498 || (i.index_reg
7499 && !(i.index_reg->reg_type.bitfield.regxmm
7500 || i.index_reg->reg_type.bitfield.regymm)
7501 && (!i.index_reg->reg_type.bitfield.baseindex
7502 || (i.prefix[ADDR_PREFIX] == 0
7503 && i.index_reg->reg_num != RegRiz
7504 && !i.index_reg->reg_type.bitfield.reg64
7505 )
7506 || (i.prefix[ADDR_PREFIX]
7507 && i.index_reg->reg_num != RegEiz
7508 && !i.index_reg->reg_type.bitfield.reg32))))
7509 ok = 0;
7510 }
7511 else
7512 {
7513 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7514 {
7515 /* 16bit checks. */
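/* Explanatory note: when an index register is present, the base must be
%bx or %bp, the index must be %si or %di, and the scale factor must be
1; a lone base register may be any of %bx, %bp, %si or %di. */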
7516 if ((i.base_reg
7517 && (!i.base_reg->reg_type.bitfield.reg16
7518 || !i.base_reg->reg_type.bitfield.baseindex))
7519 || (i.index_reg
7520 && (!i.index_reg->reg_type.bitfield.reg16
7521 || !i.index_reg->reg_type.bitfield.baseindex
7522 || !(i.base_reg
7523 && i.base_reg->reg_num < 6
7524 && i.index_reg->reg_num >= 6
7525 && i.log2_scale_factor == 0))))
7526 ok = 0;
7527 }
7528 else
7529 {
7530 /* 32bit checks. */
7531 if ((i.base_reg
7532 && !i.base_reg->reg_type.bitfield.reg32)
7533 || (i.index_reg
7534 && !i.index_reg->reg_type.bitfield.regxmm
7535 && !i.index_reg->reg_type.bitfield.regymm
7536 && ((!i.index_reg->reg_type.bitfield.reg32
7537 && i.index_reg->reg_num != RegEiz)
7538 || !i.index_reg->reg_type.bitfield.baseindex)))
7539 ok = 0;
7540 }
7541 }
7542 if (!ok)
7543 {
7544 #if INFER_ADDR_PREFIX
7545 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7546 {
7547 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7548 i.prefixes += 1;
7549 /* Change the size of any displacement too. At most one of
7550 Disp16 or Disp32 is set.
7551 FIXME. There doesn't seem to be any real need for separate
7552 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7553 Removing them would probably clean up the code quite a lot. */
7554 if (flag_code != CODE_64BIT
7555 && (i.types[this_operand].bitfield.disp16
7556 || i.types[this_operand].bitfield.disp32))
7557 i.types[this_operand]
7558 = operand_type_xor (i.types[this_operand], disp16_32);
7559 fudged = 1;
7560 goto tryprefix;
7561 }
7562 if (fudged)
7563 as_bad (_("`%s' is not a valid %s expression"),
7564 operand_string,
7565 kind);
7566 else
7567 #endif
7568 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7569 operand_string,
7570 flag_code_names[i.prefix[ADDR_PREFIX]
7571 ? flag_code == CODE_32BIT
7572 ? CODE_16BIT
7573 : CODE_32BIT
7574 : flag_code],
7575 kind);
7576 }
7577 return ok;
7578 }
7579
7580 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7581 on error. */
7582
7583 static int
7584 i386_att_operand (char *operand_string)
7585 {
7586 const reg_entry *r;
7587 char *end_op;
7588 char *op_string = operand_string;
7589
7590 if (is_space_char (*op_string))
7591 ++op_string;
7592
7593 /* We check for an absolute prefix (differentiating,
7594 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7595 if (*op_string == ABSOLUTE_PREFIX)
7596 {
7597 ++op_string;
7598 if (is_space_char (*op_string))
7599 ++op_string;
7600 i.types[this_operand].bitfield.jumpabsolute = 1;
7601 }
7602
7603 /* Check if operand is a register. */
7604 if ((r = parse_register (op_string, &end_op)) != NULL)
7605 {
7606 i386_operand_type temp;
7607
7608 /* Check for a segment override by searching for ':' after a
7609 segment register. */
7610 op_string = end_op;
7611 if (is_space_char (*op_string))
7612 ++op_string;
7613 if (*op_string == ':'
7614 && (r->reg_type.bitfield.sreg2
7615 || r->reg_type.bitfield.sreg3))
7616 {
7617 switch (r->reg_num)
7618 {
7619 case 0:
7620 i.seg[i.mem_operands] = &es;
7621 break;
7622 case 1:
7623 i.seg[i.mem_operands] = &cs;
7624 break;
7625 case 2:
7626 i.seg[i.mem_operands] = &ss;
7627 break;
7628 case 3:
7629 i.seg[i.mem_operands] = &ds;
7630 break;
7631 case 4:
7632 i.seg[i.mem_operands] = &fs;
7633 break;
7634 case 5:
7635 i.seg[i.mem_operands] = &gs;
7636 break;
7637 }
7638
7639 /* Skip the ':' and whitespace. */
7640 ++op_string;
7641 if (is_space_char (*op_string))
7642 ++op_string;
7643
7644 if (!is_digit_char (*op_string)
7645 && !is_identifier_char (*op_string)
7646 && *op_string != '('
7647 && *op_string != ABSOLUTE_PREFIX)
7648 {
7649 as_bad (_("bad memory operand `%s'"), op_string);
7650 return 0;
7651 }
7652 /* Handle case of %es:*foo. */
7653 if (*op_string == ABSOLUTE_PREFIX)
7654 {
7655 ++op_string;
7656 if (is_space_char (*op_string))
7657 ++op_string;
7658 i.types[this_operand].bitfield.jumpabsolute = 1;
7659 }
7660 goto do_memory_reference;
7661 }
7662 if (*op_string)
7663 {
7664 as_bad (_("junk `%s' after register"), op_string);
7665 return 0;
7666 }
7667 temp = r->reg_type;
7668 temp.bitfield.baseindex = 0;
7669 i.types[this_operand] = operand_type_or (i.types[this_operand],
7670 temp);
7671 i.types[this_operand].bitfield.unspecified = 0;
7672 i.op[this_operand].regs = r;
7673 i.reg_operands++;
7674 }
7675 else if (*op_string == REGISTER_PREFIX)
7676 {
7677 as_bad (_("bad register name `%s'"), op_string);
7678 return 0;
7679 }
7680 else if (*op_string == IMMEDIATE_PREFIX)
7681 {
7682 ++op_string;
7683 if (i.types[this_operand].bitfield.jumpabsolute)
7684 {
7685 as_bad (_("immediate operand illegal with absolute jump"));
7686 return 0;
7687 }
7688 if (!i386_immediate (op_string))
7689 return 0;
7690 }
7691 else if (is_digit_char (*op_string)
7692 || is_identifier_char (*op_string)
7693 || *op_string == '(')
7694 {
7695 /* This is a memory reference of some sort. */
7696 char *base_string;
7697
7698 /* Start and end of displacement string expression (if found). */
7699 char *displacement_string_start;
7700 char *displacement_string_end;
7701
7702 do_memory_reference:
7703 if ((i.mem_operands == 1
7704 && !current_templates->start->opcode_modifier.isstring)
7705 || i.mem_operands == 2)
7706 {
7707 as_bad (_("too many memory references for `%s'"),
7708 current_templates->start->name);
7709 return 0;
7710 }
7711
7712 /* Check for base index form. We detect the base index form by
7713 looking for an ')' at the end of the operand, searching
7714 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7715 after the '('. */
7716 base_string = op_string + strlen (op_string);
7717
7718 --base_string;
7719 if (is_space_char (*base_string))
7720 --base_string;
7721
7722 /* If we only have a displacement, set up for it to be parsed later. */
7723 displacement_string_start = op_string;
7724 displacement_string_end = base_string + 1;
7725
7726 if (*base_string == ')')
7727 {
7728 char *temp_string;
7729 unsigned int parens_balanced = 1;
7730 /* We've already checked that the numbers of left & right ()'s are
7731 equal, so this loop will not be infinite. */
7732 do
7733 {
7734 base_string--;
7735 if (*base_string == ')')
7736 parens_balanced++;
7737 if (*base_string == '(')
7738 parens_balanced--;
7739 }
7740 while (parens_balanced);
7741
7742 temp_string = base_string;
7743
7744 /* Skip past '(' and whitespace. */
7745 ++base_string;
7746 if (is_space_char (*base_string))
7747 ++base_string;
7748
7749 if (*base_string == ','
7750 || ((i.base_reg = parse_register (base_string, &end_op))
7751 != NULL))
7752 {
7753 displacement_string_end = temp_string;
7754
7755 i.types[this_operand].bitfield.baseindex = 1;
7756
7757 if (i.base_reg)
7758 {
7759 base_string = end_op;
7760 if (is_space_char (*base_string))
7761 ++base_string;
7762 }
7763
7764 /* There may be an index reg or scale factor here. */
7765 if (*base_string == ',')
7766 {
7767 ++base_string;
7768 if (is_space_char (*base_string))
7769 ++base_string;
7770
7771 if ((i.index_reg = parse_register (base_string, &end_op))
7772 != NULL)
7773 {
7774 base_string = end_op;
7775 if (is_space_char (*base_string))
7776 ++base_string;
7777 if (*base_string == ',')
7778 {
7779 ++base_string;
7780 if (is_space_char (*base_string))
7781 ++base_string;
7782 }
7783 else if (*base_string != ')')
7784 {
7785 as_bad (_("expecting `,' or `)' "
7786 "after index register in `%s'"),
7787 operand_string);
7788 return 0;
7789 }
7790 }
7791 else if (*base_string == REGISTER_PREFIX)
7792 {
7793 end_op = strchr (base_string, ',');
7794 if (end_op)
7795 *end_op = '\0';
7796 as_bad (_("bad register name `%s'"), base_string);
7797 return 0;
7798 }
7799
7800 /* Check for scale factor. */
7801 if (*base_string != ')')
7802 {
7803 char *end_scale = i386_scale (base_string);
7804
7805 if (!end_scale)
7806 return 0;
7807
7808 base_string = end_scale;
7809 if (is_space_char (*base_string))
7810 ++base_string;
7811 if (*base_string != ')')
7812 {
7813 as_bad (_("expecting `)' "
7814 "after scale factor in `%s'"),
7815 operand_string);
7816 return 0;
7817 }
7818 }
7819 else if (!i.index_reg)
7820 {
7821 as_bad (_("expecting index register or scale factor "
7822 "after `,'; got '%c'"),
7823 *base_string);
7824 return 0;
7825 }
7826 }
7827 else if (*base_string != ')')
7828 {
7829 as_bad (_("expecting `,' or `)' "
7830 "after base register in `%s'"),
7831 operand_string);
7832 return 0;
7833 }
7834 }
7835 else if (*base_string == REGISTER_PREFIX)
7836 {
7837 end_op = strchr (base_string, ',');
7838 if (end_op)
7839 *end_op = '\0';
7840 as_bad (_("bad register name `%s'"), base_string);
7841 return 0;
7842 }
7843 }
7844
7845 /* If there's an expression beginning the operand, parse it,
7846 assuming displacement_string_start and
7847 displacement_string_end are meaningful. */
7848 if (displacement_string_start != displacement_string_end)
7849 {
7850 if (!i386_displacement (displacement_string_start,
7851 displacement_string_end))
7852 return 0;
7853 }
7854
7855 /* Special case for (%dx) while doing input/output op. */
7856 if (i.base_reg
7857 && operand_type_equal (&i.base_reg->reg_type,
7858 &reg16_inoutportreg)
7859 && i.index_reg == 0
7860 && i.log2_scale_factor == 0
7861 && i.seg[i.mem_operands] == 0
7862 && !operand_type_check (i.types[this_operand], disp))
7863 {
7864 i.types[this_operand] = inoutportreg;
7865 return 1;
7866 }
7867
7868 if (i386_index_check (operand_string) == 0)
7869 return 0;
7870 i.types[this_operand].bitfield.mem = 1;
7871 i.mem_operands++;
7872 }
7873 else
7874 {
7875 /* It's not a memory operand; argh! */
7876 as_bad (_("invalid char %s beginning operand %d `%s'"),
7877 output_invalid (*op_string),
7878 this_operand + 1,
7879 op_string);
7880 return 0;
7881 }
7882 return 1; /* Normal return. */
7883 }
7884 \f
7885 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7886 that an rs_machine_dependent frag may reach. */
7887
7888 unsigned int
7889 i386_frag_max_var (fragS *frag)
7890 {
7891 /* The only relaxable frags are for jumps.
7892 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7893 gas_assert (frag->fr_type == rs_machine_dependent);
7894 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7895 }
7896
7897 /* md_estimate_size_before_relax()
7898
7899 Called just before relax() for rs_machine_dependent frags. The x86
7900 assembler uses these frags to handle variable size jump
7901 instructions.
7902
7903 Any symbol that is now undefined will not become defined.
7904 Return the correct fr_subtype in the frag.
7905 Return the initial "guess for variable size of frag" to caller.
7906 The guess is actually the growth beyond the fixed part. Whatever
7907 we do to grow the fixed or variable part contributes to our
7908 returned value. */
7909
7910 int
7911 md_estimate_size_before_relax (fragS *fragP, segT segment)
7912 {
7913 /* We've already got fragP->fr_subtype right; all we have to do is
7914 check for un-relaxable symbols. On an ELF system, we can't relax
7915 an externally visible symbol, because it may be overridden by a
7916 shared library. */
7917 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7918 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7919 || (IS_ELF
7920 && (S_IS_EXTERNAL (fragP->fr_symbol)
7921 || S_IS_WEAK (fragP->fr_symbol)
7922 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7923 & BSF_GNU_INDIRECT_FUNCTION))))
7924 #endif
7925 #if defined (OBJ_COFF) && defined (TE_PE)
7926 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7927 && S_IS_WEAK (fragP->fr_symbol))
7928 #endif
7929 )
7930 {
7931 /* Symbol is undefined in this segment, or we need to keep a
7932 reloc so that weak symbols can be overridden. */
7933 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7934 enum bfd_reloc_code_real reloc_type;
7935 unsigned char *opcode;
7936 int old_fr_fix;
7937
7938 if (fragP->fr_var != NO_RELOC)
7939 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7940 else if (size == 2)
7941 reloc_type = BFD_RELOC_16_PCREL;
7942 else
7943 reloc_type = BFD_RELOC_32_PCREL;
7944
7945 old_fr_fix = fragP->fr_fix;
7946 opcode = (unsigned char *) fragP->fr_opcode;
7947
7948 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7949 {
7950 case UNCOND_JUMP:
7951 /* Make jmp (0xeb) a (d)word displacement jump. */
7952 opcode[0] = 0xe9;
7953 fragP->fr_fix += size;
7954 fix_new (fragP, old_fr_fix, size,
7955 fragP->fr_symbol,
7956 fragP->fr_offset, 1,
7957 reloc_type);
7958 break;
7959
7960 case COND_JUMP86:
7961 if (size == 2
7962 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7963 {
7964 /* Negate the condition, and branch past an
7965 unconditional jump. */
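	      /* Illustrative layout: a 16-bit `jz' (0x74 <disp8>) becomes
		 0x75 0x03 0xe9 <disp16>, i.e. the inverted condition
		 skips over the 3-byte unconditional jump.  */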
7966 opcode[0] ^= 1;
7967 opcode[1] = 3;
7968 /* Insert an unconditional jump. */
7969 opcode[2] = 0xe9;
7970 /* We added two extra opcode bytes, and have a two byte
7971 offset. */
7972 fragP->fr_fix += 2 + 2;
7973 fix_new (fragP, old_fr_fix + 2, 2,
7974 fragP->fr_symbol,
7975 fragP->fr_offset, 1,
7976 reloc_type);
7977 break;
7978 }
7979 /* Fall through. */
7980
7981 case COND_JUMP:
7982 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7983 {
7984 fixS *fixP;
7985
7986 fragP->fr_fix += 1;
7987 fixP = fix_new (fragP, old_fr_fix, 1,
7988 fragP->fr_symbol,
7989 fragP->fr_offset, 1,
7990 BFD_RELOC_8_PCREL);
7991 fixP->fx_signed = 1;
7992 break;
7993 }
7994
7995 /* This changes the byte-displacement jump 0x7N
7996 to the (d)word-displacement jump 0x0f,0x8N. */
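	  /* E.g. a short `je' (0x74) becomes 0x0f 0x84 followed by a
	     16- or 32-bit displacement.  */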
7997 opcode[1] = opcode[0] + 0x10;
7998 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7999 /* We've added an opcode byte. */
8000 fragP->fr_fix += 1 + size;
8001 fix_new (fragP, old_fr_fix + 1, size,
8002 fragP->fr_symbol,
8003 fragP->fr_offset, 1,
8004 reloc_type);
8005 break;
8006
8007 default:
8008 BAD_CASE (fragP->fr_subtype);
8009 break;
8010 }
8011 frag_wane (fragP);
8012 return fragP->fr_fix - old_fr_fix;
8013 }
8014
8015 /* Guess size depending on current relax state. Initially the relax
8016 state will correspond to a short jump and we return 1, because
8017 the variable part of the frag (the branch offset) is one byte
8018 long. However, we can relax a section more than once and in that
8019 case we must either set fr_subtype back to the unrelaxed state,
8020 or return the value for the appropriate branch. */
8021 return md_relax_table[fragP->fr_subtype].rlx_length;
8022 }
8023
8024 /* Called after relax() is finished.
8025
8026 In: Address of frag.
8027 fr_type == rs_machine_dependent.
8028 fr_subtype is what the address relaxed to.
8029
8030 Out: Any fixSs and constants are set up.
8031 Caller will turn frag into a ".space 0". */
8032
8033 void
8034 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8035 fragS *fragP)
8036 {
8037 unsigned char *opcode;
8038 unsigned char *where_to_put_displacement = NULL;
8039 offsetT target_address;
8040 offsetT opcode_address;
8041 unsigned int extension = 0;
8042 offsetT displacement_from_opcode_start;
8043
8044 opcode = (unsigned char *) fragP->fr_opcode;
8045
8046 /* Address we want to reach in file space. */
8047 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8048
8049 /* Address opcode resides at in file space. */
8050 opcode_address = fragP->fr_address + fragP->fr_fix;
8051
8052 /* Displacement from opcode start to fill into instruction. */
8053 displacement_from_opcode_start = target_address - opcode_address;
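  /* Note that the encoded displacement is relative to the end of the
     jump instruction; the EXTENSION bytes added below end exactly
     there, which is why EXTENSION is subtracted again when the
     displacement bytes are written out.  */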
8054
8055 if ((fragP->fr_subtype & BIG) == 0)
8056 {
8057 /* Don't have to change opcode. */
8058 extension = 1; /* 1 opcode + 1 displacement */
8059 where_to_put_displacement = &opcode[1];
8060 }
8061 else
8062 {
8063 if (no_cond_jump_promotion
8064 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8065 as_warn_where (fragP->fr_file, fragP->fr_line,
8066 _("long jump required"));
8067
8068 switch (fragP->fr_subtype)
8069 {
8070 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8071 extension = 4; /* 1 opcode + 4 displacement */
8072 opcode[0] = 0xe9;
8073 where_to_put_displacement = &opcode[1];
8074 break;
8075
8076 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8077 extension = 2; /* 1 opcode + 2 displacement */
8078 opcode[0] = 0xe9;
8079 where_to_put_displacement = &opcode[1];
8080 break;
8081
8082 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8083 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8084 extension = 5; /* 2 opcode + 4 displacement */
8085 opcode[1] = opcode[0] + 0x10;
8086 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8087 where_to_put_displacement = &opcode[2];
8088 break;
8089
8090 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8091 extension = 3; /* 2 opcode + 2 displacement */
8092 opcode[1] = opcode[0] + 0x10;
8093 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8094 where_to_put_displacement = &opcode[2];
8095 break;
8096
8097 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8098 extension = 4;
8099 opcode[0] ^= 1;
8100 opcode[1] = 3;
8101 opcode[2] = 0xe9;
8102 where_to_put_displacement = &opcode[3];
8103 break;
8104
8105 default:
8106 BAD_CASE (fragP->fr_subtype);
8107 break;
8108 }
8109 }
8110
8111   /* If the size is less than four we are sure that the operand fits,
8112      but if it is 4, then the displacement could be larger than
8113      +/- 2GB.  */
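  /* Biasing by 2^31 and comparing against 2^32 - 1 flags, using only
     unsigned arithmetic, any displacement outside the signed 32-bit
     range [-2^31, 2^31 - 1].  */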
8114 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8115 && object_64bit
8116 && ((addressT) (displacement_from_opcode_start - extension
8117 + ((addressT) 1 << 31))
8118 > (((addressT) 2 << 31) - 1)))
8119 {
8120 as_bad_where (fragP->fr_file, fragP->fr_line,
8121 _("jump target out of range"));
8122 /* Make us emit 0. */
8123 displacement_from_opcode_start = extension;
8124 }
8125 /* Now put displacement after opcode. */
8126 md_number_to_chars ((char *) where_to_put_displacement,
8127 (valueT) (displacement_from_opcode_start - extension),
8128 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8129 fragP->fr_fix += extension;
8130 }
8131 \f
8132 /* Apply a fixup (fixP) to segment data, once it has been determined
8133 by our caller that we have all the info we need to fix it up.
8134
8135 Parameter valP is the pointer to the value of the bits.
8136
8137 On the 386, immediates, displacements, and data pointers are all in
8138 the same (little-endian) format, so we don't need to care about which
8139 we are handling. */
8140
8141 void
8142 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8143 {
8144 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8145 valueT value = *valP;
8146
8147 #if !defined (TE_Mach)
8148 if (fixP->fx_pcrel)
8149 {
8150 switch (fixP->fx_r_type)
8151 {
8152 default:
8153 break;
8154
8155 case BFD_RELOC_64:
8156 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8157 break;
8158 case BFD_RELOC_32:
8159 case BFD_RELOC_X86_64_32S:
8160 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8161 break;
8162 case BFD_RELOC_16:
8163 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8164 break;
8165 case BFD_RELOC_8:
8166 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8167 break;
8168 }
8169 }
8170
8171 if (fixP->fx_addsy != NULL
8172 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8173 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8174 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8175 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8176 && !use_rela_relocations)
8177 {
8178 /* This is a hack. There should be a better way to handle this.
8179 This covers for the fact that bfd_install_relocation will
8180 subtract the current location (for partial_inplace, PC relative
8181 relocations); see more below. */
8182 #ifndef OBJ_AOUT
8183 if (IS_ELF
8184 #ifdef TE_PE
8185 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8186 #endif
8187 )
8188 value += fixP->fx_where + fixP->fx_frag->fr_address;
8189 #endif
8190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8191 if (IS_ELF)
8192 {
8193 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8194
8195 if ((sym_seg == seg
8196 || (symbol_section_p (fixP->fx_addsy)
8197 && sym_seg != absolute_section))
8198 && !generic_force_reloc (fixP))
8199 {
8200 /* Yes, we add the values in twice. This is because
8201 bfd_install_relocation subtracts them out again. I think
8202 bfd_install_relocation is broken, but I don't dare change
8203 it. FIXME. */
8204 value += fixP->fx_where + fixP->fx_frag->fr_address;
8205 }
8206 }
8207 #endif
8208 #if defined (OBJ_COFF) && defined (TE_PE)
8209 /* For some reason, the PE format does not store a
8210 section address offset for a PC relative symbol. */
8211 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8212 || S_IS_WEAK (fixP->fx_addsy))
8213 value += md_pcrel_from (fixP);
8214 #endif
8215 }
8216 #if defined (OBJ_COFF) && defined (TE_PE)
8217 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8218 {
8219 value -= S_GET_VALUE (fixP->fx_addsy);
8220 }
8221 #endif
8222
8223 /* Fix a few things - the dynamic linker expects certain values here,
8224 and we must not disappoint it. */
8225 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8226 if (IS_ELF && fixP->fx_addsy)
8227 switch (fixP->fx_r_type)
8228 {
8229 case BFD_RELOC_386_PLT32:
8230 case BFD_RELOC_X86_64_PLT32:
8231 /* Make the jump instruction point to the address of the operand. At
8232 runtime we merely add the offset to the actual PLT entry. */
8233 value = -4;
8234 break;
8235
8236 case BFD_RELOC_386_TLS_GD:
8237 case BFD_RELOC_386_TLS_LDM:
8238 case BFD_RELOC_386_TLS_IE_32:
8239 case BFD_RELOC_386_TLS_IE:
8240 case BFD_RELOC_386_TLS_GOTIE:
8241 case BFD_RELOC_386_TLS_GOTDESC:
8242 case BFD_RELOC_X86_64_TLSGD:
8243 case BFD_RELOC_X86_64_TLSLD:
8244 case BFD_RELOC_X86_64_GOTTPOFF:
8245 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8246 value = 0; /* Fully resolved at runtime. No addend. */
8247 /* Fallthrough */
8248 case BFD_RELOC_386_TLS_LE:
8249 case BFD_RELOC_386_TLS_LDO_32:
8250 case BFD_RELOC_386_TLS_LE_32:
8251 case BFD_RELOC_X86_64_DTPOFF32:
8252 case BFD_RELOC_X86_64_DTPOFF64:
8253 case BFD_RELOC_X86_64_TPOFF32:
8254 case BFD_RELOC_X86_64_TPOFF64:
8255 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8256 break;
8257
8258 case BFD_RELOC_386_TLS_DESC_CALL:
8259 case BFD_RELOC_X86_64_TLSDESC_CALL:
8260 value = 0; /* Fully resolved at runtime. No addend. */
8261 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8262 fixP->fx_done = 0;
8263 return;
8264
8265 case BFD_RELOC_386_GOT32:
8266 case BFD_RELOC_X86_64_GOT32:
8267 value = 0; /* Fully resolved at runtime. No addend. */
8268 break;
8269
8270 case BFD_RELOC_VTABLE_INHERIT:
8271 case BFD_RELOC_VTABLE_ENTRY:
8272 fixP->fx_done = 0;
8273 return;
8274
8275 default:
8276 break;
8277 }
8278 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8279 *valP = value;
8280 #endif /* !defined (TE_Mach) */
8281
8282 /* Are we finished with this relocation now? */
8283 if (fixP->fx_addsy == NULL)
8284 fixP->fx_done = 1;
8285 #if defined (OBJ_COFF) && defined (TE_PE)
8286 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8287 {
8288 fixP->fx_done = 0;
8289 /* Remember value for tc_gen_reloc. */
8290 fixP->fx_addnumber = value;
8291 /* Clear out the frag for now. */
8292 value = 0;
8293 }
8294 #endif
8295 else if (use_rela_relocations)
8296 {
8297 fixP->fx_no_overflow = 1;
8298 /* Remember value for tc_gen_reloc. */
8299 fixP->fx_addnumber = value;
8300 value = 0;
8301 }
8302
8303 md_number_to_chars (p, value, fixP->fx_size);
8304 }
8305 \f
8306 char *
8307 md_atof (int type, char *litP, int *sizeP)
8308 {
8309   /* This outputs the LITTLENUMs in REVERSE order,
8310      matching the little-endian byte order of the 386.  */
8311 return ieee_md_atof (type, litP, sizeP, FALSE);
8312 }
8313 \f
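/* Buffer for output_invalid below: large enough for either "'c'" or
   "(0xNN)" plus the terminating NUL.  */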
8314 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8315
8316 static char *
8317 output_invalid (int c)
8318 {
8319 if (ISPRINT (c))
8320 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8321 "'%c'", c);
8322 else
8323 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8324 "(0x%x)", (unsigned char) c);
8325 return output_invalid_buf;
8326 }
8327
8328 /* REG_STRING starts *before* REGISTER_PREFIX. */
8329
8330 static const reg_entry *
8331 parse_real_register (char *reg_string, char **end_op)
8332 {
8333 char *s = reg_string;
8334 char *p;
8335 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8336 const reg_entry *r;
8337
8338 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8339 if (*s == REGISTER_PREFIX)
8340 ++s;
8341
8342 if (is_space_char (*s))
8343 ++s;
8344
8345 p = reg_name_given;
8346 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8347 {
8348 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8349 return (const reg_entry *) NULL;
8350 s++;
8351 }
8352
8353 /* For naked regs, make sure that we are not dealing with an identifier.
8354 This prevents confusing an identifier like `eax_var' with register
8355 `eax'. */
8356 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8357 return (const reg_entry *) NULL;
8358
8359 *end_op = s;
8360
8361 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8362
8363 /* Handle floating point regs, allowing spaces in the (i) part. */
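  /* E.g. both `%st(3)' and `%st ( 3 )' resolve to st(3).  */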
8364 if (r == i386_regtab /* %st is first entry of table */)
8365 {
8366 if (is_space_char (*s))
8367 ++s;
8368 if (*s == '(')
8369 {
8370 ++s;
8371 if (is_space_char (*s))
8372 ++s;
8373 if (*s >= '0' && *s <= '7')
8374 {
8375 int fpr = *s - '0';
8376 ++s;
8377 if (is_space_char (*s))
8378 ++s;
8379 if (*s == ')')
8380 {
8381 *end_op = s + 1;
8382 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8383 know (r);
8384 return r + fpr;
8385 }
8386 }
8387 /* We have "%st(" then garbage. */
8388 return (const reg_entry *) NULL;
8389 }
8390 }
8391
8392 if (r == NULL || allow_pseudo_reg)
8393 return r;
8394
8395 if (operand_type_all_zero (&r->reg_type))
8396 return (const reg_entry *) NULL;
8397
8398 if ((r->reg_type.bitfield.reg32
8399 || r->reg_type.bitfield.sreg3
8400 || r->reg_type.bitfield.control
8401 || r->reg_type.bitfield.debug
8402 || r->reg_type.bitfield.test)
8403 && !cpu_arch_flags.bitfield.cpui386)
8404 return (const reg_entry *) NULL;
8405
8406 if (r->reg_type.bitfield.floatreg
8407 && !cpu_arch_flags.bitfield.cpu8087
8408 && !cpu_arch_flags.bitfield.cpu287
8409 && !cpu_arch_flags.bitfield.cpu387)
8410 return (const reg_entry *) NULL;
8411
8412 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8413 return (const reg_entry *) NULL;
8414
8415 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8416 return (const reg_entry *) NULL;
8417
8418 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8419 return (const reg_entry *) NULL;
8420
8421   /* Don't allow fake index registers unless allow_index_reg is nonzero.  */
8422 if (!allow_index_reg
8423 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8424 return (const reg_entry *) NULL;
8425
8426 if (((r->reg_flags & (RegRex64 | RegRex))
8427 || r->reg_type.bitfield.reg64)
8428 && (!cpu_arch_flags.bitfield.cpulm
8429 || !operand_type_equal (&r->reg_type, &control))
8430 && flag_code != CODE_64BIT)
8431 return (const reg_entry *) NULL;
8432
8433 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8434 return (const reg_entry *) NULL;
8435
8436 return r;
8437 }
8438
8439 /* REG_STRING starts *before* REGISTER_PREFIX. */
8440
8441 static const reg_entry *
8442 parse_register (char *reg_string, char **end_op)
8443 {
8444 const reg_entry *r;
8445
8446 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8447 r = parse_real_register (reg_string, end_op);
8448 else
8449 r = NULL;
8450 if (!r)
8451 {
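      /* Not a literal register name.  See whether the string names a
	 symbol living in reg_section (presumably one created by
	 equating a symbol to a register), whose value expression is an
	 O_register index into i386_regtab.  */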
8452 char *save = input_line_pointer;
8453 char c;
8454 symbolS *symbolP;
8455
8456 input_line_pointer = reg_string;
8457 c = get_symbol_end ();
8458 symbolP = symbol_find (reg_string);
8459 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8460 {
8461 const expressionS *e = symbol_get_value_expression (symbolP);
8462
8463 know (e->X_op == O_register);
8464 know (e->X_add_number >= 0
8465 && (valueT) e->X_add_number < i386_regtab_size);
8466 r = i386_regtab + e->X_add_number;
8467 *end_op = input_line_pointer;
8468 }
8469 *input_line_pointer = c;
8470 input_line_pointer = save;
8471 }
8472 return r;
8473 }
8474
8475 int
8476 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8477 {
8478 const reg_entry *r;
8479 char *end = input_line_pointer;
8480
8481 *end = *nextcharP;
8482 r = parse_register (name, &input_line_pointer);
8483 if (r && end <= input_line_pointer)
8484 {
8485 *nextcharP = *input_line_pointer;
8486 *input_line_pointer = 0;
8487 e->X_op = O_register;
8488 e->X_add_number = r - i386_regtab;
8489 return 1;
8490 }
8491 input_line_pointer = end;
8492 *end = 0;
8493 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8494 }
8495
8496 void
8497 md_operand (expressionS *e)
8498 {
8499 char *end;
8500 const reg_entry *r;
8501
8502 switch (*input_line_pointer)
8503 {
8504 case REGISTER_PREFIX:
8505 r = parse_real_register (input_line_pointer, &end);
8506 if (r)
8507 {
8508 e->X_op = O_register;
8509 e->X_add_number = r - i386_regtab;
8510 input_line_pointer = end;
8511 }
8512 break;
8513
8514 case '[':
8515 gas_assert (intel_syntax);
8516 end = input_line_pointer++;
8517 expression (e);
8518 if (*input_line_pointer == ']')
8519 {
8520 ++input_line_pointer;
8521 e->X_op_symbol = make_expr_symbol (e);
8522 e->X_add_symbol = NULL;
8523 e->X_add_number = 0;
8524 e->X_op = O_index;
8525 }
8526 else
8527 {
8528 e->X_op = O_absent;
8529 input_line_pointer = end;
8530 }
8531 break;
8532 }
8533 }
8534
8535 \f
8536 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8537 const char *md_shortopts = "kVQ:sqn";
8538 #else
8539 const char *md_shortopts = "qn";
8540 #endif
8541
8542 #define OPTION_32 (OPTION_MD_BASE + 0)
8543 #define OPTION_64 (OPTION_MD_BASE + 1)
8544 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8545 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8546 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8547 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8548 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8549 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8550 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8551 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8552 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8553 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8554 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8555 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8556 #define OPTION_X32 (OPTION_MD_BASE + 14)
8557
8558 struct option md_longopts[] =
8559 {
8560 {"32", no_argument, NULL, OPTION_32},
8561 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8562 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8563 {"64", no_argument, NULL, OPTION_64},
8564 #endif
8565 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8566 {"x32", no_argument, NULL, OPTION_X32},
8567 #endif
8568 {"divide", no_argument, NULL, OPTION_DIVIDE},
8569 {"march", required_argument, NULL, OPTION_MARCH},
8570 {"mtune", required_argument, NULL, OPTION_MTUNE},
8571 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8572 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8573 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8574 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8575 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8576 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8577 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8578 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8579 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8580 {NULL, no_argument, NULL, 0}
8581 };
8582 size_t md_longopts_size = sizeof (md_longopts);
8583
8584 int
8585 md_parse_option (int c, char *arg)
8586 {
8587 unsigned int j;
8588 char *arch, *next;
8589
8590 switch (c)
8591 {
8592 case 'n':
8593 optimize_align_code = 0;
8594 break;
8595
8596 case 'q':
8597 quiet_warnings = 1;
8598 break;
8599
8600 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8601 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8602 should be emitted or not. FIXME: Not implemented. */
8603 case 'Q':
8604 break;
8605
8606 /* -V: SVR4 argument to print version ID. */
8607 case 'V':
8608 print_version_id ();
8609 break;
8610
8611 /* -k: Ignore for FreeBSD compatibility. */
8612 case 'k':
8613 break;
8614
8615 case 's':
8616 /* -s: On i386 Solaris, this tells the native assembler to use
8617 .stab instead of .stab.excl. We always use .stab anyhow. */
8618 break;
8619 #endif
8620 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8621 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8622 case OPTION_64:
8623 {
8624 const char **list, **l;
8625
8626 list = bfd_target_list ();
8627 for (l = list; *l != NULL; l++)
8628 if (CONST_STRNEQ (*l, "elf64-x86-64")
8629 || strcmp (*l, "coff-x86-64") == 0
8630 || strcmp (*l, "pe-x86-64") == 0
8631 || strcmp (*l, "pei-x86-64") == 0
8632 || strcmp (*l, "mach-o-x86-64") == 0)
8633 {
8634 default_arch = "x86_64";
8635 break;
8636 }
8637 if (*l == NULL)
8638 as_fatal (_("no compiled in support for x86_64"));
8639 free (list);
8640 }
8641 break;
8642 #endif
8643
8644 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8645 case OPTION_X32:
8646 if (IS_ELF)
8647 {
8648 const char **list, **l;
8649
8650 list = bfd_target_list ();
8651 for (l = list; *l != NULL; l++)
8652 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8653 {
8654 default_arch = "x86_64:32";
8655 break;
8656 }
8657 if (*l == NULL)
8658 as_fatal (_("no compiled in support for 32bit x86_64"));
8659 free (list);
8660 }
8661 else
8662 as_fatal (_("32bit x86_64 is only supported for ELF"));
8663 break;
8664 #endif
8665
8666 case OPTION_32:
8667 default_arch = "i386";
8668 break;
8669
8670 case OPTION_DIVIDE:
8671 #ifdef SVR4_COMMENT_CHARS
8672 {
8673 char *n, *t;
8674 const char *s;
8675
8676 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8677 t = n;
8678 for (s = i386_comment_chars; *s != '\0'; s++)
8679 if (*s != '/')
8680 *t++ = *s;
8681 *t = '\0';
8682 i386_comment_chars = n;
8683 }
8684 #endif
8685 break;
8686
8687 case OPTION_MARCH:
8688 arch = xstrdup (arg);
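      /* The argument is a '+'-separated list: a CPU name optionally
	 followed by ISA extension names, e.g. (illustratively)
	 `-march=i686+sse2'; each component is matched against cpu_arch
	 below.  */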
8689 do
8690 {
8691 if (*arch == '.')
8692 as_fatal (_("invalid -march= option: `%s'"), arg);
8693 next = strchr (arch, '+');
8694 if (next)
8695 *next++ = '\0';
8696 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8697 {
8698 if (strcmp (arch, cpu_arch [j].name) == 0)
8699 {
8700 /* Processor. */
8701 if (! cpu_arch[j].flags.bitfield.cpui386)
8702 continue;
8703
8704 cpu_arch_name = cpu_arch[j].name;
8705 cpu_sub_arch_name = NULL;
8706 cpu_arch_flags = cpu_arch[j].flags;
8707 cpu_arch_isa = cpu_arch[j].type;
8708 cpu_arch_isa_flags = cpu_arch[j].flags;
8709 if (!cpu_arch_tune_set)
8710 {
8711 cpu_arch_tune = cpu_arch_isa;
8712 cpu_arch_tune_flags = cpu_arch_isa_flags;
8713 }
8714 break;
8715 }
8716 else if (*cpu_arch [j].name == '.'
8717 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8718 {
8719 		  /* ISA extension.  */
8720 i386_cpu_flags flags;
8721
8722 if (!cpu_arch[j].negated)
8723 flags = cpu_flags_or (cpu_arch_flags,
8724 cpu_arch[j].flags);
8725 else
8726 flags = cpu_flags_and_not (cpu_arch_flags,
8727 cpu_arch[j].flags);
8728 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8729 {
8730 if (cpu_sub_arch_name)
8731 {
8732 char *name = cpu_sub_arch_name;
8733 cpu_sub_arch_name = concat (name,
8734 cpu_arch[j].name,
8735 (const char *) NULL);
8736 free (name);
8737 }
8738 else
8739 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8740 cpu_arch_flags = flags;
8741 cpu_arch_isa_flags = flags;
8742 }
8743 break;
8744 }
8745 }
8746
8747 if (j >= ARRAY_SIZE (cpu_arch))
8748 as_fatal (_("invalid -march= option: `%s'"), arg);
8749
8750 arch = next;
8751 }
8752       while (next != NULL);
8753 break;
8754
8755 case OPTION_MTUNE:
8756 if (*arg == '.')
8757 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8758 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8759 {
8760 if (strcmp (arg, cpu_arch [j].name) == 0)
8761 {
8762 cpu_arch_tune_set = 1;
8763 cpu_arch_tune = cpu_arch [j].type;
8764 cpu_arch_tune_flags = cpu_arch[j].flags;
8765 break;
8766 }
8767 }
8768 if (j >= ARRAY_SIZE (cpu_arch))
8769 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8770 break;
8771
8772 case OPTION_MMNEMONIC:
8773 if (strcasecmp (arg, "att") == 0)
8774 intel_mnemonic = 0;
8775 else if (strcasecmp (arg, "intel") == 0)
8776 intel_mnemonic = 1;
8777 else
8778 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8779 break;
8780
8781 case OPTION_MSYNTAX:
8782 if (strcasecmp (arg, "att") == 0)
8783 intel_syntax = 0;
8784 else if (strcasecmp (arg, "intel") == 0)
8785 intel_syntax = 1;
8786 else
8787 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8788 break;
8789
8790 case OPTION_MINDEX_REG:
8791 allow_index_reg = 1;
8792 break;
8793
8794 case OPTION_MNAKED_REG:
8795 allow_naked_reg = 1;
8796 break;
8797
8798 case OPTION_MOLD_GCC:
8799 old_gcc = 1;
8800 break;
8801
8802 case OPTION_MSSE2AVX:
8803 sse2avx = 1;
8804 break;
8805
8806 case OPTION_MSSE_CHECK:
8807 if (strcasecmp (arg, "error") == 0)
8808 sse_check = check_error;
8809 else if (strcasecmp (arg, "warning") == 0)
8810 sse_check = check_warning;
8811 else if (strcasecmp (arg, "none") == 0)
8812 sse_check = check_none;
8813 else
8814 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8815 break;
8816
8817 case OPTION_MOPERAND_CHECK:
8818 if (strcasecmp (arg, "error") == 0)
8819 operand_check = check_error;
8820 else if (strcasecmp (arg, "warning") == 0)
8821 operand_check = check_warning;
8822 else if (strcasecmp (arg, "none") == 0)
8823 operand_check = check_none;
8824 else
8825 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8826 break;
8827
8828 case OPTION_MAVXSCALAR:
8829 if (strcasecmp (arg, "128") == 0)
8830 avxscalar = vex128;
8831 else if (strcasecmp (arg, "256") == 0)
8832 avxscalar = vex256;
8833 else
8834 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8835 break;
8836
8837 default:
8838 return 0;
8839 }
8840 return 1;
8841 }
8842
8843 #define MESSAGE_TEMPLATE \
8844 " "
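/* MESSAGE_TEMPLATE is just a line of blanks; show_arch below uses its
   length as the output line width and formats the architecture names
   into a writable copy of it.  */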
8845
8846 static void
8847 show_arch (FILE *stream, int ext, int check)
8848 {
8849 static char message[] = MESSAGE_TEMPLATE;
8850 char *start = message + 27;
8851 char *p;
8852 int size = sizeof (MESSAGE_TEMPLATE);
8853 int left;
8854 const char *name;
8855 int len;
8856 unsigned int j;
8857
8858 p = start;
8859 left = size - (start - message);
8860 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8861 {
8862 /* Should it be skipped? */
8863 if (cpu_arch [j].skip)
8864 continue;
8865
8866 name = cpu_arch [j].name;
8867 len = cpu_arch [j].len;
8868 if (*name == '.')
8869 {
8870 /* It is an extension. Skip if we aren't asked to show it. */
8871 if (ext)
8872 {
8873 name++;
8874 len--;
8875 }
8876 else
8877 continue;
8878 }
8879 else if (ext)
8880 {
8881 	  /* It is a processor.  Skip if we only show extensions.  */
8882 continue;
8883 }
8884 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8885 {
8886 /* It is an impossible processor - skip. */
8887 continue;
8888 }
8889
8890 /* Reserve 2 spaces for ", " or ",\0" */
8891 left -= len + 2;
8892
8893 /* Check if there is any room. */
8894 if (left >= 0)
8895 {
8896 if (p != start)
8897 {
8898 *p++ = ',';
8899 *p++ = ' ';
8900 }
8901 p = mempcpy (p, name, len);
8902 }
8903 else
8904 {
8905 /* Output the current message now and start a new one. */
8906 *p++ = ',';
8907 *p = '\0';
8908 fprintf (stream, "%s\n", message);
8909 p = start;
8910 left = size - (start - message) - len - 2;
8911
8912 gas_assert (left >= 0);
8913
8914 p = mempcpy (p, name, len);
8915 }
8916 }
8917
8918 *p = '\0';
8919 fprintf (stream, "%s\n", message);
8920 }
8921
8922 void
8923 md_show_usage (FILE *stream)
8924 {
8925 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8926 fprintf (stream, _("\
8927 -Q ignored\n\
8928 -V print assembler version number\n\
8929 -k ignored\n"));
8930 #endif
8931 fprintf (stream, _("\
8932 -n Do not optimize code alignment\n\
8933 -q quieten some warnings\n"));
8934 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8935 fprintf (stream, _("\
8936 -s ignored\n"));
8937 #endif
8938 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8939 || defined (TE_PE) || defined (TE_PEP))
8940 fprintf (stream, _("\
8941 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8942 #endif
8943 #ifdef SVR4_COMMENT_CHARS
8944 fprintf (stream, _("\
8945 --divide do not treat `/' as a comment character\n"));
8946 #else
8947 fprintf (stream, _("\
8948 --divide ignored\n"));
8949 #endif
8950 fprintf (stream, _("\
8951 -march=CPU[,+EXTENSION...]\n\
8952 generate code for CPU and EXTENSION, CPU is one of:\n"));
8953 show_arch (stream, 0, 1);
8954 fprintf (stream, _("\
8955 EXTENSION is combination of:\n"));
8956 show_arch (stream, 1, 0);
8957 fprintf (stream, _("\
8958 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8959 show_arch (stream, 0, 0);
8960 fprintf (stream, _("\
8961 -msse2avx encode SSE instructions with VEX prefix\n"));
8962 fprintf (stream, _("\
8963 -msse-check=[none|error|warning]\n\
8964 check SSE instructions\n"));
8965 fprintf (stream, _("\
8966 -moperand-check=[none|error|warning]\n\
8967 check operand combinations for validity\n"));
8968 fprintf (stream, _("\
8969 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8970 length\n"));
8971 fprintf (stream, _("\
8972 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8973 fprintf (stream, _("\
8974 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8975 fprintf (stream, _("\
8976 -mindex-reg support pseudo index registers\n"));
8977 fprintf (stream, _("\
8978 -mnaked-reg don't require `%%' prefix for registers\n"));
8979 fprintf (stream, _("\
8980 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8981 }
8982
8983 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8984 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8985 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8986
8987 /* Pick the target format to use. */
8988
8989 const char *
8990 i386_target_format (void)
8991 {
8992 if (!strncmp (default_arch, "x86_64", 6))
8993 {
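      /* default_arch is either "x86_64" (the 64-bit ABI) or
	 "x86_64:32" (the x32 ABI selected by --x32 above).  */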
8994 update_code_flag (CODE_64BIT, 1);
8995 if (default_arch[6] == '\0')
8996 x86_elf_abi = X86_64_ABI;
8997 else
8998 x86_elf_abi = X86_64_X32_ABI;
8999 }
9000 else if (!strcmp (default_arch, "i386"))
9001 update_code_flag (CODE_32BIT, 1);
9002 else
9003 as_fatal (_("unknown architecture"));
9004
9005 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9006 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9007 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9008 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9009
9010 switch (OUTPUT_FLAVOR)
9011 {
9012 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9013 case bfd_target_aout_flavour:
9014 return AOUT_TARGET_FORMAT;
9015 #endif
9016 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9017 # if defined (TE_PE) || defined (TE_PEP)
9018 case bfd_target_coff_flavour:
9019 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9020 # elif defined (TE_GO32)
9021 case bfd_target_coff_flavour:
9022 return "coff-go32";
9023 # else
9024 case bfd_target_coff_flavour:
9025 return "coff-i386";
9026 # endif
9027 #endif
9028 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9029 case bfd_target_elf_flavour:
9030 {
9031 const char *format;
9032
9033 switch (x86_elf_abi)
9034 {
9035 default:
9036 format = ELF_TARGET_FORMAT;
9037 break;
9038 case X86_64_ABI:
9039 use_rela_relocations = 1;
9040 object_64bit = 1;
9041 format = ELF_TARGET_FORMAT64;
9042 break;
9043 case X86_64_X32_ABI:
9044 use_rela_relocations = 1;
9045 object_64bit = 1;
9046 disallow_64bit_reloc = 1;
9047 format = ELF_TARGET_FORMAT32;
9048 break;
9049 }
9050 if (cpu_arch_isa == PROCESSOR_L1OM)
9051 {
9052 if (x86_elf_abi != X86_64_ABI)
9053 as_fatal (_("Intel L1OM is 64bit only"));
9054 return ELF_TARGET_L1OM_FORMAT;
9055 }
9056 if (cpu_arch_isa == PROCESSOR_K1OM)
9057 {
9058 if (x86_elf_abi != X86_64_ABI)
9059 as_fatal (_("Intel K1OM is 64bit only"));
9060 return ELF_TARGET_K1OM_FORMAT;
9061 }
9062 else
9063 return format;
9064 }
9065 #endif
9066 #if defined (OBJ_MACH_O)
9067 case bfd_target_mach_o_flavour:
9068 if (flag_code == CODE_64BIT)
9069 {
9070 use_rela_relocations = 1;
9071 object_64bit = 1;
9072 return "mach-o-x86-64";
9073 }
9074 else
9075 return "mach-o-i386";
9076 #endif
9077 default:
9078 abort ();
9079 return NULL;
9080 }
9081 }
9082
9083 #endif /* OBJ_MAYBE_ more than one */
9084
9085 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9086 void
9087 i386_elf_emit_arch_note (void)
9088 {
9089 if (IS_ELF && cpu_arch_name != NULL)
9090 {
9091 char *p;
9092 asection *seg = now_seg;
9093 subsegT subseg = now_subseg;
9094 Elf_Internal_Note i_note;
9095 Elf_External_Note e_note;
9096 asection *note_secp;
9097 int len;
9098
9099 /* Create the .note section. */
9100 note_secp = subseg_new (".note", 0);
9101 bfd_set_section_flags (stdoutput,
9102 note_secp,
9103 SEC_HAS_CONTENTS | SEC_READONLY);
9104
9105 /* Process the arch string. */
9106 len = strlen (cpu_arch_name);
9107
9108 i_note.namesz = len + 1;
9109 i_note.descsz = 0;
9110 i_note.type = NT_ARCH;
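      /* Emit a standard ELF note: the namesz, descsz and type words,
	 then the NUL-terminated name (the arch string).  descsz is 0,
	 so no descriptor follows; frag_align below pads the note to a
	 4-byte boundary.  */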
9111 p = frag_more (sizeof (e_note.namesz));
9112 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9113 p = frag_more (sizeof (e_note.descsz));
9114 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9115 p = frag_more (sizeof (e_note.type));
9116 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9117 p = frag_more (len + 1);
9118 strcpy (p, cpu_arch_name);
9119
9120 frag_align (2, 0, 0);
9121
9122 subseg_set (seg, subseg);
9123 }
9124 }
9125 #endif
9126 \f
9127 symbolS *
9128 md_undefined_symbol (char *name)
9129 {
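  /* Cheap checks on the first few characters before paying for a full
     strcmp against GLOBAL_OFFSET_TABLE_NAME.  */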
9130 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9131 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9132 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9133 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9134 {
9135 if (!GOT_symbol)
9136 {
9137 if (symbol_find (name))
9138 as_bad (_("GOT already in symbol table"));
9139 GOT_symbol = symbol_new (name, undefined_section,
9140 (valueT) 0, &zero_address_frag);
9141 };
9142 return GOT_symbol;
9143 }
9144 return 0;
9145 }
9146
9147 /* Round up a section size to the appropriate boundary. */
9148
9149 valueT
9150 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9151 {
9152 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9153 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9154 {
9155 /* For a.out, force the section size to be aligned. If we don't do
9156 this, BFD will align it for us, but it will not write out the
9157 final bytes of the section. This may be a bug in BFD, but it is
9158 easier to fix it here since that is how the other a.out targets
9159 work. */
9160 int align;
9161
9162 align = bfd_get_section_alignment (stdoutput, segment);
9163 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9164 }
9165 #endif
9166
9167 return size;
9168 }
9169
9170 /* On the i386, PC-relative offsets are relative to the start of the
9171 next instruction. That is, the address of the offset, plus its
9172 size, since the offset is always the last part of the insn. */
9173
9174 long
9175 md_pcrel_from (fixS *fixP)
9176 {
9177 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9178 }
9179
9180 #ifndef I386COFF
9181
9182 static void
9183 s_bss (int ignore ATTRIBUTE_UNUSED)
9184 {
9185 int temp;
9186
9187 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9188 if (IS_ELF)
9189 obj_elf_section_change_hook ();
9190 #endif
9191 temp = get_absolute_expression ();
9192 subseg_set (bss_section, (subsegT) temp);
9193 demand_empty_rest_of_line ();
9194 }
9195
9196 #endif
9197
9198 void
9199 i386_validate_fix (fixS *fixp)
9200 {
9201 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9202 {
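      /* The fixup subtracts the GOT symbol (_GLOBAL_OFFSET_TABLE_);
	 rewrite it as the corresponding GOT-relative relocation and
	 drop the subtracted symbol.  */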
9203 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9204 {
9205 if (!object_64bit)
9206 abort ();
9207 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9208 }
9209 else
9210 {
9211 if (!object_64bit)
9212 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9213 else
9214 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9215 }
9216 fixp->fx_subsy = 0;
9217 }
9218 }
9219
9220 arelent *
9221 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9222 {
9223 arelent *rel;
9224 bfd_reloc_code_real_type code;
9225
9226 switch (fixp->fx_r_type)
9227 {
9228 case BFD_RELOC_X86_64_PLT32:
9229 case BFD_RELOC_X86_64_GOT32:
9230 case BFD_RELOC_X86_64_GOTPCREL:
9231 case BFD_RELOC_386_PLT32:
9232 case BFD_RELOC_386_GOT32:
9233 case BFD_RELOC_386_GOTOFF:
9234 case BFD_RELOC_386_GOTPC:
9235 case BFD_RELOC_386_TLS_GD:
9236 case BFD_RELOC_386_TLS_LDM:
9237 case BFD_RELOC_386_TLS_LDO_32:
9238 case BFD_RELOC_386_TLS_IE_32:
9239 case BFD_RELOC_386_TLS_IE:
9240 case BFD_RELOC_386_TLS_GOTIE:
9241 case BFD_RELOC_386_TLS_LE_32:
9242 case BFD_RELOC_386_TLS_LE:
9243 case BFD_RELOC_386_TLS_GOTDESC:
9244 case BFD_RELOC_386_TLS_DESC_CALL:
9245 case BFD_RELOC_X86_64_TLSGD:
9246 case BFD_RELOC_X86_64_TLSLD:
9247 case BFD_RELOC_X86_64_DTPOFF32:
9248 case BFD_RELOC_X86_64_DTPOFF64:
9249 case BFD_RELOC_X86_64_GOTTPOFF:
9250 case BFD_RELOC_X86_64_TPOFF32:
9251 case BFD_RELOC_X86_64_TPOFF64:
9252 case BFD_RELOC_X86_64_GOTOFF64:
9253 case BFD_RELOC_X86_64_GOTPC32:
9254 case BFD_RELOC_X86_64_GOT64:
9255 case BFD_RELOC_X86_64_GOTPCREL64:
9256 case BFD_RELOC_X86_64_GOTPC64:
9257 case BFD_RELOC_X86_64_GOTPLT64:
9258 case BFD_RELOC_X86_64_PLTOFF64:
9259 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9260 case BFD_RELOC_X86_64_TLSDESC_CALL:
9261 case BFD_RELOC_RVA:
9262 case BFD_RELOC_VTABLE_ENTRY:
9263 case BFD_RELOC_VTABLE_INHERIT:
9264 #ifdef TE_PE
9265 case BFD_RELOC_32_SECREL:
9266 #endif
9267 code = fixp->fx_r_type;
9268 break;
9269 case BFD_RELOC_X86_64_32S:
9270 if (!fixp->fx_pcrel)
9271 {
9272 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9273 code = fixp->fx_r_type;
9274 break;
9275 }
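      /* Fall through for the PC-relative case.  */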
9276 default:
9277 if (fixp->fx_pcrel)
9278 {
9279 switch (fixp->fx_size)
9280 {
9281 default:
9282 as_bad_where (fixp->fx_file, fixp->fx_line,
9283 _("can not do %d byte pc-relative relocation"),
9284 fixp->fx_size);
9285 code = BFD_RELOC_32_PCREL;
9286 break;
9287 case 1: code = BFD_RELOC_8_PCREL; break;
9288 case 2: code = BFD_RELOC_16_PCREL; break;
9289 case 4: code = BFD_RELOC_32_PCREL; break;
9290 #ifdef BFD64
9291 case 8: code = BFD_RELOC_64_PCREL; break;
9292 #endif
9293 }
9294 }
9295 else
9296 {
9297 switch (fixp->fx_size)
9298 {
9299 default:
9300 as_bad_where (fixp->fx_file, fixp->fx_line,
9301 _("can not do %d byte relocation"),
9302 fixp->fx_size);
9303 code = BFD_RELOC_32;
9304 break;
9305 case 1: code = BFD_RELOC_8; break;
9306 case 2: code = BFD_RELOC_16; break;
9307 case 4: code = BFD_RELOC_32; break;
9308 #ifdef BFD64
9309 case 8: code = BFD_RELOC_64; break;
9310 #endif
9311 }
9312 }
9313 break;
9314 }
9315
9316 if ((code == BFD_RELOC_32
9317 || code == BFD_RELOC_32_PCREL
9318 || code == BFD_RELOC_X86_64_32S)
9319 && GOT_symbol
9320 && fixp->fx_addsy == GOT_symbol)
9321 {
9322 if (!object_64bit)
9323 code = BFD_RELOC_386_GOTPC;
9324 else
9325 code = BFD_RELOC_X86_64_GOTPC32;
9326 }
9327 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9328 && GOT_symbol
9329 && fixp->fx_addsy == GOT_symbol)
9330 {
9331 code = BFD_RELOC_X86_64_GOTPC64;
9332 }
9333
9334 rel = (arelent *) xmalloc (sizeof (arelent));
9335 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9336 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9337
9338 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9339
9340 if (!use_rela_relocations)
9341 {
9342 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9343 vtable entry to be used in the relocation's section offset. */
9344 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9345 rel->address = fixp->fx_offset;
9346 #if defined (OBJ_COFF) && defined (TE_PE)
9347 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9348 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9349 else
9350 #endif
9351 rel->addend = 0;
9352 }
9353   /* Use rela relocations in 64bit mode.  */
9354 else
9355 {
9356 if (disallow_64bit_reloc)
9357 switch (code)
9358 {
9359 case BFD_RELOC_X86_64_DTPOFF64:
9360 case BFD_RELOC_X86_64_TPOFF64:
9361 case BFD_RELOC_64_PCREL:
9362 case BFD_RELOC_X86_64_GOTOFF64:
9363 case BFD_RELOC_X86_64_GOT64:
9364 case BFD_RELOC_X86_64_GOTPCREL64:
9365 case BFD_RELOC_X86_64_GOTPC64:
9366 case BFD_RELOC_X86_64_GOTPLT64:
9367 case BFD_RELOC_X86_64_PLTOFF64:
9368 as_bad_where (fixp->fx_file, fixp->fx_line,
9369 _("cannot represent relocation type %s in x32 mode"),
9370 bfd_get_reloc_code_name (code));
9371 break;
9372 default:
9373 break;
9374 }
9375
9376 if (!fixp->fx_pcrel)
9377 rel->addend = fixp->fx_offset;
9378 else
9379 switch (code)
9380 {
9381 case BFD_RELOC_X86_64_PLT32:
9382 case BFD_RELOC_X86_64_GOT32:
9383 case BFD_RELOC_X86_64_GOTPCREL:
9384 case BFD_RELOC_X86_64_TLSGD:
9385 case BFD_RELOC_X86_64_TLSLD:
9386 case BFD_RELOC_X86_64_GOTTPOFF:
9387 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9388 case BFD_RELOC_X86_64_TLSDESC_CALL:
9389 rel->addend = fixp->fx_offset - fixp->fx_size;
9390 break;
9391 default:
9392 rel->addend = (section->vma
9393 - fixp->fx_size
9394 + fixp->fx_addnumber
9395 + md_pcrel_from (fixp));
9396 break;
9397 }
9398 }
9399
9400 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9401 if (rel->howto == NULL)
9402 {
9403 as_bad_where (fixp->fx_file, fixp->fx_line,
9404 _("cannot represent relocation type %s"),
9405 bfd_get_reloc_code_name (code));
9406 /* Set howto to a garbage value so that we can keep going. */
9407 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9408 gas_assert (rel->howto != NULL);
9409 }
9410
9411 return rel;
9412 }
9413
9414 #include "tc-i386-intel.c"
9415
9416 void
9417 tc_x86_parse_to_dw2regnum (expressionS *exp)
9418 {
9419 int saved_naked_reg;
9420 char saved_register_dot;
9421
9422 saved_naked_reg = allow_naked_reg;
9423 allow_naked_reg = 1;
9424 saved_register_dot = register_chars['.'];
9425 register_chars['.'] = '.';
9426 allow_pseudo_reg = 1;
9427 expression_and_evaluate (exp);
9428 allow_pseudo_reg = 0;
9429 register_chars['.'] = saved_register_dot;
9430 allow_naked_reg = saved_naked_reg;
9431
9432 if (exp->X_op == O_register && exp->X_add_number >= 0)
9433 {
9434 if ((addressT) exp->X_add_number < i386_regtab_size)
9435 {
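	  /* dw2_regnum holds one DWARF register number per numbering
	     family; indexing by flag_code >> 1 picks the 64-bit column
	     only for CODE_64BIT (an assumption based on the same
	     indexing of the esp/rsp table below).  */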
9436 exp->X_op = O_constant;
9437 exp->X_add_number = i386_regtab[exp->X_add_number]
9438 .dw2_regnum[flag_code >> 1];
9439 }
9440 else
9441 exp->X_op = O_illegal;
9442 }
9443 }
9444
9445 void
9446 tc_x86_frame_initial_instructions (void)
9447 {
9448 static unsigned int sp_regno[2];
9449
9450 if (!sp_regno[flag_code >> 1])
9451 {
9452 char *saved_input = input_line_pointer;
9453 char sp[][4] = {"esp", "rsp"};
9454 expressionS exp;
9455
9456 input_line_pointer = sp[flag_code >> 1];
9457 tc_x86_parse_to_dw2regnum (&exp);
9458 gas_assert (exp.X_op == O_constant);
9459 sp_regno[flag_code >> 1] = exp.X_add_number;
9460 input_line_pointer = saved_input;
9461 }
9462
9463 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9464 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9465 }
9466
9467 int
9468 x86_dwarf2_addr_size (void)
9469 {
9470 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9471 if (x86_elf_abi == X86_64_X32_ABI)
9472 return 4;
9473 #endif
9474 return bfd_arch_bits_per_address (stdoutput) / 8;
9475 }
9476
9477 int
9478 i386_elf_section_type (const char *str, size_t len)
9479 {
9480 if (flag_code == CODE_64BIT
9481 && len == sizeof ("unwind") - 1
9482 && strncmp (str, "unwind", 6) == 0)
9483 return SHT_X86_64_UNWIND;
9484
9485 return -1;
9486 }
9487
9488 #ifdef TE_SOLARIS
9489 void
9490 i386_solaris_fix_up_eh_frame (segT sec)
9491 {
9492 if (flag_code == CODE_64BIT)
9493 elf_section_type (sec) = SHT_X86_64_UNWIND;
9494 }
9495 #endif
9496
9497 #ifdef TE_PE
9498 void
9499 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9500 {
9501 expressionS exp;
9502
9503 exp.X_op = O_secrel;
9504 exp.X_add_symbol = symbol;
9505 exp.X_add_number = 0;
9506 emit_expr (&exp, size);
9507 }
9508 #endif
9509
9510 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9511 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9512
9513 bfd_vma
9514 x86_64_section_letter (int letter, char **ptr_msg)
9515 {
9516 if (flag_code == CODE_64BIT)
9517 {
9518 if (letter == 'l')
9519 return SHF_X86_64_LARGE;
9520
9521 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9522 }
9523 else
9524 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9525 return -1;
9526 }
9527
9528 bfd_vma
9529 x86_64_section_word (char *str, size_t len)
9530 {
9531 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9532 return SHF_X86_64_LARGE;
9533
9534 return -1;
9535 }
9536
9537 static void
9538 handle_large_common (int small ATTRIBUTE_UNUSED)
9539 {
9540 if (flag_code != CODE_64BIT)
9541 {
9542 s_comm_internal (0, elf_common_parse);
9543 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9544 }
9545 else
9546 {
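      /* Behave like .comm, but temporarily redirect common symbols to
	 the large data model: true commons go to
	 _bfd_elf_large_com_section and local ones to .lbss, after which
	 the saved globals are restored.  */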
9547 static segT lbss_section;
9548 asection *saved_com_section_ptr = elf_com_section_ptr;
9549 asection *saved_bss_section = bss_section;
9550
9551 if (lbss_section == NULL)
9552 {
9553 flagword applicable;
9554 segT seg = now_seg;
9555 subsegT subseg = now_subseg;
9556
9557 /* The .lbss section is for local .largecomm symbols. */
9558 lbss_section = subseg_new (".lbss", 0);
9559 applicable = bfd_applicable_section_flags (stdoutput);
9560 bfd_set_section_flags (stdoutput, lbss_section,
9561 applicable & SEC_ALLOC);
9562 seg_info (lbss_section)->bss = 1;
9563
9564 subseg_set (seg, subseg);
9565 }
9566
9567 elf_com_section_ptr = &_bfd_elf_large_com_section;
9568 bss_section = lbss_section;
9569
9570 s_comm_internal (0, elf_common_parse);
9571
9572 elf_com_section_ptr = saved_com_section_ptr;
9573 bss_section = saved_bss_section;
9574 }
9575 }
9576 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */