1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
 60    WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
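/* Illustrative sketch (not part of the original sources): an insn that
   carries a %fs override (0x64), an address-size prefix (0x67) and a
   lock prefix (0xf0) stores them in i.prefix[SEG_PREFIX],
   i.prefix[ADDR_PREFIX] and i.prefix[LOCK_PREFIX]; emitting the slots
   in index order then yields 64 67 f0 before the opcode bytes.  */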
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
 88 /* Intel Syntax.  Use a non-ascii letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
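/* For example (illustrative): looking "mov" up in the op_hash table
   yields one 'templates' struct; [start, end) then spans every "mov"
   insn_template, and match_template () walks that range to find the
   variant whose operand types fit the operands actually given.  */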
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
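/* Worked example (illustrative): "movl %ebx, %eax" assembles to 89 d8.
   Opcode 0x89 takes the source in 'reg' and the destination in
   'regmem', so mode = 3 (register direct), reg = 3 (%ebx) and
   regmem = 0 (%eax): (3 << 6) | (3 << 3) | 0 = 0xd8.  */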
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
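/* Worked example (illustrative): the memory operand (%eax,%ebx,4)
   gives base = 0 (%eax), index = 3 (%ebx) and scale = 2 (log2 of 4),
   so the SIB byte is (2 << 6) | (3 << 3) | 0 = 0x98.  */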
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
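/* For example (illustrative): a two byte VEX encoding such as c5 f8
   (as in vzeroupper, c5 f8 77) occupies bytes[0..1] with length 2;
   forms that need REX.X, REX.B, REX.W or the 0f38/0f3a opcode maps
   must use the three byte c4 xx xx encoding instead.  */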
200
 201 /* 'md_assemble ()' gathers together information and puts it into an
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
226 };
227
228 struct _i386_insn
229 {
 230 /* TM holds the template for the insn we're currently assembling.  */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* Have HLE prefix. */
294 unsigned int have_hle;
295
296 /* Error message. */
297 enum i386_error error;
298 };
299
300 typedef struct _i386_insn i386_insn;
301
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
305 #ifdef LEX_AT
306 "@"
307 #endif
308 #ifdef LEX_QM
309 "?"
310 #endif
311 ;
312
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
328
329 #else
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
332 #endif
333
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
343
344 const char line_separator_chars[] = ";";
345
 346 /* Chars that can be used to separate the mantissa from the exponent in
 347 floating point numbers.  */
348 const char EXP_CHARS[] = "eE";
349
350 /* Chars that mean this number is a floating point constant
351 As in 0f12.456
352 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
354
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
361
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
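/* Typical use (illustrative): a parser temporarily terminates a
   sub-field before a hash lookup and then puts the byte back, e.g.

     END_STRING_AND_SAVE (l);
     current_templates = (const templates *) hash_find (op_hash, token_start);
     RESTORE_END_STRING (l);

   so the caller's string is returned unaltered.  */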
383
384 /* The instruction we're assembling. */
385 static i386_insn i;
386
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
389
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393
394 /* Current operand we are working on. */
395 static int this_operand = -1;
396
 397 /* We support three different modes.  FLAG_CODE variable is used to distinguish
398 these. */
399
400 enum flag_code {
401 CODE_32BIT,
402 CODE_16BIT,
403 CODE_64BIT };
404
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
409
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413
414 /* The ELF ABI to use. */
415 enum x86_elf_abi
416 {
417 I386_ABI,
418 X86_64_ABI,
419 X86_64_X32_ABI
420 };
421
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
423 #endif
424
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
427 {
428 "32",
429 "16",
430 "64"
431 };
432
433 /* 1 for intel syntax,
434 0 if att syntax. */
435 static int intel_syntax = 0;
436
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
440
 441 /* 1 to support old (<= 2.8.1) versions of gcc.  */
442 static int old_gcc = OLDGCC_COMPAT;
443
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
446
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
449
 450 /* 1 if the pseudo index register, eiz/riz, is allowed.  */
451 static int allow_index_reg = 0;
452
453 static enum check_kind
454 {
455 check_none = 0,
456 check_warning,
457 check_error
458 }
459 sse_check, operand_check = check_warning;
460
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
463
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
468
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
471
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
474
475 /* CPU name. */
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
478
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
484
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
490
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
496
497 /* If set, conditional jumps are not automatically promoted to handle
 498 offsets larger than a byte.  */
499 static unsigned int no_cond_jump_promotion = 0;
500
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
503
504 /* Encode scalar AVX instructions with specific vector length. */
505 static enum
506 {
507 vex128 = 0,
508 vex256
509 } avxscalar;
510
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
513
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
516
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
519
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
524
525 /* Types. */
526 #define UNCOND_JUMP 0
527 #define COND_JUMP 1
528 #define COND_JUMP86 2
529
530 /* Sizes. */
531 #define CODE16 1
532 #define SMALL 0
533 #define SMALL16 (SMALL | CODE16)
534 #define BIG 2
535 #define BIG16 (BIG | CODE16)
536
537 #ifndef INLINE
538 #ifdef __GNUC__
539 #define INLINE __inline__
540 #else
541 #define INLINE
542 #endif
543 #endif
544
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
548 ((s) >> 2)
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
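/* Worked example (illustrative): ENCODE_RELAX_STATE (COND_JUMP, SMALL16)
   is (1 << 2) | (SMALL | CODE16) = 5; TYPE_FROM_RELAX_STATE (5) gives
   back COND_JUMP, and DISP_SIZE_FROM_RELAX_STATE (5) is 1 because the
   size bits are neither BIG nor BIG16.  */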
551
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work, unless the destination is in the bottom
558 64k of the code segment (The top 16 bits of eip are zeroed). */
559
560 const relax_typeS md_relax_table[] =
561 {
562 /* The fields are:
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
567
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
573 {0, 0, 4, 0},
 574 /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
576 {0, 0, 2, 0},
577
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
 581 /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
583 {0, 0, 5, 0},
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
586 {0, 0, 3, 0},
587
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
 591 /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
593 {0, 0, 5, 0},
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
596 {0, 0, 4, 0}
597 };
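/* Reading the table (illustrative): ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)
   is 2, and entry 2 above is {0, 0, 4, 0}: a dword jmp contributes 4
   variable bytes (its displacement) and, having no next state, can grow
   no further.  */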
598
599 static const arch_entry cpu_arch[] =
600 {
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
668 CPU_BTVER1_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
670 CPU_BTVER2_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
672 CPU_8087_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
674 CPU_287_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
676 CPU_387_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
678 CPU_ANY87_FLAGS, 0, 1 },
679 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
680 CPU_MMX_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
682 CPU_3DNOWA_FLAGS, 0, 1 },
683 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
684 CPU_SSE_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
686 CPU_SSE2_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
688 CPU_SSE3_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
690 CPU_SSSE3_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
692 CPU_SSE4_1_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
694 CPU_SSE4_2_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
696 CPU_SSE4_2_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
698 CPU_ANY_SSE_FLAGS, 0, 1 },
699 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
700 CPU_AVX_FLAGS, 0, 0 },
701 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
702 CPU_AVX2_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
704 CPU_ANY_AVX_FLAGS, 0, 1 },
705 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
706 CPU_VMX_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
708 CPU_VMFUNC_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
710 CPU_SMX_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
712 CPU_XSAVE_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
714 CPU_XSAVEOPT_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
716 CPU_AES_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
718 CPU_PCLMUL_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
720 CPU_PCLMUL_FLAGS, 1, 0 },
721 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
722 CPU_FSGSBASE_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
724 CPU_RDRND_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
726 CPU_F16C_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
728 CPU_BMI2_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
730 CPU_FMA_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
732 CPU_FMA4_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
734 CPU_XOP_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
736 CPU_LWP_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
738 CPU_MOVBE_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
740 CPU_EPT_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
742 CPU_LZCNT_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
744 CPU_HLE_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
746 CPU_RTM_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
748 CPU_INVPCID_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
750 CPU_CLFLUSH_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
752 CPU_NOP_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
754 CPU_SYSCALL_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
756 CPU_RDTSCP_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
758 CPU_3DNOW_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
760 CPU_3DNOWA_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
762 CPU_PADLOCK_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
764 CPU_SVME_FLAGS, 1, 0 },
765 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
766 CPU_SVME_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
768 CPU_SSE4A_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
770 CPU_ABM_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
772 CPU_BMI_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
774 CPU_TBM_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
776 CPU_ADX_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
778 CPU_RDSEED_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
780 CPU_PRFCHW_FLAGS, 0, 0 },
781 };
782
783 #ifdef I386COFF
784 /* Like s_lcomm_internal in gas/read.c but the alignment string
785 is allowed to be optional. */
786
787 static symbolS *
788 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
789 {
790 addressT align = 0;
791
792 SKIP_WHITESPACE ();
793
794 if (needs_align
795 && *input_line_pointer == ',')
796 {
797 align = parse_align (needs_align - 1);
798
799 if (align == (addressT) -1)
800 return NULL;
801 }
802 else
803 {
804 if (size >= 8)
805 align = 3;
806 else if (size >= 4)
807 align = 2;
808 else if (size >= 2)
809 align = 1;
810 else
811 align = 0;
812 }
813
814 bss_alloc (symbolP, size, align);
815 return symbolP;
816 }
817
818 static void
819 pe_lcomm (int needs_align)
820 {
821 s_comm_internal (needs_align * 2, pe_lcomm_internal);
822 }
823 #endif
824
825 const pseudo_typeS md_pseudo_table[] =
826 {
827 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
828 {"align", s_align_bytes, 0},
829 #else
830 {"align", s_align_ptwo, 0},
831 #endif
832 {"arch", set_cpu_arch, 0},
833 #ifndef I386COFF
834 {"bss", s_bss, 0},
835 #else
836 {"lcomm", pe_lcomm, 1},
837 #endif
838 {"ffloat", float_cons, 'f'},
839 {"dfloat", float_cons, 'd'},
840 {"tfloat", float_cons, 'x'},
841 {"value", cons, 2},
842 {"slong", signed_cons, 4},
843 {"noopt", s_ignore, 0},
844 {"optim", s_ignore, 0},
845 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
846 {"code16", set_code_flag, CODE_16BIT},
847 {"code32", set_code_flag, CODE_32BIT},
848 {"code64", set_code_flag, CODE_64BIT},
849 {"intel_syntax", set_intel_syntax, 1},
850 {"att_syntax", set_intel_syntax, 0},
851 {"intel_mnemonic", set_intel_mnemonic, 1},
852 {"att_mnemonic", set_intel_mnemonic, 0},
853 {"allow_index_reg", set_allow_index_reg, 1},
854 {"disallow_index_reg", set_allow_index_reg, 0},
855 {"sse_check", set_check, 0},
856 {"operand_check", set_check, 1},
857 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
858 {"largecomm", handle_large_common, 0},
859 #else
860 {"file", (void (*) (int)) dwarf2_directive_file, 0},
861 {"loc", dwarf2_directive_loc, 0},
862 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
863 #endif
864 #ifdef TE_PE
865 {"secrel32", pe_directive_secrel, 0},
866 #endif
867 {0, 0, 0}
868 };
869
870 /* For interface with expression (). */
871 extern char *input_line_pointer;
872
873 /* Hash table for instruction mnemonic lookup. */
874 static struct hash_control *op_hash;
875
876 /* Hash table for register lookup. */
877 static struct hash_control *reg_hash;
878 \f
879 void
880 i386_align_code (fragS *fragP, int count)
881 {
882 /* Various efficient no-op patterns for aligning code labels.
883 Note: Don't try to assemble the instructions in the comments.
884 0L and 0w are not legal. */
885 static const char f32_1[] =
886 {0x90}; /* nop */
887 static const char f32_2[] =
888 {0x66,0x90}; /* xchg %ax,%ax */
889 static const char f32_3[] =
890 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
891 static const char f32_4[] =
892 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
893 static const char f32_5[] =
894 {0x90, /* nop */
895 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
896 static const char f32_6[] =
897 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
898 static const char f32_7[] =
899 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
900 static const char f32_8[] =
901 {0x90, /* nop */
902 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
903 static const char f32_9[] =
904 {0x89,0xf6, /* movl %esi,%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_10[] =
907 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
908 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
909 static const char f32_11[] =
910 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
911 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
912 static const char f32_12[] =
913 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
914 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
915 static const char f32_13[] =
916 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
917 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
918 static const char f32_14[] =
919 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
920 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
921 static const char f16_3[] =
922 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
923 static const char f16_4[] =
924 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
925 static const char f16_5[] =
926 {0x90, /* nop */
927 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
928 static const char f16_6[] =
929 {0x89,0xf6, /* mov %si,%si */
930 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
931 static const char f16_7[] =
932 {0x8d,0x74,0x00, /* lea 0(%si),%si */
933 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
934 static const char f16_8[] =
935 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
936 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
937 static const char jump_31[] =
938 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
939 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
940 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
941 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
942 static const char *const f32_patt[] = {
943 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
944 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
945 };
946 static const char *const f16_patt[] = {
947 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
948 };
949 /* nopl (%[re]ax) */
950 static const char alt_3[] =
951 {0x0f,0x1f,0x00};
952 /* nopl 0(%[re]ax) */
953 static const char alt_4[] =
954 {0x0f,0x1f,0x40,0x00};
955 /* nopl 0(%[re]ax,%[re]ax,1) */
956 static const char alt_5[] =
957 {0x0f,0x1f,0x44,0x00,0x00};
958 /* nopw 0(%[re]ax,%[re]ax,1) */
959 static const char alt_6[] =
960 {0x66,0x0f,0x1f,0x44,0x00,0x00};
961 /* nopl 0L(%[re]ax) */
962 static const char alt_7[] =
963 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
964 /* nopl 0L(%[re]ax,%[re]ax,1) */
965 static const char alt_8[] =
966 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
967 /* nopw 0L(%[re]ax,%[re]ax,1) */
968 static const char alt_9[] =
969 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
970 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
971 static const char alt_10[] =
972 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* data16
974 nopw %cs:0L(%[re]ax,%[re]ax,1) */
975 static const char alt_long_11[] =
976 {0x66,
977 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
978 /* data16
979 data16
980 nopw %cs:0L(%[re]ax,%[re]ax,1) */
981 static const char alt_long_12[] =
982 {0x66,
983 0x66,
984 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
985 /* data16
986 data16
987 data16
988 nopw %cs:0L(%[re]ax,%[re]ax,1) */
989 static const char alt_long_13[] =
990 {0x66,
991 0x66,
992 0x66,
993 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
994 /* data16
995 data16
996 data16
997 data16
998 nopw %cs:0L(%[re]ax,%[re]ax,1) */
999 static const char alt_long_14[] =
1000 {0x66,
1001 0x66,
1002 0x66,
1003 0x66,
1004 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1005 /* data16
1006 data16
1007 data16
1008 data16
1009 data16
1010 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1011 static const char alt_long_15[] =
1012 {0x66,
1013 0x66,
1014 0x66,
1015 0x66,
1016 0x66,
1017 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1018 /* nopl 0(%[re]ax,%[re]ax,1)
1019 nopw 0(%[re]ax,%[re]ax,1) */
1020 static const char alt_short_11[] =
1021 {0x0f,0x1f,0x44,0x00,0x00,
1022 0x66,0x0f,0x1f,0x44,0x00,0x00};
1023 /* nopw 0(%[re]ax,%[re]ax,1)
1024 nopw 0(%[re]ax,%[re]ax,1) */
1025 static const char alt_short_12[] =
1026 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1027 0x66,0x0f,0x1f,0x44,0x00,0x00};
1028 /* nopw 0(%[re]ax,%[re]ax,1)
1029 nopl 0L(%[re]ax) */
1030 static const char alt_short_13[] =
1031 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1032 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1033 /* nopl 0L(%[re]ax)
1034 nopl 0L(%[re]ax) */
1035 static const char alt_short_14[] =
1036 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1037 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1038 /* nopl 0L(%[re]ax)
1039 nopl 0L(%[re]ax,%[re]ax,1) */
1040 static const char alt_short_15[] =
1041 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1042 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1043 static const char *const alt_short_patt[] = {
1044 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1045 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1046 alt_short_14, alt_short_15
1047 };
1048 static const char *const alt_long_patt[] = {
1049 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1050 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1051 alt_long_14, alt_long_15
1052 };
1053
 1054 /* Only align for a positive count no larger than MAX_MEM_FOR_RS_ALIGN_CODE.  */
1055 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1056 return;
1057
1058 /* We need to decide which NOP sequence to use for 32bit and
1059 64bit. When -mtune= is used:
1060
1061 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1062 PROCESSOR_GENERIC32, f32_patt will be used.
1063 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1064 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1065 PROCESSOR_GENERIC64, alt_long_patt will be used.
1066 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1067 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1068 will be used.
1069
1070 When -mtune= isn't used, alt_long_patt will be used if
1071 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1072 be used.
1073
1074 When -march= or .arch is used, we can't use anything beyond
1075 cpu_arch_isa_flags. */
1076
1077 if (flag_code == CODE_16BIT)
1078 {
1079 if (count > 8)
1080 {
1081 memcpy (fragP->fr_literal + fragP->fr_fix,
1082 jump_31, count);
1083 /* Adjust jump offset. */
1084 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1085 }
1086 else
1087 memcpy (fragP->fr_literal + fragP->fr_fix,
1088 f16_patt[count - 1], count);
1089 }
1090 else
1091 {
1092 const char *const *patt = NULL;
1093
1094 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1095 {
1096 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1097 switch (cpu_arch_tune)
1098 {
1099 case PROCESSOR_UNKNOWN:
1100 /* We use cpu_arch_isa_flags to check if we SHOULD
1101 optimize with nops. */
1102 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1103 patt = alt_long_patt;
1104 else
1105 patt = f32_patt;
1106 break;
1107 case PROCESSOR_PENTIUM4:
1108 case PROCESSOR_NOCONA:
1109 case PROCESSOR_CORE:
1110 case PROCESSOR_CORE2:
1111 case PROCESSOR_COREI7:
1112 case PROCESSOR_L1OM:
1113 case PROCESSOR_K1OM:
1114 case PROCESSOR_GENERIC64:
1115 patt = alt_long_patt;
1116 break;
1117 case PROCESSOR_K6:
1118 case PROCESSOR_ATHLON:
1119 case PROCESSOR_K8:
1120 case PROCESSOR_AMDFAM10:
1121 case PROCESSOR_BD:
1122 case PROCESSOR_BT:
1123 patt = alt_short_patt;
1124 break;
1125 case PROCESSOR_I386:
1126 case PROCESSOR_I486:
1127 case PROCESSOR_PENTIUM:
1128 case PROCESSOR_PENTIUMPRO:
1129 case PROCESSOR_GENERIC32:
1130 patt = f32_patt;
1131 break;
1132 }
1133 }
1134 else
1135 {
1136 switch (fragP->tc_frag_data.tune)
1137 {
1138 case PROCESSOR_UNKNOWN:
1139 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1140 PROCESSOR_UNKNOWN. */
1141 abort ();
1142 break;
1143
1144 case PROCESSOR_I386:
1145 case PROCESSOR_I486:
1146 case PROCESSOR_PENTIUM:
1147 case PROCESSOR_K6:
1148 case PROCESSOR_ATHLON:
1149 case PROCESSOR_K8:
1150 case PROCESSOR_AMDFAM10:
1151 case PROCESSOR_BD:
1152 case PROCESSOR_BT:
1153 case PROCESSOR_GENERIC32:
1154 /* We use cpu_arch_isa_flags to check if we CAN optimize
1155 with nops. */
1156 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1157 patt = alt_short_patt;
1158 else
1159 patt = f32_patt;
1160 break;
1161 case PROCESSOR_PENTIUMPRO:
1162 case PROCESSOR_PENTIUM4:
1163 case PROCESSOR_NOCONA:
1164 case PROCESSOR_CORE:
1165 case PROCESSOR_CORE2:
1166 case PROCESSOR_COREI7:
1167 case PROCESSOR_L1OM:
1168 case PROCESSOR_K1OM:
1169 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1170 patt = alt_long_patt;
1171 else
1172 patt = f32_patt;
1173 break;
1174 case PROCESSOR_GENERIC64:
1175 patt = alt_long_patt;
1176 break;
1177 }
1178 }
1179
1180 if (patt == f32_patt)
1181 {
 1182 /* If the padding is below the limit (15 bytes, or 3 bytes in
 1183 64bit mode), we use the normal patterns.  Otherwise, we use
 1184 a jump instruction and adjust its offset.  */
1185 int limit;
1186
1187 /* For 64bit, the limit is 3 bytes. */
1188 if (flag_code == CODE_64BIT
1189 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1190 limit = 3;
1191 else
1192 limit = 15;
1193 if (count < limit)
1194 memcpy (fragP->fr_literal + fragP->fr_fix,
1195 patt[count - 1], count);
1196 else
1197 {
1198 memcpy (fragP->fr_literal + fragP->fr_fix,
1199 jump_31, count);
1200 /* Adjust jump offset. */
1201 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1202 }
1203 }
1204 else
1205 {
 1206 /* The maximum length of an instruction is 15 bytes.  If the
 1207 padding is greater than 15 bytes and we don't use a jump,
1208 we have to break it into smaller pieces. */
1209 int padding = count;
1210 while (padding > 15)
1211 {
1212 padding -= 15;
1213 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1214 patt [14], 15);
1215 }
1216
1217 if (padding)
1218 memcpy (fragP->fr_literal + fragP->fr_fix,
1219 patt [padding - 1], padding);
1220 }
1221 }
1222 fragP->fr_var = count;
1223 }
1224
1225 static INLINE int
1226 operand_type_all_zero (const union i386_operand_type *x)
1227 {
1228 switch (ARRAY_SIZE(x->array))
1229 {
1230 case 3:
1231 if (x->array[2])
1232 return 0;
1233 case 2:
1234 if (x->array[1])
1235 return 0;
1236 case 1:
1237 return !x->array[0];
1238 default:
1239 abort ();
1240 }
1241 }
1242
1243 static INLINE void
1244 operand_type_set (union i386_operand_type *x, unsigned int v)
1245 {
1246 switch (ARRAY_SIZE(x->array))
1247 {
1248 case 3:
1249 x->array[2] = v;
1250 case 2:
1251 x->array[1] = v;
1252 case 1:
1253 x->array[0] = v;
1254 break;
1255 default:
1256 abort ();
1257 }
1258 }
1259
1260 static INLINE int
1261 operand_type_equal (const union i386_operand_type *x,
1262 const union i386_operand_type *y)
1263 {
1264 switch (ARRAY_SIZE(x->array))
1265 {
1266 case 3:
1267 if (x->array[2] != y->array[2])
1268 return 0;
1269 case 2:
1270 if (x->array[1] != y->array[1])
1271 return 0;
1272 case 1:
1273 return x->array[0] == y->array[0];
1274 break;
1275 default:
1276 abort ();
1277 }
1278 }
1279
1280 static INLINE int
1281 cpu_flags_all_zero (const union i386_cpu_flags *x)
1282 {
1283 switch (ARRAY_SIZE(x->array))
1284 {
1285 case 3:
1286 if (x->array[2])
1287 return 0;
1288 case 2:
1289 if (x->array[1])
1290 return 0;
1291 case 1:
1292 return !x->array[0];
1293 default:
1294 abort ();
1295 }
1296 }
1297
1298 static INLINE void
1299 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1300 {
1301 switch (ARRAY_SIZE(x->array))
1302 {
1303 case 3:
1304 x->array[2] = v;
1305 case 2:
1306 x->array[1] = v;
1307 case 1:
1308 x->array[0] = v;
1309 break;
1310 default:
1311 abort ();
1312 }
1313 }
1314
1315 static INLINE int
1316 cpu_flags_equal (const union i386_cpu_flags *x,
1317 const union i386_cpu_flags *y)
1318 {
1319 switch (ARRAY_SIZE(x->array))
1320 {
1321 case 3:
1322 if (x->array[2] != y->array[2])
1323 return 0;
1324 case 2:
1325 if (x->array[1] != y->array[1])
1326 return 0;
1327 case 1:
1328 return x->array[0] == y->array[0];
1329 break;
1330 default:
1331 abort ();
1332 }
1333 }
1334
1335 static INLINE int
1336 cpu_flags_check_cpu64 (i386_cpu_flags f)
1337 {
1338 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1339 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1340 }
1341
1342 static INLINE i386_cpu_flags
1343 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1344 {
1345 switch (ARRAY_SIZE (x.array))
1346 {
1347 case 3:
1348 x.array [2] &= y.array [2];
1349 case 2:
1350 x.array [1] &= y.array [1];
1351 case 1:
1352 x.array [0] &= y.array [0];
1353 break;
1354 default:
1355 abort ();
1356 }
1357 return x;
1358 }
1359
1360 static INLINE i386_cpu_flags
1361 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1362 {
1363 switch (ARRAY_SIZE (x.array))
1364 {
1365 case 3:
1366 x.array [2] |= y.array [2];
1367 case 2:
1368 x.array [1] |= y.array [1];
1369 case 1:
1370 x.array [0] |= y.array [0];
1371 break;
1372 default:
1373 abort ();
1374 }
1375 return x;
1376 }
1377
1378 static INLINE i386_cpu_flags
1379 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1380 {
1381 switch (ARRAY_SIZE (x.array))
1382 {
1383 case 3:
1384 x.array [2] &= ~y.array [2];
1385 case 2:
1386 x.array [1] &= ~y.array [1];
1387 case 1:
1388 x.array [0] &= ~y.array [0];
1389 break;
1390 default:
1391 abort ();
1392 }
1393 return x;
1394 }
1395
1396 #define CPU_FLAGS_ARCH_MATCH 0x1
1397 #define CPU_FLAGS_64BIT_MATCH 0x2
1398 #define CPU_FLAGS_AES_MATCH 0x4
1399 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1400 #define CPU_FLAGS_AVX_MATCH 0x10
1401
1402 #define CPU_FLAGS_32BIT_MATCH \
1403 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1404 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1405 #define CPU_FLAGS_PERFECT_MATCH \
1406 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1407
1408 /* Return CPU flags match bits. */
1409
1410 static int
1411 cpu_flags_match (const insn_template *t)
1412 {
1413 i386_cpu_flags x = t->cpu_flags;
1414 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1415
1416 x.bitfield.cpu64 = 0;
1417 x.bitfield.cpuno64 = 0;
1418
1419 if (cpu_flags_all_zero (&x))
1420 {
1421 /* This instruction is available on all archs. */
1422 match |= CPU_FLAGS_32BIT_MATCH;
1423 }
1424 else
1425 {
1426 /* This instruction is available only on some archs. */
1427 i386_cpu_flags cpu = cpu_arch_flags;
1428
1429 cpu.bitfield.cpu64 = 0;
1430 cpu.bitfield.cpuno64 = 0;
1431 cpu = cpu_flags_and (x, cpu);
1432 if (!cpu_flags_all_zero (&cpu))
1433 {
1434 if (x.bitfield.cpuavx)
1435 {
1436 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1437 if (cpu.bitfield.cpuavx)
1438 {
1439 /* Check SSE2AVX. */
 1440 if (!t->opcode_modifier.sse2avx || sse2avx)
1441 {
1442 match |= (CPU_FLAGS_ARCH_MATCH
1443 | CPU_FLAGS_AVX_MATCH);
1444 /* Check AES. */
1445 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1446 match |= CPU_FLAGS_AES_MATCH;
1447 /* Check PCLMUL. */
1448 if (!x.bitfield.cpupclmul
1449 || cpu.bitfield.cpupclmul)
1450 match |= CPU_FLAGS_PCLMUL_MATCH;
1451 }
1452 }
1453 else
1454 match |= CPU_FLAGS_ARCH_MATCH;
1455 }
1456 else
1457 match |= CPU_FLAGS_32BIT_MATCH;
1458 }
1459 }
1460 return match;
1461 }
1462
1463 static INLINE i386_operand_type
1464 operand_type_and (i386_operand_type x, i386_operand_type y)
1465 {
1466 switch (ARRAY_SIZE (x.array))
1467 {
1468 case 3:
1469 x.array [2] &= y.array [2];
1470 case 2:
1471 x.array [1] &= y.array [1];
1472 case 1:
1473 x.array [0] &= y.array [0];
1474 break;
1475 default:
1476 abort ();
1477 }
1478 return x;
1479 }
1480
1481 static INLINE i386_operand_type
1482 operand_type_or (i386_operand_type x, i386_operand_type y)
1483 {
1484 switch (ARRAY_SIZE (x.array))
1485 {
1486 case 3:
1487 x.array [2] |= y.array [2];
1488 case 2:
1489 x.array [1] |= y.array [1];
1490 case 1:
1491 x.array [0] |= y.array [0];
1492 break;
1493 default:
1494 abort ();
1495 }
1496 return x;
1497 }
1498
1499 static INLINE i386_operand_type
1500 operand_type_xor (i386_operand_type x, i386_operand_type y)
1501 {
1502 switch (ARRAY_SIZE (x.array))
1503 {
1504 case 3:
1505 x.array [2] ^= y.array [2];
1506 case 2:
1507 x.array [1] ^= y.array [1];
1508 case 1:
1509 x.array [0] ^= y.array [0];
1510 break;
1511 default:
1512 abort ();
1513 }
1514 return x;
1515 }
1516
1517 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1518 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1519 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1520 static const i386_operand_type inoutportreg
1521 = OPERAND_TYPE_INOUTPORTREG;
1522 static const i386_operand_type reg16_inoutportreg
1523 = OPERAND_TYPE_REG16_INOUTPORTREG;
1524 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1525 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1526 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1527 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1528 static const i386_operand_type anydisp
1529 = OPERAND_TYPE_ANYDISP;
1530 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1531 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1532 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1533 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1534 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1535 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1536 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1537 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1538 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1539 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1540 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1541 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1542
1543 enum operand_type
1544 {
1545 reg,
1546 imm,
1547 disp,
1548 anymem
1549 };
1550
1551 static INLINE int
1552 operand_type_check (i386_operand_type t, enum operand_type c)
1553 {
1554 switch (c)
1555 {
1556 case reg:
1557 return (t.bitfield.reg8
1558 || t.bitfield.reg16
1559 || t.bitfield.reg32
1560 || t.bitfield.reg64);
1561
1562 case imm:
1563 return (t.bitfield.imm8
1564 || t.bitfield.imm8s
1565 || t.bitfield.imm16
1566 || t.bitfield.imm32
1567 || t.bitfield.imm32s
1568 || t.bitfield.imm64);
1569
1570 case disp:
1571 return (t.bitfield.disp8
1572 || t.bitfield.disp16
1573 || t.bitfield.disp32
1574 || t.bitfield.disp32s
1575 || t.bitfield.disp64);
1576
1577 case anymem:
1578 return (t.bitfield.disp8
1579 || t.bitfield.disp16
1580 || t.bitfield.disp32
1581 || t.bitfield.disp32s
1582 || t.bitfield.disp64
1583 || t.bitfield.baseindex);
1584
1585 default:
1586 abort ();
1587 }
1588
1589 return 0;
1590 }
1591
1592 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1593 operand J for instruction template T. */
1594
1595 static INLINE int
1596 match_reg_size (const insn_template *t, unsigned int j)
1597 {
1598 return !((i.types[j].bitfield.byte
1599 && !t->operand_types[j].bitfield.byte)
1600 || (i.types[j].bitfield.word
1601 && !t->operand_types[j].bitfield.word)
1602 || (i.types[j].bitfield.dword
1603 && !t->operand_types[j].bitfield.dword)
1604 || (i.types[j].bitfield.qword
1605 && !t->operand_types[j].bitfield.qword));
1606 }
1607
1608 /* Return 1 if there is no conflict in any size on operand J for
1609 instruction template T. */
1610
1611 static INLINE int
1612 match_mem_size (const insn_template *t, unsigned int j)
1613 {
1614 return (match_reg_size (t, j)
1615 && !((i.types[j].bitfield.unspecified
1616 && !t->operand_types[j].bitfield.unspecified)
1617 || (i.types[j].bitfield.fword
1618 && !t->operand_types[j].bitfield.fword)
1619 || (i.types[j].bitfield.tbyte
1620 && !t->operand_types[j].bitfield.tbyte)
1621 || (i.types[j].bitfield.xmmword
1622 && !t->operand_types[j].bitfield.xmmword)
1623 || (i.types[j].bitfield.ymmword
1624 && !t->operand_types[j].bitfield.ymmword)));
1625 }
1626
1627 /* Return 1 if there is no size conflict on any operands for
1628 instruction template T. */
1629
1630 static INLINE int
1631 operand_size_match (const insn_template *t)
1632 {
1633 unsigned int j;
1634 int match = 1;
1635
1636 /* Don't check jump instructions. */
1637 if (t->opcode_modifier.jump
1638 || t->opcode_modifier.jumpbyte
1639 || t->opcode_modifier.jumpdword
1640 || t->opcode_modifier.jumpintersegment)
1641 return match;
1642
1643 /* Check memory and accumulator operand size. */
1644 for (j = 0; j < i.operands; j++)
1645 {
1646 if (t->operand_types[j].bitfield.anysize)
1647 continue;
1648
1649 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1650 {
1651 match = 0;
1652 break;
1653 }
1654
1655 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1656 {
1657 match = 0;
1658 break;
1659 }
1660 }
1661
1662 if (match)
1663 return match;
1664 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1665 {
1666 mismatch:
1667 i.error = operand_size_mismatch;
1668 return 0;
1669 }
1670
1671 /* Check reverse. */
1672 gas_assert (i.operands == 2);
1673
1674 match = 1;
1675 for (j = 0; j < 2; j++)
1676 {
1677 if (t->operand_types[j].bitfield.acc
1678 && !match_reg_size (t, j ? 0 : 1))
1679 goto mismatch;
1680
1681 if (i.types[j].bitfield.mem
1682 && !match_mem_size (t, j ? 0 : 1))
1683 goto mismatch;
1684 }
1685
1686 return match;
1687 }
1688
1689 static INLINE int
1690 operand_type_match (i386_operand_type overlap,
1691 i386_operand_type given)
1692 {
1693 i386_operand_type temp = overlap;
1694
1695 temp.bitfield.jumpabsolute = 0;
1696 temp.bitfield.unspecified = 0;
1697 temp.bitfield.byte = 0;
1698 temp.bitfield.word = 0;
1699 temp.bitfield.dword = 0;
1700 temp.bitfield.fword = 0;
1701 temp.bitfield.qword = 0;
1702 temp.bitfield.tbyte = 0;
1703 temp.bitfield.xmmword = 0;
1704 temp.bitfield.ymmword = 0;
1705 if (operand_type_all_zero (&temp))
1706 goto mismatch;
1707
1708 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1709 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1710 return 1;
1711
1712 mismatch:
1713 i.error = operand_type_mismatch;
1714 return 0;
1715 }
1716
1717 /* If given types g0 and g1 are registers they must be of the same type
1718 unless the expected operand type register overlap is null.
1719 Note that Acc in a template matches every size of reg. */
1720
1721 static INLINE int
1722 operand_type_register_match (i386_operand_type m0,
1723 i386_operand_type g0,
1724 i386_operand_type t0,
1725 i386_operand_type m1,
1726 i386_operand_type g1,
1727 i386_operand_type t1)
1728 {
1729 if (!operand_type_check (g0, reg))
1730 return 1;
1731
1732 if (!operand_type_check (g1, reg))
1733 return 1;
1734
1735 if (g0.bitfield.reg8 == g1.bitfield.reg8
1736 && g0.bitfield.reg16 == g1.bitfield.reg16
1737 && g0.bitfield.reg32 == g1.bitfield.reg32
1738 && g0.bitfield.reg64 == g1.bitfield.reg64)
1739 return 1;
1740
1741 if (m0.bitfield.acc)
1742 {
1743 t0.bitfield.reg8 = 1;
1744 t0.bitfield.reg16 = 1;
1745 t0.bitfield.reg32 = 1;
1746 t0.bitfield.reg64 = 1;
1747 }
1748
1749 if (m1.bitfield.acc)
1750 {
1751 t1.bitfield.reg8 = 1;
1752 t1.bitfield.reg16 = 1;
1753 t1.bitfield.reg32 = 1;
1754 t1.bitfield.reg64 = 1;
1755 }
1756
1757 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1758 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1759 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1760 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1761 return 1;
1762
1763 i.error = register_type_mismatch;
1764
1765 return 0;
1766 }
1767
1768 static INLINE unsigned int
1769 register_number (const reg_entry *r)
1770 {
1771 unsigned int nr = r->reg_num;
1772
1773 if (r->reg_flags & RegRex)
1774 nr += 8;
1775
1776 return nr;
1777 }
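/* For example (illustrative): %r9d is stored with reg_num 1 and the
   RegRex flag, so register_number () returns 9; the low 3 bits go in
   the ModRM/SIB field and the high bit becomes REX.B/REX.X/REX.R.  */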
1778
1779 static INLINE unsigned int
1780 mode_from_disp_size (i386_operand_type t)
1781 {
1782 if (t.bitfield.disp8)
1783 return 1;
1784 else if (t.bitfield.disp16
1785 || t.bitfield.disp32
1786 || t.bitfield.disp32s)
1787 return 2;
1788 else
1789 return 0;
1790 }
1791
1792 static INLINE int
1793 fits_in_signed_byte (offsetT num)
1794 {
1795 return (num >= -128) && (num <= 127);
1796 }
1797
1798 static INLINE int
1799 fits_in_unsigned_byte (offsetT num)
1800 {
1801 return (num & 0xff) == num;
1802 }
1803
1804 static INLINE int
1805 fits_in_unsigned_word (offsetT num)
1806 {
1807 return (num & 0xffff) == num;
1808 }
1809
1810 static INLINE int
1811 fits_in_signed_word (offsetT num)
1812 {
1813 return (-32768 <= num) && (num <= 32767);
1814 }
1815
1816 static INLINE int
1817 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1818 {
1819 #ifndef BFD64
1820 return 1;
1821 #else
1822 return (!(((offsetT) -1 << 31) & num)
1823 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1824 #endif
1825 } /* fits_in_signed_long() */
1826
1827 static INLINE int
1828 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1829 {
1830 #ifndef BFD64
1831 return 1;
1832 #else
1833 return (num & (((offsetT) 2 << 31) - 1)) == num;
1834 #endif
1835 } /* fits_in_unsigned_long() */
1836
1837 static INLINE int
1838 fits_in_imm4 (offsetT num)
1839 {
1840 return (num & 0xf) == num;
1841 }
1842
1843 static i386_operand_type
1844 smallest_imm_type (offsetT num)
1845 {
1846 i386_operand_type t;
1847
1848 operand_type_set (&t, 0);
1849 t.bitfield.imm64 = 1;
1850
1851 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1852 {
1853 /* This code is disabled on the 486 because all the Imm1 forms
1854 in the opcode table are slower on the i486. They're the
1855 versions with the implicitly specified single-position
1856 displacement, which has another syntax if you really want to
1857 use that form. */
1858 t.bitfield.imm1 = 1;
1859 t.bitfield.imm8 = 1;
1860 t.bitfield.imm8s = 1;
1861 t.bitfield.imm16 = 1;
1862 t.bitfield.imm32 = 1;
1863 t.bitfield.imm32s = 1;
1864 }
1865 else if (fits_in_signed_byte (num))
1866 {
1867 t.bitfield.imm8 = 1;
1868 t.bitfield.imm8s = 1;
1869 t.bitfield.imm16 = 1;
1870 t.bitfield.imm32 = 1;
1871 t.bitfield.imm32s = 1;
1872 }
1873 else if (fits_in_unsigned_byte (num))
1874 {
1875 t.bitfield.imm8 = 1;
1876 t.bitfield.imm16 = 1;
1877 t.bitfield.imm32 = 1;
1878 t.bitfield.imm32s = 1;
1879 }
1880 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1881 {
1882 t.bitfield.imm16 = 1;
1883 t.bitfield.imm32 = 1;
1884 t.bitfield.imm32s = 1;
1885 }
1886 else if (fits_in_signed_long (num))
1887 {
1888 t.bitfield.imm32 = 1;
1889 t.bitfield.imm32s = 1;
1890 }
1891 else if (fits_in_unsigned_long (num))
1892 t.bitfield.imm32 = 1;
1893
1894 return t;
1895 }
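/* Worked example (illustrative): smallest_imm_type (200) leaves imm64
   set and, since 200 fits in an unsigned but not a signed byte, also
   sets imm8, imm16, imm32 and imm32s (but not imm8s), letting
   match_template pick the narrowest encoding a template allows.  */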
1896
1897 static offsetT
1898 offset_in_range (offsetT val, int size)
1899 {
1900 addressT mask;
1901
1902 switch (size)
1903 {
1904 case 1: mask = ((addressT) 1 << 8) - 1; break;
1905 case 2: mask = ((addressT) 1 << 16) - 1; break;
1906 case 4: mask = ((addressT) 2 << 31) - 1; break;
1907 #ifdef BFD64
1908 case 8: mask = ((addressT) 2 << 63) - 1; break;
1909 #endif
1910 default: abort ();
1911 }
1912
1913 #ifdef BFD64
1914 /* If BFD64, sign extend val for 32bit address mode. */
1915 if (flag_code != CODE_64BIT
1916 || i.prefix[ADDR_PREFIX])
1917 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1918 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1919 #endif
1920
1921 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1922 {
1923 char buf1[40], buf2[40];
1924
1925 sprint_value (buf1, val);
1926 sprint_value (buf2, val & mask);
1927 as_warn (_("%s shortened to %s"), buf1, buf2);
1928 }
1929 return val & mask;
1930 }
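
/* E.g. offset_in_range (0x1234, 1) masks the value down to 0x34 and
   warns that it was shortened, while offset_in_range (-1, 1) returns
   0xff silently because the discarded bits are all ones.  */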
1931
1932 enum PREFIX_GROUP
1933 {
1934 PREFIX_EXIST = 0,
1935 PREFIX_LOCK,
1936 PREFIX_REP,
1937 PREFIX_OTHER
1938 };
1939
1940 /* Returns
1941 a. PREFIX_EXIST if attempting to add a prefix where one from the
1942 same class already exists.
1943 b. PREFIX_LOCK if lock prefix is added.
1944 c. PREFIX_REP if rep/repne prefix is added.
1945 d. PREFIX_OTHER if other prefix is added.
1946 */
1947
1948 static enum PREFIX_GROUP
1949 add_prefix (unsigned int prefix)
1950 {
1951 enum PREFIX_GROUP ret = PREFIX_OTHER;
1952 unsigned int q;
1953
1954 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1955 && flag_code == CODE_64BIT)
1956 {
1957 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1958 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1959 && (prefix & (REX_R | REX_X | REX_B))))
1960 ret = PREFIX_EXIST;
1961 q = REX_PREFIX;
1962 }
1963 else
1964 {
1965 switch (prefix)
1966 {
1967 default:
1968 abort ();
1969
1970 case CS_PREFIX_OPCODE:
1971 case DS_PREFIX_OPCODE:
1972 case ES_PREFIX_OPCODE:
1973 case FS_PREFIX_OPCODE:
1974 case GS_PREFIX_OPCODE:
1975 case SS_PREFIX_OPCODE:
1976 q = SEG_PREFIX;
1977 break;
1978
1979 case REPNE_PREFIX_OPCODE:
1980 case REPE_PREFIX_OPCODE:
1981 q = REP_PREFIX;
1982 ret = PREFIX_REP;
1983 break;
1984
1985 case LOCK_PREFIX_OPCODE:
1986 q = LOCK_PREFIX;
1987 ret = PREFIX_LOCK;
1988 break;
1989
1990 case FWAIT_OPCODE:
1991 q = WAIT_PREFIX;
1992 break;
1993
1994 case ADDR_PREFIX_OPCODE:
1995 q = ADDR_PREFIX;
1996 break;
1997
1998 case DATA_PREFIX_OPCODE:
1999 q = DATA_PREFIX;
2000 break;
2001 }
2002 if (i.prefix[q] != 0)
2003 ret = PREFIX_EXIST;
2004 }
2005
2006 if (ret)
2007 {
2008 if (!i.prefix[q])
2009 ++i.prefixes;
2010 i.prefix[q] |= prefix;
2011 }
2012 else
2013 as_bad (_("same type of prefix used twice"));
2014
2015 return ret;
2016 }
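
/* For instance, a second "rep" on the same insn makes the call above
   find i.prefix[REP_PREFIX] already set and report "same type of
   prefix used twice", whereas distinct REX bits added one by one in
   64-bit mode are simply merged into a single REX byte.  */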
2017
2018 static void
2019 update_code_flag (int value, int check)
2020 {
2021 PRINTF_LIKE ((*as_error));
2022
2023 flag_code = (enum flag_code) value;
2024 if (flag_code == CODE_64BIT)
2025 {
2026 cpu_arch_flags.bitfield.cpu64 = 1;
2027 cpu_arch_flags.bitfield.cpuno64 = 0;
2028 }
2029 else
2030 {
2031 cpu_arch_flags.bitfield.cpu64 = 0;
2032 cpu_arch_flags.bitfield.cpuno64 = 1;
2033 }
2034 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2035 {
2036 if (check)
2037 as_error = as_fatal;
2038 else
2039 as_error = as_bad;
2040 (*as_error) (_("64bit mode not supported on `%s'."),
2041 cpu_arch_name ? cpu_arch_name : default_arch);
2042 }
2043 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2044 {
2045 if (check)
2046 as_error = as_fatal;
2047 else
2048 as_error = as_bad;
2049 (*as_error) (_("32bit mode not supported on `%s'."),
2050 cpu_arch_name ? cpu_arch_name : default_arch);
2051 }
2052 stackop_size = '\0';
2053 }
2054
2055 static void
2056 set_code_flag (int value)
2057 {
2058 update_code_flag (value, 0);
2059 }
2060
2061 static void
2062 set_16bit_gcc_code_flag (int new_code_flag)
2063 {
2064 flag_code = (enum flag_code) new_code_flag;
2065 if (flag_code != CODE_16BIT)
2066 abort ();
2067 cpu_arch_flags.bitfield.cpu64 = 0;
2068 cpu_arch_flags.bitfield.cpuno64 = 1;
2069 stackop_size = LONG_MNEM_SUFFIX;
2070 }
2071
2072 static void
2073 set_intel_syntax (int syntax_flag)
2074 {
2075 /* Find out if register prefixing is specified. */
2076 int ask_naked_reg = 0;
2077
2078 SKIP_WHITESPACE ();
2079 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2080 {
2081 char *string = input_line_pointer;
2082 int e = get_symbol_end ();
2083
2084 if (strcmp (string, "prefix") == 0)
2085 ask_naked_reg = 1;
2086 else if (strcmp (string, "noprefix") == 0)
2087 ask_naked_reg = -1;
2088 else
2089 as_bad (_("bad argument to syntax directive."));
2090 *input_line_pointer = e;
2091 }
2092 demand_empty_rest_of_line ();
2093
2094 intel_syntax = syntax_flag;
2095
2096 if (ask_naked_reg == 0)
2097 allow_naked_reg = (intel_syntax
2098 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2099 else
2100 allow_naked_reg = (ask_naked_reg < 0);
2101
2102 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2103
2104 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2105 identifier_chars['$'] = intel_syntax ? '$' : 0;
2106 register_prefix = allow_naked_reg ? "" : "%";
2107 }
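
/* The handler above backs the .intel_syntax/.att_syntax directives,
   e.g.:

	.intel_syntax noprefix		# registers written as eax, not %eax
	mov	eax, DWORD PTR [ebx]
	.att_syntax prefix
	movl	(%ebx), %eax
 */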
2108
2109 static void
2110 set_intel_mnemonic (int mnemonic_flag)
2111 {
2112 intel_mnemonic = mnemonic_flag;
2113 }
2114
2115 static void
2116 set_allow_index_reg (int flag)
2117 {
2118 allow_index_reg = flag;
2119 }
2120
2121 static void
2122 set_check (int what)
2123 {
2124 enum check_kind *kind;
2125 const char *str;
2126
2127 if (what)
2128 {
2129 kind = &operand_check;
2130 str = "operand";
2131 }
2132 else
2133 {
2134 kind = &sse_check;
2135 str = "sse";
2136 }
2137
2138 SKIP_WHITESPACE ();
2139
2140 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2141 {
2142 char *string = input_line_pointer;
2143 int e = get_symbol_end ();
2144
2145 if (strcmp (string, "none") == 0)
2146 *kind = check_none;
2147 else if (strcmp (string, "warning") == 0)
2148 *kind = check_warning;
2149 else if (strcmp (string, "error") == 0)
2150 *kind = check_error;
2151 else
2152 as_bad (_("bad argument to %s_check directive."), str);
2153 *input_line_pointer = e;
2154 }
2155 else
2156 as_bad (_("missing argument for %s_check directive"), str);
2157
2158 demand_empty_rest_of_line ();
2159 }
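
/* Example uses of the directives handled above:

	.sse_check	warning		# warn whenever an SSE insn is assembled
	.operand_check	error		# diagnose dubious operand combinations
	.sse_check	none		# back to the default

   The WHAT argument selects which of the two settings is changed.  */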
2160
2161 static void
2162 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2163 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2164 {
2165 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2166 static const char *arch;
2167
2168 /* Intel L1OM and K1OM are only supported on ELF. */
2169 if (!IS_ELF)
2170 return;
2171
2172 if (!arch)
2173 {
2174 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2175 use default_arch. */
2176 arch = cpu_arch_name;
2177 if (!arch)
2178 arch = default_arch;
2179 }
2180
2181 /* If we are targeting Intel L1OM, we must enable it. */
2182 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2183 || new_flag.bitfield.cpul1om)
2184 return;
2185
2186 /* If we are targeting Intel K1OM, we must enable it. */
2187 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2188 || new_flag.bitfield.cpuk1om)
2189 return;
2190
2191 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2192 #endif
2193 }
2194
2195 static void
2196 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2197 {
2198 SKIP_WHITESPACE ();
2199
2200 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2201 {
2202 char *string = input_line_pointer;
2203 int e = get_symbol_end ();
2204 unsigned int j;
2205 i386_cpu_flags flags;
2206
2207 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2208 {
2209 if (strcmp (string, cpu_arch[j].name) == 0)
2210 {
2211 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2212
2213 if (*string != '.')
2214 {
2215 cpu_arch_name = cpu_arch[j].name;
2216 cpu_sub_arch_name = NULL;
2217 cpu_arch_flags = cpu_arch[j].flags;
2218 if (flag_code == CODE_64BIT)
2219 {
2220 cpu_arch_flags.bitfield.cpu64 = 1;
2221 cpu_arch_flags.bitfield.cpuno64 = 0;
2222 }
2223 else
2224 {
2225 cpu_arch_flags.bitfield.cpu64 = 0;
2226 cpu_arch_flags.bitfield.cpuno64 = 1;
2227 }
2228 cpu_arch_isa = cpu_arch[j].type;
2229 cpu_arch_isa_flags = cpu_arch[j].flags;
2230 if (!cpu_arch_tune_set)
2231 {
2232 cpu_arch_tune = cpu_arch_isa;
2233 cpu_arch_tune_flags = cpu_arch_isa_flags;
2234 }
2235 break;
2236 }
2237
2238 if (!cpu_arch[j].negated)
2239 flags = cpu_flags_or (cpu_arch_flags,
2240 cpu_arch[j].flags);
2241 else
2242 flags = cpu_flags_and_not (cpu_arch_flags,
2243 cpu_arch[j].flags);
2244 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2245 {
2246 if (cpu_sub_arch_name)
2247 {
2248 char *name = cpu_sub_arch_name;
2249 cpu_sub_arch_name = concat (name,
2250 cpu_arch[j].name,
2251 (const char *) NULL);
2252 free (name);
2253 }
2254 else
2255 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2256 cpu_arch_flags = flags;
2257 cpu_arch_isa_flags = flags;
2258 }
2259 *input_line_pointer = e;
2260 demand_empty_rest_of_line ();
2261 return;
2262 }
2263 }
2264 if (j >= ARRAY_SIZE (cpu_arch))
2265 as_bad (_("no such architecture: `%s'"), string);
2266
2267 *input_line_pointer = e;
2268 }
2269 else
2270 as_bad (_("missing cpu architecture"));
2271
2272 no_cond_jump_promotion = 0;
2273 if (*input_line_pointer == ','
2274 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2275 {
2276 char *string = ++input_line_pointer;
2277 int e = get_symbol_end ();
2278
2279 if (strcmp (string, "nojumps") == 0)
2280 no_cond_jump_promotion = 1;
2281 else if (strcmp (string, "jumps") == 0)
2282 ;
2283 else
2284 as_bad (_("no such architecture modifier: `%s'"), string);
2285
2286 *input_line_pointer = e;
2287 }
2288
2289 demand_empty_rest_of_line ();
2290 }
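
/* Example uses of the .arch directive handled above; the accepted
   names come from the cpu_arch[] table, a leading dot selects a
   feature flag to add (or, for the ".noXXX" entries, to remove), and
   ",nojumps" suppresses promotion of out-of-range conditional jumps:

	.arch	i686
	.arch	.sse4.1
	.arch	.nosse
	.arch	pentium4,nojumps
 */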
2291
2292 enum bfd_architecture
2293 i386_arch (void)
2294 {
2295 if (cpu_arch_isa == PROCESSOR_L1OM)
2296 {
2297 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2298 || flag_code != CODE_64BIT)
2299 as_fatal (_("Intel L1OM is 64bit ELF only"));
2300 return bfd_arch_l1om;
2301 }
2302 else if (cpu_arch_isa == PROCESSOR_K1OM)
2303 {
2304 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2305 || flag_code != CODE_64BIT)
2306 as_fatal (_("Intel K1OM is 64bit ELF only"));
2307 return bfd_arch_k1om;
2308 }
2309 else
2310 return bfd_arch_i386;
2311 }
2312
2313 unsigned long
2314 i386_mach (void)
2315 {
2316 if (!strncmp (default_arch, "x86_64", 6))
2317 {
2318 if (cpu_arch_isa == PROCESSOR_L1OM)
2319 {
2320 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2321 || default_arch[6] != '\0')
2322 as_fatal (_("Intel L1OM is 64bit ELF only"));
2323 return bfd_mach_l1om;
2324 }
2325 else if (cpu_arch_isa == PROCESSOR_K1OM)
2326 {
2327 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2328 || default_arch[6] != '\0')
2329 as_fatal (_("Intel K1OM is 64bit ELF only"));
2330 return bfd_mach_k1om;
2331 }
2332 else if (default_arch[6] == '\0')
2333 return bfd_mach_x86_64;
2334 else
2335 return bfd_mach_x64_32;
2336 }
2337 else if (!strcmp (default_arch, "i386"))
2338 return bfd_mach_i386_i386;
2339 else
2340 as_fatal (_("unknown architecture"));
2341 }
2342 \f
2343 void
2344 md_begin (void)
2345 {
2346 const char *hash_err;
2347
2348 /* Initialize op_hash hash table. */
2349 op_hash = hash_new ();
2350
2351 {
2352 const insn_template *optab;
2353 templates *core_optab;
2354
2355 /* Setup for loop. */
2356 optab = i386_optab;
2357 core_optab = (templates *) xmalloc (sizeof (templates));
2358 core_optab->start = optab;
2359
2360 while (1)
2361 {
2362 ++optab;
2363 if (optab->name == NULL
2364 || strcmp (optab->name, (optab - 1)->name) != 0)
2365 {
2366 /* different name --> ship out current template list;
2367 add to hash table; & begin anew. */
2368 core_optab->end = optab;
2369 hash_err = hash_insert (op_hash,
2370 (optab - 1)->name,
2371 (void *) core_optab);
2372 if (hash_err)
2373 {
2374 as_fatal (_("internal Error: Can't hash %s: %s"),
2375 (optab - 1)->name,
2376 hash_err);
2377 }
2378 if (optab->name == NULL)
2379 break;
2380 core_optab = (templates *) xmalloc (sizeof (templates));
2381 core_optab->start = optab;
2382 }
2383 }
2384 }
2385
2386 /* Initialize reg_hash hash table. */
2387 reg_hash = hash_new ();
2388 {
2389 const reg_entry *regtab;
2390 unsigned int regtab_size = i386_regtab_size;
2391
2392 for (regtab = i386_regtab; regtab_size--; regtab++)
2393 {
2394 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2395 if (hash_err)
2396 as_fatal (_("internal Error: Can't hash %s: %s"),
2397 regtab->reg_name,
2398 hash_err);
2399 }
2400 }
2401
2402 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2403 {
2404 int c;
2405 char *p;
2406
2407 for (c = 0; c < 256; c++)
2408 {
2409 if (ISDIGIT (c))
2410 {
2411 digit_chars[c] = c;
2412 mnemonic_chars[c] = c;
2413 register_chars[c] = c;
2414 operand_chars[c] = c;
2415 }
2416 else if (ISLOWER (c))
2417 {
2418 mnemonic_chars[c] = c;
2419 register_chars[c] = c;
2420 operand_chars[c] = c;
2421 }
2422 else if (ISUPPER (c))
2423 {
2424 mnemonic_chars[c] = TOLOWER (c);
2425 register_chars[c] = mnemonic_chars[c];
2426 operand_chars[c] = c;
2427 }
2428
2429 if (ISALPHA (c) || ISDIGIT (c))
2430 identifier_chars[c] = c;
2431 else if (c >= 128)
2432 {
2433 identifier_chars[c] = c;
2434 operand_chars[c] = c;
2435 }
2436 }
2437
2438 #ifdef LEX_AT
2439 identifier_chars['@'] = '@';
2440 #endif
2441 #ifdef LEX_QM
2442 identifier_chars['?'] = '?';
2443 operand_chars['?'] = '?';
2444 #endif
2445 digit_chars['-'] = '-';
2446 mnemonic_chars['_'] = '_';
2447 mnemonic_chars['-'] = '-';
2448 mnemonic_chars['.'] = '.';
2449 identifier_chars['_'] = '_';
2450 identifier_chars['.'] = '.';
2451
2452 for (p = operand_special_chars; *p != '\0'; p++)
2453 operand_chars[(unsigned char) *p] = *p;
2454 }
2455
2456 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2457 if (IS_ELF)
2458 {
2459 record_alignment (text_section, 2);
2460 record_alignment (data_section, 2);
2461 record_alignment (bss_section, 2);
2462 }
2463 #endif
2464
2465 if (flag_code == CODE_64BIT)
2466 {
2467 #if defined (OBJ_COFF) && defined (TE_PE)
2468 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2469 ? 32 : 16);
2470 #else
2471 x86_dwarf2_return_column = 16;
2472 #endif
2473 x86_cie_data_alignment = -8;
2474 }
2475 else
2476 {
2477 x86_dwarf2_return_column = 8;
2478 x86_cie_data_alignment = -4;
2479 }
2480 }
2481
2482 void
2483 i386_print_statistics (FILE *file)
2484 {
2485 hash_print_statistics (file, "i386 opcode", op_hash);
2486 hash_print_statistics (file, "i386 register", reg_hash);
2487 }
2488 \f
2489 #ifdef DEBUG386
2490
2491 /* Debugging routines for md_assemble. */
2492 static void pte (insn_template *);
2493 static void pt (i386_operand_type);
2494 static void pe (expressionS *);
2495 static void ps (symbolS *);
2496
2497 static void
2498 pi (char *line, i386_insn *x)
2499 {
2500 unsigned int j;
2501
2502 fprintf (stdout, "%s: template ", line);
2503 pte (&x->tm);
2504 fprintf (stdout, " address: base %s index %s scale %x\n",
2505 x->base_reg ? x->base_reg->reg_name : "none",
2506 x->index_reg ? x->index_reg->reg_name : "none",
2507 x->log2_scale_factor);
2508 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2509 x->rm.mode, x->rm.reg, x->rm.regmem);
2510 fprintf (stdout, " sib: base %x index %x scale %x\n",
2511 x->sib.base, x->sib.index, x->sib.scale);
2512 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2513 (x->rex & REX_W) != 0,
2514 (x->rex & REX_R) != 0,
2515 (x->rex & REX_X) != 0,
2516 (x->rex & REX_B) != 0);
2517 for (j = 0; j < x->operands; j++)
2518 {
2519 fprintf (stdout, " #%d: ", j + 1);
2520 pt (x->types[j]);
2521 fprintf (stdout, "\n");
2522 if (x->types[j].bitfield.reg8
2523 || x->types[j].bitfield.reg16
2524 || x->types[j].bitfield.reg32
2525 || x->types[j].bitfield.reg64
2526 || x->types[j].bitfield.regmmx
2527 || x->types[j].bitfield.regxmm
2528 || x->types[j].bitfield.regymm
2529 || x->types[j].bitfield.sreg2
2530 || x->types[j].bitfield.sreg3
2531 || x->types[j].bitfield.control
2532 || x->types[j].bitfield.debug
2533 || x->types[j].bitfield.test)
2534 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2535 if (operand_type_check (x->types[j], imm))
2536 pe (x->op[j].imms);
2537 if (operand_type_check (x->types[j], disp))
2538 pe (x->op[j].disps);
2539 }
2540 }
2541
2542 static void
2543 pte (insn_template *t)
2544 {
2545 unsigned int j;
2546 fprintf (stdout, " %d operands ", t->operands);
2547 fprintf (stdout, "opcode %x ", t->base_opcode);
2548 if (t->extension_opcode != None)
2549 fprintf (stdout, "ext %x ", t->extension_opcode);
2550 if (t->opcode_modifier.d)
2551 fprintf (stdout, "D");
2552 if (t->opcode_modifier.w)
2553 fprintf (stdout, "W");
2554 fprintf (stdout, "\n");
2555 for (j = 0; j < t->operands; j++)
2556 {
2557 fprintf (stdout, " #%d type ", j + 1);
2558 pt (t->operand_types[j]);
2559 fprintf (stdout, "\n");
2560 }
2561 }
2562
2563 static void
2564 pe (expressionS *e)
2565 {
2566 fprintf (stdout, " operation %d\n", e->X_op);
2567 fprintf (stdout, " add_number %ld (%lx)\n",
2568 (long) e->X_add_number, (long) e->X_add_number);
2569 if (e->X_add_symbol)
2570 {
2571 fprintf (stdout, " add_symbol ");
2572 ps (e->X_add_symbol);
2573 fprintf (stdout, "\n");
2574 }
2575 if (e->X_op_symbol)
2576 {
2577 fprintf (stdout, " op_symbol ");
2578 ps (e->X_op_symbol);
2579 fprintf (stdout, "\n");
2580 }
2581 }
2582
2583 static void
2584 ps (symbolS *s)
2585 {
2586 fprintf (stdout, "%s type %s%s",
2587 S_GET_NAME (s),
2588 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2589 segment_name (S_GET_SEGMENT (s)));
2590 }
2591
2592 static struct type_name
2593 {
2594 i386_operand_type mask;
2595 const char *name;
2596 }
2597 const type_names[] =
2598 {
2599 { OPERAND_TYPE_REG8, "r8" },
2600 { OPERAND_TYPE_REG16, "r16" },
2601 { OPERAND_TYPE_REG32, "r32" },
2602 { OPERAND_TYPE_REG64, "r64" },
2603 { OPERAND_TYPE_IMM8, "i8" },
2604 { OPERAND_TYPE_IMM8S, "i8s" },
2605 { OPERAND_TYPE_IMM16, "i16" },
2606 { OPERAND_TYPE_IMM32, "i32" },
2607 { OPERAND_TYPE_IMM32S, "i32s" },
2608 { OPERAND_TYPE_IMM64, "i64" },
2609 { OPERAND_TYPE_IMM1, "i1" },
2610 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2611 { OPERAND_TYPE_DISP8, "d8" },
2612 { OPERAND_TYPE_DISP16, "d16" },
2613 { OPERAND_TYPE_DISP32, "d32" },
2614 { OPERAND_TYPE_DISP32S, "d32s" },
2615 { OPERAND_TYPE_DISP64, "d64" },
2616 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2617 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2618 { OPERAND_TYPE_CONTROL, "control reg" },
2619 { OPERAND_TYPE_TEST, "test reg" },
2620 { OPERAND_TYPE_DEBUG, "debug reg" },
2621 { OPERAND_TYPE_FLOATREG, "FReg" },
2622 { OPERAND_TYPE_FLOATACC, "FAcc" },
2623 { OPERAND_TYPE_SREG2, "SReg2" },
2624 { OPERAND_TYPE_SREG3, "SReg3" },
2625 { OPERAND_TYPE_ACC, "Acc" },
2626 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2627 { OPERAND_TYPE_REGMMX, "rMMX" },
2628 { OPERAND_TYPE_REGXMM, "rXMM" },
2629 { OPERAND_TYPE_REGYMM, "rYMM" },
2630 { OPERAND_TYPE_ESSEG, "es" },
2631 };
2632
2633 static void
2634 pt (i386_operand_type t)
2635 {
2636 unsigned int j;
2637 i386_operand_type a;
2638
2639 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2640 {
2641 a = operand_type_and (t, type_names[j].mask);
2642 if (!operand_type_all_zero (&a))
2643 fprintf (stdout, "%s, ", type_names[j].name);
2644 }
2645 fflush (stdout);
2646 }
2647
2648 #endif /* DEBUG386 */
2649 \f
2650 static bfd_reloc_code_real_type
2651 reloc (unsigned int size,
2652 int pcrel,
2653 int sign,
2654 bfd_reloc_code_real_type other)
2655 {
2656 if (other != NO_RELOC)
2657 {
2658 reloc_howto_type *rel;
2659
2660 if (size == 8)
2661 switch (other)
2662 {
2663 case BFD_RELOC_X86_64_GOT32:
2664 return BFD_RELOC_X86_64_GOT64;
2665 break;
2666 case BFD_RELOC_X86_64_PLTOFF64:
2667 return BFD_RELOC_X86_64_PLTOFF64;
2668 break;
2669 case BFD_RELOC_X86_64_GOTPC32:
2670 other = BFD_RELOC_X86_64_GOTPC64;
2671 break;
2672 case BFD_RELOC_X86_64_GOTPCREL:
2673 other = BFD_RELOC_X86_64_GOTPCREL64;
2674 break;
2675 case BFD_RELOC_X86_64_TPOFF32:
2676 other = BFD_RELOC_X86_64_TPOFF64;
2677 break;
2678 case BFD_RELOC_X86_64_DTPOFF32:
2679 other = BFD_RELOC_X86_64_DTPOFF64;
2680 break;
2681 default:
2682 break;
2683 }
2684
2685 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2686 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2687 sign = -1;
2688
2689 rel = bfd_reloc_type_lookup (stdoutput, other);
2690 if (!rel)
2691 as_bad (_("unknown relocation (%u)"), other);
2692 else if (size != bfd_get_reloc_size (rel))
2693 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2694 bfd_get_reloc_size (rel),
2695 size);
2696 else if (pcrel && !rel->pc_relative)
2697 as_bad (_("non-pc-relative relocation for pc-relative field"));
2698 else if ((rel->complain_on_overflow == complain_overflow_signed
2699 && !sign)
2700 || (rel->complain_on_overflow == complain_overflow_unsigned
2701 && sign > 0))
2702 as_bad (_("relocated field and relocation type differ in signedness"));
2703 else
2704 return other;
2705 return NO_RELOC;
2706 }
2707
2708 if (pcrel)
2709 {
2710 if (!sign)
2711 as_bad (_("there are no unsigned pc-relative relocations"));
2712 switch (size)
2713 {
2714 case 1: return BFD_RELOC_8_PCREL;
2715 case 2: return BFD_RELOC_16_PCREL;
2716 case 4: return BFD_RELOC_32_PCREL;
2717 case 8: return BFD_RELOC_64_PCREL;
2718 }
2719 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2720 }
2721 else
2722 {
2723 if (sign > 0)
2724 switch (size)
2725 {
2726 case 4: return BFD_RELOC_X86_64_32S;
2727 }
2728 else
2729 switch (size)
2730 {
2731 case 1: return BFD_RELOC_8;
2732 case 2: return BFD_RELOC_16;
2733 case 4: return BFD_RELOC_32;
2734 case 8: return BFD_RELOC_64;
2735 }
2736 as_bad (_("cannot do %s %u byte relocation"),
2737 sign > 0 ? "signed" : "unsigned", size);
2738 }
2739
2740 return NO_RELOC;
2741 }
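
/* With no override this boils down to the generic BFD reloc for the
   field: size 4 + pcrel gives BFD_RELOC_32_PCREL, size 4 with sign > 0
   gives BFD_RELOC_X86_64_32S, size 8 gives BFD_RELOC_64.  When an
   x86-64 GOT/TLS reloc is requested on an 8-byte field, the 32-bit
   form is first widened to its 64-bit counterpart above.  */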
2742
2743 /* Here we decide which fixups can be adjusted to make them relative to
2744 the beginning of the section instead of the symbol. Basically we need
2745 to make sure that the dynamic relocations are done correctly, so in
2746 some cases we force the original symbol to be used. */
2747
2748 int
2749 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2750 {
2751 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2752 if (!IS_ELF)
2753 return 1;
2754
2755 /* Don't adjust pc-relative references to merge sections in 64-bit
2756 mode. */
2757 if (use_rela_relocations
2758 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2759 && fixP->fx_pcrel)
2760 return 0;
2761
2762 /* The x86_64 GOTPCREL relocations are represented as 32-bit PC-relative
2763 relocations and changed later by validate_fix. */
2764 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2765 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2766 return 0;
2767
2768 /* adjust_reloc_syms doesn't know about the GOT. */
2769 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2770 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2771 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2772 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2773 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2774 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2775 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2776 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2777 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2778 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2779 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2780 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2781 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2782 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2783 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2784 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2785 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2789 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2790 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2791 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2792 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2793 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2794 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2795 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2796 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2797 return 0;
2798 #endif
2799 return 1;
2800 }
2801
2802 static int
2803 intel_float_operand (const char *mnemonic)
2804 {
2805 /* Note that the value returned is meaningful only for opcodes with (memory)
2806 operands, hence the code here is free to improperly handle opcodes that
2807 have no operands (for better performance and smaller code). */
2808
2809 if (mnemonic[0] != 'f')
2810 return 0; /* non-math */
2811
2812 switch (mnemonic[1])
2813 {
2814 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2815 the fs segment override prefix are not currently handled, because no
2816 call path can make opcodes without operands get here. */
2817 case 'i':
2818 return 2 /* integer op */;
2819 case 'l':
2820 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2821 return 3; /* fldcw/fldenv */
2822 break;
2823 case 'n':
2824 if (mnemonic[2] != 'o' /* fnop */)
2825 return 3; /* non-waiting control op */
2826 break;
2827 case 'r':
2828 if (mnemonic[2] == 's')
2829 return 3; /* frstor/frstpm */
2830 break;
2831 case 's':
2832 if (mnemonic[2] == 'a')
2833 return 3; /* fsave */
2834 if (mnemonic[2] == 't')
2835 {
2836 switch (mnemonic[3])
2837 {
2838 case 'c': /* fstcw */
2839 case 'd': /* fstdw */
2840 case 'e': /* fstenv */
2841 case 's': /* fsts[gw] */
2842 return 3;
2843 }
2844 }
2845 break;
2846 case 'x':
2847 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2848 return 0; /* fxsave/fxrstor are not really math ops */
2849 break;
2850 }
2851
2852 return 1;
2853 }
2854
2855 /* Build the VEX prefix. */
2856
2857 static void
2858 build_vex_prefix (const insn_template *t)
2859 {
2860 unsigned int register_specifier;
2861 unsigned int implied_prefix;
2862 unsigned int vector_length;
2863
2864 /* Check register specifier. */
2865 if (i.vex.register_specifier)
2866 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2867 else
2868 register_specifier = 0xf;
2869
2870 /* Use the 2-byte VEX prefix by swapping the destination and source
2871 operands. */
2872 if (!i.swap_operand
2873 && i.operands == i.reg_operands
2874 && i.tm.opcode_modifier.vexopcode == VEX0F
2875 && i.tm.opcode_modifier.s
2876 && i.rex == REX_B)
2877 {
2878 unsigned int xchg = i.operands - 1;
2879 union i386_op temp_op;
2880 i386_operand_type temp_type;
2881
2882 temp_type = i.types[xchg];
2883 i.types[xchg] = i.types[0];
2884 i.types[0] = temp_type;
2885 temp_op = i.op[xchg];
2886 i.op[xchg] = i.op[0];
2887 i.op[0] = temp_op;
2888
2889 gas_assert (i.rm.mode == 3);
2890
2891 i.rex = REX_R;
2892 xchg = i.rm.regmem;
2893 i.rm.regmem = i.rm.reg;
2894 i.rm.reg = xchg;
2895
2896 /* Use the next insn. */
2897 i.tm = t[1];
2898 }
2899
2900 if (i.tm.opcode_modifier.vex == VEXScalar)
2901 vector_length = avxscalar;
2902 else
2903 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2904
2905 switch ((i.tm.base_opcode >> 8) & 0xff)
2906 {
2907 case 0:
2908 implied_prefix = 0;
2909 break;
2910 case DATA_PREFIX_OPCODE:
2911 implied_prefix = 1;
2912 break;
2913 case REPE_PREFIX_OPCODE:
2914 implied_prefix = 2;
2915 break;
2916 case REPNE_PREFIX_OPCODE:
2917 implied_prefix = 3;
2918 break;
2919 default:
2920 abort ();
2921 }
2922
2923 /* Use 2-byte VEX prefix if possible. */
2924 if (i.tm.opcode_modifier.vexopcode == VEX0F
2925 && i.tm.opcode_modifier.vexw != VEXW1
2926 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2927 {
2928 /* 2-byte VEX prefix. */
2929 unsigned int r;
2930
2931 i.vex.length = 2;
2932 i.vex.bytes[0] = 0xc5;
2933
2934 /* Check the REX.R bit. */
2935 r = (i.rex & REX_R) ? 0 : 1;
2936 i.vex.bytes[1] = (r << 7
2937 | register_specifier << 3
2938 | vector_length << 2
2939 | implied_prefix);
2940 }
2941 else
2942 {
2943 /* 3-byte VEX prefix. */
2944 unsigned int m, w;
2945
2946 i.vex.length = 3;
2947
2948 switch (i.tm.opcode_modifier.vexopcode)
2949 {
2950 case VEX0F:
2951 m = 0x1;
2952 i.vex.bytes[0] = 0xc4;
2953 break;
2954 case VEX0F38:
2955 m = 0x2;
2956 i.vex.bytes[0] = 0xc4;
2957 break;
2958 case VEX0F3A:
2959 m = 0x3;
2960 i.vex.bytes[0] = 0xc4;
2961 break;
2962 case XOP08:
2963 m = 0x8;
2964 i.vex.bytes[0] = 0x8f;
2965 break;
2966 case XOP09:
2967 m = 0x9;
2968 i.vex.bytes[0] = 0x8f;
2969 break;
2970 case XOP0A:
2971 m = 0xa;
2972 i.vex.bytes[0] = 0x8f;
2973 break;
2974 default:
2975 abort ();
2976 }
2977
2978 /* The high 3 bits of the second VEX byte are the one's complement
2979 of the RXB bits from REX. */
2980 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2981
2982 /* Check the REX.W bit. */
2983 w = (i.rex & REX_W) ? 1 : 0;
2984 if (i.tm.opcode_modifier.vexw)
2985 {
2986 if (w)
2987 abort ();
2988
2989 if (i.tm.opcode_modifier.vexw == VEXW1)
2990 w = 1;
2991 }
2992
2993 i.vex.bytes[2] = (w << 7
2994 | register_specifier << 3
2995 | vector_length << 2
2996 | implied_prefix);
2997 }
2998 }
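
/* Layout of the prefix bytes produced above:

   2-byte form:  C5  | ~R vvvv L pp |
   3-byte form:  C4 (or 8F for XOP)  | ~R ~X ~B m-mmmm |  | W vvvv L pp |

   where R/X/B come from the REX bits (stored inverted), vvvv is the
   inverted register specifier, L is the vector length (1 = 256-bit),
   pp encodes the implied 66/F3/F2 prefix and m-mmmm selects the
   0F/0F38/0F3A (or XOP) opcode map.  */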
2999
3000 static void
3001 process_immext (void)
3002 {
3003 expressionS *exp;
3004
3005 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3006 && i.operands > 0)
3007 {
3008 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3009 with an opcode suffix which is coded in the same place as an
3010 8-bit immediate field would be.
3011 Here we check those operands and remove them afterwards. */
3012 unsigned int x;
3013
3014 for (x = 0; x < i.operands; x++)
3015 if (register_number (i.op[x].regs) != x)
3016 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3017 register_prefix, i.op[x].regs->reg_name, x + 1,
3018 i.tm.name);
3019
3020 i.operands = 0;
3021 }
3022
3023 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3024 which is coded in the same place as an 8-bit immediate field
3025 would be. Here we fake an 8-bit immediate operand from the
3026 opcode suffix stored in tm.extension_opcode.
3027
3028 AVX instructions also use this encoding for some
3029 3-operand instructions. */
3030
3031 gas_assert (i.imm_operands == 0
3032 && (i.operands <= 2
3033 || (i.tm.opcode_modifier.vex
3034 && i.operands <= 4)));
3035
3036 exp = &im_expressions[i.imm_operands++];
3037 i.op[i.operands].imms = exp;
3038 i.types[i.operands] = imm8;
3039 i.operands++;
3040 exp->X_op = O_constant;
3041 exp->X_add_number = i.tm.extension_opcode;
3042 i.tm.extension_opcode = None;
3043 }
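
/* E.g. for "monitor %eax,%ecx,%edx" the fixed register operands are
   checked and dropped above, and for a 3DNow! insn such as pfadd the
   one-byte opcode suffix held in tm.extension_opcode is emitted
   through the ordinary Imm8 path.  */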
3044
3045
3046 static int
3047 check_hle (void)
3048 {
3049 switch (i.tm.opcode_modifier.hleprefixok)
3050 {
3051 default:
3052 abort ();
3053 case HLEPrefixNone:
3054 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3055 as_bad (_("invalid instruction `%s' after `xacquire'"),
3056 i.tm.name);
3057 else
3058 as_bad (_("invalid instruction `%s' after `xrelease'"),
3059 i.tm.name);
3060 return 0;
3061 case HLEPrefixLock:
3062 if (i.prefix[LOCK_PREFIX])
3063 return 1;
3064 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3065 as_bad (_("missing `lock' with `xacquire'"));
3066 else
3067 as_bad (_("missing `lock' with `xrelease'"));
3068 return 0;
3069 case HLEPrefixAny:
3070 return 1;
3071 case HLEPrefixRelease:
3072 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3073 {
3074 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3075 i.tm.name);
3076 return 0;
3077 }
3078 if (i.mem_operands == 0
3079 || !operand_type_check (i.types[i.operands - 1], anymem))
3080 {
3081 as_bad (_("memory destination needed for instruction `%s'"
3082 " after `xrelease'"), i.tm.name);
3083 return 0;
3084 }
3085 return 1;
3086 }
3087 }
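
/* This accepts e.g. "xacquire lock incl (%rax)" and
   "xrelease movl $0, (%rax)" (the special MOV-to-memory form), rejects
   "xacquire incl (%rax)" with "missing `lock' with `xacquire'", and
   rejects the HLE prefixes on instructions that never allow them.  */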
3088
3089 /* This is the guts of the machine-dependent assembler. LINE points to a
3090 machine dependent instruction. This function is supposed to emit
3091 the frags/bytes it assembles to. */
3092
3093 void
3094 md_assemble (char *line)
3095 {
3096 unsigned int j;
3097 char mnemonic[MAX_MNEM_SIZE];
3098 const insn_template *t;
3099
3100 /* Initialize globals. */
3101 memset (&i, '\0', sizeof (i));
3102 for (j = 0; j < MAX_OPERANDS; j++)
3103 i.reloc[j] = NO_RELOC;
3104 memset (disp_expressions, '\0', sizeof (disp_expressions));
3105 memset (im_expressions, '\0', sizeof (im_expressions));
3106 save_stack_p = save_stack;
3107
3108 /* First parse an instruction mnemonic & call i386_operand for the operands.
3109 We assume that the scrubber has arranged it so that line[0] is the valid
3110 start of a (possibly prefixed) mnemonic. */
3111
3112 line = parse_insn (line, mnemonic);
3113 if (line == NULL)
3114 return;
3115
3116 line = parse_operands (line, mnemonic);
3117 this_operand = -1;
3118 if (line == NULL)
3119 return;
3120
3121 /* Now we've parsed the mnemonic into a set of templates, and have the
3122 operands at hand. */
3123
3124 /* All intel opcodes have reversed operands except for "bound" and
3125 "enter". We also don't reverse intersegment "jmp" and "call"
3126 instructions with 2 immediate operands so that the immediate segment
3127 precedes the offset, as it does when in AT&T mode. */
3128 if (intel_syntax
3129 && i.operands > 1
3130 && (strcmp (mnemonic, "bound") != 0)
3131 && (strcmp (mnemonic, "invlpga") != 0)
3132 && !(operand_type_check (i.types[0], imm)
3133 && operand_type_check (i.types[1], imm)))
3134 swap_operands ();
3135
3136 /* The order of the immediates should be reversed
3137 for 2 immediates extrq and insertq instructions */
3138 if (i.imm_operands == 2
3139 && (strcmp (mnemonic, "extrq") == 0
3140 || strcmp (mnemonic, "insertq") == 0))
3141 swap_2_operands (0, 1);
3142
3143 if (i.imm_operands)
3144 optimize_imm ();
3145
3146 /* Don't optimize displacement for movabs since it only takes 64bit
3147 displacement. */
3148 if (i.disp_operands
3149 && i.disp_encoding != disp_encoding_32bit
3150 && (flag_code != CODE_64BIT
3151 || strcmp (mnemonic, "movabs") != 0))
3152 optimize_disp ();
3153
3154 /* Next, we find a template that matches the given insn,
3155 making sure the overlap of the given operands types is consistent
3156 with the template operand types. */
3157
3158 if (!(t = match_template ()))
3159 return;
3160
3161 if (sse_check != check_none
3162 && !i.tm.opcode_modifier.noavx
3163 && (i.tm.cpu_flags.bitfield.cpusse
3164 || i.tm.cpu_flags.bitfield.cpusse2
3165 || i.tm.cpu_flags.bitfield.cpusse3
3166 || i.tm.cpu_flags.bitfield.cpussse3
3167 || i.tm.cpu_flags.bitfield.cpusse4_1
3168 || i.tm.cpu_flags.bitfield.cpusse4_2))
3169 {
3170 (sse_check == check_warning
3171 ? as_warn
3172 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3173 }
3174
3175 /* Zap movzx and movsx suffix. The suffix has been set from
3176 "word ptr" or "byte ptr" on the source operand in Intel syntax
3177 or extracted from mnemonic in AT&T syntax. But we'll use
3178 the destination register to choose the suffix for encoding. */
3179 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3180 {
3181 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3182 there is no suffix, the default will be byte extension. */
3183 if (i.reg_operands != 2
3184 && !i.suffix
3185 && intel_syntax)
3186 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3187
3188 i.suffix = 0;
3189 }
3190
3191 if (i.tm.opcode_modifier.fwait)
3192 if (!add_prefix (FWAIT_OPCODE))
3193 return;
3194
3195 /* Check for lock without a lockable instruction. Destination operand
3196 must be memory unless it is xchg (0x86). */
3197 if (i.prefix[LOCK_PREFIX]
3198 && (!i.tm.opcode_modifier.islockable
3199 || i.mem_operands == 0
3200 || (i.tm.base_opcode != 0x86
3201 && !operand_type_check (i.types[i.operands - 1], anymem))))
3202 {
3203 as_bad (_("expecting lockable instruction after `lock'"));
3204 return;
3205 }
3206
3207 /* Check if HLE prefix is OK. */
3208 if (i.have_hle && !check_hle ())
3209 return;
3210
3211 /* Check string instruction segment overrides. */
3212 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3213 {
3214 if (!check_string ())
3215 return;
3216 i.disp_operands = 0;
3217 }
3218
3219 if (!process_suffix ())
3220 return;
3221
3222 /* Update operand types. */
3223 for (j = 0; j < i.operands; j++)
3224 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3225
3226 /* Make still unresolved immediate matches conform to size of immediate
3227 given in i.suffix. */
3228 if (!finalize_imm ())
3229 return;
3230
3231 if (i.types[0].bitfield.imm1)
3232 i.imm_operands = 0; /* kludge for shift insns. */
3233
3234 /* We only need to check those implicit registers for instructions
3235 with 3 operands or less. */
3236 if (i.operands <= 3)
3237 for (j = 0; j < i.operands; j++)
3238 if (i.types[j].bitfield.inoutportreg
3239 || i.types[j].bitfield.shiftcount
3240 || i.types[j].bitfield.acc
3241 || i.types[j].bitfield.floatacc)
3242 i.reg_operands--;
3243
3244 /* ImmExt should be processed after SSE2AVX. */
3245 if (!i.tm.opcode_modifier.sse2avx
3246 && i.tm.opcode_modifier.immext)
3247 process_immext ();
3248
3249 /* For insns with operands there are more diddles to do to the opcode. */
3250 if (i.operands)
3251 {
3252 if (!process_operands ())
3253 return;
3254 }
3255 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3256 {
3257 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3258 as_warn (_("translating to `%sp'"), i.tm.name);
3259 }
3260
3261 if (i.tm.opcode_modifier.vex)
3262 build_vex_prefix (t);
3263
3264 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3265 instructions may define INT_OPCODE as well, so avoid this corner
3266 case for those instructions that use MODRM. */
3267 if (i.tm.base_opcode == INT_OPCODE
3268 && !i.tm.opcode_modifier.modrm
3269 && i.op[0].imms->X_add_number == 3)
3270 {
3271 i.tm.base_opcode = INT3_OPCODE;
3272 i.imm_operands = 0;
3273 }
3274
3275 if ((i.tm.opcode_modifier.jump
3276 || i.tm.opcode_modifier.jumpbyte
3277 || i.tm.opcode_modifier.jumpdword)
3278 && i.op[0].disps->X_op == O_constant)
3279 {
3280 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3281 the absolute address given by the constant. Since ix86 jumps and
3282 calls are pc relative, we need to generate a reloc. */
3283 i.op[0].disps->X_add_symbol = &abs_symbol;
3284 i.op[0].disps->X_op = O_symbol;
3285 }
3286
3287 if (i.tm.opcode_modifier.rex64)
3288 i.rex |= REX_W;
3289
3290 /* For 8 bit registers we need an empty rex prefix. Also if the
3291 instruction already has a prefix, we need to convert old
3292 registers to new ones. */
3293
3294 if ((i.types[0].bitfield.reg8
3295 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3296 || (i.types[1].bitfield.reg8
3297 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3298 || ((i.types[0].bitfield.reg8
3299 || i.types[1].bitfield.reg8)
3300 && i.rex != 0))
3301 {
3302 int x;
3303
3304 i.rex |= REX_OPCODE;
3305 for (x = 0; x < 2; x++)
3306 {
3307 /* Look for 8 bit operand that uses old registers. */
3308 if (i.types[x].bitfield.reg8
3309 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3310 {
3311 /* In case it is "hi" register, give up. */
3312 if (i.op[x].regs->reg_num > 3)
3313 as_bad (_("can't encode register '%s%s' in an "
3314 "instruction requiring REX prefix."),
3315 register_prefix, i.op[x].regs->reg_name);
3316
3317 /* Otherwise it is equivalent to the extended register.
3318 Since the encoding doesn't change this is merely
3319 cosmetic cleanup for debug output. */
3320
3321 i.op[x].regs = i.op[x].regs + 8;
3322 }
3323 }
3324 }
3325
3326 if (i.rex != 0)
3327 add_prefix (REX_OPCODE | i.rex);
3328
3329 /* We are ready to output the insn. */
3330 output_insn ();
3331 }
3332
3333 static char *
3334 parse_insn (char *line, char *mnemonic)
3335 {
3336 char *l = line;
3337 char *token_start = l;
3338 char *mnem_p;
3339 int supported;
3340 const insn_template *t;
3341 char *dot_p = NULL;
3342
3343 /* Non-zero if we found a prefix only acceptable with string insns. */
3344 const char *expecting_string_instruction = NULL;
3345
3346 while (1)
3347 {
3348 mnem_p = mnemonic;
3349 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3350 {
3351 if (*mnem_p == '.')
3352 dot_p = mnem_p;
3353 mnem_p++;
3354 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3355 {
3356 as_bad (_("no such instruction: `%s'"), token_start);
3357 return NULL;
3358 }
3359 l++;
3360 }
3361 if (!is_space_char (*l)
3362 && *l != END_OF_INSN
3363 && (intel_syntax
3364 || (*l != PREFIX_SEPARATOR
3365 && *l != ',')))
3366 {
3367 as_bad (_("invalid character %s in mnemonic"),
3368 output_invalid (*l));
3369 return NULL;
3370 }
3371 if (token_start == l)
3372 {
3373 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3374 as_bad (_("expecting prefix; got nothing"));
3375 else
3376 as_bad (_("expecting mnemonic; got nothing"));
3377 return NULL;
3378 }
3379
3380 /* Look up instruction (or prefix) via hash table. */
3381 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3382
3383 if (*l != END_OF_INSN
3384 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3385 && current_templates
3386 && current_templates->start->opcode_modifier.isprefix)
3387 {
3388 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3389 {
3390 as_bad ((flag_code != CODE_64BIT
3391 ? _("`%s' is only supported in 64-bit mode")
3392 : _("`%s' is not supported in 64-bit mode")),
3393 current_templates->start->name);
3394 return NULL;
3395 }
3396 /* If we are in 16-bit mode, do not allow addr16 or data16.
3397 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3398 if ((current_templates->start->opcode_modifier.size16
3399 || current_templates->start->opcode_modifier.size32)
3400 && flag_code != CODE_64BIT
3401 && (current_templates->start->opcode_modifier.size32
3402 ^ (flag_code == CODE_16BIT)))
3403 {
3404 as_bad (_("redundant %s prefix"),
3405 current_templates->start->name);
3406 return NULL;
3407 }
3408 /* Add prefix, checking for repeated prefixes. */
3409 switch (add_prefix (current_templates->start->base_opcode))
3410 {
3411 case PREFIX_EXIST:
3412 return NULL;
3413 case PREFIX_REP:
3414 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3415 i.have_hle = 1;
3416 else
3417 expecting_string_instruction = current_templates->start->name;
3418 break;
3419 default:
3420 break;
3421 }
3422 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3423 token_start = ++l;
3424 }
3425 else
3426 break;
3427 }
3428
3429 if (!current_templates)
3430 {
3431 /* Check if we should swap operand or force 32bit displacement in
3432 encoding. */
3433 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3434 i.swap_operand = 1;
3435 else if (mnem_p - 3 == dot_p
3436 && dot_p[1] == 'd'
3437 && dot_p[2] == '8')
3438 i.disp_encoding = disp_encoding_8bit;
3439 else if (mnem_p - 4 == dot_p
3440 && dot_p[1] == 'd'
3441 && dot_p[2] == '3'
3442 && dot_p[3] == '2')
3443 i.disp_encoding = disp_encoding_32bit;
3444 else
3445 goto check_suffix;
3446 mnem_p = dot_p;
3447 *dot_p = '\0';
3448 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3449 }
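
  /* So "mov.s %eax, %ebx" assembles mov with the source/destination
     encoding swapped, and "mov.d32 %eax, (%ebx)" forces a 32-bit
     displacement to be encoded; the pseudo-suffix is stripped before
     the mnemonic is looked up again.  */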
3450
3451 if (!current_templates)
3452 {
3453 check_suffix:
3454 /* See if we can get a match by trimming off a suffix. */
3455 switch (mnem_p[-1])
3456 {
3457 case WORD_MNEM_SUFFIX:
3458 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3459 i.suffix = SHORT_MNEM_SUFFIX;
3460 else
3461 case BYTE_MNEM_SUFFIX:
3462 case QWORD_MNEM_SUFFIX:
3463 i.suffix = mnem_p[-1];
3464 mnem_p[-1] = '\0';
3465 current_templates = (const templates *) hash_find (op_hash,
3466 mnemonic);
3467 break;
3468 case SHORT_MNEM_SUFFIX:
3469 case LONG_MNEM_SUFFIX:
3470 if (!intel_syntax)
3471 {
3472 i.suffix = mnem_p[-1];
3473 mnem_p[-1] = '\0';
3474 current_templates = (const templates *) hash_find (op_hash,
3475 mnemonic);
3476 }
3477 break;
3478
3479 /* Intel Syntax. */
3480 case 'd':
3481 if (intel_syntax)
3482 {
3483 if (intel_float_operand (mnemonic) == 1)
3484 i.suffix = SHORT_MNEM_SUFFIX;
3485 else
3486 i.suffix = LONG_MNEM_SUFFIX;
3487 mnem_p[-1] = '\0';
3488 current_templates = (const templates *) hash_find (op_hash,
3489 mnemonic);
3490 }
3491 break;
3492 }
3493 if (!current_templates)
3494 {
3495 as_bad (_("no such instruction: `%s'"), token_start);
3496 return NULL;
3497 }
3498 }
3499
3500 if (current_templates->start->opcode_modifier.jump
3501 || current_templates->start->opcode_modifier.jumpbyte)
3502 {
3503 /* Check for a branch hint. We allow ",pt" and ",pn" for
3504 predict taken and predict not taken respectively.
3505 I'm not sure that branch hints actually do anything on loop
3506 and jcxz insns (JumpByte) for current Pentium4 chips. They
3507 may work in the future and it doesn't hurt to accept them
3508 now. */
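      /* E.g. "jne .L1,pt" gets a DS (0x3e) prefix as the "predict
	 taken" hint and "jne .L1,pn" gets CS (0x2e) for "predict not
	 taken".  */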
3509 if (l[0] == ',' && l[1] == 'p')
3510 {
3511 if (l[2] == 't')
3512 {
3513 if (!add_prefix (DS_PREFIX_OPCODE))
3514 return NULL;
3515 l += 3;
3516 }
3517 else if (l[2] == 'n')
3518 {
3519 if (!add_prefix (CS_PREFIX_OPCODE))
3520 return NULL;
3521 l += 3;
3522 }
3523 }
3524 }
3525 /* Any other comma loses. */
3526 if (*l == ',')
3527 {
3528 as_bad (_("invalid character %s in mnemonic"),
3529 output_invalid (*l));
3530 return NULL;
3531 }
3532
3533 /* Check if instruction is supported on specified architecture. */
3534 supported = 0;
3535 for (t = current_templates->start; t < current_templates->end; ++t)
3536 {
3537 supported |= cpu_flags_match (t);
3538 if (supported == CPU_FLAGS_PERFECT_MATCH)
3539 goto skip;
3540 }
3541
3542 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3543 {
3544 as_bad (flag_code == CODE_64BIT
3545 ? _("`%s' is not supported in 64-bit mode")
3546 : _("`%s' is only supported in 64-bit mode"),
3547 current_templates->start->name);
3548 return NULL;
3549 }
3550 if (supported != CPU_FLAGS_PERFECT_MATCH)
3551 {
3552 as_bad (_("`%s' is not supported on `%s%s'"),
3553 current_templates->start->name,
3554 cpu_arch_name ? cpu_arch_name : default_arch,
3555 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3556 return NULL;
3557 }
3558
3559 skip:
3560 if (!cpu_arch_flags.bitfield.cpui386
3561 && (flag_code != CODE_16BIT))
3562 {
3563 as_warn (_("use .code16 to ensure correct addressing mode"));
3564 }
3565
3566 /* Check for rep/repne without a string (or other allowed) instruction. */
3567 if (expecting_string_instruction)
3568 {
3569 static templates override;
3570
3571 for (t = current_templates->start; t < current_templates->end; ++t)
3572 if (t->opcode_modifier.repprefixok)
3573 break;
3574 if (t >= current_templates->end)
3575 {
3576 as_bad (_("expecting string instruction after `%s'"),
3577 expecting_string_instruction);
3578 return NULL;
3579 }
3580 for (override.start = t; t < current_templates->end; ++t)
3581 if (!t->opcode_modifier.repprefixok)
3582 break;
3583 override.end = t;
3584 current_templates = &override;
3585 }
3586
3587 return l;
3588 }
3589
3590 static char *
3591 parse_operands (char *l, const char *mnemonic)
3592 {
3593 char *token_start;
3594
3595 /* 1 if operand is pending after ','. */
3596 unsigned int expecting_operand = 0;
3597
3598 /* Non-zero if operand parens not balanced. */
3599 unsigned int paren_not_balanced;
3600
3601 while (*l != END_OF_INSN)
3602 {
3603 /* Skip optional white space before operand. */
3604 if (is_space_char (*l))
3605 ++l;
3606 if (!is_operand_char (*l) && *l != END_OF_INSN)
3607 {
3608 as_bad (_("invalid character %s before operand %d"),
3609 output_invalid (*l),
3610 i.operands + 1);
3611 return NULL;
3612 }
3613 token_start = l; /* after white space */
3614 paren_not_balanced = 0;
3615 while (paren_not_balanced || *l != ',')
3616 {
3617 if (*l == END_OF_INSN)
3618 {
3619 if (paren_not_balanced)
3620 {
3621 if (!intel_syntax)
3622 as_bad (_("unbalanced parenthesis in operand %d."),
3623 i.operands + 1);
3624 else
3625 as_bad (_("unbalanced brackets in operand %d."),
3626 i.operands + 1);
3627 return NULL;
3628 }
3629 else
3630 break; /* we are done */
3631 }
3632 else if (!is_operand_char (*l) && !is_space_char (*l))
3633 {
3634 as_bad (_("invalid character %s in operand %d"),
3635 output_invalid (*l),
3636 i.operands + 1);
3637 return NULL;
3638 }
3639 if (!intel_syntax)
3640 {
3641 if (*l == '(')
3642 ++paren_not_balanced;
3643 if (*l == ')')
3644 --paren_not_balanced;
3645 }
3646 else
3647 {
3648 if (*l == '[')
3649 ++paren_not_balanced;
3650 if (*l == ']')
3651 --paren_not_balanced;
3652 }
3653 l++;
3654 }
3655 if (l != token_start)
3656 { /* Yes, we've read in another operand. */
3657 unsigned int operand_ok;
3658 this_operand = i.operands++;
3659 i.types[this_operand].bitfield.unspecified = 1;
3660 if (i.operands > MAX_OPERANDS)
3661 {
3662 as_bad (_("spurious operands; (%d operands/instruction max)"),
3663 MAX_OPERANDS);
3664 return NULL;
3665 }
3666 /* Now parse operand adding info to 'i' as we go along. */
3667 END_STRING_AND_SAVE (l);
3668
3669 if (intel_syntax)
3670 operand_ok =
3671 i386_intel_operand (token_start,
3672 intel_float_operand (mnemonic));
3673 else
3674 operand_ok = i386_att_operand (token_start);
3675
3676 RESTORE_END_STRING (l);
3677 if (!operand_ok)
3678 return NULL;
3679 }
3680 else
3681 {
3682 if (expecting_operand)
3683 {
3684 expecting_operand_after_comma:
3685 as_bad (_("expecting operand after ','; got nothing"));
3686 return NULL;
3687 }
3688 if (*l == ',')
3689 {
3690 as_bad (_("expecting operand before ','; got nothing"));
3691 return NULL;
3692 }
3693 }
3694
3695 /* Now *l must be either ',' or END_OF_INSN. */
3696 if (*l == ',')
3697 {
3698 if (*++l == END_OF_INSN)
3699 {
3700 /* Just skip it, if it's \n complain. */
3701 goto expecting_operand_after_comma;
3702 }
3703 expecting_operand = 1;
3704 }
3705 }
3706 return l;
3707 }
3708
3709 static void
3710 swap_2_operands (int xchg1, int xchg2)
3711 {
3712 union i386_op temp_op;
3713 i386_operand_type temp_type;
3714 enum bfd_reloc_code_real temp_reloc;
3715
3716 temp_type = i.types[xchg2];
3717 i.types[xchg2] = i.types[xchg1];
3718 i.types[xchg1] = temp_type;
3719 temp_op = i.op[xchg2];
3720 i.op[xchg2] = i.op[xchg1];
3721 i.op[xchg1] = temp_op;
3722 temp_reloc = i.reloc[xchg2];
3723 i.reloc[xchg2] = i.reloc[xchg1];
3724 i.reloc[xchg1] = temp_reloc;
3725 }
3726
3727 static void
3728 swap_operands (void)
3729 {
3730 switch (i.operands)
3731 {
3732 case 5:
3733 case 4:
3734 swap_2_operands (1, i.operands - 2);
3735 case 3:
3736 case 2:
3737 swap_2_operands (0, i.operands - 1);
3738 break;
3739 default:
3740 abort ();
3741 }
3742
3743 if (i.mem_operands == 2)
3744 {
3745 const seg_entry *temp_seg;
3746 temp_seg = i.seg[0];
3747 i.seg[0] = i.seg[1];
3748 i.seg[1] = temp_seg;
3749 }
3750 }
3751
3752 /* Try to ensure constant immediates are represented in the smallest
3753 opcode possible. */
3754 static void
3755 optimize_imm (void)
3756 {
3757 char guess_suffix = 0;
3758 int op;
3759
3760 if (i.suffix)
3761 guess_suffix = i.suffix;
3762 else if (i.reg_operands)
3763 {
3764 /* Figure out a suffix from the last register operand specified.
3765 We can't do this properly yet, i.e. excluding InOutPortReg,
3766 but the following works for instructions with immediates.
3767 In any case, we can't set i.suffix yet. */
3768 for (op = i.operands; --op >= 0;)
3769 if (i.types[op].bitfield.reg8)
3770 {
3771 guess_suffix = BYTE_MNEM_SUFFIX;
3772 break;
3773 }
3774 else if (i.types[op].bitfield.reg16)
3775 {
3776 guess_suffix = WORD_MNEM_SUFFIX;
3777 break;
3778 }
3779 else if (i.types[op].bitfield.reg32)
3780 {
3781 guess_suffix = LONG_MNEM_SUFFIX;
3782 break;
3783 }
3784 else if (i.types[op].bitfield.reg64)
3785 {
3786 guess_suffix = QWORD_MNEM_SUFFIX;
3787 break;
3788 }
3789 }
3790 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3791 guess_suffix = WORD_MNEM_SUFFIX;
3792
3793 for (op = i.operands; --op >= 0;)
3794 if (operand_type_check (i.types[op], imm))
3795 {
3796 switch (i.op[op].imms->X_op)
3797 {
3798 case O_constant:
3799 /* If a suffix is given, this operand may be shortened. */
3800 switch (guess_suffix)
3801 {
3802 case LONG_MNEM_SUFFIX:
3803 i.types[op].bitfield.imm32 = 1;
3804 i.types[op].bitfield.imm64 = 1;
3805 break;
3806 case WORD_MNEM_SUFFIX:
3807 i.types[op].bitfield.imm16 = 1;
3808 i.types[op].bitfield.imm32 = 1;
3809 i.types[op].bitfield.imm32s = 1;
3810 i.types[op].bitfield.imm64 = 1;
3811 break;
3812 case BYTE_MNEM_SUFFIX:
3813 i.types[op].bitfield.imm8 = 1;
3814 i.types[op].bitfield.imm8s = 1;
3815 i.types[op].bitfield.imm16 = 1;
3816 i.types[op].bitfield.imm32 = 1;
3817 i.types[op].bitfield.imm32s = 1;
3818 i.types[op].bitfield.imm64 = 1;
3819 break;
3820 }
3821
3822 /* If this operand is at most 16 bits, convert it
3823 to a signed 16 bit number before trying to see
3824 whether it will fit in an even smaller size.
3825 This allows a 16-bit operand such as $0xffe0 to
3826 be recognised as within Imm8S range. */
3827 if ((i.types[op].bitfield.imm16)
3828 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3829 {
3830 i.op[op].imms->X_add_number =
3831 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3832 }
3833 if ((i.types[op].bitfield.imm32)
3834 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3835 == 0))
3836 {
3837 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3838 ^ ((offsetT) 1 << 31))
3839 - ((offsetT) 1 << 31));
3840 }
3841 i.types[op]
3842 = operand_type_or (i.types[op],
3843 smallest_imm_type (i.op[op].imms->X_add_number));
3844
3845 /* We must avoid matching of Imm32 templates when 64bit
3846 only immediate is available. */
3847 if (guess_suffix == QWORD_MNEM_SUFFIX)
3848 i.types[op].bitfield.imm32 = 0;
3849 break;
3850
3851 case O_absent:
3852 case O_register:
3853 abort ();
3854
3855 /* Symbols and expressions. */
3856 default:
3857 /* Convert symbolic operand to proper sizes for matching, but don't
3858 prevent matching a set of insns that only supports sizes other
3859 than those matching the insn suffix. */
3860 {
3861 i386_operand_type mask, allowed;
3862 const insn_template *t;
3863
3864 operand_type_set (&mask, 0);
3865 operand_type_set (&allowed, 0);
3866
3867 for (t = current_templates->start;
3868 t < current_templates->end;
3869 ++t)
3870 allowed = operand_type_or (allowed,
3871 t->operand_types[op]);
3872 switch (guess_suffix)
3873 {
3874 case QWORD_MNEM_SUFFIX:
3875 mask.bitfield.imm64 = 1;
3876 mask.bitfield.imm32s = 1;
3877 break;
3878 case LONG_MNEM_SUFFIX:
3879 mask.bitfield.imm32 = 1;
3880 break;
3881 case WORD_MNEM_SUFFIX:
3882 mask.bitfield.imm16 = 1;
3883 break;
3884 case BYTE_MNEM_SUFFIX:
3885 mask.bitfield.imm8 = 1;
3886 break;
3887 default:
3888 break;
3889 }
3890 allowed = operand_type_and (mask, allowed);
3891 if (!operand_type_all_zero (&allowed))
3892 i.types[op] = operand_type_and (i.types[op], mask);
3893 }
3894 break;
3895 }
3896 }
3897 }
3898
3899 /* Try to use the smallest displacement type too. */
3900 static void
3901 optimize_disp (void)
3902 {
3903 int op;
3904
3905 for (op = i.operands; --op >= 0;)
3906 if (operand_type_check (i.types[op], disp))
3907 {
3908 if (i.op[op].disps->X_op == O_constant)
3909 {
3910 offsetT op_disp = i.op[op].disps->X_add_number;
3911
3912 if (i.types[op].bitfield.disp16
3913 && (op_disp & ~(offsetT) 0xffff) == 0)
3914 {
3915 /* If this operand is at most 16 bits, convert
3916 to a signed 16 bit number and don't use 64bit
3917 displacement. */
3918 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3919 i.types[op].bitfield.disp64 = 0;
3920 }
3921 if (i.types[op].bitfield.disp32
3922 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3923 {
3924 /* If this operand is at most 32 bits, convert
3925 to a signed 32 bit number and don't use 64bit
3926 displacement. */
3927 op_disp &= (((offsetT) 2 << 31) - 1);
3928 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3929 i.types[op].bitfield.disp64 = 0;
3930 }
3931 if (!op_disp && i.types[op].bitfield.baseindex)
3932 {
3933 i.types[op].bitfield.disp8 = 0;
3934 i.types[op].bitfield.disp16 = 0;
3935 i.types[op].bitfield.disp32 = 0;
3936 i.types[op].bitfield.disp32s = 0;
3937 i.types[op].bitfield.disp64 = 0;
3938 i.op[op].disps = 0;
3939 i.disp_operands--;
3940 }
3941 else if (flag_code == CODE_64BIT)
3942 {
3943 if (fits_in_signed_long (op_disp))
3944 {
3945 i.types[op].bitfield.disp64 = 0;
3946 i.types[op].bitfield.disp32s = 1;
3947 }
3948 if (i.prefix[ADDR_PREFIX]
3949 && fits_in_unsigned_long (op_disp))
3950 i.types[op].bitfield.disp32 = 1;
3951 }
3952 if ((i.types[op].bitfield.disp32
3953 || i.types[op].bitfield.disp32s
3954 || i.types[op].bitfield.disp16)
3955 && fits_in_signed_byte (op_disp))
3956 i.types[op].bitfield.disp8 = 1;
3957 }
3958 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3959 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3960 {
3961 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3962 i.op[op].disps, 0, i.reloc[op]);
3963 i.types[op].bitfield.disp8 = 0;
3964 i.types[op].bitfield.disp16 = 0;
3965 i.types[op].bitfield.disp32 = 0;
3966 i.types[op].bitfield.disp32s = 0;
3967 i.types[op].bitfield.disp64 = 0;
3968 }
3969 else
3970 /* We only support 64bit displacement on constants. */
3971 i.types[op].bitfield.disp64 = 0;
3972 }
3973 }
3974
3975 /* Check if operands are valid for the instruction. */
3976
3977 static int
3978 check_VecOperands (const insn_template *t)
3979 {
3980 /* Without VSIB byte, we can't have a vector register for index. */
3981 if (!t->opcode_modifier.vecsib
3982 && i.index_reg
3983 && (i.index_reg->reg_type.bitfield.regxmm
3984 || i.index_reg->reg_type.bitfield.regymm))
3985 {
3986 i.error = unsupported_vector_index_register;
3987 return 1;
3988 }
3989
3990 /* For VSIB byte, we need a vector register for index, and all vector
3991 registers must be distinct. */
3992 if (t->opcode_modifier.vecsib)
3993 {
3994 if (!i.index_reg
3995 || !((t->opcode_modifier.vecsib == VecSIB128
3996 && i.index_reg->reg_type.bitfield.regxmm)
3997 || (t->opcode_modifier.vecsib == VecSIB256
3998 && i.index_reg->reg_type.bitfield.regymm)))
3999 {
4000 i.error = invalid_vsib_address;
4001 return 1;
4002 }
4003
4004 gas_assert (i.reg_operands == 2);
4005 gas_assert (i.types[0].bitfield.regxmm
4006 || i.types[0].bitfield.regymm);
4007 gas_assert (i.types[2].bitfield.regxmm
4008 || i.types[2].bitfield.regymm);
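      /* In the VSIB templates (the AVX2 gathers) operand 0 is the mask
	 register, operand 1 the VSIB memory operand and operand 2 the
	 destination, which is why the distinctness checks below compare
	 operands 0 and 2 with the index register and with each other.  */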
4009
4010 if (operand_check == check_none)
4011 return 0;
4012 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4013 && register_number (i.op[2].regs) != register_number (i.index_reg)
4014 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4015 return 0;
4016 if (operand_check == check_error)
4017 {
4018 i.error = invalid_vector_register_set;
4019 return 1;
4020 }
4021 as_warn (_("mask, index, and destination registers should be distinct"));
4022 }
4023
4024 return 0;
4025 }
4026
4027 /* Check if operands are valid for the instruction. Update VEX
4028 operand types. */
4029
4030 static int
4031 VEX_check_operands (const insn_template *t)
4032 {
4033 if (!t->opcode_modifier.vex)
4034 return 0;
4035
4036 /* Only check VEX_Imm4, which must be the first operand. */
4037 if (t->operand_types[0].bitfield.vec_imm4)
4038 {
4039 if (i.op[0].imms->X_op != O_constant
4040 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4041 {
4042 i.error = bad_imm4;
4043 return 1;
4044 }
4045
4046 /* Turn off Imm8 so that update_imm won't complain. */
4047 i.types[0] = vec_imm4;
4048 }
4049
4050 return 0;
4051 }
4052
4053 static const insn_template *
4054 match_template (void)
4055 {
4056 /* Points to template once we've found it. */
4057 const insn_template *t;
4058 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4059 i386_operand_type overlap4;
4060 unsigned int found_reverse_match;
4061 i386_opcode_modifier suffix_check;
4062 i386_operand_type operand_types [MAX_OPERANDS];
4063 int addr_prefix_disp;
4064 unsigned int j;
4065 unsigned int found_cpu_match;
4066 unsigned int check_register;
4067 enum i386_error specific_error = 0;
4068
4069 #if MAX_OPERANDS != 5
4070 # error "MAX_OPERANDS must be 5."
4071 #endif
4072
4073 found_reverse_match = 0;
4074 addr_prefix_disp = -1;
4075
4076 memset (&suffix_check, 0, sizeof (suffix_check));
4077 if (i.suffix == BYTE_MNEM_SUFFIX)
4078 suffix_check.no_bsuf = 1;
4079 else if (i.suffix == WORD_MNEM_SUFFIX)
4080 suffix_check.no_wsuf = 1;
4081 else if (i.suffix == SHORT_MNEM_SUFFIX)
4082 suffix_check.no_ssuf = 1;
4083 else if (i.suffix == LONG_MNEM_SUFFIX)
4084 suffix_check.no_lsuf = 1;
4085 else if (i.suffix == QWORD_MNEM_SUFFIX)
4086 suffix_check.no_qsuf = 1;
4087 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4088 suffix_check.no_ldsuf = 1;
4089
4090 /* Must have right number of operands. */
4091 i.error = number_of_operands_mismatch;
4092
4093 for (t = current_templates->start; t < current_templates->end; t++)
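  /* Candidate templates are tried in the order they appear in the opcode
     table; the first one that survives every check below is used.  i.error
     remembers why the most recent candidate was rejected so that a useful
     diagnostic can be issued if nothing matches.  */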
4094 {
4095 addr_prefix_disp = -1;
4096
4097 if (i.operands != t->operands)
4098 continue;
4099
4100 /* Check processor support. */
4101 i.error = unsupported;
4102 found_cpu_match = (cpu_flags_match (t)
4103 == CPU_FLAGS_PERFECT_MATCH);
4104 if (!found_cpu_match)
4105 continue;
4106
4107 /* Check old gcc support. */
4108 i.error = old_gcc_only;
4109 if (!old_gcc && t->opcode_modifier.oldgcc)
4110 continue;
4111
4112 /* Check AT&T mnemonic. */
4113 i.error = unsupported_with_intel_mnemonic;
4114 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4115 continue;
4116
4117 /* Check AT&T/Intel syntax. */
4118 i.error = unsupported_syntax;
4119 if ((intel_syntax && t->opcode_modifier.attsyntax)
4120 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4121 continue;
4122
4123 /* Check the suffix, except for some instructions in intel mode. */
4124 i.error = invalid_instruction_suffix;
4125 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4126 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4127 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4128 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4129 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4130 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4131 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4132 continue;
4133
4134 if (!operand_size_match (t))
4135 continue;
4136
4137 for (j = 0; j < MAX_OPERANDS; j++)
4138 operand_types[j] = t->operand_types[j];
4139
4140 /* In general, don't allow 64-bit operands in 32-bit mode. */
4141 if (i.suffix == QWORD_MNEM_SUFFIX
4142 && flag_code != CODE_64BIT
4143 && (intel_syntax
4144 ? (!t->opcode_modifier.ignoresize
4145 && !intel_float_operand (t->name))
4146 : intel_float_operand (t->name) != 2)
4147 && ((!operand_types[0].bitfield.regmmx
4148 && !operand_types[0].bitfield.regxmm
4149 && !operand_types[0].bitfield.regymm)
4150 || (!operand_types[t->operands > 1].bitfield.regmmx
4151 && !operand_types[t->operands > 1].bitfield.regxmm
4152 && !operand_types[t->operands > 1].bitfield.regymm))
4153 && (t->base_opcode != 0x0fc7
4154 || t->extension_opcode != 1 /* cmpxchg8b */))
4155 continue;
4156
4157 /* In general, don't allow 32-bit operands on pre-386. */
4158 else if (i.suffix == LONG_MNEM_SUFFIX
4159 && !cpu_arch_flags.bitfield.cpui386
4160 && (intel_syntax
4161 ? (!t->opcode_modifier.ignoresize
4162 && !intel_float_operand (t->name))
4163 : intel_float_operand (t->name) != 2)
4164 && ((!operand_types[0].bitfield.regmmx
4165 && !operand_types[0].bitfield.regxmm)
4166 || (!operand_types[t->operands > 1].bitfield.regmmx
4167 && !operand_types[t->operands > 1].bitfield.regxmm)))
4168 continue;
4169
4170 /* Do not verify operands when there are none. */
4171 else
4172 {
4173 if (!t->operands)
4174 /* We've found a match; break out of loop. */
4175 break;
4176 }
4177
4178 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4179 into Disp32/Disp16/Disp32 operand. */
4180 if (i.prefix[ADDR_PREFIX] != 0)
4181 {
4182 /* There should be only one Disp operand. */
4183 switch (flag_code)
4184 {
4185 case CODE_16BIT:
4186 for (j = 0; j < MAX_OPERANDS; j++)
4187 {
4188 if (operand_types[j].bitfield.disp16)
4189 {
4190 addr_prefix_disp = j;
4191 operand_types[j].bitfield.disp32 = 1;
4192 operand_types[j].bitfield.disp16 = 0;
4193 break;
4194 }
4195 }
4196 break;
4197 case CODE_32BIT:
4198 for (j = 0; j < MAX_OPERANDS; j++)
4199 {
4200 if (operand_types[j].bitfield.disp32)
4201 {
4202 addr_prefix_disp = j;
4203 operand_types[j].bitfield.disp32 = 0;
4204 operand_types[j].bitfield.disp16 = 1;
4205 break;
4206 }
4207 }
4208 break;
4209 case CODE_64BIT:
4210 for (j = 0; j < MAX_OPERANDS; j++)
4211 {
4212 if (operand_types[j].bitfield.disp64)
4213 {
4214 addr_prefix_disp = j;
4215 operand_types[j].bitfield.disp64 = 0;
4216 operand_types[j].bitfield.disp32 = 1;
4217 break;
4218 }
4219 }
4220 break;
4221 }
4222 }
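      /* For example `mov 0x1234(%bp),%ax' assembled in 32bit code carries
	 the 0x67 address size prefix, so the template's Disp32 slot was
	 re-typed to Disp16 above to let the 16bit addressing form match.  */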
4223
4224 /* We check register size if needed. */
4225 check_register = t->opcode_modifier.checkregsize;
4226 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4227 switch (t->operands)
4228 {
4229 case 1:
4230 if (!operand_type_match (overlap0, i.types[0]))
4231 continue;
4232 break;
4233 case 2:
4234 /* xchg %eax, %eax is a special case. It is an alias for nop
4235 only in 32bit mode and we can use opcode 0x90. In 64bit
4236 mode, we can't use 0x90 for xchg %eax, %eax since it should
4237 zero-extend %eax to %rax. */
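	  /* (In 64bit mode `xchg %eax,%eax' is therefore encoded with the
	     regular 0x87 /r form, whose 32bit register write does
	     zero-extend.)  */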
4238 if (flag_code == CODE_64BIT
4239 && t->base_opcode == 0x90
4240 && operand_type_equal (&i.types [0], &acc32)
4241 && operand_type_equal (&i.types [1], &acc32))
4242 continue;
4243 if (i.swap_operand)
4244 {
4245 /* If we swap operand in encoding, we either match
4246 the next one or reverse direction of operands. */
4247 if (t->opcode_modifier.s)
4248 continue;
4249 else if (t->opcode_modifier.d)
4250 goto check_reverse;
4251 }
4252
4253 case 3:
4254 /* If we swap operand in encoding, we match the next one. */
4255 if (i.swap_operand && t->opcode_modifier.s)
4256 continue;
4257 case 4:
4258 case 5:
4259 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4260 if (!operand_type_match (overlap0, i.types[0])
4261 || !operand_type_match (overlap1, i.types[1])
4262 || (check_register
4263 && !operand_type_register_match (overlap0, i.types[0],
4264 operand_types[0],
4265 overlap1, i.types[1],
4266 operand_types[1])))
4267 {
4268 /* Check if other direction is valid ... */
4269 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4270 continue;
4271
4272 check_reverse:
4273 /* Try reversing direction of operands. */
4274 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4275 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4276 if (!operand_type_match (overlap0, i.types[0])
4277 || !operand_type_match (overlap1, i.types[1])
4278 || (check_register
4279 && !operand_type_register_match (overlap0,
4280 i.types[0],
4281 operand_types[1],
4282 overlap1,
4283 i.types[1],
4284 operand_types[0])))
4285 {
4286 /* Does not match either direction. */
4287 continue;
4288 }
4289 /* found_reverse_match holds which of D or FloatDR
4290 we've found. */
4291 if (t->opcode_modifier.d)
4292 found_reverse_match = Opcode_D;
4293 else if (t->opcode_modifier.floatd)
4294 found_reverse_match = Opcode_FloatD;
4295 else
4296 found_reverse_match = 0;
4297 if (t->opcode_modifier.floatr)
4298 found_reverse_match |= Opcode_FloatR;
4299 }
4300 else
4301 {
4302 /* Found a forward 2 operand match here. */
4303 switch (t->operands)
4304 {
4305 case 5:
4306 overlap4 = operand_type_and (i.types[4],
4307 operand_types[4]);
4308 case 4:
4309 overlap3 = operand_type_and (i.types[3],
4310 operand_types[3]);
4311 case 3:
4312 overlap2 = operand_type_and (i.types[2],
4313 operand_types[2]);
4314 break;
4315 }
4316
4317 switch (t->operands)
4318 {
4319 case 5:
4320 if (!operand_type_match (overlap4, i.types[4])
4321 || !operand_type_register_match (overlap3,
4322 i.types[3],
4323 operand_types[3],
4324 overlap4,
4325 i.types[4],
4326 operand_types[4]))
4327 continue;
4328 case 4:
4329 if (!operand_type_match (overlap3, i.types[3])
4330 || (check_register
4331 && !operand_type_register_match (overlap2,
4332 i.types[2],
4333 operand_types[2],
4334 overlap3,
4335 i.types[3],
4336 operand_types[3])))
4337 continue;
4338 case 3:
4339 /* Here we make use of the fact that there are no
4340 reverse match 3 operand instructions, and all 3
4341 operand instructions only need to be checked for
4342 register consistency between operands 2 and 3. */
4343 if (!operand_type_match (overlap2, i.types[2])
4344 || (check_register
4345 && !operand_type_register_match (overlap1,
4346 i.types[1],
4347 operand_types[1],
4348 overlap2,
4349 i.types[2],
4350 operand_types[2])))
4351 continue;
4352 break;
4353 }
4354 }
4355 /* Found either forward/reverse 2, 3 or 4 operand match here:
4356 slip through to break. */
4357 }
4358 if (!found_cpu_match)
4359 {
4360 found_reverse_match = 0;
4361 continue;
4362 }
4363
4364 /* Check if vector and VEX operands are valid. */
4365 if (check_VecOperands (t) || VEX_check_operands (t))
4366 {
4367 specific_error = i.error;
4368 continue;
4369 }
4370
4371 /* We've found a match; break out of loop. */
4372 break;
4373 }
4374
4375 if (t == current_templates->end)
4376 {
4377 /* We found no match. */
4378 const char *err_msg;
4379 switch (specific_error ? specific_error : i.error)
4380 {
4381 default:
4382 abort ();
4383 case operand_size_mismatch:
4384 err_msg = _("operand size mismatch");
4385 break;
4386 case operand_type_mismatch:
4387 err_msg = _("operand type mismatch");
4388 break;
4389 case register_type_mismatch:
4390 err_msg = _("register type mismatch");
4391 break;
4392 case number_of_operands_mismatch:
4393 err_msg = _("number of operands mismatch");
4394 break;
4395 case invalid_instruction_suffix:
4396 err_msg = _("invalid instruction suffix");
4397 break;
4398 case bad_imm4:
4399 err_msg = _("constant doesn't fit in 4 bits");
4400 break;
4401 case old_gcc_only:
4402 err_msg = _("only supported with old gcc");
4403 break;
4404 case unsupported_with_intel_mnemonic:
4405 err_msg = _("unsupported with Intel mnemonic");
4406 break;
4407 case unsupported_syntax:
4408 err_msg = _("unsupported syntax");
4409 break;
4410 case unsupported:
4411 as_bad (_("unsupported instruction `%s'"),
4412 current_templates->start->name);
4413 return NULL;
4414 case invalid_vsib_address:
4415 err_msg = _("invalid VSIB address");
4416 break;
4417 case invalid_vector_register_set:
4418 err_msg = _("mask, index, and destination registers must be distinct");
4419 break;
4420 case unsupported_vector_index_register:
4421 err_msg = _("unsupported vector index register");
4422 break;
4423 }
4424 as_bad (_("%s for `%s'"), err_msg,
4425 current_templates->start->name);
4426 return NULL;
4427 }
4428
4429 if (!quiet_warnings)
4430 {
4431 if (!intel_syntax
4432 && (i.types[0].bitfield.jumpabsolute
4433 != operand_types[0].bitfield.jumpabsolute))
4434 {
4435 as_warn (_("indirect %s without `*'"), t->name);
4436 }
4437
4438 if (t->opcode_modifier.isprefix
4439 && t->opcode_modifier.ignoresize)
4440 {
4441 /* Warn them that a data or address size prefix doesn't
4442 affect assembly of the next line of code. */
4443 as_warn (_("stand-alone `%s' prefix"), t->name);
4444 }
4445 }
4446
4447 /* Copy the template we found. */
4448 i.tm = *t;
4449
4450 if (addr_prefix_disp != -1)
4451 i.tm.operand_types[addr_prefix_disp]
4452 = operand_types[addr_prefix_disp];
4453
4454 if (found_reverse_match)
4455 {
4456 /* If we found a reverse match we must alter the opcode
4457 direction bit. found_reverse_match holds bits to change
4458 (different for int & float insns). */
4459
4460 i.tm.base_opcode ^= found_reverse_match;
4461
4462 i.tm.operand_types[0] = operand_types[1];
4463 i.tm.operand_types[1] = operand_types[0];
4464 }
4465
4466 return t;
4467 }
4468
4469 static int
4470 check_string (void)
4471 {
4472 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4473 if (i.tm.operand_types[mem_op].bitfield.esseg)
4474 {
4475 if (i.seg[0] != NULL && i.seg[0] != &es)
4476 {
4477 as_bad (_("`%s' operand %d must use `%ses' segment"),
4478 i.tm.name,
4479 mem_op + 1,
4480 register_prefix);
4481 return 0;
4482 }
4483 /* There's only ever one segment override allowed per instruction.
4484 This instruction possibly has a legal segment override on the
4485 second operand, so copy the segment to where non-string
4486 instructions store it, allowing common code. */
4487 i.seg[0] = i.seg[1];
4488 }
4489 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4490 {
4491 if (i.seg[1] != NULL && i.seg[1] != &es)
4492 {
4493 as_bad (_("`%s' operand %d must use `%ses' segment"),
4494 i.tm.name,
4495 mem_op + 2,
4496 register_prefix);
4497 return 0;
4498 }
4499 }
4500 return 1;
4501 }
4502
4503 static int
4504 process_suffix (void)
4505 {
4506 /* If matched instruction specifies an explicit instruction mnemonic
4507 suffix, use it. */
4508 if (i.tm.opcode_modifier.size16)
4509 i.suffix = WORD_MNEM_SUFFIX;
4510 else if (i.tm.opcode_modifier.size32)
4511 i.suffix = LONG_MNEM_SUFFIX;
4512 else if (i.tm.opcode_modifier.size64)
4513 i.suffix = QWORD_MNEM_SUFFIX;
4514 else if (i.reg_operands)
4515 {
4516 /* If there's no instruction mnemonic suffix we try to invent one
4517 based on register operands. */
4518 if (!i.suffix)
4519 {
4520 /* We take i.suffix from the last register operand specified.
4521 Destination register type is more significant than source
4522 register type. crc32 in SSE4.2 prefers source register
4523 type. */
4524 if (i.tm.base_opcode == 0xf20f38f1)
4525 {
4526 if (i.types[0].bitfield.reg16)
4527 i.suffix = WORD_MNEM_SUFFIX;
4528 else if (i.types[0].bitfield.reg32)
4529 i.suffix = LONG_MNEM_SUFFIX;
4530 else if (i.types[0].bitfield.reg64)
4531 i.suffix = QWORD_MNEM_SUFFIX;
4532 }
4533 else if (i.tm.base_opcode == 0xf20f38f0)
4534 {
4535 if (i.types[0].bitfield.reg8)
4536 i.suffix = BYTE_MNEM_SUFFIX;
4537 }
4538
4539 if (!i.suffix)
4540 {
4541 int op;
4542
4543 if (i.tm.base_opcode == 0xf20f38f1
4544 || i.tm.base_opcode == 0xf20f38f0)
4545 {
4546 /* We have to know the operand size for crc32. */
4547 as_bad (_("ambiguous memory operand size for `%s'"),
4548 i.tm.name);
4549 return 0;
4550 }
4551
4552 for (op = i.operands; --op >= 0;)
4553 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4554 {
4555 if (i.types[op].bitfield.reg8)
4556 {
4557 i.suffix = BYTE_MNEM_SUFFIX;
4558 break;
4559 }
4560 else if (i.types[op].bitfield.reg16)
4561 {
4562 i.suffix = WORD_MNEM_SUFFIX;
4563 break;
4564 }
4565 else if (i.types[op].bitfield.reg32)
4566 {
4567 i.suffix = LONG_MNEM_SUFFIX;
4568 break;
4569 }
4570 else if (i.types[op].bitfield.reg64)
4571 {
4572 i.suffix = QWORD_MNEM_SUFFIX;
4573 break;
4574 }
4575 }
4576 }
4577 }
4578 else if (i.suffix == BYTE_MNEM_SUFFIX)
4579 {
4580 if (intel_syntax
4581 && i.tm.opcode_modifier.ignoresize
4582 && i.tm.opcode_modifier.no_bsuf)
4583 i.suffix = 0;
4584 else if (!check_byte_reg ())
4585 return 0;
4586 }
4587 else if (i.suffix == LONG_MNEM_SUFFIX)
4588 {
4589 if (intel_syntax
4590 && i.tm.opcode_modifier.ignoresize
4591 && i.tm.opcode_modifier.no_lsuf)
4592 i.suffix = 0;
4593 else if (!check_long_reg ())
4594 return 0;
4595 }
4596 else if (i.suffix == QWORD_MNEM_SUFFIX)
4597 {
4598 if (intel_syntax
4599 && i.tm.opcode_modifier.ignoresize
4600 && i.tm.opcode_modifier.no_qsuf)
4601 i.suffix = 0;
4602 else if (!check_qword_reg ())
4603 return 0;
4604 }
4605 else if (i.suffix == WORD_MNEM_SUFFIX)
4606 {
4607 if (intel_syntax
4608 && i.tm.opcode_modifier.ignoresize
4609 && i.tm.opcode_modifier.no_wsuf)
4610 i.suffix = 0;
4611 else if (!check_word_reg ())
4612 return 0;
4613 }
4614 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4615 || i.suffix == YMMWORD_MNEM_SUFFIX)
4616 {
4617 /* Skip if the instruction has x/y suffix. match_template
4618 should check if it is a valid suffix. */
4619 }
4620 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4621 /* Do nothing if the instruction is going to ignore the prefix. */
4622 ;
4623 else
4624 abort ();
4625 }
4626 else if (i.tm.opcode_modifier.defaultsize
4627 && !i.suffix
4628 /* exclude fldenv/frstor/fsave/fstenv */
4629 && i.tm.opcode_modifier.no_ssuf)
4630 {
4631 i.suffix = stackop_size;
4632 }
4633 else if (intel_syntax
4634 && !i.suffix
4635 && (i.tm.operand_types[0].bitfield.jumpabsolute
4636 || i.tm.opcode_modifier.jumpbyte
4637 || i.tm.opcode_modifier.jumpintersegment
4638 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4639 && i.tm.extension_opcode <= 3)))
4640 {
4641 switch (flag_code)
4642 {
4643 case CODE_64BIT:
4644 if (!i.tm.opcode_modifier.no_qsuf)
4645 {
4646 i.suffix = QWORD_MNEM_SUFFIX;
4647 break;
4648 }
4649 case CODE_32BIT:
4650 if (!i.tm.opcode_modifier.no_lsuf)
4651 i.suffix = LONG_MNEM_SUFFIX;
4652 break;
4653 case CODE_16BIT:
4654 if (!i.tm.opcode_modifier.no_wsuf)
4655 i.suffix = WORD_MNEM_SUFFIX;
4656 break;
4657 }
4658 }
4659
4660 if (!i.suffix)
4661 {
4662 if (!intel_syntax)
4663 {
4664 if (i.tm.opcode_modifier.w)
4665 {
4666 as_bad (_("no instruction mnemonic suffix given and "
4667 "no register operands; can't size instruction"));
4668 return 0;
4669 }
4670 }
4671 else
4672 {
4673 unsigned int suffixes;
4674
4675 suffixes = !i.tm.opcode_modifier.no_bsuf;
4676 if (!i.tm.opcode_modifier.no_wsuf)
4677 suffixes |= 1 << 1;
4678 if (!i.tm.opcode_modifier.no_lsuf)
4679 suffixes |= 1 << 2;
4680 if (!i.tm.opcode_modifier.no_ldsuf)
4681 suffixes |= 1 << 3;
4682 if (!i.tm.opcode_modifier.no_ssuf)
4683 suffixes |= 1 << 4;
4684 if (!i.tm.opcode_modifier.no_qsuf)
4685 suffixes |= 1 << 5;
4686
4687 /* There is more than one possible suffix match. */
4688 if (i.tm.opcode_modifier.w
4689 || ((suffixes & (suffixes - 1))
4690 && !i.tm.opcode_modifier.defaultsize
4691 && !i.tm.opcode_modifier.ignoresize))
4692 {
4693 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4694 return 0;
4695 }
4696 }
4697 }
4698
4699 /* Change the opcode based on the operand size given by i.suffix.
4700 We don't need to change things for byte insns. */
4701
4702 if (i.suffix
4703 && i.suffix != BYTE_MNEM_SUFFIX
4704 && i.suffix != XMMWORD_MNEM_SUFFIX
4705 && i.suffix != YMMWORD_MNEM_SUFFIX)
4706 {
4707 /* It's not a byte, select word/dword operation. */
4708 if (i.tm.opcode_modifier.w)
4709 {
4710 if (i.tm.opcode_modifier.shortform)
4711 i.tm.base_opcode |= 8;
4712 else
4713 i.tm.base_opcode |= 1;
4714 }
4715
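	  /* E.g. `movb %al,(%ebx)' keeps the 0x88 base opcode while
	     `movl %eax,(%ebx)' becomes 0x89; the difference is exactly
	     this w bit.  */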
4716 /* Now select between word & dword operations via the operand
4717 size prefix, except for instructions that will ignore this
4718 prefix anyway. */
4719 if (i.tm.opcode_modifier.addrprefixop0)
4720 {
4721 /* The address size override prefix changes the size of the
4722 first operand. */
4723 if ((flag_code == CODE_32BIT
4724 && i.op->regs[0].reg_type.bitfield.reg16)
4725 || (flag_code != CODE_32BIT
4726 && i.op->regs[0].reg_type.bitfield.reg32))
4727 if (!add_prefix (ADDR_PREFIX_OPCODE))
4728 return 0;
4729 }
4730 else if (i.suffix != QWORD_MNEM_SUFFIX
4731 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4732 && !i.tm.opcode_modifier.ignoresize
4733 && !i.tm.opcode_modifier.floatmf
4734 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4735 || (flag_code == CODE_64BIT
4736 && i.tm.opcode_modifier.jumpbyte)))
4737 {
4738 unsigned int prefix = DATA_PREFIX_OPCODE;
4739
4740 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4741 prefix = ADDR_PREFIX_OPCODE;
4742
4743 if (!add_prefix (prefix))
4744 return 0;
4745 }
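	  /* E.g. `movw %ax,%bx' assembled in 32bit code receives a 0x66
	     data size prefix here, and `movl %eax,%ebx' assembled in
	     16bit code receives the same prefix, which there selects
	     32bit operands.  */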
4746
4747 /* Set mode64 for an operand. */
4748 if (i.suffix == QWORD_MNEM_SUFFIX
4749 && flag_code == CODE_64BIT
4750 && !i.tm.opcode_modifier.norex64)
4751 {
4752 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4753 need rex64. cmpxchg8b is also a special case. */
4754 if (! (i.operands == 2
4755 && i.tm.base_opcode == 0x90
4756 && i.tm.extension_opcode == None
4757 && operand_type_equal (&i.types [0], &acc64)
4758 && operand_type_equal (&i.types [1], &acc64))
4759 && ! (i.operands == 1
4760 && i.tm.base_opcode == 0xfc7
4761 && i.tm.extension_opcode == 1
4762 && !operand_type_check (i.types [0], reg)
4763 && operand_type_check (i.types [0], anymem)))
4764 i.rex |= REX_W;
4765 }
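	  /* E.g. `addq %rax,%rbx' picks up REX.W here, while
	     `xchg %rax,%rax' deliberately does not so that it can still
	     assemble to the single byte 0x90 nop.  */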
4766
4767 /* Size floating point instruction. */
4768 if (i.suffix == LONG_MNEM_SUFFIX)
4769 if (i.tm.opcode_modifier.floatmf)
4770 i.tm.base_opcode ^= 4;
4771 }
4772
4773 return 1;
4774 }
4775
4776 static int
4777 check_byte_reg (void)
4778 {
4779 int op;
4780
4781 for (op = i.operands; --op >= 0;)
4782 {
4783 /* If this is an eight bit register, it's OK. If it's the 16 or
4784 32 bit version of an eight bit register, we will just use the
4785 low portion, and that's OK too. */
4786 if (i.types[op].bitfield.reg8)
4787 continue;
4788
4789 /* I/O port address operands are OK too. */
4790 if (i.tm.operand_types[op].bitfield.inoutportreg)
4791 continue;
4792
4793 /* crc32 doesn't generate this warning. */
4794 if (i.tm.base_opcode == 0xf20f38f0)
4795 continue;
4796
4797 if ((i.types[op].bitfield.reg16
4798 || i.types[op].bitfield.reg32
4799 || i.types[op].bitfield.reg64)
4800 && i.op[op].regs->reg_num < 4
4801 /* Prohibit these changes in 64bit mode, since the lowering
4802 would be more complicated. */
4803 && flag_code != CODE_64BIT)
4804 {
4805 #if REGISTER_WARNINGS
4806 if (!quiet_warnings)
4807 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4808 register_prefix,
4809 (i.op[op].regs + (i.types[op].bitfield.reg16
4810 ? REGNAM_AL - REGNAM_AX
4811 : REGNAM_AL - REGNAM_EAX))->reg_name,
4812 register_prefix,
4813 i.op[op].regs->reg_name,
4814 i.suffix);
4815 #endif
4816 continue;
4817 }
4818 /* Any other register is bad. */
4819 if (i.types[op].bitfield.reg16
4820 || i.types[op].bitfield.reg32
4821 || i.types[op].bitfield.reg64
4822 || i.types[op].bitfield.regmmx
4823 || i.types[op].bitfield.regxmm
4824 || i.types[op].bitfield.regymm
4825 || i.types[op].bitfield.sreg2
4826 || i.types[op].bitfield.sreg3
4827 || i.types[op].bitfield.control
4828 || i.types[op].bitfield.debug
4829 || i.types[op].bitfield.test
4830 || i.types[op].bitfield.floatreg
4831 || i.types[op].bitfield.floatacc)
4832 {
4833 as_bad (_("`%s%s' not allowed with `%s%c'"),
4834 register_prefix,
4835 i.op[op].regs->reg_name,
4836 i.tm.name,
4837 i.suffix);
4838 return 0;
4839 }
4840 }
4841 return 1;
4842 }
4843
4844 static int
4845 check_long_reg (void)
4846 {
4847 int op;
4848
4849 for (op = i.operands; --op >= 0;)
4850 /* Reject eight bit registers, except where the template requires
4851 them. (eg. movzb) */
4852 if (i.types[op].bitfield.reg8
4853 && (i.tm.operand_types[op].bitfield.reg16
4854 || i.tm.operand_types[op].bitfield.reg32
4855 || i.tm.operand_types[op].bitfield.acc))
4856 {
4857 as_bad (_("`%s%s' not allowed with `%s%c'"),
4858 register_prefix,
4859 i.op[op].regs->reg_name,
4860 i.tm.name,
4861 i.suffix);
4862 return 0;
4863 }
4864 /* Warn if the e prefix on a general reg is missing. */
4865 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4866 && i.types[op].bitfield.reg16
4867 && (i.tm.operand_types[op].bitfield.reg32
4868 || i.tm.operand_types[op].bitfield.acc))
4869 {
4870 /* Prohibit these changes in the 64bit mode, since the
4871 lowering is more complicated. */
4872 if (flag_code == CODE_64BIT)
4873 {
4874 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4875 register_prefix, i.op[op].regs->reg_name,
4876 i.suffix);
4877 return 0;
4878 }
4879 #if REGISTER_WARNINGS
4880 else
4881 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4882 register_prefix,
4883 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4884 register_prefix,
4885 i.op[op].regs->reg_name,
4886 i.suffix);
4887 #endif
4888 }
4889 /* Warn if the r prefix on a general reg is present. */
4890 else if (i.types[op].bitfield.reg64
4891 && (i.tm.operand_types[op].bitfield.reg32
4892 || i.tm.operand_types[op].bitfield.acc))
4893 {
4894 if (intel_syntax
4895 && i.tm.opcode_modifier.toqword
4896 && !i.types[0].bitfield.regxmm)
4897 {
4898 /* Convert to QWORD. We want REX byte. */
4899 i.suffix = QWORD_MNEM_SUFFIX;
4900 }
4901 else
4902 {
4903 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4904 register_prefix, i.op[op].regs->reg_name,
4905 i.suffix);
4906 return 0;
4907 }
4908 }
4909 return 1;
4910 }
4911
4912 static int
4913 check_qword_reg (void)
4914 {
4915 int op;
4916
4917 for (op = i.operands; --op >= 0; )
4918 /* Reject eight bit registers, except where the template requires
4919 them. (eg. movzb) */
4920 if (i.types[op].bitfield.reg8
4921 && (i.tm.operand_types[op].bitfield.reg16
4922 || i.tm.operand_types[op].bitfield.reg32
4923 || i.tm.operand_types[op].bitfield.acc))
4924 {
4925 as_bad (_("`%s%s' not allowed with `%s%c'"),
4926 register_prefix,
4927 i.op[op].regs->reg_name,
4928 i.tm.name,
4929 i.suffix);
4930 return 0;
4931 }
4932 /* Warn if the r prefix on a general reg is missing. */
4933 else if ((i.types[op].bitfield.reg16
4934 || i.types[op].bitfield.reg32)
4935 && (i.tm.operand_types[op].bitfield.reg32
4936 || i.tm.operand_types[op].bitfield.acc))
4937 {
4938 /* In Intel syntax, templates marked ToDword may instead be
4939    converted to a 32bit operation; otherwise reject the register. */
4940 if (intel_syntax
4941 && i.tm.opcode_modifier.todword
4942 && !i.types[0].bitfield.regxmm)
4943 {
4944 /* Convert to DWORD. We don't want REX byte. */
4945 i.suffix = LONG_MNEM_SUFFIX;
4946 }
4947 else
4948 {
4949 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4950 register_prefix, i.op[op].regs->reg_name,
4951 i.suffix);
4952 return 0;
4953 }
4954 }
4955 return 1;
4956 }
4957
4958 static int
4959 check_word_reg (void)
4960 {
4961 int op;
4962 for (op = i.operands; --op >= 0;)
4963 /* Reject eight bit registers, except where the template requires
4964 them. (eg. movzb) */
4965 if (i.types[op].bitfield.reg8
4966 && (i.tm.operand_types[op].bitfield.reg16
4967 || i.tm.operand_types[op].bitfield.reg32
4968 || i.tm.operand_types[op].bitfield.acc))
4969 {
4970 as_bad (_("`%s%s' not allowed with `%s%c'"),
4971 register_prefix,
4972 i.op[op].regs->reg_name,
4973 i.tm.name,
4974 i.suffix);
4975 return 0;
4976 }
4977 /* Warn if the e prefix on a general reg is present. */
4978 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4979 && i.types[op].bitfield.reg32
4980 && (i.tm.operand_types[op].bitfield.reg16
4981 || i.tm.operand_types[op].bitfield.acc))
4982 {
4983 /* Prohibit these changes in the 64bit mode, since the
4984 lowering is more complicated. */
4985 if (flag_code == CODE_64BIT)
4986 {
4987 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4988 register_prefix, i.op[op].regs->reg_name,
4989 i.suffix);
4990 return 0;
4991 }
4992 #if REGISTER_WARNINGS
4993 	else
4994 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4995 register_prefix,
4996 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4997 register_prefix,
4998 i.op[op].regs->reg_name,
4999 i.suffix);
5000 #endif
5001 }
5002 return 1;
5003 }
5004
5005 static int
5006 update_imm (unsigned int j)
5007 {
5008 i386_operand_type overlap = i.types[j];
5009 if ((overlap.bitfield.imm8
5010 || overlap.bitfield.imm8s
5011 || overlap.bitfield.imm16
5012 || overlap.bitfield.imm32
5013 || overlap.bitfield.imm32s
5014 || overlap.bitfield.imm64)
5015 && !operand_type_equal (&overlap, &imm8)
5016 && !operand_type_equal (&overlap, &imm8s)
5017 && !operand_type_equal (&overlap, &imm16)
5018 && !operand_type_equal (&overlap, &imm32)
5019 && !operand_type_equal (&overlap, &imm32s)
5020 && !operand_type_equal (&overlap, &imm64))
5021 {
5022 if (i.suffix)
5023 {
5024 i386_operand_type temp;
5025
5026 operand_type_set (&temp, 0);
5027 if (i.suffix == BYTE_MNEM_SUFFIX)
5028 {
5029 temp.bitfield.imm8 = overlap.bitfield.imm8;
5030 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5031 }
5032 else if (i.suffix == WORD_MNEM_SUFFIX)
5033 temp.bitfield.imm16 = overlap.bitfield.imm16;
5034 else if (i.suffix == QWORD_MNEM_SUFFIX)
5035 {
5036 temp.bitfield.imm64 = overlap.bitfield.imm64;
5037 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5038 }
5039 else
5040 temp.bitfield.imm32 = overlap.bitfield.imm32;
5041 overlap = temp;
5042 }
5043 else if (operand_type_equal (&overlap, &imm16_32_32s)
5044 || operand_type_equal (&overlap, &imm16_32)
5045 || operand_type_equal (&overlap, &imm16_32s))
5046 {
5047 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5048 overlap = imm16;
5049 else
5050 overlap = imm32s;
5051 }
5052 if (!operand_type_equal (&overlap, &imm8)
5053 && !operand_type_equal (&overlap, &imm8s)
5054 && !operand_type_equal (&overlap, &imm16)
5055 && !operand_type_equal (&overlap, &imm32)
5056 && !operand_type_equal (&overlap, &imm32s)
5057 && !operand_type_equal (&overlap, &imm64))
5058 {
5059 as_bad (_("no instruction mnemonic suffix given; "
5060 "can't determine immediate size"));
5061 return 0;
5062 }
5063 }
5064 i.types[j] = overlap;
5065
5066 return 1;
5067 }
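/* For example, `addw $0x1234,%ax' is narrowed above to Imm16 because of
   the explicit `w' suffix; with no suffix the 16/32bit choice falls back
   on the current code size and any data size prefix.  */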
5068
5069 static int
5070 finalize_imm (void)
5071 {
5072 unsigned int j, n;
5073
5074 /* Update the first 2 immediate operands. */
5075 n = i.operands > 2 ? 2 : i.operands;
5076 if (n)
5077 {
5078 for (j = 0; j < n; j++)
5079 if (update_imm (j) == 0)
5080 return 0;
5081
5082 /* The 3rd operand can't be an immediate operand. */
5083 gas_assert (operand_type_check (i.types[2], imm) == 0);
5084 }
5085
5086 return 1;
5087 }
5088
5089 static int
5090 bad_implicit_operand (int xmm)
5091 {
5092 const char *ireg = xmm ? "xmm0" : "ymm0";
5093
5094 if (intel_syntax)
5095 as_bad (_("the last operand of `%s' must be `%s%s'"),
5096 i.tm.name, register_prefix, ireg);
5097 else
5098 as_bad (_("the first operand of `%s' must be `%s%s'"),
5099 i.tm.name, register_prefix, ireg);
5100 return 0;
5101 }
5102
5103 static int
5104 process_operands (void)
5105 {
5106 /* Default segment register this instruction will use for memory
5107 accesses. 0 means unknown. This is only for optimizing out
5108 unnecessary segment overrides. */
5109 const seg_entry *default_seg = 0;
5110
5111 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5112 {
5113 unsigned int dupl = i.operands;
5114 unsigned int dest = dupl - 1;
5115 unsigned int j;
5116
5117 /* The destination must be an xmm register. */
5118 gas_assert (i.reg_operands
5119 && MAX_OPERANDS > dupl
5120 && operand_type_equal (&i.types[dest], &regxmm));
5121
5122 if (i.tm.opcode_modifier.firstxmm0)
5123 {
5124 /* The first operand is implicit and must be xmm0. */
5125 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5126 if (register_number (i.op[0].regs) != 0)
5127 return bad_implicit_operand (1);
5128
5129 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5130 {
5131 /* Keep xmm0 for instructions with VEX prefix and 3
5132 sources. */
5133 goto duplicate;
5134 }
5135 else
5136 {
5137 /* We remove the first xmm0 and keep the number of
5138 operands unchanged, which in fact duplicates the
5139 destination. */
5140 for (j = 1; j < i.operands; j++)
5141 {
5142 i.op[j - 1] = i.op[j];
5143 i.types[j - 1] = i.types[j];
5144 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5145 }
5146 }
5147 }
5148 else if (i.tm.opcode_modifier.implicit1stxmm0)
5149 {
5150 gas_assert ((MAX_OPERANDS - 1) > dupl
5151 && (i.tm.opcode_modifier.vexsources
5152 == VEX3SOURCES));
5153
5154 /* Add the implicit xmm0 for instructions with VEX prefix
5155 and 3 sources. */
5156 for (j = i.operands; j > 0; j--)
5157 {
5158 i.op[j] = i.op[j - 1];
5159 i.types[j] = i.types[j - 1];
5160 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5161 }
5162 i.op[0].regs
5163 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5164 i.types[0] = regxmm;
5165 i.tm.operand_types[0] = regxmm;
5166
5167 i.operands += 2;
5168 i.reg_operands += 2;
5169 i.tm.operands += 2;
5170
5171 dupl++;
5172 dest++;
5173 i.op[dupl] = i.op[dest];
5174 i.types[dupl] = i.types[dest];
5175 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5176 }
5177 else
5178 {
5179 duplicate:
5180 i.operands++;
5181 i.reg_operands++;
5182 i.tm.operands++;
5183
5184 i.op[dupl] = i.op[dest];
5185 i.types[dupl] = i.types[dest];
5186 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5187 }
5188
5189 if (i.tm.opcode_modifier.immext)
5190 process_immext ();
5191 }
5192 else if (i.tm.opcode_modifier.firstxmm0)
5193 {
5194 unsigned int j;
5195
5196 /* The first operand is implicit and must be xmm0/ymm0. */
5197 gas_assert (i.reg_operands
5198 && (operand_type_equal (&i.types[0], &regxmm)
5199 || operand_type_equal (&i.types[0], &regymm)));
5200 if (register_number (i.op[0].regs) != 0)
5201 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5202
5203 for (j = 1; j < i.operands; j++)
5204 {
5205 i.op[j - 1] = i.op[j];
5206 i.types[j - 1] = i.types[j];
5207
5208 /* We need to adjust fields in i.tm since they are used by
5209 build_modrm_byte. */
5210 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5211 }
5212
5213 i.operands--;
5214 i.reg_operands--;
5215 i.tm.operands--;
5216 }
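  /* Typical FirstXmm0 templates are the non-VEX SSE4.1 blend instructions
     (blendvps and friends), whose implicit mask operand must be %xmm0;
     once verified, the operand is dropped above so that the remaining
     operands line up with the template.  */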
5217 else if (i.tm.opcode_modifier.regkludge)
5218 {
5219 /* The imul $imm, %reg instruction is converted into
5220 imul $imm, %reg, %reg, and the clr %reg instruction
5221 is converted into xor %reg, %reg. */
5222
5223 unsigned int first_reg_op;
5224
5225 if (operand_type_check (i.types[0], reg))
5226 first_reg_op = 0;
5227 else
5228 first_reg_op = 1;
5229 /* Pretend we saw the extra register operand. */
5230 gas_assert (i.reg_operands == 1
5231 && i.op[first_reg_op + 1].regs == 0);
5232 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5233 i.types[first_reg_op + 1] = i.types[first_reg_op];
5234 i.operands++;
5235 i.reg_operands++;
5236 }
5237
5238 if (i.tm.opcode_modifier.shortform)
5239 {
5240 if (i.types[0].bitfield.sreg2
5241 || i.types[0].bitfield.sreg3)
5242 {
5243 if (i.tm.base_opcode == POP_SEG_SHORT
5244 && i.op[0].regs->reg_num == 1)
5245 {
5246 as_bad (_("you can't `pop %scs'"), register_prefix);
5247 return 0;
5248 }
5249 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5250 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5251 i.rex |= REX_B;
5252 }
5253 else
5254 {
5255 /* The register or float register operand is in operand
5256 0 or 1. */
5257 unsigned int op;
5258
5259 if (i.types[0].bitfield.floatreg
5260 || operand_type_check (i.types[0], reg))
5261 op = 0;
5262 else
5263 op = 1;
5264 /* Register goes in low 3 bits of opcode. */
5265 i.tm.base_opcode |= i.op[op].regs->reg_num;
5266 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5267 i.rex |= REX_B;
5268 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5269 {
5270 /* Warn about some common errors, but press on regardless.
5271 The first case can be generated by gcc (<= 2.8.1). */
5272 if (i.operands == 2)
5273 {
5274 /* Reversed arguments on faddp, fsubp, etc. */
5275 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5276 register_prefix, i.op[!intel_syntax].regs->reg_name,
5277 register_prefix, i.op[intel_syntax].regs->reg_name);
5278 }
5279 else
5280 {
5281 /* Extraneous `l' suffix on fp insn. */
5282 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5283 register_prefix, i.op[0].regs->reg_name);
5284 }
5285 }
5286 }
5287 }
5288 else if (i.tm.opcode_modifier.modrm)
5289 {
5290 /* The opcode is completed (modulo i.tm.extension_opcode which
5291 must be put into the modrm byte). Now, we make the modrm and
5292 index base bytes based on all the info we've collected. */
5293
5294 default_seg = build_modrm_byte ();
5295 }
5296 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5297 {
5298 default_seg = &ds;
5299 }
5300 else if (i.tm.opcode_modifier.isstring)
5301 {
5302 /* For the string instructions that allow a segment override
5303 on one of their operands, the default segment is ds. */
5304 default_seg = &ds;
5305 }
5306
5307 if (i.tm.base_opcode == 0x8d /* lea */
5308 && i.seg[0]
5309 && !quiet_warnings)
5310 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5311
5312 /* If a segment was explicitly specified, and the specified segment
5313 is not the default, use an opcode prefix to select it. If we
5314 never figured out what the default segment is, then default_seg
5315 will be zero at this point, and the specified segment prefix will
5316 always be used. */
5317 if ((i.seg[0]) && (i.seg[0] != default_seg))
5318 {
5319 if (!add_prefix (i.seg[0]->seg_prefix))
5320 return 0;
5321 }
5322 return 1;
5323 }
5324
5325 static const seg_entry *
5326 build_modrm_byte (void)
5327 {
5328 const seg_entry *default_seg = 0;
5329 unsigned int source, dest;
5330 int vex_3_sources;
5331
5332 /* The first operand of instructions with VEX prefix and 3 sources
5333 must be VEX_Imm4. */
5334 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5335 if (vex_3_sources)
5336 {
5337 unsigned int nds, reg_slot;
5338 expressionS *exp;
5339
5340 if (i.tm.opcode_modifier.veximmext
5341 && i.tm.opcode_modifier.immext)
5342 {
5343 dest = i.operands - 2;
5344 gas_assert (dest == 3);
5345 }
5346 else
5347 dest = i.operands - 1;
5348 nds = dest - 1;
5349
5350 /* There are 2 kinds of instructions:
5351 1. 5 operands: 4 register operands or 3 register operands
5352 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5353 VexW0 or VexW1. The destination must be either XMM or YMM
5354 register.
5355 2. 4 operands: 4 register operands or 3 register operands
5356 plus 1 memory operand, VexXDS, and VexImmExt */
5357 gas_assert ((i.reg_operands == 4
5358 || (i.reg_operands == 3 && i.mem_operands == 1))
5359 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5360 && (i.tm.opcode_modifier.veximmext
5361 || (i.imm_operands == 1
5362 && i.types[0].bitfield.vec_imm4
5363 && (i.tm.opcode_modifier.vexw == VEXW0
5364 || i.tm.opcode_modifier.vexw == VEXW1)
5365 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5366 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5367
5368 if (i.imm_operands == 0)
5369 {
5370 /* When there is no immediate operand, generate an 8bit
5371 immediate operand to encode the first operand. */
5372 exp = &im_expressions[i.imm_operands++];
5373 i.op[i.operands].imms = exp;
5374 i.types[i.operands] = imm8;
5375 i.operands++;
5376 /* If VexW1 is set, the first operand is the source and
5377 the second operand is encoded in the immediate operand. */
5378 if (i.tm.opcode_modifier.vexw == VEXW1)
5379 {
5380 source = 0;
5381 reg_slot = 1;
5382 }
5383 else
5384 {
5385 source = 1;
5386 reg_slot = 0;
5387 }
5388
5389 /* FMA swaps REG and NDS. */
5390 if (i.tm.cpu_flags.bitfield.cpufma)
5391 {
5392 unsigned int tmp;
5393 tmp = reg_slot;
5394 reg_slot = nds;
5395 nds = tmp;
5396 }
5397
5398 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5399 &regxmm)
5400 || operand_type_equal (&i.tm.operand_types[reg_slot],
5401 &regymm));
5402 exp->X_op = O_constant;
5403 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5404 }
5405 else
5406 {
5407 unsigned int imm_slot;
5408
5409 if (i.tm.opcode_modifier.vexw == VEXW0)
5410 {
5411 /* If VexW0 is set, the third operand is the source and
5412 the second operand is encoded in the immediate
5413 operand. */
5414 source = 2;
5415 reg_slot = 1;
5416 }
5417 else
5418 {
5419 /* VexW1 is set, the second operand is the source and
5420 the third operand is encoded in the immediate
5421 operand. */
5422 source = 1;
5423 reg_slot = 2;
5424 }
5425
5426 if (i.tm.opcode_modifier.immext)
5427 {
5428 /* When ImmExt is set, the immediate byte is the last
5429 operand. */
5430 imm_slot = i.operands - 1;
5431 source--;
5432 reg_slot--;
5433 }
5434 else
5435 {
5436 imm_slot = 0;
5437
5438 /* Turn on Imm8 so that output_imm will generate it. */
5439 i.types[imm_slot].bitfield.imm8 = 1;
5440 }
5441
5442 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5443 &regxmm)
5444 || operand_type_equal (&i.tm.operand_types[reg_slot],
5445 &regymm));
5446 i.op[imm_slot].imms->X_add_number
5447 |= register_number (i.op[reg_slot].regs) << 4;
5448 }
5449
5450 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5451 || operand_type_equal (&i.tm.operand_types[nds],
5452 &regymm));
5453 i.vex.register_specifier = i.op[nds].regs;
5454 }
5455 else
5456 source = dest = 0;
5457
5458 /* i.reg_operands MUST be the number of real register operands;
5459 implicit registers do not count. If there are 3 register
5460 operands, it must be an instruction with VexNDS. For an
5461 instruction with VexNDD, the destination register is encoded
5462 in VEX prefix. If there are 4 register operands, it must be
5463 an instruction with VEX prefix and 3 sources. */
5464 if (i.mem_operands == 0
5465 && ((i.reg_operands == 2
5466 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5467 || (i.reg_operands == 3
5468 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5469 || (i.reg_operands == 4 && vex_3_sources)))
5470 {
5471 switch (i.operands)
5472 {
5473 case 2:
5474 source = 0;
5475 break;
5476 case 3:
5477 /* When there are 3 operands, one of them may be immediate,
5478 which may be the first or the last operand. Otherwise,
5479 the first operand must be the shift count register (cl) or it
5480 is an instruction with VexNDS. */
5481 gas_assert (i.imm_operands == 1
5482 || (i.imm_operands == 0
5483 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5484 || i.types[0].bitfield.shiftcount)));
5485 if (operand_type_check (i.types[0], imm)
5486 || i.types[0].bitfield.shiftcount)
5487 source = 1;
5488 else
5489 source = 0;
5490 break;
5491 case 4:
5492 /* When there are 4 operands, the first two must be 8bit
5493 immediate operands. The source operand will be the 3rd
5494 one.
5495
5496 For instructions with VexNDS, if the first operand is
5497 an imm8, the source operand is the 2nd one. If the last
5498 operand is imm8, the source operand is the first one. */
5499 gas_assert ((i.imm_operands == 2
5500 && i.types[0].bitfield.imm8
5501 && i.types[1].bitfield.imm8)
5502 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5503 && i.imm_operands == 1
5504 && (i.types[0].bitfield.imm8
5505 || i.types[i.operands - 1].bitfield.imm8)));
5506 if (i.imm_operands == 2)
5507 source = 2;
5508 else
5509 {
5510 if (i.types[0].bitfield.imm8)
5511 source = 1;
5512 else
5513 source = 0;
5514 }
5515 break;
5516 case 5:
5517 break;
5518 default:
5519 abort ();
5520 }
5521
5522 if (!vex_3_sources)
5523 {
5524 dest = source + 1;
5525
5526 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5527 {
5528 /* For instructions with VexNDS, the register-only
5529 source operand must be 32/64bit integer, XMM or
5530 YMM register. It is encoded in VEX prefix. We
5531 need to clear RegMem bit before calling
5532 operand_type_equal. */
5533
5534 i386_operand_type op;
5535 unsigned int vvvv;
5536
5537 /* Check register-only source operand when two source
5538 operands are swapped. */
5539 if (!i.tm.operand_types[source].bitfield.baseindex
5540 && i.tm.operand_types[dest].bitfield.baseindex)
5541 {
5542 vvvv = source;
5543 source = dest;
5544 }
5545 else
5546 vvvv = dest;
5547
5548 op = i.tm.operand_types[vvvv];
5549 op.bitfield.regmem = 0;
5550 if ((dest + 1) >= i.operands
5551 || (op.bitfield.reg32 != 1
5552 && op.bitfield.reg64 != 1
5553 && !operand_type_equal (&op, &regxmm)
5554 && !operand_type_equal (&op, &regymm)))
5555 abort ();
5556 i.vex.register_specifier = i.op[vvvv].regs;
5557 dest++;
5558 }
5559 }
5560
5561 i.rm.mode = 3;
5562 /* One of the register operands will be encoded in the i.rm.reg
5563 field, the other in the combined i.rm.mode and i.rm.regmem
5564 fields. If no form of this instruction supports a memory
5565 destination operand, then we assume the source operand may
5566 sometimes be a memory operand and so we need to store the
5567 destination in the i.rm.reg field. */
5568 if (!i.tm.operand_types[dest].bitfield.regmem
5569 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5570 {
5571 i.rm.reg = i.op[dest].regs->reg_num;
5572 i.rm.regmem = i.op[source].regs->reg_num;
5573 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5574 i.rex |= REX_R;
5575 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5576 i.rex |= REX_B;
5577 }
5578 else
5579 {
5580 i.rm.reg = i.op[source].regs->reg_num;
5581 i.rm.regmem = i.op[dest].regs->reg_num;
5582 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5583 i.rex |= REX_B;
5584 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5585 i.rex |= REX_R;
5586 }
5587 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5588 {
5589 if (!i.types[0].bitfield.control
5590 && !i.types[1].bitfield.control)
5591 abort ();
5592 i.rex &= ~(REX_R | REX_B);
5593 add_prefix (LOCK_PREFIX_OPCODE);
5594 }
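	  /* This is the AMD documented way of reaching %cr8 from 32bit
	     code: a LOCK prefix on a control register move selects CR8
	     instead of CR0.  */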
5595 }
5596 else
5597 { /* If it's not 2 reg operands... */
5598 unsigned int mem;
5599
5600 if (i.mem_operands)
5601 {
5602 unsigned int fake_zero_displacement = 0;
5603 unsigned int op;
5604
5605 for (op = 0; op < i.operands; op++)
5606 if (operand_type_check (i.types[op], anymem))
5607 break;
5608 gas_assert (op < i.operands);
5609
5610 if (i.tm.opcode_modifier.vecsib)
5611 {
5612 if (i.index_reg->reg_num == RegEiz
5613 || i.index_reg->reg_num == RegRiz)
5614 abort ();
5615
5616 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5617 if (!i.base_reg)
5618 {
5619 i.sib.base = NO_BASE_REGISTER;
5620 i.sib.scale = i.log2_scale_factor;
5621 i.types[op].bitfield.disp8 = 0;
5622 i.types[op].bitfield.disp16 = 0;
5623 i.types[op].bitfield.disp64 = 0;
5624 if (flag_code != CODE_64BIT)
5625 {
5626 /* Must be 32 bit */
5627 i.types[op].bitfield.disp32 = 1;
5628 i.types[op].bitfield.disp32s = 0;
5629 }
5630 else
5631 {
5632 i.types[op].bitfield.disp32 = 0;
5633 i.types[op].bitfield.disp32s = 1;
5634 }
5635 }
5636 i.sib.index = i.index_reg->reg_num;
5637 if ((i.index_reg->reg_flags & RegRex) != 0)
5638 i.rex |= REX_X;
5639 }
5640
5641 default_seg = &ds;
5642
5643 if (i.base_reg == 0)
5644 {
5645 i.rm.mode = 0;
5646 if (!i.disp_operands)
5647 {
5648 fake_zero_displacement = 1;
5649 /* Instructions with VSIB byte need 32bit displacement
5650 if there is no base register. */
5651 if (i.tm.opcode_modifier.vecsib)
5652 i.types[op].bitfield.disp32 = 1;
5653 }
5654 if (i.index_reg == 0)
5655 {
5656 gas_assert (!i.tm.opcode_modifier.vecsib);
5657 /* Operand is just <disp> */
5658 if (flag_code == CODE_64BIT)
5659 {
5660 /* In 64bit mode the 32bit absolute addressing form is
5661 replaced by RIP relative addressing, and absolute
5662 addressing is instead encoded via one of the
5663 redundant SIB forms. */
5664 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5665 i.sib.base = NO_BASE_REGISTER;
5666 i.sib.index = NO_INDEX_REGISTER;
5667 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5668 ? disp32s : disp32);
5669 }
5670 else if ((flag_code == CODE_16BIT)
5671 ^ (i.prefix[ADDR_PREFIX] != 0))
5672 {
5673 i.rm.regmem = NO_BASE_REGISTER_16;
5674 i.types[op] = disp16;
5675 }
5676 else
5677 {
5678 i.rm.regmem = NO_BASE_REGISTER;
5679 i.types[op] = disp32;
5680 }
5681 }
5682 else if (!i.tm.opcode_modifier.vecsib)
5683 {
5684 /* !i.base_reg && i.index_reg */
5685 if (i.index_reg->reg_num == RegEiz
5686 || i.index_reg->reg_num == RegRiz)
5687 i.sib.index = NO_INDEX_REGISTER;
5688 else
5689 i.sib.index = i.index_reg->reg_num;
5690 i.sib.base = NO_BASE_REGISTER;
5691 i.sib.scale = i.log2_scale_factor;
5692 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5693 i.types[op].bitfield.disp8 = 0;
5694 i.types[op].bitfield.disp16 = 0;
5695 i.types[op].bitfield.disp64 = 0;
5696 if (flag_code != CODE_64BIT)
5697 {
5698 /* Must be 32 bit */
5699 i.types[op].bitfield.disp32 = 1;
5700 i.types[op].bitfield.disp32s = 0;
5701 }
5702 else
5703 {
5704 i.types[op].bitfield.disp32 = 0;
5705 i.types[op].bitfield.disp32s = 1;
5706 }
5707 if ((i.index_reg->reg_flags & RegRex) != 0)
5708 i.rex |= REX_X;
5709 }
5710 }
5711 /* RIP addressing for 64bit mode. */
5712 else if (i.base_reg->reg_num == RegRip ||
5713 i.base_reg->reg_num == RegEip)
5714 {
5715 gas_assert (!i.tm.opcode_modifier.vecsib);
5716 i.rm.regmem = NO_BASE_REGISTER;
5717 i.types[op].bitfield.disp8 = 0;
5718 i.types[op].bitfield.disp16 = 0;
5719 i.types[op].bitfield.disp32 = 0;
5720 i.types[op].bitfield.disp32s = 1;
5721 i.types[op].bitfield.disp64 = 0;
5722 i.flags[op] |= Operand_PCrel;
5723 if (! i.disp_operands)
5724 fake_zero_displacement = 1;
5725 }
5726 else if (i.base_reg->reg_type.bitfield.reg16)
5727 {
5728 gas_assert (!i.tm.opcode_modifier.vecsib);
5729 switch (i.base_reg->reg_num)
5730 {
5731 case 3: /* (%bx) */
5732 if (i.index_reg == 0)
5733 i.rm.regmem = 7;
5734 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5735 i.rm.regmem = i.index_reg->reg_num - 6;
5736 break;
5737 case 5: /* (%bp) */
5738 default_seg = &ss;
5739 if (i.index_reg == 0)
5740 {
5741 i.rm.regmem = 6;
5742 if (operand_type_check (i.types[op], disp) == 0)
5743 {
5744 /* fake (%bp) into 0(%bp) */
5745 i.types[op].bitfield.disp8 = 1;
5746 fake_zero_displacement = 1;
5747 }
5748 }
5749 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5750 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5751 break;
5752 default: /* (%si) -> 4 or (%di) -> 5 */
5753 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5754 }
5755 i.rm.mode = mode_from_disp_size (i.types[op]);
5756 }
5757 else /* i.base_reg and 32/64 bit mode */
5758 {
5759 if (flag_code == CODE_64BIT
5760 && operand_type_check (i.types[op], disp))
5761 {
5762 i386_operand_type temp;
5763 operand_type_set (&temp, 0);
5764 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5765 i.types[op] = temp;
5766 if (i.prefix[ADDR_PREFIX] == 0)
5767 i.types[op].bitfield.disp32s = 1;
5768 else
5769 i.types[op].bitfield.disp32 = 1;
5770 }
5771
5772 if (!i.tm.opcode_modifier.vecsib)
5773 i.rm.regmem = i.base_reg->reg_num;
5774 if ((i.base_reg->reg_flags & RegRex) != 0)
5775 i.rex |= REX_B;
5776 i.sib.base = i.base_reg->reg_num;
5777 /* x86-64 ignores REX prefix bit here to avoid decoder
5778 complications. */
5779 if (!(i.base_reg->reg_flags & RegRex)
5780 && (i.base_reg->reg_num == EBP_REG_NUM
5781 || i.base_reg->reg_num == ESP_REG_NUM))
5782 default_seg = &ss;
5783 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5784 {
5785 fake_zero_displacement = 1;
5786 i.types[op].bitfield.disp8 = 1;
5787 }
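	      /* (With mod=00 a base register encoding of 101 means
		 `no base, disp32', so a bare `(%ebp)' or `(%r13)' has to
		 be encoded as mod=01 with an explicit zero displacement
		 byte.)  */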
5788 i.sib.scale = i.log2_scale_factor;
5789 if (i.index_reg == 0)
5790 {
5791 gas_assert (!i.tm.opcode_modifier.vecsib);
5792 /* <disp>(%esp) becomes two byte modrm with no index
5793 register. We've already stored the code for esp
5794 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5795 Any base register besides %esp will not use the
5796 extra modrm byte. */
5797 i.sib.index = NO_INDEX_REGISTER;
5798 }
5799 else if (!i.tm.opcode_modifier.vecsib)
5800 {
5801 if (i.index_reg->reg_num == RegEiz
5802 || i.index_reg->reg_num == RegRiz)
5803 i.sib.index = NO_INDEX_REGISTER;
5804 else
5805 i.sib.index = i.index_reg->reg_num;
5806 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5807 if ((i.index_reg->reg_flags & RegRex) != 0)
5808 i.rex |= REX_X;
5809 }
5810
5811 if (i.disp_operands
5812 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5813 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5814 i.rm.mode = 0;
5815 else
5816 {
5817 if (!fake_zero_displacement
5818 && !i.disp_operands
5819 && i.disp_encoding)
5820 {
5821 fake_zero_displacement = 1;
5822 if (i.disp_encoding == disp_encoding_8bit)
5823 i.types[op].bitfield.disp8 = 1;
5824 else
5825 i.types[op].bitfield.disp32 = 1;
5826 }
5827 i.rm.mode = mode_from_disp_size (i.types[op]);
5828 }
5829 }
5830
5831 if (fake_zero_displacement)
5832 {
5833 /* Fakes a zero displacement assuming that i.types[op]
5834 holds the correct displacement size. */
5835 expressionS *exp;
5836
5837 gas_assert (i.op[op].disps == 0);
5838 exp = &disp_expressions[i.disp_operands++];
5839 i.op[op].disps = exp;
5840 exp->X_op = O_constant;
5841 exp->X_add_number = 0;
5842 exp->X_add_symbol = (symbolS *) 0;
5843 exp->X_op_symbol = (symbolS *) 0;
5844 }
5845
5846 mem = op;
5847 }
5848 else
5849 mem = ~0;
5850
5851 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5852 {
5853 if (operand_type_check (i.types[0], imm))
5854 i.vex.register_specifier = NULL;
5855 else
5856 {
5857 /* VEX.vvvv encodes one of the sources when the first
5858 operand is not an immediate. */
5859 if (i.tm.opcode_modifier.vexw == VEXW0)
5860 i.vex.register_specifier = i.op[0].regs;
5861 else
5862 i.vex.register_specifier = i.op[1].regs;
5863 }
5864
5865 /* Destination is an XMM register encoded in the ModRM.reg
5866 and VEX.R bit. */
5867 i.rm.reg = i.op[2].regs->reg_num;
5868 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5869 i.rex |= REX_R;
5870
5871 /* ModRM.rm and VEX.B encode the other source. */
5872 if (!i.mem_operands)
5873 {
5874 i.rm.mode = 3;
5875
5876 if (i.tm.opcode_modifier.vexw == VEXW0)
5877 i.rm.regmem = i.op[1].regs->reg_num;
5878 else
5879 i.rm.regmem = i.op[0].regs->reg_num;
5880
5881 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5882 i.rex |= REX_B;
5883 }
5884 }
5885 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5886 {
5887 i.vex.register_specifier = i.op[2].regs;
5888 if (!i.mem_operands)
5889 {
5890 i.rm.mode = 3;
5891 i.rm.regmem = i.op[1].regs->reg_num;
5892 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5893 i.rex |= REX_B;
5894 }
5895 }
5896 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5897 (if any) based on i.tm.extension_opcode. Again, we must be
5898 careful to make sure that segment/control/debug/test/MMX
5899 registers are coded into the i.rm.reg field. */
5900 else if (i.reg_operands)
5901 {
5902 unsigned int op;
5903 unsigned int vex_reg = ~0;
5904
5905 for (op = 0; op < i.operands; op++)
5906 if (i.types[op].bitfield.reg8
5907 || i.types[op].bitfield.reg16
5908 || i.types[op].bitfield.reg32
5909 || i.types[op].bitfield.reg64
5910 || i.types[op].bitfield.regmmx
5911 || i.types[op].bitfield.regxmm
5912 || i.types[op].bitfield.regymm
5913 || i.types[op].bitfield.sreg2
5914 || i.types[op].bitfield.sreg3
5915 || i.types[op].bitfield.control
5916 || i.types[op].bitfield.debug
5917 || i.types[op].bitfield.test)
5918 break;
5919
5920 if (vex_3_sources)
5921 op = dest;
5922 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5923 {
5924 /* For instructions with VexNDS, the register-only
5925 source operand is encoded in VEX prefix. */
5926 gas_assert (mem != (unsigned int) ~0);
5927
5928 if (op > mem)
5929 {
5930 vex_reg = op++;
5931 gas_assert (op < i.operands);
5932 }
5933 else
5934 {
5935 /* Check register-only source operand when two source
5936 operands are swapped. */
5937 if (!i.tm.operand_types[op].bitfield.baseindex
5938 && i.tm.operand_types[op + 1].bitfield.baseindex)
5939 {
5940 vex_reg = op;
5941 op += 2;
5942 gas_assert (mem == (vex_reg + 1)
5943 && op < i.operands);
5944 }
5945 else
5946 {
5947 vex_reg = op + 1;
5948 gas_assert (vex_reg < i.operands);
5949 }
5950 }
5951 }
5952 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5953 {
5954 /* For instructions with VexNDD, the register destination
5955 is encoded in VEX prefix. */
5956 if (i.mem_operands == 0)
5957 {
5958 /* There is no memory operand. */
5959 gas_assert ((op + 2) == i.operands);
5960 vex_reg = op + 1;
5961 }
5962 else
5963 {
5964 /* There are only 2 operands. */
5965 gas_assert (op < 2 && i.operands == 2);
5966 vex_reg = 1;
5967 }
5968 }
5969 else
5970 gas_assert (op < i.operands);
5971
5972 if (vex_reg != (unsigned int) ~0)
5973 {
5974 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5975
5976 if (type->bitfield.reg32 != 1
5977 && type->bitfield.reg64 != 1
5978 && !operand_type_equal (type, &regxmm)
5979 && !operand_type_equal (type, &regymm))
5980 abort ();
5981
5982 i.vex.register_specifier = i.op[vex_reg].regs;
5983 }
5984
5985 /* Don't set OP operand twice. */
5986 if (vex_reg != op)
5987 {
5988 /* If there is an extension opcode to put here, the
5989 register number must be put into the regmem field. */
5990 if (i.tm.extension_opcode != None)
5991 {
5992 i.rm.regmem = i.op[op].regs->reg_num;
5993 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5994 i.rex |= REX_B;
5995 }
5996 else
5997 {
5998 i.rm.reg = i.op[op].regs->reg_num;
5999 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6000 i.rex |= REX_R;
6001 }
6002 }
6003
6004 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6005 must set it to 3 to indicate this is a register operand
6006 in the regmem field. */
6007 if (!i.mem_operands)
6008 i.rm.mode = 3;
6009 }
6010
6011 /* Fill in i.rm.reg field with extension opcode (if any). */
6012 if (i.tm.extension_opcode != None)
6013 i.rm.reg = i.tm.extension_opcode;
6014 }
6015 return default_seg;
6016 }
6017
6018 static void
6019 output_branch (void)
6020 {
6021 char *p;
6022 int size;
6023 int code16;
6024 int prefix;
6025 relax_substateT subtype;
6026 symbolS *sym;
6027 offsetT off;
6028
6029 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6030 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6031
6032 prefix = 0;
6033 if (i.prefix[DATA_PREFIX] != 0)
6034 {
6035 prefix = 1;
6036 i.prefixes -= 1;
6037 code16 ^= CODE16;
6038 }
6039 /* Pentium4 branch hints. */
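/* (The CS segment prefix, 0x2e, hints "not taken"; the DS segment
   prefix, 0x3e, hints "taken".)  */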
6040 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6041 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6042 {
6043 prefix++;
6044 i.prefixes--;
6045 }
6046 if (i.prefix[REX_PREFIX] != 0)
6047 {
6048 prefix++;
6049 i.prefixes--;
6050 }
6051
6052 if (i.prefixes != 0 && !intel_syntax)
6053 as_warn (_("skipping prefixes on this instruction"));
6054
6055 /* It's always a symbol; end the frag & set up for relax.
6056 Make sure there is enough room in this frag for the largest
6057 instruction we may generate in md_convert_frag. This is 2
6058 bytes for the opcode and room for the prefix and largest
6059 displacement. */
6060 frag_grow (prefix + 2 + 4);
6061 /* Prefix and 1 opcode byte go in fr_fix. */
6062 p = frag_more (prefix + 1);
6063 if (i.prefix[DATA_PREFIX] != 0)
6064 *p++ = DATA_PREFIX_OPCODE;
6065 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6066 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6067 *p++ = i.prefix[SEG_PREFIX];
6068 if (i.prefix[REX_PREFIX] != 0)
6069 *p++ = i.prefix[REX_PREFIX];
6070 *p = i.tm.base_opcode;
6071
6072 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6073 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6074 else if (cpu_arch_flags.bitfield.cpui386)
6075 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6076 else
6077 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6078 subtype |= code16;
6079
6080 sym = i.op[0].disps->X_add_symbol;
6081 off = i.op[0].disps->X_add_number;
6082
6083 if (i.op[0].disps->X_op != O_constant
6084 && i.op[0].disps->X_op != O_symbol)
6085 {
6086 /* Handle complex expressions. */
6087 sym = make_expr_symbol (i.op[0].disps);
6088 off = 0;
6089 }
6090
6091 /* 1 possible extra opcode + 4 byte displacement go in var part.
6092 Pass reloc in fr_var. */
6093 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6094 }
6095
6096 static void
6097 output_jump (void)
6098 {
6099 char *p;
6100 int size;
6101 fixS *fixP;
6102
6103 if (i.tm.opcode_modifier.jumpbyte)
6104 {
6105 /* This is a loop or jecxz type instruction. */
6106 size = 1;
6107 if (i.prefix[ADDR_PREFIX] != 0)
6108 {
6109 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6110 i.prefixes -= 1;
6111 }
6112 /* Pentium4 branch hints. */
6113 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6114 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6115 {
6116 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6117 i.prefixes--;
6118 }
6119 }
6120 else
6121 {
6122 int code16;
6123
6124 code16 = 0;
6125 if (flag_code == CODE_16BIT)
6126 code16 = CODE16;
6127
6128 if (i.prefix[DATA_PREFIX] != 0)
6129 {
6130 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6131 i.prefixes -= 1;
6132 code16 ^= CODE16;
6133 }
6134
6135 size = 4;
6136 if (code16)
6137 size = 2;
6138 }
6139
6140 if (i.prefix[REX_PREFIX] != 0)
6141 {
6142 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6143 i.prefixes -= 1;
6144 }
6145
6146 if (i.prefixes != 0 && !intel_syntax)
6147 as_warn (_("skipping prefixes on this instruction"));
6148
6149 p = frag_more (i.tm.opcode_length + size);
6150 switch (i.tm.opcode_length)
6151 {
6152 case 2:
6153 *p++ = i.tm.base_opcode >> 8;
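/* Fall through to emit the low opcode byte.  */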
6154 case 1:
6155 *p++ = i.tm.base_opcode;
6156 break;
6157 default:
6158 abort ();
6159 }
6160
6161 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6162 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6163
6164 /* All jumps handled here are signed, but don't use a signed limit
6165 check for 32 and 16 bit jumps as we want to allow wrap around at
6166 4G and 64k respectively. */
6167 if (size == 1)
6168 fixP->fx_signed = 1;
6169 }
6170
6171 static void
6172 output_interseg_jump (void)
6173 {
6174 char *p;
6175 int size;
6176 int prefix;
6177 int code16;
6178
6179 code16 = 0;
6180 if (flag_code == CODE_16BIT)
6181 code16 = CODE16;
6182
6183 prefix = 0;
6184 if (i.prefix[DATA_PREFIX] != 0)
6185 {
6186 prefix = 1;
6187 i.prefixes -= 1;
6188 code16 ^= CODE16;
6189 }
6190 if (i.prefix[REX_PREFIX] != 0)
6191 {
6192 prefix++;
6193 i.prefixes -= 1;
6194 }
6195
6196 size = 4;
6197 if (code16)
6198 size = 2;
6199
6200 if (i.prefixes != 0 && !intel_syntax)
6201 as_warn (_("skipping prefixes on this instruction"));
6202
6203 /* 1 opcode; 2 segment; offset */
6204 p = frag_more (prefix + 1 + 2 + size);
6205
6206 if (i.prefix[DATA_PREFIX] != 0)
6207 *p++ = DATA_PREFIX_OPCODE;
6208
6209 if (i.prefix[REX_PREFIX] != 0)
6210 *p++ = i.prefix[REX_PREFIX];
6211
6212 *p++ = i.tm.base_opcode;
6213 if (i.op[1].imms->X_op == O_constant)
6214 {
6215 offsetT n = i.op[1].imms->X_add_number;
6216
6217 if (size == 2
6218 && !fits_in_unsigned_word (n)
6219 && !fits_in_signed_word (n))
6220 {
6221 as_bad (_("16-bit jump out of range"));
6222 return;
6223 }
6224 md_number_to_chars (p, n, size);
6225 }
6226 else
6227 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6228 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6229 if (i.op[0].imms->X_op != O_constant)
6230 as_bad (_("can't handle non absolute segment in `%s'"),
6231 i.tm.name);
6232 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6233 }
6234
6235 static void
6236 output_insn (void)
6237 {
6238 fragS *insn_start_frag;
6239 offsetT insn_start_off;
6240
6241 /* Tie dwarf2 debug info to the address at the start of the insn.
6242 We can't do this after the insn has been output as the current
6243 frag may have been closed off. eg. by frag_var. */
6244 dwarf2_emit_insn (0);
6245
6246 insn_start_frag = frag_now;
6247 insn_start_off = frag_now_fix ();
6248
6249 /* Output jumps. */
6250 if (i.tm.opcode_modifier.jump)
6251 output_branch ();
6252 else if (i.tm.opcode_modifier.jumpbyte
6253 || i.tm.opcode_modifier.jumpdword)
6254 output_jump ();
6255 else if (i.tm.opcode_modifier.jumpintersegment)
6256 output_interseg_jump ();
6257 else
6258 {
6259 /* Output normal instructions here. */
6260 char *p;
6261 unsigned char *q;
6262 unsigned int j;
6263 unsigned int prefix;
6264
6265 /* Since the VEX prefix contains the implicit prefix, we don't
6266 need the explicit prefix. */
6267 if (!i.tm.opcode_modifier.vex)
6268 {
6269 switch (i.tm.opcode_length)
6270 {
6271 case 3:
6272 if (i.tm.base_opcode & 0xff000000)
6273 {
6274 prefix = (i.tm.base_opcode >> 24) & 0xff;
6275 goto check_prefix;
6276 }
6277 break;
6278 case 2:
6279 if ((i.tm.base_opcode & 0xff0000) != 0)
6280 {
6281 prefix = (i.tm.base_opcode >> 16) & 0xff;
6282 if (i.tm.cpu_flags.bitfield.cpupadlock)
6283 {
6284 check_prefix:
6285 if (prefix != REPE_PREFIX_OPCODE
6286 || (i.prefix[REP_PREFIX]
6287 != REPE_PREFIX_OPCODE))
6288 add_prefix (prefix);
6289 }
6290 else
6291 add_prefix (prefix);
6292 }
6293 break;
6294 case 1:
6295 break;
6296 default:
6297 abort ();
6298 }
6299
6300 /* The prefix bytes. */
6301 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6302 if (*q)
6303 FRAG_APPEND_1_CHAR (*q);
6304 }
6305 else
6306 {
6307 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6308 if (*q)
6309 switch (j)
6310 {
6311 case REX_PREFIX:
6312 /* REX byte is encoded in VEX prefix. */
6313 break;
6314 case SEG_PREFIX:
6315 case ADDR_PREFIX:
6316 FRAG_APPEND_1_CHAR (*q);
6317 break;
6318 default:
6319 /* There should be no other prefixes for instructions
6320 with VEX prefix. */
6321 abort ();
6322 }
6323
6324 /* Now the VEX prefix. */
6325 p = frag_more (i.vex.length);
6326 for (j = 0; j < i.vex.length; j++)
6327 p[j] = i.vex.bytes[j];
6328 }
6329
6330 /* Now the opcode; be careful about word order here! */
6331 if (i.tm.opcode_length == 1)
6332 {
6333 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6334 }
6335 else
6336 {
6337 switch (i.tm.opcode_length)
6338 {
6339 case 3:
6340 p = frag_more (3);
6341 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6342 break;
6343 case 2:
6344 p = frag_more (2);
6345 break;
6346 default:
6347 abort ();
6348 break;
6349 }
6350
6351 /* Put out high byte first: can't use md_number_to_chars! */
6352 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6353 *p = i.tm.base_opcode & 0xff;
6354 }
6355
6356 /* Now the modrm byte and sib byte (if present). */
6357 if (i.tm.opcode_modifier.modrm)
6358 {
6359 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6360 | i.rm.reg << 3
6361 | i.rm.mode << 6));
6362 /* If i.rm.regmem == ESP (4)
6363 && i.rm.mode != (Register mode)
6364 && not 16 bit
6365 ==> need a SIB byte. */
6366 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6367 && i.rm.mode != 3
6368 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6369 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6370 | i.sib.index << 3
6371 | i.sib.scale << 6));
6372 }
6373
6374 if (i.disp_operands)
6375 output_disp (insn_start_frag, insn_start_off);
6376
6377 if (i.imm_operands)
6378 output_imm (insn_start_frag, insn_start_off);
6379 }
6380
6381 #ifdef DEBUG386
6382 if (flag_debug)
6383 {
6384 pi ("" /*line*/, &i);
6385 }
6386 #endif /* DEBUG386 */
6387 }
6388
6389 /* Return the size of the displacement operand N. */
6390
6391 static int
6392 disp_size (unsigned int n)
6393 {
6394 int size = 4;
6395 if (i.types[n].bitfield.disp64)
6396 size = 8;
6397 else if (i.types[n].bitfield.disp8)
6398 size = 1;
6399 else if (i.types[n].bitfield.disp16)
6400 size = 2;
6401 return size;
6402 }
6403
6404 /* Return the size of the immediate operand N. */
6405
6406 static int
6407 imm_size (unsigned int n)
6408 {
6409 int size = 4;
6410 if (i.types[n].bitfield.imm64)
6411 size = 8;
6412 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6413 size = 1;
6414 else if (i.types[n].bitfield.imm16)
6415 size = 2;
6416 return size;
6417 }
6418
6419 static void
6420 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6421 {
6422 char *p;
6423 unsigned int n;
6424
6425 for (n = 0; n < i.operands; n++)
6426 {
6427 if (operand_type_check (i.types[n], disp))
6428 {
6429 if (i.op[n].disps->X_op == O_constant)
6430 {
6431 int size = disp_size (n);
6432 offsetT val;
6433
6434 val = offset_in_range (i.op[n].disps->X_add_number,
6435 size);
6436 p = frag_more (size);
6437 md_number_to_chars (p, val, size);
6438 }
6439 else
6440 {
6441 enum bfd_reloc_code_real reloc_type;
6442 int size = disp_size (n);
6443 int sign = i.types[n].bitfield.disp32s;
6444 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6445
6446 /* We can't have 8 bit displacement here. */
6447 gas_assert (!i.types[n].bitfield.disp8);
6448
6449 /* The PC relative address is computed relative
6450 to the instruction boundary, so in case immediate
6451 fields follow, we need to adjust the value. */
6452 if (pcrel && i.imm_operands)
6453 {
6454 unsigned int n1;
6455 int sz = 0;
6456
6457 for (n1 = 0; n1 < i.operands; n1++)
6458 if (operand_type_check (i.types[n1], imm))
6459 {
6460 /* Only one immediate is allowed for a
6461 PC-relative address. */
6462 gas_assert (sz == 0);
6463 sz = imm_size (n1);
6464 i.op[n].disps->X_add_number -= sz;
6465 }
6466 /* We should find the immediate. */
6467 gas_assert (sz != 0);
6468 }
6469
6470 p = frag_more (size);
6471 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6472 if (GOT_symbol
6473 && GOT_symbol == i.op[n].disps->X_add_symbol
6474 && (((reloc_type == BFD_RELOC_32
6475 || reloc_type == BFD_RELOC_X86_64_32S
6476 || (reloc_type == BFD_RELOC_64
6477 && object_64bit))
6478 && (i.op[n].disps->X_op == O_symbol
6479 || (i.op[n].disps->X_op == O_add
6480 && ((symbol_get_value_expression
6481 (i.op[n].disps->X_op_symbol)->X_op)
6482 == O_subtract))))
6483 || reloc_type == BFD_RELOC_32_PCREL))
6484 {
6485 offsetT add;
6486
6487 if (insn_start_frag == frag_now)
6488 add = (p - frag_now->fr_literal) - insn_start_off;
6489 else
6490 {
6491 fragS *fr;
6492
6493 add = insn_start_frag->fr_fix - insn_start_off;
6494 for (fr = insn_start_frag->fr_next;
6495 fr && fr != frag_now; fr = fr->fr_next)
6496 add += fr->fr_fix;
6497 add += p - frag_now->fr_literal;
6498 }
6499
6500 if (!object_64bit)
6501 {
6502 reloc_type = BFD_RELOC_386_GOTPC;
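/* (i.op[n] is a union, so the .imms member below refers to the
   same expression as .disps for this displacement operand.)  */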
6503 i.op[n].imms->X_add_number += add;
6504 }
6505 else if (reloc_type == BFD_RELOC_64)
6506 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6507 else
6508 /* Don't do the adjustment for x86-64, as there
6509 the pcrel addressing is relative to the _next_
6510 insn, and that is taken care of in other code. */
6511 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6512 }
6513 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6514 i.op[n].disps, pcrel, reloc_type);
6515 }
6516 }
6517 }
6518 }
6519
6520 static void
6521 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6522 {
6523 char *p;
6524 unsigned int n;
6525
6526 for (n = 0; n < i.operands; n++)
6527 {
6528 if (operand_type_check (i.types[n], imm))
6529 {
6530 if (i.op[n].imms->X_op == O_constant)
6531 {
6532 int size = imm_size (n);
6533 offsetT val;
6534
6535 val = offset_in_range (i.op[n].imms->X_add_number,
6536 size);
6537 p = frag_more (size);
6538 md_number_to_chars (p, val, size);
6539 }
6540 else
6541 {
6542 /* Not absolute_section.
6543 Need a 32-bit fixup (don't support 8bit
6544 non-absolute imms). Try to support other
6545 sizes ... */
6546 enum bfd_reloc_code_real reloc_type;
6547 int size = imm_size (n);
6548 int sign;
6549
6550 if (i.types[n].bitfield.imm32s
6551 && (i.suffix == QWORD_MNEM_SUFFIX
6552 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6553 sign = 1;
6554 else
6555 sign = 0;
6556
6557 p = frag_more (size);
6558 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6559
6560 /* This is tough to explain. We end up with this one if we
6561 * have operands that look like
6562 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6563 * obtain the absolute address of the GOT, and it is strongly
6564 * preferable from a performance point of view to avoid using
6565 * a runtime relocation for this. The actual sequence of
6566 * instructions often looks something like:
6567 *
6568 * call .L66
6569 * .L66:
6570 * popl %ebx
6571 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6572 *
6573 * The call and pop essentially return the absolute address
6574 * of the label .L66 and store it in %ebx. The linker itself
6575 * will ultimately change the first operand of the addl so
6576 * that %ebx points to the GOT, but to keep things simple, the
6577 * .o file must have this operand set so that it generates not
6578 * the absolute address of .L66, but the absolute address of
6579 * itself. This allows the linker to simply treat a GOTPC
6580 * relocation as asking for a pcrel offset to the GOT to be
6581 * added in, and the addend of the relocation is stored in the
6582 * operand field for the instruction itself.
6583 *
6584 * Our job here is to fix the operand so that it would add
6585 * the correct offset so that %ebx would point to itself. The
6586 * thing that is tricky is that .-.L66 will point to the
6587 * beginning of the instruction, so we need to further modify
6588 * the operand so that it will point to itself. There are
6589 * other cases where you have something like:
6590 *
6591 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6592 *
6593 * and here no correction would be required. Internally in
6594 * the assembler we treat operands of this form as not being
6595 * pcrel since the '.' is explicitly mentioned, and I wonder
6596 * whether it would simplify matters to do it this way. Who
6597 * knows. In earlier versions of the PIC patches, the
6598 * pcrel_adjust field was used to store the correction, but
6599 * since the expression is not pcrel, I felt it would be
6600 * confusing to do it this way. */
6601
6602 if ((reloc_type == BFD_RELOC_32
6603 || reloc_type == BFD_RELOC_X86_64_32S
6604 || reloc_type == BFD_RELOC_64)
6605 && GOT_symbol
6606 && GOT_symbol == i.op[n].imms->X_add_symbol
6607 && (i.op[n].imms->X_op == O_symbol
6608 || (i.op[n].imms->X_op == O_add
6609 && ((symbol_get_value_expression
6610 (i.op[n].imms->X_op_symbol)->X_op)
6611 == O_subtract))))
6612 {
6613 offsetT add;
6614
6615 if (insn_start_frag == frag_now)
6616 add = (p - frag_now->fr_literal) - insn_start_off;
6617 else
6618 {
6619 fragS *fr;
6620
6621 add = insn_start_frag->fr_fix - insn_start_off;
6622 for (fr = insn_start_frag->fr_next;
6623 fr && fr != frag_now; fr = fr->fr_next)
6624 add += fr->fr_fix;
6625 add += p - frag_now->fr_literal;
6626 }
6627
6628 if (!object_64bit)
6629 reloc_type = BFD_RELOC_386_GOTPC;
6630 else if (size == 4)
6631 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6632 else if (size == 8)
6633 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6634 i.op[n].imms->X_add_number += add;
6635 }
6636 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6637 i.op[n].imms, 0, reloc_type);
6638 }
6639 }
6640 }
6641 }
6642 \f
6643 /* x86_cons_fix_new is called via the expression parsing code when a
6644 reloc is needed. We use this hook to get the correct .got reloc. */
6645 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6646 static int cons_sign = -1;
6647
6648 void
6649 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6650 expressionS *exp)
6651 {
6652 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6653
6654 got_reloc = NO_RELOC;
6655
6656 #ifdef TE_PE
6657 if (exp->X_op == O_secrel)
6658 {
6659 exp->X_op = O_symbol;
6660 r = BFD_RELOC_32_SECREL;
6661 }
6662 #endif
6663
6664 fix_new_exp (frag, off, len, exp, 0, r);
6665 }
6666
6667 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6668 purpose of the `.dc.a' internal pseudo-op. */
6669
6670 int
6671 x86_address_bytes (void)
6672 {
6673 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6674 return 4;
6675 return stdoutput->arch_info->bits_per_address / 8;
6676 }
6677
6678 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6679 || defined (LEX_AT)
6680 # define lex_got(reloc, adjust, types) NULL
6681 #else
6682 /* Parse operands of the form
6683 <symbol>@GOTOFF+<nnn>
6684 and similar .plt or .got references.
6685
6686 If we find one, set up the correct relocation in RELOC and copy the
6687 input string, minus the `@GOTOFF' into a malloc'd buffer for
6688 parsing by the calling routine. Return this buffer, and if ADJUST
6689 is non-null set it to the length of the string we removed from the
6690 input line. Otherwise return NULL. */
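/* For example, given the operand string "foo@GOTOFF+4" this returns
   (roughly) "foo +4", with *ADJUST set to 6 and *REL set to
   BFD_RELOC_386_GOTOFF (or BFD_RELOC_X86_64_GOTOFF64 for 64-bit
   output), as per the gotrel[] table below.  */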
6691 static char *
6692 lex_got (enum bfd_reloc_code_real *rel,
6693 int *adjust,
6694 i386_operand_type *types)
6695 {
6696 /* Some of the relocations depend on the size of the field that is to
6697 be relocated. But in our callers i386_immediate and i386_displacement
6698 we don't yet know the operand size (this will be set by insn
6699 matching). Hence we record the word32 relocation here,
6700 and adjust the reloc according to the real size in reloc(). */
6701 static const struct {
6702 const char *str;
6703 int len;
6704 const enum bfd_reloc_code_real rel[2];
6705 const i386_operand_type types64;
6706 } gotrel[] = {
6707 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6708 BFD_RELOC_X86_64_PLTOFF64 },
6709 OPERAND_TYPE_IMM64 },
6710 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6711 BFD_RELOC_X86_64_PLT32 },
6712 OPERAND_TYPE_IMM32_32S_DISP32 },
6713 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6714 BFD_RELOC_X86_64_GOTPLT64 },
6715 OPERAND_TYPE_IMM64_DISP64 },
6716 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6717 BFD_RELOC_X86_64_GOTOFF64 },
6718 OPERAND_TYPE_IMM64_DISP64 },
6719 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6720 BFD_RELOC_X86_64_GOTPCREL },
6721 OPERAND_TYPE_IMM32_32S_DISP32 },
6722 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6723 BFD_RELOC_X86_64_TLSGD },
6724 OPERAND_TYPE_IMM32_32S_DISP32 },
6725 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6726 _dummy_first_bfd_reloc_code_real },
6727 OPERAND_TYPE_NONE },
6728 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6729 BFD_RELOC_X86_64_TLSLD },
6730 OPERAND_TYPE_IMM32_32S_DISP32 },
6731 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6732 BFD_RELOC_X86_64_GOTTPOFF },
6733 OPERAND_TYPE_IMM32_32S_DISP32 },
6734 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6735 BFD_RELOC_X86_64_TPOFF32 },
6736 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6737 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6738 _dummy_first_bfd_reloc_code_real },
6739 OPERAND_TYPE_NONE },
6740 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6741 BFD_RELOC_X86_64_DTPOFF32 },
6742 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6743 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6744 _dummy_first_bfd_reloc_code_real },
6745 OPERAND_TYPE_NONE },
6746 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6747 _dummy_first_bfd_reloc_code_real },
6748 OPERAND_TYPE_NONE },
6749 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6750 BFD_RELOC_X86_64_GOT32 },
6751 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6752 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6753 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6754 OPERAND_TYPE_IMM32_32S_DISP32 },
6755 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6756 BFD_RELOC_X86_64_TLSDESC_CALL },
6757 OPERAND_TYPE_IMM32_32S_DISP32 },
6758 };
6759 char *cp;
6760 unsigned int j;
6761
6762 #if defined (OBJ_MAYBE_ELF)
6763 if (!IS_ELF)
6764 return NULL;
6765 #endif
6766
6767 for (cp = input_line_pointer; *cp != '@'; cp++)
6768 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6769 return NULL;
6770
6771 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6772 {
6773 int len = gotrel[j].len;
6774 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6775 {
6776 if (gotrel[j].rel[object_64bit] != 0)
6777 {
6778 int first, second;
6779 char *tmpbuf, *past_reloc;
6780
6781 *rel = gotrel[j].rel[object_64bit];
6782 if (adjust)
6783 *adjust = len;
6784
6785 if (types)
6786 {
6787 if (flag_code != CODE_64BIT)
6788 {
6789 types->bitfield.imm32 = 1;
6790 types->bitfield.disp32 = 1;
6791 }
6792 else
6793 *types = gotrel[j].types64;
6794 }
6795
6796 if (GOT_symbol == NULL)
6797 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6798
6799 /* The length of the first part of our input line. */
6800 first = cp - input_line_pointer;
6801
6802 /* The second part goes from after the reloc token until
6803 (and including) an end_of_line char or comma. */
6804 past_reloc = cp + 1 + len;
6805 cp = past_reloc;
6806 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6807 ++cp;
6808 second = cp + 1 - past_reloc;
6809
6810 /* Allocate and copy string. The trailing NUL shouldn't
6811 be necessary, but be safe. */
6812 tmpbuf = (char *) xmalloc (first + second + 2);
6813 memcpy (tmpbuf, input_line_pointer, first);
6814 if (second != 0 && *past_reloc != ' ')
6815 /* Replace the relocation token with ' ', so that
6816 errors like foo@GOTOFF1 will be detected. */
6817 tmpbuf[first++] = ' ';
6818 memcpy (tmpbuf + first, past_reloc, second);
6819 tmpbuf[first + second] = '\0';
6820 return tmpbuf;
6821 }
6822
6823 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6824 gotrel[j].str, 1 << (5 + object_64bit));
6825 return NULL;
6826 }
6827 }
6828
6829 /* Might be a symbol version string. Don't as_bad here. */
6830 return NULL;
6831 }
6832 #endif
6833
6834 #ifdef TE_PE
6835 #ifdef lex_got
6836 #undef lex_got
6837 #endif
6838 /* Parse operands of the form
6839 <symbol>@SECREL32+<nnn>
6840
6841 If we find one, set up the correct relocation in RELOC and copy the
6842 input string, minus the `@SECREL32' into a malloc'd buffer for
6843 parsing by the calling routine. Return this buffer, and if ADJUST
6844 is non-null set it to the length of the string we removed from the
6845 input line. Otherwise return NULL.
6846
6847 This function is copied from the ELF version above, adjusted for PE targets. */
6848
6849 static char *
6850 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6851 int *adjust ATTRIBUTE_UNUSED,
6852 i386_operand_type *types ATTRIBUTE_UNUSED)
6853 {
6854 static const struct
6855 {
6856 const char *str;
6857 int len;
6858 const enum bfd_reloc_code_real rel[2];
6859 const i386_operand_type types64;
6860 }
6861 gotrel[] =
6862 {
6863 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6864 BFD_RELOC_32_SECREL },
6865 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6866 };
6867
6868 char *cp;
6869 unsigned j;
6870
6871 for (cp = input_line_pointer; *cp != '@'; cp++)
6872 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6873 return NULL;
6874
6875 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6876 {
6877 int len = gotrel[j].len;
6878
6879 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6880 {
6881 if (gotrel[j].rel[object_64bit] != 0)
6882 {
6883 int first, second;
6884 char *tmpbuf, *past_reloc;
6885
6886 *rel = gotrel[j].rel[object_64bit];
6887 if (adjust)
6888 *adjust = len;
6889
6890 if (types)
6891 {
6892 if (flag_code != CODE_64BIT)
6893 {
6894 types->bitfield.imm32 = 1;
6895 types->bitfield.disp32 = 1;
6896 }
6897 else
6898 *types = gotrel[j].types64;
6899 }
6900
6901 /* The length of the first part of our input line. */
6902 first = cp - input_line_pointer;
6903
6904 /* The second part goes from after the reloc token until
6905 (and including) an end_of_line char or comma. */
6906 past_reloc = cp + 1 + len;
6907 cp = past_reloc;
6908 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6909 ++cp;
6910 second = cp + 1 - past_reloc;
6911
6912 /* Allocate and copy string. The trailing NUL shouldn't
6913 be necessary, but be safe. */
6914 tmpbuf = (char *) xmalloc (first + second + 2);
6915 memcpy (tmpbuf, input_line_pointer, first);
6916 if (second != 0 && *past_reloc != ' ')
6917 /* Replace the relocation token with ' ', so that
6918 errors like foo@SECREL321 will be detected. */
6919 tmpbuf[first++] = ' ';
6920 memcpy (tmpbuf + first, past_reloc, second);
6921 tmpbuf[first + second] = '\0';
6922 return tmpbuf;
6923 }
6924
6925 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6926 gotrel[j].str, 1 << (5 + object_64bit));
6927 return NULL;
6928 }
6929 }
6930
6931 /* Might be a symbol version string. Don't as_bad here. */
6932 return NULL;
6933 }
6934
6935 #endif /* TE_PE */
6936
6937 void
6938 x86_cons (expressionS *exp, int size)
6939 {
6940 intel_syntax = -intel_syntax;
6941
6942 exp->X_md = 0;
6943 if (size == 4 || (object_64bit && size == 8))
6944 {
6945 /* Handle @GOTOFF and the like in an expression. */
6946 char *save;
6947 char *gotfree_input_line;
6948 int adjust = 0;
6949
6950 save = input_line_pointer;
6951 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6952 if (gotfree_input_line)
6953 input_line_pointer = gotfree_input_line;
6954
6955 expression (exp);
6956
6957 if (gotfree_input_line)
6958 {
6959 /* expression () has merrily parsed up to the end of line,
6960 or a comma - in the wrong buffer. Transfer how far
6961 input_line_pointer has moved to the right buffer. */
6962 input_line_pointer = (save
6963 + (input_line_pointer - gotfree_input_line)
6964 + adjust);
6965 free (gotfree_input_line);
6966 if (exp->X_op == O_constant
6967 || exp->X_op == O_absent
6968 || exp->X_op == O_illegal
6969 || exp->X_op == O_register
6970 || exp->X_op == O_big)
6971 {
6972 char c = *input_line_pointer;
6973 *input_line_pointer = 0;
6974 as_bad (_("missing or invalid expression `%s'"), save);
6975 *input_line_pointer = c;
6976 }
6977 }
6978 }
6979 else
6980 expression (exp);
6981
6982 intel_syntax = -intel_syntax;
6983
6984 if (intel_syntax)
6985 i386_intel_simplify (exp);
6986 }
6987
6988 static void
6989 signed_cons (int size)
6990 {
6991 if (flag_code == CODE_64BIT)
6992 cons_sign = 1;
6993 cons (size);
6994 cons_sign = -1;
6995 }
6996
6997 #ifdef TE_PE
6998 static void
6999 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7000 {
7001 expressionS exp;
7002
7003 do
7004 {
7005 expression (&exp);
7006 if (exp.X_op == O_symbol)
7007 exp.X_op = O_secrel;
7008
7009 emit_expr (&exp, 4);
7010 }
7011 while (*input_line_pointer++ == ',');
7012
7013 input_line_pointer--;
7014 demand_empty_rest_of_line ();
7015 }
7016 #endif
7017
7018 static int
7019 i386_immediate (char *imm_start)
7020 {
7021 char *save_input_line_pointer;
7022 char *gotfree_input_line;
7023 segT exp_seg = 0;
7024 expressionS *exp;
7025 i386_operand_type types;
7026
7027 operand_type_set (&types, ~0);
7028
7029 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7030 {
7031 as_bad (_("at most %d immediate operands are allowed"),
7032 MAX_IMMEDIATE_OPERANDS);
7033 return 0;
7034 }
7035
7036 exp = &im_expressions[i.imm_operands++];
7037 i.op[this_operand].imms = exp;
7038
7039 if (is_space_char (*imm_start))
7040 ++imm_start;
7041
7042 save_input_line_pointer = input_line_pointer;
7043 input_line_pointer = imm_start;
7044
7045 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7046 if (gotfree_input_line)
7047 input_line_pointer = gotfree_input_line;
7048
7049 exp_seg = expression (exp);
7050
7051 SKIP_WHITESPACE ();
7052 if (*input_line_pointer)
7053 as_bad (_("junk `%s' after expression"), input_line_pointer);
7054
7055 input_line_pointer = save_input_line_pointer;
7056 if (gotfree_input_line)
7057 {
7058 free (gotfree_input_line);
7059
7060 if (exp->X_op == O_constant || exp->X_op == O_register)
7061 exp->X_op = O_illegal;
7062 }
7063
7064 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7065 }
7066
7067 static int
7068 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7069 i386_operand_type types, const char *imm_start)
7070 {
7071 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7072 {
7073 if (imm_start)
7074 as_bad (_("missing or invalid immediate expression `%s'"),
7075 imm_start);
7076 return 0;
7077 }
7078 else if (exp->X_op == O_constant)
7079 {
7080 /* Size it properly later. */
7081 i.types[this_operand].bitfield.imm64 = 1;
7082 /* If not 64bit, sign extend val. */
7083 if (flag_code != CODE_64BIT
7084 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7085 exp->X_add_number
7086 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7087 }
7088 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7089 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7090 && exp_seg != absolute_section
7091 && exp_seg != text_section
7092 && exp_seg != data_section
7093 && exp_seg != bss_section
7094 && exp_seg != undefined_section
7095 && !bfd_is_com_section (exp_seg))
7096 {
7097 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7098 return 0;
7099 }
7100 #endif
7101 else if (!intel_syntax && exp->X_op == O_register)
7102 {
7103 if (imm_start)
7104 as_bad (_("illegal immediate register operand %s"), imm_start);
7105 return 0;
7106 }
7107 else
7108 {
7109 /* This is an address. The size of the address will be
7110 determined later, depending on destination register,
7111 suffix, or the default for the section. */
7112 i.types[this_operand].bitfield.imm8 = 1;
7113 i.types[this_operand].bitfield.imm16 = 1;
7114 i.types[this_operand].bitfield.imm32 = 1;
7115 i.types[this_operand].bitfield.imm32s = 1;
7116 i.types[this_operand].bitfield.imm64 = 1;
7117 i.types[this_operand] = operand_type_and (i.types[this_operand],
7118 types);
7119 }
7120
7121 return 1;
7122 }
7123
7124 static char *
7125 i386_scale (char *scale)
7126 {
7127 offsetT val;
7128 char *save = input_line_pointer;
7129
7130 input_line_pointer = scale;
7131 val = get_absolute_expression ();
7132
7133 switch (val)
7134 {
7135 case 1:
7136 i.log2_scale_factor = 0;
7137 break;
7138 case 2:
7139 i.log2_scale_factor = 1;
7140 break;
7141 case 4:
7142 i.log2_scale_factor = 2;
7143 break;
7144 case 8:
7145 i.log2_scale_factor = 3;
7146 break;
7147 default:
7148 {
7149 char sep = *input_line_pointer;
7150
7151 *input_line_pointer = '\0';
7152 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7153 scale);
7154 *input_line_pointer = sep;
7155 input_line_pointer = save;
7156 return NULL;
7157 }
7158 }
7159 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7160 {
7161 as_warn (_("scale factor of %d without an index register"),
7162 1 << i.log2_scale_factor);
7163 i.log2_scale_factor = 0;
7164 }
7165 scale = input_line_pointer;
7166 input_line_pointer = save;
7167 return scale;
7168 }
7169
7170 static int
7171 i386_displacement (char *disp_start, char *disp_end)
7172 {
7173 expressionS *exp;
7174 segT exp_seg = 0;
7175 char *save_input_line_pointer;
7176 char *gotfree_input_line;
7177 int override;
7178 i386_operand_type bigdisp, types = anydisp;
7179 int ret;
7180
7181 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7182 {
7183 as_bad (_("at most %d displacement operands are allowed"),
7184 MAX_MEMORY_OPERANDS);
7185 return 0;
7186 }
7187
7188 operand_type_set (&bigdisp, 0);
7189 if ((i.types[this_operand].bitfield.jumpabsolute)
7190 || (!current_templates->start->opcode_modifier.jump
7191 && !current_templates->start->opcode_modifier.jumpdword))
7192 {
7193 bigdisp.bitfield.disp32 = 1;
7194 override = (i.prefix[ADDR_PREFIX] != 0);
7195 if (flag_code == CODE_64BIT)
7196 {
7197 if (!override)
7198 {
7199 bigdisp.bitfield.disp32s = 1;
7200 bigdisp.bitfield.disp64 = 1;
7201 }
7202 }
7203 else if ((flag_code == CODE_16BIT) ^ override)
7204 {
7205 bigdisp.bitfield.disp32 = 0;
7206 bigdisp.bitfield.disp16 = 1;
7207 }
7208 }
7209 else
7210 {
7211 /* For PC-relative branches, the width of the displacement
7212 is dependent upon data size, not address size. */
7213 override = (i.prefix[DATA_PREFIX] != 0);
7214 if (flag_code == CODE_64BIT)
7215 {
7216 if (override || i.suffix == WORD_MNEM_SUFFIX)
7217 bigdisp.bitfield.disp16 = 1;
7218 else
7219 {
7220 bigdisp.bitfield.disp32 = 1;
7221 bigdisp.bitfield.disp32s = 1;
7222 }
7223 }
7224 else
7225 {
7226 if (!override)
7227 override = (i.suffix == (flag_code != CODE_16BIT
7228 ? WORD_MNEM_SUFFIX
7229 : LONG_MNEM_SUFFIX));
7230 bigdisp.bitfield.disp32 = 1;
7231 if ((flag_code == CODE_16BIT) ^ override)
7232 {
7233 bigdisp.bitfield.disp32 = 0;
7234 bigdisp.bitfield.disp16 = 1;
7235 }
7236 }
7237 }
7238 i.types[this_operand] = operand_type_or (i.types[this_operand],
7239 bigdisp);
7240
7241 exp = &disp_expressions[i.disp_operands];
7242 i.op[this_operand].disps = exp;
7243 i.disp_operands++;
7244 save_input_line_pointer = input_line_pointer;
7245 input_line_pointer = disp_start;
7246 END_STRING_AND_SAVE (disp_end);
7247
7248 #ifndef GCC_ASM_O_HACK
7249 #define GCC_ASM_O_HACK 0
7250 #endif
7251 #if GCC_ASM_O_HACK
7252 END_STRING_AND_SAVE (disp_end + 1);
7253 if (i.types[this_operand].bitfield.baseIndex
7254 && displacement_string_end[-1] == '+')
7255 {
7256 /* This hack is to avoid a warning when using the "o"
7257 constraint within gcc asm statements.
7258 For instance:
7259
7260 #define _set_tssldt_desc(n,addr,limit,type) \
7261 __asm__ __volatile__ ( \
7262 "movw %w2,%0\n\t" \
7263 "movw %w1,2+%0\n\t" \
7264 "rorl $16,%1\n\t" \
7265 "movb %b1,4+%0\n\t" \
7266 "movb %4,5+%0\n\t" \
7267 "movb $0,6+%0\n\t" \
7268 "movb %h1,7+%0\n\t" \
7269 "rorl $16,%1" \
7270 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7271
7272 This works great except that the output assembler ends
7273 up looking a bit weird if it turns out that there is
7274 no offset. You end up producing code that looks like:
7275
7276 #APP
7277 movw $235,(%eax)
7278 movw %dx,2+(%eax)
7279 rorl $16,%edx
7280 movb %dl,4+(%eax)
7281 movb $137,5+(%eax)
7282 movb $0,6+(%eax)
7283 movb %dh,7+(%eax)
7284 rorl $16,%edx
7285 #NO_APP
7286
7287 So here we provide the missing zero. */
7288
7289 *displacement_string_end = '0';
7290 }
7291 #endif
7292 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7293 if (gotfree_input_line)
7294 input_line_pointer = gotfree_input_line;
7295
7296 exp_seg = expression (exp);
7297
7298 SKIP_WHITESPACE ();
7299 if (*input_line_pointer)
7300 as_bad (_("junk `%s' after expression"), input_line_pointer);
7301 #if GCC_ASM_O_HACK
7302 RESTORE_END_STRING (disp_end + 1);
7303 #endif
7304 input_line_pointer = save_input_line_pointer;
7305 if (gotfree_input_line)
7306 {
7307 free (gotfree_input_line);
7308
7309 if (exp->X_op == O_constant || exp->X_op == O_register)
7310 exp->X_op = O_illegal;
7311 }
7312
7313 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7314
7315 RESTORE_END_STRING (disp_end);
7316
7317 return ret;
7318 }
7319
7320 static int
7321 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7322 i386_operand_type types, const char *disp_start)
7323 {
7324 i386_operand_type bigdisp;
7325 int ret = 1;
7326
7327 /* We do this to make sure that the section symbol is in
7328 the symbol table. We will ultimately change the relocation
7329 to be relative to the beginning of the section. */
7330 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7331 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7332 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7333 {
7334 if (exp->X_op != O_symbol)
7335 goto inv_disp;
7336
7337 if (S_IS_LOCAL (exp->X_add_symbol)
7338 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7339 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7340 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7341 exp->X_op = O_subtract;
7342 exp->X_op_symbol = GOT_symbol;
7343 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7344 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7345 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7346 i.reloc[this_operand] = BFD_RELOC_64;
7347 else
7348 i.reloc[this_operand] = BFD_RELOC_32;
7349 }
7350
7351 else if (exp->X_op == O_absent
7352 || exp->X_op == O_illegal
7353 || exp->X_op == O_big)
7354 {
7355 inv_disp:
7356 as_bad (_("missing or invalid displacement expression `%s'"),
7357 disp_start);
7358 ret = 0;
7359 }
7360
7361 else if (flag_code == CODE_64BIT
7362 && !i.prefix[ADDR_PREFIX]
7363 && exp->X_op == O_constant)
7364 {
7365 /* Since the displacement is sign-extended to 64 bits, don't allow
7366 disp32, and turn off disp32s if the value is out of range. */
7367 i.types[this_operand].bitfield.disp32 = 0;
7368 if (!fits_in_signed_long (exp->X_add_number))
7369 {
7370 i.types[this_operand].bitfield.disp32s = 0;
7371 if (i.types[this_operand].bitfield.baseindex)
7372 {
7373 as_bad (_("0x%lx out range of signed 32bit displacement"),
7374 (long) exp->X_add_number);
7375 ret = 0;
7376 }
7377 }
7378 }
7379
7380 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7381 else if (exp->X_op != O_constant
7382 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7383 && exp_seg != absolute_section
7384 && exp_seg != text_section
7385 && exp_seg != data_section
7386 && exp_seg != bss_section
7387 && exp_seg != undefined_section
7388 && !bfd_is_com_section (exp_seg))
7389 {
7390 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7391 ret = 0;
7392 }
7393 #endif
7394
7395 /* Check if this is a displacement only operand. */
7396 bigdisp = i.types[this_operand];
7397 bigdisp.bitfield.disp8 = 0;
7398 bigdisp.bitfield.disp16 = 0;
7399 bigdisp.bitfield.disp32 = 0;
7400 bigdisp.bitfield.disp32s = 0;
7401 bigdisp.bitfield.disp64 = 0;
7402 if (operand_type_all_zero (&bigdisp))
7403 i.types[this_operand] = operand_type_and (i.types[this_operand],
7404 types);
7405
7406 return ret;
7407 }
7408
7409 /* Make sure the memory operand we've been dealt is valid.
7410 Return 1 on success, 0 on failure. */
7411
7412 static int
7413 i386_index_check (const char *operand_string)
7414 {
7415 int ok;
7416 const char *kind = "base/index";
7417 #if INFER_ADDR_PREFIX
7418 int fudged = 0;
7419
7420 tryprefix:
7421 #endif
7422 ok = 1;
7423 if (current_templates->start->opcode_modifier.isstring
7424 && !current_templates->start->opcode_modifier.immext
7425 && (current_templates->end[-1].opcode_modifier.isstring
7426 || i.mem_operands))
7427 {
7428 /* Memory operands of string insns are special in that they only allow
7429 a single register (rDI, rSI, or rBX) as their memory address. */
7430 unsigned int expected;
7431
7432 kind = "string address";
7433
7434 if (current_templates->start->opcode_modifier.w)
7435 {
7436 i386_operand_type type = current_templates->end[-1].operand_types[0];
7437
7438 if (!type.bitfield.baseindex
7439 || ((!i.mem_operands != !intel_syntax)
7440 && current_templates->end[-1].operand_types[1]
7441 .bitfield.baseindex))
7442 type = current_templates->end[-1].operand_types[1];
7443 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7444 }
7445 else
7446 expected = 3 /* rBX */;
7447
7448 if (!i.base_reg || i.index_reg
7449 || operand_type_check (i.types[this_operand], disp))
7450 ok = -1;
7451 else if (!(flag_code == CODE_64BIT
7452 ? i.prefix[ADDR_PREFIX]
7453 ? i.base_reg->reg_type.bitfield.reg32
7454 : i.base_reg->reg_type.bitfield.reg64
7455 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7456 ? i.base_reg->reg_type.bitfield.reg32
7457 : i.base_reg->reg_type.bitfield.reg16))
7458 ok = 0;
7459 else if (register_number (i.base_reg) != expected)
7460 ok = -1;
7461
7462 if (ok < 0)
7463 {
7464 unsigned int j;
7465
7466 for (j = 0; j < i386_regtab_size; ++j)
7467 if ((flag_code == CODE_64BIT
7468 ? i.prefix[ADDR_PREFIX]
7469 ? i386_regtab[j].reg_type.bitfield.reg32
7470 : i386_regtab[j].reg_type.bitfield.reg64
7471 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7472 ? i386_regtab[j].reg_type.bitfield.reg32
7473 : i386_regtab[j].reg_type.bitfield.reg16)
7474 && register_number (i386_regtab + j) == expected)
7475 break;
7476 gas_assert (j < i386_regtab_size);
7477 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7478 operand_string,
7479 intel_syntax ? '[' : '(',
7480 register_prefix,
7481 i386_regtab[j].reg_name,
7482 intel_syntax ? ']' : ')');
7483 ok = 1;
7484 }
7485 }
7486 else if (flag_code == CODE_64BIT)
7487 {
7488 if ((i.base_reg
7489 && ((i.prefix[ADDR_PREFIX] == 0
7490 && !i.base_reg->reg_type.bitfield.reg64)
7491 || (i.prefix[ADDR_PREFIX]
7492 && !i.base_reg->reg_type.bitfield.reg32))
7493 && (i.index_reg
7494 || i.base_reg->reg_num !=
7495 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7496 || (i.index_reg
7497 && !(i.index_reg->reg_type.bitfield.regxmm
7498 || i.index_reg->reg_type.bitfield.regymm)
7499 && (!i.index_reg->reg_type.bitfield.baseindex
7500 || (i.prefix[ADDR_PREFIX] == 0
7501 && i.index_reg->reg_num != RegRiz
7502 && !i.index_reg->reg_type.bitfield.reg64
7503 )
7504 || (i.prefix[ADDR_PREFIX]
7505 && i.index_reg->reg_num != RegEiz
7506 && !i.index_reg->reg_type.bitfield.reg32))))
7507 ok = 0;
7508 }
7509 else
7510 {
7511 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7512 {
7513 /* 16bit checks. */
7514 if ((i.base_reg
7515 && (!i.base_reg->reg_type.bitfield.reg16
7516 || !i.base_reg->reg_type.bitfield.baseindex))
7517 || (i.index_reg
7518 && (!i.index_reg->reg_type.bitfield.reg16
7519 || !i.index_reg->reg_type.bitfield.baseindex
7520 || !(i.base_reg
7521 && i.base_reg->reg_num < 6
7522 && i.index_reg->reg_num >= 6
7523 && i.log2_scale_factor == 0))))
7524 ok = 0;
7525 }
7526 else
7527 {
7528 /* 32bit checks. */
7529 if ((i.base_reg
7530 && !i.base_reg->reg_type.bitfield.reg32)
7531 || (i.index_reg
7532 && !i.index_reg->reg_type.bitfield.regxmm
7533 && !i.index_reg->reg_type.bitfield.regymm
7534 && ((!i.index_reg->reg_type.bitfield.reg32
7535 && i.index_reg->reg_num != RegEiz)
7536 || !i.index_reg->reg_type.bitfield.baseindex)))
7537 ok = 0;
7538 }
7539 }
7540 if (!ok)
7541 {
7542 #if INFER_ADDR_PREFIX
7543 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7544 {
7545 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7546 i.prefixes += 1;
7547 /* Change the size of any displacement too. At most one of
7548 Disp16 or Disp32 is set.
7549 FIXME. There doesn't seem to be any real need for separate
7550 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7551 Removing them would probably clean up the code quite a lot. */
7552 if (flag_code != CODE_64BIT
7553 && (i.types[this_operand].bitfield.disp16
7554 || i.types[this_operand].bitfield.disp32))
7555 i.types[this_operand]
7556 = operand_type_xor (i.types[this_operand], disp16_32);
7557 fudged = 1;
7558 goto tryprefix;
7559 }
7560 if (fudged)
7561 as_bad (_("`%s' is not a valid %s expression"),
7562 operand_string,
7563 kind);
7564 else
7565 #endif
7566 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7567 operand_string,
7568 flag_code_names[i.prefix[ADDR_PREFIX]
7569 ? flag_code == CODE_32BIT
7570 ? CODE_16BIT
7571 : CODE_32BIT
7572 : flag_code],
7573 kind);
7574 }
7575 return ok;
7576 }
7577
7578 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7579 on error. */
7580
7581 static int
7582 i386_att_operand (char *operand_string)
7583 {
7584 const reg_entry *r;
7585 char *end_op;
7586 char *op_string = operand_string;
7587
7588 if (is_space_char (*op_string))
7589 ++op_string;
7590
7591 /* We check for an absolute prefix (differentiating,
7592 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7593 if (*op_string == ABSOLUTE_PREFIX)
7594 {
7595 ++op_string;
7596 if (is_space_char (*op_string))
7597 ++op_string;
7598 i.types[this_operand].bitfield.jumpabsolute = 1;
7599 }
7600
7601 /* Check if operand is a register. */
7602 if ((r = parse_register (op_string, &end_op)) != NULL)
7603 {
7604 i386_operand_type temp;
7605
7606 /* Check for a segment override by searching for ':' after a
7607 segment register. */
7608 op_string = end_op;
7609 if (is_space_char (*op_string))
7610 ++op_string;
7611 if (*op_string == ':'
7612 && (r->reg_type.bitfield.sreg2
7613 || r->reg_type.bitfield.sreg3))
7614 {
7615 switch (r->reg_num)
7616 {
7617 case 0:
7618 i.seg[i.mem_operands] = &es;
7619 break;
7620 case 1:
7621 i.seg[i.mem_operands] = &cs;
7622 break;
7623 case 2:
7624 i.seg[i.mem_operands] = &ss;
7625 break;
7626 case 3:
7627 i.seg[i.mem_operands] = &ds;
7628 break;
7629 case 4:
7630 i.seg[i.mem_operands] = &fs;
7631 break;
7632 case 5:
7633 i.seg[i.mem_operands] = &gs;
7634 break;
7635 }
7636
7637 /* Skip the ':' and whitespace. */
7638 ++op_string;
7639 if (is_space_char (*op_string))
7640 ++op_string;
7641
7642 if (!is_digit_char (*op_string)
7643 && !is_identifier_char (*op_string)
7644 && *op_string != '('
7645 && *op_string != ABSOLUTE_PREFIX)
7646 {
7647 as_bad (_("bad memory operand `%s'"), op_string);
7648 return 0;
7649 }
7650 /* Handle case of %es:*foo. */
7651 if (*op_string == ABSOLUTE_PREFIX)
7652 {
7653 ++op_string;
7654 if (is_space_char (*op_string))
7655 ++op_string;
7656 i.types[this_operand].bitfield.jumpabsolute = 1;
7657 }
7658 goto do_memory_reference;
7659 }
7660 if (*op_string)
7661 {
7662 as_bad (_("junk `%s' after register"), op_string);
7663 return 0;
7664 }
7665 temp = r->reg_type;
7666 temp.bitfield.baseindex = 0;
7667 i.types[this_operand] = operand_type_or (i.types[this_operand],
7668 temp);
7669 i.types[this_operand].bitfield.unspecified = 0;
7670 i.op[this_operand].regs = r;
7671 i.reg_operands++;
7672 }
7673 else if (*op_string == REGISTER_PREFIX)
7674 {
7675 as_bad (_("bad register name `%s'"), op_string);
7676 return 0;
7677 }
7678 else if (*op_string == IMMEDIATE_PREFIX)
7679 {
7680 ++op_string;
7681 if (i.types[this_operand].bitfield.jumpabsolute)
7682 {
7683 as_bad (_("immediate operand illegal with absolute jump"));
7684 return 0;
7685 }
7686 if (!i386_immediate (op_string))
7687 return 0;
7688 }
7689 else if (is_digit_char (*op_string)
7690 || is_identifier_char (*op_string)
7691 || *op_string == '(')
7692 {
7693 /* This is a memory reference of some sort. */
7694 char *base_string;
7695
7696 /* Start and end of displacement string expression (if found). */
7697 char *displacement_string_start;
7698 char *displacement_string_end;
7699
7700 do_memory_reference:
7701 if ((i.mem_operands == 1
7702 && !current_templates->start->opcode_modifier.isstring)
7703 || i.mem_operands == 2)
7704 {
7705 as_bad (_("too many memory references for `%s'"),
7706 current_templates->start->name);
7707 return 0;
7708 }
7709
7710 /* Check for base index form. We detect the base index form by
7711 looking for an ')' at the end of the operand, searching
7712 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7713 after the '('. */
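/* E.g. for "foo(%ebx,%esi,4)" the displacement expression is "foo"
   and the base/index/scale part is "(%ebx,%esi,4)".  */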
7714 base_string = op_string + strlen (op_string);
7715
7716 --base_string;
7717 if (is_space_char (*base_string))
7718 --base_string;
7719
7720 /* If we only have a displacement, set up for it to be parsed later. */
7721 displacement_string_start = op_string;
7722 displacement_string_end = base_string + 1;
7723
7724 if (*base_string == ')')
7725 {
7726 char *temp_string;
7727 unsigned int parens_balanced = 1;
7728 /* We've already checked that the numbers of left & right ()'s
7729 are equal, so this loop will not be infinite. */
7730 do
7731 {
7732 base_string--;
7733 if (*base_string == ')')
7734 parens_balanced++;
7735 if (*base_string == '(')
7736 parens_balanced--;
7737 }
7738 while (parens_balanced);
7739
7740 temp_string = base_string;
7741
7742 /* Skip past '(' and whitespace. */
7743 ++base_string;
7744 if (is_space_char (*base_string))
7745 ++base_string;
7746
7747 if (*base_string == ','
7748 || ((i.base_reg = parse_register (base_string, &end_op))
7749 != NULL))
7750 {
7751 displacement_string_end = temp_string;
7752
7753 i.types[this_operand].bitfield.baseindex = 1;
7754
7755 if (i.base_reg)
7756 {
7757 base_string = end_op;
7758 if (is_space_char (*base_string))
7759 ++base_string;
7760 }
7761
7762 /* There may be an index reg or scale factor here. */
7763 if (*base_string == ',')
7764 {
7765 ++base_string;
7766 if (is_space_char (*base_string))
7767 ++base_string;
7768
7769 if ((i.index_reg = parse_register (base_string, &end_op))
7770 != NULL)
7771 {
7772 base_string = end_op;
7773 if (is_space_char (*base_string))
7774 ++base_string;
7775 if (*base_string == ',')
7776 {
7777 ++base_string;
7778 if (is_space_char (*base_string))
7779 ++base_string;
7780 }
7781 else if (*base_string != ')')
7782 {
7783 as_bad (_("expecting `,' or `)' "
7784 "after index register in `%s'"),
7785 operand_string);
7786 return 0;
7787 }
7788 }
7789 else if (*base_string == REGISTER_PREFIX)
7790 {
7791 end_op = strchr (base_string, ',');
7792 if (end_op)
7793 *end_op = '\0';
7794 as_bad (_("bad register name `%s'"), base_string);
7795 return 0;
7796 }
7797
7798 /* Check for scale factor. */
7799 if (*base_string != ')')
7800 {
7801 char *end_scale = i386_scale (base_string);
7802
7803 if (!end_scale)
7804 return 0;
7805
7806 base_string = end_scale;
7807 if (is_space_char (*base_string))
7808 ++base_string;
7809 if (*base_string != ')')
7810 {
7811 as_bad (_("expecting `)' "
7812 "after scale factor in `%s'"),
7813 operand_string);
7814 return 0;
7815 }
7816 }
7817 else if (!i.index_reg)
7818 {
7819 as_bad (_("expecting index register or scale factor "
7820 "after `,'; got '%c'"),
7821 *base_string);
7822 return 0;
7823 }
7824 }
7825 else if (*base_string != ')')
7826 {
7827 as_bad (_("expecting `,' or `)' "
7828 "after base register in `%s'"),
7829 operand_string);
7830 return 0;
7831 }
7832 }
7833 else if (*base_string == REGISTER_PREFIX)
7834 {
7835 end_op = strchr (base_string, ',');
7836 if (end_op)
7837 *end_op = '\0';
7838 as_bad (_("bad register name `%s'"), base_string);
7839 return 0;
7840 }
7841 }
7842
7843 /* If there's an expression beginning the operand, parse it,
7844 assuming displacement_string_start and
7845 displacement_string_end are meaningful. */
7846 if (displacement_string_start != displacement_string_end)
7847 {
7848 if (!i386_displacement (displacement_string_start,
7849 displacement_string_end))
7850 return 0;
7851 }
7852
7853 /* Special case for (%dx) while doing an input/output op. */
7854 if (i.base_reg
7855 && operand_type_equal (&i.base_reg->reg_type,
7856 &reg16_inoutportreg)
7857 && i.index_reg == 0
7858 && i.log2_scale_factor == 0
7859 && i.seg[i.mem_operands] == 0
7860 && !operand_type_check (i.types[this_operand], disp))
7861 {
7862 i.types[this_operand] = inoutportreg;
7863 return 1;
7864 }
7865
7866 if (i386_index_check (operand_string) == 0)
7867 return 0;
7868 i.types[this_operand].bitfield.mem = 1;
7869 i.mem_operands++;
7870 }
7871 else
7872 {
7873 /* It's not a memory operand; argh! */
7874 as_bad (_("invalid char %s beginning operand %d `%s'"),
7875 output_invalid (*op_string),
7876 this_operand + 1,
7877 op_string);
7878 return 0;
7879 }
7880 return 1; /* Normal return. */
7881 }
7882 \f
7883 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7884 that an rs_machine_dependent frag may reach. */
7885
7886 unsigned int
7887 i386_frag_max_var (fragS *frag)
7888 {
7889 /* The only relaxable frags are for jumps.
7890 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7891 gas_assert (frag->fr_type == rs_machine_dependent);
7892 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7893 }
7894
7895 /* md_estimate_size_before_relax()
7896
7897 Called just before relax() for rs_machine_dependent frags. The x86
7898 assembler uses these frags to handle variable size jump
7899 instructions.
7900
7901 Any symbol that is now undefined will not become defined.
7902 Return the correct fr_subtype in the frag.
7903 Return the initial "guess for variable size of frag" to caller.
7904 The guess is actually the growth beyond the fixed part. Whatever
7905 we do to grow the fixed or variable part contributes to our
7906 returned value. */
7907
7908 int
7909 md_estimate_size_before_relax (fragS *fragP, segT segment)
7910 {
7911 /* We've already got fragP->fr_subtype right; all we have to do is
7912 check for un-relaxable symbols. On an ELF system, we can't relax
7913 an externally visible symbol, because it may be overridden by a
7914 shared library. */
7915 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7916 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7917 || (IS_ELF
7918 && (S_IS_EXTERNAL (fragP->fr_symbol)
7919 || S_IS_WEAK (fragP->fr_symbol)
7920 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7921 & BSF_GNU_INDIRECT_FUNCTION))))
7922 #endif
7923 #if defined (OBJ_COFF) && defined (TE_PE)
7924 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7925 && S_IS_WEAK (fragP->fr_symbol))
7926 #endif
7927 )
7928 {
7929 /* Symbol is undefined in this segment, or we need to keep a
7930 reloc so that weak symbols can be overridden. */
7931 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7932 enum bfd_reloc_code_real reloc_type;
7933 unsigned char *opcode;
7934 int old_fr_fix;
7935
7936 if (fragP->fr_var != NO_RELOC)
7937 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7938 else if (size == 2)
7939 reloc_type = BFD_RELOC_16_PCREL;
7940 else
7941 reloc_type = BFD_RELOC_32_PCREL;
7942
7943 old_fr_fix = fragP->fr_fix;
7944 opcode = (unsigned char *) fragP->fr_opcode;
7945
7946 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7947 {
7948 case UNCOND_JUMP:
7949 /* Make jmp (0xeb) a (d)word displacement jump. */
7950 opcode[0] = 0xe9;
7951 fragP->fr_fix += size;
7952 fix_new (fragP, old_fr_fix, size,
7953 fragP->fr_symbol,
7954 fragP->fr_offset, 1,
7955 reloc_type);
7956 break;
7957
7958 case COND_JUMP86:
7959 if (size == 2
7960 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7961 {
7962 /* Negate the condition, and branch past an
7963 unconditional jump. */
7964 opcode[0] ^= 1;
7965 opcode[1] = 3;
7966 /* Insert an unconditional jump. */
7967 opcode[2] = 0xe9;
7968 /* We added two extra opcode bytes, and have a two byte
7969 offset. */
7970 fragP->fr_fix += 2 + 2;
7971 fix_new (fragP, old_fr_fix + 2, 2,
7972 fragP->fr_symbol,
7973 fragP->fr_offset, 1,
7974 reloc_type);
7975 break;
7976 }
7977 /* Fall through. */
7978
7979 case COND_JUMP:
7980 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7981 {
7982 fixS *fixP;
7983
7984 fragP->fr_fix += 1;
7985 fixP = fix_new (fragP, old_fr_fix, 1,
7986 fragP->fr_symbol,
7987 fragP->fr_offset, 1,
7988 BFD_RELOC_8_PCREL);
7989 fixP->fx_signed = 1;
7990 break;
7991 }
7992
7993 /* This changes the byte-displacement jump 0x7N
7994 to the (d)word-displacement jump 0x0f,0x8N. */
7995 opcode[1] = opcode[0] + 0x10;
7996 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7997 /* We've added an opcode byte. */
7998 fragP->fr_fix += 1 + size;
7999 fix_new (fragP, old_fr_fix + 1, size,
8000 fragP->fr_symbol,
8001 fragP->fr_offset, 1,
8002 reloc_type);
8003 break;
8004
8005 default:
8006 BAD_CASE (fragP->fr_subtype);
8007 break;
8008 }
8009 frag_wane (fragP);
8010 return fragP->fr_fix - old_fr_fix;
8011 }
8012
8013 /* Guess size depending on current relax state. Initially the relax
8014 state will correspond to a short jump and we return 1, because
8015 the variable part of the frag (the branch offset) is one byte
8016 long. However, we can relax a section more than once and in that
8017 case we must either set fr_subtype back to the unrelaxed state,
8018 or return the value for the appropriate branch. */
8019 return md_relax_table[fragP->fr_subtype].rlx_length;
8020 }
8021
8022 /* Called after relax() is finished.
8023
8024 In: Address of frag.
8025 fr_type == rs_machine_dependent.
8026 fr_subtype is what the address relaxed to.
8027
8028 Out: Any fixSs and constants are set up.
8029 Caller will turn frag into a ".space 0". */
8030
8031 void
8032 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8033 fragS *fragP)
8034 {
8035 unsigned char *opcode;
8036 unsigned char *where_to_put_displacement = NULL;
8037 offsetT target_address;
8038 offsetT opcode_address;
8039 unsigned int extension = 0;
8040 offsetT displacement_from_opcode_start;
8041
8042 opcode = (unsigned char *) fragP->fr_opcode;
8043
8044 /* Address we want to reach in file space. */
8045 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8046
8047 /* Address opcode resides at in file space. */
8048 opcode_address = fragP->fr_address + fragP->fr_fix;
8049
8050 /* Displacement from opcode start to fill into instruction. */
8051 displacement_from_opcode_start = target_address - opcode_address;
8052
8053 if ((fragP->fr_subtype & BIG) == 0)
8054 {
8055 /* Don't have to change opcode. */
8056 extension = 1; /* 1 opcode + 1 displacement */
8057 where_to_put_displacement = &opcode[1];
8058 }
8059 else
8060 {
8061 if (no_cond_jump_promotion
8062 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8063 as_warn_where (fragP->fr_file, fragP->fr_line,
8064 _("long jump required"));
8065
8066 switch (fragP->fr_subtype)
8067 {
8068 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8069 extension = 4; /* 1 opcode + 4 displacement */
8070 opcode[0] = 0xe9;
8071 where_to_put_displacement = &opcode[1];
8072 break;
8073
8074 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8075 extension = 2; /* 1 opcode + 2 displacement */
8076 opcode[0] = 0xe9;
8077 where_to_put_displacement = &opcode[1];
8078 break;
8079
8080 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8081 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8082 extension = 5; /* 2 opcode + 4 displacement */
8083 opcode[1] = opcode[0] + 0x10;
8084 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8085 where_to_put_displacement = &opcode[2];
8086 break;
8087
8088 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8089 extension = 3; /* 2 opcode + 2 displacement */
8090 opcode[1] = opcode[0] + 0x10;
8091 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8092 where_to_put_displacement = &opcode[2];
8093 break;
8094
8095 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
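	  /* No word-displacement conditional jump is available here, so
	     invert the condition and take a short jump (displacement 3)
	     over the three-byte `jmp rel16' (0xe9) that carries the real
	     displacement.  */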
8096 extension = 4;
8097 opcode[0] ^= 1;
8098 opcode[1] = 3;
8099 opcode[2] = 0xe9;
8100 where_to_put_displacement = &opcode[3];
8101 break;
8102
8103 default:
8104 BAD_CASE (fragP->fr_subtype);
8105 break;
8106 }
8107 }
8108
8109 /* If size is less than four we are sure that the operand fits,
8110 but if it's 4, then it could be that the displacement is larger
8111 than +/- 2GB. */
8112 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8113 && object_64bit
8114 && ((addressT) (displacement_from_opcode_start - extension
8115 + ((addressT) 1 << 31))
8116 > (((addressT) 2 << 31) - 1)))
8117 {
8118 as_bad_where (fragP->fr_file, fragP->fr_line,
8119 _("jump target out of range"));
8120 /* Make us emit 0. */
8121 displacement_from_opcode_start = extension;
8122 }
8123 /* Now put displacement after opcode. */
8124 md_number_to_chars ((char *) where_to_put_displacement,
8125 (valueT) (displacement_from_opcode_start - extension),
8126 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8127 fragP->fr_fix += extension;
8128 }
8129 \f
8130 /* Apply a fixup (fixP) to segment data, once it has been determined
8131 by our caller that we have all the info we need to fix it up.
8132
8133 Parameter valP is the pointer to the value of the bits.
8134
8135 On the 386, immediates, displacements, and data pointers are all in
8136 the same (little-endian) format, so we don't need to care about which
8137 we are handling. */
8138
8139 void
8140 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8141 {
8142 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8143 valueT value = *valP;
8144
8145 #if !defined (TE_Mach)
8146 if (fixP->fx_pcrel)
8147 {
8148 switch (fixP->fx_r_type)
8149 {
8150 default:
8151 break;
8152
8153 case BFD_RELOC_64:
8154 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8155 break;
8156 case BFD_RELOC_32:
8157 case BFD_RELOC_X86_64_32S:
8158 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8159 break;
8160 case BFD_RELOC_16:
8161 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8162 break;
8163 case BFD_RELOC_8:
8164 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8165 break;
8166 }
8167 }
8168
8169 if (fixP->fx_addsy != NULL
8170 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8171 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8172 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8173 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8174 && !use_rela_relocations)
8175 {
8176 /* This is a hack. There should be a better way to handle this.
8177 This covers for the fact that bfd_install_relocation will
8178 subtract the current location (for partial_inplace, PC relative
8179 relocations); see more below. */
8180 #ifndef OBJ_AOUT
8181 if (IS_ELF
8182 #ifdef TE_PE
8183 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8184 #endif
8185 )
8186 value += fixP->fx_where + fixP->fx_frag->fr_address;
8187 #endif
8188 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8189 if (IS_ELF)
8190 {
8191 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8192
8193 if ((sym_seg == seg
8194 || (symbol_section_p (fixP->fx_addsy)
8195 && sym_seg != absolute_section))
8196 && !generic_force_reloc (fixP))
8197 {
8198 /* Yes, we add the values in twice. This is because
8199 bfd_install_relocation subtracts them out again. I think
8200 bfd_install_relocation is broken, but I don't dare change
8201 it. FIXME. */
8202 value += fixP->fx_where + fixP->fx_frag->fr_address;
8203 }
8204 }
8205 #endif
8206 #if defined (OBJ_COFF) && defined (TE_PE)
8207 /* For some reason, the PE format does not store a
8208 section address offset for a PC relative symbol. */
8209 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8210 || S_IS_WEAK (fixP->fx_addsy))
8211 value += md_pcrel_from (fixP);
8212 #endif
8213 }
8214 #if defined (OBJ_COFF) && defined (TE_PE)
8215 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8216 {
8217 value -= S_GET_VALUE (fixP->fx_addsy);
8218 }
8219 #endif
8220
8221 /* Fix a few things - the dynamic linker expects certain values here,
8222 and we must not disappoint it. */
8223 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8224 if (IS_ELF && fixP->fx_addsy)
8225 switch (fixP->fx_r_type)
8226 {
8227 case BFD_RELOC_386_PLT32:
8228 case BFD_RELOC_X86_64_PLT32:
8229 /* Make the jump instruction point to the address of the operand. At
8230 runtime we merely add the offset to the actual PLT entry. */
8231 value = -4;
8232 break;
8233
8234 case BFD_RELOC_386_TLS_GD:
8235 case BFD_RELOC_386_TLS_LDM:
8236 case BFD_RELOC_386_TLS_IE_32:
8237 case BFD_RELOC_386_TLS_IE:
8238 case BFD_RELOC_386_TLS_GOTIE:
8239 case BFD_RELOC_386_TLS_GOTDESC:
8240 case BFD_RELOC_X86_64_TLSGD:
8241 case BFD_RELOC_X86_64_TLSLD:
8242 case BFD_RELOC_X86_64_GOTTPOFF:
8243 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8244 value = 0; /* Fully resolved at runtime. No addend. */
8245 /* Fallthrough */
8246 case BFD_RELOC_386_TLS_LE:
8247 case BFD_RELOC_386_TLS_LDO_32:
8248 case BFD_RELOC_386_TLS_LE_32:
8249 case BFD_RELOC_X86_64_DTPOFF32:
8250 case BFD_RELOC_X86_64_DTPOFF64:
8251 case BFD_RELOC_X86_64_TPOFF32:
8252 case BFD_RELOC_X86_64_TPOFF64:
8253 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8254 break;
8255
8256 case BFD_RELOC_386_TLS_DESC_CALL:
8257 case BFD_RELOC_X86_64_TLSDESC_CALL:
8258 value = 0; /* Fully resolved at runtime. No addend. */
8259 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8260 fixP->fx_done = 0;
8261 return;
8262
8263 case BFD_RELOC_386_GOT32:
8264 case BFD_RELOC_X86_64_GOT32:
8265 value = 0; /* Fully resolved at runtime. No addend. */
8266 break;
8267
8268 case BFD_RELOC_VTABLE_INHERIT:
8269 case BFD_RELOC_VTABLE_ENTRY:
8270 fixP->fx_done = 0;
8271 return;
8272
8273 default:
8274 break;
8275 }
8276 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8277 *valP = value;
8278 #endif /* !defined (TE_Mach) */
8279
8280 /* Are we finished with this relocation now? */
8281 if (fixP->fx_addsy == NULL)
8282 fixP->fx_done = 1;
8283 #if defined (OBJ_COFF) && defined (TE_PE)
8284 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8285 {
8286 fixP->fx_done = 0;
8287 /* Remember value for tc_gen_reloc. */
8288 fixP->fx_addnumber = value;
8289 /* Clear out the frag for now. */
8290 value = 0;
8291 }
8292 #endif
8293 else if (use_rela_relocations)
8294 {
8295 fixP->fx_no_overflow = 1;
8296 /* Remember value for tc_gen_reloc. */
8297 fixP->fx_addnumber = value;
8298 value = 0;
8299 }
8300
8301 md_number_to_chars (p, value, fixP->fx_size);
8302 }
8303 \f
8304 char *
8305 md_atof (int type, char *litP, int *sizeP)
8306 {
8307 /* This outputs the LITTLENUMs in REVERSE order;
8308 in accord with the little-endian 386. */
8309 return ieee_md_atof (type, litP, sizeP, FALSE);
8310 }
8311 \f
8312 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8313
8314 static char *
8315 output_invalid (int c)
8316 {
8317 if (ISPRINT (c))
8318 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8319 "'%c'", c);
8320 else
8321 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8322 "(0x%x)", (unsigned char) c);
8323 return output_invalid_buf;
8324 }
8325
8326 /* REG_STRING starts *before* REGISTER_PREFIX. */
8327
8328 static const reg_entry *
8329 parse_real_register (char *reg_string, char **end_op)
8330 {
8331 char *s = reg_string;
8332 char *p;
8333 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8334 const reg_entry *r;
8335
8336 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8337 if (*s == REGISTER_PREFIX)
8338 ++s;
8339
8340 if (is_space_char (*s))
8341 ++s;
8342
8343 p = reg_name_given;
8344 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8345 {
8346 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8347 return (const reg_entry *) NULL;
8348 s++;
8349 }
8350
8351 /* For naked regs, make sure that we are not dealing with an identifier.
8352 This prevents confusing an identifier like `eax_var' with register
8353 `eax'. */
8354 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8355 return (const reg_entry *) NULL;
8356
8357 *end_op = s;
8358
8359 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8360
8361 /* Handle floating point regs, allowing spaces in the (i) part. */
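  /* For example, `%st ( 3 )' is accepted here and resolves to st(3).  */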
8362 if (r == i386_regtab /* %st is first entry of table */)
8363 {
8364 if (is_space_char (*s))
8365 ++s;
8366 if (*s == '(')
8367 {
8368 ++s;
8369 if (is_space_char (*s))
8370 ++s;
8371 if (*s >= '0' && *s <= '7')
8372 {
8373 int fpr = *s - '0';
8374 ++s;
8375 if (is_space_char (*s))
8376 ++s;
8377 if (*s == ')')
8378 {
8379 *end_op = s + 1;
8380 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8381 know (r);
8382 return r + fpr;
8383 }
8384 }
8385 /* We have "%st(" then garbage. */
8386 return (const reg_entry *) NULL;
8387 }
8388 }
8389
8390 if (r == NULL || allow_pseudo_reg)
8391 return r;
8392
8393 if (operand_type_all_zero (&r->reg_type))
8394 return (const reg_entry *) NULL;
8395
8396 if ((r->reg_type.bitfield.reg32
8397 || r->reg_type.bitfield.sreg3
8398 || r->reg_type.bitfield.control
8399 || r->reg_type.bitfield.debug
8400 || r->reg_type.bitfield.test)
8401 && !cpu_arch_flags.bitfield.cpui386)
8402 return (const reg_entry *) NULL;
8403
8404 if (r->reg_type.bitfield.floatreg
8405 && !cpu_arch_flags.bitfield.cpu8087
8406 && !cpu_arch_flags.bitfield.cpu287
8407 && !cpu_arch_flags.bitfield.cpu387)
8408 return (const reg_entry *) NULL;
8409
8410 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8411 return (const reg_entry *) NULL;
8412
8413 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8414 return (const reg_entry *) NULL;
8415
8416 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8417 return (const reg_entry *) NULL;
8418
8419 /* Don't allow the fake index registers (%eiz/%riz) unless allow_index_reg is nonzero. */
8420 if (!allow_index_reg
8421 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8422 return (const reg_entry *) NULL;
8423
8424 if (((r->reg_flags & (RegRex64 | RegRex))
8425 || r->reg_type.bitfield.reg64)
8426 && (!cpu_arch_flags.bitfield.cpulm
8427 || !operand_type_equal (&r->reg_type, &control))
8428 && flag_code != CODE_64BIT)
8429 return (const reg_entry *) NULL;
8430
8431 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8432 return (const reg_entry *) NULL;
8433
8434 return r;
8435 }
8436
8437 /* REG_STRING starts *before* REGISTER_PREFIX. */
8438
8439 static const reg_entry *
8440 parse_register (char *reg_string, char **end_op)
8441 {
8442 const reg_entry *r;
8443
8444 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8445 r = parse_real_register (reg_string, end_op);
8446 else
8447 r = NULL;
8448 if (!r)
8449 {
8450 char *save = input_line_pointer;
8451 char c;
8452 symbolS *symbolP;
8453
8454 input_line_pointer = reg_string;
8455 c = get_symbol_end ();
8456 symbolP = symbol_find (reg_string);
8457 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8458 {
8459 const expressionS *e = symbol_get_value_expression (symbolP);
8460
8461 know (e->X_op == O_register);
8462 know (e->X_add_number >= 0
8463 && (valueT) e->X_add_number < i386_regtab_size);
8464 r = i386_regtab + e->X_add_number;
8465 *end_op = input_line_pointer;
8466 }
8467 *input_line_pointer = c;
8468 input_line_pointer = save;
8469 }
8470 return r;
8471 }
8472
8473 int
8474 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8475 {
8476 const reg_entry *r;
8477 char *end = input_line_pointer;
8478
8479 *end = *nextcharP;
8480 r = parse_register (name, &input_line_pointer);
8481 if (r && end <= input_line_pointer)
8482 {
8483 *nextcharP = *input_line_pointer;
8484 *input_line_pointer = 0;
8485 e->X_op = O_register;
8486 e->X_add_number = r - i386_regtab;
8487 return 1;
8488 }
8489 input_line_pointer = end;
8490 *end = 0;
8491 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8492 }
8493
8494 void
8495 md_operand (expressionS *e)
8496 {
8497 char *end;
8498 const reg_entry *r;
8499
8500 switch (*input_line_pointer)
8501 {
8502 case REGISTER_PREFIX:
8503 r = parse_real_register (input_line_pointer, &end);
8504 if (r)
8505 {
8506 e->X_op = O_register;
8507 e->X_add_number = r - i386_regtab;
8508 input_line_pointer = end;
8509 }
8510 break;
8511
8512 case '[':
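      /* In Intel syntax a `[' introduces a bracketed sub-expression; the
	 parsed contents are wrapped up as an O_index expression.  */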
8513 gas_assert (intel_syntax);
8514 end = input_line_pointer++;
8515 expression (e);
8516 if (*input_line_pointer == ']')
8517 {
8518 ++input_line_pointer;
8519 e->X_op_symbol = make_expr_symbol (e);
8520 e->X_add_symbol = NULL;
8521 e->X_add_number = 0;
8522 e->X_op = O_index;
8523 }
8524 else
8525 {
8526 e->X_op = O_absent;
8527 input_line_pointer = end;
8528 }
8529 break;
8530 }
8531 }
8532
8533 \f
8534 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8535 const char *md_shortopts = "kVQ:sqn";
8536 #else
8537 const char *md_shortopts = "qn";
8538 #endif
8539
8540 #define OPTION_32 (OPTION_MD_BASE + 0)
8541 #define OPTION_64 (OPTION_MD_BASE + 1)
8542 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8543 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8544 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8545 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8546 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8547 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8548 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8549 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8550 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8551 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8552 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8553 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8554 #define OPTION_X32 (OPTION_MD_BASE + 14)
8555
8556 struct option md_longopts[] =
8557 {
8558 {"32", no_argument, NULL, OPTION_32},
8559 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8560 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8561 {"64", no_argument, NULL, OPTION_64},
8562 #endif
8563 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8564 {"x32", no_argument, NULL, OPTION_X32},
8565 #endif
8566 {"divide", no_argument, NULL, OPTION_DIVIDE},
8567 {"march", required_argument, NULL, OPTION_MARCH},
8568 {"mtune", required_argument, NULL, OPTION_MTUNE},
8569 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8570 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8571 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8572 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8573 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8574 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8575 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8576 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8577 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8578 {NULL, no_argument, NULL, 0}
8579 };
8580 size_t md_longopts_size = sizeof (md_longopts);
8581
8582 int
8583 md_parse_option (int c, char *arg)
8584 {
8585 unsigned int j;
8586 char *arch, *next;
8587
8588 switch (c)
8589 {
8590 case 'n':
8591 optimize_align_code = 0;
8592 break;
8593
8594 case 'q':
8595 quiet_warnings = 1;
8596 break;
8597
8598 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8599 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8600 should be emitted or not. FIXME: Not implemented. */
8601 case 'Q':
8602 break;
8603
8604 /* -V: SVR4 argument to print version ID. */
8605 case 'V':
8606 print_version_id ();
8607 break;
8608
8609 /* -k: Ignore for FreeBSD compatibility. */
8610 case 'k':
8611 break;
8612
8613 case 's':
8614 /* -s: On i386 Solaris, this tells the native assembler to use
8615 .stab instead of .stab.excl. We always use .stab anyhow. */
8616 break;
8617 #endif
8618 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8619 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8620 case OPTION_64:
8621 {
8622 const char **list, **l;
8623
8624 list = bfd_target_list ();
8625 for (l = list; *l != NULL; l++)
8626 if (CONST_STRNEQ (*l, "elf64-x86-64")
8627 || strcmp (*l, "coff-x86-64") == 0
8628 || strcmp (*l, "pe-x86-64") == 0
8629 || strcmp (*l, "pei-x86-64") == 0
8630 || strcmp (*l, "mach-o-x86-64") == 0)
8631 {
8632 default_arch = "x86_64";
8633 break;
8634 }
8635 if (*l == NULL)
8636 as_fatal (_("no compiled in support for x86_64"));
8637 free (list);
8638 }
8639 break;
8640 #endif
8641
8642 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8643 case OPTION_X32:
8644 if (IS_ELF)
8645 {
8646 const char **list, **l;
8647
8648 list = bfd_target_list ();
8649 for (l = list; *l != NULL; l++)
8650 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8651 {
8652 default_arch = "x86_64:32";
8653 break;
8654 }
8655 if (*l == NULL)
8656 as_fatal (_("no compiled in support for 32bit x86_64"));
8657 free (list);
8658 }
8659 else
8660 as_fatal (_("32bit x86_64 is only supported for ELF"));
8661 break;
8662 #endif
8663
8664 case OPTION_32:
8665 default_arch = "i386";
8666 break;
8667
8668 case OPTION_DIVIDE:
8669 #ifdef SVR4_COMMENT_CHARS
8670 {
8671 char *n, *t;
8672 const char *s;
8673
8674 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8675 t = n;
8676 for (s = i386_comment_chars; *s != '\0'; s++)
8677 if (*s != '/')
8678 *t++ = *s;
8679 *t = '\0';
8680 i386_comment_chars = n;
8681 }
8682 #endif
8683 break;
8684
8685 case OPTION_MARCH:
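      /* The argument is a CPU name optionally followed by `+extension'
	 items, e.g. `-march=i686+sse2'; extension names correspond to the
	 cpu_arch[] entries whose names begin with a dot.  */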
8686 arch = xstrdup (arg);
8687 do
8688 {
8689 if (*arch == '.')
8690 as_fatal (_("invalid -march= option: `%s'"), arg);
8691 next = strchr (arch, '+');
8692 if (next)
8693 *next++ = '\0';
8694 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8695 {
8696 if (strcmp (arch, cpu_arch [j].name) == 0)
8697 {
8698 /* Processor. */
8699 if (! cpu_arch[j].flags.bitfield.cpui386)
8700 continue;
8701
8702 cpu_arch_name = cpu_arch[j].name;
8703 cpu_sub_arch_name = NULL;
8704 cpu_arch_flags = cpu_arch[j].flags;
8705 cpu_arch_isa = cpu_arch[j].type;
8706 cpu_arch_isa_flags = cpu_arch[j].flags;
8707 if (!cpu_arch_tune_set)
8708 {
8709 cpu_arch_tune = cpu_arch_isa;
8710 cpu_arch_tune_flags = cpu_arch_isa_flags;
8711 }
8712 break;
8713 }
8714 else if (*cpu_arch [j].name == '.'
8715 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8716 {
8717 /* ISA extension. */
8718 i386_cpu_flags flags;
8719
8720 if (!cpu_arch[j].negated)
8721 flags = cpu_flags_or (cpu_arch_flags,
8722 cpu_arch[j].flags);
8723 else
8724 flags = cpu_flags_and_not (cpu_arch_flags,
8725 cpu_arch[j].flags);
8726 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8727 {
8728 if (cpu_sub_arch_name)
8729 {
8730 char *name = cpu_sub_arch_name;
8731 cpu_sub_arch_name = concat (name,
8732 cpu_arch[j].name,
8733 (const char *) NULL);
8734 free (name);
8735 }
8736 else
8737 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8738 cpu_arch_flags = flags;
8739 cpu_arch_isa_flags = flags;
8740 }
8741 break;
8742 }
8743 }
8744
8745 if (j >= ARRAY_SIZE (cpu_arch))
8746 as_fatal (_("invalid -march= option: `%s'"), arg);
8747
8748 arch = next;
8749 }
8750 while (next != NULL);
8751 break;
8752
8753 case OPTION_MTUNE:
8754 if (*arg == '.')
8755 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8756 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8757 {
8758 if (strcmp (arg, cpu_arch [j].name) == 0)
8759 {
8760 cpu_arch_tune_set = 1;
8761 cpu_arch_tune = cpu_arch [j].type;
8762 cpu_arch_tune_flags = cpu_arch[j].flags;
8763 break;
8764 }
8765 }
8766 if (j >= ARRAY_SIZE (cpu_arch))
8767 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8768 break;
8769
8770 case OPTION_MMNEMONIC:
8771 if (strcasecmp (arg, "att") == 0)
8772 intel_mnemonic = 0;
8773 else if (strcasecmp (arg, "intel") == 0)
8774 intel_mnemonic = 1;
8775 else
8776 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8777 break;
8778
8779 case OPTION_MSYNTAX:
8780 if (strcasecmp (arg, "att") == 0)
8781 intel_syntax = 0;
8782 else if (strcasecmp (arg, "intel") == 0)
8783 intel_syntax = 1;
8784 else
8785 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8786 break;
8787
8788 case OPTION_MINDEX_REG:
8789 allow_index_reg = 1;
8790 break;
8791
8792 case OPTION_MNAKED_REG:
8793 allow_naked_reg = 1;
8794 break;
8795
8796 case OPTION_MOLD_GCC:
8797 old_gcc = 1;
8798 break;
8799
8800 case OPTION_MSSE2AVX:
8801 sse2avx = 1;
8802 break;
8803
8804 case OPTION_MSSE_CHECK:
8805 if (strcasecmp (arg, "error") == 0)
8806 sse_check = check_error;
8807 else if (strcasecmp (arg, "warning") == 0)
8808 sse_check = check_warning;
8809 else if (strcasecmp (arg, "none") == 0)
8810 sse_check = check_none;
8811 else
8812 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8813 break;
8814
8815 case OPTION_MOPERAND_CHECK:
8816 if (strcasecmp (arg, "error") == 0)
8817 operand_check = check_error;
8818 else if (strcasecmp (arg, "warning") == 0)
8819 operand_check = check_warning;
8820 else if (strcasecmp (arg, "none") == 0)
8821 operand_check = check_none;
8822 else
8823 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8824 break;
8825
8826 case OPTION_MAVXSCALAR:
8827 if (strcasecmp (arg, "128") == 0)
8828 avxscalar = vex128;
8829 else if (strcasecmp (arg, "256") == 0)
8830 avxscalar = vex256;
8831 else
8832 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8833 break;
8834
8835 default:
8836 return 0;
8837 }
8838 return 1;
8839 }
8840
8841 #define MESSAGE_TEMPLATE \
8842 " "
8843
8844 static void
8845 show_arch (FILE *stream, int ext, int check)
8846 {
8847 static char message[] = MESSAGE_TEMPLATE;
8848 char *start = message + 27;
8849 char *p;
8850 int size = sizeof (MESSAGE_TEMPLATE);
8851 int left;
8852 const char *name;
8853 int len;
8854 unsigned int j;
8855
8856 p = start;
8857 left = size - (start - message);
8858 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8859 {
8860 /* Should it be skipped? */
8861 if (cpu_arch [j].skip)
8862 continue;
8863
8864 name = cpu_arch [j].name;
8865 len = cpu_arch [j].len;
8866 if (*name == '.')
8867 {
8868 /* It is an extension. Skip if we aren't asked to show it. */
8869 if (ext)
8870 {
8871 name++;
8872 len--;
8873 }
8874 else
8875 continue;
8876 }
8877 else if (ext)
8878 {
8879 /* It is a processor. Skip if we only show extensions. */
8880 continue;
8881 }
8882 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8883 {
8884 /* It is an impossible processor - skip. */
8885 continue;
8886 }
8887
8888 /* Reserve 2 spaces for ", " or ",\0" */
8889 left -= len + 2;
8890
8891 /* Check if there is any room. */
8892 if (left >= 0)
8893 {
8894 if (p != start)
8895 {
8896 *p++ = ',';
8897 *p++ = ' ';
8898 }
8899 p = mempcpy (p, name, len);
8900 }
8901 else
8902 {
8903 /* Output the current message now and start a new one. */
8904 *p++ = ',';
8905 *p = '\0';
8906 fprintf (stream, "%s\n", message);
8907 p = start;
8908 left = size - (start - message) - len - 2;
8909
8910 gas_assert (left >= 0);
8911
8912 p = mempcpy (p, name, len);
8913 }
8914 }
8915
8916 *p = '\0';
8917 fprintf (stream, "%s\n", message);
8918 }
8919
8920 void
8921 md_show_usage (FILE *stream)
8922 {
8923 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8924 fprintf (stream, _("\
8925 -Q ignored\n\
8926 -V print assembler version number\n\
8927 -k ignored\n"));
8928 #endif
8929 fprintf (stream, _("\
8930 -n Do not optimize code alignment\n\
8931 -q quieten some warnings\n"));
8932 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8933 fprintf (stream, _("\
8934 -s ignored\n"));
8935 #endif
8936 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8937 || defined (TE_PE) || defined (TE_PEP))
8938 fprintf (stream, _("\
8939 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8940 #endif
8941 #ifdef SVR4_COMMENT_CHARS
8942 fprintf (stream, _("\
8943 --divide do not treat `/' as a comment character\n"));
8944 #else
8945 fprintf (stream, _("\
8946 --divide ignored\n"));
8947 #endif
8948 fprintf (stream, _("\
8949 -march=CPU[,+EXTENSION...]\n\
8950 generate code for CPU and EXTENSION, CPU is one of:\n"));
8951 show_arch (stream, 0, 1);
8952 fprintf (stream, _("\
8953 EXTENSION is combination of:\n"));
8954 show_arch (stream, 1, 0);
8955 fprintf (stream, _("\
8956 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8957 show_arch (stream, 0, 0);
8958 fprintf (stream, _("\
8959 -msse2avx encode SSE instructions with VEX prefix\n"));
8960 fprintf (stream, _("\
8961 -msse-check=[none|error|warning]\n\
8962 check SSE instructions\n"));
8963 fprintf (stream, _("\
8964 -moperand-check=[none|error|warning]\n\
8965 check operand combinations for validity\n"));
8966 fprintf (stream, _("\
8967 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8968 length\n"));
8969 fprintf (stream, _("\
8970 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8971 fprintf (stream, _("\
8972 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8973 fprintf (stream, _("\
8974 -mindex-reg support pseudo index registers\n"));
8975 fprintf (stream, _("\
8976 -mnaked-reg don't require `%%' prefix for registers\n"));
8977 fprintf (stream, _("\
8978 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8979 }
8980
8981 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8982 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8983 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8984
8985 /* Pick the target format to use. */
8986
8987 const char *
8988 i386_target_format (void)
8989 {
8990 if (!strncmp (default_arch, "x86_64", 6))
8991 {
8992 update_code_flag (CODE_64BIT, 1);
8993 if (default_arch[6] == '\0')
8994 x86_elf_abi = X86_64_ABI;
8995 else
8996 x86_elf_abi = X86_64_X32_ABI;
8997 }
8998 else if (!strcmp (default_arch, "i386"))
8999 update_code_flag (CODE_32BIT, 1);
9000 else
9001 as_fatal (_("unknown architecture"));
9002
9003 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9004 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9005 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9006 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9007
9008 switch (OUTPUT_FLAVOR)
9009 {
9010 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9011 case bfd_target_aout_flavour:
9012 return AOUT_TARGET_FORMAT;
9013 #endif
9014 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9015 # if defined (TE_PE) || defined (TE_PEP)
9016 case bfd_target_coff_flavour:
9017 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9018 # elif defined (TE_GO32)
9019 case bfd_target_coff_flavour:
9020 return "coff-go32";
9021 # else
9022 case bfd_target_coff_flavour:
9023 return "coff-i386";
9024 # endif
9025 #endif
9026 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9027 case bfd_target_elf_flavour:
9028 {
9029 const char *format;
9030
9031 switch (x86_elf_abi)
9032 {
9033 default:
9034 format = ELF_TARGET_FORMAT;
9035 break;
9036 case X86_64_ABI:
9037 use_rela_relocations = 1;
9038 object_64bit = 1;
9039 format = ELF_TARGET_FORMAT64;
9040 break;
9041 case X86_64_X32_ABI:
9042 use_rela_relocations = 1;
9043 object_64bit = 1;
9044 disallow_64bit_reloc = 1;
9045 format = ELF_TARGET_FORMAT32;
9046 break;
9047 }
9048 if (cpu_arch_isa == PROCESSOR_L1OM)
9049 {
9050 if (x86_elf_abi != X86_64_ABI)
9051 as_fatal (_("Intel L1OM is 64bit only"));
9052 return ELF_TARGET_L1OM_FORMAT;
9053 }
9054 if (cpu_arch_isa == PROCESSOR_K1OM)
9055 {
9056 if (x86_elf_abi != X86_64_ABI)
9057 as_fatal (_("Intel K1OM is 64bit only"));
9058 return ELF_TARGET_K1OM_FORMAT;
9059 }
9060 else
9061 return format;
9062 }
9063 #endif
9064 #if defined (OBJ_MACH_O)
9065 case bfd_target_mach_o_flavour:
9066 if (flag_code == CODE_64BIT)
9067 {
9068 use_rela_relocations = 1;
9069 object_64bit = 1;
9070 return "mach-o-x86-64";
9071 }
9072 else
9073 return "mach-o-i386";
9074 #endif
9075 default:
9076 abort ();
9077 return NULL;
9078 }
9079 }
9080
9081 #endif /* OBJ_MAYBE_ more than one */
9082
9083 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9084 void
9085 i386_elf_emit_arch_note (void)
9086 {
9087 if (IS_ELF && cpu_arch_name != NULL)
9088 {
9089 char *p;
9090 asection *seg = now_seg;
9091 subsegT subseg = now_subseg;
9092 Elf_Internal_Note i_note;
9093 Elf_External_Note e_note;
9094 asection *note_secp;
9095 int len;
9096
9097 /* Create the .note section. */
9098 note_secp = subseg_new (".note", 0);
9099 bfd_set_section_flags (stdoutput,
9100 note_secp,
9101 SEC_HAS_CONTENTS | SEC_READONLY);
9102
9103 /* Process the arch string. */
9104 len = strlen (cpu_arch_name);
9105
9106 i_note.namesz = len + 1;
9107 i_note.descsz = 0;
9108 i_note.type = NT_ARCH;
9109 p = frag_more (sizeof (e_note.namesz));
9110 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9111 p = frag_more (sizeof (e_note.descsz));
9112 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9113 p = frag_more (sizeof (e_note.type));
9114 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9115 p = frag_more (len + 1);
9116 strcpy (p, cpu_arch_name);
9117
9118 frag_align (2, 0, 0);
9119
9120 subseg_set (seg, subseg);
9121 }
9122 }
9123 #endif
9124 \f
9125 symbolS *
9126 md_undefined_symbol (char *name)
9127 {
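  /* Comparing the first few characters before the strcmp avoids the full
     string comparison for the common case of an unrelated symbol.  */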
9128 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9129 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9130 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9131 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9132 {
9133 if (!GOT_symbol)
9134 {
9135 if (symbol_find (name))
9136 as_bad (_("GOT already in symbol table"));
9137 GOT_symbol = symbol_new (name, undefined_section,
9138 (valueT) 0, &zero_address_frag);
9139 }
9140 return GOT_symbol;
9141 }
9142 return 0;
9143 }
9144
9145 /* Round up a section size to the appropriate boundary. */
9146
9147 valueT
9148 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9149 {
9150 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9151 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9152 {
9153 /* For a.out, force the section size to be aligned. If we don't do
9154 this, BFD will align it for us, but it will not write out the
9155 final bytes of the section. This may be a bug in BFD, but it is
9156 easier to fix it here since that is how the other a.out targets
9157 work. */
9158 int align;
9159
9160 align = bfd_get_section_alignment (stdoutput, segment);
9161 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9162 }
9163 #endif
9164
9165 return size;
9166 }
9167
9168 /* On the i386, PC-relative offsets are relative to the start of the
9169 next instruction. That is, the address of the offset, plus its
9170 size, since the offset is always the last part of the insn. */
9171
9172 long
9173 md_pcrel_from (fixS *fixP)
9174 {
9175 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9176 }
9177
9178 #ifndef I386COFF
9179
9180 static void
9181 s_bss (int ignore ATTRIBUTE_UNUSED)
9182 {
9183 int temp;
9184
9185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9186 if (IS_ELF)
9187 obj_elf_section_change_hook ();
9188 #endif
9189 temp = get_absolute_expression ();
9190 subseg_set (bss_section, (subsegT) temp);
9191 demand_empty_rest_of_line ();
9192 }
9193
9194 #endif
9195
9196 void
9197 i386_validate_fix (fixS *fixp)
9198 {
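  /* A fixup whose subtrahend is _GLOBAL_OFFSET_TABLE_, i.e. an expression
     of the form `sym - _GLOBAL_OFFSET_TABLE_', is converted into the
     corresponding GOT-relative relocation here.  */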
9199 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9200 {
9201 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9202 {
9203 if (!object_64bit)
9204 abort ();
9205 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9206 }
9207 else
9208 {
9209 if (!object_64bit)
9210 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9211 else
9212 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9213 }
9214 fixp->fx_subsy = 0;
9215 }
9216 }
9217
9218 arelent *
9219 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9220 {
9221 arelent *rel;
9222 bfd_reloc_code_real_type code;
9223
9224 switch (fixp->fx_r_type)
9225 {
9226 case BFD_RELOC_X86_64_PLT32:
9227 case BFD_RELOC_X86_64_GOT32:
9228 case BFD_RELOC_X86_64_GOTPCREL:
9229 case BFD_RELOC_386_PLT32:
9230 case BFD_RELOC_386_GOT32:
9231 case BFD_RELOC_386_GOTOFF:
9232 case BFD_RELOC_386_GOTPC:
9233 case BFD_RELOC_386_TLS_GD:
9234 case BFD_RELOC_386_TLS_LDM:
9235 case BFD_RELOC_386_TLS_LDO_32:
9236 case BFD_RELOC_386_TLS_IE_32:
9237 case BFD_RELOC_386_TLS_IE:
9238 case BFD_RELOC_386_TLS_GOTIE:
9239 case BFD_RELOC_386_TLS_LE_32:
9240 case BFD_RELOC_386_TLS_LE:
9241 case BFD_RELOC_386_TLS_GOTDESC:
9242 case BFD_RELOC_386_TLS_DESC_CALL:
9243 case BFD_RELOC_X86_64_TLSGD:
9244 case BFD_RELOC_X86_64_TLSLD:
9245 case BFD_RELOC_X86_64_DTPOFF32:
9246 case BFD_RELOC_X86_64_DTPOFF64:
9247 case BFD_RELOC_X86_64_GOTTPOFF:
9248 case BFD_RELOC_X86_64_TPOFF32:
9249 case BFD_RELOC_X86_64_TPOFF64:
9250 case BFD_RELOC_X86_64_GOTOFF64:
9251 case BFD_RELOC_X86_64_GOTPC32:
9252 case BFD_RELOC_X86_64_GOT64:
9253 case BFD_RELOC_X86_64_GOTPCREL64:
9254 case BFD_RELOC_X86_64_GOTPC64:
9255 case BFD_RELOC_X86_64_GOTPLT64:
9256 case BFD_RELOC_X86_64_PLTOFF64:
9257 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9258 case BFD_RELOC_X86_64_TLSDESC_CALL:
9259 case BFD_RELOC_RVA:
9260 case BFD_RELOC_VTABLE_ENTRY:
9261 case BFD_RELOC_VTABLE_INHERIT:
9262 #ifdef TE_PE
9263 case BFD_RELOC_32_SECREL:
9264 #endif
9265 code = fixp->fx_r_type;
9266 break;
9267 case BFD_RELOC_X86_64_32S:
9268 if (!fixp->fx_pcrel)
9269 {
9270 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9271 code = fixp->fx_r_type;
9272 break;
9273 }
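      /* Fall through.  */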
9274 default:
9275 if (fixp->fx_pcrel)
9276 {
9277 switch (fixp->fx_size)
9278 {
9279 default:
9280 as_bad_where (fixp->fx_file, fixp->fx_line,
9281 _("can not do %d byte pc-relative relocation"),
9282 fixp->fx_size);
9283 code = BFD_RELOC_32_PCREL;
9284 break;
9285 case 1: code = BFD_RELOC_8_PCREL; break;
9286 case 2: code = BFD_RELOC_16_PCREL; break;
9287 case 4: code = BFD_RELOC_32_PCREL; break;
9288 #ifdef BFD64
9289 case 8: code = BFD_RELOC_64_PCREL; break;
9290 #endif
9291 }
9292 }
9293 else
9294 {
9295 switch (fixp->fx_size)
9296 {
9297 default:
9298 as_bad_where (fixp->fx_file, fixp->fx_line,
9299 _("can not do %d byte relocation"),
9300 fixp->fx_size);
9301 code = BFD_RELOC_32;
9302 break;
9303 case 1: code = BFD_RELOC_8; break;
9304 case 2: code = BFD_RELOC_16; break;
9305 case 4: code = BFD_RELOC_32; break;
9306 #ifdef BFD64
9307 case 8: code = BFD_RELOC_64; break;
9308 #endif
9309 }
9310 }
9311 break;
9312 }
9313
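  /* A reference to _GLOBAL_OFFSET_TABLE_ itself (as in the classic PIC
     prologue `addl $_GLOBAL_OFFSET_TABLE_, %ebx') has to be emitted as a
     GOTPC-style relocation.  */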
9314 if ((code == BFD_RELOC_32
9315 || code == BFD_RELOC_32_PCREL
9316 || code == BFD_RELOC_X86_64_32S)
9317 && GOT_symbol
9318 && fixp->fx_addsy == GOT_symbol)
9319 {
9320 if (!object_64bit)
9321 code = BFD_RELOC_386_GOTPC;
9322 else
9323 code = BFD_RELOC_X86_64_GOTPC32;
9324 }
9325 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9326 && GOT_symbol
9327 && fixp->fx_addsy == GOT_symbol)
9328 {
9329 code = BFD_RELOC_X86_64_GOTPC64;
9330 }
9331
9332 rel = (arelent *) xmalloc (sizeof (arelent));
9333 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9334 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9335
9336 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9337
9338 if (!use_rela_relocations)
9339 {
9340 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9341 vtable entry to be used in the relocation's section offset. */
9342 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9343 rel->address = fixp->fx_offset;
9344 #if defined (OBJ_COFF) && defined (TE_PE)
9345 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9346 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9347 else
9348 #endif
9349 rel->addend = 0;
9350 }
9351 /* Use rela relocations in 64-bit mode. */
9352 else
9353 {
9354 if (disallow_64bit_reloc)
9355 switch (code)
9356 {
9357 case BFD_RELOC_X86_64_DTPOFF64:
9358 case BFD_RELOC_X86_64_TPOFF64:
9359 case BFD_RELOC_64_PCREL:
9360 case BFD_RELOC_X86_64_GOTOFF64:
9361 case BFD_RELOC_X86_64_GOT64:
9362 case BFD_RELOC_X86_64_GOTPCREL64:
9363 case BFD_RELOC_X86_64_GOTPC64:
9364 case BFD_RELOC_X86_64_GOTPLT64:
9365 case BFD_RELOC_X86_64_PLTOFF64:
9366 as_bad_where (fixp->fx_file, fixp->fx_line,
9367 _("cannot represent relocation type %s in x32 mode"),
9368 bfd_get_reloc_code_name (code));
9369 break;
9370 default:
9371 break;
9372 }
9373
9374 if (!fixp->fx_pcrel)
9375 rel->addend = fixp->fx_offset;
9376 else
9377 switch (code)
9378 {
9379 case BFD_RELOC_X86_64_PLT32:
9380 case BFD_RELOC_X86_64_GOT32:
9381 case BFD_RELOC_X86_64_GOTPCREL:
9382 case BFD_RELOC_X86_64_TLSGD:
9383 case BFD_RELOC_X86_64_TLSLD:
9384 case BFD_RELOC_X86_64_GOTTPOFF:
9385 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9386 case BFD_RELOC_X86_64_TLSDESC_CALL:
9387 rel->addend = fixp->fx_offset - fixp->fx_size;
9388 break;
9389 default:
9390 rel->addend = (section->vma
9391 - fixp->fx_size
9392 + fixp->fx_addnumber
9393 + md_pcrel_from (fixp));
9394 break;
9395 }
9396 }
9397
9398 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9399 if (rel->howto == NULL)
9400 {
9401 as_bad_where (fixp->fx_file, fixp->fx_line,
9402 _("cannot represent relocation type %s"),
9403 bfd_get_reloc_code_name (code));
9404 /* Set howto to a garbage value so that we can keep going. */
9405 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9406 gas_assert (rel->howto != NULL);
9407 }
9408
9409 return rel;
9410 }
9411
9412 #include "tc-i386-intel.c"
9413
9414 void
9415 tc_x86_parse_to_dw2regnum (expressionS *exp)
9416 {
9417 int saved_naked_reg;
9418 char saved_register_dot;
9419
9420 saved_naked_reg = allow_naked_reg;
9421 allow_naked_reg = 1;
9422 saved_register_dot = register_chars['.'];
9423 register_chars['.'] = '.';
9424 allow_pseudo_reg = 1;
9425 expression_and_evaluate (exp);
9426 allow_pseudo_reg = 0;
9427 register_chars['.'] = saved_register_dot;
9428 allow_naked_reg = saved_naked_reg;
9429
9430 if (exp->X_op == O_register && exp->X_add_number >= 0)
9431 {
9432 if ((addressT) exp->X_add_number < i386_regtab_size)
9433 {
9434 exp->X_op = O_constant;
9435 exp->X_add_number = i386_regtab[exp->X_add_number]
9436 .dw2_regnum[flag_code >> 1];
9437 }
9438 else
9439 exp->X_op = O_illegal;
9440 }
9441 }
9442
9443 void
9444 tc_x86_frame_initial_instructions (void)
9445 {
9446 static unsigned int sp_regno[2];
9447
9448 if (!sp_regno[flag_code >> 1])
9449 {
9450 char *saved_input = input_line_pointer;
9451 char sp[][4] = {"esp", "rsp"};
9452 expressionS exp;
9453
9454 input_line_pointer = sp[flag_code >> 1];
9455 tc_x86_parse_to_dw2regnum (&exp);
9456 gas_assert (exp.X_op == O_constant);
9457 sp_regno[flag_code >> 1] = exp.X_add_number;
9458 input_line_pointer = saved_input;
9459 }
9460
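  /* Initial CFI state: the CFA is the stack pointer plus one word, and the
     return address is saved at CFA minus one word.  */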
9461 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9462 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9463 }
9464
9465 int
9466 x86_dwarf2_addr_size (void)
9467 {
9468 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9469 if (x86_elf_abi == X86_64_X32_ABI)
9470 return 4;
9471 #endif
9472 return bfd_arch_bits_per_address (stdoutput) / 8;
9473 }
9474
9475 int
9476 i386_elf_section_type (const char *str, size_t len)
9477 {
9478 if (flag_code == CODE_64BIT
9479 && len == sizeof ("unwind") - 1
9480 && strncmp (str, "unwind", 6) == 0)
9481 return SHT_X86_64_UNWIND;
9482
9483 return -1;
9484 }
9485
9486 #ifdef TE_SOLARIS
9487 void
9488 i386_solaris_fix_up_eh_frame (segT sec)
9489 {
9490 if (flag_code == CODE_64BIT)
9491 elf_section_type (sec) = SHT_X86_64_UNWIND;
9492 }
9493 #endif
9494
9495 #ifdef TE_PE
9496 void
9497 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9498 {
9499 expressionS exp;
9500
9501 exp.X_op = O_secrel;
9502 exp.X_add_symbol = symbol;
9503 exp.X_add_number = 0;
9504 emit_expr (&exp, size);
9505 }
9506 #endif
9507
9508 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9509 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
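/* The flag can be requested either with the `l' character in the section
   flags string or with the `large' keyword, e.g.
   `.section .ldata,"awl",@progbits'.  */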
9510
9511 bfd_vma
9512 x86_64_section_letter (int letter, char **ptr_msg)
9513 {
9514 if (flag_code == CODE_64BIT)
9515 {
9516 if (letter == 'l')
9517 return SHF_X86_64_LARGE;
9518
9519 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9520 }
9521 else
9522 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9523 return -1;
9524 }
9525
9526 bfd_vma
9527 x86_64_section_word (char *str, size_t len)
9528 {
9529 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9530 return SHF_X86_64_LARGE;
9531
9532 return -1;
9533 }
9534
9535 static void
9536 handle_large_common (int small ATTRIBUTE_UNUSED)
9537 {
9538 if (flag_code != CODE_64BIT)
9539 {
9540 s_comm_internal (0, elf_common_parse);
9541 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9542 }
9543 else
9544 {
9545 static segT lbss_section;
9546 asection *saved_com_section_ptr = elf_com_section_ptr;
9547 asection *saved_bss_section = bss_section;
9548
9549 if (lbss_section == NULL)
9550 {
9551 flagword applicable;
9552 segT seg = now_seg;
9553 subsegT subseg = now_subseg;
9554
9555 /* The .lbss section is for local .largecomm symbols. */
9556 lbss_section = subseg_new (".lbss", 0);
9557 applicable = bfd_applicable_section_flags (stdoutput);
9558 bfd_set_section_flags (stdoutput, lbss_section,
9559 applicable & SEC_ALLOC);
9560 seg_info (lbss_section)->bss = 1;
9561
9562 subseg_set (seg, subseg);
9563 }
9564
9565 elf_com_section_ptr = &_bfd_elf_large_com_section;
9566 bss_section = lbss_section;
9567
9568 s_comm_internal (0, elf_common_parse);
9569
9570 elf_com_section_ptr = saved_com_section_ptr;
9571 bss_section = saved_bss_section;
9572 }
9573 }
9574 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */