1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
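/* For example, "lock incl %fs:(%eax)" ends up with 0x64 in
   i.prefix[SEG_PREFIX] and 0xf0 in i.prefix[LOCK_PREFIX]; since the
   prefix array is flushed in slot order (per the comment above), the
   segment override is emitted ahead of the lock prefix.  */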
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax.  Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
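/* For instance, looking a mnemonic up in op_hash (declared below)
   yields one of these ranges; match_template () then scans from START
   up to (but not including) END for a template whose operands fit
   what was parsed.  */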
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
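/* Rough example: the memory operand 0x10(%eax,%ebx,4) would be
   described with base = 0 (%eax), index = 3 (%ebx) and scale = 2,
   i.e. the log2 of the scale factor 4.  */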
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136 }
137 arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 static void handle_quad (int);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into an
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling. */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Force 32bit displacement in encoding. */
284 unsigned int disp32_encoding;
285
286 /* Error message. */
287 enum i386_error error;
288 };
289
290 typedef struct _i386_insn i386_insn;
291
292 /* List of chars besides those in app.c:symbol_chars that can start an
293 operand. Used to prevent the scrubber eating vital white-space. */
294 const char extra_symbol_chars[] = "*%-(["
295 #ifdef LEX_AT
296 "@"
297 #endif
298 #ifdef LEX_QM
299 "?"
300 #endif
301 ;
302
303 #if (defined (TE_I386AIX) \
304 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
305 && !defined (TE_GNU) \
306 && !defined (TE_LINUX) \
307 && !defined (TE_NETWARE) \
308 && !defined (TE_FreeBSD) \
309 && !defined (TE_DragonFly) \
310 && !defined (TE_NetBSD)))
311 /* This array holds the chars that always start a comment. If the
312 pre-processor is disabled, these aren't very useful. The option
313 --divide will remove '/' from this list. */
314 const char *i386_comment_chars = "#/";
315 #define SVR4_COMMENT_CHARS 1
316 #define PREFIX_SEPARATOR '\\'
317
318 #else
319 const char *i386_comment_chars = "#";
320 #define PREFIX_SEPARATOR '/'
321 #endif
322
323 /* This array holds the chars that only start a comment at the beginning of
324 a line. If the line seems to have the form '# 123 filename'
325 .line and .file directives will appear in the pre-processed output.
326 Note that input_file.c hand checks for '#' at the beginning of the
327 first line of the input file. This is because the compiler outputs
328 #NO_APP at the beginning of its output.
329 Also note that comments started like this one will always work if
330 '/' isn't otherwise defined. */
331 const char line_comment_chars[] = "#/";
332
333 const char line_separator_chars[] = ";";
334
335 /* Chars that can be used to separate mant from exp in floating point
336 nums. */
337 const char EXP_CHARS[] = "eE";
338
339 /* Chars that mean this number is a floating point constant
340 As in 0f12.456
341 or 0d1.2345e12. */
342 const char FLT_CHARS[] = "fFdDxX";
343
344 /* Tables for lexical analysis. */
345 static char mnemonic_chars[256];
346 static char register_chars[256];
347 static char operand_chars[256];
348 static char identifier_chars[256];
349 static char digit_chars[256];
350
351 /* Lexical macros. */
352 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
353 #define is_operand_char(x) (operand_chars[(unsigned char) x])
354 #define is_register_char(x) (register_chars[(unsigned char) x])
355 #define is_space_char(x) ((x) == ' ')
356 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
357 #define is_digit_char(x) (digit_chars[(unsigned char) x])
358
359 /* All non-digit non-letter characters that may occur in an operand. */
360 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
361
362 /* md_assemble() always leaves the strings it's passed unaltered. To
363 effect this we maintain a stack of saved characters that we've smashed
364 with '\0's (indicating end of strings for various sub-fields of the
365 assembler instruction). */
366 static char save_stack[32];
367 static char *save_stack_p;
368 #define END_STRING_AND_SAVE(s) \
369 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
370 #define RESTORE_END_STRING(s) \
371 do { *(s) = *--save_stack_p; } while (0)
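/* Typical usage sketch: bracket a sub-field so that string helpers see
   a NUL-terminated token, then restore the input line afterwards:

     END_STRING_AND_SAVE (end);
     (look up or copy the token that now ends at END)
     RESTORE_END_STRING (end);
*/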
372
373 /* The instruction we're assembling. */
374 static i386_insn i;
375
376 /* Possible templates for current insn. */
377 static const templates *current_templates;
378
379 /* Per instruction expressionS buffers: max displacements & immediates. */
380 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
381 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
382
383 /* Current operand we are working on. */
384 static int this_operand = -1;
385
386 /* We support four different modes. FLAG_CODE variable is used to distinguish
387 these. */
388
389 enum flag_code {
390 CODE_32BIT,
391 CODE_16BIT,
392 CODE_64BIT };
393
394 static enum flag_code flag_code;
395 static unsigned int object_64bit;
396 static unsigned int disallow_64bit_reloc;
397 static int use_rela_relocations = 0;
398
399 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
400 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
401 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
402
403 /* The ELF ABI to use. */
404 enum x86_elf_abi
405 {
406 I386_ABI,
407 X86_64_ABI,
408 X86_64_X32_ABI
409 };
410
411 static enum x86_elf_abi x86_elf_abi = I386_ABI;
412 #endif
413
414 /* The names used to print error messages. */
415 static const char *flag_code_names[] =
416 {
417 "32",
418 "16",
419 "64"
420 };
421
422 /* 1 for intel syntax,
423 0 if att syntax. */
424 static int intel_syntax = 0;
425
426 /* 1 for intel mnemonic,
427 0 if att mnemonic. */
428 static int intel_mnemonic = !SYSV386_COMPAT;
429
430 /* 1 if support old (<= 2.8.1) versions of gcc. */
431 static int old_gcc = OLDGCC_COMPAT;
432
433 /* 1 if pseudo registers are permitted. */
434 static int allow_pseudo_reg = 0;
435
436 /* 1 if register prefix % not required. */
437 static int allow_naked_reg = 0;
438
439 /* 1 if pseudo index register, eiz/riz, is allowed. */
440 static int allow_index_reg = 0;
441
442 static enum
443 {
444 sse_check_none = 0,
445 sse_check_warning,
446 sse_check_error
447 }
448 sse_check;
449
450 /* Register prefix used for error message. */
451 static const char *register_prefix = "%";
452
453 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
454 leave, push, and pop instructions so that gcc has the same stack
455 frame as in 32 bit mode. */
456 static char stackop_size = '\0';
457
458 /* Non-zero to optimize code alignment. */
459 int optimize_align_code = 1;
460
461 /* Non-zero to quieten some warnings. */
462 static int quiet_warnings = 0;
463
464 /* CPU name. */
465 static const char *cpu_arch_name = NULL;
466 static char *cpu_sub_arch_name = NULL;
467
468 /* CPU feature flags. */
469 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
470
471 /* If we have selected a cpu we are generating instructions for. */
472 static int cpu_arch_tune_set = 0;
473
474 /* Cpu we are generating instructions for. */
475 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
476
477 /* CPU feature flags of cpu we are generating instructions for. */
478 static i386_cpu_flags cpu_arch_tune_flags;
479
480 /* CPU instruction set architecture used. */
481 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
482
483 /* CPU feature flags of instruction set architecture used. */
484 i386_cpu_flags cpu_arch_isa_flags;
485
486 /* If set, conditional jumps are not automatically promoted to handle
487 larger than a byte offset. */
488 static unsigned int no_cond_jump_promotion = 0;
489
490 /* Encode SSE instructions with VEX prefix. */
491 static unsigned int sse2avx;
492
493 /* Encode scalar AVX instructions with specific vector length. */
494 static enum
495 {
496 vex128 = 0,
497 vex256
498 } avxscalar;
499
500 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
501 static symbolS *GOT_symbol;
502
503 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
504 unsigned int x86_dwarf2_return_column;
505
506 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
507 int x86_cie_data_alignment;
508
509 /* Interface to relax_segment.
510 There are 3 major relax states for 386 jump insns because the
511 different types of jumps add different sizes to frags when we're
512 figuring out what sort of jump to choose to reach a given label. */
513
514 /* Types. */
515 #define UNCOND_JUMP 0
516 #define COND_JUMP 1
517 #define COND_JUMP86 2
518
519 /* Sizes. */
520 #define CODE16 1
521 #define SMALL 0
522 #define SMALL16 (SMALL | CODE16)
523 #define BIG 2
524 #define BIG16 (BIG | CODE16)
525
526 #ifndef INLINE
527 #ifdef __GNUC__
528 #define INLINE __inline__
529 #else
530 #define INLINE
531 #endif
532 #endif
533
534 #define ENCODE_RELAX_STATE(type, size) \
535 ((relax_substateT) (((type) << 2) | (size)))
536 #define TYPE_FROM_RELAX_STATE(s) \
537 ((s) >> 2)
538 #define DISP_SIZE_FROM_RELAX_STATE(s) \
539 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
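/* Worked example: ENCODE_RELAX_STATE (COND_JUMP, BIG) is
   (1 << 2) | 2 == 6; TYPE_FROM_RELAX_STATE (6) gives back COND_JUMP
   and DISP_SIZE_FROM_RELAX_STATE (6) is 4 (a 32 bit displacement).
   ENCODE_RELAX_STATE (COND_JUMP, SMALL16) is 5, whose displacement
   size works out to 1 byte.  */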
540
541 /* This table is used by relax_frag to promote short jumps to long
542 ones where necessary. SMALL (short) jumps may be promoted to BIG
543 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
544 don't allow a short jump in a 32 bit code segment to be promoted to
545 a 16 bit offset jump because it's slower (requires data size
546 prefix), and doesn't work, unless the destination is in the bottom
547 64k of the code segment (The top 16 bits of eip are zeroed). */
548
549 const relax_typeS md_relax_table[] =
550 {
551 /* The fields are:
552 1) most positive reach of this state,
553 2) most negative reach of this state,
554 3) how many bytes this mode will have in the variable part of the frag
555 4) which index into the table to try if we can't fit into this one. */
556
557 /* UNCOND_JUMP states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
560 /* dword jmp adds 4 bytes to frag:
561 0 extra opcode bytes, 4 displacement bytes. */
562 {0, 0, 4, 0},
563 /* word jmp adds 2 bytes to frag:
564 0 extra opcode bytes, 2 displacement bytes. */
565 {0, 0, 2, 0},
566
567 /* COND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
570 /* dword conditionals add 5 bytes to frag:
571 1 extra opcode byte, 4 displacement bytes. */
572 {0, 0, 5, 0},
573 /* word conditionals add 3 bytes to frag:
574 1 extra opcode byte, 2 displacement bytes. */
575 {0, 0, 3, 0},
576
577 /* COND_JUMP86 states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 4 bytes to frag:
584 1 displacement byte and a 3 byte long branch insn. */
585 {0, 0, 4, 0}
586 };
587
588 static const arch_entry cpu_arch[] =
589 {
590 /* Do not replace the first two entries - i386_target_format()
591 relies on them being there in this order. */
592 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
593 CPU_GENERIC32_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
595 CPU_GENERIC64_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
597 CPU_NONE_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
599 CPU_I186_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
601 CPU_I286_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
603 CPU_I386_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
605 CPU_I486_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
609 CPU_I686_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
611 CPU_I586_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
613 CPU_PENTIUMPRO_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
615 CPU_P2_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
617 CPU_P3_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
619 CPU_P4_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
621 CPU_CORE_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
623 CPU_NOCONA_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
625 CPU_CORE_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
629 CPU_CORE2_FLAGS, 1, 0 },
630 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
631 CPU_CORE2_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
633 CPU_COREI7_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
635 CPU_L1OM_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
637 CPU_K6_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
639 CPU_K6_2_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
641 CPU_ATHLON_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 1, 0 },
644 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
645 CPU_K8_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
647 CPU_K8_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
649 CPU_AMDFAM10_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
651 CPU_BDVER1_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
653 CPU_BDVER2_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
655 CPU_8087_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
657 CPU_287_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
659 CPU_387_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
661 CPU_ANY87_FLAGS, 0, 1 },
662 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
663 CPU_MMX_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
665 CPU_3DNOWA_FLAGS, 0, 1 },
666 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
667 CPU_SSE_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
669 CPU_SSE2_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
671 CPU_SSE3_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
673 CPU_SSSE3_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
675 CPU_SSE4_1_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
677 CPU_SSE4_2_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
679 CPU_SSE4_2_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
681 CPU_ANY_SSE_FLAGS, 0, 1 },
682 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
683 CPU_AVX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
685 CPU_AVX2_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
687 CPU_ANY_AVX_FLAGS, 0, 1 },
688 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
689 CPU_VMX_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
691 CPU_SMX_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
693 CPU_XSAVE_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
695 CPU_XSAVEOPT_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
697 CPU_AES_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
699 CPU_PCLMUL_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
701 CPU_PCLMUL_FLAGS, 1, 0 },
702 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
703 CPU_FSGSBASE_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
705 CPU_RDRND_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
707 CPU_F16C_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
709 CPU_BMI2_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
711 CPU_FMA_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
713 CPU_FMA4_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
715 CPU_XOP_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
717 CPU_LWP_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
719 CPU_MOVBE_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
721 CPU_EPT_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
723 CPU_LZCNT_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
725 CPU_INVPCID_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
727 CPU_CLFLUSH_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
729 CPU_NOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
731 CPU_SYSCALL_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
733 CPU_RDTSCP_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
735 CPU_3DNOW_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
737 CPU_3DNOWA_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
739 CPU_PADLOCK_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
741 CPU_SVME_FLAGS, 1, 0 },
742 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
743 CPU_SVME_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
745 CPU_SSE4A_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
747 CPU_ABM_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
749 CPU_BMI_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
751 CPU_TBM_FLAGS, 0, 0 },
752 };
753
754 #ifdef I386COFF
755 /* Like s_lcomm_internal in gas/read.c but the alignment string
756 is allowed to be optional. */
757
758 static symbolS *
759 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
760 {
761 addressT align = 0;
762
763 SKIP_WHITESPACE ();
764
765 if (needs_align
766 && *input_line_pointer == ',')
767 {
768 align = parse_align (needs_align - 1);
769
770 if (align == (addressT) -1)
771 return NULL;
772 }
773 else
774 {
775 if (size >= 8)
776 align = 3;
777 else if (size >= 4)
778 align = 2;
779 else if (size >= 2)
780 align = 1;
781 else
782 align = 0;
783 }
784
785 bss_alloc (symbolP, size, align);
786 return symbolP;
787 }
788
789 static void
790 pe_lcomm (int needs_align)
791 {
792 s_comm_internal (needs_align * 2, pe_lcomm_internal);
793 }
794 #endif
795
796 const pseudo_typeS md_pseudo_table[] =
797 {
798 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
799 {"align", s_align_bytes, 0},
800 #else
801 {"align", s_align_ptwo, 0},
802 #endif
803 {"arch", set_cpu_arch, 0},
804 #ifndef I386COFF
805 {"bss", s_bss, 0},
806 #else
807 {"lcomm", pe_lcomm, 1},
808 #endif
809 {"ffloat", float_cons, 'f'},
810 {"dfloat", float_cons, 'd'},
811 {"tfloat", float_cons, 'x'},
812 {"value", cons, 2},
813 {"slong", signed_cons, 4},
814 {"noopt", s_ignore, 0},
815 {"optim", s_ignore, 0},
816 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
817 {"code16", set_code_flag, CODE_16BIT},
818 {"code32", set_code_flag, CODE_32BIT},
819 {"code64", set_code_flag, CODE_64BIT},
820 {"intel_syntax", set_intel_syntax, 1},
821 {"att_syntax", set_intel_syntax, 0},
822 {"intel_mnemonic", set_intel_mnemonic, 1},
823 {"att_mnemonic", set_intel_mnemonic, 0},
824 {"allow_index_reg", set_allow_index_reg, 1},
825 {"disallow_index_reg", set_allow_index_reg, 0},
826 {"sse_check", set_sse_check, 0},
827 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
828 {"largecomm", handle_large_common, 0},
829 {"quad", handle_quad, 8},
830 #else
831 {"file", (void (*) (int)) dwarf2_directive_file, 0},
832 {"loc", dwarf2_directive_loc, 0},
833 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
834 #endif
835 #ifdef TE_PE
836 {"secrel32", pe_directive_secrel, 0},
837 #endif
838 {0, 0, 0}
839 };
840
841 /* For interface with expression (). */
842 extern char *input_line_pointer;
843
844 /* Hash table for instruction mnemonic lookup. */
845 static struct hash_control *op_hash;
846
847 /* Hash table for register lookup. */
848 static struct hash_control *reg_hash;
849 \f
850 void
851 i386_align_code (fragS *fragP, int count)
852 {
853 /* Various efficient no-op patterns for aligning code labels.
854 Note: Don't try to assemble the instructions in the comments.
855 0L and 0w are not legal. */
856 static const char f32_1[] =
857 {0x90}; /* nop */
858 static const char f32_2[] =
859 {0x66,0x90}; /* xchg %ax,%ax */
860 static const char f32_3[] =
861 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
862 static const char f32_4[] =
863 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
864 static const char f32_5[] =
865 {0x90, /* nop */
866 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
867 static const char f32_6[] =
868 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
869 static const char f32_7[] =
870 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
871 static const char f32_8[] =
872 {0x90, /* nop */
873 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
874 static const char f32_9[] =
875 {0x89,0xf6, /* movl %esi,%esi */
876 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
877 static const char f32_10[] =
878 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f32_11[] =
881 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
882 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
883 static const char f32_12[] =
884 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
885 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
886 static const char f32_13[] =
887 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
888 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
889 static const char f32_14[] =
890 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f16_3[] =
893 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
894 static const char f16_4[] =
895 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
896 static const char f16_5[] =
897 {0x90, /* nop */
898 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
899 static const char f16_6[] =
900 {0x89,0xf6, /* mov %si,%si */
901 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
902 static const char f16_7[] =
903 {0x8d,0x74,0x00, /* lea 0(%si),%si */
904 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
905 static const char f16_8[] =
906 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
907 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
908 static const char jump_31[] =
909 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
910 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
911 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
912 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
913 static const char *const f32_patt[] = {
914 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
915 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
916 };
917 static const char *const f16_patt[] = {
918 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
919 };
920 /* nopl (%[re]ax) */
921 static const char alt_3[] =
922 {0x0f,0x1f,0x00};
923 /* nopl 0(%[re]ax) */
924 static const char alt_4[] =
925 {0x0f,0x1f,0x40,0x00};
926 /* nopl 0(%[re]ax,%[re]ax,1) */
927 static const char alt_5[] =
928 {0x0f,0x1f,0x44,0x00,0x00};
929 /* nopw 0(%[re]ax,%[re]ax,1) */
930 static const char alt_6[] =
931 {0x66,0x0f,0x1f,0x44,0x00,0x00};
932 /* nopl 0L(%[re]ax) */
933 static const char alt_7[] =
934 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
935 /* nopl 0L(%[re]ax,%[re]ax,1) */
936 static const char alt_8[] =
937 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
938 /* nopw 0L(%[re]ax,%[re]ax,1) */
939 static const char alt_9[] =
940 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
942 static const char alt_10[] =
943 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* data16
945 nopw %cs:0L(%[re]ax,%[re]ax,1) */
946 static const char alt_long_11[] =
947 {0x66,
948 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
949 /* data16
950 data16
951 nopw %cs:0L(%[re]ax,%[re]ax,1) */
952 static const char alt_long_12[] =
953 {0x66,
954 0x66,
955 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
956 /* data16
957 data16
958 data16
959 nopw %cs:0L(%[re]ax,%[re]ax,1) */
960 static const char alt_long_13[] =
961 {0x66,
962 0x66,
963 0x66,
964 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
965 /* data16
966 data16
967 data16
968 data16
969 nopw %cs:0L(%[re]ax,%[re]ax,1) */
970 static const char alt_long_14[] =
971 {0x66,
972 0x66,
973 0x66,
974 0x66,
975 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 /* data16
977 data16
978 data16
979 data16
980 data16
981 nopw %cs:0L(%[re]ax,%[re]ax,1) */
982 static const char alt_long_15[] =
983 {0x66,
984 0x66,
985 0x66,
986 0x66,
987 0x66,
988 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
989 /* nopl 0(%[re]ax,%[re]ax,1)
990 nopw 0(%[re]ax,%[re]ax,1) */
991 static const char alt_short_11[] =
992 {0x0f,0x1f,0x44,0x00,0x00,
993 0x66,0x0f,0x1f,0x44,0x00,0x00};
994 /* nopw 0(%[re]ax,%[re]ax,1)
995 nopw 0(%[re]ax,%[re]ax,1) */
996 static const char alt_short_12[] =
997 {0x66,0x0f,0x1f,0x44,0x00,0x00,
998 0x66,0x0f,0x1f,0x44,0x00,0x00};
999 /* nopw 0(%[re]ax,%[re]ax,1)
1000 nopl 0L(%[re]ax) */
1001 static const char alt_short_13[] =
1002 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1003 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1004 /* nopl 0L(%[re]ax)
1005 nopl 0L(%[re]ax) */
1006 static const char alt_short_14[] =
1007 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1008 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1009 /* nopl 0L(%[re]ax)
1010 nopl 0L(%[re]ax,%[re]ax,1) */
1011 static const char alt_short_15[] =
1012 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1013 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 static const char *const alt_short_patt[] = {
1015 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1016 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1017 alt_short_14, alt_short_15
1018 };
1019 static const char *const alt_long_patt[] = {
1020 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1021 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1022 alt_long_14, alt_long_15
1023 };
1024
1025 /* Do nothing for a non-positive count or one larger than MAX_MEM_FOR_RS_ALIGN_CODE. */
1026 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1027 return;
1028
1029 /* We need to decide which NOP sequence to use for 32bit and
1030 64bit. When -mtune= is used:
1031
1032 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1033 PROCESSOR_GENERIC32, f32_patt will be used.
1034 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1035 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1036 PROCESSOR_GENERIC64, alt_long_patt will be used.
1037 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1038 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1039 will be used.
1040
1041 When -mtune= isn't used, alt_long_patt will be used if
1042 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1043 be used.
1044
1045 When -march= or .arch is used, we can't use anything beyond
1046 cpu_arch_isa_flags. */
1047
1048 if (flag_code == CODE_16BIT)
1049 {
1050 if (count > 8)
1051 {
1052 memcpy (fragP->fr_literal + fragP->fr_fix,
1053 jump_31, count);
1054 /* Adjust jump offset. */
1055 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1056 }
1057 else
1058 memcpy (fragP->fr_literal + fragP->fr_fix,
1059 f16_patt[count - 1], count);
1060 }
1061 else
1062 {
1063 const char *const *patt = NULL;
1064
1065 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1066 {
1067 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1068 switch (cpu_arch_tune)
1069 {
1070 case PROCESSOR_UNKNOWN:
1071 /* We use cpu_arch_isa_flags to check if we SHOULD
1072 optimize with nops. */
1073 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1074 patt = alt_long_patt;
1075 else
1076 patt = f32_patt;
1077 break;
1078 case PROCESSOR_PENTIUM4:
1079 case PROCESSOR_NOCONA:
1080 case PROCESSOR_CORE:
1081 case PROCESSOR_CORE2:
1082 case PROCESSOR_COREI7:
1083 case PROCESSOR_L1OM:
1084 case PROCESSOR_GENERIC64:
1085 patt = alt_long_patt;
1086 break;
1087 case PROCESSOR_K6:
1088 case PROCESSOR_ATHLON:
1089 case PROCESSOR_K8:
1090 case PROCESSOR_AMDFAM10:
1091 case PROCESSOR_BD:
1092 patt = alt_short_patt;
1093 break;
1094 case PROCESSOR_I386:
1095 case PROCESSOR_I486:
1096 case PROCESSOR_PENTIUM:
1097 case PROCESSOR_PENTIUMPRO:
1098 case PROCESSOR_GENERIC32:
1099 patt = f32_patt;
1100 break;
1101 }
1102 }
1103 else
1104 {
1105 switch (fragP->tc_frag_data.tune)
1106 {
1107 case PROCESSOR_UNKNOWN:
1108 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1109 PROCESSOR_UNKNOWN. */
1110 abort ();
1111 break;
1112
1113 case PROCESSOR_I386:
1114 case PROCESSOR_I486:
1115 case PROCESSOR_PENTIUM:
1116 case PROCESSOR_K6:
1117 case PROCESSOR_ATHLON:
1118 case PROCESSOR_K8:
1119 case PROCESSOR_AMDFAM10:
1120 case PROCESSOR_BD:
1121 case PROCESSOR_GENERIC32:
1122 /* We use cpu_arch_isa_flags to check if we CAN optimize
1123 with nops. */
1124 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1125 patt = alt_short_patt;
1126 else
1127 patt = f32_patt;
1128 break;
1129 case PROCESSOR_PENTIUMPRO:
1130 case PROCESSOR_PENTIUM4:
1131 case PROCESSOR_NOCONA:
1132 case PROCESSOR_CORE:
1133 case PROCESSOR_CORE2:
1134 case PROCESSOR_COREI7:
1135 case PROCESSOR_L1OM:
1136 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1137 patt = alt_long_patt;
1138 else
1139 patt = f32_patt;
1140 break;
1141 case PROCESSOR_GENERIC64:
1142 patt = alt_long_patt;
1143 break;
1144 }
1145 }
1146
1147 if (patt == f32_patt)
1148 {
1149 /* If the padding is below the limit computed below, use the
1150 normal patterns.  Otherwise, use a jump instruction and adjust
1151 its offset.  */
1152 int limit;
1153
1154 /* For 64bit, the limit is 3 bytes. */
1155 if (flag_code == CODE_64BIT
1156 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1157 limit = 3;
1158 else
1159 limit = 15;
1160 if (count < limit)
1161 memcpy (fragP->fr_literal + fragP->fr_fix,
1162 patt[count - 1], count);
1163 else
1164 {
1165 memcpy (fragP->fr_literal + fragP->fr_fix,
1166 jump_31, count);
1167 /* Adjust jump offset. */
1168 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1169 }
1170 }
1171 else
1172 {
1173 /* The maximum length of an instruction is 15 bytes.  If the
1174 padding is greater than 15 bytes and we are not using a jump,
1175 we have to break it into smaller pieces. */
1176 int padding = count;
1177 while (padding > 15)
1178 {
1179 padding -= 15;
1180 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1181 patt [14], 15);
1182 }
1183
1184 if (padding)
1185 memcpy (fragP->fr_literal + fragP->fr_fix,
1186 patt [padding - 1], padding);
1187 }
1188 }
1189 fragP->fr_var = count;
1190 }
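/* As a rough example, with -mtune=generic64 a request to pad 6 bytes
   in 32/64 bit code goes through alt_long_patt and picks patt[5],
   i.e. alt_6 above, so the padding is the single instruction
   "nopw 0(%[re]ax,%[re]ax,1)" (66 0f 1f 44 00 00).  */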
1191
1192 static INLINE int
1193 operand_type_all_zero (const union i386_operand_type *x)
1194 {
1195 switch (ARRAY_SIZE(x->array))
1196 {
1197 case 3:
1198 if (x->array[2])
1199 return 0;
1200 case 2:
1201 if (x->array[1])
1202 return 0;
1203 case 1:
1204 return !x->array[0];
1205 default:
1206 abort ();
1207 }
1208 }
1209
1210 static INLINE void
1211 operand_type_set (union i386_operand_type *x, unsigned int v)
1212 {
1213 switch (ARRAY_SIZE(x->array))
1214 {
1215 case 3:
1216 x->array[2] = v;
1217 case 2:
1218 x->array[1] = v;
1219 case 1:
1220 x->array[0] = v;
1221 break;
1222 default:
1223 abort ();
1224 }
1225 }
1226
1227 static INLINE int
1228 operand_type_equal (const union i386_operand_type *x,
1229 const union i386_operand_type *y)
1230 {
1231 switch (ARRAY_SIZE(x->array))
1232 {
1233 case 3:
1234 if (x->array[2] != y->array[2])
1235 return 0;
1236 case 2:
1237 if (x->array[1] != y->array[1])
1238 return 0;
1239 case 1:
1240 return x->array[0] == y->array[0];
1241 break;
1242 default:
1243 abort ();
1244 }
1245 }
1246
1247 static INLINE int
1248 cpu_flags_all_zero (const union i386_cpu_flags *x)
1249 {
1250 switch (ARRAY_SIZE(x->array))
1251 {
1252 case 3:
1253 if (x->array[2])
1254 return 0;
1255 case 2:
1256 if (x->array[1])
1257 return 0;
1258 case 1:
1259 return !x->array[0];
1260 default:
1261 abort ();
1262 }
1263 }
1264
1265 static INLINE void
1266 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1267 {
1268 switch (ARRAY_SIZE(x->array))
1269 {
1270 case 3:
1271 x->array[2] = v;
1272 case 2:
1273 x->array[1] = v;
1274 case 1:
1275 x->array[0] = v;
1276 break;
1277 default:
1278 abort ();
1279 }
1280 }
1281
1282 static INLINE int
1283 cpu_flags_equal (const union i386_cpu_flags *x,
1284 const union i386_cpu_flags *y)
1285 {
1286 switch (ARRAY_SIZE(x->array))
1287 {
1288 case 3:
1289 if (x->array[2] != y->array[2])
1290 return 0;
1291 case 2:
1292 if (x->array[1] != y->array[1])
1293 return 0;
1294 case 1:
1295 return x->array[0] == y->array[0];
1296 break;
1297 default:
1298 abort ();
1299 }
1300 }
1301
1302 static INLINE int
1303 cpu_flags_check_cpu64 (i386_cpu_flags f)
1304 {
1305 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1306 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1307 }
1308
1309 static INLINE i386_cpu_flags
1310 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1311 {
1312 switch (ARRAY_SIZE (x.array))
1313 {
1314 case 3:
1315 x.array [2] &= y.array [2];
1316 case 2:
1317 x.array [1] &= y.array [1];
1318 case 1:
1319 x.array [0] &= y.array [0];
1320 break;
1321 default:
1322 abort ();
1323 }
1324 return x;
1325 }
1326
1327 static INLINE i386_cpu_flags
1328 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1329 {
1330 switch (ARRAY_SIZE (x.array))
1331 {
1332 case 3:
1333 x.array [2] |= y.array [2];
1334 case 2:
1335 x.array [1] |= y.array [1];
1336 case 1:
1337 x.array [0] |= y.array [0];
1338 break;
1339 default:
1340 abort ();
1341 }
1342 return x;
1343 }
1344
1345 static INLINE i386_cpu_flags
1346 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1347 {
1348 switch (ARRAY_SIZE (x.array))
1349 {
1350 case 3:
1351 x.array [2] &= ~y.array [2];
1352 case 2:
1353 x.array [1] &= ~y.array [1];
1354 case 1:
1355 x.array [0] &= ~y.array [0];
1356 break;
1357 default:
1358 abort ();
1359 }
1360 return x;
1361 }
1362
1363 #define CPU_FLAGS_ARCH_MATCH 0x1
1364 #define CPU_FLAGS_64BIT_MATCH 0x2
1365 #define CPU_FLAGS_AES_MATCH 0x4
1366 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1367 #define CPU_FLAGS_AVX_MATCH 0x10
1368
1369 #define CPU_FLAGS_32BIT_MATCH \
1370 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1371 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1372 #define CPU_FLAGS_PERFECT_MATCH \
1373 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1374
1375 /* Return CPU flags match bits. */
1376
1377 static int
1378 cpu_flags_match (const insn_template *t)
1379 {
1380 i386_cpu_flags x = t->cpu_flags;
1381 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1382
1383 x.bitfield.cpu64 = 0;
1384 x.bitfield.cpuno64 = 0;
1385
1386 if (cpu_flags_all_zero (&x))
1387 {
1388 /* This instruction is available on all archs. */
1389 match |= CPU_FLAGS_32BIT_MATCH;
1390 }
1391 else
1392 {
1393 /* This instruction is available only on some archs. */
1394 i386_cpu_flags cpu = cpu_arch_flags;
1395
1396 cpu.bitfield.cpu64 = 0;
1397 cpu.bitfield.cpuno64 = 0;
1398 cpu = cpu_flags_and (x, cpu);
1399 if (!cpu_flags_all_zero (&cpu))
1400 {
1401 if (x.bitfield.cpuavx)
1402 {
1403 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1404 if (cpu.bitfield.cpuavx)
1405 {
1406 /* Check SSE2AVX. */
1407 if (!t->opcode_modifier.sse2avx || sse2avx)
1408 {
1409 match |= (CPU_FLAGS_ARCH_MATCH
1410 | CPU_FLAGS_AVX_MATCH);
1411 /* Check AES. */
1412 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1413 match |= CPU_FLAGS_AES_MATCH;
1414 /* Check PCLMUL. */
1415 if (!x.bitfield.cpupclmul
1416 || cpu.bitfield.cpupclmul)
1417 match |= CPU_FLAGS_PCLMUL_MATCH;
1418 }
1419 }
1420 else
1421 match |= CPU_FLAGS_ARCH_MATCH;
1422 }
1423 else
1424 match |= CPU_FLAGS_32BIT_MATCH;
1425 }
1426 }
1427 return match;
1428 }
1429
1430 static INLINE i386_operand_type
1431 operand_type_and (i386_operand_type x, i386_operand_type y)
1432 {
1433 switch (ARRAY_SIZE (x.array))
1434 {
1435 case 3:
1436 x.array [2] &= y.array [2];
1437 case 2:
1438 x.array [1] &= y.array [1];
1439 case 1:
1440 x.array [0] &= y.array [0];
1441 break;
1442 default:
1443 abort ();
1444 }
1445 return x;
1446 }
1447
1448 static INLINE i386_operand_type
1449 operand_type_or (i386_operand_type x, i386_operand_type y)
1450 {
1451 switch (ARRAY_SIZE (x.array))
1452 {
1453 case 3:
1454 x.array [2] |= y.array [2];
1455 case 2:
1456 x.array [1] |= y.array [1];
1457 case 1:
1458 x.array [0] |= y.array [0];
1459 break;
1460 default:
1461 abort ();
1462 }
1463 return x;
1464 }
1465
1466 static INLINE i386_operand_type
1467 operand_type_xor (i386_operand_type x, i386_operand_type y)
1468 {
1469 switch (ARRAY_SIZE (x.array))
1470 {
1471 case 3:
1472 x.array [2] ^= y.array [2];
1473 case 2:
1474 x.array [1] ^= y.array [1];
1475 case 1:
1476 x.array [0] ^= y.array [0];
1477 break;
1478 default:
1479 abort ();
1480 }
1481 return x;
1482 }
1483
1484 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1485 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1486 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1487 static const i386_operand_type inoutportreg
1488 = OPERAND_TYPE_INOUTPORTREG;
1489 static const i386_operand_type reg16_inoutportreg
1490 = OPERAND_TYPE_REG16_INOUTPORTREG;
1491 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1492 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1493 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1494 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1495 static const i386_operand_type anydisp
1496 = OPERAND_TYPE_ANYDISP;
1497 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1498 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1499 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1500 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1501 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1502 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1503 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1504 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1505 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1506 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1507 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1508 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1509
1510 enum operand_type
1511 {
1512 reg,
1513 imm,
1514 disp,
1515 anymem
1516 };
1517
1518 static INLINE int
1519 operand_type_check (i386_operand_type t, enum operand_type c)
1520 {
1521 switch (c)
1522 {
1523 case reg:
1524 return (t.bitfield.reg8
1525 || t.bitfield.reg16
1526 || t.bitfield.reg32
1527 || t.bitfield.reg64);
1528
1529 case imm:
1530 return (t.bitfield.imm8
1531 || t.bitfield.imm8s
1532 || t.bitfield.imm16
1533 || t.bitfield.imm32
1534 || t.bitfield.imm32s
1535 || t.bitfield.imm64);
1536
1537 case disp:
1538 return (t.bitfield.disp8
1539 || t.bitfield.disp16
1540 || t.bitfield.disp32
1541 || t.bitfield.disp32s
1542 || t.bitfield.disp64);
1543
1544 case anymem:
1545 return (t.bitfield.disp8
1546 || t.bitfield.disp16
1547 || t.bitfield.disp32
1548 || t.bitfield.disp32s
1549 || t.bitfield.disp64
1550 || t.bitfield.baseindex);
1551
1552 default:
1553 abort ();
1554 }
1555
1556 return 0;
1557 }
1558
1559 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1560 operand J for instruction template T. */
1561
1562 static INLINE int
1563 match_reg_size (const insn_template *t, unsigned int j)
1564 {
1565 return !((i.types[j].bitfield.byte
1566 && !t->operand_types[j].bitfield.byte)
1567 || (i.types[j].bitfield.word
1568 && !t->operand_types[j].bitfield.word)
1569 || (i.types[j].bitfield.dword
1570 && !t->operand_types[j].bitfield.dword)
1571 || (i.types[j].bitfield.qword
1572 && !t->operand_types[j].bitfield.qword));
1573 }
1574
1575 /* Return 1 if there is no conflict in any size on operand J for
1576 instruction template T. */
1577
1578 static INLINE int
1579 match_mem_size (const insn_template *t, unsigned int j)
1580 {
1581 return (match_reg_size (t, j)
1582 && !((i.types[j].bitfield.unspecified
1583 && !t->operand_types[j].bitfield.unspecified)
1584 || (i.types[j].bitfield.fword
1585 && !t->operand_types[j].bitfield.fword)
1586 || (i.types[j].bitfield.tbyte
1587 && !t->operand_types[j].bitfield.tbyte)
1588 || (i.types[j].bitfield.xmmword
1589 && !t->operand_types[j].bitfield.xmmword)
1590 || (i.types[j].bitfield.ymmword
1591 && !t->operand_types[j].bitfield.ymmword)));
1592 }
1593
1594 /* Return 1 if there is no size conflict on any operands for
1595 instruction template T. */
1596
1597 static INLINE int
1598 operand_size_match (const insn_template *t)
1599 {
1600 unsigned int j;
1601 int match = 1;
1602
1603 /* Don't check jump instructions. */
1604 if (t->opcode_modifier.jump
1605 || t->opcode_modifier.jumpbyte
1606 || t->opcode_modifier.jumpdword
1607 || t->opcode_modifier.jumpintersegment)
1608 return match;
1609
1610 /* Check memory and accumulator operand size. */
1611 for (j = 0; j < i.operands; j++)
1612 {
1613 if (t->operand_types[j].bitfield.anysize)
1614 continue;
1615
1616 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1617 {
1618 match = 0;
1619 break;
1620 }
1621
1622 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1623 {
1624 match = 0;
1625 break;
1626 }
1627 }
1628
1629 if (match)
1630 return match;
1631 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1632 {
1633 mismatch:
1634 i.error = operand_size_mismatch;
1635 return 0;
1636 }
1637
1638 /* Check reverse. */
1639 gas_assert (i.operands == 2);
1640
1641 match = 1;
1642 for (j = 0; j < 2; j++)
1643 {
1644 if (t->operand_types[j].bitfield.acc
1645 && !match_reg_size (t, j ? 0 : 1))
1646 goto mismatch;
1647
1648 if (i.types[j].bitfield.mem
1649 && !match_mem_size (t, j ? 0 : 1))
1650 goto mismatch;
1651 }
1652
1653 return match;
1654 }
1655
1656 static INLINE int
1657 operand_type_match (i386_operand_type overlap,
1658 i386_operand_type given)
1659 {
1660 i386_operand_type temp = overlap;
1661
1662 temp.bitfield.jumpabsolute = 0;
1663 temp.bitfield.unspecified = 0;
1664 temp.bitfield.byte = 0;
1665 temp.bitfield.word = 0;
1666 temp.bitfield.dword = 0;
1667 temp.bitfield.fword = 0;
1668 temp.bitfield.qword = 0;
1669 temp.bitfield.tbyte = 0;
1670 temp.bitfield.xmmword = 0;
1671 temp.bitfield.ymmword = 0;
1672 if (operand_type_all_zero (&temp))
1673 goto mismatch;
1674
1675 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1676 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1677 return 1;
1678
1679 mismatch:
1680 i.error = operand_type_mismatch;
1681 return 0;
1682 }
1683
1684 /* If given types g0 and g1 are registers they must be of the same type
1685 unless the expected operand type register overlap is null.
1686 Note that Acc in a template matches every size of reg. */
1687
1688 static INLINE int
1689 operand_type_register_match (i386_operand_type m0,
1690 i386_operand_type g0,
1691 i386_operand_type t0,
1692 i386_operand_type m1,
1693 i386_operand_type g1,
1694 i386_operand_type t1)
1695 {
1696 if (!operand_type_check (g0, reg))
1697 return 1;
1698
1699 if (!operand_type_check (g1, reg))
1700 return 1;
1701
1702 if (g0.bitfield.reg8 == g1.bitfield.reg8
1703 && g0.bitfield.reg16 == g1.bitfield.reg16
1704 && g0.bitfield.reg32 == g1.bitfield.reg32
1705 && g0.bitfield.reg64 == g1.bitfield.reg64)
1706 return 1;
1707
1708 if (m0.bitfield.acc)
1709 {
1710 t0.bitfield.reg8 = 1;
1711 t0.bitfield.reg16 = 1;
1712 t0.bitfield.reg32 = 1;
1713 t0.bitfield.reg64 = 1;
1714 }
1715
1716 if (m1.bitfield.acc)
1717 {
1718 t1.bitfield.reg8 = 1;
1719 t1.bitfield.reg16 = 1;
1720 t1.bitfield.reg32 = 1;
1721 t1.bitfield.reg64 = 1;
1722 }
1723
1724 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1725 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1726 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1727 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1728 return 1;
1729
1730 i.error = register_type_mismatch;
1731
1732 return 0;
1733 }
1734
1735 static INLINE unsigned int
1736 mode_from_disp_size (i386_operand_type t)
1737 {
1738 if (t.bitfield.disp8)
1739 return 1;
1740 else if (t.bitfield.disp16
1741 || t.bitfield.disp32
1742 || t.bitfield.disp32s)
1743 return 2;
1744 else
1745 return 0;
1746 }
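/* E.g. an operand whose displacement has been narrowed to disp8 gets
   ModRM mode 1 (a single displacement byte follows), a disp16/disp32
   operand gets mode 2, and 0 is returned when no displacement bits
   are set at all.  */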
1747
1748 static INLINE int
1749 fits_in_signed_byte (offsetT num)
1750 {
1751 return (num >= -128) && (num <= 127);
1752 }
1753
1754 static INLINE int
1755 fits_in_unsigned_byte (offsetT num)
1756 {
1757 return (num & 0xff) == num;
1758 }
1759
1760 static INLINE int
1761 fits_in_unsigned_word (offsetT num)
1762 {
1763 return (num & 0xffff) == num;
1764 }
1765
1766 static INLINE int
1767 fits_in_signed_word (offsetT num)
1768 {
1769 return (-32768 <= num) && (num <= 32767);
1770 }
1771
1772 static INLINE int
1773 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1774 {
1775 #ifndef BFD64
1776 return 1;
1777 #else
1778 return (!(((offsetT) -1 << 31) & num)
1779 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1780 #endif
1781 } /* fits_in_signed_long() */
1782
1783 static INLINE int
1784 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1785 {
1786 #ifndef BFD64
1787 return 1;
1788 #else
1789 return (num & (((offsetT) 2 << 31) - 1)) == num;
1790 #endif
1791 } /* fits_in_unsigned_long() */
1792
1793 static INLINE int
1794 fits_in_imm4 (offsetT num)
1795 {
1796 return (num & 0xf) == num;
1797 }
1798
1799 static i386_operand_type
1800 smallest_imm_type (offsetT num)
1801 {
1802 i386_operand_type t;
1803
1804 operand_type_set (&t, 0);
1805 t.bitfield.imm64 = 1;
1806
1807 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1808 {
1809 /* This code is disabled on the 486 because all the Imm1 forms
1810 in the opcode table are slower on the i486. They're the
1811 versions with the implicitly specified single-position
1812 displacement, which has another syntax if you really want to
1813 use that form. */
1814 t.bitfield.imm1 = 1;
1815 t.bitfield.imm8 = 1;
1816 t.bitfield.imm8s = 1;
1817 t.bitfield.imm16 = 1;
1818 t.bitfield.imm32 = 1;
1819 t.bitfield.imm32s = 1;
1820 }
1821 else if (fits_in_signed_byte (num))
1822 {
1823 t.bitfield.imm8 = 1;
1824 t.bitfield.imm8s = 1;
1825 t.bitfield.imm16 = 1;
1826 t.bitfield.imm32 = 1;
1827 t.bitfield.imm32s = 1;
1828 }
1829 else if (fits_in_unsigned_byte (num))
1830 {
1831 t.bitfield.imm8 = 1;
1832 t.bitfield.imm16 = 1;
1833 t.bitfield.imm32 = 1;
1834 t.bitfield.imm32s = 1;
1835 }
1836 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1837 {
1838 t.bitfield.imm16 = 1;
1839 t.bitfield.imm32 = 1;
1840 t.bitfield.imm32s = 1;
1841 }
1842 else if (fits_in_signed_long (num))
1843 {
1844 t.bitfield.imm32 = 1;
1845 t.bitfield.imm32s = 1;
1846 }
1847 else if (fits_in_unsigned_long (num))
1848 t.bitfield.imm32 = 1;
1849
1850 return t;
1851 }
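/* Worked example: smallest_imm_type (200) leaves imm8s clear but sets
   imm8, imm16, imm32 and imm32s (imm64 is always set), since 200 fits
   in an unsigned but not a signed byte; smallest_imm_type (-129) sets
   only imm16 and wider.  */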
1852
1853 static offsetT
1854 offset_in_range (offsetT val, int size)
1855 {
1856 addressT mask;
1857
1858 switch (size)
1859 {
1860 case 1: mask = ((addressT) 1 << 8) - 1; break;
1861 case 2: mask = ((addressT) 1 << 16) - 1; break;
1862 case 4: mask = ((addressT) 2 << 31) - 1; break;
1863 #ifdef BFD64
1864 case 8: mask = ((addressT) 2 << 63) - 1; break;
1865 #endif
1866 default: abort ();
1867 }
1868
1869 #ifdef BFD64
1870 /* If BFD64, sign extend val for 32bit address mode. */
1871 if (flag_code != CODE_64BIT
1872 || i.prefix[ADDR_PREFIX])
1873 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1874 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1875 #endif
1876
1877 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1878 {
1879 char buf1[40], buf2[40];
1880
1881 sprint_value (buf1, val);
1882 sprint_value (buf2, val & mask);
1883 as_warn (_("%s shortened to %s"), buf1, buf2);
1884 }
1885 return val & mask;
1886 }
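/* Worked example: offset_in_range (0x12345, 2) does not fit the 16 bit
   mask, so a "shortened to" warning is issued and 0x2345 is returned;
   offset_in_range (-1, 2) is silently truncated to 0xffff because the
   discarded upper bits are all ones.  */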
1887
1888 enum PREFIX_GROUP
1889 {
1890 PREFIX_EXIST = 0,
1891 PREFIX_LOCK,
1892 PREFIX_REP,
1893 PREFIX_OTHER
1894 };
1895
1896 /* Returns
1897 a. PREFIX_EXIST if attempting to add a prefix where one from the
1898 same class already exists.
1899 b. PREFIX_LOCK if lock prefix is added.
1900 c. PREFIX_REP if rep/repne prefix is added.
1901 d. PREFIX_OTHER if other prefix is added.
1902 */
1903
1904 static enum PREFIX_GROUP
1905 add_prefix (unsigned int prefix)
1906 {
1907 enum PREFIX_GROUP ret = PREFIX_OTHER;
1908 unsigned int q;
1909
1910 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1911 && flag_code == CODE_64BIT)
1912 {
1913 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1914 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1915 && (prefix & (REX_R | REX_X | REX_B))))
1916 ret = PREFIX_EXIST;
1917 q = REX_PREFIX;
1918 }
1919 else
1920 {
1921 switch (prefix)
1922 {
1923 default:
1924 abort ();
1925
1926 case CS_PREFIX_OPCODE:
1927 case DS_PREFIX_OPCODE:
1928 case ES_PREFIX_OPCODE:
1929 case FS_PREFIX_OPCODE:
1930 case GS_PREFIX_OPCODE:
1931 case SS_PREFIX_OPCODE:
1932 q = SEG_PREFIX;
1933 break;
1934
1935 case REPNE_PREFIX_OPCODE:
1936 case REPE_PREFIX_OPCODE:
1937 q = REP_PREFIX;
1938 ret = PREFIX_REP;
1939 break;
1940
1941 case LOCK_PREFIX_OPCODE:
1942 q = LOCK_PREFIX;
1943 ret = PREFIX_LOCK;
1944 break;
1945
1946 case FWAIT_OPCODE:
1947 q = WAIT_PREFIX;
1948 break;
1949
1950 case ADDR_PREFIX_OPCODE:
1951 q = ADDR_PREFIX;
1952 break;
1953
1954 case DATA_PREFIX_OPCODE:
1955 q = DATA_PREFIX;
1956 break;
1957 }
1958 if (i.prefix[q] != 0)
1959 ret = PREFIX_EXIST;
1960 }
1961
1962 if (ret)
1963 {
1964 if (!i.prefix[q])
1965 ++i.prefixes;
1966 i.prefix[q] |= prefix;
1967 }
1968 else
1969 as_bad (_("same type of prefix used twice"));
1970
1971 return ret;
1972 }
1973
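/* Illustrative sketch, not part of the original source: prefixes are
   grouped into slots, and only one prefix per slot is accepted, e.g.:

     add_prefix (LOCK_PREFIX_OPCODE);  -> PREFIX_LOCK
     add_prefix (CS_PREFIX_OPCODE);    -> PREFIX_OTHER  (fills SEG_PREFIX)
     add_prefix (DS_PREFIX_OPCODE);    -> PREFIX_EXIST, "same type of
                                          prefix used twice", because the
                                          SEG_PREFIX slot is already taken.

   REX prefixes in 64-bit mode are OR-ed into the REX_PREFIX slot instead,
   and only clash when REX.W or one of REX.R/X/B would be set twice.  */
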
1974 static void
1975 update_code_flag (int value, int check)
1976 {
1977 PRINTF_LIKE ((*as_error));
1978
1979 flag_code = (enum flag_code) value;
1980 if (flag_code == CODE_64BIT)
1981 {
1982 cpu_arch_flags.bitfield.cpu64 = 1;
1983 cpu_arch_flags.bitfield.cpuno64 = 0;
1984 }
1985 else
1986 {
1987 cpu_arch_flags.bitfield.cpu64 = 0;
1988 cpu_arch_flags.bitfield.cpuno64 = 1;
1989 }
1990 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1991 {
1992 if (check)
1993 as_error = as_fatal;
1994 else
1995 as_error = as_bad;
1996 (*as_error) (_("64bit mode not supported on `%s'."),
1997 cpu_arch_name ? cpu_arch_name : default_arch);
1998 }
1999 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2000 {
2001 if (check)
2002 as_error = as_fatal;
2003 else
2004 as_error = as_bad;
2005 (*as_error) (_("32bit mode not supported on `%s'."),
2006 cpu_arch_name ? cpu_arch_name : default_arch);
2007 }
2008 stackop_size = '\0';
2009 }
2010
2011 static void
2012 set_code_flag (int value)
2013 {
2014 update_code_flag (value, 0);
2015 }
2016
2017 static void
2018 set_16bit_gcc_code_flag (int new_code_flag)
2019 {
2020 flag_code = (enum flag_code) new_code_flag;
2021 if (flag_code != CODE_16BIT)
2022 abort ();
2023 cpu_arch_flags.bitfield.cpu64 = 0;
2024 cpu_arch_flags.bitfield.cpuno64 = 1;
2025 stackop_size = LONG_MNEM_SUFFIX;
2026 }
2027
2028 static void
2029 set_intel_syntax (int syntax_flag)
2030 {
2031 /* Find out if register prefixing is specified. */
2032 int ask_naked_reg = 0;
2033
2034 SKIP_WHITESPACE ();
2035 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2036 {
2037 char *string = input_line_pointer;
2038 int e = get_symbol_end ();
2039
2040 if (strcmp (string, "prefix") == 0)
2041 ask_naked_reg = 1;
2042 else if (strcmp (string, "noprefix") == 0)
2043 ask_naked_reg = -1;
2044 else
2045 as_bad (_("bad argument to syntax directive."));
2046 *input_line_pointer = e;
2047 }
2048 demand_empty_rest_of_line ();
2049
2050 intel_syntax = syntax_flag;
2051
2052 if (ask_naked_reg == 0)
2053 allow_naked_reg = (intel_syntax
2054 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2055 else
2056 allow_naked_reg = (ask_naked_reg < 0);
2057
2058 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2059
2060 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2061 identifier_chars['$'] = intel_syntax ? '$' : 0;
2062 register_prefix = allow_naked_reg ? "" : "%";
2063 }
2064
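/* Illustrative usage, not part of the original source: this handler is
   shared by the ".intel_syntax" and ".att_syntax" directives (hence the
   syntax_flag argument), with an optional prefix/noprefix keyword, e.g.:

     .intel_syntax noprefix   # Intel operand order, bare register names
     .att_syntax prefix       # AT&T operand order, %-prefixed registers

   With no argument, naked registers are allowed in Intel mode only when
   the target's symbol leading char is non-empty.  */
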
2065 static void
2066 set_intel_mnemonic (int mnemonic_flag)
2067 {
2068 intel_mnemonic = mnemonic_flag;
2069 }
2070
2071 static void
2072 set_allow_index_reg (int flag)
2073 {
2074 allow_index_reg = flag;
2075 }
2076
2077 static void
2078 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2079 {
2080 SKIP_WHITESPACE ();
2081
2082 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2083 {
2084 char *string = input_line_pointer;
2085 int e = get_symbol_end ();
2086
2087 if (strcmp (string, "none") == 0)
2088 sse_check = sse_check_none;
2089 else if (strcmp (string, "warning") == 0)
2090 sse_check = sse_check_warning;
2091 else if (strcmp (string, "error") == 0)
2092 sse_check = sse_check_error;
2093 else
2094 as_bad (_("bad argument to sse_check directive."));
2095 *input_line_pointer = e;
2096 }
2097 else
2098 as_bad (_("missing argument for sse_check directive"));
2099
2100 demand_empty_rest_of_line ();
2101 }
2102
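/* Illustrative usage, not part of the original source: the directive
   handled above takes exactly one of the keywords it checks for, e.g.:

     .sse_check warning
     .sse_check none

   md_assemble () then reports SSE instructions accordingly whenever the
   matched template carries one of the CpuSSE* flags.  */
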
2103 static void
2104 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2105 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2106 {
2107 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2108 static const char *arch;
2109
2110 /* Intel L1OM is only supported on ELF. */
2111 if (!IS_ELF)
2112 return;
2113
2114 if (!arch)
2115 {
2116 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2117 use default_arch. */
2118 arch = cpu_arch_name;
2119 if (!arch)
2120 arch = default_arch;
2121 }
2122
2123 /* If we are targeting Intel L1OM, we must enable it. */
2124 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2125 || new_flag.bitfield.cpul1om)
2126 return;
2127
2128 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2129 #endif
2130 }
2131
2132 static void
2133 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2134 {
2135 SKIP_WHITESPACE ();
2136
2137 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2138 {
2139 char *string = input_line_pointer;
2140 int e = get_symbol_end ();
2141 unsigned int j;
2142 i386_cpu_flags flags;
2143
2144 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2145 {
2146 if (strcmp (string, cpu_arch[j].name) == 0)
2147 {
2148 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2149
2150 if (*string != '.')
2151 {
2152 cpu_arch_name = cpu_arch[j].name;
2153 cpu_sub_arch_name = NULL;
2154 cpu_arch_flags = cpu_arch[j].flags;
2155 if (flag_code == CODE_64BIT)
2156 {
2157 cpu_arch_flags.bitfield.cpu64 = 1;
2158 cpu_arch_flags.bitfield.cpuno64 = 0;
2159 }
2160 else
2161 {
2162 cpu_arch_flags.bitfield.cpu64 = 0;
2163 cpu_arch_flags.bitfield.cpuno64 = 1;
2164 }
2165 cpu_arch_isa = cpu_arch[j].type;
2166 cpu_arch_isa_flags = cpu_arch[j].flags;
2167 if (!cpu_arch_tune_set)
2168 {
2169 cpu_arch_tune = cpu_arch_isa;
2170 cpu_arch_tune_flags = cpu_arch_isa_flags;
2171 }
2172 break;
2173 }
2174
2175 if (!cpu_arch[j].negated)
2176 flags = cpu_flags_or (cpu_arch_flags,
2177 cpu_arch[j].flags);
2178 else
2179 flags = cpu_flags_and_not (cpu_arch_flags,
2180 cpu_arch[j].flags);
2181 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2182 {
2183 if (cpu_sub_arch_name)
2184 {
2185 char *name = cpu_sub_arch_name;
2186 cpu_sub_arch_name = concat (name,
2187 cpu_arch[j].name,
2188 (const char *) NULL);
2189 free (name);
2190 }
2191 else
2192 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2193 cpu_arch_flags = flags;
2194 cpu_arch_isa_flags = flags;
2195 }
2196 *input_line_pointer = e;
2197 demand_empty_rest_of_line ();
2198 return;
2199 }
2200 }
2201 if (j >= ARRAY_SIZE (cpu_arch))
2202 as_bad (_("no such architecture: `%s'"), string);
2203
2204 *input_line_pointer = e;
2205 }
2206 else
2207 as_bad (_("missing cpu architecture"));
2208
2209 no_cond_jump_promotion = 0;
2210 if (*input_line_pointer == ','
2211 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2212 {
2213 char *string = ++input_line_pointer;
2214 int e = get_symbol_end ();
2215
2216 if (strcmp (string, "nojumps") == 0)
2217 no_cond_jump_promotion = 1;
2218 else if (strcmp (string, "jumps") == 0)
2219 ;
2220 else
2221 as_bad (_("no such architecture modifier: `%s'"), string);
2222
2223 *input_line_pointer = e;
2224 }
2225
2226 demand_empty_rest_of_line ();
2227 }
2228
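/* Illustrative usage, not part of the original source (the exact entry
   names live in the cpu_arch[] table): a leading dot selects a
   sub-architecture extension instead of a whole architecture, e.g.:

     .arch i486, nojumps    # base architecture plus the jump modifier
     .arch .sse4.1          # assumed sub-arch entry; enables one extension
                            # on top of the current architecture

   "nojumps"/"jumps" only control conditional-jump promotion.  */
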
2229 enum bfd_architecture
2230 i386_arch (void)
2231 {
2232 if (cpu_arch_isa == PROCESSOR_L1OM)
2233 {
2234 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2235 || flag_code != CODE_64BIT)
2236 as_fatal (_("Intel L1OM is 64bit ELF only"));
2237 return bfd_arch_l1om;
2238 }
2239 else
2240 return bfd_arch_i386;
2241 }
2242
2243 unsigned long
2244 i386_mach ()
2245 {
2246 if (!strncmp (default_arch, "x86_64", 6))
2247 {
2248 if (cpu_arch_isa == PROCESSOR_L1OM)
2249 {
2250 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2251 || default_arch[6] != '\0')
2252 as_fatal (_("Intel L1OM is 64bit ELF only"));
2253 return bfd_mach_l1om;
2254 }
2255 else if (default_arch[6] == '\0')
2256 return bfd_mach_x86_64;
2257 else
2258 return bfd_mach_x64_32;
2259 }
2260 else if (!strcmp (default_arch, "i386"))
2261 return bfd_mach_i386_i386;
2262 else
2263 as_fatal (_("unknown architecture"));
2264 }
2265 \f
2266 void
2267 md_begin ()
2268 {
2269 const char *hash_err;
2270
2271 /* Initialize op_hash hash table. */
2272 op_hash = hash_new ();
2273
2274 {
2275 const insn_template *optab;
2276 templates *core_optab;
2277
2278 /* Setup for loop. */
2279 optab = i386_optab;
2280 core_optab = (templates *) xmalloc (sizeof (templates));
2281 core_optab->start = optab;
2282
2283 while (1)
2284 {
2285 ++optab;
2286 if (optab->name == NULL
2287 || strcmp (optab->name, (optab - 1)->name) != 0)
2288 {
2289 /* different name --> ship out current template list;
2290 add to hash table; & begin anew. */
2291 core_optab->end = optab;
2292 hash_err = hash_insert (op_hash,
2293 (optab - 1)->name,
2294 (void *) core_optab);
2295 if (hash_err)
2296 {
2297 as_fatal (_("internal Error: Can't hash %s: %s"),
2298 (optab - 1)->name,
2299 hash_err);
2300 }
2301 if (optab->name == NULL)
2302 break;
2303 core_optab = (templates *) xmalloc (sizeof (templates));
2304 core_optab->start = optab;
2305 }
2306 }
2307 }
2308
2309 /* Initialize reg_hash hash table. */
2310 reg_hash = hash_new ();
2311 {
2312 const reg_entry *regtab;
2313 unsigned int regtab_size = i386_regtab_size;
2314
2315 for (regtab = i386_regtab; regtab_size--; regtab++)
2316 {
2317 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2318 if (hash_err)
2319 as_fatal (_("internal Error: Can't hash %s: %s"),
2320 regtab->reg_name,
2321 hash_err);
2322 }
2323 }
2324
2325 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2326 {
2327 int c;
2328 char *p;
2329
2330 for (c = 0; c < 256; c++)
2331 {
2332 if (ISDIGIT (c))
2333 {
2334 digit_chars[c] = c;
2335 mnemonic_chars[c] = c;
2336 register_chars[c] = c;
2337 operand_chars[c] = c;
2338 }
2339 else if (ISLOWER (c))
2340 {
2341 mnemonic_chars[c] = c;
2342 register_chars[c] = c;
2343 operand_chars[c] = c;
2344 }
2345 else if (ISUPPER (c))
2346 {
2347 mnemonic_chars[c] = TOLOWER (c);
2348 register_chars[c] = mnemonic_chars[c];
2349 operand_chars[c] = c;
2350 }
2351
2352 if (ISALPHA (c) || ISDIGIT (c))
2353 identifier_chars[c] = c;
2354 else if (c >= 128)
2355 {
2356 identifier_chars[c] = c;
2357 operand_chars[c] = c;
2358 }
2359 }
2360
2361 #ifdef LEX_AT
2362 identifier_chars['@'] = '@';
2363 #endif
2364 #ifdef LEX_QM
2365 identifier_chars['?'] = '?';
2366 operand_chars['?'] = '?';
2367 #endif
2368 digit_chars['-'] = '-';
2369 mnemonic_chars['_'] = '_';
2370 mnemonic_chars['-'] = '-';
2371 mnemonic_chars['.'] = '.';
2372 identifier_chars['_'] = '_';
2373 identifier_chars['.'] = '.';
2374
2375 for (p = operand_special_chars; *p != '\0'; p++)
2376 operand_chars[(unsigned char) *p] = *p;
2377 }
2378
2379 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2380 if (IS_ELF)
2381 {
2382 record_alignment (text_section, 2);
2383 record_alignment (data_section, 2);
2384 record_alignment (bss_section, 2);
2385 }
2386 #endif
2387
2388 if (flag_code == CODE_64BIT)
2389 {
2390 #if defined (OBJ_COFF) && defined (TE_PE)
2391 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2392 ? 32 : 16);
2393 #else
2394 x86_dwarf2_return_column = 16;
2395 #endif
2396 x86_cie_data_alignment = -8;
2397 }
2398 else
2399 {
2400 x86_dwarf2_return_column = 8;
2401 x86_cie_data_alignment = -4;
2402 }
2403 }
2404
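/* Illustrative sketch, not part of the original source: the lexical
   tables filled in above make mnemonics case-insensitive while leaving
   operands untouched, e.g.:

     mnemonic_chars['A'] == 'a'   so MOVL is looked up as "movl"
     operand_chars['A']  == 'A'   so symbol names keep their case
     register_chars['A'] == 'a'   so %EAX can match the "eax" reg_entry  */
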
2405 void
2406 i386_print_statistics (FILE *file)
2407 {
2408 hash_print_statistics (file, "i386 opcode", op_hash);
2409 hash_print_statistics (file, "i386 register", reg_hash);
2410 }
2411 \f
2412 #ifdef DEBUG386
2413
2414 /* Debugging routines for md_assemble. */
2415 static void pte (insn_template *);
2416 static void pt (i386_operand_type);
2417 static void pe (expressionS *);
2418 static void ps (symbolS *);
2419
2420 static void
2421 pi (char *line, i386_insn *x)
2422 {
2423 unsigned int j;
2424
2425 fprintf (stdout, "%s: template ", line);
2426 pte (&x->tm);
2427 fprintf (stdout, " address: base %s index %s scale %x\n",
2428 x->base_reg ? x->base_reg->reg_name : "none",
2429 x->index_reg ? x->index_reg->reg_name : "none",
2430 x->log2_scale_factor);
2431 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2432 x->rm.mode, x->rm.reg, x->rm.regmem);
2433 fprintf (stdout, " sib: base %x index %x scale %x\n",
2434 x->sib.base, x->sib.index, x->sib.scale);
2435 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2436 (x->rex & REX_W) != 0,
2437 (x->rex & REX_R) != 0,
2438 (x->rex & REX_X) != 0,
2439 (x->rex & REX_B) != 0);
2440 for (j = 0; j < x->operands; j++)
2441 {
2442 fprintf (stdout, " #%d: ", j + 1);
2443 pt (x->types[j]);
2444 fprintf (stdout, "\n");
2445 if (x->types[j].bitfield.reg8
2446 || x->types[j].bitfield.reg16
2447 || x->types[j].bitfield.reg32
2448 || x->types[j].bitfield.reg64
2449 || x->types[j].bitfield.regmmx
2450 || x->types[j].bitfield.regxmm
2451 || x->types[j].bitfield.regymm
2452 || x->types[j].bitfield.sreg2
2453 || x->types[j].bitfield.sreg3
2454 || x->types[j].bitfield.control
2455 || x->types[j].bitfield.debug
2456 || x->types[j].bitfield.test)
2457 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2458 if (operand_type_check (x->types[j], imm))
2459 pe (x->op[j].imms);
2460 if (operand_type_check (x->types[j], disp))
2461 pe (x->op[j].disps);
2462 }
2463 }
2464
2465 static void
2466 pte (insn_template *t)
2467 {
2468 unsigned int j;
2469 fprintf (stdout, " %d operands ", t->operands);
2470 fprintf (stdout, "opcode %x ", t->base_opcode);
2471 if (t->extension_opcode != None)
2472 fprintf (stdout, "ext %x ", t->extension_opcode);
2473 if (t->opcode_modifier.d)
2474 fprintf (stdout, "D");
2475 if (t->opcode_modifier.w)
2476 fprintf (stdout, "W");
2477 fprintf (stdout, "\n");
2478 for (j = 0; j < t->operands; j++)
2479 {
2480 fprintf (stdout, " #%d type ", j + 1);
2481 pt (t->operand_types[j]);
2482 fprintf (stdout, "\n");
2483 }
2484 }
2485
2486 static void
2487 pe (expressionS *e)
2488 {
2489 fprintf (stdout, " operation %d\n", e->X_op);
2490 fprintf (stdout, " add_number %ld (%lx)\n",
2491 (long) e->X_add_number, (long) e->X_add_number);
2492 if (e->X_add_symbol)
2493 {
2494 fprintf (stdout, " add_symbol ");
2495 ps (e->X_add_symbol);
2496 fprintf (stdout, "\n");
2497 }
2498 if (e->X_op_symbol)
2499 {
2500 fprintf (stdout, " op_symbol ");
2501 ps (e->X_op_symbol);
2502 fprintf (stdout, "\n");
2503 }
2504 }
2505
2506 static void
2507 ps (symbolS *s)
2508 {
2509 fprintf (stdout, "%s type %s%s",
2510 S_GET_NAME (s),
2511 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2512 segment_name (S_GET_SEGMENT (s)));
2513 }
2514
2515 static struct type_name
2516 {
2517 i386_operand_type mask;
2518 const char *name;
2519 }
2520 const type_names[] =
2521 {
2522 { OPERAND_TYPE_REG8, "r8" },
2523 { OPERAND_TYPE_REG16, "r16" },
2524 { OPERAND_TYPE_REG32, "r32" },
2525 { OPERAND_TYPE_REG64, "r64" },
2526 { OPERAND_TYPE_IMM8, "i8" },
2527 { OPERAND_TYPE_IMM8S, "i8s" },
2528 { OPERAND_TYPE_IMM16, "i16" },
2529 { OPERAND_TYPE_IMM32, "i32" },
2530 { OPERAND_TYPE_IMM32S, "i32s" },
2531 { OPERAND_TYPE_IMM64, "i64" },
2532 { OPERAND_TYPE_IMM1, "i1" },
2533 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2534 { OPERAND_TYPE_DISP8, "d8" },
2535 { OPERAND_TYPE_DISP16, "d16" },
2536 { OPERAND_TYPE_DISP32, "d32" },
2537 { OPERAND_TYPE_DISP32S, "d32s" },
2538 { OPERAND_TYPE_DISP64, "d64" },
2539 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2540 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2541 { OPERAND_TYPE_CONTROL, "control reg" },
2542 { OPERAND_TYPE_TEST, "test reg" },
2543 { OPERAND_TYPE_DEBUG, "debug reg" },
2544 { OPERAND_TYPE_FLOATREG, "FReg" },
2545 { OPERAND_TYPE_FLOATACC, "FAcc" },
2546 { OPERAND_TYPE_SREG2, "SReg2" },
2547 { OPERAND_TYPE_SREG3, "SReg3" },
2548 { OPERAND_TYPE_ACC, "Acc" },
2549 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2550 { OPERAND_TYPE_REGMMX, "rMMX" },
2551 { OPERAND_TYPE_REGXMM, "rXMM" },
2552 { OPERAND_TYPE_REGYMM, "rYMM" },
2553 { OPERAND_TYPE_ESSEG, "es" },
2554 };
2555
2556 static void
2557 pt (i386_operand_type t)
2558 {
2559 unsigned int j;
2560 i386_operand_type a;
2561
2562 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2563 {
2564 a = operand_type_and (t, type_names[j].mask);
2565 if (!operand_type_all_zero (&a))
2566 fprintf (stdout, "%s, ", type_names[j].name);
2567 }
2568 fflush (stdout);
2569 }
2570
2571 #endif /* DEBUG386 */
2572 \f
2573 static bfd_reloc_code_real_type
2574 reloc (unsigned int size,
2575 int pcrel,
2576 int sign,
2577 bfd_reloc_code_real_type other)
2578 {
2579 if (other != NO_RELOC)
2580 {
2581 reloc_howto_type *rel;
2582
2583 if (size == 8)
2584 switch (other)
2585 {
2586 case BFD_RELOC_X86_64_GOT32:
2587 return BFD_RELOC_X86_64_GOT64;
2588 break;
2589 case BFD_RELOC_X86_64_PLTOFF64:
2590 return BFD_RELOC_X86_64_PLTOFF64;
2591 break;
2592 case BFD_RELOC_X86_64_GOTPC32:
2593 other = BFD_RELOC_X86_64_GOTPC64;
2594 break;
2595 case BFD_RELOC_X86_64_GOTPCREL:
2596 other = BFD_RELOC_X86_64_GOTPCREL64;
2597 break;
2598 case BFD_RELOC_X86_64_TPOFF32:
2599 other = BFD_RELOC_X86_64_TPOFF64;
2600 break;
2601 case BFD_RELOC_X86_64_DTPOFF32:
2602 other = BFD_RELOC_X86_64_DTPOFF64;
2603 break;
2604 default:
2605 break;
2606 }
2607
2608 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2609 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2610 sign = -1;
2611
2612 rel = bfd_reloc_type_lookup (stdoutput, other);
2613 if (!rel)
2614 as_bad (_("unknown relocation (%u)"), other);
2615 else if (size != bfd_get_reloc_size (rel))
2616 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2617 bfd_get_reloc_size (rel),
2618 size);
2619 else if (pcrel && !rel->pc_relative)
2620 as_bad (_("non-pc-relative relocation for pc-relative field"));
2621 else if ((rel->complain_on_overflow == complain_overflow_signed
2622 && !sign)
2623 || (rel->complain_on_overflow == complain_overflow_unsigned
2624 && sign > 0))
2625 as_bad (_("relocated field and relocation type differ in signedness"));
2626 else
2627 return other;
2628 return NO_RELOC;
2629 }
2630
2631 if (pcrel)
2632 {
2633 if (!sign)
2634 as_bad (_("there are no unsigned pc-relative relocations"));
2635 switch (size)
2636 {
2637 case 1: return BFD_RELOC_8_PCREL;
2638 case 2: return BFD_RELOC_16_PCREL;
2639 case 4: return BFD_RELOC_32_PCREL;
2640 case 8: return BFD_RELOC_64_PCREL;
2641 }
2642 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2643 }
2644 else
2645 {
2646 if (sign > 0)
2647 switch (size)
2648 {
2649 case 4: return BFD_RELOC_X86_64_32S;
2650 }
2651 else
2652 switch (size)
2653 {
2654 case 1: return BFD_RELOC_8;
2655 case 2: return BFD_RELOC_16;
2656 case 4: return BFD_RELOC_32;
2657 case 8: return BFD_RELOC_64;
2658 }
2659 as_bad (_("cannot do %s %u byte relocation"),
2660 sign > 0 ? "signed" : "unsigned", size);
2661 }
2662
2663 return NO_RELOC;
2664 }
2665
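/* Illustrative sketch, not part of the original source: with no explicit
   relocation operator the result depends only on size, pcrel and sign,
   e.g.:

     reloc (4, 1, 1, NO_RELOC) -> BFD_RELOC_32_PCREL
     reloc (4, 0, 1, NO_RELOC) -> BFD_RELOC_X86_64_32S
     reloc (8, 0, 0, NO_RELOC) -> BFD_RELOC_64
     reloc (2, 1, 0, NO_RELOC) -> BFD_RELOC_16_PCREL, after complaining
                                  that unsigned pc-relative relocations
                                  do not exist.

   An explicit 32-bit operator such as BFD_RELOC_X86_64_GOTPC32 is widened
   to its 64-bit counterpart when size is 8.  */
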
2666 /* Here we decide which fixups can be adjusted to make them relative to
2667 the beginning of the section instead of the symbol. Basically we need
2668 to make sure that the dynamic relocations are done correctly, so in
2669 some cases we force the original symbol to be used. */
2670
2671 int
2672 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2673 {
2674 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2675 if (!IS_ELF)
2676 return 1;
2677
2678 /* Don't adjust pc-relative references to merge sections in 64-bit
2679 mode. */
2680 if (use_rela_relocations
2681 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2682 && fixP->fx_pcrel)
2683 return 0;
2684
2685 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2686 and changed later by validate_fix. */
2687 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2688 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2689 return 0;
2690
2691 /* adjust_reloc_syms doesn't know about the GOT. */
2692 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2693 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2694 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2695 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2696 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2697 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2698 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2699 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2700 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2701 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2702 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2703 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2704 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2705 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2706 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2707 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2708 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2709 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2710 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2711 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2712 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2713 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2714 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2715 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2716 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2717 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2718 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2719 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2720 return 0;
2721 #endif
2722 return 1;
2723 }
2724
2725 static int
2726 intel_float_operand (const char *mnemonic)
2727 {
2728 /* Note that the value returned is meaningful only for opcodes with (memory)
2729 operands, hence the code here is free to improperly handle opcodes that
2730 have no operands (for better performance and smaller code). */
2731
2732 if (mnemonic[0] != 'f')
2733 return 0; /* non-math */
2734
2735 switch (mnemonic[1])
2736 {
2737 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2738 the fs segment override prefix are not currently handled because
2739 no call path can make opcodes without operands get here. */
2740 case 'i':
2741 return 2 /* integer op */;
2742 case 'l':
2743 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2744 return 3; /* fldcw/fldenv */
2745 break;
2746 case 'n':
2747 if (mnemonic[2] != 'o' /* fnop */)
2748 return 3; /* non-waiting control op */
2749 break;
2750 case 'r':
2751 if (mnemonic[2] == 's')
2752 return 3; /* frstor/frstpm */
2753 break;
2754 case 's':
2755 if (mnemonic[2] == 'a')
2756 return 3; /* fsave */
2757 if (mnemonic[2] == 't')
2758 {
2759 switch (mnemonic[3])
2760 {
2761 case 'c': /* fstcw */
2762 case 'd': /* fstdw */
2763 case 'e': /* fstenv */
2764 case 's': /* fsts[gw] */
2765 return 3;
2766 }
2767 }
2768 break;
2769 case 'x':
2770 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2771 return 0; /* fxsave/fxrstor are not really math ops */
2772 break;
2773 }
2774
2775 return 1;
2776 }
2777
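/* Illustrative sketch, not part of the original source: return values of
   intel_float_operand () for a few mnemonics, per the cases above:

     "mov"    -> 0  (not an x87 mnemonic)
     "fadd"   -> 1  (plain floating point op)
     "fiadd"  -> 2  (integer operand form)
     "fldcw"  -> 3  (control/state op)
     "fxsave" -> 0  (explicitly excluded above)  */
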
2778 /* Build the VEX prefix. */
2779
2780 static void
2781 build_vex_prefix (const insn_template *t)
2782 {
2783 unsigned int register_specifier;
2784 unsigned int implied_prefix;
2785 unsigned int vector_length;
2786
2787 /* Check register specifier. */
2788 if (i.vex.register_specifier)
2789 {
2790 register_specifier = i.vex.register_specifier->reg_num;
2791 if ((i.vex.register_specifier->reg_flags & RegRex))
2792 register_specifier += 8;
2793 register_specifier = ~register_specifier & 0xf;
2794 }
2795 else
2796 register_specifier = 0xf;
2797
2798 /* Use 2-byte VEX prefix by swapping destination and source
2799 operands. */
2800 if (!i.swap_operand
2801 && i.operands == i.reg_operands
2802 && i.tm.opcode_modifier.vexopcode == VEX0F
2803 && i.tm.opcode_modifier.s
2804 && i.rex == REX_B)
2805 {
2806 unsigned int xchg = i.operands - 1;
2807 union i386_op temp_op;
2808 i386_operand_type temp_type;
2809
2810 temp_type = i.types[xchg];
2811 i.types[xchg] = i.types[0];
2812 i.types[0] = temp_type;
2813 temp_op = i.op[xchg];
2814 i.op[xchg] = i.op[0];
2815 i.op[0] = temp_op;
2816
2817 gas_assert (i.rm.mode == 3);
2818
2819 i.rex = REX_R;
2820 xchg = i.rm.regmem;
2821 i.rm.regmem = i.rm.reg;
2822 i.rm.reg = xchg;
2823
2824 /* Use the next insn. */
2825 i.tm = t[1];
2826 }
2827
2828 if (i.tm.opcode_modifier.vex == VEXScalar)
2829 vector_length = avxscalar;
2830 else
2831 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2832
2833 switch ((i.tm.base_opcode >> 8) & 0xff)
2834 {
2835 case 0:
2836 implied_prefix = 0;
2837 break;
2838 case DATA_PREFIX_OPCODE:
2839 implied_prefix = 1;
2840 break;
2841 case REPE_PREFIX_OPCODE:
2842 implied_prefix = 2;
2843 break;
2844 case REPNE_PREFIX_OPCODE:
2845 implied_prefix = 3;
2846 break;
2847 default:
2848 abort ();
2849 }
2850
2851 /* Use 2-byte VEX prefix if possible. */
2852 if (i.tm.opcode_modifier.vexopcode == VEX0F
2853 && i.tm.opcode_modifier.vexw != VEXW1
2854 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2855 {
2856 /* 2-byte VEX prefix. */
2857 unsigned int r;
2858
2859 i.vex.length = 2;
2860 i.vex.bytes[0] = 0xc5;
2861
2862 /* Check the REX.R bit. */
2863 r = (i.rex & REX_R) ? 0 : 1;
2864 i.vex.bytes[1] = (r << 7
2865 | register_specifier << 3
2866 | vector_length << 2
2867 | implied_prefix);
2868 }
2869 else
2870 {
2871 /* 3-byte VEX prefix. */
2872 unsigned int m, w;
2873
2874 i.vex.length = 3;
2875
2876 switch (i.tm.opcode_modifier.vexopcode)
2877 {
2878 case VEX0F:
2879 m = 0x1;
2880 i.vex.bytes[0] = 0xc4;
2881 break;
2882 case VEX0F38:
2883 m = 0x2;
2884 i.vex.bytes[0] = 0xc4;
2885 break;
2886 case VEX0F3A:
2887 m = 0x3;
2888 i.vex.bytes[0] = 0xc4;
2889 break;
2890 case XOP08:
2891 m = 0x8;
2892 i.vex.bytes[0] = 0x8f;
2893 break;
2894 case XOP09:
2895 m = 0x9;
2896 i.vex.bytes[0] = 0x8f;
2897 break;
2898 case XOP0A:
2899 m = 0xa;
2900 i.vex.bytes[0] = 0x8f;
2901 break;
2902 default:
2903 abort ();
2904 }
2905
2906 /* The high 3 bits of the second VEX byte are the 1's complement
2907 of the RXB bits from REX. */
2908 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2909
2910 /* Check the REX.W bit. */
2911 w = (i.rex & REX_W) ? 1 : 0;
2912 if (i.tm.opcode_modifier.vexw)
2913 {
2914 if (w)
2915 abort ();
2916
2917 if (i.tm.opcode_modifier.vexw == VEXW1)
2918 w = 1;
2919 }
2920
2921 i.vex.bytes[2] = (w << 7
2922 | register_specifier << 3
2923 | vector_length << 2
2924 | implied_prefix);
2925 }
2926 }
2927
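/* Illustrative sketch, not part of the original source: the two encodings
   built above lay out their fields as

     2-byte form:  c5  R.vvvv.L.pp
     3-byte form:  c4 or 8f  R.X.B.mmmmm  W.vvvv.L.pp

   where R/X/B are the complemented REX bits, vvvv the complemented
   register specifier, L the vector length (1 = 256-bit) and pp the
   implied 66/F3/F2 prefix.  */
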
2928 static void
2929 process_immext (void)
2930 {
2931 expressionS *exp;
2932
2933 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2934 {
2935 /* SSE3 instructions have the fixed operands with an opcode
2936 suffix which is coded in the same place as an 8-bit immediate
2937 field would be. Here we check those operands and remove them
2938 afterwards. */
2939 unsigned int x;
2940
2941 for (x = 0; x < i.operands; x++)
2942 if (i.op[x].regs->reg_num != x)
2943 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2944 register_prefix, i.op[x].regs->reg_name, x + 1,
2945 i.tm.name);
2946
2947 i.operands = 0;
2948 }
2949
2950 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2951 which is coded in the same place as an 8-bit immediate field
2952 would be. Here we fake an 8-bit immediate operand from the
2953 opcode suffix stored in tm.extension_opcode.
2954
2955 AVX instructions also use this encoding for some of the
2956 3-operand instructions. */
2957
2958 gas_assert (i.imm_operands == 0
2959 && (i.operands <= 2
2960 || (i.tm.opcode_modifier.vex
2961 && i.operands <= 4)));
2962
2963 exp = &im_expressions[i.imm_operands++];
2964 i.op[i.operands].imms = exp;
2965 i.types[i.operands] = imm8;
2966 i.operands++;
2967 exp->X_op = O_constant;
2968 exp->X_add_number = i.tm.extension_opcode;
2969 i.tm.extension_opcode = None;
2970 }
2971
2972 /* This is the guts of the machine-dependent assembler. LINE points to a
2973 machine dependent instruction. This function is supposed to emit
2974 the frags/bytes it assembles to. */
2975
2976 void
2977 md_assemble (char *line)
2978 {
2979 unsigned int j;
2980 char mnemonic[MAX_MNEM_SIZE];
2981 const insn_template *t;
2982
2983 /* Initialize globals. */
2984 memset (&i, '\0', sizeof (i));
2985 for (j = 0; j < MAX_OPERANDS; j++)
2986 i.reloc[j] = NO_RELOC;
2987 memset (disp_expressions, '\0', sizeof (disp_expressions));
2988 memset (im_expressions, '\0', sizeof (im_expressions));
2989 save_stack_p = save_stack;
2990
2991 /* First parse an instruction mnemonic & call i386_operand for the operands.
2992 We assume that the scrubber has arranged it so that line[0] is the valid
2993 start of a (possibly prefixed) mnemonic. */
2994
2995 line = parse_insn (line, mnemonic);
2996 if (line == NULL)
2997 return;
2998
2999 line = parse_operands (line, mnemonic);
3000 this_operand = -1;
3001 if (line == NULL)
3002 return;
3003
3004 /* Now we've parsed the mnemonic into a set of templates, and have the
3005 operands at hand. */
3006
3007 /* All intel opcodes have reversed operands except for "bound" and
3008 "enter". We also don't reverse intersegment "jmp" and "call"
3009 instructions with 2 immediate operands so that the immediate segment
3010 precedes the offset, as it does when in AT&T mode. */
3011 if (intel_syntax
3012 && i.operands > 1
3013 && (strcmp (mnemonic, "bound") != 0)
3014 && (strcmp (mnemonic, "invlpga") != 0)
3015 && !(operand_type_check (i.types[0], imm)
3016 && operand_type_check (i.types[1], imm)))
3017 swap_operands ();
3018
3019 /* The order of the immediates should be reversed for the
3020 2-immediate extrq and insertq instructions. */
3021 if (i.imm_operands == 2
3022 && (strcmp (mnemonic, "extrq") == 0
3023 || strcmp (mnemonic, "insertq") == 0))
3024 swap_2_operands (0, 1);
3025
3026 if (i.imm_operands)
3027 optimize_imm ();
3028
3029 /* Don't optimize displacement for movabs since it only takes 64bit
3030 displacement. */
3031 if (i.disp_operands
3032 && !i.disp32_encoding
3033 && (flag_code != CODE_64BIT
3034 || strcmp (mnemonic, "movabs") != 0))
3035 optimize_disp ();
3036
3037 /* Next, we find a template that matches the given insn,
3038 making sure the overlap of the given operands types is consistent
3039 with the template operand types. */
3040
3041 if (!(t = match_template ()))
3042 return;
3043
3044 if (sse_check != sse_check_none
3045 && !i.tm.opcode_modifier.noavx
3046 && (i.tm.cpu_flags.bitfield.cpusse
3047 || i.tm.cpu_flags.bitfield.cpusse2
3048 || i.tm.cpu_flags.bitfield.cpusse3
3049 || i.tm.cpu_flags.bitfield.cpussse3
3050 || i.tm.cpu_flags.bitfield.cpusse4_1
3051 || i.tm.cpu_flags.bitfield.cpusse4_2))
3052 {
3053 (sse_check == sse_check_warning
3054 ? as_warn
3055 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3056 }
3057
3058 /* Zap movzx and movsx suffix. The suffix has been set from
3059 "word ptr" or "byte ptr" on the source operand in Intel syntax
3060 or extracted from mnemonic in AT&T syntax. But we'll use
3061 the destination register to choose the suffix for encoding. */
3062 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3063 {
3064 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3065 there is no suffix, the default will be byte extension. */
3066 if (i.reg_operands != 2
3067 && !i.suffix
3068 && intel_syntax)
3069 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3070
3071 i.suffix = 0;
3072 }
3073
3074 if (i.tm.opcode_modifier.fwait)
3075 if (!add_prefix (FWAIT_OPCODE))
3076 return;
3077
3078 /* Check for lock without a lockable instruction. Destination operand
3079 must be memory unless it is xchg (0x86). */
3080 if (i.prefix[LOCK_PREFIX]
3081 && (!i.tm.opcode_modifier.islockable
3082 || i.mem_operands == 0
3083 || (i.tm.base_opcode != 0x86
3084 && !operand_type_check (i.types[i.operands - 1], anymem))))
3085 {
3086 as_bad (_("expecting lockable instruction after `lock'"));
3087 return;
3088 }
3089
3090 /* Check string instruction segment overrides. */
3091 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3092 {
3093 if (!check_string ())
3094 return;
3095 i.disp_operands = 0;
3096 }
3097
3098 if (!process_suffix ())
3099 return;
3100
3101 /* Update operand types. */
3102 for (j = 0; j < i.operands; j++)
3103 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3104
3105 /* Make still unresolved immediate matches conform to size of immediate
3106 given in i.suffix. */
3107 if (!finalize_imm ())
3108 return;
3109
3110 if (i.types[0].bitfield.imm1)
3111 i.imm_operands = 0; /* kludge for shift insns. */
3112
3113 /* We only need to check those implicit registers for instructions
3114 with 3 operands or less. */
3115 if (i.operands <= 3)
3116 for (j = 0; j < i.operands; j++)
3117 if (i.types[j].bitfield.inoutportreg
3118 || i.types[j].bitfield.shiftcount
3119 || i.types[j].bitfield.acc
3120 || i.types[j].bitfield.floatacc)
3121 i.reg_operands--;
3122
3123 /* ImmExt should be processed after SSE2AVX. */
3124 if (!i.tm.opcode_modifier.sse2avx
3125 && i.tm.opcode_modifier.immext)
3126 process_immext ();
3127
3128 /* For insns with operands there are more diddles to do to the opcode. */
3129 if (i.operands)
3130 {
3131 if (!process_operands ())
3132 return;
3133 }
3134 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3135 {
3136 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3137 as_warn (_("translating to `%sp'"), i.tm.name);
3138 }
3139
3140 if (i.tm.opcode_modifier.vex)
3141 build_vex_prefix (t);
3142
3143 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3144 instructions may define INT_OPCODE as well, so avoid this corner
3145 case for those instructions that use MODRM. */
3146 if (i.tm.base_opcode == INT_OPCODE
3147 && !i.tm.opcode_modifier.modrm
3148 && i.op[0].imms->X_add_number == 3)
3149 {
3150 i.tm.base_opcode = INT3_OPCODE;
3151 i.imm_operands = 0;
3152 }
3153
3154 if ((i.tm.opcode_modifier.jump
3155 || i.tm.opcode_modifier.jumpbyte
3156 || i.tm.opcode_modifier.jumpdword)
3157 && i.op[0].disps->X_op == O_constant)
3158 {
3159 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3160 the absolute address given by the constant. Since ix86 jumps and
3161 calls are pc relative, we need to generate a reloc. */
3162 i.op[0].disps->X_add_symbol = &abs_symbol;
3163 i.op[0].disps->X_op = O_symbol;
3164 }
3165
3166 if (i.tm.opcode_modifier.rex64)
3167 i.rex |= REX_W;
3168
3169 /* For 8 bit registers we need an empty rex prefix. Also if the
3170 instruction already has a prefix, we need to convert old
3171 registers to new ones. */
3172
3173 if ((i.types[0].bitfield.reg8
3174 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3175 || (i.types[1].bitfield.reg8
3176 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3177 || ((i.types[0].bitfield.reg8
3178 || i.types[1].bitfield.reg8)
3179 && i.rex != 0))
3180 {
3181 int x;
3182
3183 i.rex |= REX_OPCODE;
3184 for (x = 0; x < 2; x++)
3185 {
3186 /* Look for 8 bit operand that uses old registers. */
3187 if (i.types[x].bitfield.reg8
3188 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3189 {
3190 /* In case it is "hi" register, give up. */
3191 if (i.op[x].regs->reg_num > 3)
3192 as_bad (_("can't encode register '%s%s' in an "
3193 "instruction requiring REX prefix."),
3194 register_prefix, i.op[x].regs->reg_name);
3195
3196 /* Otherwise it is equivalent to the extended register.
3197 Since the encoding doesn't change this is merely
3198 cosmetic cleanup for debug output. */
3199
3200 i.op[x].regs = i.op[x].regs + 8;
3201 }
3202 }
3203 }
3204
3205 if (i.rex != 0)
3206 add_prefix (REX_OPCODE | i.rex);
3207
3208 /* We are ready to output the insn. */
3209 output_insn ();
3210 }
3211
3212 static char *
3213 parse_insn (char *line, char *mnemonic)
3214 {
3215 char *l = line;
3216 char *token_start = l;
3217 char *mnem_p;
3218 int supported;
3219 const insn_template *t;
3220 char *dot_p = NULL;
3221
3222 /* Non-zero if we found a prefix only acceptable with string insns. */
3223 const char *expecting_string_instruction = NULL;
3224
3225 while (1)
3226 {
3227 mnem_p = mnemonic;
3228 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3229 {
3230 if (*mnem_p == '.')
3231 dot_p = mnem_p;
3232 mnem_p++;
3233 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3234 {
3235 as_bad (_("no such instruction: `%s'"), token_start);
3236 return NULL;
3237 }
3238 l++;
3239 }
3240 if (!is_space_char (*l)
3241 && *l != END_OF_INSN
3242 && (intel_syntax
3243 || (*l != PREFIX_SEPARATOR
3244 && *l != ',')))
3245 {
3246 as_bad (_("invalid character %s in mnemonic"),
3247 output_invalid (*l));
3248 return NULL;
3249 }
3250 if (token_start == l)
3251 {
3252 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3253 as_bad (_("expecting prefix; got nothing"));
3254 else
3255 as_bad (_("expecting mnemonic; got nothing"));
3256 return NULL;
3257 }
3258
3259 /* Look up instruction (or prefix) via hash table. */
3260 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3261
3262 if (*l != END_OF_INSN
3263 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3264 && current_templates
3265 && current_templates->start->opcode_modifier.isprefix)
3266 {
3267 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3268 {
3269 as_bad ((flag_code != CODE_64BIT
3270 ? _("`%s' is only supported in 64-bit mode")
3271 : _("`%s' is not supported in 64-bit mode")),
3272 current_templates->start->name);
3273 return NULL;
3274 }
3275 /* If we are in 16-bit mode, do not allow addr16 or data16.
3276 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3277 if ((current_templates->start->opcode_modifier.size16
3278 || current_templates->start->opcode_modifier.size32)
3279 && flag_code != CODE_64BIT
3280 && (current_templates->start->opcode_modifier.size32
3281 ^ (flag_code == CODE_16BIT)))
3282 {
3283 as_bad (_("redundant %s prefix"),
3284 current_templates->start->name);
3285 return NULL;
3286 }
3287 /* Add prefix, checking for repeated prefixes. */
3288 switch (add_prefix (current_templates->start->base_opcode))
3289 {
3290 case PREFIX_EXIST:
3291 return NULL;
3292 case PREFIX_REP:
3293 expecting_string_instruction = current_templates->start->name;
3294 break;
3295 default:
3296 break;
3297 }
3298 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3299 token_start = ++l;
3300 }
3301 else
3302 break;
3303 }
3304
3305 if (!current_templates)
3306 {
3307 /* Check if we should swap operand or force 32bit displacement in
3308 encoding. */
3309 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3310 i.swap_operand = 1;
3311 else if (mnem_p - 4 == dot_p
3312 && dot_p[1] == 'd'
3313 && dot_p[2] == '3'
3314 && dot_p[3] == '2')
3315 i.disp32_encoding = 1;
3316 else
3317 goto check_suffix;
3318 mnem_p = dot_p;
3319 *dot_p = '\0';
3320 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3321 }
3322
3323 if (!current_templates)
3324 {
3325 check_suffix:
3326 /* See if we can get a match by trimming off a suffix. */
3327 switch (mnem_p[-1])
3328 {
3329 case WORD_MNEM_SUFFIX:
3330 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3331 i.suffix = SHORT_MNEM_SUFFIX;
3332 else
3333 case BYTE_MNEM_SUFFIX:
3334 case QWORD_MNEM_SUFFIX:
3335 i.suffix = mnem_p[-1];
3336 mnem_p[-1] = '\0';
3337 current_templates = (const templates *) hash_find (op_hash,
3338 mnemonic);
3339 break;
3340 case SHORT_MNEM_SUFFIX:
3341 case LONG_MNEM_SUFFIX:
3342 if (!intel_syntax)
3343 {
3344 i.suffix = mnem_p[-1];
3345 mnem_p[-1] = '\0';
3346 current_templates = (const templates *) hash_find (op_hash,
3347 mnemonic);
3348 }
3349 break;
3350
3351 /* Intel Syntax. */
3352 case 'd':
3353 if (intel_syntax)
3354 {
3355 if (intel_float_operand (mnemonic) == 1)
3356 i.suffix = SHORT_MNEM_SUFFIX;
3357 else
3358 i.suffix = LONG_MNEM_SUFFIX;
3359 mnem_p[-1] = '\0';
3360 current_templates = (const templates *) hash_find (op_hash,
3361 mnemonic);
3362 }
3363 break;
3364 }
3365 if (!current_templates)
3366 {
3367 as_bad (_("no such instruction: `%s'"), token_start);
3368 return NULL;
3369 }
3370 }
3371
3372 if (current_templates->start->opcode_modifier.jump
3373 || current_templates->start->opcode_modifier.jumpbyte)
3374 {
3375 /* Check for a branch hint. We allow ",pt" and ",pn" for
3376 predict taken and predict not taken respectively.
3377 I'm not sure that branch hints actually do anything on loop
3378 and jcxz insns (JumpByte) for current Pentium4 chips. They
3379 may work in the future and it doesn't hurt to accept them
3380 now. */
3381 if (l[0] == ',' && l[1] == 'p')
3382 {
3383 if (l[2] == 't')
3384 {
3385 if (!add_prefix (DS_PREFIX_OPCODE))
3386 return NULL;
3387 l += 3;
3388 }
3389 else if (l[2] == 'n')
3390 {
3391 if (!add_prefix (CS_PREFIX_OPCODE))
3392 return NULL;
3393 l += 3;
3394 }
3395 }
3396 }
3397 /* Any other comma loses. */
3398 if (*l == ',')
3399 {
3400 as_bad (_("invalid character %s in mnemonic"),
3401 output_invalid (*l));
3402 return NULL;
3403 }
3404
3405 /* Check if instruction is supported on specified architecture. */
3406 supported = 0;
3407 for (t = current_templates->start; t < current_templates->end; ++t)
3408 {
3409 supported |= cpu_flags_match (t);
3410 if (supported == CPU_FLAGS_PERFECT_MATCH)
3411 goto skip;
3412 }
3413
3414 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3415 {
3416 as_bad (flag_code == CODE_64BIT
3417 ? _("`%s' is not supported in 64-bit mode")
3418 : _("`%s' is only supported in 64-bit mode"),
3419 current_templates->start->name);
3420 return NULL;
3421 }
3422 if (supported != CPU_FLAGS_PERFECT_MATCH)
3423 {
3424 as_bad (_("`%s' is not supported on `%s%s'"),
3425 current_templates->start->name,
3426 cpu_arch_name ? cpu_arch_name : default_arch,
3427 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3428 return NULL;
3429 }
3430
3431 skip:
3432 if (!cpu_arch_flags.bitfield.cpui386
3433 && (flag_code != CODE_16BIT))
3434 {
3435 as_warn (_("use .code16 to ensure correct addressing mode"));
3436 }
3437
3438 /* Check for rep/repne without a string instruction. */
3439 if (expecting_string_instruction)
3440 {
3441 static templates override;
3442
3443 for (t = current_templates->start; t < current_templates->end; ++t)
3444 if (t->opcode_modifier.isstring)
3445 break;
3446 if (t >= current_templates->end)
3447 {
3448 as_bad (_("expecting string instruction after `%s'"),
3449 expecting_string_instruction);
3450 return NULL;
3451 }
3452 for (override.start = t; t < current_templates->end; ++t)
3453 if (!t->opcode_modifier.isstring)
3454 break;
3455 override.end = t;
3456 current_templates = &override;
3457 }
3458
3459 return l;
3460 }
3461
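/* Illustrative sketch, not part of the original source: forms accepted by
   parse_insn () beyond a plain mnemonic, per the code above:

     rep movsb          # "rep" is consumed as a prefix; a string insn
                        # must follow, or an error is issued
     movl.s %eax, %ebx  # trailing ".s" requests the swapped-operand form
     jmp.d32 target     # ".d32" forces a 32-bit displacement encoding
     jne,pt target      # ",pt"/",pn" branch hints become DS/CS prefixes

   In AT&T mode an unknown mnemonic is also retried with its size suffix
   (b/w/l/q/s) stripped before "no such instruction" is reported.  */
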
3462 static char *
3463 parse_operands (char *l, const char *mnemonic)
3464 {
3465 char *token_start;
3466
3467 /* 1 if operand is pending after ','. */
3468 unsigned int expecting_operand = 0;
3469
3470 /* Non-zero if operand parens not balanced. */
3471 unsigned int paren_not_balanced;
3472
3473 while (*l != END_OF_INSN)
3474 {
3475 /* Skip optional white space before operand. */
3476 if (is_space_char (*l))
3477 ++l;
3478 if (!is_operand_char (*l) && *l != END_OF_INSN)
3479 {
3480 as_bad (_("invalid character %s before operand %d"),
3481 output_invalid (*l),
3482 i.operands + 1);
3483 return NULL;
3484 }
3485 token_start = l; /* after white space */
3486 paren_not_balanced = 0;
3487 while (paren_not_balanced || *l != ',')
3488 {
3489 if (*l == END_OF_INSN)
3490 {
3491 if (paren_not_balanced)
3492 {
3493 if (!intel_syntax)
3494 as_bad (_("unbalanced parenthesis in operand %d."),
3495 i.operands + 1);
3496 else
3497 as_bad (_("unbalanced brackets in operand %d."),
3498 i.operands + 1);
3499 return NULL;
3500 }
3501 else
3502 break; /* we are done */
3503 }
3504 else if (!is_operand_char (*l) && !is_space_char (*l))
3505 {
3506 as_bad (_("invalid character %s in operand %d"),
3507 output_invalid (*l),
3508 i.operands + 1);
3509 return NULL;
3510 }
3511 if (!intel_syntax)
3512 {
3513 if (*l == '(')
3514 ++paren_not_balanced;
3515 if (*l == ')')
3516 --paren_not_balanced;
3517 }
3518 else
3519 {
3520 if (*l == '[')
3521 ++paren_not_balanced;
3522 if (*l == ']')
3523 --paren_not_balanced;
3524 }
3525 l++;
3526 }
3527 if (l != token_start)
3528 { /* Yes, we've read in another operand. */
3529 unsigned int operand_ok;
3530 this_operand = i.operands++;
3531 i.types[this_operand].bitfield.unspecified = 1;
3532 if (i.operands > MAX_OPERANDS)
3533 {
3534 as_bad (_("spurious operands; (%d operands/instruction max)"),
3535 MAX_OPERANDS);
3536 return NULL;
3537 }
3538 /* Now parse operand adding info to 'i' as we go along. */
3539 END_STRING_AND_SAVE (l);
3540
3541 if (intel_syntax)
3542 operand_ok =
3543 i386_intel_operand (token_start,
3544 intel_float_operand (mnemonic));
3545 else
3546 operand_ok = i386_att_operand (token_start);
3547
3548 RESTORE_END_STRING (l);
3549 if (!operand_ok)
3550 return NULL;
3551 }
3552 else
3553 {
3554 if (expecting_operand)
3555 {
3556 expecting_operand_after_comma:
3557 as_bad (_("expecting operand after ','; got nothing"));
3558 return NULL;
3559 }
3560 if (*l == ',')
3561 {
3562 as_bad (_("expecting operand before ','; got nothing"));
3563 return NULL;
3564 }
3565 }
3566
3567 /* Now *l must be either ',' or END_OF_INSN. */
3568 if (*l == ',')
3569 {
3570 if (*++l == END_OF_INSN)
3571 {
3572 /* Just skip it, if it's \n complain. */
3573 goto expecting_operand_after_comma;
3574 }
3575 expecting_operand = 1;
3576 }
3577 }
3578 return l;
3579 }
3580
3581 static void
3582 swap_2_operands (int xchg1, int xchg2)
3583 {
3584 union i386_op temp_op;
3585 i386_operand_type temp_type;
3586 enum bfd_reloc_code_real temp_reloc;
3587
3588 temp_type = i.types[xchg2];
3589 i.types[xchg2] = i.types[xchg1];
3590 i.types[xchg1] = temp_type;
3591 temp_op = i.op[xchg2];
3592 i.op[xchg2] = i.op[xchg1];
3593 i.op[xchg1] = temp_op;
3594 temp_reloc = i.reloc[xchg2];
3595 i.reloc[xchg2] = i.reloc[xchg1];
3596 i.reloc[xchg1] = temp_reloc;
3597 }
3598
3599 static void
3600 swap_operands (void)
3601 {
3602 switch (i.operands)
3603 {
3604 case 5:
3605 case 4:
3606 swap_2_operands (1, i.operands - 2);
3607 case 3:
3608 case 2:
3609 swap_2_operands (0, i.operands - 1);
3610 break;
3611 default:
3612 abort ();
3613 }
3614
3615 if (i.mem_operands == 2)
3616 {
3617 const seg_entry *temp_seg;
3618 temp_seg = i.seg[0];
3619 i.seg[0] = i.seg[1];
3620 i.seg[1] = temp_seg;
3621 }
3622 }
3623
3624 /* Try to ensure constant immediates are represented in the smallest
3625 opcode possible. */
3626 static void
3627 optimize_imm (void)
3628 {
3629 char guess_suffix = 0;
3630 int op;
3631
3632 if (i.suffix)
3633 guess_suffix = i.suffix;
3634 else if (i.reg_operands)
3635 {
3636 /* Figure out a suffix from the last register operand specified.
3637 We can't do this properly yet, i.e. excluding InOutPortReg,
3638 but the following works for instructions with immediates.
3639 In any case, we can't set i.suffix yet. */
3640 for (op = i.operands; --op >= 0;)
3641 if (i.types[op].bitfield.reg8)
3642 {
3643 guess_suffix = BYTE_MNEM_SUFFIX;
3644 break;
3645 }
3646 else if (i.types[op].bitfield.reg16)
3647 {
3648 guess_suffix = WORD_MNEM_SUFFIX;
3649 break;
3650 }
3651 else if (i.types[op].bitfield.reg32)
3652 {
3653 guess_suffix = LONG_MNEM_SUFFIX;
3654 break;
3655 }
3656 else if (i.types[op].bitfield.reg64)
3657 {
3658 guess_suffix = QWORD_MNEM_SUFFIX;
3659 break;
3660 }
3661 }
3662 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3663 guess_suffix = WORD_MNEM_SUFFIX;
3664
3665 for (op = i.operands; --op >= 0;)
3666 if (operand_type_check (i.types[op], imm))
3667 {
3668 switch (i.op[op].imms->X_op)
3669 {
3670 case O_constant:
3671 /* If a suffix is given, this operand may be shortened. */
3672 switch (guess_suffix)
3673 {
3674 case LONG_MNEM_SUFFIX:
3675 i.types[op].bitfield.imm32 = 1;
3676 i.types[op].bitfield.imm64 = 1;
3677 break;
3678 case WORD_MNEM_SUFFIX:
3679 i.types[op].bitfield.imm16 = 1;
3680 i.types[op].bitfield.imm32 = 1;
3681 i.types[op].bitfield.imm32s = 1;
3682 i.types[op].bitfield.imm64 = 1;
3683 break;
3684 case BYTE_MNEM_SUFFIX:
3685 i.types[op].bitfield.imm8 = 1;
3686 i.types[op].bitfield.imm8s = 1;
3687 i.types[op].bitfield.imm16 = 1;
3688 i.types[op].bitfield.imm32 = 1;
3689 i.types[op].bitfield.imm32s = 1;
3690 i.types[op].bitfield.imm64 = 1;
3691 break;
3692 }
3693
3694 /* If this operand is at most 16 bits, convert it
3695 to a signed 16 bit number before trying to see
3696 whether it will fit in an even smaller size.
3697 This allows a 16-bit operand such as $0xffe0 to
3698 be recognised as within Imm8S range. */
3699 if ((i.types[op].bitfield.imm16)
3700 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3701 {
3702 i.op[op].imms->X_add_number =
3703 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3704 }
3705 if ((i.types[op].bitfield.imm32)
3706 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3707 == 0))
3708 {
3709 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3710 ^ ((offsetT) 1 << 31))
3711 - ((offsetT) 1 << 31));
3712 }
3713 i.types[op]
3714 = operand_type_or (i.types[op],
3715 smallest_imm_type (i.op[op].imms->X_add_number));
3716
3717 /* We must avoid matching of Imm32 templates when 64bit
3718 only immediate is available. */
3719 if (guess_suffix == QWORD_MNEM_SUFFIX)
3720 i.types[op].bitfield.imm32 = 0;
3721 break;
3722
3723 case O_absent:
3724 case O_register:
3725 abort ();
3726
3727 /* Symbols and expressions. */
3728 default:
3729 /* Convert symbolic operand to proper sizes for matching, but don't
3730 prevent matching a set of insns that only supports sizes other
3731 than those matching the insn suffix. */
3732 {
3733 i386_operand_type mask, allowed;
3734 const insn_template *t;
3735
3736 operand_type_set (&mask, 0);
3737 operand_type_set (&allowed, 0);
3738
3739 for (t = current_templates->start;
3740 t < current_templates->end;
3741 ++t)
3742 allowed = operand_type_or (allowed,
3743 t->operand_types[op]);
3744 switch (guess_suffix)
3745 {
3746 case QWORD_MNEM_SUFFIX:
3747 mask.bitfield.imm64 = 1;
3748 mask.bitfield.imm32s = 1;
3749 break;
3750 case LONG_MNEM_SUFFIX:
3751 mask.bitfield.imm32 = 1;
3752 break;
3753 case WORD_MNEM_SUFFIX:
3754 mask.bitfield.imm16 = 1;
3755 break;
3756 case BYTE_MNEM_SUFFIX:
3757 mask.bitfield.imm8 = 1;
3758 break;
3759 default:
3760 break;
3761 }
3762 allowed = operand_type_and (mask, allowed);
3763 if (!operand_type_all_zero (&allowed))
3764 i.types[op] = operand_type_and (i.types[op], mask);
3765 }
3766 break;
3767 }
3768 }
3769 }
3770
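/* Illustrative sketch, not part of the original source: the 16-bit
   wrap-around above is what lets, e.g.,

     addw $0xffe0, %ax

   match an Imm8S template: 0xffe0 is first reduced to the signed value
   -32, which then passes fits_in_signed_byte and picks up Imm8S from
   smallest_imm_type ().  */
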
3771 /* Try to use the smallest displacement type too. */
3772 static void
3773 optimize_disp (void)
3774 {
3775 int op;
3776
3777 for (op = i.operands; --op >= 0;)
3778 if (operand_type_check (i.types[op], disp))
3779 {
3780 if (i.op[op].disps->X_op == O_constant)
3781 {
3782 offsetT op_disp = i.op[op].disps->X_add_number;
3783
3784 if (i.types[op].bitfield.disp16
3785 && (op_disp & ~(offsetT) 0xffff) == 0)
3786 {
3787 /* If this operand is at most 16 bits, convert
3788 to a signed 16 bit number and don't use 64bit
3789 displacement. */
3790 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3791 i.types[op].bitfield.disp64 = 0;
3792 }
3793 if (i.types[op].bitfield.disp32
3794 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3795 {
3796 /* If this operand is at most 32 bits, convert
3797 to a signed 32 bit number and don't use 64bit
3798 displacement. */
3799 op_disp &= (((offsetT) 2 << 31) - 1);
3800 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3801 i.types[op].bitfield.disp64 = 0;
3802 }
3803 if (!op_disp && i.types[op].bitfield.baseindex)
3804 {
3805 i.types[op].bitfield.disp8 = 0;
3806 i.types[op].bitfield.disp16 = 0;
3807 i.types[op].bitfield.disp32 = 0;
3808 i.types[op].bitfield.disp32s = 0;
3809 i.types[op].bitfield.disp64 = 0;
3810 i.op[op].disps = 0;
3811 i.disp_operands--;
3812 }
3813 else if (flag_code == CODE_64BIT)
3814 {
3815 if (fits_in_signed_long (op_disp))
3816 {
3817 i.types[op].bitfield.disp64 = 0;
3818 i.types[op].bitfield.disp32s = 1;
3819 }
3820 if (i.prefix[ADDR_PREFIX]
3821 && fits_in_unsigned_long (op_disp))
3822 i.types[op].bitfield.disp32 = 1;
3823 }
3824 if ((i.types[op].bitfield.disp32
3825 || i.types[op].bitfield.disp32s
3826 || i.types[op].bitfield.disp16)
3827 && fits_in_signed_byte (op_disp))
3828 i.types[op].bitfield.disp8 = 1;
3829 }
3830 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3831 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3832 {
3833 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3834 i.op[op].disps, 0, i.reloc[op]);
3835 i.types[op].bitfield.disp8 = 0;
3836 i.types[op].bitfield.disp16 = 0;
3837 i.types[op].bitfield.disp32 = 0;
3838 i.types[op].bitfield.disp32s = 0;
3839 i.types[op].bitfield.disp64 = 0;
3840 }
3841 else
3842 /* We only support 64bit displacement on constants. */
3843 i.types[op].bitfield.disp64 = 0;
3844 }
3845 }
3846
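/* Illustrative sketch, not part of the original source: in 64-bit mode a
   small constant displacement such as

     movl %eax, 0x40(%rbp)

   ends up with Disp32S (it passes fits_in_signed_long) plus Disp8 (it
   also passes fits_in_signed_byte), so the shortest encoding can be
   picked later; an explicit zero displacement with a base or index
   register, e.g. 0(%rax), is dropped and i.disp_operands decremented.  */
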
3847 /* Check if operands are valid for the instruction. */
3848
3849 static int
3850 check_VecOperands (const insn_template *t)
3851 {
3852 /* Without VSIB byte, we can't have a vector register for index. */
3853 if (!t->opcode_modifier.vecsib
3854 && i.index_reg
3855 && (i.index_reg->reg_type.bitfield.regxmm
3856 || i.index_reg->reg_type.bitfield.regymm))
3857 {
3858 i.error = unsupported_vector_index_register;
3859 return 1;
3860 }
3861
3862 /* For VSIB byte, we need a vector register for index and no PC
3863 relative addressing is allowed. */
3864 if (t->opcode_modifier.vecsib
3865 && (!i.index_reg
3866 || !((t->opcode_modifier.vecsib == VecSIB128
3867 && i.index_reg->reg_type.bitfield.regxmm)
3868 || (t->opcode_modifier.vecsib == VecSIB256
3869 && i.index_reg->reg_type.bitfield.regymm))
3870 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3871 {
3872 i.error = invalid_vsib_address;
3873 return 1;
3874 }
3875
3876 return 0;
3877 }
3878
3879 /* Check if operands are valid for the instruction. Update VEX
3880 operand types. */
3881
3882 static int
3883 VEX_check_operands (const insn_template *t)
3884 {
3885 if (!t->opcode_modifier.vex)
3886 return 0;
3887
3888 /* Only check VEX_Imm4, which must be the first operand. */
3889 if (t->operand_types[0].bitfield.vec_imm4)
3890 {
3891 if (i.op[0].imms->X_op != O_constant
3892 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3893 {
3894 i.error = bad_imm4;
3895 return 1;
3896 }
3897
3898 /* Turn off Imm8 so that update_imm won't complain. */
3899 i.types[0] = vec_imm4;
3900 }
3901
3902 return 0;
3903 }
3904
3905 static const insn_template *
3906 match_template (void)
3907 {
3908 /* Points to template once we've found it. */
3909 const insn_template *t;
3910 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3911 i386_operand_type overlap4;
3912 unsigned int found_reverse_match;
3913 i386_opcode_modifier suffix_check;
3914 i386_operand_type operand_types [MAX_OPERANDS];
3915 int addr_prefix_disp;
3916 unsigned int j;
3917 unsigned int found_cpu_match;
3918 unsigned int check_register;
3919
3920 #if MAX_OPERANDS != 5
3921 # error "MAX_OPERANDS must be 5."
3922 #endif
3923
3924 found_reverse_match = 0;
3925 addr_prefix_disp = -1;
3926
3927 memset (&suffix_check, 0, sizeof (suffix_check));
3928 if (i.suffix == BYTE_MNEM_SUFFIX)
3929 suffix_check.no_bsuf = 1;
3930 else if (i.suffix == WORD_MNEM_SUFFIX)
3931 suffix_check.no_wsuf = 1;
3932 else if (i.suffix == SHORT_MNEM_SUFFIX)
3933 suffix_check.no_ssuf = 1;
3934 else if (i.suffix == LONG_MNEM_SUFFIX)
3935 suffix_check.no_lsuf = 1;
3936 else if (i.suffix == QWORD_MNEM_SUFFIX)
3937 suffix_check.no_qsuf = 1;
3938 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3939 suffix_check.no_ldsuf = 1;
3940
3941 /* Must have right number of operands. */
3942 i.error = number_of_operands_mismatch;
3943
3944 for (t = current_templates->start; t < current_templates->end; t++)
3945 {
3946 addr_prefix_disp = -1;
3947
3948 if (i.operands != t->operands)
3949 continue;
3950
3951 /* Check processor support. */
3952 i.error = unsupported;
3953 found_cpu_match = (cpu_flags_match (t)
3954 == CPU_FLAGS_PERFECT_MATCH);
3955 if (!found_cpu_match)
3956 continue;
3957
3958 /* Check old gcc support. */
3959 i.error = old_gcc_only;
3960 if (!old_gcc && t->opcode_modifier.oldgcc)
3961 continue;
3962
3963 /* Check AT&T mnemonic. */
3964 i.error = unsupported_with_intel_mnemonic;
3965 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3966 continue;
3967
3968 /* Check AT&T/Intel syntax. */
3969 i.error = unsupported_syntax;
3970 if ((intel_syntax && t->opcode_modifier.attsyntax)
3971 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3972 continue;
3973
3974 /* Check the suffix, except for some instructions in intel mode. */
3975 i.error = invalid_instruction_suffix;
3976 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3977 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3978 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3979 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3980 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3981 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3982 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3983 continue;
3984
3985 if (!operand_size_match (t))
3986 continue;
3987
3988 for (j = 0; j < MAX_OPERANDS; j++)
3989 operand_types[j] = t->operand_types[j];
3990
3991 /* In general, don't allow 64-bit operands in 32-bit mode. */
3992 if (i.suffix == QWORD_MNEM_SUFFIX
3993 && flag_code != CODE_64BIT
3994 && (intel_syntax
3995 ? (!t->opcode_modifier.ignoresize
3996 && !intel_float_operand (t->name))
3997 : intel_float_operand (t->name) != 2)
3998 && ((!operand_types[0].bitfield.regmmx
3999 && !operand_types[0].bitfield.regxmm
4000 && !operand_types[0].bitfield.regymm)
4001 || (!operand_types[t->operands > 1].bitfield.regmmx
4002 && !operand_types[t->operands > 1].bitfield.regxmm
4003 && !operand_types[t->operands > 1].bitfield.regymm))
4004 && (t->base_opcode != 0x0fc7
4005 || t->extension_opcode != 1 /* cmpxchg8b */))
4006 continue;
4007
4008 /* In general, don't allow 32-bit operands on pre-386. */
4009 else if (i.suffix == LONG_MNEM_SUFFIX
4010 && !cpu_arch_flags.bitfield.cpui386
4011 && (intel_syntax
4012 ? (!t->opcode_modifier.ignoresize
4013 && !intel_float_operand (t->name))
4014 : intel_float_operand (t->name) != 2)
4015 && ((!operand_types[0].bitfield.regmmx
4016 && !operand_types[0].bitfield.regxmm)
4017 || (!operand_types[t->operands > 1].bitfield.regmmx
4018 && !operand_types[t->operands > 1].bitfield.regxmm)))
4019 continue;
4020
4021 /* Do not verify operands when there are none. */
4022 else
4023 {
4024 if (!t->operands)
4025 /* We've found a match; break out of loop. */
4026 break;
4027 }
4028
4029 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4030 into Disp32/Disp16/Disp32 operand. */
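/* For example, with an address-size prefix in 16-bit code a memory
   operand carries a 32-bit displacement, so a template Disp16 bit is
   flipped to Disp32 here; 32-bit and 64-bit code make the corresponding
   Disp32 to Disp16 and Disp64 to Disp32 adjustments.  */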
4031 if (i.prefix[ADDR_PREFIX] != 0)
4032 {
4033 /* There should be only one Disp operand. */
4034 switch (flag_code)
4035 {
4036 case CODE_16BIT:
4037 for (j = 0; j < MAX_OPERANDS; j++)
4038 {
4039 if (operand_types[j].bitfield.disp16)
4040 {
4041 addr_prefix_disp = j;
4042 operand_types[j].bitfield.disp32 = 1;
4043 operand_types[j].bitfield.disp16 = 0;
4044 break;
4045 }
4046 }
4047 break;
4048 case CODE_32BIT:
4049 for (j = 0; j < MAX_OPERANDS; j++)
4050 {
4051 if (operand_types[j].bitfield.disp32)
4052 {
4053 addr_prefix_disp = j;
4054 operand_types[j].bitfield.disp32 = 0;
4055 operand_types[j].bitfield.disp16 = 1;
4056 break;
4057 }
4058 }
4059 break;
4060 case CODE_64BIT:
4061 for (j = 0; j < MAX_OPERANDS; j++)
4062 {
4063 if (operand_types[j].bitfield.disp64)
4064 {
4065 addr_prefix_disp = j;
4066 operand_types[j].bitfield.disp64 = 0;
4067 operand_types[j].bitfield.disp32 = 1;
4068 break;
4069 }
4070 }
4071 break;
4072 }
4073 }
4074
4075 /* We check register size if needed. */
4076 check_register = t->opcode_modifier.checkregsize;
4077 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4078 switch (t->operands)
4079 {
4080 case 1:
4081 if (!operand_type_match (overlap0, i.types[0]))
4082 continue;
4083 break;
4084 case 2:
4085 /* xchg %eax, %eax is a special case. It is an alias for nop
4086 only in 32bit mode and we can use opcode 0x90. In 64bit
4087 mode, we can't use 0x90 for xchg %eax, %eax since it should
4088 zero-extend %eax to %rax. */
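/* In 64-bit code `xchg %eax,%eax' therefore skips this short form and
   matches the two-operand 0x87 template instead, which does
   zero-extend.  */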
4089 if (flag_code == CODE_64BIT
4090 && t->base_opcode == 0x90
4091 && operand_type_equal (&i.types [0], &acc32)
4092 && operand_type_equal (&i.types [1], &acc32))
4093 continue;
4094 if (i.swap_operand)
4095 {
4096 /* If we swap operands in the encoding, we either match
4097 the next template or reverse the direction of the operands. */
4098 if (t->opcode_modifier.s)
4099 continue;
4100 else if (t->opcode_modifier.d)
4101 goto check_reverse;
4102 }
4103
4104 case 3:
4105 /* If we swap operands in the encoding, we match the next template. */
4106 if (i.swap_operand && t->opcode_modifier.s)
4107 continue;
4108 case 4:
4109 case 5:
4110 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4111 if (!operand_type_match (overlap0, i.types[0])
4112 || !operand_type_match (overlap1, i.types[1])
4113 || (check_register
4114 && !operand_type_register_match (overlap0, i.types[0],
4115 operand_types[0],
4116 overlap1, i.types[1],
4117 operand_types[1])))
4118 {
4119 /* Check if other direction is valid ... */
4120 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4121 continue;
4122
4123 check_reverse:
4124 /* Try reversing direction of operands. */
4125 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4126 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4127 if (!operand_type_match (overlap0, i.types[0])
4128 || !operand_type_match (overlap1, i.types[1])
4129 || (check_register
4130 && !operand_type_register_match (overlap0,
4131 i.types[0],
4132 operand_types[1],
4133 overlap1,
4134 i.types[1],
4135 operand_types[0])))
4136 {
4137 /* Does not match either direction. */
4138 continue;
4139 }
4140 /* found_reverse_match holds which of D or FloatDR
4141 we've found. */
4142 if (t->opcode_modifier.d)
4143 found_reverse_match = Opcode_D;
4144 else if (t->opcode_modifier.floatd)
4145 found_reverse_match = Opcode_FloatD;
4146 else
4147 found_reverse_match = 0;
4148 if (t->opcode_modifier.floatr)
4149 found_reverse_match |= Opcode_FloatR;
4150 }
4151 else
4152 {
4153 /* Found a forward 2 operand match here. */
4154 switch (t->operands)
4155 {
4156 case 5:
4157 overlap4 = operand_type_and (i.types[4],
4158 operand_types[4]);
4159 case 4:
4160 overlap3 = operand_type_and (i.types[3],
4161 operand_types[3]);
4162 case 3:
4163 overlap2 = operand_type_and (i.types[2],
4164 operand_types[2]);
4165 break;
4166 }
4167
4168 switch (t->operands)
4169 {
4170 case 5:
4171 if (!operand_type_match (overlap4, i.types[4])
4172 || !operand_type_register_match (overlap3,
4173 i.types[3],
4174 operand_types[3],
4175 overlap4,
4176 i.types[4],
4177 operand_types[4]))
4178 continue;
4179 case 4:
4180 if (!operand_type_match (overlap3, i.types[3])
4181 || (check_register
4182 && !operand_type_register_match (overlap2,
4183 i.types[2],
4184 operand_types[2],
4185 overlap3,
4186 i.types[3],
4187 operand_types[3])))
4188 continue;
4189 case 3:
4190 /* Here we make use of the fact that there are no
4191 reverse match 3 operand instructions, and all 3
4192 operand instructions only need to be checked for
4193 register consistency between operands 2 and 3. */
4194 if (!operand_type_match (overlap2, i.types[2])
4195 || (check_register
4196 && !operand_type_register_match (overlap1,
4197 i.types[1],
4198 operand_types[1],
4199 overlap2,
4200 i.types[2],
4201 operand_types[2])))
4202 continue;
4203 break;
4204 }
4205 }
4206 /* Found either forward/reverse 2, 3 or 4 operand match here:
4207 slip through to break. */
4208 }
4209 if (!found_cpu_match)
4210 {
4211 found_reverse_match = 0;
4212 continue;
4213 }
4214
4215 /* Check if vector operands are valid. */
4216 if (check_VecOperands (t))
4217 continue;
4218
4219 /* Check if VEX operands are valid. */
4220 if (VEX_check_operands (t))
4221 continue;
4222
4223 /* We've found a match; break out of loop. */
4224 break;
4225 }
4226
4227 if (t == current_templates->end)
4228 {
4229 /* We found no match. */
4230 const char *err_msg;
4231 switch (i.error)
4232 {
4233 default:
4234 abort ();
4235 case operand_size_mismatch:
4236 err_msg = _("operand size mismatch");
4237 break;
4238 case operand_type_mismatch:
4239 err_msg = _("operand type mismatch");
4240 break;
4241 case register_type_mismatch:
4242 err_msg = _("register type mismatch");
4243 break;
4244 case number_of_operands_mismatch:
4245 err_msg = _("number of operands mismatch");
4246 break;
4247 case invalid_instruction_suffix:
4248 err_msg = _("invalid instruction suffix");
4249 break;
4250 case bad_imm4:
4251 err_msg = _("Imm4 isn't the first operand");
4252 break;
4253 case old_gcc_only:
4254 err_msg = _("only supported with old gcc");
4255 break;
4256 case unsupported_with_intel_mnemonic:
4257 err_msg = _("unsupported with Intel mnemonic");
4258 break;
4259 case unsupported_syntax:
4260 err_msg = _("unsupported syntax");
4261 break;
4262 case unsupported:
4263 err_msg = _("unsupported");
4264 break;
4265 case invalid_vsib_address:
4266 err_msg = _("invalid VSIB address");
4267 break;
4268 case unsupported_vector_index_register:
4269 err_msg = _("unsupported vector index register");
4270 break;
4271 }
4272 as_bad (_("%s for `%s'"), err_msg,
4273 current_templates->start->name);
4274 return NULL;
4275 }
4276
4277 if (!quiet_warnings)
4278 {
4279 if (!intel_syntax
4280 && (i.types[0].bitfield.jumpabsolute
4281 != operand_types[0].bitfield.jumpabsolute))
4282 {
4283 as_warn (_("indirect %s without `*'"), t->name);
4284 }
4285
4286 if (t->opcode_modifier.isprefix
4287 && t->opcode_modifier.ignoresize)
4288 {
4289 /* Warn them that a data or address size prefix doesn't
4290 affect assembly of the next line of code. */
4291 as_warn (_("stand-alone `%s' prefix"), t->name);
4292 }
4293 }
4294
4295 /* Copy the template we found. */
4296 i.tm = *t;
4297
4298 if (addr_prefix_disp != -1)
4299 i.tm.operand_types[addr_prefix_disp]
4300 = operand_types[addr_prefix_disp];
4301
4302 if (found_reverse_match)
4303 {
4304 /* If we found a reverse match we must alter the opcode
4305 direction bit. found_reverse_match holds bits to change
4306 (different for int & float insns). */
4307
4308 i.tm.base_opcode ^= found_reverse_match;
4309
4310 i.tm.operand_types[0] = operand_types[1];
4311 i.tm.operand_types[1] = operand_types[0];
4312 }
4313
4314 return t;
4315 }
4316
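/* Verify the %es requirement of string instructions (e.g. movs, stos,
   scas, cmps): the operand the template marks with EsSeg must use the
   %es segment, and any other explicit segment override on it is an
   error.  */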
4317 static int
4318 check_string (void)
4319 {
4320 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4321 if (i.tm.operand_types[mem_op].bitfield.esseg)
4322 {
4323 if (i.seg[0] != NULL && i.seg[0] != &es)
4324 {
4325 as_bad (_("`%s' operand %d must use `%ses' segment"),
4326 i.tm.name,
4327 mem_op + 1,
4328 register_prefix);
4329 return 0;
4330 }
4331 /* There's only ever one segment override allowed per instruction.
4332 This instruction possibly has a legal segment override on the
4333 second operand, so copy the segment to where non-string
4334 instructions store it, allowing common code. */
4335 i.seg[0] = i.seg[1];
4336 }
4337 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4338 {
4339 if (i.seg[1] != NULL && i.seg[1] != &es)
4340 {
4341 as_bad (_("`%s' operand %d must use `%ses' segment"),
4342 i.tm.name,
4343 mem_op + 2,
4344 register_prefix);
4345 return 0;
4346 }
4347 }
4348 return 1;
4349 }
4350
4351 static int
4352 process_suffix (void)
4353 {
4354 /* If matched instruction specifies an explicit instruction mnemonic
4355 suffix, use it. */
4356 if (i.tm.opcode_modifier.size16)
4357 i.suffix = WORD_MNEM_SUFFIX;
4358 else if (i.tm.opcode_modifier.size32)
4359 i.suffix = LONG_MNEM_SUFFIX;
4360 else if (i.tm.opcode_modifier.size64)
4361 i.suffix = QWORD_MNEM_SUFFIX;
4362 else if (i.reg_operands)
4363 {
4364 /* If there's no instruction mnemonic suffix we try to invent one
4365 based on register operands. */
4366 if (!i.suffix)
4367 {
4368 /* We take i.suffix from the last register operand specified.
4369 The destination register type is more significant than the
4370 source register type, except that crc32 in SSE4.2 prefers the
4371 source register type. */
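/* For example, `crc32 %al,%ebx' (no suffix) matches the 0xf20f38f0
   template and takes a `b' suffix from its %al source, while
   `crc32 %eax,%ebx' takes an `l' suffix.  */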
4372 if (i.tm.base_opcode == 0xf20f38f1)
4373 {
4374 if (i.types[0].bitfield.reg16)
4375 i.suffix = WORD_MNEM_SUFFIX;
4376 else if (i.types[0].bitfield.reg32)
4377 i.suffix = LONG_MNEM_SUFFIX;
4378 else if (i.types[0].bitfield.reg64)
4379 i.suffix = QWORD_MNEM_SUFFIX;
4380 }
4381 else if (i.tm.base_opcode == 0xf20f38f0)
4382 {
4383 if (i.types[0].bitfield.reg8)
4384 i.suffix = BYTE_MNEM_SUFFIX;
4385 }
4386
4387 if (!i.suffix)
4388 {
4389 int op;
4390
4391 if (i.tm.base_opcode == 0xf20f38f1
4392 || i.tm.base_opcode == 0xf20f38f0)
4393 {
4394 /* We have to know the operand size for crc32. */
4395 as_bad (_("ambiguous memory operand size for `%s'"),
4396 i.tm.name);
4397 return 0;
4398 }
4399
4400 for (op = i.operands; --op >= 0;)
4401 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4402 {
4403 if (i.types[op].bitfield.reg8)
4404 {
4405 i.suffix = BYTE_MNEM_SUFFIX;
4406 break;
4407 }
4408 else if (i.types[op].bitfield.reg16)
4409 {
4410 i.suffix = WORD_MNEM_SUFFIX;
4411 break;
4412 }
4413 else if (i.types[op].bitfield.reg32)
4414 {
4415 i.suffix = LONG_MNEM_SUFFIX;
4416 break;
4417 }
4418 else if (i.types[op].bitfield.reg64)
4419 {
4420 i.suffix = QWORD_MNEM_SUFFIX;
4421 break;
4422 }
4423 }
4424 }
4425 }
4426 else if (i.suffix == BYTE_MNEM_SUFFIX)
4427 {
4428 if (intel_syntax
4429 && i.tm.opcode_modifier.ignoresize
4430 && i.tm.opcode_modifier.no_bsuf)
4431 i.suffix = 0;
4432 else if (!check_byte_reg ())
4433 return 0;
4434 }
4435 else if (i.suffix == LONG_MNEM_SUFFIX)
4436 {
4437 if (intel_syntax
4438 && i.tm.opcode_modifier.ignoresize
4439 && i.tm.opcode_modifier.no_lsuf)
4440 i.suffix = 0;
4441 else if (!check_long_reg ())
4442 return 0;
4443 }
4444 else if (i.suffix == QWORD_MNEM_SUFFIX)
4445 {
4446 if (intel_syntax
4447 && i.tm.opcode_modifier.ignoresize
4448 && i.tm.opcode_modifier.no_qsuf)
4449 i.suffix = 0;
4450 else if (!check_qword_reg ())
4451 return 0;
4452 }
4453 else if (i.suffix == WORD_MNEM_SUFFIX)
4454 {
4455 if (intel_syntax
4456 && i.tm.opcode_modifier.ignoresize
4457 && i.tm.opcode_modifier.no_wsuf)
4458 i.suffix = 0;
4459 else if (!check_word_reg ())
4460 return 0;
4461 }
4462 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4463 || i.suffix == YMMWORD_MNEM_SUFFIX)
4464 {
4465 /* Skip if the instruction has x/y suffix. match_template
4466 should check if it is a valid suffix. */
4467 }
4468 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4469 /* Do nothing if the instruction is going to ignore the prefix. */
4470 ;
4471 else
4472 abort ();
4473 }
4474 else if (i.tm.opcode_modifier.defaultsize
4475 && !i.suffix
4476 /* exclude fldenv/frstor/fsave/fstenv */
4477 && i.tm.opcode_modifier.no_ssuf)
4478 {
4479 i.suffix = stackop_size;
4480 }
4481 else if (intel_syntax
4482 && !i.suffix
4483 && (i.tm.operand_types[0].bitfield.jumpabsolute
4484 || i.tm.opcode_modifier.jumpbyte
4485 || i.tm.opcode_modifier.jumpintersegment
4486 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4487 && i.tm.extension_opcode <= 3)))
4488 {
4489 switch (flag_code)
4490 {
4491 case CODE_64BIT:
4492 if (!i.tm.opcode_modifier.no_qsuf)
4493 {
4494 i.suffix = QWORD_MNEM_SUFFIX;
4495 break;
4496 }
4497 case CODE_32BIT:
4498 if (!i.tm.opcode_modifier.no_lsuf)
4499 i.suffix = LONG_MNEM_SUFFIX;
4500 break;
4501 case CODE_16BIT:
4502 if (!i.tm.opcode_modifier.no_wsuf)
4503 i.suffix = WORD_MNEM_SUFFIX;
4504 break;
4505 }
4506 }
4507
4508 if (!i.suffix)
4509 {
4510 if (!intel_syntax)
4511 {
4512 if (i.tm.opcode_modifier.w)
4513 {
4514 as_bad (_("no instruction mnemonic suffix given and "
4515 "no register operands; can't size instruction"));
4516 return 0;
4517 }
4518 }
4519 else
4520 {
4521 unsigned int suffixes;
4522
4523 suffixes = !i.tm.opcode_modifier.no_bsuf;
4524 if (!i.tm.opcode_modifier.no_wsuf)
4525 suffixes |= 1 << 1;
4526 if (!i.tm.opcode_modifier.no_lsuf)
4527 suffixes |= 1 << 2;
4528 if (!i.tm.opcode_modifier.no_ldsuf)
4529 suffixes |= 1 << 3;
4530 if (!i.tm.opcode_modifier.no_ssuf)
4531 suffixes |= 1 << 4;
4532 if (!i.tm.opcode_modifier.no_qsuf)
4533 suffixes |= 1 << 5;
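/* `suffixes' now has one bit set per suffix the template still allows
   (b, w, l, ld, s, q); `suffixes & (suffixes - 1)' below is non-zero
   exactly when more than one of those bits is set.  */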
4534
4535 /* There is more than one possible suffix match. */
4536 if (i.tm.opcode_modifier.w
4537 || ((suffixes & (suffixes - 1))
4538 && !i.tm.opcode_modifier.defaultsize
4539 && !i.tm.opcode_modifier.ignoresize))
4540 {
4541 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4542 return 0;
4543 }
4544 }
4545 }
4546
4547 /* Change the opcode based on the operand size given by i.suffix;
4548 We don't need to change things for byte insns. */
4549
4550 if (i.suffix
4551 && i.suffix != BYTE_MNEM_SUFFIX
4552 && i.suffix != XMMWORD_MNEM_SUFFIX
4553 && i.suffix != YMMWORD_MNEM_SUFFIX)
4554 {
4555 /* It's not a byte, select word/dword operation. */
4556 if (i.tm.opcode_modifier.w)
4557 {
4558 if (i.tm.opcode_modifier.shortform)
4559 i.tm.base_opcode |= 8;
4560 else
4561 i.tm.base_opcode |= 1;
4562 }
4563
4564 /* Now select between word & dword operations via the operand
4565 size prefix, except for instructions that will ignore this
4566 prefix anyway. */
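/* For example, `addw %ax,%bx' assembled in 32-bit code has already had
   its W bit set above and picks up a 0x66 operand-size prefix here.  */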
4567 if (i.tm.opcode_modifier.addrprefixop0)
4568 {
4569 /* The address size override prefix changes the size of the
4570 first operand. */
4571 if ((flag_code == CODE_32BIT
4572 && i.op->regs[0].reg_type.bitfield.reg16)
4573 || (flag_code != CODE_32BIT
4574 && i.op->regs[0].reg_type.bitfield.reg32))
4575 if (!add_prefix (ADDR_PREFIX_OPCODE))
4576 return 0;
4577 }
4578 else if (i.suffix != QWORD_MNEM_SUFFIX
4579 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4580 && !i.tm.opcode_modifier.ignoresize
4581 && !i.tm.opcode_modifier.floatmf
4582 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4583 || (flag_code == CODE_64BIT
4584 && i.tm.opcode_modifier.jumpbyte)))
4585 {
4586 unsigned int prefix = DATA_PREFIX_OPCODE;
4587
4588 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4589 prefix = ADDR_PREFIX_OPCODE;
4590
4591 if (!add_prefix (prefix))
4592 return 0;
4593 }
4594
4595 /* Set mode64 for an operand. */
4596 if (i.suffix == QWORD_MNEM_SUFFIX
4597 && flag_code == CODE_64BIT
4598 && !i.tm.opcode_modifier.norex64)
4599 {
4600 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4601 need rex64. cmpxchg8b is also a special case. */
4602 if (! (i.operands == 2
4603 && i.tm.base_opcode == 0x90
4604 && i.tm.extension_opcode == None
4605 && operand_type_equal (&i.types [0], &acc64)
4606 && operand_type_equal (&i.types [1], &acc64))
4607 && ! (i.operands == 1
4608 && i.tm.base_opcode == 0xfc7
4609 && i.tm.extension_opcode == 1
4610 && !operand_type_check (i.types [0], reg)
4611 && operand_type_check (i.types [0], anymem)))
4612 i.rex |= REX_W;
4613 }
4614
4615 /* Size floating point instruction. */
4616 if (i.suffix == LONG_MNEM_SUFFIX)
4617 if (i.tm.opcode_modifier.floatmf)
4618 i.tm.base_opcode ^= 4;
4619 }
4620
4621 return 1;
4622 }
4623
4624 static int
4625 check_byte_reg (void)
4626 {
4627 int op;
4628
4629 for (op = i.operands; --op >= 0;)
4630 {
4631 /* If this is an eight bit register, it's OK. If it's the 16 or
4632 32 bit version of an eight bit register, we will just use the
4633 low portion, and that's OK too. */
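/* For example, in 32-bit code `incb %eax' is accepted with a warning
   and assembled as if `%al' had been written; in 64-bit code the same
   operand is rejected below.  */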
4634 if (i.types[op].bitfield.reg8)
4635 continue;
4636
4637 /* crc32 doesn't generate this warning. */
4638 if (i.tm.base_opcode == 0xf20f38f0)
4639 continue;
4640
4641 if ((i.types[op].bitfield.reg16
4642 || i.types[op].bitfield.reg32
4643 || i.types[op].bitfield.reg64)
4644 && i.op[op].regs->reg_num < 4)
4645 {
4646 /* Prohibit these changes in the 64bit mode, since the
4647 lowering is more complicated. */
4648 if (flag_code == CODE_64BIT
4649 && !i.tm.operand_types[op].bitfield.inoutportreg)
4650 {
4651 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4652 register_prefix, i.op[op].regs->reg_name,
4653 i.suffix);
4654 return 0;
4655 }
4656 #if REGISTER_WARNINGS
4657 if (!quiet_warnings
4658 && !i.tm.operand_types[op].bitfield.inoutportreg)
4659 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4660 register_prefix,
4661 (i.op[op].regs + (i.types[op].bitfield.reg16
4662 ? REGNAM_AL - REGNAM_AX
4663 : REGNAM_AL - REGNAM_EAX))->reg_name,
4664 register_prefix,
4665 i.op[op].regs->reg_name,
4666 i.suffix);
4667 #endif
4668 continue;
4669 }
4670 /* Any other register is bad. */
4671 if (i.types[op].bitfield.reg16
4672 || i.types[op].bitfield.reg32
4673 || i.types[op].bitfield.reg64
4674 || i.types[op].bitfield.regmmx
4675 || i.types[op].bitfield.regxmm
4676 || i.types[op].bitfield.regymm
4677 || i.types[op].bitfield.sreg2
4678 || i.types[op].bitfield.sreg3
4679 || i.types[op].bitfield.control
4680 || i.types[op].bitfield.debug
4681 || i.types[op].bitfield.test
4682 || i.types[op].bitfield.floatreg
4683 || i.types[op].bitfield.floatacc)
4684 {
4685 as_bad (_("`%s%s' not allowed with `%s%c'"),
4686 register_prefix,
4687 i.op[op].regs->reg_name,
4688 i.tm.name,
4689 i.suffix);
4690 return 0;
4691 }
4692 }
4693 return 1;
4694 }
4695
4696 static int
4697 check_long_reg (void)
4698 {
4699 int op;
4700
4701 for (op = i.operands; --op >= 0;)
4702 /* Reject eight bit registers, except where the template requires
4703 them. (eg. movzb) */
4704 if (i.types[op].bitfield.reg8
4705 && (i.tm.operand_types[op].bitfield.reg16
4706 || i.tm.operand_types[op].bitfield.reg32
4707 || i.tm.operand_types[op].bitfield.acc))
4708 {
4709 as_bad (_("`%s%s' not allowed with `%s%c'"),
4710 register_prefix,
4711 i.op[op].regs->reg_name,
4712 i.tm.name,
4713 i.suffix);
4714 return 0;
4715 }
4716 /* Warn if the e prefix on a general reg is missing. */
4717 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4718 && i.types[op].bitfield.reg16
4719 && (i.tm.operand_types[op].bitfield.reg32
4720 || i.tm.operand_types[op].bitfield.acc))
4721 {
4722 /* Prohibit these changes in the 64bit mode, since the
4723 lowering is more complicated. */
4724 if (flag_code == CODE_64BIT)
4725 {
4726 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4727 register_prefix, i.op[op].regs->reg_name,
4728 i.suffix);
4729 return 0;
4730 }
4731 #if REGISTER_WARNINGS
4732 else
4733 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4734 register_prefix,
4735 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4736 register_prefix,
4737 i.op[op].regs->reg_name,
4738 i.suffix);
4739 #endif
4740 }
4741 /* A 64 bit register was given where a 32 bit register is expected. */
4742 else if (i.types[op].bitfield.reg64
4743 && (i.tm.operand_types[op].bitfield.reg32
4744 || i.tm.operand_types[op].bitfield.acc))
4745 {
4746 if (intel_syntax
4747 && i.tm.opcode_modifier.toqword
4748 && !i.types[0].bitfield.regxmm)
4749 {
4750 /* Convert to QWORD. We want REX byte. */
4751 i.suffix = QWORD_MNEM_SUFFIX;
4752 }
4753 else
4754 {
4755 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4756 register_prefix, i.op[op].regs->reg_name,
4757 i.suffix);
4758 return 0;
4759 }
4760 }
4761 return 1;
4762 }
4763
4764 static int
4765 check_qword_reg (void)
4766 {
4767 int op;
4768
4769 for (op = i.operands; --op >= 0; )
4770 /* Reject eight bit registers, except where the template requires
4771 them. (eg. movzb) */
4772 if (i.types[op].bitfield.reg8
4773 && (i.tm.operand_types[op].bitfield.reg16
4774 || i.tm.operand_types[op].bitfield.reg32
4775 || i.tm.operand_types[op].bitfield.acc))
4776 {
4777 as_bad (_("`%s%s' not allowed with `%s%c'"),
4778 register_prefix,
4779 i.op[op].regs->reg_name,
4780 i.tm.name,
4781 i.suffix);
4782 return 0;
4783 }
4784 /* A 16 or 32 bit register was given where a 64 bit register is expected. */
4785 else if ((i.types[op].bitfield.reg16
4786 || i.types[op].bitfield.reg32)
4787 && (i.tm.operand_types[op].bitfield.reg32
4788 || i.tm.operand_types[op].bitfield.acc))
4789 {
4790 /* In Intel syntax, convert to a 32 bit operand when the
4791 template allows it; otherwise this is an error. */
4792 if (intel_syntax
4793 && i.tm.opcode_modifier.todword
4794 && !i.types[0].bitfield.regxmm)
4795 {
4796 /* Convert to DWORD. We don't want REX byte. */
4797 i.suffix = LONG_MNEM_SUFFIX;
4798 }
4799 else
4800 {
4801 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4802 register_prefix, i.op[op].regs->reg_name,
4803 i.suffix);
4804 return 0;
4805 }
4806 }
4807 return 1;
4808 }
4809
4810 static int
4811 check_word_reg (void)
4812 {
4813 int op;
4814 for (op = i.operands; --op >= 0;)
4815 /* Reject eight bit registers, except where the template requires
4816 them. (eg. movzb) */
4817 if (i.types[op].bitfield.reg8
4818 && (i.tm.operand_types[op].bitfield.reg16
4819 || i.tm.operand_types[op].bitfield.reg32
4820 || i.tm.operand_types[op].bitfield.acc))
4821 {
4822 as_bad (_("`%s%s' not allowed with `%s%c'"),
4823 register_prefix,
4824 i.op[op].regs->reg_name,
4825 i.tm.name,
4826 i.suffix);
4827 return 0;
4828 }
4829 /* Warn if the e prefix on a general reg is present. */
4830 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4831 && i.types[op].bitfield.reg32
4832 && (i.tm.operand_types[op].bitfield.reg16
4833 || i.tm.operand_types[op].bitfield.acc))
4834 {
4835 /* Prohibit these changes in the 64bit mode, since the
4836 lowering is more complicated. */
4837 if (flag_code == CODE_64BIT)
4838 {
4839 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4840 register_prefix, i.op[op].regs->reg_name,
4841 i.suffix);
4842 return 0;
4843 }
4844 else
4845 #if REGISTER_WARNINGS
4846 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4847 register_prefix,
4848 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4849 register_prefix,
4850 i.op[op].regs->reg_name,
4851 i.suffix);
4852 #endif
4853 }
4854 return 1;
4855 }
4856
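/* Narrow operand J's immediate type to a single width, using the
   explicit suffix if one was given, or the effective operand size
   otherwise; error out if the width still cannot be determined.  */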
4857 static int
4858 update_imm (unsigned int j)
4859 {
4860 i386_operand_type overlap = i.types[j];
4861 if ((overlap.bitfield.imm8
4862 || overlap.bitfield.imm8s
4863 || overlap.bitfield.imm16
4864 || overlap.bitfield.imm32
4865 || overlap.bitfield.imm32s
4866 || overlap.bitfield.imm64)
4867 && !operand_type_equal (&overlap, &imm8)
4868 && !operand_type_equal (&overlap, &imm8s)
4869 && !operand_type_equal (&overlap, &imm16)
4870 && !operand_type_equal (&overlap, &imm32)
4871 && !operand_type_equal (&overlap, &imm32s)
4872 && !operand_type_equal (&overlap, &imm64))
4873 {
4874 if (i.suffix)
4875 {
4876 i386_operand_type temp;
4877
4878 operand_type_set (&temp, 0);
4879 if (i.suffix == BYTE_MNEM_SUFFIX)
4880 {
4881 temp.bitfield.imm8 = overlap.bitfield.imm8;
4882 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4883 }
4884 else if (i.suffix == WORD_MNEM_SUFFIX)
4885 temp.bitfield.imm16 = overlap.bitfield.imm16;
4886 else if (i.suffix == QWORD_MNEM_SUFFIX)
4887 {
4888 temp.bitfield.imm64 = overlap.bitfield.imm64;
4889 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4890 }
4891 else
4892 temp.bitfield.imm32 = overlap.bitfield.imm32;
4893 overlap = temp;
4894 }
4895 else if (operand_type_equal (&overlap, &imm16_32_32s)
4896 || operand_type_equal (&overlap, &imm16_32)
4897 || operand_type_equal (&overlap, &imm16_32s))
4898 {
4899 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4900 overlap = imm16;
4901 else
4902 overlap = imm32s;
4903 }
4904 if (!operand_type_equal (&overlap, &imm8)
4905 && !operand_type_equal (&overlap, &imm8s)
4906 && !operand_type_equal (&overlap, &imm16)
4907 && !operand_type_equal (&overlap, &imm32)
4908 && !operand_type_equal (&overlap, &imm32s)
4909 && !operand_type_equal (&overlap, &imm64))
4910 {
4911 as_bad (_("no instruction mnemonic suffix given; "
4912 "can't determine immediate size"));
4913 return 0;
4914 }
4915 }
4916 i.types[j] = overlap;
4917
4918 return 1;
4919 }
4920
4921 static int
4922 finalize_imm (void)
4923 {
4924 unsigned int j, n;
4925
4926 /* Update the first 2 immediate operands. */
4927 n = i.operands > 2 ? 2 : i.operands;
4928 if (n)
4929 {
4930 for (j = 0; j < n; j++)
4931 if (update_imm (j) == 0)
4932 return 0;
4933
4934 /* The 3rd operand can't be an immediate operand. */
4935 gas_assert (operand_type_check (i.types[2], imm) == 0);
4936 }
4937
4938 return 1;
4939 }
4940
4941 static int
4942 bad_implicit_operand (int xmm)
4943 {
4944 const char *ireg = xmm ? "xmm0" : "ymm0";
4945
4946 if (intel_syntax)
4947 as_bad (_("the last operand of `%s' must be `%s%s'"),
4948 i.tm.name, register_prefix, ireg);
4949 else
4950 as_bad (_("the first operand of `%s' must be `%s%s'"),
4951 i.tm.name, register_prefix, ireg);
4952 return 0;
4953 }
4954
4955 static int
4956 process_operands (void)
4957 {
4958 /* Default segment register this instruction will use for memory
4959 accesses. 0 means unknown. This is only for optimizing out
4960 unnecessary segment overrides. */
4961 const seg_entry *default_seg = 0;
4962
4963 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4964 {
4965 unsigned int dupl = i.operands;
4966 unsigned int dest = dupl - 1;
4967 unsigned int j;
4968
4969 /* The destination must be an xmm register. */
4970 gas_assert (i.reg_operands
4971 && MAX_OPERANDS > dupl
4972 && operand_type_equal (&i.types[dest], &regxmm));
4973
4974 if (i.tm.opcode_modifier.firstxmm0)
4975 {
4976 /* The first operand is implicit and must be xmm0. */
4977 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4978 if (i.op[0].regs->reg_num != 0)
4979 return bad_implicit_operand (1);
4980
4981 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4982 {
4983 /* Keep xmm0 for instructions with VEX prefix and 3
4984 sources. */
4985 goto duplicate;
4986 }
4987 else
4988 {
4989 /* We remove the first xmm0 and keep the number of
4990 operands unchanged, which in fact duplicates the
4991 destination. */
4992 for (j = 1; j < i.operands; j++)
4993 {
4994 i.op[j - 1] = i.op[j];
4995 i.types[j - 1] = i.types[j];
4996 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4997 }
4998 }
4999 }
5000 else if (i.tm.opcode_modifier.implicit1stxmm0)
5001 {
5002 gas_assert ((MAX_OPERANDS - 1) > dupl
5003 && (i.tm.opcode_modifier.vexsources
5004 == VEX3SOURCES));
5005
5006 /* Add the implicit xmm0 for instructions with VEX prefix
5007 and 3 sources. */
5008 for (j = i.operands; j > 0; j--)
5009 {
5010 i.op[j] = i.op[j - 1];
5011 i.types[j] = i.types[j - 1];
5012 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5013 }
5014 i.op[0].regs
5015 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5016 i.types[0] = regxmm;
5017 i.tm.operand_types[0] = regxmm;
5018
5019 i.operands += 2;
5020 i.reg_operands += 2;
5021 i.tm.operands += 2;
5022
5023 dupl++;
5024 dest++;
5025 i.op[dupl] = i.op[dest];
5026 i.types[dupl] = i.types[dest];
5027 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5028 }
5029 else
5030 {
5031 duplicate:
5032 i.operands++;
5033 i.reg_operands++;
5034 i.tm.operands++;
5035
5036 i.op[dupl] = i.op[dest];
5037 i.types[dupl] = i.types[dest];
5038 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5039 }
5040
5041 if (i.tm.opcode_modifier.immext)
5042 process_immext ();
5043 }
5044 else if (i.tm.opcode_modifier.firstxmm0)
5045 {
5046 unsigned int j;
5047
5048 /* The first operand is implicit and must be xmm0/ymm0. */
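/* For example, SSE4.1 `blendvps %xmm0, %xmm2, %xmm1' reaches this
   branch; the explicit %xmm0 is removed below so that only the
   remaining operands are encoded.  */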
5049 gas_assert (i.reg_operands
5050 && (operand_type_equal (&i.types[0], &regxmm)
5051 || operand_type_equal (&i.types[0], &regymm)));
5052 if (i.op[0].regs->reg_num != 0)
5053 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5054
5055 for (j = 1; j < i.operands; j++)
5056 {
5057 i.op[j - 1] = i.op[j];
5058 i.types[j - 1] = i.types[j];
5059
5060 /* We need to adjust fields in i.tm since they are used by
5061 build_modrm_byte. */
5062 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5063 }
5064
5065 i.operands--;
5066 i.reg_operands--;
5067 i.tm.operands--;
5068 }
5069 else if (i.tm.opcode_modifier.regkludge)
5070 {
5071 /* The imul $imm, %reg instruction is converted into
5072 imul $imm, %reg, %reg, and the clr %reg instruction
5073 is converted into xor %reg, %reg. */
5074
5075 unsigned int first_reg_op;
5076
5077 if (operand_type_check (i.types[0], reg))
5078 first_reg_op = 0;
5079 else
5080 first_reg_op = 1;
5081 /* Pretend we saw the extra register operand. */
5082 gas_assert (i.reg_operands == 1
5083 && i.op[first_reg_op + 1].regs == 0);
5084 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5085 i.types[first_reg_op + 1] = i.types[first_reg_op];
5086 i.operands++;
5087 i.reg_operands++;
5088 }
5089
5090 if (i.tm.opcode_modifier.shortform)
5091 {
5092 if (i.types[0].bitfield.sreg2
5093 || i.types[0].bitfield.sreg3)
5094 {
5095 if (i.tm.base_opcode == POP_SEG_SHORT
5096 && i.op[0].regs->reg_num == 1)
5097 {
5098 as_bad (_("you can't `pop %scs'"), register_prefix);
5099 return 0;
5100 }
5101 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5102 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5103 i.rex |= REX_B;
5104 }
5105 else
5106 {
5107 /* The register or float register operand is in operand
5108 0 or 1. */
5109 unsigned int op;
5110
5111 if (i.types[0].bitfield.floatreg
5112 || operand_type_check (i.types[0], reg))
5113 op = 0;
5114 else
5115 op = 1;
5116 /* Register goes in low 3 bits of opcode. */
5117 i.tm.base_opcode |= i.op[op].regs->reg_num;
5118 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5119 i.rex |= REX_B;
5120 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5121 {
5122 /* Warn about some common errors, but press on regardless.
5123 The first case can be generated by gcc (<= 2.8.1). */
5124 if (i.operands == 2)
5125 {
5126 /* Reversed arguments on faddp, fsubp, etc. */
5127 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5128 register_prefix, i.op[!intel_syntax].regs->reg_name,
5129 register_prefix, i.op[intel_syntax].regs->reg_name);
5130 }
5131 else
5132 {
5133 /* Extraneous `l' suffix on fp insn. */
5134 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5135 register_prefix, i.op[0].regs->reg_name);
5136 }
5137 }
5138 }
5139 }
5140 else if (i.tm.opcode_modifier.modrm)
5141 {
5142 /* The opcode is completed (modulo i.tm.extension_opcode which
5143 must be put into the modrm byte). Now, we make the modrm and
5144 index base bytes based on all the info we've collected. */
5145
5146 default_seg = build_modrm_byte ();
5147 }
5148 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5149 {
5150 default_seg = &ds;
5151 }
5152 else if (i.tm.opcode_modifier.isstring)
5153 {
5154 /* For the string instructions that allow a segment override
5155 on one of their operands, the default segment is ds. */
5156 default_seg = &ds;
5157 }
5158
5159 if (i.tm.base_opcode == 0x8d /* lea */
5160 && i.seg[0]
5161 && !quiet_warnings)
5162 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5163
5164 /* If a segment was explicitly specified, and the specified segment
5165 is not the default, use an opcode prefix to select it. If we
5166 never figured out what the default segment is, then default_seg
5167 will be zero at this point, and the specified segment prefix will
5168 always be used. */
5169 if ((i.seg[0]) && (i.seg[0] != default_seg))
5170 {
5171 if (!add_prefix (i.seg[0]->seg_prefix))
5172 return 0;
5173 }
5174 return 1;
5175 }
5176
5177 static const seg_entry *
5178 build_modrm_byte (void)
5179 {
5180 const seg_entry *default_seg = 0;
5181 unsigned int source, dest;
5182 int vex_3_sources;
5183
5184 /* The first operand of instructions with VEX prefix and 3 sources
5185 must be VEX_Imm4. */
5186 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5187 if (vex_3_sources)
5188 {
5189 unsigned int nds, reg_slot;
5190 expressionS *exp;
5191
5192 if (i.tm.opcode_modifier.veximmext
5193 && i.tm.opcode_modifier.immext)
5194 {
5195 dest = i.operands - 2;
5196 gas_assert (dest == 3);
5197 }
5198 else
5199 dest = i.operands - 1;
5200 nds = dest - 1;
5201
5202 /* There are 2 kinds of instructions:
5203 1. 5 operands: 4 register operands or 3 register operands
5204 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5205 VexW0 or VexW1. The destination must be either XMM or YMM
5206 register.
5207 2. 4 operands: 4 register operands or 3 register operands
5208 plus 1 memory operand, VexXDS, and VexImmExt */
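/* For FMA4 and XOP forms such as `vfmaddps', the register source that
   is not encoded in ModRM or VEX.vvvv is placed in the top four bits of
   an imm8, which is generated below when the instruction has no
   immediate of its own, or merged into the existing immediate
   otherwise.  */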
5209 gas_assert ((i.reg_operands == 4
5210 || (i.reg_operands == 3 && i.mem_operands == 1))
5211 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5212 && (i.tm.opcode_modifier.veximmext
5213 || (i.imm_operands == 1
5214 && i.types[0].bitfield.vec_imm4
5215 && (i.tm.opcode_modifier.vexw == VEXW0
5216 || i.tm.opcode_modifier.vexw == VEXW1)
5217 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5218 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5219
5220 if (i.imm_operands == 0)
5221 {
5222 /* When there is no immediate operand, generate an 8bit
5223 immediate operand to encode the first operand. */
5224 exp = &im_expressions[i.imm_operands++];
5225 i.op[i.operands].imms = exp;
5226 i.types[i.operands] = imm8;
5227 i.operands++;
5228 /* If VexW1 is set, the first operand is the source and
5229 the second operand is encoded in the immediate operand. */
5230 if (i.tm.opcode_modifier.vexw == VEXW1)
5231 {
5232 source = 0;
5233 reg_slot = 1;
5234 }
5235 else
5236 {
5237 source = 1;
5238 reg_slot = 0;
5239 }
5240
5241 /* FMA swaps REG and NDS. */
5242 if (i.tm.cpu_flags.bitfield.cpufma)
5243 {
5244 unsigned int tmp;
5245 tmp = reg_slot;
5246 reg_slot = nds;
5247 nds = tmp;
5248 }
5249
5250 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5251 &regxmm)
5252 || operand_type_equal (&i.tm.operand_types[reg_slot],
5253 &regymm));
5254 exp->X_op = O_constant;
5255 exp->X_add_number
5256 = ((i.op[reg_slot].regs->reg_num
5257 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5258 << 4);
5259 }
5260 else
5261 {
5262 unsigned int imm_slot;
5263
5264 if (i.tm.opcode_modifier.vexw == VEXW0)
5265 {
5266 /* If VexW0 is set, the third operand is the source and
5267 the second operand is encoded in the immediate
5268 operand. */
5269 source = 2;
5270 reg_slot = 1;
5271 }
5272 else
5273 {
5274 /* VexW1 is set, the second operand is the source and
5275 the third operand is encoded in the immediate
5276 operand. */
5277 source = 1;
5278 reg_slot = 2;
5279 }
5280
5281 if (i.tm.opcode_modifier.immext)
5282 {
5283 /* When ImmExt is set, the immediate byte is the last
5284 operand. */
5285 imm_slot = i.operands - 1;
5286 source--;
5287 reg_slot--;
5288 }
5289 else
5290 {
5291 imm_slot = 0;
5292
5293 /* Turn on Imm8 so that output_imm will generate it. */
5294 i.types[imm_slot].bitfield.imm8 = 1;
5295 }
5296
5297 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5298 &regxmm)
5299 || operand_type_equal (&i.tm.operand_types[reg_slot],
5300 &regymm));
5301 i.op[imm_slot].imms->X_add_number
5302 |= ((i.op[reg_slot].regs->reg_num
5303 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5304 << 4);
5305 }
5306
5307 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5308 || operand_type_equal (&i.tm.operand_types[nds],
5309 &regymm));
5310 i.vex.register_specifier = i.op[nds].regs;
5311 }
5312 else
5313 source = dest = 0;
5314
5315 /* i.reg_operands MUST be the number of real register operands;
5316 implicit registers do not count. If there are 3 register
5317 operands, it must be an instruction with VexNDS. For an
5318 instruction with VexNDD, the destination register is encoded
5319 in the VEX prefix. If there are 4 register operands, it must be
5320 an instruction with a VEX prefix and 3 sources. */
5321 if (i.mem_operands == 0
5322 && ((i.reg_operands == 2
5323 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5324 || (i.reg_operands == 3
5325 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5326 || (i.reg_operands == 4 && vex_3_sources)))
5327 {
5328 switch (i.operands)
5329 {
5330 case 2:
5331 source = 0;
5332 break;
5333 case 3:
5334 /* When there are 3 operands, one of them may be immediate,
5335 which may be the first or the last operand. Otherwise,
5336 the first operand must be shift count register (cl) or it
5337 is an instruction with VexNDS. */
5338 gas_assert (i.imm_operands == 1
5339 || (i.imm_operands == 0
5340 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5341 || i.types[0].bitfield.shiftcount)));
5342 if (operand_type_check (i.types[0], imm)
5343 || i.types[0].bitfield.shiftcount)
5344 source = 1;
5345 else
5346 source = 0;
5347 break;
5348 case 4:
5349 /* When there are 4 operands, the first two must be 8bit
5350 immediate operands. The source operand will be the 3rd
5351 one.
5352
5353 For instructions with VexNDS, if the first operand
5354 is an imm8, the source operand is the 2nd one. If the last
5355 operand is an imm8, the source operand is the first one. */
5356 gas_assert ((i.imm_operands == 2
5357 && i.types[0].bitfield.imm8
5358 && i.types[1].bitfield.imm8)
5359 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5360 && i.imm_operands == 1
5361 && (i.types[0].bitfield.imm8
5362 || i.types[i.operands - 1].bitfield.imm8)));
5363 if (i.imm_operands == 2)
5364 source = 2;
5365 else
5366 {
5367 if (i.types[0].bitfield.imm8)
5368 source = 1;
5369 else
5370 source = 0;
5371 }
5372 break;
5373 case 5:
5374 break;
5375 default:
5376 abort ();
5377 }
5378
5379 if (!vex_3_sources)
5380 {
5381 dest = source + 1;
5382
5383 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5384 {
5385 /* For instructions with VexNDS, the register-only
5386 source operand must be a 32/64bit integer, XMM or
5387 YMM register. It is encoded in the VEX prefix. We
5388 need to clear RegMem bit before calling
5389 operand_type_equal. */
5390
5391 i386_operand_type op;
5392 unsigned int vvvv;
5393
5394 /* Check register-only source operand when two source
5395 operands are swapped. */
5396 if (!i.tm.operand_types[source].bitfield.baseindex
5397 && i.tm.operand_types[dest].bitfield.baseindex)
5398 {
5399 vvvv = source;
5400 source = dest;
5401 }
5402 else
5403 vvvv = dest;
5404
5405 op = i.tm.operand_types[vvvv];
5406 op.bitfield.regmem = 0;
5407 if ((dest + 1) >= i.operands
5408 || (op.bitfield.reg32 != 1
5409 && op.bitfield.reg64 != 1
5410 && !operand_type_equal (&op, &regxmm)
5411 && !operand_type_equal (&op, &regymm)))
5412 abort ();
5413 i.vex.register_specifier = i.op[vvvv].regs;
5414 dest++;
5415 }
5416 }
5417
5418 i.rm.mode = 3;
5419 /* One of the register operands will be encoded in the i.rm.reg
5420 field, the other in the combined i.rm.mode and i.rm.regmem
5421 fields. If no form of this instruction supports a memory
5422 destination operand, then we assume the source operand may
5423 sometimes be a memory operand and so we need to store the
5424 destination in the i.rm.reg field. */
5425 if (!i.tm.operand_types[dest].bitfield.regmem
5426 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5427 {
5428 i.rm.reg = i.op[dest].regs->reg_num;
5429 i.rm.regmem = i.op[source].regs->reg_num;
5430 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5431 i.rex |= REX_R;
5432 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5433 i.rex |= REX_B;
5434 }
5435 else
5436 {
5437 i.rm.reg = i.op[source].regs->reg_num;
5438 i.rm.regmem = i.op[dest].regs->reg_num;
5439 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5440 i.rex |= REX_B;
5441 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5442 i.rex |= REX_R;
5443 }
5444 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5445 {
5446 if (!i.types[0].bitfield.control
5447 && !i.types[1].bitfield.control)
5448 abort ();
5449 i.rex &= ~(REX_R | REX_B);
5450 add_prefix (LOCK_PREFIX_OPCODE);
5451 }
5452 }
5453 else
5454 { /* If it's not 2 reg operands... */
5455 unsigned int mem;
5456
5457 if (i.mem_operands)
5458 {
5459 unsigned int fake_zero_displacement = 0;
5460 unsigned int op;
5461
5462 for (op = 0; op < i.operands; op++)
5463 if (operand_type_check (i.types[op], anymem))
5464 break;
5465 gas_assert (op < i.operands);
5466
5467 if (i.tm.opcode_modifier.vecsib)
5468 {
5469 if (i.index_reg->reg_num == RegEiz
5470 || i.index_reg->reg_num == RegRiz)
5471 abort ();
5472
5473 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5474 if (!i.base_reg)
5475 {
5476 i.sib.base = NO_BASE_REGISTER;
5477 i.sib.scale = i.log2_scale_factor;
5478 i.types[op].bitfield.disp8 = 0;
5479 i.types[op].bitfield.disp16 = 0;
5480 i.types[op].bitfield.disp64 = 0;
5481 if (flag_code != CODE_64BIT)
5482 {
5483 /* Must be 32 bit */
5484 i.types[op].bitfield.disp32 = 1;
5485 i.types[op].bitfield.disp32s = 0;
5486 }
5487 else
5488 {
5489 i.types[op].bitfield.disp32 = 0;
5490 i.types[op].bitfield.disp32s = 1;
5491 }
5492 }
5493 i.sib.index = i.index_reg->reg_num;
5494 if ((i.index_reg->reg_flags & RegRex) != 0)
5495 i.rex |= REX_X;
5496 }
5497
5498 default_seg = &ds;
5499
5500 if (i.base_reg == 0)
5501 {
5502 i.rm.mode = 0;
5503 if (!i.disp_operands)
5504 {
5505 fake_zero_displacement = 1;
5506 /* Instructions with VSIB byte need 32bit displacement
5507 if there is no base register. */
5508 if (i.tm.opcode_modifier.vecsib)
5509 i.types[op].bitfield.disp32 = 1;
5510 }
5511 if (i.index_reg == 0)
5512 {
5513 gas_assert (!i.tm.opcode_modifier.vecsib);
5514 /* Operand is just <disp> */
5515 if (flag_code == CODE_64BIT)
5516 {
5517 /* In 64bit mode the 32bit absolute addressing form
5518 is replaced by RIP relative addressing; plain
5519 absolute addressing has to be encoded via one of
5520 the redundant SIB forms. */
5521 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5522 i.sib.base = NO_BASE_REGISTER;
5523 i.sib.index = NO_INDEX_REGISTER;
5524 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5525 ? disp32s : disp32);
5526 }
5527 else if ((flag_code == CODE_16BIT)
5528 ^ (i.prefix[ADDR_PREFIX] != 0))
5529 {
5530 i.rm.regmem = NO_BASE_REGISTER_16;
5531 i.types[op] = disp16;
5532 }
5533 else
5534 {
5535 i.rm.regmem = NO_BASE_REGISTER;
5536 i.types[op] = disp32;
5537 }
5538 }
5539 else if (!i.tm.opcode_modifier.vecsib)
5540 {
5541 /* !i.base_reg && i.index_reg */
5542 if (i.index_reg->reg_num == RegEiz
5543 || i.index_reg->reg_num == RegRiz)
5544 i.sib.index = NO_INDEX_REGISTER;
5545 else
5546 i.sib.index = i.index_reg->reg_num;
5547 i.sib.base = NO_BASE_REGISTER;
5548 i.sib.scale = i.log2_scale_factor;
5549 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5550 i.types[op].bitfield.disp8 = 0;
5551 i.types[op].bitfield.disp16 = 0;
5552 i.types[op].bitfield.disp64 = 0;
5553 if (flag_code != CODE_64BIT)
5554 {
5555 /* Must be 32 bit */
5556 i.types[op].bitfield.disp32 = 1;
5557 i.types[op].bitfield.disp32s = 0;
5558 }
5559 else
5560 {
5561 i.types[op].bitfield.disp32 = 0;
5562 i.types[op].bitfield.disp32s = 1;
5563 }
5564 if ((i.index_reg->reg_flags & RegRex) != 0)
5565 i.rex |= REX_X;
5566 }
5567 }
5568 /* RIP addressing for 64bit mode. */
5569 else if (i.base_reg->reg_num == RegRip ||
5570 i.base_reg->reg_num == RegEip)
5571 {
5572 gas_assert (!i.tm.opcode_modifier.vecsib);
5573 i.rm.regmem = NO_BASE_REGISTER;
5574 i.types[op].bitfield.disp8 = 0;
5575 i.types[op].bitfield.disp16 = 0;
5576 i.types[op].bitfield.disp32 = 0;
5577 i.types[op].bitfield.disp32s = 1;
5578 i.types[op].bitfield.disp64 = 0;
5579 i.flags[op] |= Operand_PCrel;
5580 if (! i.disp_operands)
5581 fake_zero_displacement = 1;
5582 }
5583 else if (i.base_reg->reg_type.bitfield.reg16)
5584 {
5585 gas_assert (!i.tm.opcode_modifier.vecsib);
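/* 16-bit addressing: r/m encodes 0 = (%bx,%si), 1 = (%bx,%di),
   2 = (%bp,%si), 3 = (%bp,%di), 4 = (%si), 5 = (%di),
   6 = (%bp) or bare disp16, 7 = (%bx).  */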
5586 switch (i.base_reg->reg_num)
5587 {
5588 case 3: /* (%bx) */
5589 if (i.index_reg == 0)
5590 i.rm.regmem = 7;
5591 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5592 i.rm.regmem = i.index_reg->reg_num - 6;
5593 break;
5594 case 5: /* (%bp) */
5595 default_seg = &ss;
5596 if (i.index_reg == 0)
5597 {
5598 i.rm.regmem = 6;
5599 if (operand_type_check (i.types[op], disp) == 0)
5600 {
5601 /* fake (%bp) into 0(%bp) */
5602 i.types[op].bitfield.disp8 = 1;
5603 fake_zero_displacement = 1;
5604 }
5605 }
5606 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5607 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5608 break;
5609 default: /* (%si) -> 4 or (%di) -> 5 */
5610 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5611 }
5612 i.rm.mode = mode_from_disp_size (i.types[op]);
5613 }
5614 else /* i.base_reg and 32/64 bit mode */
5615 {
5616 if (flag_code == CODE_64BIT
5617 && operand_type_check (i.types[op], disp))
5618 {
5619 i386_operand_type temp;
5620 operand_type_set (&temp, 0);
5621 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5622 i.types[op] = temp;
5623 if (i.prefix[ADDR_PREFIX] == 0)
5624 i.types[op].bitfield.disp32s = 1;
5625 else
5626 i.types[op].bitfield.disp32 = 1;
5627 }
5628
5629 if (!i.tm.opcode_modifier.vecsib)
5630 i.rm.regmem = i.base_reg->reg_num;
5631 if ((i.base_reg->reg_flags & RegRex) != 0)
5632 i.rex |= REX_B;
5633 i.sib.base = i.base_reg->reg_num;
5634 /* x86-64 ignores REX prefix bit here to avoid decoder
5635 complications. */
5636 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5637 {
5638 default_seg = &ss;
5639 if (i.disp_operands == 0)
5640 {
5641 fake_zero_displacement = 1;
5642 i.types[op].bitfield.disp8 = 1;
5643 }
5644 }
5645 else if (i.base_reg->reg_num == ESP_REG_NUM)
5646 {
5647 default_seg = &ss;
5648 }
5649 i.sib.scale = i.log2_scale_factor;
5650 if (i.index_reg == 0)
5651 {
5652 gas_assert (!i.tm.opcode_modifier.vecsib);
5653 /* <disp>(%esp) becomes two byte modrm with no index
5654 register. We've already stored the code for esp
5655 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5656 Any base register besides %esp will not use the
5657 extra modrm byte. */
5658 i.sib.index = NO_INDEX_REGISTER;
5659 }
5660 else if (!i.tm.opcode_modifier.vecsib)
5661 {
5662 if (i.index_reg->reg_num == RegEiz
5663 || i.index_reg->reg_num == RegRiz)
5664 i.sib.index = NO_INDEX_REGISTER;
5665 else
5666 i.sib.index = i.index_reg->reg_num;
5667 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5668 if ((i.index_reg->reg_flags & RegRex) != 0)
5669 i.rex |= REX_X;
5670 }
5671
5672 if (i.disp_operands
5673 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5674 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5675 i.rm.mode = 0;
5676 else
5677 i.rm.mode = mode_from_disp_size (i.types[op]);
5678 }
5679
5680 if (fake_zero_displacement)
5681 {
5682 /* Fakes a zero displacement assuming that i.types[op]
5683 holds the correct displacement size. */
5684 expressionS *exp;
5685
5686 gas_assert (i.op[op].disps == 0);
5687 exp = &disp_expressions[i.disp_operands++];
5688 i.op[op].disps = exp;
5689 exp->X_op = O_constant;
5690 exp->X_add_number = 0;
5691 exp->X_add_symbol = (symbolS *) 0;
5692 exp->X_op_symbol = (symbolS *) 0;
5693 }
5694
5695 mem = op;
5696 }
5697 else
5698 mem = ~0;
5699
5700 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5701 {
5702 if (operand_type_check (i.types[0], imm))
5703 i.vex.register_specifier = NULL;
5704 else
5705 {
5706 /* VEX.vvvv encodes one of the sources when the first
5707 operand is not an immediate. */
5708 if (i.tm.opcode_modifier.vexw == VEXW0)
5709 i.vex.register_specifier = i.op[0].regs;
5710 else
5711 i.vex.register_specifier = i.op[1].regs;
5712 }
5713
5714 /* Destination is a XMM register encoded in the ModRM.reg
5715 and VEX.R bit. */
5716 i.rm.reg = i.op[2].regs->reg_num;
5717 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5718 i.rex |= REX_R;
5719
5720 /* ModRM.rm and VEX.B encodes the other source. */
5721 if (!i.mem_operands)
5722 {
5723 i.rm.mode = 3;
5724
5725 if (i.tm.opcode_modifier.vexw == VEXW0)
5726 i.rm.regmem = i.op[1].regs->reg_num;
5727 else
5728 i.rm.regmem = i.op[0].regs->reg_num;
5729
5730 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5731 i.rex |= REX_B;
5732 }
5733 }
5734 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5735 {
5736 i.vex.register_specifier = i.op[2].regs;
5737 if (!i.mem_operands)
5738 {
5739 i.rm.mode = 3;
5740 i.rm.regmem = i.op[1].regs->reg_num;
5741 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5742 i.rex |= REX_B;
5743 }
5744 }
5745 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5746 (if any) based on i.tm.extension_opcode. Again, we must be
5747 careful to make sure that segment/control/debug/test/MMX
5748 registers are coded into the i.rm.reg field. */
5749 else if (i.reg_operands)
5750 {
5751 unsigned int op;
5752 unsigned int vex_reg = ~0;
5753
5754 for (op = 0; op < i.operands; op++)
5755 if (i.types[op].bitfield.reg8
5756 || i.types[op].bitfield.reg16
5757 || i.types[op].bitfield.reg32
5758 || i.types[op].bitfield.reg64
5759 || i.types[op].bitfield.regmmx
5760 || i.types[op].bitfield.regxmm
5761 || i.types[op].bitfield.regymm
5762 || i.types[op].bitfield.sreg2
5763 || i.types[op].bitfield.sreg3
5764 || i.types[op].bitfield.control
5765 || i.types[op].bitfield.debug
5766 || i.types[op].bitfield.test)
5767 break;
5768
5769 if (vex_3_sources)
5770 op = dest;
5771 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5772 {
5773 /* For instructions with VexNDS, the register-only
5774 source operand is encoded in VEX prefix. */
5775 gas_assert (mem != (unsigned int) ~0);
5776
5777 if (op > mem)
5778 {
5779 vex_reg = op++;
5780 gas_assert (op < i.operands);
5781 }
5782 else
5783 {
5784 /* Check register-only source operand when two source
5785 operands are swapped. */
5786 if (!i.tm.operand_types[op].bitfield.baseindex
5787 && i.tm.operand_types[op + 1].bitfield.baseindex)
5788 {
5789 vex_reg = op;
5790 op += 2;
5791 gas_assert (mem == (vex_reg + 1)
5792 && op < i.operands);
5793 }
5794 else
5795 {
5796 vex_reg = op + 1;
5797 gas_assert (vex_reg < i.operands);
5798 }
5799 }
5800 }
5801 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5802 {
5803 /* For instructions with VexNDD, the register destination
5804 is encoded in VEX prefix. */
5805 if (i.mem_operands == 0)
5806 {
5807 /* There is no memory operand. */
5808 gas_assert ((op + 2) == i.operands);
5809 vex_reg = op + 1;
5810 }
5811 else
5812 {
5813 /* There are only 2 operands. */
5814 gas_assert (op < 2 && i.operands == 2);
5815 vex_reg = 1;
5816 }
5817 }
5818 else
5819 gas_assert (op < i.operands);
5820
5821 if (vex_reg != (unsigned int) ~0)
5822 {
5823 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5824
5825 if (type->bitfield.reg32 != 1
5826 && type->bitfield.reg64 != 1
5827 && !operand_type_equal (type, &regxmm)
5828 && !operand_type_equal (type, &regymm))
5829 abort ();
5830
5831 i.vex.register_specifier = i.op[vex_reg].regs;
5832 }
5833
5834 /* Don't set OP operand twice. */
5835 if (vex_reg != op)
5836 {
5837 /* If there is an extension opcode to put here, the
5838 register number must be put into the regmem field. */
5839 if (i.tm.extension_opcode != None)
5840 {
5841 i.rm.regmem = i.op[op].regs->reg_num;
5842 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5843 i.rex |= REX_B;
5844 }
5845 else
5846 {
5847 i.rm.reg = i.op[op].regs->reg_num;
5848 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5849 i.rex |= REX_R;
5850 }
5851 }
5852
5853 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5854 must set it to 3 to indicate this is a register operand
5855 in the regmem field. */
5856 if (!i.mem_operands)
5857 i.rm.mode = 3;
5858 }
5859
5860 /* Fill in i.rm.reg field with extension opcode (if any). */
5861 if (i.tm.extension_opcode != None)
5862 i.rm.reg = i.tm.extension_opcode;
5863 }
5864 return default_seg;
5865 }
5866
5867 static void
5868 output_branch (void)
5869 {
5870 char *p;
5871 int size;
5872 int code16;
5873 int prefix;
5874 relax_substateT subtype;
5875 symbolS *sym;
5876 offsetT off;
5877
5878 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5879 size = i.disp32_encoding ? BIG : SMALL;
5880
5881 prefix = 0;
5882 if (i.prefix[DATA_PREFIX] != 0)
5883 {
5884 prefix = 1;
5885 i.prefixes -= 1;
5886 code16 ^= CODE16;
5887 }
5888 /* Pentium4 branch hints. */
5889 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5890 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5891 {
5892 prefix++;
5893 i.prefixes--;
5894 }
5895 if (i.prefix[REX_PREFIX] != 0)
5896 {
5897 prefix++;
5898 i.prefixes--;
5899 }
5900
5901 if (i.prefixes != 0 && !intel_syntax)
5902 as_warn (_("skipping prefixes on this instruction"));
5903
5904 /* It's always a symbol; End frag & setup for relax.
5905 Make sure there is enough room in this frag for the largest
5906 instruction we may generate in md_convert_frag. This is 2
5907 bytes for the opcode and room for the prefix and largest
5908 displacement. */
5909 frag_grow (prefix + 2 + 4);
5910 /* Prefix and 1 opcode byte go in fr_fix. */
5911 p = frag_more (prefix + 1);
5912 if (i.prefix[DATA_PREFIX] != 0)
5913 *p++ = DATA_PREFIX_OPCODE;
5914 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5915 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5916 *p++ = i.prefix[SEG_PREFIX];
5917 if (i.prefix[REX_PREFIX] != 0)
5918 *p++ = i.prefix[REX_PREFIX];
5919 *p = i.tm.base_opcode;
5920
5921 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5922 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5923 else if (cpu_arch_flags.bitfield.cpui386)
5924 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5925 else
5926 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5927 subtype |= code16;
5928
5929 sym = i.op[0].disps->X_add_symbol;
5930 off = i.op[0].disps->X_add_number;
5931
5932 if (i.op[0].disps->X_op != O_constant
5933 && i.op[0].disps->X_op != O_symbol)
5934 {
5935 /* Handle complex expressions. */
5936 sym = make_expr_symbol (i.op[0].disps);
5937 off = 0;
5938 }
5939
5940 /* 1 possible extra opcode + 4 byte displacement go in var part.
5941 Pass reloc in fr_var. */
5942 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5943 }
5944
5945 static void
5946 output_jump (void)
5947 {
5948 char *p;
5949 int size;
5950 fixS *fixP;
5951
5952 if (i.tm.opcode_modifier.jumpbyte)
5953 {
5954 /* This is a loop or jecxz type instruction. */
5955 size = 1;
5956 if (i.prefix[ADDR_PREFIX] != 0)
5957 {
5958 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5959 i.prefixes -= 1;
5960 }
5961 /* Pentium4 branch hints. */
5962 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5963 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5964 {
5965 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5966 i.prefixes--;
5967 }
5968 }
5969 else
5970 {
5971 int code16;
5972
5973 code16 = 0;
5974 if (flag_code == CODE_16BIT)
5975 code16 = CODE16;
5976
5977 if (i.prefix[DATA_PREFIX] != 0)
5978 {
5979 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5980 i.prefixes -= 1;
5981 code16 ^= CODE16;
5982 }
5983
5984 size = 4;
5985 if (code16)
5986 size = 2;
5987 }
5988
5989 if (i.prefix[REX_PREFIX] != 0)
5990 {
5991 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5992 i.prefixes -= 1;
5993 }
5994
5995 if (i.prefixes != 0 && !intel_syntax)
5996 as_warn (_("skipping prefixes on this instruction"));
5997
5998 p = frag_more (1 + size);
5999 *p++ = i.tm.base_opcode;
6000
6001 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6002 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6003
6004 /* All jumps handled here are signed, but don't use a signed limit
6005 check for 32 and 16 bit jumps as we want to allow wrap around at
6006 4G and 64k respectively. */
6007 if (size == 1)
6008 fixP->fx_signed = 1;
6009 }
6010
6011 static void
6012 output_interseg_jump (void)
6013 {
6014 char *p;
6015 int size;
6016 int prefix;
6017 int code16;
6018
6019 code16 = 0;
6020 if (flag_code == CODE_16BIT)
6021 code16 = CODE16;
6022
6023 prefix = 0;
6024 if (i.prefix[DATA_PREFIX] != 0)
6025 {
6026 prefix = 1;
6027 i.prefixes -= 1;
6028 code16 ^= CODE16;
6029 }
6030 if (i.prefix[REX_PREFIX] != 0)
6031 {
6032 prefix++;
6033 i.prefixes -= 1;
6034 }
6035
6036 size = 4;
6037 if (code16)
6038 size = 2;
6039
6040 if (i.prefixes != 0 && !intel_syntax)
6041 as_warn (_("skipping prefixes on this instruction"));
6042
6043 /* 1 opcode; 2 segment; offset */
6044 p = frag_more (prefix + 1 + 2 + size);
6045
6046 if (i.prefix[DATA_PREFIX] != 0)
6047 *p++ = DATA_PREFIX_OPCODE;
6048
6049 if (i.prefix[REX_PREFIX] != 0)
6050 *p++ = i.prefix[REX_PREFIX];
6051
6052 *p++ = i.tm.base_opcode;
6053 if (i.op[1].imms->X_op == O_constant)
6054 {
6055 offsetT n = i.op[1].imms->X_add_number;
6056
6057 if (size == 2
6058 && !fits_in_unsigned_word (n)
6059 && !fits_in_signed_word (n))
6060 {
6061 as_bad (_("16-bit jump out of range"));
6062 return;
6063 }
6064 md_number_to_chars (p, n, size);
6065 }
6066 else
6067 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6068 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6069 if (i.op[0].imms->X_op != O_constant)
6070 as_bad (_("can't handle non absolute segment in `%s'"),
6071 i.tm.name);
6072 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6073 }
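/* A sketch of the bytes emitted above for a direct inter-segment (far)
   jump or call, e.g. `ljmp $seg, $offset' (illustrative only):

       [data prefix]? [REX]? opcode  offset (2 or 4 bytes)  selector (2 bytes)

   i.e. the offset is written first at P and the 16-bit segment selector
   follows at P + size, matching the md_number_to_chars calls above.  */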
6074
6075 static void
6076 output_insn (void)
6077 {
6078 fragS *insn_start_frag;
6079 offsetT insn_start_off;
6080
6081 /* Tie dwarf2 debug info to the address at the start of the insn.
6082 We can't do this after the insn has been output as the current
6083	     frag may have been closed off, e.g. by frag_var.  */
6084 dwarf2_emit_insn (0);
6085
6086 insn_start_frag = frag_now;
6087 insn_start_off = frag_now_fix ();
6088
6089 /* Output jumps. */
6090 if (i.tm.opcode_modifier.jump)
6091 output_branch ();
6092 else if (i.tm.opcode_modifier.jumpbyte
6093 || i.tm.opcode_modifier.jumpdword)
6094 output_jump ();
6095 else if (i.tm.opcode_modifier.jumpintersegment)
6096 output_interseg_jump ();
6097 else
6098 {
6099 /* Output normal instructions here. */
6100 char *p;
6101 unsigned char *q;
6102 unsigned int j;
6103 unsigned int prefix;
6104
6105 /* Since the VEX prefix contains the implicit prefix, we don't
6106 need the explicit prefix. */
6107 if (!i.tm.opcode_modifier.vex)
6108 {
6109 switch (i.tm.opcode_length)
6110 {
6111 case 3:
6112 if (i.tm.base_opcode & 0xff000000)
6113 {
6114 prefix = (i.tm.base_opcode >> 24) & 0xff;
6115 goto check_prefix;
6116 }
6117 break;
6118 case 2:
6119 if ((i.tm.base_opcode & 0xff0000) != 0)
6120 {
6121 prefix = (i.tm.base_opcode >> 16) & 0xff;
6122 if (i.tm.cpu_flags.bitfield.cpupadlock)
6123 {
6124 check_prefix:
6125 if (prefix != REPE_PREFIX_OPCODE
6126 || (i.prefix[REP_PREFIX]
6127 != REPE_PREFIX_OPCODE))
6128 add_prefix (prefix);
6129 }
6130 else
6131 add_prefix (prefix);
6132 }
6133 break;
6134 case 1:
6135 break;
6136 default:
6137 abort ();
6138 }
6139
6140 /* The prefix bytes. */
6141 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6142 if (*q)
6143 FRAG_APPEND_1_CHAR (*q);
6144 }
6145
6146 if (i.tm.opcode_modifier.vex)
6147 {
6148 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6149 if (*q)
6150 switch (j)
6151 {
6152 case REX_PREFIX:
6153 /* REX byte is encoded in VEX prefix. */
6154 break;
6155 case SEG_PREFIX:
6156 case ADDR_PREFIX:
6157 FRAG_APPEND_1_CHAR (*q);
6158 break;
6159 default:
6160 /* There should be no other prefixes for instructions
6161 with VEX prefix. */
6162 abort ();
6163 }
6164
6165 /* Now the VEX prefix. */
6166 p = frag_more (i.vex.length);
6167 for (j = 0; j < i.vex.length; j++)
6168 p[j] = i.vex.bytes[j];
6169 }
6170
6171 /* Now the opcode; be careful about word order here! */
6172 if (i.tm.opcode_length == 1)
6173 {
6174 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6175 }
6176 else
6177 {
6178 switch (i.tm.opcode_length)
6179 {
6180 case 3:
6181 p = frag_more (3);
6182 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6183 break;
6184 case 2:
6185 p = frag_more (2);
6186 break;
6187 default:
6188 abort ();
6189 break;
6190 }
6191
6192 /* Put out high byte first: can't use md_number_to_chars! */
6193 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6194 *p = i.tm.base_opcode & 0xff;
6195 }
6196
6197 /* Now the modrm byte and sib byte (if present). */
6198 if (i.tm.opcode_modifier.modrm)
6199 {
6200 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6201 | i.rm.reg << 3
6202 | i.rm.mode << 6));
6203 /* If i.rm.regmem == ESP (4)
6204 && i.rm.mode != (Register mode)
6205 && not 16 bit
6206 ==> need second modrm byte. */
6207 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6208 && i.rm.mode != 3
6209 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6210 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6211 | i.sib.index << 3
6212 | i.sib.scale << 6));
6213 }
6214
6215 if (i.disp_operands)
6216 output_disp (insn_start_frag, insn_start_off);
6217
6218 if (i.imm_operands)
6219 output_imm (insn_start_frag, insn_start_off);
6220 }
6221
6222 #ifdef DEBUG386
6223 if (flag_debug)
6224 {
6225 pi ("" /*line*/, &i);
6226 }
6227 #endif /* DEBUG386 */
6228 }
6229
6230 /* Return the size of the displacement operand N. */
6231
6232 static int
6233 disp_size (unsigned int n)
6234 {
6235 int size = 4;
6236 if (i.types[n].bitfield.disp64)
6237 size = 8;
6238 else if (i.types[n].bitfield.disp8)
6239 size = 1;
6240 else if (i.types[n].bitfield.disp16)
6241 size = 2;
6242 return size;
6243 }
6244
6245 /* Return the size of the immediate operand N. */
6246
6247 static int
6248 imm_size (unsigned int n)
6249 {
6250 int size = 4;
6251 if (i.types[n].bitfield.imm64)
6252 size = 8;
6253 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6254 size = 1;
6255 else if (i.types[n].bitfield.imm16)
6256 size = 2;
6257 return size;
6258 }
6259
6260 static void
6261 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6262 {
6263 char *p;
6264 unsigned int n;
6265
6266 for (n = 0; n < i.operands; n++)
6267 {
6268 if (operand_type_check (i.types[n], disp))
6269 {
6270 if (i.op[n].disps->X_op == O_constant)
6271 {
6272 int size = disp_size (n);
6273 offsetT val;
6274
6275 val = offset_in_range (i.op[n].disps->X_add_number,
6276 size);
6277 p = frag_more (size);
6278 md_number_to_chars (p, val, size);
6279 }
6280 else
6281 {
6282 enum bfd_reloc_code_real reloc_type;
6283 int size = disp_size (n);
6284 int sign = i.types[n].bitfield.disp32s;
6285 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6286
6287 /* We can't have 8 bit displacement here. */
6288 gas_assert (!i.types[n].bitfield.disp8);
6289
6290 /* The PC relative address is computed relative
6291 to the instruction boundary, so in case immediate
6292		     fields follow, we need to adjust the value.  */
6293 if (pcrel && i.imm_operands)
6294 {
6295 unsigned int n1;
6296 int sz = 0;
6297
6298 for (n1 = 0; n1 < i.operands; n1++)
6299 if (operand_type_check (i.types[n1], imm))
6300 {
6301 /* Only one immediate is allowed for PC
6302 relative address. */
6303 gas_assert (sz == 0);
6304 sz = imm_size (n1);
6305 i.op[n].disps->X_add_number -= sz;
6306 }
6307 /* We should find the immediate. */
6308 gas_assert (sz != 0);
6309 }
6310
6311 p = frag_more (size);
6312 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6313 if (GOT_symbol
6314 && GOT_symbol == i.op[n].disps->X_add_symbol
6315 && (((reloc_type == BFD_RELOC_32
6316 || reloc_type == BFD_RELOC_X86_64_32S
6317 || (reloc_type == BFD_RELOC_64
6318 && object_64bit))
6319 && (i.op[n].disps->X_op == O_symbol
6320 || (i.op[n].disps->X_op == O_add
6321 && ((symbol_get_value_expression
6322 (i.op[n].disps->X_op_symbol)->X_op)
6323 == O_subtract))))
6324 || reloc_type == BFD_RELOC_32_PCREL))
6325 {
6326 offsetT add;
6327
6328 if (insn_start_frag == frag_now)
6329 add = (p - frag_now->fr_literal) - insn_start_off;
6330 else
6331 {
6332 fragS *fr;
6333
6334 add = insn_start_frag->fr_fix - insn_start_off;
6335 for (fr = insn_start_frag->fr_next;
6336 fr && fr != frag_now; fr = fr->fr_next)
6337 add += fr->fr_fix;
6338 add += p - frag_now->fr_literal;
6339 }
6340
6341 if (!object_64bit)
6342 {
6343 reloc_type = BFD_RELOC_386_GOTPC;
6344 i.op[n].imms->X_add_number += add;
6345 }
6346 else if (reloc_type == BFD_RELOC_64)
6347 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6348 else
6349 /* Don't do the adjustment for x86-64, as there
6350 the pcrel addressing is relative to the _next_
6351 insn, and that is taken care of in other code. */
6352 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6353 }
6354 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6355 i.op[n].disps, pcrel, reloc_type);
6356 }
6357 }
6358 }
6359 }
6360
6361 static void
6362 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6363 {
6364 char *p;
6365 unsigned int n;
6366
6367 for (n = 0; n < i.operands; n++)
6368 {
6369 if (operand_type_check (i.types[n], imm))
6370 {
6371 if (i.op[n].imms->X_op == O_constant)
6372 {
6373 int size = imm_size (n);
6374 offsetT val;
6375
6376 val = offset_in_range (i.op[n].imms->X_add_number,
6377 size);
6378 p = frag_more (size);
6379 md_number_to_chars (p, val, size);
6380 }
6381 else
6382 {
6383 /* Not absolute_section.
6384 Need a 32-bit fixup (don't support 8bit
6385 non-absolute imms). Try to support other
6386 sizes ... */
6387 enum bfd_reloc_code_real reloc_type;
6388 int size = imm_size (n);
6389 int sign;
6390
6391 if (i.types[n].bitfield.imm32s
6392 && (i.suffix == QWORD_MNEM_SUFFIX
6393 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6394 sign = 1;
6395 else
6396 sign = 0;
6397
6398 p = frag_more (size);
6399 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6400
6401 /* This is tough to explain. We end up with this one if we
6402 * have operands that look like
6403 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6404 * obtain the absolute address of the GOT, and it is strongly
6405 * preferable from a performance point of view to avoid using
6406 * a runtime relocation for this. The actual sequence of
6407	       * instructions often looks something like:
6408 *
6409 * call .L66
6410 * .L66:
6411 * popl %ebx
6412 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6413 *
6414 * The call and pop essentially return the absolute address
6415 * of the label .L66 and store it in %ebx. The linker itself
6416 * will ultimately change the first operand of the addl so
6417 * that %ebx points to the GOT, but to keep things simple, the
6418 * .o file must have this operand set so that it generates not
6419 * the absolute address of .L66, but the absolute address of
6420	       * itself.  This allows the linker to simply treat a GOTPC
6421 * relocation as asking for a pcrel offset to the GOT to be
6422 * added in, and the addend of the relocation is stored in the
6423 * operand field for the instruction itself.
6424 *
6425 * Our job here is to fix the operand so that it would add
6426 * the correct offset so that %ebx would point to itself. The
6427 * thing that is tricky is that .-.L66 will point to the
6428 * beginning of the instruction, so we need to further modify
6429 * the operand so that it will point to itself. There are
6430 * other cases where you have something like:
6431 *
6432 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6433 *
6434 * and here no correction would be required. Internally in
6435 * the assembler we treat operands of this form as not being
6436 * pcrel since the '.' is explicitly mentioned, and I wonder
6437 * whether it would simplify matters to do it this way. Who
6438 * knows. In earlier versions of the PIC patches, the
6439 * pcrel_adjust field was used to store the correction, but
6440 * since the expression is not pcrel, I felt it would be
6441 * confusing to do it this way. */
6442
6443 if ((reloc_type == BFD_RELOC_32
6444 || reloc_type == BFD_RELOC_X86_64_32S
6445 || reloc_type == BFD_RELOC_64)
6446 && GOT_symbol
6447 && GOT_symbol == i.op[n].imms->X_add_symbol
6448 && (i.op[n].imms->X_op == O_symbol
6449 || (i.op[n].imms->X_op == O_add
6450 && ((symbol_get_value_expression
6451 (i.op[n].imms->X_op_symbol)->X_op)
6452 == O_subtract))))
6453 {
6454 offsetT add;
6455
6456 if (insn_start_frag == frag_now)
6457 add = (p - frag_now->fr_literal) - insn_start_off;
6458 else
6459 {
6460 fragS *fr;
6461
6462 add = insn_start_frag->fr_fix - insn_start_off;
6463 for (fr = insn_start_frag->fr_next;
6464 fr && fr != frag_now; fr = fr->fr_next)
6465 add += fr->fr_fix;
6466 add += p - frag_now->fr_literal;
6467 }
6468
6469 if (!object_64bit)
6470 reloc_type = BFD_RELOC_386_GOTPC;
6471 else if (size == 4)
6472 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6473 else if (size == 8)
6474 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6475 i.op[n].imms->X_add_number += add;
6476 }
6477 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6478 i.op[n].imms, 0, reloc_type);
6479 }
6480 }
6481 }
6482 }
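/* A worked example of the GOTPC adjustment above (a sketch; the exact
   byte count depends on the chosen encoding):

       call  .L66
   .L66:
       popl  %ebx
       addl  $_GLOBAL_OFFSET_TABLE_+[.-.L66], %ebx

   If the addl encodes as two opcode/ModRM bytes followed by the 32-bit
   immediate, the immediate field starts 2 bytes into the insn, so `add'
   is 2 and X_add_number is bumped by 2.  The GOTPC relocation then
   resolves to GOT - P + addend, where P is the address of the immediate
   field, which leaves exactly GOT - .L66 in the field and hence the
   address of the GOT in %ebx after the addl.  */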
6483 \f
6484 /* x86_cons_fix_new is called via the expression parsing code when a
6485 reloc is needed. We use this hook to get the correct .got reloc. */
6486 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6487 static int cons_sign = -1;
6488
6489 void
6490 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6491 expressionS *exp)
6492 {
6493 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6494
6495 got_reloc = NO_RELOC;
6496
6497 #ifdef TE_PE
6498 if (exp->X_op == O_secrel)
6499 {
6500 exp->X_op = O_symbol;
6501 r = BFD_RELOC_32_SECREL;
6502 }
6503 #endif
6504
6505 fix_new_exp (frag, off, len, exp, 0, r);
6506 }
6507
6508 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6509 # define lex_got(reloc, adjust, types) NULL
6510 #else
6511 /* Parse operands of the form
6512 <symbol>@GOTOFF+<nnn>
6513 and similar .plt or .got references.
6514
6515 If we find one, set up the correct relocation in RELOC and copy the
6516 input string, minus the `@GOTOFF' into a malloc'd buffer for
6517 parsing by the calling routine. Return this buffer, and if ADJUST
6518 is non-null set it to the length of the string we removed from the
6519 input line. Otherwise return NULL. */
6520 static char *
6521 lex_got (enum bfd_reloc_code_real *rel,
6522 int *adjust,
6523 i386_operand_type *types)
6524 {
6525	  /* Some of the relocations depend on the size of the field that is to
6526 be relocated. But in our callers i386_immediate and i386_displacement
6527 we don't yet know the operand size (this will be set by insn
6528 matching). Hence we record the word32 relocation here,
6529 and adjust the reloc according to the real size in reloc(). */
6530 static const struct {
6531 const char *str;
6532 int len;
6533 const enum bfd_reloc_code_real rel[2];
6534 const i386_operand_type types64;
6535 } gotrel[] = {
6536 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6537 BFD_RELOC_X86_64_PLTOFF64 },
6538 OPERAND_TYPE_IMM64 },
6539 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6540 BFD_RELOC_X86_64_PLT32 },
6541 OPERAND_TYPE_IMM32_32S_DISP32 },
6542 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6543 BFD_RELOC_X86_64_GOTPLT64 },
6544 OPERAND_TYPE_IMM64_DISP64 },
6545 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6546 BFD_RELOC_X86_64_GOTOFF64 },
6547 OPERAND_TYPE_IMM64_DISP64 },
6548 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6549 BFD_RELOC_X86_64_GOTPCREL },
6550 OPERAND_TYPE_IMM32_32S_DISP32 },
6551 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6552 BFD_RELOC_X86_64_TLSGD },
6553 OPERAND_TYPE_IMM32_32S_DISP32 },
6554 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6555 _dummy_first_bfd_reloc_code_real },
6556 OPERAND_TYPE_NONE },
6557 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6558 BFD_RELOC_X86_64_TLSLD },
6559 OPERAND_TYPE_IMM32_32S_DISP32 },
6560 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6561 BFD_RELOC_X86_64_GOTTPOFF },
6562 OPERAND_TYPE_IMM32_32S_DISP32 },
6563 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6564 BFD_RELOC_X86_64_TPOFF32 },
6565 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6566 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6567 _dummy_first_bfd_reloc_code_real },
6568 OPERAND_TYPE_NONE },
6569 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6570 BFD_RELOC_X86_64_DTPOFF32 },
6571 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6572 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6573 _dummy_first_bfd_reloc_code_real },
6574 OPERAND_TYPE_NONE },
6575 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6576 _dummy_first_bfd_reloc_code_real },
6577 OPERAND_TYPE_NONE },
6578 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6579 BFD_RELOC_X86_64_GOT32 },
6580 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6581 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6582 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6583 OPERAND_TYPE_IMM32_32S_DISP32 },
6584 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6585 BFD_RELOC_X86_64_TLSDESC_CALL },
6586 OPERAND_TYPE_IMM32_32S_DISP32 },
6587 };
6588 char *cp;
6589 unsigned int j;
6590
6591 if (!IS_ELF)
6592 return NULL;
6593
6594 for (cp = input_line_pointer; *cp != '@'; cp++)
6595 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6596 return NULL;
6597
6598 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6599 {
6600 int len = gotrel[j].len;
6601 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6602 {
6603 if (gotrel[j].rel[object_64bit] != 0)
6604 {
6605 int first, second;
6606 char *tmpbuf, *past_reloc;
6607
6608 *rel = gotrel[j].rel[object_64bit];
6609 if (adjust)
6610 *adjust = len;
6611
6612 if (types)
6613 {
6614 if (flag_code != CODE_64BIT)
6615 {
6616 types->bitfield.imm32 = 1;
6617 types->bitfield.disp32 = 1;
6618 }
6619 else
6620 *types = gotrel[j].types64;
6621 }
6622
6623 if (GOT_symbol == NULL)
6624 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6625
6626 /* The length of the first part of our input line. */
6627 first = cp - input_line_pointer;
6628
6629 /* The second part goes from after the reloc token until
6630 (and including) an end_of_line char or comma. */
6631 past_reloc = cp + 1 + len;
6632 cp = past_reloc;
6633 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6634 ++cp;
6635 second = cp + 1 - past_reloc;
6636
6637 /* Allocate and copy string. The trailing NUL shouldn't
6638 be necessary, but be safe. */
6639 tmpbuf = (char *) xmalloc (first + second + 2);
6640 memcpy (tmpbuf, input_line_pointer, first);
6641 if (second != 0 && *past_reloc != ' ')
6642 /* Replace the relocation token with ' ', so that
6643 errors like foo@GOTOFF1 will be detected. */
6644 tmpbuf[first++] = ' ';
6645 memcpy (tmpbuf + first, past_reloc, second);
6646 tmpbuf[first + second] = '\0';
6647 return tmpbuf;
6648 }
6649
6650 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6651 gotrel[j].str, 1 << (5 + object_64bit));
6652 return NULL;
6653 }
6654 }
6655
6656 /* Might be a symbol version string. Don't as_bad here. */
6657 return NULL;
6658 }
6659 #endif
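/* An illustration of what lex_got does (hypothetical input, 32-bit ELF
   output assumed): given the operand text

       foo@GOTOFF+4

   it sets *rel to BFD_RELOC_386_GOTOFF, sets *adjust to 6 (the length
   of "GOTOFF"), and returns a malloc'd copy of the operand with the
   reloc token replaced by a single space:

       foo +4

   which the caller then parses as an ordinary expression.  */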
6660
6661 void
6662 x86_cons (expressionS *exp, int size)
6663 {
6664 intel_syntax = -intel_syntax;
6665
6666 exp->X_md = 0;
6667 if (size == 4 || (object_64bit && size == 8))
6668 {
6669 /* Handle @GOTOFF and the like in an expression. */
6670 char *save;
6671 char *gotfree_input_line;
6672 int adjust = 0;
6673
6674 save = input_line_pointer;
6675 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6676 if (gotfree_input_line)
6677 input_line_pointer = gotfree_input_line;
6678
6679 expression (exp);
6680
6681 if (gotfree_input_line)
6682 {
6683 /* expression () has merrily parsed up to the end of line,
6684 or a comma - in the wrong buffer. Transfer how far
6685 input_line_pointer has moved to the right buffer. */
6686 input_line_pointer = (save
6687 + (input_line_pointer - gotfree_input_line)
6688 + adjust);
6689 free (gotfree_input_line);
6690 if (exp->X_op == O_constant
6691 || exp->X_op == O_absent
6692 || exp->X_op == O_illegal
6693 || exp->X_op == O_register
6694 || exp->X_op == O_big)
6695 {
6696 char c = *input_line_pointer;
6697 *input_line_pointer = 0;
6698 as_bad (_("missing or invalid expression `%s'"), save);
6699 *input_line_pointer = c;
6700 }
6701 }
6702 }
6703 else
6704 expression (exp);
6705
6706 intel_syntax = -intel_syntax;
6707
6708 if (intel_syntax)
6709 i386_intel_simplify (exp);
6710 }
6711
6712 static void
6713 signed_cons (int size)
6714 {
6715 if (flag_code == CODE_64BIT)
6716 cons_sign = 1;
6717 cons (size);
6718 cons_sign = -1;
6719 }
6720
6721 #ifdef TE_PE
6722 static void
6723 pe_directive_secrel (dummy)
6724 int dummy ATTRIBUTE_UNUSED;
6725 {
6726 expressionS exp;
6727
6728 do
6729 {
6730 expression (&exp);
6731 if (exp.X_op == O_symbol)
6732 exp.X_op = O_secrel;
6733
6734 emit_expr (&exp, 4);
6735 }
6736 while (*input_line_pointer++ == ',');
6737
6738 input_line_pointer--;
6739 demand_empty_rest_of_line ();
6740 }
6741 #endif
6742
6743 static int
6744 i386_immediate (char *imm_start)
6745 {
6746 char *save_input_line_pointer;
6747 char *gotfree_input_line;
6748 segT exp_seg = 0;
6749 expressionS *exp;
6750 i386_operand_type types;
6751
6752 operand_type_set (&types, ~0);
6753
6754 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6755 {
6756 as_bad (_("at most %d immediate operands are allowed"),
6757 MAX_IMMEDIATE_OPERANDS);
6758 return 0;
6759 }
6760
6761 exp = &im_expressions[i.imm_operands++];
6762 i.op[this_operand].imms = exp;
6763
6764 if (is_space_char (*imm_start))
6765 ++imm_start;
6766
6767 save_input_line_pointer = input_line_pointer;
6768 input_line_pointer = imm_start;
6769
6770 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6771 if (gotfree_input_line)
6772 input_line_pointer = gotfree_input_line;
6773
6774 exp_seg = expression (exp);
6775
6776 SKIP_WHITESPACE ();
6777 if (*input_line_pointer)
6778 as_bad (_("junk `%s' after expression"), input_line_pointer);
6779
6780 input_line_pointer = save_input_line_pointer;
6781 if (gotfree_input_line)
6782 {
6783 free (gotfree_input_line);
6784
6785 if (exp->X_op == O_constant || exp->X_op == O_register)
6786 exp->X_op = O_illegal;
6787 }
6788
6789 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6790 }
6791
6792 static int
6793 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6794 i386_operand_type types, const char *imm_start)
6795 {
6796 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6797 {
6798 if (imm_start)
6799 as_bad (_("missing or invalid immediate expression `%s'"),
6800 imm_start);
6801 return 0;
6802 }
6803 else if (exp->X_op == O_constant)
6804 {
6805 /* Size it properly later. */
6806 i.types[this_operand].bitfield.imm64 = 1;
6807 /* If not 64bit, sign extend val. */
6808 if (flag_code != CODE_64BIT
6809 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6810 exp->X_add_number
6811 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6812 }
6813 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6814 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6815 && exp_seg != absolute_section
6816 && exp_seg != text_section
6817 && exp_seg != data_section
6818 && exp_seg != bss_section
6819 && exp_seg != undefined_section
6820 && !bfd_is_com_section (exp_seg))
6821 {
6822 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6823 return 0;
6824 }
6825 #endif
6826 else if (!intel_syntax && exp->X_op == O_register)
6827 {
6828 if (imm_start)
6829 as_bad (_("illegal immediate register operand %s"), imm_start);
6830 return 0;
6831 }
6832 else
6833 {
6834 /* This is an address. The size of the address will be
6835 determined later, depending on destination register,
6836 suffix, or the default for the section. */
6837 i.types[this_operand].bitfield.imm8 = 1;
6838 i.types[this_operand].bitfield.imm16 = 1;
6839 i.types[this_operand].bitfield.imm32 = 1;
6840 i.types[this_operand].bitfield.imm32s = 1;
6841 i.types[this_operand].bitfield.imm64 = 1;
6842 i.types[this_operand] = operand_type_and (i.types[this_operand],
6843 types);
6844 }
6845
6846 return 1;
6847 }
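/* The XOR/SUB idiom used above sign-extends bit 31 portably, without
   relying on implementation-defined shifts of negative values.  A
   minimal standalone sketch (illustrative, not gas code):

       uint64_t sign_extend_32 (uint64_t x)   // assumes x < 2^32
       {
         return (x ^ 0x80000000u) - 0x80000000u;
       }

   e.g. 0xffffffff becomes (uint64_t) -1 while 0x7fffffff is unchanged.  */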
6848
6849 static char *
6850 i386_scale (char *scale)
6851 {
6852 offsetT val;
6853 char *save = input_line_pointer;
6854
6855 input_line_pointer = scale;
6856 val = get_absolute_expression ();
6857
6858 switch (val)
6859 {
6860 case 1:
6861 i.log2_scale_factor = 0;
6862 break;
6863 case 2:
6864 i.log2_scale_factor = 1;
6865 break;
6866 case 4:
6867 i.log2_scale_factor = 2;
6868 break;
6869 case 8:
6870 i.log2_scale_factor = 3;
6871 break;
6872 default:
6873 {
6874 char sep = *input_line_pointer;
6875
6876 *input_line_pointer = '\0';
6877 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6878 scale);
6879 *input_line_pointer = sep;
6880 input_line_pointer = save;
6881 return NULL;
6882 }
6883 }
6884 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6885 {
6886 as_warn (_("scale factor of %d without an index register"),
6887 1 << i.log2_scale_factor);
6888 i.log2_scale_factor = 0;
6889 }
6890 scale = input_line_pointer;
6891 input_line_pointer = save;
6892 return scale;
6893 }
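/* Example (illustrative): for the operand `(%ebx,%esi,4)' the scale
   string "4" parsed here yields i.log2_scale_factor == 2, which later
   becomes the two scale bits of the SIB byte.  A scale given without an
   index register only draws a warning and is forced back to a log2
   factor of 0, i.e. a scale of 1.  */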
6894
6895 static int
6896 i386_displacement (char *disp_start, char *disp_end)
6897 {
6898 expressionS *exp;
6899 segT exp_seg = 0;
6900 char *save_input_line_pointer;
6901 char *gotfree_input_line;
6902 int override;
6903 i386_operand_type bigdisp, types = anydisp;
6904 int ret;
6905
6906 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6907 {
6908 as_bad (_("at most %d displacement operands are allowed"),
6909 MAX_MEMORY_OPERANDS);
6910 return 0;
6911 }
6912
6913 operand_type_set (&bigdisp, 0);
6914 if ((i.types[this_operand].bitfield.jumpabsolute)
6915 || (!current_templates->start->opcode_modifier.jump
6916 && !current_templates->start->opcode_modifier.jumpdword))
6917 {
6918 bigdisp.bitfield.disp32 = 1;
6919 override = (i.prefix[ADDR_PREFIX] != 0);
6920 if (flag_code == CODE_64BIT)
6921 {
6922 if (!override)
6923 {
6924 bigdisp.bitfield.disp32s = 1;
6925 bigdisp.bitfield.disp64 = 1;
6926 }
6927 }
6928 else if ((flag_code == CODE_16BIT) ^ override)
6929 {
6930 bigdisp.bitfield.disp32 = 0;
6931 bigdisp.bitfield.disp16 = 1;
6932 }
6933 }
6934 else
6935 {
6936 /* For PC-relative branches, the width of the displacement
6937 is dependent upon data size, not address size. */
6938 override = (i.prefix[DATA_PREFIX] != 0);
6939 if (flag_code == CODE_64BIT)
6940 {
6941 if (override || i.suffix == WORD_MNEM_SUFFIX)
6942 bigdisp.bitfield.disp16 = 1;
6943 else
6944 {
6945 bigdisp.bitfield.disp32 = 1;
6946 bigdisp.bitfield.disp32s = 1;
6947 }
6948 }
6949 else
6950 {
6951 if (!override)
6952 override = (i.suffix == (flag_code != CODE_16BIT
6953 ? WORD_MNEM_SUFFIX
6954 : LONG_MNEM_SUFFIX));
6955 bigdisp.bitfield.disp32 = 1;
6956 if ((flag_code == CODE_16BIT) ^ override)
6957 {
6958 bigdisp.bitfield.disp32 = 0;
6959 bigdisp.bitfield.disp16 = 1;
6960 }
6961 }
6962 }
6963 i.types[this_operand] = operand_type_or (i.types[this_operand],
6964 bigdisp);
6965
6966 exp = &disp_expressions[i.disp_operands];
6967 i.op[this_operand].disps = exp;
6968 i.disp_operands++;
6969 save_input_line_pointer = input_line_pointer;
6970 input_line_pointer = disp_start;
6971 END_STRING_AND_SAVE (disp_end);
6972
6973 #ifndef GCC_ASM_O_HACK
6974 #define GCC_ASM_O_HACK 0
6975 #endif
6976 #if GCC_ASM_O_HACK
6977 END_STRING_AND_SAVE (disp_end + 1);
6978 if (i.types[this_operand].bitfield.baseIndex
6979 && displacement_string_end[-1] == '+')
6980 {
6981 /* This hack is to avoid a warning when using the "o"
6982 constraint within gcc asm statements.
6983 For instance:
6984
6985 #define _set_tssldt_desc(n,addr,limit,type) \
6986 __asm__ __volatile__ ( \
6987 "movw %w2,%0\n\t" \
6988 "movw %w1,2+%0\n\t" \
6989 "rorl $16,%1\n\t" \
6990 "movb %b1,4+%0\n\t" \
6991 "movb %4,5+%0\n\t" \
6992 "movb $0,6+%0\n\t" \
6993 "movb %h1,7+%0\n\t" \
6994 "rorl $16,%1" \
6995 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6996
6997 This works great except that the output assembler ends
6998 up looking a bit weird if it turns out that there is
6999 no offset. You end up producing code that looks like:
7000
7001 #APP
7002 movw $235,(%eax)
7003 movw %dx,2+(%eax)
7004 rorl $16,%edx
7005 movb %dl,4+(%eax)
7006 movb $137,5+(%eax)
7007 movb $0,6+(%eax)
7008 movb %dh,7+(%eax)
7009 rorl $16,%edx
7010 #NO_APP
7011
7012 So here we provide the missing zero. */
7013
7014 *displacement_string_end = '0';
7015 }
7016 #endif
7017 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7018 if (gotfree_input_line)
7019 input_line_pointer = gotfree_input_line;
7020
7021 exp_seg = expression (exp);
7022
7023 SKIP_WHITESPACE ();
7024 if (*input_line_pointer)
7025 as_bad (_("junk `%s' after expression"), input_line_pointer);
7026 #if GCC_ASM_O_HACK
7027 RESTORE_END_STRING (disp_end + 1);
7028 #endif
7029 input_line_pointer = save_input_line_pointer;
7030 if (gotfree_input_line)
7031 {
7032 free (gotfree_input_line);
7033
7034 if (exp->X_op == O_constant || exp->X_op == O_register)
7035 exp->X_op = O_illegal;
7036 }
7037
7038 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7039
7040 RESTORE_END_STRING (disp_end);
7041
7042 return ret;
7043 }
7044
7045 static int
7046 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7047 i386_operand_type types, const char *disp_start)
7048 {
7049 i386_operand_type bigdisp;
7050 int ret = 1;
7051
7052 /* We do this to make sure that the section symbol is in
7053 the symbol table. We will ultimately change the relocation
7054 to be relative to the beginning of the section. */
7055 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7056 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7057 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7058 {
7059 if (exp->X_op != O_symbol)
7060 goto inv_disp;
7061
7062 if (S_IS_LOCAL (exp->X_add_symbol)
7063 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7064 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7065 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7066 exp->X_op = O_subtract;
7067 exp->X_op_symbol = GOT_symbol;
7068 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7069 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7070 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7071 i.reloc[this_operand] = BFD_RELOC_64;
7072 else
7073 i.reloc[this_operand] = BFD_RELOC_32;
7074 }
7075
7076 else if (exp->X_op == O_absent
7077 || exp->X_op == O_illegal
7078 || exp->X_op == O_big)
7079 {
7080 inv_disp:
7081 as_bad (_("missing or invalid displacement expression `%s'"),
7082 disp_start);
7083 ret = 0;
7084 }
7085
7086 else if (flag_code == CODE_64BIT
7087 && !i.prefix[ADDR_PREFIX]
7088 && exp->X_op == O_constant)
7089 {
7090	      /* Since the displacement is sign-extended to 64 bits, don't allow
7091		 disp32, and turn off disp32s if the value is out of range.  */
7092 i.types[this_operand].bitfield.disp32 = 0;
7093 if (!fits_in_signed_long (exp->X_add_number))
7094 {
7095 i.types[this_operand].bitfield.disp32s = 0;
7096 if (i.types[this_operand].bitfield.baseindex)
7097 {
7098 as_bad (_("0x%lx out range of signed 32bit displacement"),
7099 (long) exp->X_add_number);
7100 ret = 0;
7101 }
7102 }
7103 }
7104
7105 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7106 else if (exp->X_op != O_constant
7107 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7108 && exp_seg != absolute_section
7109 && exp_seg != text_section
7110 && exp_seg != data_section
7111 && exp_seg != bss_section
7112 && exp_seg != undefined_section
7113 && !bfd_is_com_section (exp_seg))
7114 {
7115 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7116 ret = 0;
7117 }
7118 #endif
7119
7120 /* Check if this is a displacement only operand. */
7121 bigdisp = i.types[this_operand];
7122 bigdisp.bitfield.disp8 = 0;
7123 bigdisp.bitfield.disp16 = 0;
7124 bigdisp.bitfield.disp32 = 0;
7125 bigdisp.bitfield.disp32s = 0;
7126 bigdisp.bitfield.disp64 = 0;
7127 if (operand_type_all_zero (&bigdisp))
7128 i.types[this_operand] = operand_type_and (i.types[this_operand],
7129 types);
7130
7131 return ret;
7132 }
7133
7134 /* Make sure the memory operand we've been dealt is valid.
7135 Return 1 on success, 0 on a failure. */
7136
7137 static int
7138 i386_index_check (const char *operand_string)
7139 {
7140 int ok;
7141 const char *kind = "base/index";
7142 #if INFER_ADDR_PREFIX
7143 int fudged = 0;
7144
7145 tryprefix:
7146 #endif
7147 ok = 1;
7148 if (current_templates->start->opcode_modifier.isstring
7149 && !current_templates->start->opcode_modifier.immext
7150 && (current_templates->end[-1].opcode_modifier.isstring
7151 || i.mem_operands))
7152 {
7153 /* Memory operands of string insns are special in that they only allow
7154 a single register (rDI, rSI, or rBX) as their memory address. */
7155 unsigned int expected;
7156
7157 kind = "string address";
7158
7159 if (current_templates->start->opcode_modifier.w)
7160 {
7161 i386_operand_type type = current_templates->end[-1].operand_types[0];
7162
7163 if (!type.bitfield.baseindex
7164 || ((!i.mem_operands != !intel_syntax)
7165 && current_templates->end[-1].operand_types[1]
7166 .bitfield.baseindex))
7167 type = current_templates->end[-1].operand_types[1];
7168 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7169 }
7170 else
7171 expected = 3 /* rBX */;
7172
7173 if (!i.base_reg || i.index_reg
7174 || operand_type_check (i.types[this_operand], disp))
7175 ok = -1;
7176 else if (!(flag_code == CODE_64BIT
7177 ? i.prefix[ADDR_PREFIX]
7178 ? i.base_reg->reg_type.bitfield.reg32
7179 : i.base_reg->reg_type.bitfield.reg64
7180 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7181 ? i.base_reg->reg_type.bitfield.reg32
7182 : i.base_reg->reg_type.bitfield.reg16))
7183 ok = 0;
7184 else if (i.base_reg->reg_num != expected)
7185 ok = -1;
7186
7187 if (ok < 0)
7188 {
7189 unsigned int j;
7190
7191 for (j = 0; j < i386_regtab_size; ++j)
7192 if ((flag_code == CODE_64BIT
7193 ? i.prefix[ADDR_PREFIX]
7194 ? i386_regtab[j].reg_type.bitfield.reg32
7195 : i386_regtab[j].reg_type.bitfield.reg64
7196 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7197 ? i386_regtab[j].reg_type.bitfield.reg32
7198 : i386_regtab[j].reg_type.bitfield.reg16)
7199 && i386_regtab[j].reg_num == expected)
7200 break;
7201 gas_assert (j < i386_regtab_size);
7202 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7203 operand_string,
7204 intel_syntax ? '[' : '(',
7205 register_prefix,
7206 i386_regtab[j].reg_name,
7207 intel_syntax ? ']' : ')');
7208 ok = 1;
7209 }
7210 }
7211 else if (flag_code == CODE_64BIT)
7212 {
7213 if ((i.base_reg
7214 && ((i.prefix[ADDR_PREFIX] == 0
7215 && !i.base_reg->reg_type.bitfield.reg64)
7216 || (i.prefix[ADDR_PREFIX]
7217 && !i.base_reg->reg_type.bitfield.reg32))
7218 && (i.index_reg
7219 || i.base_reg->reg_num !=
7220 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7221 || (i.index_reg
7222 && !(i.index_reg->reg_type.bitfield.regxmm
7223 || i.index_reg->reg_type.bitfield.regymm)
7224 && (!i.index_reg->reg_type.bitfield.baseindex
7225 || (i.prefix[ADDR_PREFIX] == 0
7226 && i.index_reg->reg_num != RegRiz
7227 && !i.index_reg->reg_type.bitfield.reg64
7228 )
7229 || (i.prefix[ADDR_PREFIX]
7230 && i.index_reg->reg_num != RegEiz
7231 && !i.index_reg->reg_type.bitfield.reg32))))
7232 ok = 0;
7233 }
7234 else
7235 {
7236 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7237 {
7238 /* 16bit checks. */
7239 if ((i.base_reg
7240 && (!i.base_reg->reg_type.bitfield.reg16
7241 || !i.base_reg->reg_type.bitfield.baseindex))
7242 || (i.index_reg
7243 && (!i.index_reg->reg_type.bitfield.reg16
7244 || !i.index_reg->reg_type.bitfield.baseindex
7245 || !(i.base_reg
7246 && i.base_reg->reg_num < 6
7247 && i.index_reg->reg_num >= 6
7248 && i.log2_scale_factor == 0))))
7249 ok = 0;
7250 }
7251 else
7252 {
7253 /* 32bit checks. */
7254 if ((i.base_reg
7255 && !i.base_reg->reg_type.bitfield.reg32)
7256 || (i.index_reg
7257 && !i.index_reg->reg_type.bitfield.regxmm
7258 && !i.index_reg->reg_type.bitfield.regymm
7259 && ((!i.index_reg->reg_type.bitfield.reg32
7260 && i.index_reg->reg_num != RegEiz)
7261 || !i.index_reg->reg_type.bitfield.baseindex)))
7262 ok = 0;
7263 }
7264 }
7265 if (!ok)
7266 {
7267 #if INFER_ADDR_PREFIX
7268 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7269 {
7270 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7271 i.prefixes += 1;
7272 /* Change the size of any displacement too. At most one of
7273 Disp16 or Disp32 is set.
7274 FIXME. There doesn't seem to be any real need for separate
7275 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7276 Removing them would probably clean up the code quite a lot. */
7277 if (flag_code != CODE_64BIT
7278 && (i.types[this_operand].bitfield.disp16
7279 || i.types[this_operand].bitfield.disp32))
7280 i.types[this_operand]
7281 = operand_type_xor (i.types[this_operand], disp16_32);
7282 fudged = 1;
7283 goto tryprefix;
7284 }
7285 if (fudged)
7286 as_bad (_("`%s' is not a valid %s expression"),
7287 operand_string,
7288 kind);
7289 else
7290 #endif
7291 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7292 operand_string,
7293 flag_code_names[i.prefix[ADDR_PREFIX]
7294 ? flag_code == CODE_32BIT
7295 ? CODE_16BIT
7296 : CODE_32BIT
7297 : flag_code],
7298 kind);
7299 }
7300 return ok;
7301 }
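/* A sketch of what the 16-bit check above accepts (the standard 8086
   addressing modes): the base must be %bx or %bp and the index %si or
   %di, with no scale, e.g.

       (%bx,%si)   (%bp,%di)   (%bx)   (%si)     valid
       (%si,%bx)   (%ax)       (%bx,%si,2)       rejected

   The reg_num test (base < 6, index >= 6) encodes exactly this split,
   since %si and %di are registers 6 and 7 in the 16-bit register set.  */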
7302
7303 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7304 on error. */
7305
7306 static int
7307 i386_att_operand (char *operand_string)
7308 {
7309 const reg_entry *r;
7310 char *end_op;
7311 char *op_string = operand_string;
7312
7313 if (is_space_char (*op_string))
7314 ++op_string;
7315
7316 /* We check for an absolute prefix (differentiating,
7317	     for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7318 if (*op_string == ABSOLUTE_PREFIX)
7319 {
7320 ++op_string;
7321 if (is_space_char (*op_string))
7322 ++op_string;
7323 i.types[this_operand].bitfield.jumpabsolute = 1;
7324 }
7325
7326 /* Check if operand is a register. */
7327 if ((r = parse_register (op_string, &end_op)) != NULL)
7328 {
7329 i386_operand_type temp;
7330
7331 /* Check for a segment override by searching for ':' after a
7332 segment register. */
7333 op_string = end_op;
7334 if (is_space_char (*op_string))
7335 ++op_string;
7336 if (*op_string == ':'
7337 && (r->reg_type.bitfield.sreg2
7338 || r->reg_type.bitfield.sreg3))
7339 {
7340 switch (r->reg_num)
7341 {
7342 case 0:
7343 i.seg[i.mem_operands] = &es;
7344 break;
7345 case 1:
7346 i.seg[i.mem_operands] = &cs;
7347 break;
7348 case 2:
7349 i.seg[i.mem_operands] = &ss;
7350 break;
7351 case 3:
7352 i.seg[i.mem_operands] = &ds;
7353 break;
7354 case 4:
7355 i.seg[i.mem_operands] = &fs;
7356 break;
7357 case 5:
7358 i.seg[i.mem_operands] = &gs;
7359 break;
7360 }
7361
7362 /* Skip the ':' and whitespace. */
7363 ++op_string;
7364 if (is_space_char (*op_string))
7365 ++op_string;
7366
7367 if (!is_digit_char (*op_string)
7368 && !is_identifier_char (*op_string)
7369 && *op_string != '('
7370 && *op_string != ABSOLUTE_PREFIX)
7371 {
7372 as_bad (_("bad memory operand `%s'"), op_string);
7373 return 0;
7374 }
7375 /* Handle case of %es:*foo. */
7376 if (*op_string == ABSOLUTE_PREFIX)
7377 {
7378 ++op_string;
7379 if (is_space_char (*op_string))
7380 ++op_string;
7381 i.types[this_operand].bitfield.jumpabsolute = 1;
7382 }
7383 goto do_memory_reference;
7384 }
7385 if (*op_string)
7386 {
7387 as_bad (_("junk `%s' after register"), op_string);
7388 return 0;
7389 }
7390 temp = r->reg_type;
7391 temp.bitfield.baseindex = 0;
7392 i.types[this_operand] = operand_type_or (i.types[this_operand],
7393 temp);
7394 i.types[this_operand].bitfield.unspecified = 0;
7395 i.op[this_operand].regs = r;
7396 i.reg_operands++;
7397 }
7398 else if (*op_string == REGISTER_PREFIX)
7399 {
7400 as_bad (_("bad register name `%s'"), op_string);
7401 return 0;
7402 }
7403 else if (*op_string == IMMEDIATE_PREFIX)
7404 {
7405 ++op_string;
7406 if (i.types[this_operand].bitfield.jumpabsolute)
7407 {
7408 as_bad (_("immediate operand illegal with absolute jump"));
7409 return 0;
7410 }
7411 if (!i386_immediate (op_string))
7412 return 0;
7413 }
7414 else if (is_digit_char (*op_string)
7415 || is_identifier_char (*op_string)
7416 || *op_string == '(')
7417 {
7418 /* This is a memory reference of some sort. */
7419 char *base_string;
7420
7421 /* Start and end of displacement string expression (if found). */
7422 char *displacement_string_start;
7423 char *displacement_string_end;
7424
7425 do_memory_reference:
7426 if ((i.mem_operands == 1
7427 && !current_templates->start->opcode_modifier.isstring)
7428 || i.mem_operands == 2)
7429 {
7430 as_bad (_("too many memory references for `%s'"),
7431 current_templates->start->name);
7432 return 0;
7433 }
7434
7435 /* Check for base index form. We detect the base index form by
7436 looking for an ')' at the end of the operand, searching
7437 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7438 after the '('. */
7439 base_string = op_string + strlen (op_string);
7440
7441 --base_string;
7442 if (is_space_char (*base_string))
7443 --base_string;
7444
7445	      /* If we only have a displacement, set up for it to be parsed later.  */
7446 displacement_string_start = op_string;
7447 displacement_string_end = base_string + 1;
7448
7449 if (*base_string == ')')
7450 {
7451 char *temp_string;
7452 unsigned int parens_balanced = 1;
7453 /* We've already checked that the number of left & right ()'s are
7454 equal, so this loop will not be infinite. */
7455 do
7456 {
7457 base_string--;
7458 if (*base_string == ')')
7459 parens_balanced++;
7460 if (*base_string == '(')
7461 parens_balanced--;
7462 }
7463 while (parens_balanced);
7464
7465 temp_string = base_string;
7466
7467 /* Skip past '(' and whitespace. */
7468 ++base_string;
7469 if (is_space_char (*base_string))
7470 ++base_string;
7471
7472 if (*base_string == ','
7473 || ((i.base_reg = parse_register (base_string, &end_op))
7474 != NULL))
7475 {
7476 displacement_string_end = temp_string;
7477
7478 i.types[this_operand].bitfield.baseindex = 1;
7479
7480 if (i.base_reg)
7481 {
7482 base_string = end_op;
7483 if (is_space_char (*base_string))
7484 ++base_string;
7485 }
7486
7487 /* There may be an index reg or scale factor here. */
7488 if (*base_string == ',')
7489 {
7490 ++base_string;
7491 if (is_space_char (*base_string))
7492 ++base_string;
7493
7494 if ((i.index_reg = parse_register (base_string, &end_op))
7495 != NULL)
7496 {
7497 base_string = end_op;
7498 if (is_space_char (*base_string))
7499 ++base_string;
7500 if (*base_string == ',')
7501 {
7502 ++base_string;
7503 if (is_space_char (*base_string))
7504 ++base_string;
7505 }
7506 else if (*base_string != ')')
7507 {
7508 as_bad (_("expecting `,' or `)' "
7509 "after index register in `%s'"),
7510 operand_string);
7511 return 0;
7512 }
7513 }
7514 else if (*base_string == REGISTER_PREFIX)
7515 {
7516 as_bad (_("bad register name `%s'"), base_string);
7517 return 0;
7518 }
7519
7520 /* Check for scale factor. */
7521 if (*base_string != ')')
7522 {
7523 char *end_scale = i386_scale (base_string);
7524
7525 if (!end_scale)
7526 return 0;
7527
7528 base_string = end_scale;
7529 if (is_space_char (*base_string))
7530 ++base_string;
7531 if (*base_string != ')')
7532 {
7533 as_bad (_("expecting `)' "
7534 "after scale factor in `%s'"),
7535 operand_string);
7536 return 0;
7537 }
7538 }
7539 else if (!i.index_reg)
7540 {
7541 as_bad (_("expecting index register or scale factor "
7542 "after `,'; got '%c'"),
7543 *base_string);
7544 return 0;
7545 }
7546 }
7547 else if (*base_string != ')')
7548 {
7549 as_bad (_("expecting `,' or `)' "
7550 "after base register in `%s'"),
7551 operand_string);
7552 return 0;
7553 }
7554 }
7555 else if (*base_string == REGISTER_PREFIX)
7556 {
7557 as_bad (_("bad register name `%s'"), base_string);
7558 return 0;
7559 }
7560 }
7561
7562 /* If there's an expression beginning the operand, parse it,
7563 assuming displacement_string_start and
7564 displacement_string_end are meaningful. */
7565 if (displacement_string_start != displacement_string_end)
7566 {
7567 if (!i386_displacement (displacement_string_start,
7568 displacement_string_end))
7569 return 0;
7570 }
7571
7572 /* Special case for (%dx) while doing input/output op. */
7573 if (i.base_reg
7574 && operand_type_equal (&i.base_reg->reg_type,
7575 &reg16_inoutportreg)
7576 && i.index_reg == 0
7577 && i.log2_scale_factor == 0
7578 && i.seg[i.mem_operands] == 0
7579 && !operand_type_check (i.types[this_operand], disp))
7580 {
7581 i.types[this_operand] = inoutportreg;
7582 return 1;
7583 }
7584
7585 if (i386_index_check (operand_string) == 0)
7586 return 0;
7587 i.types[this_operand].bitfield.mem = 1;
7588 i.mem_operands++;
7589 }
7590 else
7591 {
7592 /* It's not a memory operand; argh! */
7593 as_bad (_("invalid char %s beginning operand %d `%s'"),
7594 output_invalid (*op_string),
7595 this_operand + 1,
7596 op_string);
7597 return 0;
7598 }
7599 return 1; /* Normal return. */
7600 }
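/* Anatomy of an AT&T memory operand as parsed above (illustrative):

       %gs:-4(%ebp,%esi,4)

   segment override  %gs   -> i.seg[i.mem_operands]
   displacement      -4    -> i386_displacement
   base register     %ebp  -> i.base_reg
   index register    %esi  -> i.index_reg
   scale factor      4     -> i386_scale

   Register operands (`%eax'), immediates (`$1') and absolute jump
   targets (`*%eax') are dispatched by the earlier branches of this
   function.  */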
7601 \f
7602 /* md_estimate_size_before_relax()
7603
7604 Called just before relax() for rs_machine_dependent frags. The x86
7605 assembler uses these frags to handle variable size jump
7606 instructions.
7607
7608 Any symbol that is now undefined will not become defined.
7609 Return the correct fr_subtype in the frag.
7610 Return the initial "guess for variable size of frag" to caller.
7611 The guess is actually the growth beyond the fixed part. Whatever
7612 we do to grow the fixed or variable part contributes to our
7613 returned value. */
7614
7615 int
7616 md_estimate_size_before_relax (fragP, segment)
7617 fragS *fragP;
7618 segT segment;
7619 {
7620 /* We've already got fragP->fr_subtype right; all we have to do is
7621 check for un-relaxable symbols. On an ELF system, we can't relax
7622 an externally visible symbol, because it may be overridden by a
7623 shared library. */
7624 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7625 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7626 || (IS_ELF
7627 && (S_IS_EXTERNAL (fragP->fr_symbol)
7628 || S_IS_WEAK (fragP->fr_symbol)
7629 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7630 & BSF_GNU_INDIRECT_FUNCTION))))
7631 #endif
7632 #if defined (OBJ_COFF) && defined (TE_PE)
7633 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7634 && S_IS_WEAK (fragP->fr_symbol))
7635 #endif
7636 )
7637 {
7638 /* Symbol is undefined in this segment, or we need to keep a
7639 reloc so that weak symbols can be overridden. */
7640 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7641 enum bfd_reloc_code_real reloc_type;
7642 unsigned char *opcode;
7643 int old_fr_fix;
7644
7645 if (fragP->fr_var != NO_RELOC)
7646 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7647 else if (size == 2)
7648 reloc_type = BFD_RELOC_16_PCREL;
7649 else
7650 reloc_type = BFD_RELOC_32_PCREL;
7651
7652 old_fr_fix = fragP->fr_fix;
7653 opcode = (unsigned char *) fragP->fr_opcode;
7654
7655 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7656 {
7657 case UNCOND_JUMP:
7658 /* Make jmp (0xeb) a (d)word displacement jump. */
7659 opcode[0] = 0xe9;
7660 fragP->fr_fix += size;
7661 fix_new (fragP, old_fr_fix, size,
7662 fragP->fr_symbol,
7663 fragP->fr_offset, 1,
7664 reloc_type);
7665 break;
7666
7667 case COND_JUMP86:
7668 if (size == 2
7669 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7670 {
7671 /* Negate the condition, and branch past an
7672 unconditional jump. */
7673 opcode[0] ^= 1;
7674 opcode[1] = 3;
7675 /* Insert an unconditional jump. */
7676 opcode[2] = 0xe9;
7677 /* We added two extra opcode bytes, and have a two byte
7678 offset. */
7679 fragP->fr_fix += 2 + 2;
7680 fix_new (fragP, old_fr_fix + 2, 2,
7681 fragP->fr_symbol,
7682 fragP->fr_offset, 1,
7683 reloc_type);
7684 break;
7685 }
7686 /* Fall through. */
7687
7688 case COND_JUMP:
7689 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7690 {
7691 fixS *fixP;
7692
7693 fragP->fr_fix += 1;
7694 fixP = fix_new (fragP, old_fr_fix, 1,
7695 fragP->fr_symbol,
7696 fragP->fr_offset, 1,
7697 BFD_RELOC_8_PCREL);
7698 fixP->fx_signed = 1;
7699 break;
7700 }
7701
7702 /* This changes the byte-displacement jump 0x7N
7703 to the (d)word-displacement jump 0x0f,0x8N. */
7704 opcode[1] = opcode[0] + 0x10;
7705 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7706 /* We've added an opcode byte. */
7707 fragP->fr_fix += 1 + size;
7708 fix_new (fragP, old_fr_fix + 1, size,
7709 fragP->fr_symbol,
7710 fragP->fr_offset, 1,
7711 reloc_type);
7712 break;
7713
7714 default:
7715 BAD_CASE (fragP->fr_subtype);
7716 break;
7717 }
7718 frag_wane (fragP);
7719 return fragP->fr_fix - old_fr_fix;
7720 }
7721
7722 /* Guess size depending on current relax state. Initially the relax
7723 state will correspond to a short jump and we return 1, because
7724 the variable part of the frag (the branch offset) is one byte
7725 long. However, we can relax a section more than once and in that
7726 case we must either set fr_subtype back to the unrelaxed state,
7727 or return the value for the appropriate branch. */
7728 return md_relax_table[fragP->fr_subtype].rlx_length;
7729 }
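/* Byte-level sketch of the promotions performed above for a jump whose
   target cannot be relaxed (standard encodings, shown for `jne'):

       75 xx              short form written by output_branch
       0F 85 rel32        COND_JUMP promotion       (fr_fix += 1 + size)
       74 03 E9 rel16     COND_JUMP86, 16-bit case: the negated condition
                          jumps over a plain E9 near jump (fr_fix += 2 + 2)
       E9 rel32           UNCOND_JUMP promotion of EB xx

   Only when no_cond_jump_promotion is in effect does a conditional jump
   keep its one-byte displacement and get a BFD_RELOC_8_PCREL fixup.  */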
7730
7731 /* Called after relax() is finished.
7732
7733 In: Address of frag.
7734 fr_type == rs_machine_dependent.
7735 fr_subtype is what the address relaxed to.
7736
7737 Out: Any fixSs and constants are set up.
7738 Caller will turn frag into a ".space 0". */
7739
7740 void
7741 md_convert_frag (abfd, sec, fragP)
7742 bfd *abfd ATTRIBUTE_UNUSED;
7743 segT sec ATTRIBUTE_UNUSED;
7744 fragS *fragP;
7745 {
7746 unsigned char *opcode;
7747 unsigned char *where_to_put_displacement = NULL;
7748 offsetT target_address;
7749 offsetT opcode_address;
7750 unsigned int extension = 0;
7751 offsetT displacement_from_opcode_start;
7752
7753 opcode = (unsigned char *) fragP->fr_opcode;
7754
7755 /* Address we want to reach in file space. */
7756 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7757
7758 /* Address opcode resides at in file space. */
7759 opcode_address = fragP->fr_address + fragP->fr_fix;
7760
7761 /* Displacement from opcode start to fill into instruction. */
7762 displacement_from_opcode_start = target_address - opcode_address;
7763
7764 if ((fragP->fr_subtype & BIG) == 0)
7765 {
7766 /* Don't have to change opcode. */
7767 extension = 1; /* 1 opcode + 1 displacement */
7768 where_to_put_displacement = &opcode[1];
7769 }
7770 else
7771 {
7772 if (no_cond_jump_promotion
7773 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7774 as_warn_where (fragP->fr_file, fragP->fr_line,
7775 _("long jump required"));
7776
7777 switch (fragP->fr_subtype)
7778 {
7779 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7780 extension = 4; /* 1 opcode + 4 displacement */
7781 opcode[0] = 0xe9;
7782 where_to_put_displacement = &opcode[1];
7783 break;
7784
7785 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7786 extension = 2; /* 1 opcode + 2 displacement */
7787 opcode[0] = 0xe9;
7788 where_to_put_displacement = &opcode[1];
7789 break;
7790
7791 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7792 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7793 extension = 5; /* 2 opcode + 4 displacement */
7794 opcode[1] = opcode[0] + 0x10;
7795 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7796 where_to_put_displacement = &opcode[2];
7797 break;
7798
7799 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7800 extension = 3; /* 2 opcode + 2 displacement */
7801 opcode[1] = opcode[0] + 0x10;
7802 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7803 where_to_put_displacement = &opcode[2];
7804 break;
7805
7806 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7807 extension = 4;
7808 opcode[0] ^= 1;
7809 opcode[1] = 3;
7810 opcode[2] = 0xe9;
7811 where_to_put_displacement = &opcode[3];
7812 break;
7813
7814 default:
7815 BAD_CASE (fragP->fr_subtype);
7816 break;
7817 }
7818 }
7819
7820	  /* If size is less than four we are sure that the operand fits,
7821	     but if it's 4, then it could be that the displacement is larger
7822	     than +/- 2GB.  */
7823 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7824 && object_64bit
7825 && ((addressT) (displacement_from_opcode_start - extension
7826 + ((addressT) 1 << 31))
7827 > (((addressT) 2 << 31) - 1)))
7828 {
7829 as_bad_where (fragP->fr_file, fragP->fr_line,
7830 _("jump target out of range"));
7831 /* Make us emit 0. */
7832 displacement_from_opcode_start = extension;
7833 }
7834 /* Now put displacement after opcode. */
7835 md_number_to_chars ((char *) where_to_put_displacement,
7836 (valueT) (displacement_from_opcode_start - extension),
7837 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7838 fragP->fr_fix += extension;
7839 }
7840 \f
7841 /* Apply a fixup (fixS) to segment data, once it has been determined
7842 by our caller that we have all the info we need to fix it up.
7843
7844 On the 386, immediates, displacements, and data pointers are all in
7845 the same (little-endian) format, so we don't need to care about which
7846 we are handling. */
7847
7848 void
7849 md_apply_fix (fixP, valP, seg)
7850 /* The fix we're to put in. */
7851 fixS *fixP;
7852 /* Pointer to the value of the bits. */
7853 valueT *valP;
7854 /* Segment fix is from. */
7855 segT seg ATTRIBUTE_UNUSED;
7856 {
7857 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7858 valueT value = *valP;
7859
7860 #if !defined (TE_Mach)
7861 if (fixP->fx_pcrel)
7862 {
7863 switch (fixP->fx_r_type)
7864 {
7865 default:
7866 break;
7867
7868 case BFD_RELOC_64:
7869 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7870 break;
7871 case BFD_RELOC_32:
7872 case BFD_RELOC_X86_64_32S:
7873 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7874 break;
7875 case BFD_RELOC_16:
7876 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7877 break;
7878 case BFD_RELOC_8:
7879 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7880 break;
7881 }
7882 }
7883
7884 if (fixP->fx_addsy != NULL
7885 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7886 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7887 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7888 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7889 && !use_rela_relocations)
7890 {
7891 /* This is a hack. There should be a better way to handle this.
7892 This covers for the fact that bfd_install_relocation will
7893 subtract the current location (for partial_inplace, PC relative
7894 relocations); see more below. */
7895 #ifndef OBJ_AOUT
7896 if (IS_ELF
7897 #ifdef TE_PE
7898 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7899 #endif
7900 )
7901 value += fixP->fx_where + fixP->fx_frag->fr_address;
7902 #endif
7903 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7904 if (IS_ELF)
7905 {
7906 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7907
7908 if ((sym_seg == seg
7909 || (symbol_section_p (fixP->fx_addsy)
7910 && sym_seg != absolute_section))
7911 && !generic_force_reloc (fixP))
7912 {
7913 /* Yes, we add the values in twice. This is because
7914 bfd_install_relocation subtracts them out again. I think
7915 bfd_install_relocation is broken, but I don't dare change
7916 it. FIXME. */
7917 value += fixP->fx_where + fixP->fx_frag->fr_address;
7918 }
7919 }
7920 #endif
7921 #if defined (OBJ_COFF) && defined (TE_PE)
7922 /* For some reason, the PE format does not store a
7923 section address offset for a PC relative symbol. */
7924 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7925 || S_IS_WEAK (fixP->fx_addsy))
7926 value += md_pcrel_from (fixP);
7927 #endif
7928 }
7929 #if defined (OBJ_COFF) && defined (TE_PE)
7930 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7931 {
7932 value -= S_GET_VALUE (fixP->fx_addsy);
7933 }
7934 #endif
7935
7936 /* Fix a few things - the dynamic linker expects certain values here,
7937 and we must not disappoint it. */
7938 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7939 if (IS_ELF && fixP->fx_addsy)
7940 switch (fixP->fx_r_type)
7941 {
7942 case BFD_RELOC_386_PLT32:
7943 case BFD_RELOC_X86_64_PLT32:
7944 /* Make the jump instruction point to the address of the operand. At
7945 runtime we merely add the offset to the actual PLT entry. */
7946 value = -4;
7947 break;
7948
7949 case BFD_RELOC_386_TLS_GD:
7950 case BFD_RELOC_386_TLS_LDM:
7951 case BFD_RELOC_386_TLS_IE_32:
7952 case BFD_RELOC_386_TLS_IE:
7953 case BFD_RELOC_386_TLS_GOTIE:
7954 case BFD_RELOC_386_TLS_GOTDESC:
7955 case BFD_RELOC_X86_64_TLSGD:
7956 case BFD_RELOC_X86_64_TLSLD:
7957 case BFD_RELOC_X86_64_GOTTPOFF:
7958 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7959 value = 0; /* Fully resolved at runtime. No addend. */
7960 /* Fallthrough */
7961 case BFD_RELOC_386_TLS_LE:
7962 case BFD_RELOC_386_TLS_LDO_32:
7963 case BFD_RELOC_386_TLS_LE_32:
7964 case BFD_RELOC_X86_64_DTPOFF32:
7965 case BFD_RELOC_X86_64_DTPOFF64:
7966 case BFD_RELOC_X86_64_TPOFF32:
7967 case BFD_RELOC_X86_64_TPOFF64:
7968 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7969 break;
7970
7971 case BFD_RELOC_386_TLS_DESC_CALL:
7972 case BFD_RELOC_X86_64_TLSDESC_CALL:
7973 value = 0; /* Fully resolved at runtime. No addend. */
7974 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7975 fixP->fx_done = 0;
7976 return;
7977
7978 case BFD_RELOC_386_GOT32:
7979 case BFD_RELOC_X86_64_GOT32:
7980 value = 0; /* Fully resolved at runtime. No addend. */
7981 break;
7982
7983 case BFD_RELOC_VTABLE_INHERIT:
7984 case BFD_RELOC_VTABLE_ENTRY:
7985 fixP->fx_done = 0;
7986 return;
7987
7988 default:
7989 break;
7990 }
7991 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7992 *valP = value;
7993 #endif /* !defined (TE_Mach) */
7994
7995 /* Are we finished with this relocation now? */
7996 if (fixP->fx_addsy == NULL)
7997 fixP->fx_done = 1;
7998 #if defined (OBJ_COFF) && defined (TE_PE)
7999 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8000 {
8001 fixP->fx_done = 0;
8002 /* Remember value for tc_gen_reloc. */
8003 fixP->fx_addnumber = value;
8004 /* Clear out the frag for now. */
8005 value = 0;
8006 }
8007 #endif
8008 else if (use_rela_relocations)
8009 {
8010 fixP->fx_no_overflow = 1;
8011 /* Remember value for tc_gen_reloc. */
8012 fixP->fx_addnumber = value;
8013 value = 0;
8014 }
8015
8016 md_number_to_chars (p, value, fixP->fx_size);
8017 }
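
/* Illustrative note (values assumed): for a 5-byte "call foo@PLT" the
   BFD_RELOC_386_PLT32 / BFD_RELOC_X86_64_PLT32 case above stores an
   addend of -4.  The 32-bit field occupies the last 4 bytes of the
   instruction, so the linker's usual S + A - P computation yields a
   displacement relative to the end of the call, which is what the CPU
   expects.  */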
8018 \f
8019 char *
8020 md_atof (int type, char *litP, int *sizeP)
8021 {
8022 /* This outputs the LITTLENUMs in REVERSE order;
8023 in accord with the little-endian 386.  */
8024 return ieee_md_atof (type, litP, sizeP, FALSE);
8025 }
8026 \f
8027 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8028
8029 static char *
8030 output_invalid (int c)
8031 {
8032 if (ISPRINT (c))
8033 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8034 "'%c'", c);
8035 else
8036 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8037 "(0x%x)", (unsigned char) c);
8038 return output_invalid_buf;
8039 }
8040
8041 /* REG_STRING starts *before* REGISTER_PREFIX. */
8042
8043 static const reg_entry *
8044 parse_real_register (char *reg_string, char **end_op)
8045 {
8046 char *s = reg_string;
8047 char *p;
8048 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8049 const reg_entry *r;
8050
8051 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8052 if (*s == REGISTER_PREFIX)
8053 ++s;
8054
8055 if (is_space_char (*s))
8056 ++s;
8057
8058 p = reg_name_given;
8059 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8060 {
8061 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8062 return (const reg_entry *) NULL;
8063 s++;
8064 }
8065
8066 /* For naked regs, make sure that we are not dealing with an identifier.
8067 This prevents confusing an identifier like `eax_var' with register
8068 `eax'. */
8069 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8070 return (const reg_entry *) NULL;
8071
8072 *end_op = s;
8073
8074 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8075
8076 /* Handle floating point regs, allowing spaces in the (i) part. */
8077 if (r == i386_regtab /* %st is first entry of table */)
8078 {
8079 if (is_space_char (*s))
8080 ++s;
8081 if (*s == '(')
8082 {
8083 ++s;
8084 if (is_space_char (*s))
8085 ++s;
8086 if (*s >= '0' && *s <= '7')
8087 {
8088 int fpr = *s - '0';
8089 ++s;
8090 if (is_space_char (*s))
8091 ++s;
8092 if (*s == ')')
8093 {
8094 *end_op = s + 1;
8095 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8096 know (r);
8097 return r + fpr;
8098 }
8099 }
8100 /* We have "%st(" then garbage. */
8101 return (const reg_entry *) NULL;
8102 }
8103 }
8104
8105 if (r == NULL || allow_pseudo_reg)
8106 return r;
8107
8108 if (operand_type_all_zero (&r->reg_type))
8109 return (const reg_entry *) NULL;
8110
8111 if ((r->reg_type.bitfield.reg32
8112 || r->reg_type.bitfield.sreg3
8113 || r->reg_type.bitfield.control
8114 || r->reg_type.bitfield.debug
8115 || r->reg_type.bitfield.test)
8116 && !cpu_arch_flags.bitfield.cpui386)
8117 return (const reg_entry *) NULL;
8118
8119 if (r->reg_type.bitfield.floatreg
8120 && !cpu_arch_flags.bitfield.cpu8087
8121 && !cpu_arch_flags.bitfield.cpu287
8122 && !cpu_arch_flags.bitfield.cpu387)
8123 return (const reg_entry *) NULL;
8124
8125 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8126 return (const reg_entry *) NULL;
8127
8128 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8129 return (const reg_entry *) NULL;
8130
8131 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8132 return (const reg_entry *) NULL;
8133
8134 /* Only allow the fake index registers %eiz / %riz when allow_index_reg is non-zero.  */
8135 if (!allow_index_reg
8136 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8137 return (const reg_entry *) NULL;
8138
8139 if (((r->reg_flags & (RegRex64 | RegRex))
8140 || r->reg_type.bitfield.reg64)
8141 && (!cpu_arch_flags.bitfield.cpulm
8142 || !operand_type_equal (&r->reg_type, &control))
8143 && flag_code != CODE_64BIT)
8144 return (const reg_entry *) NULL;
8145
8146 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8147 return (const reg_entry *) NULL;
8148
8149 return r;
8150 }
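
/* Illustrative walk-through (example input assumed): for the operand
   text "%st ( 3 )" the copy loop above stores "st" in reg_name_given,
   the hash lookup finds the first table entry (%st), and the "(i)"
   handling returns the entry for %st(3), leaving *end_op just past the
   closing parenthesis.  */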
8151
8152 /* REG_STRING starts *before* REGISTER_PREFIX. */
8153
8154 static const reg_entry *
8155 parse_register (char *reg_string, char **end_op)
8156 {
8157 const reg_entry *r;
8158
8159 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8160 r = parse_real_register (reg_string, end_op);
8161 else
8162 r = NULL;
8163 if (!r)
8164 {
8165 char *save = input_line_pointer;
8166 char c;
8167 symbolS *symbolP;
8168
8169 input_line_pointer = reg_string;
8170 c = get_symbol_end ();
8171 symbolP = symbol_find (reg_string);
8172 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8173 {
8174 const expressionS *e = symbol_get_value_expression (symbolP);
8175
8176 know (e->X_op == O_register);
8177 know (e->X_add_number >= 0
8178 && (valueT) e->X_add_number < i386_regtab_size);
8179 r = i386_regtab + e->X_add_number;
8180 *end_op = input_line_pointer;
8181 }
8182 *input_line_pointer = c;
8183 input_line_pointer = save;
8184 }
8185 return r;
8186 }
8187
8188 int
8189 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8190 {
8191 const reg_entry *r;
8192 char *end = input_line_pointer;
8193
8194 *end = *nextcharP;
8195 r = parse_register (name, &input_line_pointer);
8196 if (r && end <= input_line_pointer)
8197 {
8198 *nextcharP = *input_line_pointer;
8199 *input_line_pointer = 0;
8200 e->X_op = O_register;
8201 e->X_add_number = r - i386_regtab;
8202 return 1;
8203 }
8204 input_line_pointer = end;
8205 *end = 0;
8206 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8207 }
8208
8209 void
8210 md_operand (expressionS *e)
8211 {
8212 char *end;
8213 const reg_entry *r;
8214
8215 switch (*input_line_pointer)
8216 {
8217 case REGISTER_PREFIX:
8218 r = parse_real_register (input_line_pointer, &end);
8219 if (r)
8220 {
8221 e->X_op = O_register;
8222 e->X_add_number = r - i386_regtab;
8223 input_line_pointer = end;
8224 }
8225 break;
8226
8227 case '[':
8228 gas_assert (intel_syntax);
8229 end = input_line_pointer++;
8230 expression (e);
8231 if (*input_line_pointer == ']')
8232 {
8233 ++input_line_pointer;
8234 e->X_op_symbol = make_expr_symbol (e);
8235 e->X_add_symbol = NULL;
8236 e->X_add_number = 0;
8237 e->X_op = O_index;
8238 }
8239 else
8240 {
8241 e->X_op = O_absent;
8242 input_line_pointer = end;
8243 }
8244 break;
8245 }
8246 }
8247
8248 \f
8249 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8250 const char *md_shortopts = "kVQ:sqn";
8251 #else
8252 const char *md_shortopts = "qn";
8253 #endif
8254
8255 #define OPTION_32 (OPTION_MD_BASE + 0)
8256 #define OPTION_64 (OPTION_MD_BASE + 1)
8257 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8258 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8259 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8260 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8261 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8262 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8263 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8264 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8265 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8266 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8267 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8268 #define OPTION_X32 (OPTION_MD_BASE + 13)
8269
8270 struct option md_longopts[] =
8271 {
8272 {"32", no_argument, NULL, OPTION_32},
8273 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8274 || defined (TE_PE) || defined (TE_PEP))
8275 {"64", no_argument, NULL, OPTION_64},
8276 #endif
8277 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8278 {"x32", no_argument, NULL, OPTION_X32},
8279 #endif
8280 {"divide", no_argument, NULL, OPTION_DIVIDE},
8281 {"march", required_argument, NULL, OPTION_MARCH},
8282 {"mtune", required_argument, NULL, OPTION_MTUNE},
8283 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8284 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8285 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8286 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8287 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8288 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8289 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8290 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8291 {NULL, no_argument, NULL, 0}
8292 };
8293 size_t md_longopts_size = sizeof (md_longopts);
8294
8295 int
8296 md_parse_option (int c, char *arg)
8297 {
8298 unsigned int j;
8299 char *arch, *next;
8300
8301 switch (c)
8302 {
8303 case 'n':
8304 optimize_align_code = 0;
8305 break;
8306
8307 case 'q':
8308 quiet_warnings = 1;
8309 break;
8310
8311 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8312 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8313 should be emitted or not. FIXME: Not implemented. */
8314 case 'Q':
8315 break;
8316
8317 /* -V: SVR4 argument to print version ID. */
8318 case 'V':
8319 print_version_id ();
8320 break;
8321
8322 /* -k: Ignore for FreeBSD compatibility. */
8323 case 'k':
8324 break;
8325
8326 case 's':
8327 /* -s: On i386 Solaris, this tells the native assembler to use
8328 .stab instead of .stab.excl. We always use .stab anyhow. */
8329 break;
8330 #endif
8331 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8332 || defined (TE_PE) || defined (TE_PEP))
8333 case OPTION_64:
8334 {
8335 const char **list, **l;
8336
8337 list = bfd_target_list ();
8338 for (l = list; *l != NULL; l++)
8339 if (CONST_STRNEQ (*l, "elf64-x86-64")
8340 || strcmp (*l, "coff-x86-64") == 0
8341 || strcmp (*l, "pe-x86-64") == 0
8342 || strcmp (*l, "pei-x86-64") == 0)
8343 {
8344 default_arch = "x86_64";
8345 break;
8346 }
8347 if (*l == NULL)
8348 as_fatal (_("no compiled in support for x86_64"));
8349 free (list);
8350 }
8351 break;
8352 #endif
8353
8354 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8355 case OPTION_X32:
8356 if (IS_ELF)
8357 {
8358 const char **list, **l;
8359
8360 list = bfd_target_list ();
8361 for (l = list; *l != NULL; l++)
8362 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8363 {
8364 default_arch = "x86_64:32";
8365 break;
8366 }
8367 if (*l == NULL)
8368 as_fatal (_("no compiled in support for 32bit x86_64"));
8369 free (list);
8370 }
8371 else
8372 as_fatal (_("32bit x86_64 is only supported for ELF"));
8373 break;
8374 #endif
8375
8376 case OPTION_32:
8377 default_arch = "i386";
8378 break;
8379
8380 case OPTION_DIVIDE:
8381 #ifdef SVR4_COMMENT_CHARS
8382 {
8383 char *n, *t;
8384 const char *s;
8385
8386 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8387 t = n;
8388 for (s = i386_comment_chars; *s != '\0'; s++)
8389 if (*s != '/')
8390 *t++ = *s;
8391 *t = '\0';
8392 i386_comment_chars = n;
8393 }
8394 #endif
8395 break;
8396
8397 case OPTION_MARCH:
8398 arch = xstrdup (arg);
8399 do
8400 {
8401 if (*arch == '.')
8402 as_fatal (_("invalid -march= option: `%s'"), arg);
8403 next = strchr (arch, '+');
8404 if (next)
8405 *next++ = '\0';
8406 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8407 {
8408 if (strcmp (arch, cpu_arch [j].name) == 0)
8409 {
8410 /* Processor. */
8411 if (! cpu_arch[j].flags.bitfield.cpui386)
8412 continue;
8413
8414 cpu_arch_name = cpu_arch[j].name;
8415 cpu_sub_arch_name = NULL;
8416 cpu_arch_flags = cpu_arch[j].flags;
8417 cpu_arch_isa = cpu_arch[j].type;
8418 cpu_arch_isa_flags = cpu_arch[j].flags;
8419 if (!cpu_arch_tune_set)
8420 {
8421 cpu_arch_tune = cpu_arch_isa;
8422 cpu_arch_tune_flags = cpu_arch_isa_flags;
8423 }
8424 break;
8425 }
8426 else if (*cpu_arch [j].name == '.'
8427 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8428 {
8429 /* ISA extension.  */
8430 i386_cpu_flags flags;
8431
8432 if (!cpu_arch[j].negated)
8433 flags = cpu_flags_or (cpu_arch_flags,
8434 cpu_arch[j].flags);
8435 else
8436 flags = cpu_flags_and_not (cpu_arch_flags,
8437 cpu_arch[j].flags);
8438 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8439 {
8440 if (cpu_sub_arch_name)
8441 {
8442 char *name = cpu_sub_arch_name;
8443 cpu_sub_arch_name = concat (name,
8444 cpu_arch[j].name,
8445 (const char *) NULL);
8446 free (name);
8447 }
8448 else
8449 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8450 cpu_arch_flags = flags;
8451 cpu_arch_isa_flags = flags;
8452 }
8453 break;
8454 }
8455 }
8456
8457 if (j >= ARRAY_SIZE (cpu_arch))
8458 as_fatal (_("invalid -march= option: `%s'"), arg);
8459
8460 arch = next;
8461 }
8462 while (next != NULL);
8463 break;
8464
8465 case OPTION_MTUNE:
8466 if (*arg == '.')
8467 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8468 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8469 {
8470 if (strcmp (arg, cpu_arch [j].name) == 0)
8471 {
8472 cpu_arch_tune_set = 1;
8473 cpu_arch_tune = cpu_arch [j].type;
8474 cpu_arch_tune_flags = cpu_arch[j].flags;
8475 break;
8476 }
8477 }
8478 if (j >= ARRAY_SIZE (cpu_arch))
8479 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8480 break;
8481
8482 case OPTION_MMNEMONIC:
8483 if (strcasecmp (arg, "att") == 0)
8484 intel_mnemonic = 0;
8485 else if (strcasecmp (arg, "intel") == 0)
8486 intel_mnemonic = 1;
8487 else
8488 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8489 break;
8490
8491 case OPTION_MSYNTAX:
8492 if (strcasecmp (arg, "att") == 0)
8493 intel_syntax = 0;
8494 else if (strcasecmp (arg, "intel") == 0)
8495 intel_syntax = 1;
8496 else
8497 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8498 break;
8499
8500 case OPTION_MINDEX_REG:
8501 allow_index_reg = 1;
8502 break;
8503
8504 case OPTION_MNAKED_REG:
8505 allow_naked_reg = 1;
8506 break;
8507
8508 case OPTION_MOLD_GCC:
8509 old_gcc = 1;
8510 break;
8511
8512 case OPTION_MSSE2AVX:
8513 sse2avx = 1;
8514 break;
8515
8516 case OPTION_MSSE_CHECK:
8517 if (strcasecmp (arg, "error") == 0)
8518 sse_check = sse_check_error;
8519 else if (strcasecmp (arg, "warning") == 0)
8520 sse_check = sse_check_warning;
8521 else if (strcasecmp (arg, "none") == 0)
8522 sse_check = sse_check_none;
8523 else
8524 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8525 break;
8526
8527 case OPTION_MAVXSCALAR:
8528 if (strcasecmp (arg, "128") == 0)
8529 avxscalar = vex128;
8530 else if (strcasecmp (arg, "256") == 0)
8531 avxscalar = vex256;
8532 else
8533 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8534 break;
8535
8536 default:
8537 return 0;
8538 }
8539 return 1;
8540 }
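
/* Illustrative invocations (file names and extension list assumed):
     as --32 -march=i686+sse2 -o foo.o foo.s
     as --64 -msyntax=intel -mnaked-reg -o bar.o bar.s
   -march= takes one processor name, optionally followed by +EXTENSION
   items; each piece is matched against the cpu_arch[] table in the
   OPTION_MARCH case above.  */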
8541
8542 #define MESSAGE_TEMPLATE \
8543 " "
8544
8545 static void
8546 show_arch (FILE *stream, int ext, int check)
8547 {
8548 static char message[] = MESSAGE_TEMPLATE;
8549 char *start = message + 27;
8550 char *p;
8551 int size = sizeof (MESSAGE_TEMPLATE);
8552 int left;
8553 const char *name;
8554 int len;
8555 unsigned int j;
8556
8557 p = start;
8558 left = size - (start - message);
8559 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8560 {
8561 /* Should it be skipped? */
8562 if (cpu_arch [j].skip)
8563 continue;
8564
8565 name = cpu_arch [j].name;
8566 len = cpu_arch [j].len;
8567 if (*name == '.')
8568 {
8569 /* It is an extension. Skip if we aren't asked to show it. */
8570 if (ext)
8571 {
8572 name++;
8573 len--;
8574 }
8575 else
8576 continue;
8577 }
8578 else if (ext)
8579 {
8580 /* It is a processor.  Skip if we only show extensions.  */
8581 continue;
8582 }
8583 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8584 {
8585 /* It is an impossible processor - skip. */
8586 continue;
8587 }
8588
8589 /* Reserve 2 spaces for ", " or ",\0" */
8590 left -= len + 2;
8591
8592 /* Check if there is any room. */
8593 if (left >= 0)
8594 {
8595 if (p != start)
8596 {
8597 *p++ = ',';
8598 *p++ = ' ';
8599 }
8600 p = mempcpy (p, name, len);
8601 }
8602 else
8603 {
8604 /* Output the current message now and start a new one. */
8605 *p++ = ',';
8606 *p = '\0';
8607 fprintf (stream, "%s\n", message);
8608 p = start;
8609 left = size - (start - message) - len - 2;
8610
8611 gas_assert (left >= 0);
8612
8613 p = mempcpy (p, name, len);
8614 }
8615 }
8616
8617 *p = '\0';
8618 fprintf (stream, "%s\n", message);
8619 }
8620
8621 void
8622 md_show_usage (FILE *stream)
8623 {
8624 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8625 fprintf (stream, _("\
8626 -Q ignored\n\
8627 -V print assembler version number\n\
8628 -k ignored\n"));
8629 #endif
8630 fprintf (stream, _("\
8631 -n Do not optimize code alignment\n\
8632 -q quieten some warnings\n"));
8633 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8634 fprintf (stream, _("\
8635 -s ignored\n"));
8636 #endif
8637 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8638 || defined (TE_PE) || defined (TE_PEP))
8639 fprintf (stream, _("\
8640 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8641 #endif
8642 #ifdef SVR4_COMMENT_CHARS
8643 fprintf (stream, _("\
8644 --divide do not treat `/' as a comment character\n"));
8645 #else
8646 fprintf (stream, _("\
8647 --divide ignored\n"));
8648 #endif
8649 fprintf (stream, _("\
8650 -march=CPU[,+EXTENSION...]\n\
8651 generate code for CPU and EXTENSION, CPU is one of:\n"));
8652 show_arch (stream, 0, 1);
8653 fprintf (stream, _("\
8654 EXTENSION is combination of:\n"));
8655 show_arch (stream, 1, 0);
8656 fprintf (stream, _("\
8657 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8658 show_arch (stream, 0, 0);
8659 fprintf (stream, _("\
8660 -msse2avx encode SSE instructions with VEX prefix\n"));
8661 fprintf (stream, _("\
8662 -msse-check=[none|error|warning]\n\
8663 check SSE instructions\n"));
8664 fprintf (stream, _("\
8665 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8666 length\n"));
8667 fprintf (stream, _("\
8668 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8669 fprintf (stream, _("\
8670 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8671 fprintf (stream, _("\
8672 -mindex-reg support pseudo index registers\n"));
8673 fprintf (stream, _("\
8674 -mnaked-reg don't require `%%' prefix for registers\n"));
8675 fprintf (stream, _("\
8676 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8677 }
8678
8679 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8680 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8681 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8682
8683 /* Pick the target format to use. */
8684
8685 const char *
8686 i386_target_format (void)
8687 {
8688 if (!strncmp (default_arch, "x86_64", 6))
8689 {
8690 update_code_flag (CODE_64BIT, 1);
8691 if (default_arch[6] == '\0')
8692 x86_elf_abi = X86_64_ABI;
8693 else
8694 x86_elf_abi = X86_64_X32_ABI;
8695 }
8696 else if (!strcmp (default_arch, "i386"))
8697 update_code_flag (CODE_32BIT, 1);
8698 else
8699 as_fatal (_("unknown architecture"));
8700
8701 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8702 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8703 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8704 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8705
8706 switch (OUTPUT_FLAVOR)
8707 {
8708 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8709 case bfd_target_aout_flavour:
8710 return AOUT_TARGET_FORMAT;
8711 #endif
8712 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8713 # if defined (TE_PE) || defined (TE_PEP)
8714 case bfd_target_coff_flavour:
8715 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8716 # elif defined (TE_GO32)
8717 case bfd_target_coff_flavour:
8718 return "coff-go32";
8719 # else
8720 case bfd_target_coff_flavour:
8721 return "coff-i386";
8722 # endif
8723 #endif
8724 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8725 case bfd_target_elf_flavour:
8726 {
8727 const char *format;
8728
8729 switch (x86_elf_abi)
8730 {
8731 default:
8732 format = ELF_TARGET_FORMAT;
8733 break;
8734 case X86_64_ABI:
8735 use_rela_relocations = 1;
8736 object_64bit = 1;
8737 format = ELF_TARGET_FORMAT64;
8738 break;
8739 case X86_64_X32_ABI:
8740 use_rela_relocations = 1;
8741 object_64bit = 1;
8742 disallow_64bit_reloc = 1;
8743 format = ELF_TARGET_FORMAT32;
8744 break;
8745 }
8746 if (cpu_arch_isa == PROCESSOR_L1OM)
8747 {
8748 if (x86_elf_abi != X86_64_ABI)
8749 as_fatal (_("Intel L1OM is 64bit only"));
8750 return ELF_TARGET_L1OM_FORMAT;
8751 }
8752 else
8753 return format;
8754 }
8755 #endif
8756 #if defined (OBJ_MACH_O)
8757 case bfd_target_mach_o_flavour:
8758 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8759 #endif
8760 default:
8761 abort ();
8762 return NULL;
8763 }
8764 }
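
/* Illustrative summary of the ELF cases above (typical target names,
   not guaranteed for every configuration): --32 selects
   ELF_TARGET_FORMAT (usually "elf32-i386"), --64 selects
   ELF_TARGET_FORMAT64 (usually "elf64-x86-64") with RELA relocations,
   and --x32 selects ELF_TARGET_FORMAT32 (usually "elf32-x86-64") with
   RELA relocations but 64-bit relocation types rejected through
   disallow_64bit_reloc.  */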
8765
8766 #endif /* OBJ_MAYBE_ more than one */
8767
8768 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8769 void
8770 i386_elf_emit_arch_note (void)
8771 {
8772 if (IS_ELF && cpu_arch_name != NULL)
8773 {
8774 char *p;
8775 asection *seg = now_seg;
8776 subsegT subseg = now_subseg;
8777 Elf_Internal_Note i_note;
8778 Elf_External_Note e_note;
8779 asection *note_secp;
8780 int len;
8781
8782 /* Create the .note section. */
8783 note_secp = subseg_new (".note", 0);
8784 bfd_set_section_flags (stdoutput,
8785 note_secp,
8786 SEC_HAS_CONTENTS | SEC_READONLY);
8787
8788 /* Process the arch string. */
8789 len = strlen (cpu_arch_name);
8790
8791 i_note.namesz = len + 1;
8792 i_note.descsz = 0;
8793 i_note.type = NT_ARCH;
8794 p = frag_more (sizeof (e_note.namesz));
8795 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8796 p = frag_more (sizeof (e_note.descsz));
8797 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8798 p = frag_more (sizeof (e_note.type));
8799 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8800 p = frag_more (len + 1);
8801 strcpy (p, cpu_arch_name);
8802
8803 frag_align (2, 0, 0);
8804
8805 subseg_set (seg, subseg);
8806 }
8807 }
8808 #endif
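
/* Illustration (arch name assumed): with cpu_arch_name == "i686" the
   note emitted above has namesz = 5, descsz = 0, type = NT_ARCH,
   followed by the string "i686\0" and zero padding up to the 4-byte
   boundary requested by frag_align (2, 0, 0).  */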
8809 \f
8810 symbolS *
8811 md_undefined_symbol (char *name)
8813 {
8814 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8815 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8816 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8817 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8818 {
8819 if (!GOT_symbol)
8820 {
8821 if (symbol_find (name))
8822 as_bad (_("GOT already in symbol table"));
8823 GOT_symbol = symbol_new (name, undefined_section,
8824 (valueT) 0, &zero_address_frag);
8825 }
8826 return GOT_symbol;
8827 }
8828 return 0;
8829 }
8830
8831 /* Round up a section size to the appropriate boundary. */
8832
8833 valueT
8834 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8837 {
8838 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8839 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8840 {
8841 /* For a.out, force the section size to be aligned. If we don't do
8842 this, BFD will align it for us, but it will not write out the
8843 final bytes of the section. This may be a bug in BFD, but it is
8844 easier to fix it here since that is how the other a.out targets
8845 work. */
8846 int align;
8847
8848 align = bfd_get_section_alignment (stdoutput, segment);
8849 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8850 }
8851 #endif
8852
8853 return size;
8854 }
8855
8856 /* On the i386, PC-relative offsets are relative to the start of the
8857 next instruction. That is, the address of the offset, plus its
8858 size, since the offset is always the last part of the insn. */
8859
8860 long
8861 md_pcrel_from (fixS *fixP)
8862 {
8863 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8864 }
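
/* Worked example (addresses assumed): for a "call rel32" whose frag
   starts at 0x100 and whose 4-byte fixup sits at fx_where == 1,
   md_pcrel_from returns 0x100 + 1 + 4 == 0x105, the address of the
   instruction that follows the call.  */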
8865
8866 #ifndef I386COFF
8867
8868 static void
8869 s_bss (int ignore ATTRIBUTE_UNUSED)
8870 {
8871 int temp;
8872
8873 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8874 if (IS_ELF)
8875 obj_elf_section_change_hook ();
8876 #endif
8877 temp = get_absolute_expression ();
8878 subseg_set (bss_section, (subsegT) temp);
8879 demand_empty_rest_of_line ();
8880 }
8881
8882 #endif
8883
8884 void
8885 i386_validate_fix (fixS *fixp)
8886 {
8887 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8888 {
8889 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8890 {
8891 if (!object_64bit)
8892 abort ();
8893 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8894 }
8895 else
8896 {
8897 if (!object_64bit)
8898 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8899 else
8900 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8901 }
8902 fixp->fx_subsy = 0;
8903 }
8904 }
8905
8906 arelent *
8907 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8910 {
8911 arelent *rel;
8912 bfd_reloc_code_real_type code;
8913
8914 switch (fixp->fx_r_type)
8915 {
8916 case BFD_RELOC_X86_64_PLT32:
8917 case BFD_RELOC_X86_64_GOT32:
8918 case BFD_RELOC_X86_64_GOTPCREL:
8919 case BFD_RELOC_386_PLT32:
8920 case BFD_RELOC_386_GOT32:
8921 case BFD_RELOC_386_GOTOFF:
8922 case BFD_RELOC_386_GOTPC:
8923 case BFD_RELOC_386_TLS_GD:
8924 case BFD_RELOC_386_TLS_LDM:
8925 case BFD_RELOC_386_TLS_LDO_32:
8926 case BFD_RELOC_386_TLS_IE_32:
8927 case BFD_RELOC_386_TLS_IE:
8928 case BFD_RELOC_386_TLS_GOTIE:
8929 case BFD_RELOC_386_TLS_LE_32:
8930 case BFD_RELOC_386_TLS_LE:
8931 case BFD_RELOC_386_TLS_GOTDESC:
8932 case BFD_RELOC_386_TLS_DESC_CALL:
8933 case BFD_RELOC_X86_64_TLSGD:
8934 case BFD_RELOC_X86_64_TLSLD:
8935 case BFD_RELOC_X86_64_DTPOFF32:
8936 case BFD_RELOC_X86_64_DTPOFF64:
8937 case BFD_RELOC_X86_64_GOTTPOFF:
8938 case BFD_RELOC_X86_64_TPOFF32:
8939 case BFD_RELOC_X86_64_TPOFF64:
8940 case BFD_RELOC_X86_64_GOTOFF64:
8941 case BFD_RELOC_X86_64_GOTPC32:
8942 case BFD_RELOC_X86_64_GOT64:
8943 case BFD_RELOC_X86_64_GOTPCREL64:
8944 case BFD_RELOC_X86_64_GOTPC64:
8945 case BFD_RELOC_X86_64_GOTPLT64:
8946 case BFD_RELOC_X86_64_PLTOFF64:
8947 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8948 case BFD_RELOC_X86_64_TLSDESC_CALL:
8949 case BFD_RELOC_RVA:
8950 case BFD_RELOC_VTABLE_ENTRY:
8951 case BFD_RELOC_VTABLE_INHERIT:
8952 #ifdef TE_PE
8953 case BFD_RELOC_32_SECREL:
8954 #endif
8955 code = fixp->fx_r_type;
8956 break;
8957 case BFD_RELOC_X86_64_32S:
8958 if (!fixp->fx_pcrel)
8959 {
8960 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8961 code = fixp->fx_r_type;
8962 break;
8963 }
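      /* Fall through to the generic pc-relative handling below.  */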
8964 default:
8965 if (fixp->fx_pcrel)
8966 {
8967 switch (fixp->fx_size)
8968 {
8969 default:
8970 as_bad_where (fixp->fx_file, fixp->fx_line,
8971 _("can not do %d byte pc-relative relocation"),
8972 fixp->fx_size);
8973 code = BFD_RELOC_32_PCREL;
8974 break;
8975 case 1: code = BFD_RELOC_8_PCREL; break;
8976 case 2: code = BFD_RELOC_16_PCREL; break;
8977 case 4: code = BFD_RELOC_32_PCREL; break;
8978 #ifdef BFD64
8979 case 8: code = BFD_RELOC_64_PCREL; break;
8980 #endif
8981 }
8982 }
8983 else
8984 {
8985 switch (fixp->fx_size)
8986 {
8987 default:
8988 as_bad_where (fixp->fx_file, fixp->fx_line,
8989 _("can not do %d byte relocation"),
8990 fixp->fx_size);
8991 code = BFD_RELOC_32;
8992 break;
8993 case 1: code = BFD_RELOC_8; break;
8994 case 2: code = BFD_RELOC_16; break;
8995 case 4: code = BFD_RELOC_32; break;
8996 #ifdef BFD64
8997 case 8: code = BFD_RELOC_64; break;
8998 #endif
8999 }
9000 }
9001 break;
9002 }
9003
9004 if ((code == BFD_RELOC_32
9005 || code == BFD_RELOC_32_PCREL
9006 || code == BFD_RELOC_X86_64_32S)
9007 && GOT_symbol
9008 && fixp->fx_addsy == GOT_symbol)
9009 {
9010 if (!object_64bit)
9011 code = BFD_RELOC_386_GOTPC;
9012 else
9013 code = BFD_RELOC_X86_64_GOTPC32;
9014 }
9015 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9016 && GOT_symbol
9017 && fixp->fx_addsy == GOT_symbol)
9018 {
9019 code = BFD_RELOC_X86_64_GOTPC64;
9020 }
9021
9022 rel = (arelent *) xmalloc (sizeof (arelent));
9023 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9024 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9025
9026 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9027
9028 if (!use_rela_relocations)
9029 {
9030 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9031 vtable entry to be used in the relocation's section offset. */
9032 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9033 rel->address = fixp->fx_offset;
9034 #if defined (OBJ_COFF) && defined (TE_PE)
9035 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9036 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9037 else
9038 #endif
9039 rel->addend = 0;
9040 }
9041 /* Use the rela in 64bit mode. */
9042 else
9043 {
9044 if (disallow_64bit_reloc)
9045 switch (code)
9046 {
9047 case BFD_RELOC_64:
9048 case BFD_RELOC_X86_64_DTPOFF64:
9049 case BFD_RELOC_X86_64_TPOFF64:
9050 case BFD_RELOC_64_PCREL:
9051 case BFD_RELOC_X86_64_GOTOFF64:
9052 case BFD_RELOC_X86_64_GOT64:
9053 case BFD_RELOC_X86_64_GOTPCREL64:
9054 case BFD_RELOC_X86_64_GOTPC64:
9055 case BFD_RELOC_X86_64_GOTPLT64:
9056 case BFD_RELOC_X86_64_PLTOFF64:
9057 as_bad_where (fixp->fx_file, fixp->fx_line,
9058 _("cannot represent relocation type %s in x32 mode"),
9059 bfd_get_reloc_code_name (code));
9060 break;
9061 default:
9062 break;
9063 }
9064
9065 if (!fixp->fx_pcrel)
9066 rel->addend = fixp->fx_offset;
9067 else
9068 switch (code)
9069 {
9070 case BFD_RELOC_X86_64_PLT32:
9071 case BFD_RELOC_X86_64_GOT32:
9072 case BFD_RELOC_X86_64_GOTPCREL:
9073 case BFD_RELOC_X86_64_TLSGD:
9074 case BFD_RELOC_X86_64_TLSLD:
9075 case BFD_RELOC_X86_64_GOTTPOFF:
9076 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9077 case BFD_RELOC_X86_64_TLSDESC_CALL:
9078 rel->addend = fixp->fx_offset - fixp->fx_size;
9079 break;
9080 default:
9081 rel->addend = (section->vma
9082 - fixp->fx_size
9083 + fixp->fx_addnumber
9084 + md_pcrel_from (fixp));
9085 break;
9086 }
9087 }
9088
9089 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9090 if (rel->howto == NULL)
9091 {
9092 as_bad_where (fixp->fx_file, fixp->fx_line,
9093 _("cannot represent relocation type %s"),
9094 bfd_get_reloc_code_name (code));
9095 /* Set howto to a garbage value so that we can keep going. */
9096 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9097 gas_assert (rel->howto != NULL);
9098 }
9099
9100 return rel;
9101 }
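
/* Illustrative note on the addend handling above: i386 ELF uses REL
   relocations, so rel->addend stays 0 and the addend lives in the
   section contents, whereas x86-64 ELF uses RELA
   (use_rela_relocations), so md_apply_fix cleared the contents and the
   addend is carried in the relocation itself.  */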
9102
9103 #include "tc-i386-intel.c"
9104
9105 void
9106 tc_x86_parse_to_dw2regnum (expressionS *exp)
9107 {
9108 int saved_naked_reg;
9109 char saved_register_dot;
9110
9111 saved_naked_reg = allow_naked_reg;
9112 allow_naked_reg = 1;
9113 saved_register_dot = register_chars['.'];
9114 register_chars['.'] = '.';
9115 allow_pseudo_reg = 1;
9116 expression_and_evaluate (exp);
9117 allow_pseudo_reg = 0;
9118 register_chars['.'] = saved_register_dot;
9119 allow_naked_reg = saved_naked_reg;
9120
9121 if (exp->X_op == O_register && exp->X_add_number >= 0)
9122 {
9123 if ((addressT) exp->X_add_number < i386_regtab_size)
9124 {
9125 exp->X_op = O_constant;
9126 exp->X_add_number = i386_regtab[exp->X_add_number]
9127 .dw2_regnum[flag_code >> 1];
9128 }
9129 else
9130 exp->X_op = O_illegal;
9131 }
9132 }
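
/* Illustration (register names assumed): in 64-bit code parsing "rsp"
   here yields DWARF register number 7, and in 32-bit code "esp" yields
   4, following the register numbering selected by
   dw2_regnum[flag_code >> 1].  */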
9133
9134 void
9135 tc_x86_frame_initial_instructions (void)
9136 {
9137 static unsigned int sp_regno[2];
9138
9139 if (!sp_regno[flag_code >> 1])
9140 {
9141 char *saved_input = input_line_pointer;
9142 char sp[][4] = {"esp", "rsp"};
9143 expressionS exp;
9144
9145 input_line_pointer = sp[flag_code >> 1];
9146 tc_x86_parse_to_dw2regnum (&exp);
9147 gas_assert (exp.X_op == O_constant);
9148 sp_regno[flag_code >> 1] = exp.X_add_number;
9149 input_line_pointer = saved_input;
9150 }
9151
9152 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9153 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9154 }
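
/* Illustrative note (alignment values assumed): with the usual
   x86_cie_data_alignment of -8 in 64-bit mode this establishes the
   initial CFI state "CFA = rsp + 8, return address at CFA - 8"
   (rsp + 4 and CFA - 4 with -4 in 32-bit mode).  */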
9155
9156 int
9157 i386_elf_section_type (const char *str, size_t len)
9158 {
9159 if (flag_code == CODE_64BIT
9160 && len == sizeof ("unwind") - 1
9161 && strncmp (str, "unwind", 6) == 0)
9162 return SHT_X86_64_UNWIND;
9163
9164 return -1;
9165 }
9166
9167 #ifdef TE_SOLARIS
9168 void
9169 i386_solaris_fix_up_eh_frame (segT sec)
9170 {
9171 if (flag_code == CODE_64BIT)
9172 elf_section_type (sec) = SHT_X86_64_UNWIND;
9173 }
9174 #endif
9175
9176 #ifdef TE_PE
9177 void
9178 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9179 {
9180 expressionS exp;
9181
9182 exp.X_op = O_secrel;
9183 exp.X_add_symbol = symbol;
9184 exp.X_add_number = 0;
9185 emit_expr (&exp, size);
9186 }
9187 #endif
9188
9189 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9190 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9191
9192 bfd_vma
9193 x86_64_section_letter (int letter, char **ptr_msg)
9194 {
9195 if (flag_code == CODE_64BIT)
9196 {
9197 if (letter == 'l')
9198 return SHF_X86_64_LARGE;
9199
9200 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9201 }
9202 else
9203 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9204 return -1;
9205 }
9206
9207 bfd_vma
9208 x86_64_section_word (char *str, size_t len)
9209 {
9210 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9211 return SHF_X86_64_LARGE;
9212
9213 return -1;
9214 }
9215
9216 static void
9217 handle_large_common (int small ATTRIBUTE_UNUSED)
9218 {
9219 if (flag_code != CODE_64BIT)
9220 {
9221 s_comm_internal (0, elf_common_parse);
9222 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9223 }
9224 else
9225 {
9226 static segT lbss_section;
9227 asection *saved_com_section_ptr = elf_com_section_ptr;
9228 asection *saved_bss_section = bss_section;
9229
9230 if (lbss_section == NULL)
9231 {
9232 flagword applicable;
9233 segT seg = now_seg;
9234 subsegT subseg = now_subseg;
9235
9236 /* The .lbss section is for local .largecomm symbols. */
9237 lbss_section = subseg_new (".lbss", 0);
9238 applicable = bfd_applicable_section_flags (stdoutput);
9239 bfd_set_section_flags (stdoutput, lbss_section,
9240 applicable & SEC_ALLOC);
9241 seg_info (lbss_section)->bss = 1;
9242
9243 subseg_set (seg, subseg);
9244 }
9245
9246 elf_com_section_ptr = &_bfd_elf_large_com_section;
9247 bss_section = lbss_section;
9248
9249 s_comm_internal (0, elf_common_parse);
9250
9251 elf_com_section_ptr = saved_com_section_ptr;
9252 bss_section = saved_bss_section;
9253 }
9254 }
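
/* Illustrative usage (symbol name, size and alignment assumed):
     .largecomm big_buffer, 0x200000, 32
   In 64-bit mode the symbol is allocated against the large common
   section (local symbols end up in .lbss); outside 64-bit mode the
   directive warns and degrades to a plain .comm.  */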
9255
9256 static void
9257 handle_quad (int nbytes)
9258 {
9259 expressionS exp;
9260
9261 if (x86_elf_abi != X86_64_X32_ABI)
9262 {
9263 cons (nbytes);
9264 return;
9265 }
9266
9267 if (is_it_end_of_statement ())
9268 {
9269 demand_empty_rest_of_line ();
9270 return;
9271 }
9272
9273 do
9274 {
9275 if (*input_line_pointer == '"')
9276 {
9277 as_bad (_("unexpected `\"' in expression"));
9278 ignore_rest_of_line ();
9279 return;
9280 }
9281 x86_cons (&exp, nbytes);
9282 /* Output 4 bytes if not constant. */
9283 if (exp.X_op != O_constant)
9284 nbytes = 4;
9285 emit_expr (&exp, (unsigned int) nbytes);
9286 /* Zero-extend to 8 bytes if not constant.  */
9287 if (nbytes == 4)
9288 {
9289 memset (&exp, '\0', sizeof (exp));
9290 exp.X_op = O_constant;
9291 emit_expr (&exp, nbytes);
9292 }
9293 nbytes = 8;
9294 }
9295 while (*input_line_pointer++ == ',');
9296
9297 input_line_pointer--; /* Put terminator back into stream. */
9298
9299 demand_empty_rest_of_line ();
9300 }
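
/* Illustration (operands assumed): under the x32 ABI ".quad sym" is
   emitted as a 4-byte value or relocation followed by 4 zero bytes,
   i.e. the 32-bit value is zero-extended to fill the 8-byte slot,
   while a constant such as ".quad 0x1122334455667788" is still emitted
   as a full 8-byte number.  */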
9301 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */