PR gas/12848
binutils-gdb.git: gas/config/tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-printing character since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136 }
137 arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 static void handle_quad (int);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into an
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling. */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Force 32bit displacement in encoding. */
284 unsigned int disp32_encoding;
285
286 /* Error message. */
287 enum i386_error error;
288 };
289
290 typedef struct _i386_insn i386_insn;
291
292 /* List of chars besides those in app.c:symbol_chars that can start an
293 operand. Used to prevent the scrubber eating vital white-space. */
294 const char extra_symbol_chars[] = "*%-(["
295 #ifdef LEX_AT
296 "@"
297 #endif
298 #ifdef LEX_QM
299 "?"
300 #endif
301 ;
302
303 #if (defined (TE_I386AIX) \
304 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
305 && !defined (TE_GNU) \
306 && !defined (TE_LINUX) \
307 && !defined (TE_NETWARE) \
308 && !defined (TE_FreeBSD) \
309 && !defined (TE_DragonFly) \
310 && !defined (TE_NetBSD)))
311 /* This array holds the chars that always start a comment. If the
312 pre-processor is disabled, these aren't very useful. The option
313 --divide will remove '/' from this list. */
314 const char *i386_comment_chars = "#/";
315 #define SVR4_COMMENT_CHARS 1
316 #define PREFIX_SEPARATOR '\\'
317
318 #else
319 const char *i386_comment_chars = "#";
320 #define PREFIX_SEPARATOR '/'
321 #endif
322
323 /* This array holds the chars that only start a comment at the beginning of
324 a line. If the line seems to have the form '# 123 filename'
325 .line and .file directives will appear in the pre-processed output.
326 Note that input_file.c hand checks for '#' at the beginning of the
327 first line of the input file. This is because the compiler outputs
328 #NO_APP at the beginning of its output.
329 Also note that comments started like this one will always work if
330 '/' isn't otherwise defined. */
331 const char line_comment_chars[] = "#/";
332
333 const char line_separator_chars[] = ";";
334
335 /* Chars that can be used to separate the mantissa from the exponent in
336 floating point numbers. */
337 const char EXP_CHARS[] = "eE";
338
339 /* Chars that mean this number is a floating point constant
340 As in 0f12.456
341 or 0d1.2345e12. */
342 const char FLT_CHARS[] = "fFdDxX";
343
344 /* Tables for lexical analysis. */
345 static char mnemonic_chars[256];
346 static char register_chars[256];
347 static char operand_chars[256];
348 static char identifier_chars[256];
349 static char digit_chars[256];
350
351 /* Lexical macros. */
352 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
353 #define is_operand_char(x) (operand_chars[(unsigned char) x])
354 #define is_register_char(x) (register_chars[(unsigned char) x])
355 #define is_space_char(x) ((x) == ' ')
356 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
357 #define is_digit_char(x) (digit_chars[(unsigned char) x])
358
359 /* All non-digit non-letter characters that may occur in an operand. */
360 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
361
362 /* md_assemble() always leaves the strings it's passed unaltered. To
363 effect this we maintain a stack of saved characters that we've smashed
364 with '\0's (indicating end of strings for various sub-fields of the
365 assembler instruction). */
366 static char save_stack[32];
367 static char *save_stack_p;
368 #define END_STRING_AND_SAVE(s) \
369 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
370 #define RESTORE_END_STRING(s) \
371 do { *(s) = *--save_stack_p; } while (0)
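/* Note: the two macros above must be used in strictly nested (LIFO)
   pairs, since they share the single save_stack[] declared above.  */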
372
373 /* The instruction we're assembling. */
374 static i386_insn i;
375
376 /* Possible templates for current insn. */
377 static const templates *current_templates;
378
379 /* Per instruction expressionS buffers: max displacements & immediates. */
380 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
381 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
382
383 /* Current operand we are working on. */
384 static int this_operand = -1;
385
386 /* We support three different modes. FLAG_CODE variable is used to distinguish
387 these. */
388
389 enum flag_code {
390 CODE_32BIT,
391 CODE_16BIT,
392 CODE_64BIT };
393
394 static enum flag_code flag_code;
395 static unsigned int object_64bit;
396 static unsigned int disallow_64bit_reloc;
397 static int use_rela_relocations = 0;
398
399 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
400 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
401 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
402
403 /* The ELF ABI to use. */
404 enum x86_elf_abi
405 {
406 I386_ABI,
407 X86_64_ABI,
408 X86_64_X32_ABI
409 };
410
411 static enum x86_elf_abi x86_elf_abi = I386_ABI;
412 #endif
413
414 /* The names used to print error messages. */
415 static const char *flag_code_names[] =
416 {
417 "32",
418 "16",
419 "64"
420 };
421
422 /* 1 for intel syntax,
423 0 if att syntax. */
424 static int intel_syntax = 0;
425
426 /* 1 for intel mnemonic,
427 0 if att mnemonic. */
428 static int intel_mnemonic = !SYSV386_COMPAT;
429
430 /* 1 if we support old (<= 2.8.1) versions of gcc. */
431 static int old_gcc = OLDGCC_COMPAT;
432
433 /* 1 if pseudo registers are permitted. */
434 static int allow_pseudo_reg = 0;
435
436 /* 1 if register prefix % not required. */
437 static int allow_naked_reg = 0;
438
439 /* 1 if the pseudo index register, eiz/riz, is allowed. */
440 static int allow_index_reg = 0;
441
442 static enum
443 {
444 sse_check_none = 0,
445 sse_check_warning,
446 sse_check_error
447 }
448 sse_check;
449
450 /* Register prefix used for error message. */
451 static const char *register_prefix = "%";
452
453 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
454 leave, push, and pop instructions so that gcc has the same stack
455 frame as in 32 bit mode. */
456 static char stackop_size = '\0';
457
458 /* Non-zero to optimize code alignment. */
459 int optimize_align_code = 1;
460
461 /* Non-zero to quieten some warnings. */
462 static int quiet_warnings = 0;
463
464 /* CPU name. */
465 static const char *cpu_arch_name = NULL;
466 static char *cpu_sub_arch_name = NULL;
467
468 /* CPU feature flags. */
469 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
470
471 /* If we have selected a cpu we are generating instructions for. */
472 static int cpu_arch_tune_set = 0;
473
474 /* Cpu we are generating instructions for. */
475 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
476
477 /* CPU feature flags of cpu we are generating instructions for. */
478 static i386_cpu_flags cpu_arch_tune_flags;
479
480 /* CPU instruction set architecture used. */
481 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
482
483 /* CPU feature flags of instruction set architecture used. */
484 i386_cpu_flags cpu_arch_isa_flags;
485
486 /* If set, conditional jumps are not automatically promoted to handle
487 larger than a byte offset. */
488 static unsigned int no_cond_jump_promotion = 0;
489
490 /* Encode SSE instructions with VEX prefix. */
491 static unsigned int sse2avx;
492
493 /* Encode scalar AVX instructions with specific vector length. */
494 static enum
495 {
496 vex128 = 0,
497 vex256
498 } avxscalar;
499
500 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
501 static symbolS *GOT_symbol;
502
503 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
504 unsigned int x86_dwarf2_return_column;
505
506 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
507 int x86_cie_data_alignment;
508
509 /* Interface to relax_segment.
510 There are 3 major relax states for 386 jump insns because the
511 different types of jumps add different sizes to frags when we're
512 figuring out what sort of jump to choose to reach a given label. */
513
514 /* Types. */
515 #define UNCOND_JUMP 0
516 #define COND_JUMP 1
517 #define COND_JUMP86 2
518
519 /* Sizes. */
520 #define CODE16 1
521 #define SMALL 0
522 #define SMALL16 (SMALL | CODE16)
523 #define BIG 2
524 #define BIG16 (BIG | CODE16)
525
526 #ifndef INLINE
527 #ifdef __GNUC__
528 #define INLINE __inline__
529 #else
530 #define INLINE
531 #endif
532 #endif
533
534 #define ENCODE_RELAX_STATE(type, size) \
535 ((relax_substateT) (((type) << 2) | (size)))
536 #define TYPE_FROM_RELAX_STATE(s) \
537 ((s) >> 2)
538 #define DISP_SIZE_FROM_RELAX_STATE(s) \
539 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
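
/* For example, ENCODE_RELAX_STATE (COND_JUMP, BIG16) is (1 << 2) | 3 = 7;
   TYPE_FROM_RELAX_STATE then recovers COND_JUMP (1) and
   DISP_SIZE_FROM_RELAX_STATE yields 2 displacement bytes.  */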
540
541 /* This table is used by relax_frag to promote short jumps to long
542 ones where necessary. SMALL (short) jumps may be promoted to BIG
543 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
544 don't allow a short jump in a 32 bit code segment to be promoted to
545 a 16 bit offset jump because it's slower (requires data size
546 prefix), and doesn't work, unless the destination is in the bottom
547 64k of the code segment (The top 16 bits of eip are zeroed). */
548
549 const relax_typeS md_relax_table[] =
550 {
551 /* The fields are:
552 1) most positive reach of this state,
553 2) most negative reach of this state,
554 3) how many bytes this mode will have in the variable part of the frag
555 4) which index into the table to try if we can't fit into this one. */
556
557 /* UNCOND_JUMP states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
560 /* dword jmp adds 4 bytes to frag:
561 0 extra opcode bytes, 4 displacement bytes. */
562 {0, 0, 4, 0},
563 /* word jmp adds 2 bytes to frag:
564 0 extra opcode bytes, 2 displacement bytes. */
565 {0, 0, 2, 0},
566
567 /* COND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
570 /* dword conditionals add 5 bytes to frag:
571 1 extra opcode byte, 4 displacement bytes. */
572 {0, 0, 5, 0},
573 /* word conditionals add 3 bytes to frag:
574 1 extra opcode byte, 2 displacement bytes. */
575 {0, 0, 3, 0},
576
577 /* COND_JUMP86 states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 4 bytes to frag:
584 1 displacement byte and a 3 byte long branch insn. */
585 {0, 0, 4, 0}
586 };
587
588 static const arch_entry cpu_arch[] =
589 {
590 /* Do not replace the first two entries - i386_target_format()
591 relies on them being there in this order. */
592 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
593 CPU_GENERIC32_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
595 CPU_GENERIC64_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
597 CPU_NONE_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
599 CPU_I186_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
601 CPU_I286_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
603 CPU_I386_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
605 CPU_I486_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
609 CPU_I686_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
611 CPU_I586_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
613 CPU_PENTIUMPRO_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
615 CPU_P2_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
617 CPU_P3_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
619 CPU_P4_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
621 CPU_CORE_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
623 CPU_NOCONA_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
625 CPU_CORE_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
629 CPU_CORE2_FLAGS, 1, 0 },
630 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
631 CPU_CORE2_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
633 CPU_COREI7_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
635 CPU_L1OM_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
637 CPU_K6_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
639 CPU_K6_2_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
641 CPU_ATHLON_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 1, 0 },
644 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
645 CPU_K8_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
647 CPU_K8_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
649 CPU_AMDFAM10_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
651 CPU_BDVER1_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
653 CPU_BDVER2_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
655 CPU_8087_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
657 CPU_287_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
659 CPU_387_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
661 CPU_ANY87_FLAGS, 0, 1 },
662 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
663 CPU_MMX_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
665 CPU_3DNOWA_FLAGS, 0, 1 },
666 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
667 CPU_SSE_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
669 CPU_SSE2_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
671 CPU_SSE3_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
673 CPU_SSSE3_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
675 CPU_SSE4_1_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
677 CPU_SSE4_2_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
679 CPU_SSE4_2_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
681 CPU_ANY_SSE_FLAGS, 0, 1 },
682 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
683 CPU_AVX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
685 CPU_AVX2_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
687 CPU_ANY_AVX_FLAGS, 0, 1 },
688 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
689 CPU_VMX_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
691 CPU_SMX_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
693 CPU_XSAVE_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
695 CPU_XSAVEOPT_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
697 CPU_AES_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
699 CPU_PCLMUL_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
701 CPU_PCLMUL_FLAGS, 1, 0 },
702 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
703 CPU_FSGSBASE_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
705 CPU_RDRND_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
707 CPU_F16C_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
709 CPU_BMI2_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
711 CPU_FMA_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
713 CPU_FMA4_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
715 CPU_XOP_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
717 CPU_LWP_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
719 CPU_MOVBE_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
721 CPU_EPT_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
723 CPU_LZCNT_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
725 CPU_INVPCID_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
727 CPU_CLFLUSH_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
729 CPU_NOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
731 CPU_SYSCALL_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
733 CPU_RDTSCP_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
735 CPU_3DNOW_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
737 CPU_3DNOWA_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
739 CPU_PADLOCK_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
741 CPU_SVME_FLAGS, 1, 0 },
742 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
743 CPU_SVME_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
745 CPU_SSE4A_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
747 CPU_ABM_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
749 CPU_BMI_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
751 CPU_TBM_FLAGS, 0, 0 },
752 };
753
754 #ifdef I386COFF
755 /* Like s_lcomm_internal in gas/read.c but the alignment string
756 is allowed to be optional. */
757
758 static symbolS *
759 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
760 {
761 addressT align = 0;
762
763 SKIP_WHITESPACE ();
764
765 if (needs_align
766 && *input_line_pointer == ',')
767 {
768 align = parse_align (needs_align - 1);
769
770 if (align == (addressT) -1)
771 return NULL;
772 }
773 else
774 {
775 if (size >= 8)
776 align = 3;
777 else if (size >= 4)
778 align = 2;
779 else if (size >= 2)
780 align = 1;
781 else
782 align = 0;
783 }
784
785 bss_alloc (symbolP, size, align);
786 return symbolP;
787 }
788
789 static void
790 pe_lcomm (int needs_align)
791 {
792 s_comm_internal (needs_align * 2, pe_lcomm_internal);
793 }
794 #endif
795
796 const pseudo_typeS md_pseudo_table[] =
797 {
798 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
799 {"align", s_align_bytes, 0},
800 #else
801 {"align", s_align_ptwo, 0},
802 #endif
803 {"arch", set_cpu_arch, 0},
804 #ifndef I386COFF
805 {"bss", s_bss, 0},
806 #else
807 {"lcomm", pe_lcomm, 1},
808 #endif
809 {"ffloat", float_cons, 'f'},
810 {"dfloat", float_cons, 'd'},
811 {"tfloat", float_cons, 'x'},
812 {"value", cons, 2},
813 {"slong", signed_cons, 4},
814 {"noopt", s_ignore, 0},
815 {"optim", s_ignore, 0},
816 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
817 {"code16", set_code_flag, CODE_16BIT},
818 {"code32", set_code_flag, CODE_32BIT},
819 {"code64", set_code_flag, CODE_64BIT},
820 {"intel_syntax", set_intel_syntax, 1},
821 {"att_syntax", set_intel_syntax, 0},
822 {"intel_mnemonic", set_intel_mnemonic, 1},
823 {"att_mnemonic", set_intel_mnemonic, 0},
824 {"allow_index_reg", set_allow_index_reg, 1},
825 {"disallow_index_reg", set_allow_index_reg, 0},
826 {"sse_check", set_sse_check, 0},
827 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
828 {"largecomm", handle_large_common, 0},
829 {"quad", handle_quad, 8},
830 #else
831 {"file", (void (*) (int)) dwarf2_directive_file, 0},
832 {"loc", dwarf2_directive_loc, 0},
833 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
834 #endif
835 #ifdef TE_PE
836 {"secrel32", pe_directive_secrel, 0},
837 #endif
838 {0, 0, 0}
839 };
840
841 /* For interface with expression (). */
842 extern char *input_line_pointer;
843
844 /* Hash table for instruction mnemonic lookup. */
845 static struct hash_control *op_hash;
846
847 /* Hash table for register lookup. */
848 static struct hash_control *reg_hash;
849 \f
850 void
851 i386_align_code (fragS *fragP, int count)
852 {
853 /* Various efficient no-op patterns for aligning code labels.
854 Note: Don't try to assemble the instructions in the comments.
855 0L and 0w are not legal. */
856 static const char f32_1[] =
857 {0x90}; /* nop */
858 static const char f32_2[] =
859 {0x66,0x90}; /* xchg %ax,%ax */
860 static const char f32_3[] =
861 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
862 static const char f32_4[] =
863 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
864 static const char f32_5[] =
865 {0x90, /* nop */
866 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
867 static const char f32_6[] =
868 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
869 static const char f32_7[] =
870 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
871 static const char f32_8[] =
872 {0x90, /* nop */
873 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
874 static const char f32_9[] =
875 {0x89,0xf6, /* movl %esi,%esi */
876 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
877 static const char f32_10[] =
878 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f32_11[] =
881 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
882 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
883 static const char f32_12[] =
884 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
885 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
886 static const char f32_13[] =
887 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
888 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
889 static const char f32_14[] =
890 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f16_3[] =
893 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
894 static const char f16_4[] =
895 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
896 static const char f16_5[] =
897 {0x90, /* nop */
898 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
899 static const char f16_6[] =
900 {0x89,0xf6, /* mov %si,%si */
901 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
902 static const char f16_7[] =
903 {0x8d,0x74,0x00, /* lea 0(%si),%si */
904 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
905 static const char f16_8[] =
906 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
907 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
908 static const char jump_31[] =
909 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
910 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
911 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
912 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
913 static const char *const f32_patt[] = {
914 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
915 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
916 };
917 static const char *const f16_patt[] = {
918 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
919 };
920 /* nopl (%[re]ax) */
921 static const char alt_3[] =
922 {0x0f,0x1f,0x00};
923 /* nopl 0(%[re]ax) */
924 static const char alt_4[] =
925 {0x0f,0x1f,0x40,0x00};
926 /* nopl 0(%[re]ax,%[re]ax,1) */
927 static const char alt_5[] =
928 {0x0f,0x1f,0x44,0x00,0x00};
929 /* nopw 0(%[re]ax,%[re]ax,1) */
930 static const char alt_6[] =
931 {0x66,0x0f,0x1f,0x44,0x00,0x00};
932 /* nopl 0L(%[re]ax) */
933 static const char alt_7[] =
934 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
935 /* nopl 0L(%[re]ax,%[re]ax,1) */
936 static const char alt_8[] =
937 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
938 /* nopw 0L(%[re]ax,%[re]ax,1) */
939 static const char alt_9[] =
940 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
942 static const char alt_10[] =
943 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* data16
945 nopw %cs:0L(%[re]ax,%[re]ax,1) */
946 static const char alt_long_11[] =
947 {0x66,
948 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
949 /* data16
950 data16
951 nopw %cs:0L(%[re]ax,%[re]ax,1) */
952 static const char alt_long_12[] =
953 {0x66,
954 0x66,
955 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
956 /* data16
957 data16
958 data16
959 nopw %cs:0L(%[re]ax,%[re]ax,1) */
960 static const char alt_long_13[] =
961 {0x66,
962 0x66,
963 0x66,
964 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
965 /* data16
966 data16
967 data16
968 data16
969 nopw %cs:0L(%[re]ax,%[re]ax,1) */
970 static const char alt_long_14[] =
971 {0x66,
972 0x66,
973 0x66,
974 0x66,
975 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 /* data16
977 data16
978 data16
979 data16
980 data16
981 nopw %cs:0L(%[re]ax,%[re]ax,1) */
982 static const char alt_long_15[] =
983 {0x66,
984 0x66,
985 0x66,
986 0x66,
987 0x66,
988 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
989 /* nopl 0(%[re]ax,%[re]ax,1)
990 nopw 0(%[re]ax,%[re]ax,1) */
991 static const char alt_short_11[] =
992 {0x0f,0x1f,0x44,0x00,0x00,
993 0x66,0x0f,0x1f,0x44,0x00,0x00};
994 /* nopw 0(%[re]ax,%[re]ax,1)
995 nopw 0(%[re]ax,%[re]ax,1) */
996 static const char alt_short_12[] =
997 {0x66,0x0f,0x1f,0x44,0x00,0x00,
998 0x66,0x0f,0x1f,0x44,0x00,0x00};
999 /* nopw 0(%[re]ax,%[re]ax,1)
1000 nopl 0L(%[re]ax) */
1001 static const char alt_short_13[] =
1002 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1003 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1004 /* nopl 0L(%[re]ax)
1005 nopl 0L(%[re]ax) */
1006 static const char alt_short_14[] =
1007 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1008 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1009 /* nopl 0L(%[re]ax)
1010 nopl 0L(%[re]ax,%[re]ax,1) */
1011 static const char alt_short_15[] =
1012 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1013 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 static const char *const alt_short_patt[] = {
1015 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1016 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1017 alt_short_14, alt_short_15
1018 };
1019 static const char *const alt_long_patt[] = {
1020 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1021 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1022 alt_long_14, alt_long_15
1023 };
1024
1025 /* Nothing to do for a non-positive count or one too large to handle. */
1026 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1027 return;
1028
1029 /* We need to decide which NOP sequence to use for 32bit and
1030 64bit. When -mtune= is used:
1031
1032 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1033 PROCESSOR_GENERIC32, f32_patt will be used.
1034 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1035 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1036 PROCESSOR_GENERIC64, alt_long_patt will be used.
1037 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1038 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1039 will be used.
1040
1041 When -mtune= isn't used, alt_long_patt will be used if
1042 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1043 be used.
1044
1045 When -march= or .arch is used, we can't use anything beyond
1046 cpu_arch_isa_flags. */
1047
1048 if (flag_code == CODE_16BIT)
1049 {
1050 if (count > 8)
1051 {
1052 memcpy (fragP->fr_literal + fragP->fr_fix,
1053 jump_31, count);
1054 /* Adjust jump offset. */
1055 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1056 }
1057 else
1058 memcpy (fragP->fr_literal + fragP->fr_fix,
1059 f16_patt[count - 1], count);
1060 }
1061 else
1062 {
1063 const char *const *patt = NULL;
1064
1065 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1066 {
1067 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1068 switch (cpu_arch_tune)
1069 {
1070 case PROCESSOR_UNKNOWN:
1071 /* We use cpu_arch_isa_flags to check if we SHOULD
1072 optimize with nops. */
1073 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1074 patt = alt_long_patt;
1075 else
1076 patt = f32_patt;
1077 break;
1078 case PROCESSOR_PENTIUM4:
1079 case PROCESSOR_NOCONA:
1080 case PROCESSOR_CORE:
1081 case PROCESSOR_CORE2:
1082 case PROCESSOR_COREI7:
1083 case PROCESSOR_L1OM:
1084 case PROCESSOR_GENERIC64:
1085 patt = alt_long_patt;
1086 break;
1087 case PROCESSOR_K6:
1088 case PROCESSOR_ATHLON:
1089 case PROCESSOR_K8:
1090 case PROCESSOR_AMDFAM10:
1091 case PROCESSOR_BD:
1092 patt = alt_short_patt;
1093 break;
1094 case PROCESSOR_I386:
1095 case PROCESSOR_I486:
1096 case PROCESSOR_PENTIUM:
1097 case PROCESSOR_PENTIUMPRO:
1098 case PROCESSOR_GENERIC32:
1099 patt = f32_patt;
1100 break;
1101 }
1102 }
1103 else
1104 {
1105 switch (fragP->tc_frag_data.tune)
1106 {
1107 case PROCESSOR_UNKNOWN:
1108 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1109 PROCESSOR_UNKNOWN. */
1110 abort ();
1111 break;
1112
1113 case PROCESSOR_I386:
1114 case PROCESSOR_I486:
1115 case PROCESSOR_PENTIUM:
1116 case PROCESSOR_K6:
1117 case PROCESSOR_ATHLON:
1118 case PROCESSOR_K8:
1119 case PROCESSOR_AMDFAM10:
1120 case PROCESSOR_BD:
1121 case PROCESSOR_GENERIC32:
1122 /* We use cpu_arch_isa_flags to check if we CAN optimize
1123 with nops. */
1124 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1125 patt = alt_short_patt;
1126 else
1127 patt = f32_patt;
1128 break;
1129 case PROCESSOR_PENTIUMPRO:
1130 case PROCESSOR_PENTIUM4:
1131 case PROCESSOR_NOCONA:
1132 case PROCESSOR_CORE:
1133 case PROCESSOR_CORE2:
1134 case PROCESSOR_COREI7:
1135 case PROCESSOR_L1OM:
1136 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1137 patt = alt_long_patt;
1138 else
1139 patt = f32_patt;
1140 break;
1141 case PROCESSOR_GENERIC64:
1142 patt = alt_long_patt;
1143 break;
1144 }
1145 }
1146
1147 if (patt == f32_patt)
1148 {
1149 /* If the padding is below the limit computed below, we use the
1150 plain NOP patterns. Otherwise, we use a jump instruction and
1151 adjust its offset. */
1152 int limit;
1153
1154 /* For 64bit, the limit is 3 bytes. */
1155 if (flag_code == CODE_64BIT
1156 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1157 limit = 3;
1158 else
1159 limit = 15;
1160 if (count < limit)
1161 memcpy (fragP->fr_literal + fragP->fr_fix,
1162 patt[count - 1], count);
1163 else
1164 {
1165 memcpy (fragP->fr_literal + fragP->fr_fix,
1166 jump_31, count);
1167 /* Adjust jump offset. */
1168 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1169 }
1170 }
1171 else
1172 {
1173 /* The maximum length of an instruction is 15 bytes. If the
1174 padding is greater than 15 bytes and we don't use jump,
1175 we have to break it into smaller pieces. */
1176 int padding = count;
1177 while (padding > 15)
1178 {
1179 padding -= 15;
1180 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1181 patt [14], 15);
1182 }
1183
1184 if (padding)
1185 memcpy (fragP->fr_literal + fragP->fr_fix,
1186 patt [padding - 1], padding);
1187 }
1188 }
1189 fragP->fr_var = count;
1190 }
1191
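/* Note: the switch statements in the operand_type_* and cpu_flags_*
   helpers below intentionally fall through from case 3 to case 2 to
   case 1, so that every element of the underlying array is processed.  */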
1192 static INLINE int
1193 operand_type_all_zero (const union i386_operand_type *x)
1194 {
1195 switch (ARRAY_SIZE(x->array))
1196 {
1197 case 3:
1198 if (x->array[2])
1199 return 0;
1200 case 2:
1201 if (x->array[1])
1202 return 0;
1203 case 1:
1204 return !x->array[0];
1205 default:
1206 abort ();
1207 }
1208 }
1209
1210 static INLINE void
1211 operand_type_set (union i386_operand_type *x, unsigned int v)
1212 {
1213 switch (ARRAY_SIZE(x->array))
1214 {
1215 case 3:
1216 x->array[2] = v;
1217 case 2:
1218 x->array[1] = v;
1219 case 1:
1220 x->array[0] = v;
1221 break;
1222 default:
1223 abort ();
1224 }
1225 }
1226
1227 static INLINE int
1228 operand_type_equal (const union i386_operand_type *x,
1229 const union i386_operand_type *y)
1230 {
1231 switch (ARRAY_SIZE(x->array))
1232 {
1233 case 3:
1234 if (x->array[2] != y->array[2])
1235 return 0;
1236 case 2:
1237 if (x->array[1] != y->array[1])
1238 return 0;
1239 case 1:
1240 return x->array[0] == y->array[0];
1241 break;
1242 default:
1243 abort ();
1244 }
1245 }
1246
1247 static INLINE int
1248 cpu_flags_all_zero (const union i386_cpu_flags *x)
1249 {
1250 switch (ARRAY_SIZE(x->array))
1251 {
1252 case 3:
1253 if (x->array[2])
1254 return 0;
1255 case 2:
1256 if (x->array[1])
1257 return 0;
1258 case 1:
1259 return !x->array[0];
1260 default:
1261 abort ();
1262 }
1263 }
1264
1265 static INLINE void
1266 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1267 {
1268 switch (ARRAY_SIZE(x->array))
1269 {
1270 case 3:
1271 x->array[2] = v;
1272 case 2:
1273 x->array[1] = v;
1274 case 1:
1275 x->array[0] = v;
1276 break;
1277 default:
1278 abort ();
1279 }
1280 }
1281
1282 static INLINE int
1283 cpu_flags_equal (const union i386_cpu_flags *x,
1284 const union i386_cpu_flags *y)
1285 {
1286 switch (ARRAY_SIZE(x->array))
1287 {
1288 case 3:
1289 if (x->array[2] != y->array[2])
1290 return 0;
1291 case 2:
1292 if (x->array[1] != y->array[1])
1293 return 0;
1294 case 1:
1295 return x->array[0] == y->array[0];
1296 break;
1297 default:
1298 abort ();
1299 }
1300 }
1301
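/* Return 1 if the cpu flags F are compatible with the current code size:
   a Cpu64-only template is rejected outside 64-bit mode, and a CpuNo64
   template is rejected in 64-bit mode.  */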
1302 static INLINE int
1303 cpu_flags_check_cpu64 (i386_cpu_flags f)
1304 {
1305 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1306 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1307 }
1308
1309 static INLINE i386_cpu_flags
1310 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1311 {
1312 switch (ARRAY_SIZE (x.array))
1313 {
1314 case 3:
1315 x.array [2] &= y.array [2];
1316 case 2:
1317 x.array [1] &= y.array [1];
1318 case 1:
1319 x.array [0] &= y.array [0];
1320 break;
1321 default:
1322 abort ();
1323 }
1324 return x;
1325 }
1326
1327 static INLINE i386_cpu_flags
1328 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1329 {
1330 switch (ARRAY_SIZE (x.array))
1331 {
1332 case 3:
1333 x.array [2] |= y.array [2];
1334 case 2:
1335 x.array [1] |= y.array [1];
1336 case 1:
1337 x.array [0] |= y.array [0];
1338 break;
1339 default:
1340 abort ();
1341 }
1342 return x;
1343 }
1344
1345 static INLINE i386_cpu_flags
1346 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1347 {
1348 switch (ARRAY_SIZE (x.array))
1349 {
1350 case 3:
1351 x.array [2] &= ~y.array [2];
1352 case 2:
1353 x.array [1] &= ~y.array [1];
1354 case 1:
1355 x.array [0] &= ~y.array [0];
1356 break;
1357 default:
1358 abort ();
1359 }
1360 return x;
1361 }
1362
1363 #define CPU_FLAGS_ARCH_MATCH 0x1
1364 #define CPU_FLAGS_64BIT_MATCH 0x2
1365 #define CPU_FLAGS_AES_MATCH 0x4
1366 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1367 #define CPU_FLAGS_AVX_MATCH 0x10
1368
1369 #define CPU_FLAGS_32BIT_MATCH \
1370 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1371 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1372 #define CPU_FLAGS_PERFECT_MATCH \
1373 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1374
1375 /* Return CPU flags match bits. */
1376
1377 static int
1378 cpu_flags_match (const insn_template *t)
1379 {
1380 i386_cpu_flags x = t->cpu_flags;
1381 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1382
1383 x.bitfield.cpu64 = 0;
1384 x.bitfield.cpuno64 = 0;
1385
1386 if (cpu_flags_all_zero (&x))
1387 {
1388 /* This instruction is available on all archs. */
1389 match |= CPU_FLAGS_32BIT_MATCH;
1390 }
1391 else
1392 {
1393 /* This instruction is available only on some archs. */
1394 i386_cpu_flags cpu = cpu_arch_flags;
1395
1396 cpu.bitfield.cpu64 = 0;
1397 cpu.bitfield.cpuno64 = 0;
1398 cpu = cpu_flags_and (x, cpu);
1399 if (!cpu_flags_all_zero (&cpu))
1400 {
1401 if (x.bitfield.cpuavx)
1402 {
1403 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1404 if (cpu.bitfield.cpuavx)
1405 {
1406 /* Check SSE2AVX. */
1407 if (!t->opcode_modifier.sse2avx || sse2avx)
1408 {
1409 match |= (CPU_FLAGS_ARCH_MATCH
1410 | CPU_FLAGS_AVX_MATCH);
1411 /* Check AES. */
1412 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1413 match |= CPU_FLAGS_AES_MATCH;
1414 /* Check PCLMUL. */
1415 if (!x.bitfield.cpupclmul
1416 || cpu.bitfield.cpupclmul)
1417 match |= CPU_FLAGS_PCLMUL_MATCH;
1418 }
1419 }
1420 else
1421 match |= CPU_FLAGS_ARCH_MATCH;
1422 }
1423 else
1424 match |= CPU_FLAGS_32BIT_MATCH;
1425 }
1426 }
1427 return match;
1428 }
1429
1430 static INLINE i386_operand_type
1431 operand_type_and (i386_operand_type x, i386_operand_type y)
1432 {
1433 switch (ARRAY_SIZE (x.array))
1434 {
1435 case 3:
1436 x.array [2] &= y.array [2];
1437 case 2:
1438 x.array [1] &= y.array [1];
1439 case 1:
1440 x.array [0] &= y.array [0];
1441 break;
1442 default:
1443 abort ();
1444 }
1445 return x;
1446 }
1447
1448 static INLINE i386_operand_type
1449 operand_type_or (i386_operand_type x, i386_operand_type y)
1450 {
1451 switch (ARRAY_SIZE (x.array))
1452 {
1453 case 3:
1454 x.array [2] |= y.array [2];
1455 case 2:
1456 x.array [1] |= y.array [1];
1457 case 1:
1458 x.array [0] |= y.array [0];
1459 break;
1460 default:
1461 abort ();
1462 }
1463 return x;
1464 }
1465
1466 static INLINE i386_operand_type
1467 operand_type_xor (i386_operand_type x, i386_operand_type y)
1468 {
1469 switch (ARRAY_SIZE (x.array))
1470 {
1471 case 3:
1472 x.array [2] ^= y.array [2];
1473 case 2:
1474 x.array [1] ^= y.array [1];
1475 case 1:
1476 x.array [0] ^= y.array [0];
1477 break;
1478 default:
1479 abort ();
1480 }
1481 return x;
1482 }
1483
1484 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1485 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1486 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1487 static const i386_operand_type inoutportreg
1488 = OPERAND_TYPE_INOUTPORTREG;
1489 static const i386_operand_type reg16_inoutportreg
1490 = OPERAND_TYPE_REG16_INOUTPORTREG;
1491 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1492 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1493 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1494 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1495 static const i386_operand_type anydisp
1496 = OPERAND_TYPE_ANYDISP;
1497 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1498 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1499 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1500 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1501 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1502 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1503 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1504 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1505 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1506 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1507 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1508 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1509
1510 enum operand_type
1511 {
1512 reg,
1513 imm,
1514 disp,
1515 anymem
1516 };
1517
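/* Return non-zero if operand type T has any bit of class C (register,
   immediate, displacement or memory) set.  */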
1518 static INLINE int
1519 operand_type_check (i386_operand_type t, enum operand_type c)
1520 {
1521 switch (c)
1522 {
1523 case reg:
1524 return (t.bitfield.reg8
1525 || t.bitfield.reg16
1526 || t.bitfield.reg32
1527 || t.bitfield.reg64);
1528
1529 case imm:
1530 return (t.bitfield.imm8
1531 || t.bitfield.imm8s
1532 || t.bitfield.imm16
1533 || t.bitfield.imm32
1534 || t.bitfield.imm32s
1535 || t.bitfield.imm64);
1536
1537 case disp:
1538 return (t.bitfield.disp8
1539 || t.bitfield.disp16
1540 || t.bitfield.disp32
1541 || t.bitfield.disp32s
1542 || t.bitfield.disp64);
1543
1544 case anymem:
1545 return (t.bitfield.disp8
1546 || t.bitfield.disp16
1547 || t.bitfield.disp32
1548 || t.bitfield.disp32s
1549 || t.bitfield.disp64
1550 || t.bitfield.baseindex);
1551
1552 default:
1553 abort ();
1554 }
1555
1556 return 0;
1557 }
1558
1559 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1560 operand J for instruction template T. */
1561
1562 static INLINE int
1563 match_reg_size (const insn_template *t, unsigned int j)
1564 {
1565 return !((i.types[j].bitfield.byte
1566 && !t->operand_types[j].bitfield.byte)
1567 || (i.types[j].bitfield.word
1568 && !t->operand_types[j].bitfield.word)
1569 || (i.types[j].bitfield.dword
1570 && !t->operand_types[j].bitfield.dword)
1571 || (i.types[j].bitfield.qword
1572 && !t->operand_types[j].bitfield.qword));
1573 }
1574
1575 /* Return 1 if there is no conflict in any size on operand J for
1576 instruction template T. */
1577
1578 static INLINE int
1579 match_mem_size (const insn_template *t, unsigned int j)
1580 {
1581 return (match_reg_size (t, j)
1582 && !((i.types[j].bitfield.unspecified
1583 && !t->operand_types[j].bitfield.unspecified)
1584 || (i.types[j].bitfield.fword
1585 && !t->operand_types[j].bitfield.fword)
1586 || (i.types[j].bitfield.tbyte
1587 && !t->operand_types[j].bitfield.tbyte)
1588 || (i.types[j].bitfield.xmmword
1589 && !t->operand_types[j].bitfield.xmmword)
1590 || (i.types[j].bitfield.ymmword
1591 && !t->operand_types[j].bitfield.ymmword)));
1592 }
1593
1594 /* Return 1 if there is no size conflict on any operands for
1595 instruction template T. */
1596
1597 static INLINE int
1598 operand_size_match (const insn_template *t)
1599 {
1600 unsigned int j;
1601 int match = 1;
1602
1603 /* Don't check jump instructions. */
1604 if (t->opcode_modifier.jump
1605 || t->opcode_modifier.jumpbyte
1606 || t->opcode_modifier.jumpdword
1607 || t->opcode_modifier.jumpintersegment)
1608 return match;
1609
1610 /* Check memory and accumulator operand size. */
1611 for (j = 0; j < i.operands; j++)
1612 {
1613 if (t->operand_types[j].bitfield.anysize)
1614 continue;
1615
1616 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1617 {
1618 match = 0;
1619 break;
1620 }
1621
1622 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1623 {
1624 match = 0;
1625 break;
1626 }
1627 }
1628
1629 if (match)
1630 return match;
1631 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1632 {
1633 mismatch:
1634 i.error = operand_size_mismatch;
1635 return 0;
1636 }
1637
1638 /* Check reverse. */
1639 gas_assert (i.operands == 2);
1640
1641 match = 1;
1642 for (j = 0; j < 2; j++)
1643 {
1644 if (t->operand_types[j].bitfield.acc
1645 && !match_reg_size (t, j ? 0 : 1))
1646 goto mismatch;
1647
1648 if (i.types[j].bitfield.mem
1649 && !match_mem_size (t, j ? 0 : 1))
1650 goto mismatch;
1651 }
1652
1653 return match;
1654 }
1655
1656 static INLINE int
1657 operand_type_match (i386_operand_type overlap,
1658 i386_operand_type given)
1659 {
1660 i386_operand_type temp = overlap;
1661
1662 temp.bitfield.jumpabsolute = 0;
1663 temp.bitfield.unspecified = 0;
1664 temp.bitfield.byte = 0;
1665 temp.bitfield.word = 0;
1666 temp.bitfield.dword = 0;
1667 temp.bitfield.fword = 0;
1668 temp.bitfield.qword = 0;
1669 temp.bitfield.tbyte = 0;
1670 temp.bitfield.xmmword = 0;
1671 temp.bitfield.ymmword = 0;
1672 if (operand_type_all_zero (&temp))
1673 goto mismatch;
1674
1675 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1676 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1677 return 1;
1678
1679 mismatch:
1680 i.error = operand_type_mismatch;
1681 return 0;
1682 }
1683
1684 /* If given types g0 and g1 are registers they must be of the same type
1685 unless the expected operand type register overlap is null.
1686 Note that Acc in a template matches every size of reg. */
1687
1688 static INLINE int
1689 operand_type_register_match (i386_operand_type m0,
1690 i386_operand_type g0,
1691 i386_operand_type t0,
1692 i386_operand_type m1,
1693 i386_operand_type g1,
1694 i386_operand_type t1)
1695 {
1696 if (!operand_type_check (g0, reg))
1697 return 1;
1698
1699 if (!operand_type_check (g1, reg))
1700 return 1;
1701
1702 if (g0.bitfield.reg8 == g1.bitfield.reg8
1703 && g0.bitfield.reg16 == g1.bitfield.reg16
1704 && g0.bitfield.reg32 == g1.bitfield.reg32
1705 && g0.bitfield.reg64 == g1.bitfield.reg64)
1706 return 1;
1707
1708 if (m0.bitfield.acc)
1709 {
1710 t0.bitfield.reg8 = 1;
1711 t0.bitfield.reg16 = 1;
1712 t0.bitfield.reg32 = 1;
1713 t0.bitfield.reg64 = 1;
1714 }
1715
1716 if (m1.bitfield.acc)
1717 {
1718 t1.bitfield.reg8 = 1;
1719 t1.bitfield.reg16 = 1;
1720 t1.bitfield.reg32 = 1;
1721 t1.bitfield.reg64 = 1;
1722 }
1723
1724 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1725 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1726 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1727 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1728 return 1;
1729
1730 i.error = register_type_mismatch;
1731
1732 return 0;
1733 }
1734
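/* Return the ModRM mod field value implied by the displacement size in T:
   1 for an 8-bit displacement, 2 for a 16-bit or 32-bit one, and 0 when
   no displacement type is set.  */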
1735 static INLINE unsigned int
1736 mode_from_disp_size (i386_operand_type t)
1737 {
1738 if (t.bitfield.disp8)
1739 return 1;
1740 else if (t.bitfield.disp16
1741 || t.bitfield.disp32
1742 || t.bitfield.disp32s)
1743 return 2;
1744 else
1745 return 0;
1746 }
1747
1748 static INLINE int
1749 fits_in_signed_byte (offsetT num)
1750 {
1751 return (num >= -128) && (num <= 127);
1752 }
1753
1754 static INLINE int
1755 fits_in_unsigned_byte (offsetT num)
1756 {
1757 return (num & 0xff) == num;
1758 }
1759
1760 static INLINE int
1761 fits_in_unsigned_word (offsetT num)
1762 {
1763 return (num & 0xffff) == num;
1764 }
1765
1766 static INLINE int
1767 fits_in_signed_word (offsetT num)
1768 {
1769 return (-32768 <= num) && (num <= 32767);
1770 }
1771
1772 static INLINE int
1773 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1774 {
1775 #ifndef BFD64
1776 return 1;
1777 #else
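  /* With BFD64 an offsetT is wider than 32 bits: NUM fits in a signed
     32-bit field exactly when bits 31 and up are either all clear or all
     set, i.e. a plain sign extension of bit 31.  */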
1778 return (!(((offsetT) -1 << 31) & num)
1779 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1780 #endif
1781 } /* fits_in_signed_long() */
1782
1783 static INLINE int
1784 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1785 {
1786 #ifndef BFD64
1787 return 1;
1788 #else
1789 return (num & (((offsetT) 2 << 31) - 1)) == num;
1790 #endif
1791 } /* fits_in_unsigned_long() */
1792
1793 static INLINE int
1794 fits_in_imm4 (offsetT num)
1795 {
1796 return (num & 0xf) == num;
1797 }
1798
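/* Return the set of immediate operand types that can hold NUM; Imm64 is
   always possible, narrower types only when NUM fits in them.  */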
1799 static i386_operand_type
1800 smallest_imm_type (offsetT num)
1801 {
1802 i386_operand_type t;
1803
1804 operand_type_set (&t, 0);
1805 t.bitfield.imm64 = 1;
1806
1807 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1808 {
1809 /* This code is disabled on the 486 because all the Imm1 forms
1810 in the opcode table are slower on the i486. They're the
1811 versions with the implicitly specified single-position
1812 displacement, which has another syntax if you really want to
1813 use that form. */
1814 t.bitfield.imm1 = 1;
1815 t.bitfield.imm8 = 1;
1816 t.bitfield.imm8s = 1;
1817 t.bitfield.imm16 = 1;
1818 t.bitfield.imm32 = 1;
1819 t.bitfield.imm32s = 1;
1820 }
1821 else if (fits_in_signed_byte (num))
1822 {
1823 t.bitfield.imm8 = 1;
1824 t.bitfield.imm8s = 1;
1825 t.bitfield.imm16 = 1;
1826 t.bitfield.imm32 = 1;
1827 t.bitfield.imm32s = 1;
1828 }
1829 else if (fits_in_unsigned_byte (num))
1830 {
1831 t.bitfield.imm8 = 1;
1832 t.bitfield.imm16 = 1;
1833 t.bitfield.imm32 = 1;
1834 t.bitfield.imm32s = 1;
1835 }
1836 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1837 {
1838 t.bitfield.imm16 = 1;
1839 t.bitfield.imm32 = 1;
1840 t.bitfield.imm32s = 1;
1841 }
1842 else if (fits_in_signed_long (num))
1843 {
1844 t.bitfield.imm32 = 1;
1845 t.bitfield.imm32s = 1;
1846 }
1847 else if (fits_in_unsigned_long (num))
1848 t.bitfield.imm32 = 1;
1849
1850 return t;
1851 }
1852
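/* Truncate VAL to SIZE bytes, warning when significant bits would be
   lost.  */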
1853 static offsetT
1854 offset_in_range (offsetT val, int size)
1855 {
1856 addressT mask;
1857
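  /* For the 4- and 8-byte cases the mask is written as
     ((addressT) 2 << (bits - 1)) - 1 rather than ((addressT) 1 << bits) - 1,
     so the shift count never equals the width of addressT (shifting by the
     full width of the type is undefined in C).  */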
1858 switch (size)
1859 {
1860 case 1: mask = ((addressT) 1 << 8) - 1; break;
1861 case 2: mask = ((addressT) 1 << 16) - 1; break;
1862 case 4: mask = ((addressT) 2 << 31) - 1; break;
1863 #ifdef BFD64
1864 case 8: mask = ((addressT) 2 << 63) - 1; break;
1865 #endif
1866 default: abort ();
1867 }
1868
1869 #ifdef BFD64
1870 /* If BFD64, sign extend val for 32bit address mode. */
1871 if (flag_code != CODE_64BIT
1872 || i.prefix[ADDR_PREFIX])
1873 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1874 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1875 #endif
1876
1877 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1878 {
1879 char buf1[40], buf2[40];
1880
1881 sprint_value (buf1, val);
1882 sprint_value (buf2, val & mask);
1883 as_warn (_("%s shortened to %s"), buf1, buf2);
1884 }
1885 return val & mask;
1886 }
1887
1888 enum PREFIX_GROUP
1889 {
1890 PREFIX_EXIST = 0,
1891 PREFIX_LOCK,
1892 PREFIX_REP,
1893 PREFIX_OTHER
1894 };
1895
1896 /* Returns
1897 a. PREFIX_EXIST if attempting to add a prefix where one from the
1898 same class already exists.
1899 b. PREFIX_LOCK if lock prefix is added.
1900 c. PREFIX_REP if rep/repne prefix is added.
1901 d. PREFIX_OTHER if other prefix is added.
1902 */
1903
1904 static enum PREFIX_GROUP
1905 add_prefix (unsigned int prefix)
1906 {
1907 enum PREFIX_GROUP ret = PREFIX_OTHER;
1908 unsigned int q;
1909
1910 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1911 && flag_code == CODE_64BIT)
1912 {
1913 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1914 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1915 && (prefix & (REX_R | REX_X | REX_B))))
1916 ret = PREFIX_EXIST;
1917 q = REX_PREFIX;
1918 }
1919 else
1920 {
1921 switch (prefix)
1922 {
1923 default:
1924 abort ();
1925
1926 case CS_PREFIX_OPCODE:
1927 case DS_PREFIX_OPCODE:
1928 case ES_PREFIX_OPCODE:
1929 case FS_PREFIX_OPCODE:
1930 case GS_PREFIX_OPCODE:
1931 case SS_PREFIX_OPCODE:
1932 q = SEG_PREFIX;
1933 break;
1934
1935 case REPNE_PREFIX_OPCODE:
1936 case REPE_PREFIX_OPCODE:
1937 q = REP_PREFIX;
1938 ret = PREFIX_REP;
1939 break;
1940
1941 case LOCK_PREFIX_OPCODE:
1942 q = LOCK_PREFIX;
1943 ret = PREFIX_LOCK;
1944 break;
1945
1946 case FWAIT_OPCODE:
1947 q = WAIT_PREFIX;
1948 break;
1949
1950 case ADDR_PREFIX_OPCODE:
1951 q = ADDR_PREFIX;
1952 break;
1953
1954 case DATA_PREFIX_OPCODE:
1955 q = DATA_PREFIX;
1956 break;
1957 }
1958 if (i.prefix[q] != 0)
1959 ret = PREFIX_EXIST;
1960 }
1961
1962 if (ret)
1963 {
1964 if (!i.prefix[q])
1965 ++i.prefixes;
1966 i.prefix[q] |= prefix;
1967 }
1968 else
1969 as_bad (_("same type of prefix used twice"));
1970
1971 return ret;
1972 }
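
/* For illustration, how add_prefix classifies a few common cases:

     add_prefix (LOCK_PREFIX_OPCODE)  -> PREFIX_LOCK
     add_prefix (REPE_PREFIX_OPCODE)  -> PREFIX_REP
     add_prefix (CS_PREFIX_OPCODE)    -> PREFIX_OTHER (segment group)

   A second prefix from the same group (e.g. two segment overrides, or
   LOCK twice) yields PREFIX_EXIST together with the "same type of
   prefix used twice" diagnostic.  REX prefixes in 64-bit mode are
   instead OR-ed into the existing REX byte, and only clash when REX.W
   is requested twice or both the old and new prefix carry R/X/B bits.  */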
1973
1974 static void
1975 update_code_flag (int value, int check)
1976 {
1977 PRINTF_LIKE ((*as_error));
1978
1979 flag_code = (enum flag_code) value;
1980 if (flag_code == CODE_64BIT)
1981 {
1982 cpu_arch_flags.bitfield.cpu64 = 1;
1983 cpu_arch_flags.bitfield.cpuno64 = 0;
1984 }
1985 else
1986 {
1987 cpu_arch_flags.bitfield.cpu64 = 0;
1988 cpu_arch_flags.bitfield.cpuno64 = 1;
1989 }
1990 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1991 {
1992 if (check)
1993 as_error = as_fatal;
1994 else
1995 as_error = as_bad;
1996 (*as_error) (_("64bit mode not supported on `%s'."),
1997 cpu_arch_name ? cpu_arch_name : default_arch);
1998 }
1999 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2000 {
2001 if (check)
2002 as_error = as_fatal;
2003 else
2004 as_error = as_bad;
2005 (*as_error) (_("32bit mode not supported on `%s'."),
2006 cpu_arch_name ? cpu_arch_name : default_arch);
2007 }
2008 stackop_size = '\0';
2009 }
2010
2011 static void
2012 set_code_flag (int value)
2013 {
2014 update_code_flag (value, 0);
2015 }
2016
2017 static void
2018 set_16bit_gcc_code_flag (int new_code_flag)
2019 {
2020 flag_code = (enum flag_code) new_code_flag;
2021 if (flag_code != CODE_16BIT)
2022 abort ();
2023 cpu_arch_flags.bitfield.cpu64 = 0;
2024 cpu_arch_flags.bitfield.cpuno64 = 1;
2025 stackop_size = LONG_MNEM_SUFFIX;
2026 }
2027
2028 static void
2029 set_intel_syntax (int syntax_flag)
2030 {
2031 /* Find out if register prefixing is specified. */
2032 int ask_naked_reg = 0;
2033
2034 SKIP_WHITESPACE ();
2035 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2036 {
2037 char *string = input_line_pointer;
2038 int e = get_symbol_end ();
2039
2040 if (strcmp (string, "prefix") == 0)
2041 ask_naked_reg = 1;
2042 else if (strcmp (string, "noprefix") == 0)
2043 ask_naked_reg = -1;
2044 else
2045 as_bad (_("bad argument to syntax directive."));
2046 *input_line_pointer = e;
2047 }
2048 demand_empty_rest_of_line ();
2049
2050 intel_syntax = syntax_flag;
2051
2052 if (ask_naked_reg == 0)
2053 allow_naked_reg = (intel_syntax
2054 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2055 else
2056 allow_naked_reg = (ask_naked_reg < 0);
2057
2058 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2059
2060 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2061 identifier_chars['$'] = intel_syntax ? '$' : 0;
2062 register_prefix = allow_naked_reg ? "" : "%";
2063 }
2064
2065 static void
2066 set_intel_mnemonic (int mnemonic_flag)
2067 {
2068 intel_mnemonic = mnemonic_flag;
2069 }
2070
2071 static void
2072 set_allow_index_reg (int flag)
2073 {
2074 allow_index_reg = flag;
2075 }
2076
2077 static void
2078 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2079 {
2080 SKIP_WHITESPACE ();
2081
2082 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2083 {
2084 char *string = input_line_pointer;
2085 int e = get_symbol_end ();
2086
2087 if (strcmp (string, "none") == 0)
2088 sse_check = sse_check_none;
2089 else if (strcmp (string, "warning") == 0)
2090 sse_check = sse_check_warning;
2091 else if (strcmp (string, "error") == 0)
2092 sse_check = sse_check_error;
2093 else
2094 as_bad (_("bad argument to sse_check directive."));
2095 *input_line_pointer = e;
2096 }
2097 else
2098 as_bad (_("missing argument for sse_check directive"));
2099
2100 demand_empty_rest_of_line ();
2101 }
2102
2103 static void
2104 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2105 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2106 {
2107 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2108 static const char *arch;
2109
2110 /* Intel L1OM is only supported on ELF. */
2111 if (!IS_ELF)
2112 return;
2113
2114 if (!arch)
2115 {
2116 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2117 use default_arch. */
2118 arch = cpu_arch_name;
2119 if (!arch)
2120 arch = default_arch;
2121 }
2122
2123 /* When targeting Intel L1OM, the requested architecture must include L1OM support. */
2124 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2125 || new_flag.bitfield.cpul1om)
2126 return;
2127
2128 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2129 #endif
2130 }
2131
2132 static void
2133 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2134 {
2135 SKIP_WHITESPACE ();
2136
2137 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2138 {
2139 char *string = input_line_pointer;
2140 int e = get_symbol_end ();
2141 unsigned int j;
2142 i386_cpu_flags flags;
2143
2144 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2145 {
2146 if (strcmp (string, cpu_arch[j].name) == 0)
2147 {
2148 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2149
2150 if (*string != '.')
2151 {
2152 cpu_arch_name = cpu_arch[j].name;
2153 cpu_sub_arch_name = NULL;
2154 cpu_arch_flags = cpu_arch[j].flags;
2155 if (flag_code == CODE_64BIT)
2156 {
2157 cpu_arch_flags.bitfield.cpu64 = 1;
2158 cpu_arch_flags.bitfield.cpuno64 = 0;
2159 }
2160 else
2161 {
2162 cpu_arch_flags.bitfield.cpu64 = 0;
2163 cpu_arch_flags.bitfield.cpuno64 = 1;
2164 }
2165 cpu_arch_isa = cpu_arch[j].type;
2166 cpu_arch_isa_flags = cpu_arch[j].flags;
2167 if (!cpu_arch_tune_set)
2168 {
2169 cpu_arch_tune = cpu_arch_isa;
2170 cpu_arch_tune_flags = cpu_arch_isa_flags;
2171 }
2172 break;
2173 }
2174
2175 if (!cpu_arch[j].negated)
2176 flags = cpu_flags_or (cpu_arch_flags,
2177 cpu_arch[j].flags);
2178 else
2179 flags = cpu_flags_and_not (cpu_arch_flags,
2180 cpu_arch[j].flags);
2181 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2182 {
2183 if (cpu_sub_arch_name)
2184 {
2185 char *name = cpu_sub_arch_name;
2186 cpu_sub_arch_name = concat (name,
2187 cpu_arch[j].name,
2188 (const char *) NULL);
2189 free (name);
2190 }
2191 else
2192 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2193 cpu_arch_flags = flags;
2194 cpu_arch_isa_flags = flags;
2195 }
2196 *input_line_pointer = e;
2197 demand_empty_rest_of_line ();
2198 return;
2199 }
2200 }
2201 if (j >= ARRAY_SIZE (cpu_arch))
2202 as_bad (_("no such architecture: `%s'"), string);
2203
2204 *input_line_pointer = e;
2205 }
2206 else
2207 as_bad (_("missing cpu architecture"));
2208
2209 no_cond_jump_promotion = 0;
2210 if (*input_line_pointer == ','
2211 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2212 {
2213 char *string = ++input_line_pointer;
2214 int e = get_symbol_end ();
2215
2216 if (strcmp (string, "nojumps") == 0)
2217 no_cond_jump_promotion = 1;
2218 else if (strcmp (string, "jumps") == 0)
2219 ;
2220 else
2221 as_bad (_("no such architecture modifier: `%s'"), string);
2222
2223 *input_line_pointer = e;
2224 }
2225
2226 demand_empty_rest_of_line ();
2227 }
2228
2229 enum bfd_architecture
2230 i386_arch (void)
2231 {
2232 if (cpu_arch_isa == PROCESSOR_L1OM)
2233 {
2234 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2235 || flag_code != CODE_64BIT)
2236 as_fatal (_("Intel L1OM is 64bit ELF only"));
2237 return bfd_arch_l1om;
2238 }
2239 else
2240 return bfd_arch_i386;
2241 }
2242
2243 unsigned long
2244 i386_mach (void)
2245 {
2246 if (!strncmp (default_arch, "x86_64", 6))
2247 {
2248 if (cpu_arch_isa == PROCESSOR_L1OM)
2249 {
2250 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2251 || default_arch[6] != '\0')
2252 as_fatal (_("Intel L1OM is 64bit ELF only"));
2253 return bfd_mach_l1om;
2254 }
2255 else if (default_arch[6] == '\0')
2256 return bfd_mach_x86_64;
2257 else
2258 return bfd_mach_x64_32;
2259 }
2260 else if (!strcmp (default_arch, "i386"))
2261 return bfd_mach_i386_i386;
2262 else
2263 as_fatal (_("unknown architecture"));
2264 }
2265 \f
2266 void
2267 md_begin (void)
2268 {
2269 const char *hash_err;
2270
2271 /* Initialize op_hash hash table. */
2272 op_hash = hash_new ();
2273
2274 {
2275 const insn_template *optab;
2276 templates *core_optab;
2277
2278 /* Setup for loop. */
2279 optab = i386_optab;
2280 core_optab = (templates *) xmalloc (sizeof (templates));
2281 core_optab->start = optab;
2282
2283 while (1)
2284 {
2285 ++optab;
2286 if (optab->name == NULL
2287 || strcmp (optab->name, (optab - 1)->name) != 0)
2288 {
2289 /* different name --> ship out current template list;
2290 add to hash table; & begin anew. */
2291 core_optab->end = optab;
2292 hash_err = hash_insert (op_hash,
2293 (optab - 1)->name,
2294 (void *) core_optab);
2295 if (hash_err)
2296 {
2297 as_fatal (_("internal Error: Can't hash %s: %s"),
2298 (optab - 1)->name,
2299 hash_err);
2300 }
2301 if (optab->name == NULL)
2302 break;
2303 core_optab = (templates *) xmalloc (sizeof (templates));
2304 core_optab->start = optab;
2305 }
2306 }
2307 }
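
/* For illustration: i386_optab keeps templates with the same mnemonic
   adjacent, so the loop above turns e.g. the run of "mov" entries into
   a single templates record whose start/end bracket that run, hashed
   under the key "mov".  parse_insn later fetches the whole range with
   one hash_find and match_template picks the entry whose operands fit.  */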
2308
2309 /* Initialize reg_hash hash table. */
2310 reg_hash = hash_new ();
2311 {
2312 const reg_entry *regtab;
2313 unsigned int regtab_size = i386_regtab_size;
2314
2315 for (regtab = i386_regtab; regtab_size--; regtab++)
2316 {
2317 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2318 if (hash_err)
2319 as_fatal (_("internal Error: Can't hash %s: %s"),
2320 regtab->reg_name,
2321 hash_err);
2322 }
2323 }
2324
2325 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2326 {
2327 int c;
2328 char *p;
2329
2330 for (c = 0; c < 256; c++)
2331 {
2332 if (ISDIGIT (c))
2333 {
2334 digit_chars[c] = c;
2335 mnemonic_chars[c] = c;
2336 register_chars[c] = c;
2337 operand_chars[c] = c;
2338 }
2339 else if (ISLOWER (c))
2340 {
2341 mnemonic_chars[c] = c;
2342 register_chars[c] = c;
2343 operand_chars[c] = c;
2344 }
2345 else if (ISUPPER (c))
2346 {
2347 mnemonic_chars[c] = TOLOWER (c);
2348 register_chars[c] = mnemonic_chars[c];
2349 operand_chars[c] = c;
2350 }
2351
2352 if (ISALPHA (c) || ISDIGIT (c))
2353 identifier_chars[c] = c;
2354 else if (c >= 128)
2355 {
2356 identifier_chars[c] = c;
2357 operand_chars[c] = c;
2358 }
2359 }
2360
2361 #ifdef LEX_AT
2362 identifier_chars['@'] = '@';
2363 #endif
2364 #ifdef LEX_QM
2365 identifier_chars['?'] = '?';
2366 operand_chars['?'] = '?';
2367 #endif
2368 digit_chars['-'] = '-';
2369 mnemonic_chars['_'] = '_';
2370 mnemonic_chars['-'] = '-';
2371 mnemonic_chars['.'] = '.';
2372 identifier_chars['_'] = '_';
2373 identifier_chars['.'] = '.';
2374
2375 for (p = operand_special_chars; *p != '\0'; p++)
2376 operand_chars[(unsigned char) *p] = *p;
2377 }
2378
2379 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2380 if (IS_ELF)
2381 {
2382 record_alignment (text_section, 2);
2383 record_alignment (data_section, 2);
2384 record_alignment (bss_section, 2);
2385 }
2386 #endif
2387
2388 if (flag_code == CODE_64BIT)
2389 {
2390 #if defined (OBJ_COFF) && defined (TE_PE)
2391 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2392 ? 32 : 16);
2393 #else
2394 x86_dwarf2_return_column = 16;
2395 #endif
2396 x86_cie_data_alignment = -8;
2397 }
2398 else
2399 {
2400 x86_dwarf2_return_column = 8;
2401 x86_cie_data_alignment = -4;
2402 }
2403 }
2404
2405 void
2406 i386_print_statistics (FILE *file)
2407 {
2408 hash_print_statistics (file, "i386 opcode", op_hash);
2409 hash_print_statistics (file, "i386 register", reg_hash);
2410 }
2411 \f
2412 #ifdef DEBUG386
2413
2414 /* Debugging routines for md_assemble. */
2415 static void pte (insn_template *);
2416 static void pt (i386_operand_type);
2417 static void pe (expressionS *);
2418 static void ps (symbolS *);
2419
2420 static void
2421 pi (char *line, i386_insn *x)
2422 {
2423 unsigned int j;
2424
2425 fprintf (stdout, "%s: template ", line);
2426 pte (&x->tm);
2427 fprintf (stdout, " address: base %s index %s scale %x\n",
2428 x->base_reg ? x->base_reg->reg_name : "none",
2429 x->index_reg ? x->index_reg->reg_name : "none",
2430 x->log2_scale_factor);
2431 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2432 x->rm.mode, x->rm.reg, x->rm.regmem);
2433 fprintf (stdout, " sib: base %x index %x scale %x\n",
2434 x->sib.base, x->sib.index, x->sib.scale);
2435 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2436 (x->rex & REX_W) != 0,
2437 (x->rex & REX_R) != 0,
2438 (x->rex & REX_X) != 0,
2439 (x->rex & REX_B) != 0);
2440 for (j = 0; j < x->operands; j++)
2441 {
2442 fprintf (stdout, " #%d: ", j + 1);
2443 pt (x->types[j]);
2444 fprintf (stdout, "\n");
2445 if (x->types[j].bitfield.reg8
2446 || x->types[j].bitfield.reg16
2447 || x->types[j].bitfield.reg32
2448 || x->types[j].bitfield.reg64
2449 || x->types[j].bitfield.regmmx
2450 || x->types[j].bitfield.regxmm
2451 || x->types[j].bitfield.regymm
2452 || x->types[j].bitfield.sreg2
2453 || x->types[j].bitfield.sreg3
2454 || x->types[j].bitfield.control
2455 || x->types[j].bitfield.debug
2456 || x->types[j].bitfield.test)
2457 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2458 if (operand_type_check (x->types[j], imm))
2459 pe (x->op[j].imms);
2460 if (operand_type_check (x->types[j], disp))
2461 pe (x->op[j].disps);
2462 }
2463 }
2464
2465 static void
2466 pte (insn_template *t)
2467 {
2468 unsigned int j;
2469 fprintf (stdout, " %d operands ", t->operands);
2470 fprintf (stdout, "opcode %x ", t->base_opcode);
2471 if (t->extension_opcode != None)
2472 fprintf (stdout, "ext %x ", t->extension_opcode);
2473 if (t->opcode_modifier.d)
2474 fprintf (stdout, "D");
2475 if (t->opcode_modifier.w)
2476 fprintf (stdout, "W");
2477 fprintf (stdout, "\n");
2478 for (j = 0; j < t->operands; j++)
2479 {
2480 fprintf (stdout, " #%d type ", j + 1);
2481 pt (t->operand_types[j]);
2482 fprintf (stdout, "\n");
2483 }
2484 }
2485
2486 static void
2487 pe (expressionS *e)
2488 {
2489 fprintf (stdout, " operation %d\n", e->X_op);
2490 fprintf (stdout, " add_number %ld (%lx)\n",
2491 (long) e->X_add_number, (long) e->X_add_number);
2492 if (e->X_add_symbol)
2493 {
2494 fprintf (stdout, " add_symbol ");
2495 ps (e->X_add_symbol);
2496 fprintf (stdout, "\n");
2497 }
2498 if (e->X_op_symbol)
2499 {
2500 fprintf (stdout, " op_symbol ");
2501 ps (e->X_op_symbol);
2502 fprintf (stdout, "\n");
2503 }
2504 }
2505
2506 static void
2507 ps (symbolS *s)
2508 {
2509 fprintf (stdout, "%s type %s%s",
2510 S_GET_NAME (s),
2511 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2512 segment_name (S_GET_SEGMENT (s)));
2513 }
2514
2515 static struct type_name
2516 {
2517 i386_operand_type mask;
2518 const char *name;
2519 }
2520 const type_names[] =
2521 {
2522 { OPERAND_TYPE_REG8, "r8" },
2523 { OPERAND_TYPE_REG16, "r16" },
2524 { OPERAND_TYPE_REG32, "r32" },
2525 { OPERAND_TYPE_REG64, "r64" },
2526 { OPERAND_TYPE_IMM8, "i8" },
2527 { OPERAND_TYPE_IMM8, "i8s" },
2528 { OPERAND_TYPE_IMM16, "i16" },
2529 { OPERAND_TYPE_IMM32, "i32" },
2530 { OPERAND_TYPE_IMM32S, "i32s" },
2531 { OPERAND_TYPE_IMM64, "i64" },
2532 { OPERAND_TYPE_IMM1, "i1" },
2533 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2534 { OPERAND_TYPE_DISP8, "d8" },
2535 { OPERAND_TYPE_DISP16, "d16" },
2536 { OPERAND_TYPE_DISP32, "d32" },
2537 { OPERAND_TYPE_DISP32S, "d32s" },
2538 { OPERAND_TYPE_DISP64, "d64" },
2539 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2540 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2541 { OPERAND_TYPE_CONTROL, "control reg" },
2542 { OPERAND_TYPE_TEST, "test reg" },
2543 { OPERAND_TYPE_DEBUG, "debug reg" },
2544 { OPERAND_TYPE_FLOATREG, "FReg" },
2545 { OPERAND_TYPE_FLOATACC, "FAcc" },
2546 { OPERAND_TYPE_SREG2, "SReg2" },
2547 { OPERAND_TYPE_SREG3, "SReg3" },
2548 { OPERAND_TYPE_ACC, "Acc" },
2549 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2550 { OPERAND_TYPE_REGMMX, "rMMX" },
2551 { OPERAND_TYPE_REGXMM, "rXMM" },
2552 { OPERAND_TYPE_REGYMM, "rYMM" },
2553 { OPERAND_TYPE_ESSEG, "es" },
2554 };
2555
2556 static void
2557 pt (i386_operand_type t)
2558 {
2559 unsigned int j;
2560 i386_operand_type a;
2561
2562 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2563 {
2564 a = operand_type_and (t, type_names[j].mask);
2565 if (!operand_type_all_zero (&a))
2566 fprintf (stdout, "%s, ", type_names[j].name);
2567 }
2568 fflush (stdout);
2569 }
2570
2571 #endif /* DEBUG386 */
2572 \f
2573 static bfd_reloc_code_real_type
2574 reloc (unsigned int size,
2575 int pcrel,
2576 int sign,
2577 bfd_reloc_code_real_type other)
2578 {
2579 if (other != NO_RELOC)
2580 {
2581 reloc_howto_type *rel;
2582
2583 if (size == 8)
2584 switch (other)
2585 {
2586 case BFD_RELOC_X86_64_GOT32:
2587 return BFD_RELOC_X86_64_GOT64;
2588 break;
2589 case BFD_RELOC_X86_64_PLTOFF64:
2590 return BFD_RELOC_X86_64_PLTOFF64;
2591 break;
2592 case BFD_RELOC_X86_64_GOTPC32:
2593 other = BFD_RELOC_X86_64_GOTPC64;
2594 break;
2595 case BFD_RELOC_X86_64_GOTPCREL:
2596 other = BFD_RELOC_X86_64_GOTPCREL64;
2597 break;
2598 case BFD_RELOC_X86_64_TPOFF32:
2599 other = BFD_RELOC_X86_64_TPOFF64;
2600 break;
2601 case BFD_RELOC_X86_64_DTPOFF32:
2602 other = BFD_RELOC_X86_64_DTPOFF64;
2603 break;
2604 default:
2605 break;
2606 }
2607
2608 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2609 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2610 sign = -1;
2611
2612 rel = bfd_reloc_type_lookup (stdoutput, other);
2613 if (!rel)
2614 as_bad (_("unknown relocation (%u)"), other);
2615 else if (size != bfd_get_reloc_size (rel))
2616 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2617 bfd_get_reloc_size (rel),
2618 size);
2619 else if (pcrel && !rel->pc_relative)
2620 as_bad (_("non-pc-relative relocation for pc-relative field"));
2621 else if ((rel->complain_on_overflow == complain_overflow_signed
2622 && !sign)
2623 || (rel->complain_on_overflow == complain_overflow_unsigned
2624 && sign > 0))
2625 as_bad (_("relocated field and relocation type differ in signedness"));
2626 else
2627 return other;
2628 return NO_RELOC;
2629 }
2630
2631 if (pcrel)
2632 {
2633 if (!sign)
2634 as_bad (_("there are no unsigned pc-relative relocations"));
2635 switch (size)
2636 {
2637 case 1: return BFD_RELOC_8_PCREL;
2638 case 2: return BFD_RELOC_16_PCREL;
2639 case 4: return BFD_RELOC_32_PCREL;
2640 case 8: return BFD_RELOC_64_PCREL;
2641 }
2642 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2643 }
2644 else
2645 {
2646 if (sign > 0)
2647 switch (size)
2648 {
2649 case 4: return BFD_RELOC_X86_64_32S;
2650 }
2651 else
2652 switch (size)
2653 {
2654 case 1: return BFD_RELOC_8;
2655 case 2: return BFD_RELOC_16;
2656 case 4: return BFD_RELOC_32;
2657 case 8: return BFD_RELOC_64;
2658 }
2659 as_bad (_("cannot do %s %u byte relocation"),
2660 sign > 0 ? "signed" : "unsigned", size);
2661 }
2662
2663 return NO_RELOC;
2664 }
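
/* For illustration, the default mapping performed by reloc () when no
   relocation operator was given (other == NO_RELOC):

     size 4, pcrel             -> BFD_RELOC_32_PCREL
     size 4, !pcrel, sign > 0  -> BFD_RELOC_X86_64_32S
     size 4, !pcrel, sign <= 0 -> BFD_RELOC_32
     size 8, !pcrel, sign <= 0 -> BFD_RELOC_64

   With an explicit operator and size 8, several 32-bit relocation types
   are first widened to their 64-bit counterparts, as in
   BFD_RELOC_X86_64_GOT32 -> BFD_RELOC_X86_64_GOT64 above.  */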
2665
2666 /* Here we decide which fixups can be adjusted to make them relative to
2667 the beginning of the section instead of the symbol. Basically we need
2668 to make sure that the dynamic relocations are done correctly, so in
2669 some cases we force the original symbol to be used. */
2670
2671 int
2672 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2673 {
2674 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2675 if (!IS_ELF)
2676 return 1;
2677
2678 /* Don't adjust pc-relative references to merge sections in 64-bit
2679 mode. */
2680 if (use_rela_relocations
2681 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2682 && fixP->fx_pcrel)
2683 return 0;
2684
2685 /* The x86_64 GOTPCREL relocations are represented as 32-bit PC-relative
2686 relocations here and changed later by validate_fix. */
2687 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2688 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2689 return 0;
2690
2691 /* adjust_reloc_syms doesn't know about the GOT. */
2692 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2693 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2694 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2695 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2696 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2697 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2698 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2699 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2700 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2701 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2702 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2703 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2704 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2705 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2706 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2707 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2708 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2709 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2710 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2711 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2712 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2713 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2714 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2715 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2716 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2717 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2718 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2719 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2720 return 0;
2721 #endif
2722 return 1;
2723 }
2724
2725 static int
2726 intel_float_operand (const char *mnemonic)
2727 {
2728 /* Note that the value returned is meaningful only for opcodes with (memory)
2729 operands, hence the code here is free to improperly handle opcodes that
2730 have no operands (for better performance and smaller code). */
2731
2732 if (mnemonic[0] != 'f')
2733 return 0; /* non-math */
2734
2735 switch (mnemonic[1])
2736 {
2737 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2738 the fs segment override prefix are not currently handled here, as
2739 no call path can make opcodes without operands get here. */
2740 case 'i':
2741 return 2 /* integer op */;
2742 case 'l':
2743 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2744 return 3; /* fldcw/fldenv */
2745 break;
2746 case 'n':
2747 if (mnemonic[2] != 'o' /* fnop */)
2748 return 3; /* non-waiting control op */
2749 break;
2750 case 'r':
2751 if (mnemonic[2] == 's')
2752 return 3; /* frstor/frstpm */
2753 break;
2754 case 's':
2755 if (mnemonic[2] == 'a')
2756 return 3; /* fsave */
2757 if (mnemonic[2] == 't')
2758 {
2759 switch (mnemonic[3])
2760 {
2761 case 'c': /* fstcw */
2762 case 'd': /* fstdw */
2763 case 'e': /* fstenv */
2764 case 's': /* fsts[gw] */
2765 return 3;
2766 }
2767 }
2768 break;
2769 case 'x':
2770 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2771 return 0; /* fxsave/fxrstor are not really math ops */
2772 break;
2773 }
2774
2775 return 1;
2776 }
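
/* For illustration, how intel_float_operand classifies some mnemonics:

     "mov"    -> 0  (not an x87 mnemonic)
     "fadd"   -> 1  (floating point operand)
     "fild"   -> 2  (integer operand)
     "fldcw"  -> 3  (control/state op, no floating point operand)
     "fxsave" -> 0  (explicitly excluded above)

   As noted at the top of the function, the value is only meaningful
   for opcodes that take memory operands.  */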
2777
2778 /* Build the VEX prefix. */
2779
2780 static void
2781 build_vex_prefix (const insn_template *t)
2782 {
2783 unsigned int register_specifier;
2784 unsigned int implied_prefix;
2785 unsigned int vector_length;
2786
2787 /* Check register specifier. */
2788 if (i.vex.register_specifier)
2789 {
2790 register_specifier = i.vex.register_specifier->reg_num;
2791 if ((i.vex.register_specifier->reg_flags & RegRex))
2792 register_specifier += 8;
2793 register_specifier = ~register_specifier & 0xf;
2794 }
2795 else
2796 register_specifier = 0xf;
2797
2798 /* Use 2-byte VEX prefix by swapping destination and source
2799 operands. */
2800 if (!i.swap_operand
2801 && i.operands == i.reg_operands
2802 && i.tm.opcode_modifier.vexopcode == VEX0F
2803 && i.tm.opcode_modifier.s
2804 && i.rex == REX_B)
2805 {
2806 unsigned int xchg = i.operands - 1;
2807 union i386_op temp_op;
2808 i386_operand_type temp_type;
2809
2810 temp_type = i.types[xchg];
2811 i.types[xchg] = i.types[0];
2812 i.types[0] = temp_type;
2813 temp_op = i.op[xchg];
2814 i.op[xchg] = i.op[0];
2815 i.op[0] = temp_op;
2816
2817 gas_assert (i.rm.mode == 3);
2818
2819 i.rex = REX_R;
2820 xchg = i.rm.regmem;
2821 i.rm.regmem = i.rm.reg;
2822 i.rm.reg = xchg;
2823
2824 /* Use the next template. */
2825 i.tm = t[1];
2826 }
2827
2828 if (i.tm.opcode_modifier.vex == VEXScalar)
2829 vector_length = avxscalar;
2830 else
2831 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2832
2833 switch ((i.tm.base_opcode >> 8) & 0xff)
2834 {
2835 case 0:
2836 implied_prefix = 0;
2837 break;
2838 case DATA_PREFIX_OPCODE:
2839 implied_prefix = 1;
2840 break;
2841 case REPE_PREFIX_OPCODE:
2842 implied_prefix = 2;
2843 break;
2844 case REPNE_PREFIX_OPCODE:
2845 implied_prefix = 3;
2846 break;
2847 default:
2848 abort ();
2849 }
2850
2851 /* Use 2-byte VEX prefix if possible. */
2852 if (i.tm.opcode_modifier.vexopcode == VEX0F
2853 && i.tm.opcode_modifier.vexw != VEXW1
2854 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2855 {
2856 /* 2-byte VEX prefix. */
2857 unsigned int r;
2858
2859 i.vex.length = 2;
2860 i.vex.bytes[0] = 0xc5;
2861
2862 /* Check the REX.R bit. */
2863 r = (i.rex & REX_R) ? 0 : 1;
2864 i.vex.bytes[1] = (r << 7
2865 | register_specifier << 3
2866 | vector_length << 2
2867 | implied_prefix);
2868 }
2869 else
2870 {
2871 /* 3-byte VEX prefix. */
2872 unsigned int m, w;
2873
2874 i.vex.length = 3;
2875
2876 switch (i.tm.opcode_modifier.vexopcode)
2877 {
2878 case VEX0F:
2879 m = 0x1;
2880 i.vex.bytes[0] = 0xc4;
2881 break;
2882 case VEX0F38:
2883 m = 0x2;
2884 i.vex.bytes[0] = 0xc4;
2885 break;
2886 case VEX0F3A:
2887 m = 0x3;
2888 i.vex.bytes[0] = 0xc4;
2889 break;
2890 case XOP08:
2891 m = 0x8;
2892 i.vex.bytes[0] = 0x8f;
2893 break;
2894 case XOP09:
2895 m = 0x9;
2896 i.vex.bytes[0] = 0x8f;
2897 break;
2898 case XOP0A:
2899 m = 0xa;
2900 i.vex.bytes[0] = 0x8f;
2901 break;
2902 default:
2903 abort ();
2904 }
2905
2906 /* The high 3 bits of the second VEX byte are the one's complement
2907 of the RXB bits from REX. */
2908 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2909
2910 /* Check the REX.W bit. */
2911 w = (i.rex & REX_W) ? 1 : 0;
2912 if (i.tm.opcode_modifier.vexw)
2913 {
2914 if (w)
2915 abort ();
2916
2917 if (i.tm.opcode_modifier.vexw == VEXW1)
2918 w = 1;
2919 }
2920
2921 i.vex.bytes[2] = (w << 7
2922 | register_specifier << 3
2923 | vector_length << 2
2924 | implied_prefix);
2925 }
2926 }
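
/* For illustration, the byte layout build_vex_prefix produces.
   2-byte form (VEX0F map, VEX.W not forced, no REX.W/X/B needed):

     0xc5, then  bit 7 = inverted REX.R, bits 6:3 = vvvv (inverted
     register specifier), bit 2 = L (vector length), bits 1:0 = pp.

   3-byte form (0xc4, or 0x8f for the XOP maps):

     byte 1: bits 7:5 = inverted REX.R/X/B, bits 4:0 = opcode map;
     byte 2: bit 7 = W, bits 6:3 = vvvv, bit 2 = L, bits 1:0 = pp,

   where pp encodes the implied 66/F3/F2 prefix as 1/2/3 (0 = none).  */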
2927
2928 static void
2929 process_immext (void)
2930 {
2931 expressionS *exp;
2932
2933 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2934 {
2935 /* SSE3 instructions have fixed register operands with an opcode
2936 suffix which is coded in the same place as an 8-bit immediate
2937 field would be. Here we check those operands and remove them
2938 afterwards. */
2939 unsigned int x;
2940
2941 for (x = 0; x < i.operands; x++)
2942 if (i.op[x].regs->reg_num != x)
2943 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2944 register_prefix, i.op[x].regs->reg_name, x + 1,
2945 i.tm.name);
2946
2947 i.operands = 0;
2948 }
2949
2950 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2951 which is coded in the same place as an 8-bit immediate field
2952 would be. Here we fake an 8-bit immediate operand from the
2953 opcode suffix stored in tm.extension_opcode.
2954
2955 AVX instructions also use this encoding for some
2956 3-operand instructions. */
2957
2958 gas_assert (i.imm_operands == 0
2959 && (i.operands <= 2
2960 || (i.tm.opcode_modifier.vex
2961 && i.operands <= 4)));
2962
2963 exp = &im_expressions[i.imm_operands++];
2964 i.op[i.operands].imms = exp;
2965 i.types[i.operands] = imm8;
2966 i.operands++;
2967 exp->X_op = O_constant;
2968 exp->X_add_number = i.tm.extension_opcode;
2969 i.tm.extension_opcode = None;
2970 }
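
/* For illustration: AMD 3DNow! instructions are encoded as 0f 0f /r
   followed by a selector byte that picks the actual operation; that
   selector is kept in tm.extension_opcode, and process_immext turns it
   into a fake Imm8 operand so the normal immediate path emits it after
   the ModRM/SIB bytes.  The SSE3 branch above handles forms such as
   monitor/mwait written with their fixed %eax/%ecx/%edx operands: it
   validates those registers and then simply drops them.  */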
2971
2972 /* This is the guts of the machine-dependent assembler. LINE points to a
2973 machine dependent instruction. This function is supposed to emit
2974 the frags/bytes it assembles to. */
2975
2976 void
2977 md_assemble (char *line)
2978 {
2979 unsigned int j;
2980 char mnemonic[MAX_MNEM_SIZE];
2981 const insn_template *t;
2982
2983 /* Initialize globals. */
2984 memset (&i, '\0', sizeof (i));
2985 for (j = 0; j < MAX_OPERANDS; j++)
2986 i.reloc[j] = NO_RELOC;
2987 memset (disp_expressions, '\0', sizeof (disp_expressions));
2988 memset (im_expressions, '\0', sizeof (im_expressions));
2989 save_stack_p = save_stack;
2990
2991 /* First parse an instruction mnemonic & call i386_operand for the operands.
2992 We assume that the scrubber has arranged it so that line[0] is the valid
2993 start of a (possibly prefixed) mnemonic. */
2994
2995 line = parse_insn (line, mnemonic);
2996 if (line == NULL)
2997 return;
2998
2999 line = parse_operands (line, mnemonic);
3000 this_operand = -1;
3001 if (line == NULL)
3002 return;
3003
3004 /* Now we've parsed the mnemonic into a set of templates, and have the
3005 operands at hand. */
3006
3007 /* All Intel opcodes have reversed operands except for "bound" and
3008 "invlpga". Instructions with two immediate operands, such as
3009 "enter" and intersegment "jmp"/"call", are also left unreversed so
3010 that the immediate segment precedes the offset, as in AT&T mode. */
3011 if (intel_syntax
3012 && i.operands > 1
3013 && (strcmp (mnemonic, "bound") != 0)
3014 && (strcmp (mnemonic, "invlpga") != 0)
3015 && !(operand_type_check (i.types[0], imm)
3016 && operand_type_check (i.types[1], imm)))
3017 swap_operands ();
3018
3019 /* The order of the immediates should be reversed for the
3020 two-immediate extrq and insertq instructions. */
3021 if (i.imm_operands == 2
3022 && (strcmp (mnemonic, "extrq") == 0
3023 || strcmp (mnemonic, "insertq") == 0))
3024 swap_2_operands (0, 1);
3025
3026 if (i.imm_operands)
3027 optimize_imm ();
3028
3029 /* Don't optimize displacement for movabs since it only takes 64bit
3030 displacement. */
3031 if (i.disp_operands
3032 && !i.disp32_encoding
3033 && (flag_code != CODE_64BIT
3034 || strcmp (mnemonic, "movabs") != 0))
3035 optimize_disp ();
3036
3037 /* Next, we find a template that matches the given insn,
3038 making sure the overlap of the given operands types is consistent
3039 with the template operand types. */
3040
3041 if (!(t = match_template ()))
3042 return;
3043
3044 if (sse_check != sse_check_none
3045 && !i.tm.opcode_modifier.noavx
3046 && (i.tm.cpu_flags.bitfield.cpusse
3047 || i.tm.cpu_flags.bitfield.cpusse2
3048 || i.tm.cpu_flags.bitfield.cpusse3
3049 || i.tm.cpu_flags.bitfield.cpussse3
3050 || i.tm.cpu_flags.bitfield.cpusse4_1
3051 || i.tm.cpu_flags.bitfield.cpusse4_2))
3052 {
3053 (sse_check == sse_check_warning
3054 ? as_warn
3055 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3056 }
3057
3058 /* Zap movzx and movsx suffix. The suffix has been set from
3059 "word ptr" or "byte ptr" on the source operand in Intel syntax
3060 or extracted from mnemonic in AT&T syntax. But we'll use
3061 the destination register to choose the suffix for encoding. */
3062 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3063 {
3064 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3065 there is no suffix, the default will be byte extension. */
3066 if (i.reg_operands != 2
3067 && !i.suffix
3068 && intel_syntax)
3069 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3070
3071 i.suffix = 0;
3072 }
3073
3074 if (i.tm.opcode_modifier.fwait)
3075 if (!add_prefix (FWAIT_OPCODE))
3076 return;
3077
3078 /* Check for lock without a lockable instruction. Destination operand
3079 must be memory unless it is xchg (0x86). */
3080 if (i.prefix[LOCK_PREFIX]
3081 && (!i.tm.opcode_modifier.islockable
3082 || i.mem_operands == 0
3083 || (i.tm.base_opcode != 0x86
3084 && !operand_type_check (i.types[i.operands - 1], anymem))))
3085 {
3086 as_bad (_("expecting lockable instruction after `lock'"));
3087 return;
3088 }
3089
3090 /* Check string instruction segment overrides. */
3091 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3092 {
3093 if (!check_string ())
3094 return;
3095 i.disp_operands = 0;
3096 }
3097
3098 if (!process_suffix ())
3099 return;
3100
3101 /* Update operand types. */
3102 for (j = 0; j < i.operands; j++)
3103 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3104
3105 /* Make still unresolved immediate matches conform to size of immediate
3106 given in i.suffix. */
3107 if (!finalize_imm ())
3108 return;
3109
3110 if (i.types[0].bitfield.imm1)
3111 i.imm_operands = 0; /* kludge for shift insns. */
3112
3113 /* We only need to check those implicit registers for instructions
3114 with 3 operands or less. */
3115 if (i.operands <= 3)
3116 for (j = 0; j < i.operands; j++)
3117 if (i.types[j].bitfield.inoutportreg
3118 || i.types[j].bitfield.shiftcount
3119 || i.types[j].bitfield.acc
3120 || i.types[j].bitfield.floatacc)
3121 i.reg_operands--;
3122
3123 /* ImmExt should be processed after SSE2AVX. */
3124 if (!i.tm.opcode_modifier.sse2avx
3125 && i.tm.opcode_modifier.immext)
3126 process_immext ();
3127
3128 /* For insns with operands there are more diddles to do to the opcode. */
3129 if (i.operands)
3130 {
3131 if (!process_operands ())
3132 return;
3133 }
3134 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3135 {
3136 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3137 as_warn (_("translating to `%sp'"), i.tm.name);
3138 }
3139
3140 if (i.tm.opcode_modifier.vex)
3141 build_vex_prefix (t);
3142
3143 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3144 instructions may define INT_OPCODE as well, so avoid this corner
3145 case for those instructions that use MODRM. */
3146 if (i.tm.base_opcode == INT_OPCODE
3147 && !i.tm.opcode_modifier.modrm
3148 && i.op[0].imms->X_add_number == 3)
3149 {
3150 i.tm.base_opcode = INT3_OPCODE;
3151 i.imm_operands = 0;
3152 }
3153
3154 if ((i.tm.opcode_modifier.jump
3155 || i.tm.opcode_modifier.jumpbyte
3156 || i.tm.opcode_modifier.jumpdword)
3157 && i.op[0].disps->X_op == O_constant)
3158 {
3159 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3160 the absolute address given by the constant. Since ix86 jumps and
3161 calls are pc relative, we need to generate a reloc. */
3162 i.op[0].disps->X_add_symbol = &abs_symbol;
3163 i.op[0].disps->X_op = O_symbol;
3164 }
3165
3166 if (i.tm.opcode_modifier.rex64)
3167 i.rex |= REX_W;
3168
3169 /* For 8 bit registers we need an empty rex prefix. Also if the
3170 instruction already has a REX prefix, we need to convert old
3171 registers to new ones. */
3172
3173 if ((i.types[0].bitfield.reg8
3174 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3175 || (i.types[1].bitfield.reg8
3176 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3177 || ((i.types[0].bitfield.reg8
3178 || i.types[1].bitfield.reg8)
3179 && i.rex != 0))
3180 {
3181 int x;
3182
3183 i.rex |= REX_OPCODE;
3184 for (x = 0; x < 2; x++)
3185 {
3186 /* Look for 8 bit operand that uses old registers. */
3187 if (i.types[x].bitfield.reg8
3188 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3189 {
3190 /* In case it is a "high byte" register (%ah .. %bh), give up. */
3191 if (i.op[x].regs->reg_num > 3)
3192 as_bad (_("can't encode register '%s%s' in an "
3193 "instruction requiring REX prefix."),
3194 register_prefix, i.op[x].regs->reg_name);
3195
3196 /* Otherwise it is equivalent to the extended register.
3197 Since the encoding doesn't change this is merely
3198 cosmetic cleanup for debug output. */
3199
3200 i.op[x].regs = i.op[x].regs + 8;
3201 }
3202 }
3203 }
3204
3205 if (i.rex != 0)
3206 add_prefix (REX_OPCODE | i.rex);
3207
3208 /* We are ready to output the insn. */
3209 output_insn ();
3210 }
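
/* For illustration, roughly how "lock addl $1, (%rax)" flows through
   md_assemble in AT&T mode: parse_insn consumes "lock" via add_prefix
   and leaves "add" with suffix 'l'; parse_operands collects the
   immediate and memory operands; optimize_imm narrows $1 so the
   sign-extended Imm8 form can match; match_template selects a suitable
   add template; the LOCK check above passes because the destination is
   a memory operand; and output_insn emits the prefix and opcode bytes.  */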
3211
3212 static char *
3213 parse_insn (char *line, char *mnemonic)
3214 {
3215 char *l = line;
3216 char *token_start = l;
3217 char *mnem_p;
3218 int supported;
3219 const insn_template *t;
3220 char *dot_p = NULL;
3221
3222 /* Non-zero if we found a prefix only acceptable with string insns. */
3223 const char *expecting_string_instruction = NULL;
3224
3225 while (1)
3226 {
3227 mnem_p = mnemonic;
3228 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3229 {
3230 if (*mnem_p == '.')
3231 dot_p = mnem_p;
3232 mnem_p++;
3233 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3234 {
3235 as_bad (_("no such instruction: `%s'"), token_start);
3236 return NULL;
3237 }
3238 l++;
3239 }
3240 if (!is_space_char (*l)
3241 && *l != END_OF_INSN
3242 && (intel_syntax
3243 || (*l != PREFIX_SEPARATOR
3244 && *l != ',')))
3245 {
3246 as_bad (_("invalid character %s in mnemonic"),
3247 output_invalid (*l));
3248 return NULL;
3249 }
3250 if (token_start == l)
3251 {
3252 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3253 as_bad (_("expecting prefix; got nothing"));
3254 else
3255 as_bad (_("expecting mnemonic; got nothing"));
3256 return NULL;
3257 }
3258
3259 /* Look up instruction (or prefix) via hash table. */
3260 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3261
3262 if (*l != END_OF_INSN
3263 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3264 && current_templates
3265 && current_templates->start->opcode_modifier.isprefix)
3266 {
3267 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3268 {
3269 as_bad ((flag_code != CODE_64BIT
3270 ? _("`%s' is only supported in 64-bit mode")
3271 : _("`%s' is not supported in 64-bit mode")),
3272 current_templates->start->name);
3273 return NULL;
3274 }
3275 /* If we are in 16-bit mode, do not allow addr16 or data16.
3276 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3277 if ((current_templates->start->opcode_modifier.size16
3278 || current_templates->start->opcode_modifier.size32)
3279 && flag_code != CODE_64BIT
3280 && (current_templates->start->opcode_modifier.size32
3281 ^ (flag_code == CODE_16BIT)))
3282 {
3283 as_bad (_("redundant %s prefix"),
3284 current_templates->start->name);
3285 return NULL;
3286 }
3287 /* Add prefix, checking for repeated prefixes. */
3288 switch (add_prefix (current_templates->start->base_opcode))
3289 {
3290 case PREFIX_EXIST:
3291 return NULL;
3292 case PREFIX_REP:
3293 expecting_string_instruction = current_templates->start->name;
3294 break;
3295 default:
3296 break;
3297 }
3298 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3299 token_start = ++l;
3300 }
3301 else
3302 break;
3303 }
3304
3305 if (!current_templates)
3306 {
3307 /* Check if we should swap operands or force a 32-bit displacement
3308 in the encoding. */
3309 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3310 i.swap_operand = 1;
3311 else if (mnem_p - 4 == dot_p
3312 && dot_p[1] == 'd'
3313 && dot_p[2] == '3'
3314 && dot_p[3] == '2')
3315 i.disp32_encoding = 1;
3316 else
3317 goto check_suffix;
3318 mnem_p = dot_p;
3319 *dot_p = '\0';
3320 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3321 }
3322
3323 if (!current_templates)
3324 {
3325 check_suffix:
3326 /* See if we can get a match by trimming off a suffix. */
3327 switch (mnem_p[-1])
3328 {
3329 case WORD_MNEM_SUFFIX:
3330 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3331 i.suffix = SHORT_MNEM_SUFFIX;
3332 else
3333 case BYTE_MNEM_SUFFIX:
3334 case QWORD_MNEM_SUFFIX:
3335 i.suffix = mnem_p[-1];
3336 mnem_p[-1] = '\0';
3337 current_templates = (const templates *) hash_find (op_hash,
3338 mnemonic);
3339 break;
3340 case SHORT_MNEM_SUFFIX:
3341 case LONG_MNEM_SUFFIX:
3342 if (!intel_syntax)
3343 {
3344 i.suffix = mnem_p[-1];
3345 mnem_p[-1] = '\0';
3346 current_templates = (const templates *) hash_find (op_hash,
3347 mnemonic);
3348 }
3349 break;
3350
3351 /* Intel Syntax. */
3352 case 'd':
3353 if (intel_syntax)
3354 {
3355 if (intel_float_operand (mnemonic) == 1)
3356 i.suffix = SHORT_MNEM_SUFFIX;
3357 else
3358 i.suffix = LONG_MNEM_SUFFIX;
3359 mnem_p[-1] = '\0';
3360 current_templates = (const templates *) hash_find (op_hash,
3361 mnemonic);
3362 }
3363 break;
3364 }
3365 if (!current_templates)
3366 {
3367 as_bad (_("no such instruction: `%s'"), token_start);
3368 return NULL;
3369 }
3370 }
3371
3372 if (current_templates->start->opcode_modifier.jump
3373 || current_templates->start->opcode_modifier.jumpbyte)
3374 {
3375 /* Check for a branch hint. We allow ",pt" and ",pn" for
3376 predict taken and predict not taken respectively.
3377 I'm not sure that branch hints actually do anything on loop
3378 and jcxz insns (JumpByte) for current Pentium4 chips. They
3379 may work in the future and it doesn't hurt to accept them
3380 now. */
3381 if (l[0] == ',' && l[1] == 'p')
3382 {
3383 if (l[2] == 't')
3384 {
3385 if (!add_prefix (DS_PREFIX_OPCODE))
3386 return NULL;
3387 l += 3;
3388 }
3389 else if (l[2] == 'n')
3390 {
3391 if (!add_prefix (CS_PREFIX_OPCODE))
3392 return NULL;
3393 l += 3;
3394 }
3395 }
3396 }
3397 /* Any other comma loses. */
3398 if (*l == ',')
3399 {
3400 as_bad (_("invalid character %s in mnemonic"),
3401 output_invalid (*l));
3402 return NULL;
3403 }
3404
3405 /* Check if instruction is supported on specified architecture. */
3406 supported = 0;
3407 for (t = current_templates->start; t < current_templates->end; ++t)
3408 {
3409 supported |= cpu_flags_match (t);
3410 if (supported == CPU_FLAGS_PERFECT_MATCH)
3411 goto skip;
3412 }
3413
3414 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3415 {
3416 as_bad (flag_code == CODE_64BIT
3417 ? _("`%s' is not supported in 64-bit mode")
3418 : _("`%s' is only supported in 64-bit mode"),
3419 current_templates->start->name);
3420 return NULL;
3421 }
3422 if (supported != CPU_FLAGS_PERFECT_MATCH)
3423 {
3424 as_bad (_("`%s' is not supported on `%s%s'"),
3425 current_templates->start->name,
3426 cpu_arch_name ? cpu_arch_name : default_arch,
3427 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3428 return NULL;
3429 }
3430
3431 skip:
3432 if (!cpu_arch_flags.bitfield.cpui386
3433 && (flag_code != CODE_16BIT))
3434 {
3435 as_warn (_("use .code16 to ensure correct addressing mode"));
3436 }
3437
3438 /* Check for rep/repne without a string instruction. */
3439 if (expecting_string_instruction)
3440 {
3441 static templates override;
3442
3443 for (t = current_templates->start; t < current_templates->end; ++t)
3444 if (t->opcode_modifier.isstring)
3445 break;
3446 if (t >= current_templates->end)
3447 {
3448 as_bad (_("expecting string instruction after `%s'"),
3449 expecting_string_instruction);
3450 return NULL;
3451 }
3452 for (override.start = t; t < current_templates->end; ++t)
3453 if (!t->opcode_modifier.isstring)
3454 break;
3455 override.end = t;
3456 current_templates = &override;
3457 }
3458
3459 return l;
3460 }
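
/* For illustration: given "rep movsb", the loop above first reads
   "rep", an isprefix template, records it through add_prefix
   (PREFIX_REP) and remembers it in expecting_string_instruction; the
   next pass reads "movsb", which is not in op_hash as a template of
   its own, so check_suffix strips the trailing 'b' into i.suffix and
   looks up "movs".  The final rep/repne check then narrows
   current_templates to the string-instruction templates only.  */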
3461
3462 static char *
3463 parse_operands (char *l, const char *mnemonic)
3464 {
3465 char *token_start;
3466
3467 /* 1 if operand is pending after ','. */
3468 unsigned int expecting_operand = 0;
3469
3470 /* Non-zero if operand parens not balanced. */
3471 unsigned int paren_not_balanced;
3472
3473 while (*l != END_OF_INSN)
3474 {
3475 /* Skip optional white space before operand. */
3476 if (is_space_char (*l))
3477 ++l;
3478 if (!is_operand_char (*l) && *l != END_OF_INSN)
3479 {
3480 as_bad (_("invalid character %s before operand %d"),
3481 output_invalid (*l),
3482 i.operands + 1);
3483 return NULL;
3484 }
3485 token_start = l; /* after white space */
3486 paren_not_balanced = 0;
3487 while (paren_not_balanced || *l != ',')
3488 {
3489 if (*l == END_OF_INSN)
3490 {
3491 if (paren_not_balanced)
3492 {
3493 if (!intel_syntax)
3494 as_bad (_("unbalanced parenthesis in operand %d."),
3495 i.operands + 1);
3496 else
3497 as_bad (_("unbalanced brackets in operand %d."),
3498 i.operands + 1);
3499 return NULL;
3500 }
3501 else
3502 break; /* we are done */
3503 }
3504 else if (!is_operand_char (*l) && !is_space_char (*l))
3505 {
3506 as_bad (_("invalid character %s in operand %d"),
3507 output_invalid (*l),
3508 i.operands + 1);
3509 return NULL;
3510 }
3511 if (!intel_syntax)
3512 {
3513 if (*l == '(')
3514 ++paren_not_balanced;
3515 if (*l == ')')
3516 --paren_not_balanced;
3517 }
3518 else
3519 {
3520 if (*l == '[')
3521 ++paren_not_balanced;
3522 if (*l == ']')
3523 --paren_not_balanced;
3524 }
3525 l++;
3526 }
3527 if (l != token_start)
3528 { /* Yes, we've read in another operand. */
3529 unsigned int operand_ok;
3530 this_operand = i.operands++;
3531 i.types[this_operand].bitfield.unspecified = 1;
3532 if (i.operands > MAX_OPERANDS)
3533 {
3534 as_bad (_("spurious operands; (%d operands/instruction max)"),
3535 MAX_OPERANDS);
3536 return NULL;
3537 }
3538 /* Now parse operand adding info to 'i' as we go along. */
3539 END_STRING_AND_SAVE (l);
3540
3541 if (intel_syntax)
3542 operand_ok =
3543 i386_intel_operand (token_start,
3544 intel_float_operand (mnemonic));
3545 else
3546 operand_ok = i386_att_operand (token_start);
3547
3548 RESTORE_END_STRING (l);
3549 if (!operand_ok)
3550 return NULL;
3551 }
3552 else
3553 {
3554 if (expecting_operand)
3555 {
3556 expecting_operand_after_comma:
3557 as_bad (_("expecting operand after ','; got nothing"));
3558 return NULL;
3559 }
3560 if (*l == ',')
3561 {
3562 as_bad (_("expecting operand before ','; got nothing"));
3563 return NULL;
3564 }
3565 }
3566
3567 /* Now *l must be either ',' or END_OF_INSN. */
3568 if (*l == ',')
3569 {
3570 if (*++l == END_OF_INSN)
3571 {
3572 /* Skip the comma; if nothing follows it, complain. */
3573 goto expecting_operand_after_comma;
3574 }
3575 expecting_operand = 1;
3576 }
3577 }
3578 return l;
3579 }
3580
3581 static void
3582 swap_2_operands (int xchg1, int xchg2)
3583 {
3584 union i386_op temp_op;
3585 i386_operand_type temp_type;
3586 enum bfd_reloc_code_real temp_reloc;
3587
3588 temp_type = i.types[xchg2];
3589 i.types[xchg2] = i.types[xchg1];
3590 i.types[xchg1] = temp_type;
3591 temp_op = i.op[xchg2];
3592 i.op[xchg2] = i.op[xchg1];
3593 i.op[xchg1] = temp_op;
3594 temp_reloc = i.reloc[xchg2];
3595 i.reloc[xchg2] = i.reloc[xchg1];
3596 i.reloc[xchg1] = temp_reloc;
3597 }
3598
3599 static void
3600 swap_operands (void)
3601 {
3602 switch (i.operands)
3603 {
3604 case 5:
3605 case 4:
3606 swap_2_operands (1, i.operands - 2);
3607 case 3:
3608 case 2:
3609 swap_2_operands (0, i.operands - 1);
3610 break;
3611 default:
3612 abort ();
3613 }
3614
3615 if (i.mem_operands == 2)
3616 {
3617 const seg_entry *temp_seg;
3618 temp_seg = i.seg[0];
3619 i.seg[0] = i.seg[1];
3620 i.seg[1] = temp_seg;
3621 }
3622 }
3623
3624 /* Try to ensure constant immediates are represented in the smallest
3625 opcode possible. */
3626 static void
3627 optimize_imm (void)
3628 {
3629 char guess_suffix = 0;
3630 int op;
3631
3632 if (i.suffix)
3633 guess_suffix = i.suffix;
3634 else if (i.reg_operands)
3635 {
3636 /* Figure out a suffix from the last register operand specified.
3637 We can't do this properly yet, i.e. excluding InOutPortReg,
3638 but the following works for instructions with immediates.
3639 In any case, we can't set i.suffix yet. */
3640 for (op = i.operands; --op >= 0;)
3641 if (i.types[op].bitfield.reg8)
3642 {
3643 guess_suffix = BYTE_MNEM_SUFFIX;
3644 break;
3645 }
3646 else if (i.types[op].bitfield.reg16)
3647 {
3648 guess_suffix = WORD_MNEM_SUFFIX;
3649 break;
3650 }
3651 else if (i.types[op].bitfield.reg32)
3652 {
3653 guess_suffix = LONG_MNEM_SUFFIX;
3654 break;
3655 }
3656 else if (i.types[op].bitfield.reg64)
3657 {
3658 guess_suffix = QWORD_MNEM_SUFFIX;
3659 break;
3660 }
3661 }
3662 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3663 guess_suffix = WORD_MNEM_SUFFIX;
3664
3665 for (op = i.operands; --op >= 0;)
3666 if (operand_type_check (i.types[op], imm))
3667 {
3668 switch (i.op[op].imms->X_op)
3669 {
3670 case O_constant:
3671 /* If a suffix is given, this operand may be shortened. */
3672 switch (guess_suffix)
3673 {
3674 case LONG_MNEM_SUFFIX:
3675 i.types[op].bitfield.imm32 = 1;
3676 i.types[op].bitfield.imm64 = 1;
3677 break;
3678 case WORD_MNEM_SUFFIX:
3679 i.types[op].bitfield.imm16 = 1;
3680 i.types[op].bitfield.imm32 = 1;
3681 i.types[op].bitfield.imm32s = 1;
3682 i.types[op].bitfield.imm64 = 1;
3683 break;
3684 case BYTE_MNEM_SUFFIX:
3685 i.types[op].bitfield.imm8 = 1;
3686 i.types[op].bitfield.imm8s = 1;
3687 i.types[op].bitfield.imm16 = 1;
3688 i.types[op].bitfield.imm32 = 1;
3689 i.types[op].bitfield.imm32s = 1;
3690 i.types[op].bitfield.imm64 = 1;
3691 break;
3692 }
3693
3694 /* If this operand is at most 16 bits, convert it
3695 to a signed 16 bit number before trying to see
3696 whether it will fit in an even smaller size.
3697 This allows a 16-bit operand such as $0xffe0 to
3698 be recognised as within Imm8S range. */
3699 if ((i.types[op].bitfield.imm16)
3700 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3701 {
3702 i.op[op].imms->X_add_number =
3703 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3704 }
3705 if ((i.types[op].bitfield.imm32)
3706 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3707 == 0))
3708 {
3709 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3710 ^ ((offsetT) 1 << 31))
3711 - ((offsetT) 1 << 31));
3712 }
3713 i.types[op]
3714 = operand_type_or (i.types[op],
3715 smallest_imm_type (i.op[op].imms->X_add_number));
3716
3717 /* We must avoid matching Imm32 templates when only a 64-bit
3718 immediate is available. */
3719 if (guess_suffix == QWORD_MNEM_SUFFIX)
3720 i.types[op].bitfield.imm32 = 0;
3721 break;
3722
3723 case O_absent:
3724 case O_register:
3725 abort ();
3726
3727 /* Symbols and expressions. */
3728 default:
3729 /* Convert symbolic operand to proper sizes for matching, but don't
3730 prevent matching a set of insns that only supports sizes other
3731 than those matching the insn suffix. */
3732 {
3733 i386_operand_type mask, allowed;
3734 const insn_template *t;
3735
3736 operand_type_set (&mask, 0);
3737 operand_type_set (&allowed, 0);
3738
3739 for (t = current_templates->start;
3740 t < current_templates->end;
3741 ++t)
3742 allowed = operand_type_or (allowed,
3743 t->operand_types[op]);
3744 switch (guess_suffix)
3745 {
3746 case QWORD_MNEM_SUFFIX:
3747 mask.bitfield.imm64 = 1;
3748 mask.bitfield.imm32s = 1;
3749 break;
3750 case LONG_MNEM_SUFFIX:
3751 mask.bitfield.imm32 = 1;
3752 break;
3753 case WORD_MNEM_SUFFIX:
3754 mask.bitfield.imm16 = 1;
3755 break;
3756 case BYTE_MNEM_SUFFIX:
3757 mask.bitfield.imm8 = 1;
3758 break;
3759 default:
3760 break;
3761 }
3762 allowed = operand_type_and (mask, allowed);
3763 if (!operand_type_all_zero (&allowed))
3764 i.types[op] = operand_type_and (i.types[op], mask);
3765 }
3766 break;
3767 }
3768 }
3769 }
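
/* For illustration of the Imm8S narrowing above: for "addw $0xffe0, %ax"
   the guessed suffix is 'w', so the operand is first marked Imm16 and
   its value sign-extended from 16 bits to -32; smallest_imm_type then
   also sets Imm8S, which lets the short sign-extended-immediate form of
   the instruction match.  */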
3770
3771 /* Try to use the smallest displacement type too. */
3772 static void
3773 optimize_disp (void)
3774 {
3775 int op;
3776
3777 for (op = i.operands; --op >= 0;)
3778 if (operand_type_check (i.types[op], disp))
3779 {
3780 if (i.op[op].disps->X_op == O_constant)
3781 {
3782 offsetT op_disp = i.op[op].disps->X_add_number;
3783
3784 if (i.types[op].bitfield.disp16
3785 && (op_disp & ~(offsetT) 0xffff) == 0)
3786 {
3787 /* If this operand is at most 16 bits, convert
3788 to a signed 16 bit number and don't use 64bit
3789 displacement. */
3790 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3791 i.types[op].bitfield.disp64 = 0;
3792 }
3793 if (i.types[op].bitfield.disp32
3794 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3795 {
3796 /* If this operand is at most 32 bits, convert
3797 to a signed 32 bit number and don't use 64bit
3798 displacement. */
3799 op_disp &= (((offsetT) 2 << 31) - 1);
3800 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3801 i.types[op].bitfield.disp64 = 0;
3802 }
3803 if (!op_disp && i.types[op].bitfield.baseindex)
3804 {
3805 i.types[op].bitfield.disp8 = 0;
3806 i.types[op].bitfield.disp16 = 0;
3807 i.types[op].bitfield.disp32 = 0;
3808 i.types[op].bitfield.disp32s = 0;
3809 i.types[op].bitfield.disp64 = 0;
3810 i.op[op].disps = 0;
3811 i.disp_operands--;
3812 }
3813 else if (flag_code == CODE_64BIT)
3814 {
3815 if (fits_in_signed_long (op_disp))
3816 {
3817 i.types[op].bitfield.disp64 = 0;
3818 i.types[op].bitfield.disp32s = 1;
3819 }
3820 if (i.prefix[ADDR_PREFIX]
3821 && fits_in_unsigned_long (op_disp))
3822 i.types[op].bitfield.disp32 = 1;
3823 }
3824 if ((i.types[op].bitfield.disp32
3825 || i.types[op].bitfield.disp32s
3826 || i.types[op].bitfield.disp16)
3827 && fits_in_signed_byte (op_disp))
3828 i.types[op].bitfield.disp8 = 1;
3829 }
3830 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3831 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3832 {
3833 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3834 i.op[op].disps, 0, i.reloc[op]);
3835 i.types[op].bitfield.disp8 = 0;
3836 i.types[op].bitfield.disp16 = 0;
3837 i.types[op].bitfield.disp32 = 0;
3838 i.types[op].bitfield.disp32s = 0;
3839 i.types[op].bitfield.disp64 = 0;
3840 }
3841 else
3842 /* We only support 64bit displacement on constants. */
3843 i.types[op].bitfield.disp64 = 0;
3844 }
3845 }
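
/* For illustration: in 64-bit mode a constant displacement such as
   0x40 in "movl %eax, 0x40(%rbx)" fits in a signed long, so Disp64 is
   dropped in favor of Disp32S, and since it also fits in a signed byte
   Disp8 is set too, allowing the one-byte displacement form.  A zero
   displacement used with a base or index register is dropped
   altogether.  */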
3846
3847 /* Check if operands are valid for the instruction. */
3848
3849 static int
3850 check_VecOperands (const insn_template *t)
3851 {
3852 /* Without VSIB byte, we can't have a vector register for index. */
3853 if (!t->opcode_modifier.vecsib
3854 && i.index_reg
3855 && (i.index_reg->reg_type.bitfield.regxmm
3856 || i.index_reg->reg_type.bitfield.regymm))
3857 {
3858 i.error = unsupported_vector_index_register;
3859 return 1;
3860 }
3861
3862 /* For VSIB byte, we need a vector register for index and no PC
3863 relative addressing is allowed. */
3864 if (t->opcode_modifier.vecsib
3865 && (!i.index_reg
3866 || !((t->opcode_modifier.vecsib == VecSIB128
3867 && i.index_reg->reg_type.bitfield.regxmm)
3868 || (t->opcode_modifier.vecsib == VecSIB256
3869 && i.index_reg->reg_type.bitfield.regymm))
3870 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3871 {
3872 i.error = invalid_vsib_address;
3873 return 1;
3874 }
3875
3876 return 0;
3877 }
3878
3879 /* Check if operands are valid for the instruction. Update VEX
3880 operand types. */
3881
3882 static int
3883 VEX_check_operands (const insn_template *t)
3884 {
3885 if (!t->opcode_modifier.vex)
3886 return 0;
3887
3888 /* Only check VEX_Imm4, which must be the first operand. */
3889 if (t->operand_types[0].bitfield.vec_imm4)
3890 {
3891 if (i.op[0].imms->X_op != O_constant
3892 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3893 {
3894 i.error = bad_imm4;
3895 return 1;
3896 }
3897
3898 /* Turn off Imm8 so that update_imm won't complain. */
3899 i.types[0] = vec_imm4;
3900 }
3901
3902 return 0;
3903 }
3904
3905 static const insn_template *
3906 match_template (void)
3907 {
3908 /* Points to template once we've found it. */
3909 const insn_template *t;
3910 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3911 i386_operand_type overlap4;
3912 unsigned int found_reverse_match;
3913 i386_opcode_modifier suffix_check;
3914 i386_operand_type operand_types [MAX_OPERANDS];
3915 int addr_prefix_disp;
3916 unsigned int j;
3917 unsigned int found_cpu_match;
3918 unsigned int check_register;
3919
3920 #if MAX_OPERANDS != 5
3921 # error "MAX_OPERANDS must be 5."
3922 #endif
3923
3924 found_reverse_match = 0;
3925 addr_prefix_disp = -1;
3926
3927 memset (&suffix_check, 0, sizeof (suffix_check));
3928 if (i.suffix == BYTE_MNEM_SUFFIX)
3929 suffix_check.no_bsuf = 1;
3930 else if (i.suffix == WORD_MNEM_SUFFIX)
3931 suffix_check.no_wsuf = 1;
3932 else if (i.suffix == SHORT_MNEM_SUFFIX)
3933 suffix_check.no_ssuf = 1;
3934 else if (i.suffix == LONG_MNEM_SUFFIX)
3935 suffix_check.no_lsuf = 1;
3936 else if (i.suffix == QWORD_MNEM_SUFFIX)
3937 suffix_check.no_qsuf = 1;
3938 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3939 suffix_check.no_ldsuf = 1;
3940
3941 /* Must have right number of operands. */
3942 i.error = number_of_operands_mismatch;
3943
3944 for (t = current_templates->start; t < current_templates->end; t++)
3945 {
3946 addr_prefix_disp = -1;
3947
3948 if (i.operands != t->operands)
3949 continue;
3950
3951 /* Check processor support. */
3952 i.error = unsupported;
3953 found_cpu_match = (cpu_flags_match (t)
3954 == CPU_FLAGS_PERFECT_MATCH);
3955 if (!found_cpu_match)
3956 continue;
3957
3958 /* Check old gcc support. */
3959 i.error = old_gcc_only;
3960 if (!old_gcc && t->opcode_modifier.oldgcc)
3961 continue;
3962
3963 /* Check AT&T mnemonic. */
3964 i.error = unsupported_with_intel_mnemonic;
3965 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3966 continue;
3967
3968 /* Check AT&T/Intel syntax. */
3969 i.error = unsupported_syntax;
3970 if ((intel_syntax && t->opcode_modifier.attsyntax)
3971 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3972 continue;
3973
3974 /* Check the suffix, except for some instructions in intel mode. */
3975 i.error = invalid_instruction_suffix;
3976 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3977 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3978 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3979 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3980 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3981 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3982 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3983 continue;
3984
3985 if (!operand_size_match (t))
3986 continue;
3987
3988 for (j = 0; j < MAX_OPERANDS; j++)
3989 operand_types[j] = t->operand_types[j];
3990
3991 /* In general, don't allow 64-bit operands in 32-bit mode. */
3992 if (i.suffix == QWORD_MNEM_SUFFIX
3993 && flag_code != CODE_64BIT
3994 && (intel_syntax
3995 ? (!t->opcode_modifier.ignoresize
3996 && !intel_float_operand (t->name))
3997 : intel_float_operand (t->name) != 2)
3998 && ((!operand_types[0].bitfield.regmmx
3999 && !operand_types[0].bitfield.regxmm
4000 && !operand_types[0].bitfield.regymm)
4001 || (!operand_types[t->operands > 1].bitfield.regmmx
4002 && !operand_types[t->operands > 1].bitfield.regxmm
4003 && !operand_types[t->operands > 1].bitfield.regymm))
4004 && (t->base_opcode != 0x0fc7
4005 || t->extension_opcode != 1 /* cmpxchg8b */))
4006 continue;
4007
4008 /* In general, don't allow 32-bit operands on pre-386. */
4009 else if (i.suffix == LONG_MNEM_SUFFIX
4010 && !cpu_arch_flags.bitfield.cpui386
4011 && (intel_syntax
4012 ? (!t->opcode_modifier.ignoresize
4013 && !intel_float_operand (t->name))
4014 : intel_float_operand (t->name) != 2)
4015 && ((!operand_types[0].bitfield.regmmx
4016 && !operand_types[0].bitfield.regxmm)
4017 || (!operand_types[t->operands > 1].bitfield.regmmx
4018 && !operand_types[t->operands > 1].bitfield.regxmm)))
4019 continue;
4020
4021 /* Do not verify operands when there are none. */
4022 else
4023 {
4024 if (!t->operands)
4025 /* We've found a match; break out of loop. */
4026 break;
4027 }
4028
4029 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4030 into Disp32/Disp16/Disp32 operand. */
4031 if (i.prefix[ADDR_PREFIX] != 0)
4032 {
4033 /* There should be only one Disp operand. */
4034 switch (flag_code)
4035 {
4036 case CODE_16BIT:
4037 for (j = 0; j < MAX_OPERANDS; j++)
4038 {
4039 if (operand_types[j].bitfield.disp16)
4040 {
4041 addr_prefix_disp = j;
4042 operand_types[j].bitfield.disp32 = 1;
4043 operand_types[j].bitfield.disp16 = 0;
4044 break;
4045 }
4046 }
4047 break;
4048 case CODE_32BIT:
4049 for (j = 0; j < MAX_OPERANDS; j++)
4050 {
4051 if (operand_types[j].bitfield.disp32)
4052 {
4053 addr_prefix_disp = j;
4054 operand_types[j].bitfield.disp32 = 0;
4055 operand_types[j].bitfield.disp16 = 1;
4056 break;
4057 }
4058 }
4059 break;
4060 case CODE_64BIT:
4061 for (j = 0; j < MAX_OPERANDS; j++)
4062 {
4063 if (operand_types[j].bitfield.disp64)
4064 {
4065 addr_prefix_disp = j;
4066 operand_types[j].bitfield.disp64 = 0;
4067 operand_types[j].bitfield.disp32 = 1;
4068 break;
4069 }
4070 }
4071 break;
4072 }
4073 }
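/* Example of the conversion above: assembling
       movw %ax, 8(%bp,%si)
   in 32-bit code adds the 0x67 address size prefix, so the template's
   Disp32 operand is re-typed here as Disp16 to match the 16-bit
   addressing form.  */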
4074
4075 /* We check register size if needed. */
4076 check_register = t->opcode_modifier.checkregsize;
4077 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4078 switch (t->operands)
4079 {
4080 case 1:
4081 if (!operand_type_match (overlap0, i.types[0]))
4082 continue;
4083 break;
4084 case 2:
4085 /* xchg %eax, %eax is a special case. It is an alias for nop
4086 only in 32bit mode and we can use opcode 0x90. In 64bit
4087 mode, we can't use 0x90 for xchg %eax, %eax since it should
4088 zero-extend %eax to %rax. */
4089 if (flag_code == CODE_64BIT
4090 && t->base_opcode == 0x90
4091 && operand_type_equal (&i.types [0], &acc32)
4092 && operand_type_equal (&i.types [1], &acc32))
4093 continue;
4094 if (i.swap_operand)
4095 {
4096 /* If we swap operands in encoding, we either match
4097 the next one or reverse direction of operands. */
4098 if (t->opcode_modifier.s)
4099 continue;
4100 else if (t->opcode_modifier.d)
4101 goto check_reverse;
4102 }
4103
4104 case 3:
4105 /* If we swap operands in encoding, we match the next one. */
4106 if (i.swap_operand && t->opcode_modifier.s)
4107 continue;
4108 case 4:
4109 case 5:
4110 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4111 if (!operand_type_match (overlap0, i.types[0])
4112 || !operand_type_match (overlap1, i.types[1])
4113 || (check_register
4114 && !operand_type_register_match (overlap0, i.types[0],
4115 operand_types[0],
4116 overlap1, i.types[1],
4117 operand_types[1])))
4118 {
4119 /* Check if other direction is valid ... */
4120 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4121 continue;
4122
4123 check_reverse:
4124 /* Try reversing direction of operands. */
4125 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4126 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4127 if (!operand_type_match (overlap0, i.types[0])
4128 || !operand_type_match (overlap1, i.types[1])
4129 || (check_register
4130 && !operand_type_register_match (overlap0,
4131 i.types[0],
4132 operand_types[1],
4133 overlap1,
4134 i.types[1],
4135 operand_types[0])))
4136 {
4137 /* Does not match either direction. */
4138 continue;
4139 }
4140 /* found_reverse_match holds which of D or FloatDR
4141 we've found. */
4142 if (t->opcode_modifier.d)
4143 found_reverse_match = Opcode_D;
4144 else if (t->opcode_modifier.floatd)
4145 found_reverse_match = Opcode_FloatD;
4146 else
4147 found_reverse_match = 0;
4148 if (t->opcode_modifier.floatr)
4149 found_reverse_match |= Opcode_FloatR;
4150 }
4151 else
4152 {
4153 /* Found a forward 2 operand match here. */
4154 switch (t->operands)
4155 {
4156 case 5:
4157 overlap4 = operand_type_and (i.types[4],
4158 operand_types[4]);
4159 case 4:
4160 overlap3 = operand_type_and (i.types[3],
4161 operand_types[3]);
4162 case 3:
4163 overlap2 = operand_type_and (i.types[2],
4164 operand_types[2]);
4165 break;
4166 }
4167
4168 switch (t->operands)
4169 {
4170 case 5:
4171 if (!operand_type_match (overlap4, i.types[4])
4172 || !operand_type_register_match (overlap3,
4173 i.types[3],
4174 operand_types[3],
4175 overlap4,
4176 i.types[4],
4177 operand_types[4]))
4178 continue;
4179 case 4:
4180 if (!operand_type_match (overlap3, i.types[3])
4181 || (check_register
4182 && !operand_type_register_match (overlap2,
4183 i.types[2],
4184 operand_types[2],
4185 overlap3,
4186 i.types[3],
4187 operand_types[3])))
4188 continue;
4189 case 3:
4190 /* Here we make use of the fact that there are no
4191 reverse match 3 operand instructions, and all 3
4192 operand instructions only need to be checked for
4193 register consistency between operands 2 and 3. */
4194 if (!operand_type_match (overlap2, i.types[2])
4195 || (check_register
4196 && !operand_type_register_match (overlap1,
4197 i.types[1],
4198 operand_types[1],
4199 overlap2,
4200 i.types[2],
4201 operand_types[2])))
4202 continue;
4203 break;
4204 }
4205 }
4206 /* Found either forward/reverse 2, 3 or 4 operand match here:
4207 slip through to break. */
4208 }
4209 if (!found_cpu_match)
4210 {
4211 found_reverse_match = 0;
4212 continue;
4213 }
4214
4215 /* Check if vector operands are valid. */
4216 if (check_VecOperands (t))
4217 continue;
4218
4219 /* Check if VEX operands are valid. */
4220 if (VEX_check_operands (t))
4221 continue;
4222
4223 /* We've found a match; break out of loop. */
4224 break;
4225 }
4226
4227 if (t == current_templates->end)
4228 {
4229 /* We found no match. */
4230 const char *err_msg;
4231 switch (i.error)
4232 {
4233 default:
4234 abort ();
4235 case operand_size_mismatch:
4236 err_msg = _("operand size mismatch");
4237 break;
4238 case operand_type_mismatch:
4239 err_msg = _("operand type mismatch");
4240 break;
4241 case register_type_mismatch:
4242 err_msg = _("register type mismatch");
4243 break;
4244 case number_of_operands_mismatch:
4245 err_msg = _("number of operands mismatch");
4246 break;
4247 case invalid_instruction_suffix:
4248 err_msg = _("invalid instruction suffix");
4249 break;
4250 case bad_imm4:
4251 err_msg = _("Imm4 isn't the first operand");
4252 break;
4253 case old_gcc_only:
4254 err_msg = _("only supported with old gcc");
4255 break;
4256 case unsupported_with_intel_mnemonic:
4257 err_msg = _("unsupported with Intel mnemonic");
4258 break;
4259 case unsupported_syntax:
4260 err_msg = _("unsupported syntax");
4261 break;
4262 case unsupported:
4263 err_msg = _("unsupported");
4264 break;
4265 case invalid_vsib_address:
4266 err_msg = _("invalid VSIB address");
4267 break;
4268 case unsupported_vector_index_register:
4269 err_msg = _("unsupported vector index register");
4270 break;
4271 }
4272 as_bad (_("%s for `%s'"), err_msg,
4273 current_templates->start->name);
4274 return NULL;
4275 }
4276
4277 if (!quiet_warnings)
4278 {
4279 if (!intel_syntax
4280 && (i.types[0].bitfield.jumpabsolute
4281 != operand_types[0].bitfield.jumpabsolute))
4282 {
4283 as_warn (_("indirect %s without `*'"), t->name);
4284 }
4285
4286 if (t->opcode_modifier.isprefix
4287 && t->opcode_modifier.ignoresize)
4288 {
4289 /* Warn them that a data or address size prefix doesn't
4290 affect assembly of the next line of code. */
4291 as_warn (_("stand-alone `%s' prefix"), t->name);
4292 }
4293 }
4294
4295 /* Copy the template we found. */
4296 i.tm = *t;
4297
4298 if (addr_prefix_disp != -1)
4299 i.tm.operand_types[addr_prefix_disp]
4300 = operand_types[addr_prefix_disp];
4301
4302 if (found_reverse_match)
4303 {
4304 /* If we found a reverse match we must alter the opcode
4305 direction bit. found_reverse_match holds bits to change
4306 (different for int & float insns). */
4307
4308 i.tm.base_opcode ^= found_reverse_match;
4309
4310 i.tm.operand_types[0] = operand_types[1];
4311 i.tm.operand_types[1] = operand_types[0];
4312 }
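/* Example of the direction-bit flip above: "mov %al, %bl" can match either
   opcode 0x88 (reg -> r/m) or 0x8a (r/m -> reg); when only the reversed
   template fits, XORing base_opcode with Opcode_D (0x2) selects the other
   encoding, and the two operand types are swapped to match.  */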
4313
4314 return t;
4315 }
4316
4317 static int
4318 check_string (void)
4319 {
4320 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4321 if (i.tm.operand_types[mem_op].bitfield.esseg)
4322 {
4323 if (i.seg[0] != NULL && i.seg[0] != &es)
4324 {
4325 as_bad (_("`%s' operand %d must use `%ses' segment"),
4326 i.tm.name,
4327 mem_op + 1,
4328 register_prefix);
4329 return 0;
4330 }
4331 /* There's only ever one segment override allowed per instruction.
4332 This instruction possibly has a legal segment override on the
4333 second operand, so copy the segment to where non-string
4334 instructions store it, allowing common code. */
4335 i.seg[0] = i.seg[1];
4336 }
4337 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4338 {
4339 if (i.seg[1] != NULL && i.seg[1] != &es)
4340 {
4341 as_bad (_("`%s' operand %d must use `%ses' segment"),
4342 i.tm.name,
4343 mem_op + 2,
4344 register_prefix);
4345 return 0;
4346 }
4347 }
4348 return 1;
4349 }
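/* Illustration of the check above: for string insns the operand typed EsSeg
   must use %es, e.g.
       movsw %ss:(%si), %es:(%di)
   is fine (any override on the source), whereas an override other than %es
   on the destination operand is rejected.  */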
4350
4351 static int
4352 process_suffix (void)
4353 {
4354 /* If matched instruction specifies an explicit instruction mnemonic
4355 suffix, use it. */
4356 if (i.tm.opcode_modifier.size16)
4357 i.suffix = WORD_MNEM_SUFFIX;
4358 else if (i.tm.opcode_modifier.size32)
4359 i.suffix = LONG_MNEM_SUFFIX;
4360 else if (i.tm.opcode_modifier.size64)
4361 i.suffix = QWORD_MNEM_SUFFIX;
4362 else if (i.reg_operands)
4363 {
4364 /* If there's no instruction mnemonic suffix we try to invent one
4365 based on register operands. */
4366 if (!i.suffix)
4367 {
4368 /* We take i.suffix from the last register operand specified.
4369 Destination register type is more significant than source
4370 register type. crc32 in SSE4.2 prefers source register
4371 type. */
4372 if (i.tm.base_opcode == 0xf20f38f1)
4373 {
4374 if (i.types[0].bitfield.reg16)
4375 i.suffix = WORD_MNEM_SUFFIX;
4376 else if (i.types[0].bitfield.reg32)
4377 i.suffix = LONG_MNEM_SUFFIX;
4378 else if (i.types[0].bitfield.reg64)
4379 i.suffix = QWORD_MNEM_SUFFIX;
4380 }
4381 else if (i.tm.base_opcode == 0xf20f38f0)
4382 {
4383 if (i.types[0].bitfield.reg8)
4384 i.suffix = BYTE_MNEM_SUFFIX;
4385 }
4386
4387 if (!i.suffix)
4388 {
4389 int op;
4390
4391 if (i.tm.base_opcode == 0xf20f38f1
4392 || i.tm.base_opcode == 0xf20f38f0)
4393 {
4394 /* We have to know the operand size for crc32. */
4395 as_bad (_("ambiguous memory operand size for `%s'"),
4396 i.tm.name);
4397 return 0;
4398 }
4399
4400 for (op = i.operands; --op >= 0;)
4401 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4402 {
4403 if (i.types[op].bitfield.reg8)
4404 {
4405 i.suffix = BYTE_MNEM_SUFFIX;
4406 break;
4407 }
4408 else if (i.types[op].bitfield.reg16)
4409 {
4410 i.suffix = WORD_MNEM_SUFFIX;
4411 break;
4412 }
4413 else if (i.types[op].bitfield.reg32)
4414 {
4415 i.suffix = LONG_MNEM_SUFFIX;
4416 break;
4417 }
4418 else if (i.types[op].bitfield.reg64)
4419 {
4420 i.suffix = QWORD_MNEM_SUFFIX;
4421 break;
4422 }
4423 }
4424 }
4425 }
4426 else if (i.suffix == BYTE_MNEM_SUFFIX)
4427 {
4428 if (intel_syntax
4429 && i.tm.opcode_modifier.ignoresize
4430 && i.tm.opcode_modifier.no_bsuf)
4431 i.suffix = 0;
4432 else if (!check_byte_reg ())
4433 return 0;
4434 }
4435 else if (i.suffix == LONG_MNEM_SUFFIX)
4436 {
4437 if (intel_syntax
4438 && i.tm.opcode_modifier.ignoresize
4439 && i.tm.opcode_modifier.no_lsuf)
4440 i.suffix = 0;
4441 else if (!check_long_reg ())
4442 return 0;
4443 }
4444 else if (i.suffix == QWORD_MNEM_SUFFIX)
4445 {
4446 if (intel_syntax
4447 && i.tm.opcode_modifier.ignoresize
4448 && i.tm.opcode_modifier.no_qsuf)
4449 i.suffix = 0;
4450 else if (!check_qword_reg ())
4451 return 0;
4452 }
4453 else if (i.suffix == WORD_MNEM_SUFFIX)
4454 {
4455 if (intel_syntax
4456 && i.tm.opcode_modifier.ignoresize
4457 && i.tm.opcode_modifier.no_wsuf)
4458 i.suffix = 0;
4459 else if (!check_word_reg ())
4460 return 0;
4461 }
4462 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4463 || i.suffix == YMMWORD_MNEM_SUFFIX)
4464 {
4465 /* Skip if the instruction has x/y suffix. match_template
4466 should check if it is a valid suffix. */
4467 }
4468 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4469 /* Do nothing if the instruction is going to ignore the prefix. */
4470 ;
4471 else
4472 abort ();
4473 }
4474 else if (i.tm.opcode_modifier.defaultsize
4475 && !i.suffix
4476 /* exclude fldenv/frstor/fsave/fstenv */
4477 && i.tm.opcode_modifier.no_ssuf)
4478 {
4479 i.suffix = stackop_size;
4480 }
4481 else if (intel_syntax
4482 && !i.suffix
4483 && (i.tm.operand_types[0].bitfield.jumpabsolute
4484 || i.tm.opcode_modifier.jumpbyte
4485 || i.tm.opcode_modifier.jumpintersegment
4486 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4487 && i.tm.extension_opcode <= 3)))
4488 {
4489 switch (flag_code)
4490 {
4491 case CODE_64BIT:
4492 if (!i.tm.opcode_modifier.no_qsuf)
4493 {
4494 i.suffix = QWORD_MNEM_SUFFIX;
4495 break;
4496 }
4497 case CODE_32BIT:
4498 if (!i.tm.opcode_modifier.no_lsuf)
4499 i.suffix = LONG_MNEM_SUFFIX;
4500 break;
4501 case CODE_16BIT:
4502 if (!i.tm.opcode_modifier.no_wsuf)
4503 i.suffix = WORD_MNEM_SUFFIX;
4504 break;
4505 }
4506 }
4507
4508 if (!i.suffix)
4509 {
4510 if (!intel_syntax)
4511 {
4512 if (i.tm.opcode_modifier.w)
4513 {
4514 as_bad (_("no instruction mnemonic suffix given and "
4515 "no register operands; can't size instruction"));
4516 return 0;
4517 }
4518 }
4519 else
4520 {
4521 unsigned int suffixes;
4522
4523 suffixes = !i.tm.opcode_modifier.no_bsuf;
4524 if (!i.tm.opcode_modifier.no_wsuf)
4525 suffixes |= 1 << 1;
4526 if (!i.tm.opcode_modifier.no_lsuf)
4527 suffixes |= 1 << 2;
4528 if (!i.tm.opcode_modifier.no_ldsuf)
4529 suffixes |= 1 << 3;
4530 if (!i.tm.opcode_modifier.no_ssuf)
4531 suffixes |= 1 << 4;
4532 if (!i.tm.opcode_modifier.no_qsuf)
4533 suffixes |= 1 << 5;
4534
4535 /* There is more than one suffix match. */
4536 if (i.tm.opcode_modifier.w
4537 || ((suffixes & (suffixes - 1))
4538 && !i.tm.opcode_modifier.defaultsize
4539 && !i.tm.opcode_modifier.ignoresize))
4540 {
4541 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4542 return 0;
4543 }
4544 }
4545 }
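/* In Intel syntax this is the familiar case of, e.g.,
       mov [ebx], 1
   which matches byte/word/dword templates alike and is rejected as
   "ambiguous operand size", while
       mov dword ptr [ebx], 1
   supplies the size and assembles cleanly.  */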
4546
4547 /* Change the opcode based on the operand size given by i.suffix;
4548 We don't need to change things for byte insns. */
4549
4550 if (i.suffix
4551 && i.suffix != BYTE_MNEM_SUFFIX
4552 && i.suffix != XMMWORD_MNEM_SUFFIX
4553 && i.suffix != YMMWORD_MNEM_SUFFIX)
4554 {
4555 /* It's not a byte, select word/dword operation. */
4556 if (i.tm.opcode_modifier.w)
4557 {
4558 if (i.tm.opcode_modifier.shortform)
4559 i.tm.base_opcode |= 8;
4560 else
4561 i.tm.base_opcode |= 1;
4562 }
4563
4564 /* Now select between word & dword operations via the operand
4565 size prefix, except for instructions that will ignore this
4566 prefix anyway. */
4567 if (i.tm.opcode_modifier.addrprefixop0)
4568 {
4569 /* The address size override prefix changes the size of the
4570 first operand. */
4571 if ((flag_code == CODE_32BIT
4572 && i.op->regs[0].reg_type.bitfield.reg16)
4573 || (flag_code != CODE_32BIT
4574 && i.op->regs[0].reg_type.bitfield.reg32))
4575 if (!add_prefix (ADDR_PREFIX_OPCODE))
4576 return 0;
4577 }
4578 else if (i.suffix != QWORD_MNEM_SUFFIX
4579 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4580 && !i.tm.opcode_modifier.ignoresize
4581 && !i.tm.opcode_modifier.floatmf
4582 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4583 || (flag_code == CODE_64BIT
4584 && i.tm.opcode_modifier.jumpbyte)))
4585 {
4586 unsigned int prefix = DATA_PREFIX_OPCODE;
4587
4588 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4589 prefix = ADDR_PREFIX_OPCODE;
4590
4591 if (!add_prefix (prefix))
4592 return 0;
4593 }
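/* For instance, "movw %ax, %bx" assembled in 32-bit code receives the 0x66
   operand size prefix here, while jumpbyte insns such as jecxz in 64-bit
   code receive the 0x67 address size prefix instead, since what is being
   sized there is really the counter/address register.  */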
4594
4595 /* Set mode64 for an operand. */
4596 if (i.suffix == QWORD_MNEM_SUFFIX
4597 && flag_code == CODE_64BIT
4598 && !i.tm.opcode_modifier.norex64)
4599 {
4600 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4601 need rex64. cmpxchg8b is also a special case. */
4602 if (! (i.operands == 2
4603 && i.tm.base_opcode == 0x90
4604 && i.tm.extension_opcode == None
4605 && operand_type_equal (&i.types [0], &acc64)
4606 && operand_type_equal (&i.types [1], &acc64))
4607 && ! (i.operands == 1
4608 && i.tm.base_opcode == 0xfc7
4609 && i.tm.extension_opcode == 1
4610 && !operand_type_check (i.types [0], reg)
4611 && operand_type_check (i.types [0], anymem)))
4612 i.rex |= REX_W;
4613 }
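/* E.g. "addq %rax, %rbx" picks up REX.W here (emitted as a 0x48 prefix
   byte), whereas "xchg %rax, %rax" falls under the special case above and
   stays a plain 0x90 NOP.  */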
4614
4615 /* Size floating point instruction. */
4616 if (i.suffix == LONG_MNEM_SUFFIX)
4617 if (i.tm.opcode_modifier.floatmf)
4618 i.tm.base_opcode ^= 4;
4619 }
4620
4621 return 1;
4622 }
4623
4624 static int
4625 check_byte_reg (void)
4626 {
4627 int op;
4628
4629 for (op = i.operands; --op >= 0;)
4630 {
4631 /* If this is an eight bit register, it's OK. If it's the 16 or
4632 32 bit version of an eight bit register, we will just use the
4633 low portion, and that's OK too. */
4634 if (i.types[op].bitfield.reg8)
4635 continue;
4636
4637 /* crc32 doesn't generate this warning. */
4638 if (i.tm.base_opcode == 0xf20f38f0)
4639 continue;
4640
4641 if ((i.types[op].bitfield.reg16
4642 || i.types[op].bitfield.reg32
4643 || i.types[op].bitfield.reg64)
4644 && i.op[op].regs->reg_num < 4)
4645 {
4646 /* Prohibit these changes in the 64bit mode, since the
4647 lowering is more complicated. */
4648 if (flag_code == CODE_64BIT
4649 && !i.tm.operand_types[op].bitfield.inoutportreg)
4650 {
4651 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4652 register_prefix, i.op[op].regs->reg_name,
4653 i.suffix);
4654 return 0;
4655 }
4656 #if REGISTER_WARNINGS
4657 if (!quiet_warnings
4658 && !i.tm.operand_types[op].bitfield.inoutportreg)
4659 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4660 register_prefix,
4661 (i.op[op].regs + (i.types[op].bitfield.reg16
4662 ? REGNAM_AL - REGNAM_AX
4663 : REGNAM_AL - REGNAM_EAX))->reg_name,
4664 register_prefix,
4665 i.op[op].regs->reg_name,
4666 i.suffix);
4667 #endif
4668 continue;
4669 }
4670 /* Any other register is bad. */
4671 if (i.types[op].bitfield.reg16
4672 || i.types[op].bitfield.reg32
4673 || i.types[op].bitfield.reg64
4674 || i.types[op].bitfield.regmmx
4675 || i.types[op].bitfield.regxmm
4676 || i.types[op].bitfield.regymm
4677 || i.types[op].bitfield.sreg2
4678 || i.types[op].bitfield.sreg3
4679 || i.types[op].bitfield.control
4680 || i.types[op].bitfield.debug
4681 || i.types[op].bitfield.test
4682 || i.types[op].bitfield.floatreg
4683 || i.types[op].bitfield.floatacc)
4684 {
4685 as_bad (_("`%s%s' not allowed with `%s%c'"),
4686 register_prefix,
4687 i.op[op].regs->reg_name,
4688 i.tm.name,
4689 i.suffix);
4690 return 0;
4691 }
4692 }
4693 return 1;
4694 }
4695
4696 static int
4697 check_long_reg (void)
4698 {
4699 int op;
4700
4701 for (op = i.operands; --op >= 0;)
4702 /* Reject eight bit registers, except where the template requires
4703 them. (eg. movzb) */
4704 if (i.types[op].bitfield.reg8
4705 && (i.tm.operand_types[op].bitfield.reg16
4706 || i.tm.operand_types[op].bitfield.reg32
4707 || i.tm.operand_types[op].bitfield.acc))
4708 {
4709 as_bad (_("`%s%s' not allowed with `%s%c'"),
4710 register_prefix,
4711 i.op[op].regs->reg_name,
4712 i.tm.name,
4713 i.suffix);
4714 return 0;
4715 }
4716 /* Warn if the e prefix on a general reg is missing. */
4717 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4718 && i.types[op].bitfield.reg16
4719 && (i.tm.operand_types[op].bitfield.reg32
4720 || i.tm.operand_types[op].bitfield.acc))
4721 {
4722 /* Prohibit these changes in the 64bit mode, since the
4723 lowering is more complicated. */
4724 if (flag_code == CODE_64BIT)
4725 {
4726 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4727 register_prefix, i.op[op].regs->reg_name,
4728 i.suffix);
4729 return 0;
4730 }
4731 #if REGISTER_WARNINGS
4732 else
4733 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4734 register_prefix,
4735 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4736 register_prefix,
4737 i.op[op].regs->reg_name,
4738 i.suffix);
4739 #endif
4740 }
4741 /* Warn if the r prefix on a general reg is present. */
4742 else if (i.types[op].bitfield.reg64
4743 && (i.tm.operand_types[op].bitfield.reg32
4744 || i.tm.operand_types[op].bitfield.acc))
4745 {
4746 if (intel_syntax
4747 && i.tm.opcode_modifier.toqword
4748 && !i.types[0].bitfield.regxmm)
4749 {
4750 /* Convert to QWORD. We want REX byte. */
4751 i.suffix = QWORD_MNEM_SUFFIX;
4752 }
4753 else
4754 {
4755 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4756 register_prefix, i.op[op].regs->reg_name,
4757 i.suffix);
4758 return 0;
4759 }
4760 }
4761 return 1;
4762 }
4763
4764 static int
4765 check_qword_reg (void)
4766 {
4767 int op;
4768
4769 for (op = i.operands; --op >= 0; )
4770 /* Reject eight bit registers, except where the template requires
4771 them. (eg. movzb) */
4772 if (i.types[op].bitfield.reg8
4773 && (i.tm.operand_types[op].bitfield.reg16
4774 || i.tm.operand_types[op].bitfield.reg32
4775 || i.tm.operand_types[op].bitfield.acc))
4776 {
4777 as_bad (_("`%s%s' not allowed with `%s%c'"),
4778 register_prefix,
4779 i.op[op].regs->reg_name,
4780 i.tm.name,
4781 i.suffix);
4782 return 0;
4783 }
4784 /* Warn if the r prefix on a general reg is missing. */
4785 else if ((i.types[op].bitfield.reg16
4786 || i.types[op].bitfield.reg32)
4787 && (i.tm.operand_types[op].bitfield.reg32
4788 || i.tm.operand_types[op].bitfield.acc))
4789 {
4790 /* Prohibit these changes in the 64bit mode, since the
4791 lowering is more complicated. */
4792 if (intel_syntax
4793 && i.tm.opcode_modifier.todword
4794 && !i.types[0].bitfield.regxmm)
4795 {
4796 /* Convert to DWORD. We don't want REX byte. */
4797 i.suffix = LONG_MNEM_SUFFIX;
4798 }
4799 else
4800 {
4801 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4802 register_prefix, i.op[op].regs->reg_name,
4803 i.suffix);
4804 return 0;
4805 }
4806 }
4807 return 1;
4808 }
4809
4810 static int
4811 check_word_reg (void)
4812 {
4813 int op;
4814 for (op = i.operands; --op >= 0;)
4815 /* Reject eight bit registers, except where the template requires
4816 them. (eg. movzb) */
4817 if (i.types[op].bitfield.reg8
4818 && (i.tm.operand_types[op].bitfield.reg16
4819 || i.tm.operand_types[op].bitfield.reg32
4820 || i.tm.operand_types[op].bitfield.acc))
4821 {
4822 as_bad (_("`%s%s' not allowed with `%s%c'"),
4823 register_prefix,
4824 i.op[op].regs->reg_name,
4825 i.tm.name,
4826 i.suffix);
4827 return 0;
4828 }
4829 /* Warn if the e prefix on a general reg is present. */
4830 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4831 && i.types[op].bitfield.reg32
4832 && (i.tm.operand_types[op].bitfield.reg16
4833 || i.tm.operand_types[op].bitfield.acc))
4834 {
4835 /* Prohibit these changes in the 64bit mode, since the
4836 lowering is more complicated. */
4837 if (flag_code == CODE_64BIT)
4838 {
4839 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4840 register_prefix, i.op[op].regs->reg_name,
4841 i.suffix);
4842 return 0;
4843 }
4844 else
4845 #if REGISTER_WARNINGS
4846 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4847 register_prefix,
4848 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4849 register_prefix,
4850 i.op[op].regs->reg_name,
4851 i.suffix);
4852 #endif
4853 }
4854 return 1;
4855 }
4856
4857 static int
4858 update_imm (unsigned int j)
4859 {
4860 i386_operand_type overlap = i.types[j];
4861 if ((overlap.bitfield.imm8
4862 || overlap.bitfield.imm8s
4863 || overlap.bitfield.imm16
4864 || overlap.bitfield.imm32
4865 || overlap.bitfield.imm32s
4866 || overlap.bitfield.imm64)
4867 && !operand_type_equal (&overlap, &imm8)
4868 && !operand_type_equal (&overlap, &imm8s)
4869 && !operand_type_equal (&overlap, &imm16)
4870 && !operand_type_equal (&overlap, &imm32)
4871 && !operand_type_equal (&overlap, &imm32s)
4872 && !operand_type_equal (&overlap, &imm64))
4873 {
4874 if (i.suffix)
4875 {
4876 i386_operand_type temp;
4877
4878 operand_type_set (&temp, 0);
4879 if (i.suffix == BYTE_MNEM_SUFFIX)
4880 {
4881 temp.bitfield.imm8 = overlap.bitfield.imm8;
4882 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4883 }
4884 else if (i.suffix == WORD_MNEM_SUFFIX)
4885 temp.bitfield.imm16 = overlap.bitfield.imm16;
4886 else if (i.suffix == QWORD_MNEM_SUFFIX)
4887 {
4888 temp.bitfield.imm64 = overlap.bitfield.imm64;
4889 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4890 }
4891 else
4892 temp.bitfield.imm32 = overlap.bitfield.imm32;
4893 overlap = temp;
4894 }
4895 else if (operand_type_equal (&overlap, &imm16_32_32s)
4896 || operand_type_equal (&overlap, &imm16_32)
4897 || operand_type_equal (&overlap, &imm16_32s))
4898 {
4899 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4900 overlap = imm16;
4901 else
4902 overlap = imm32s;
4903 }
4904 if (!operand_type_equal (&overlap, &imm8)
4905 && !operand_type_equal (&overlap, &imm8s)
4906 && !operand_type_equal (&overlap, &imm16)
4907 && !operand_type_equal (&overlap, &imm32)
4908 && !operand_type_equal (&overlap, &imm32s)
4909 && !operand_type_equal (&overlap, &imm64))
4910 {
4911 as_bad (_("no instruction mnemonic suffix given; "
4912 "can't determine immediate size"));
4913 return 0;
4914 }
4915 }
4916 i.types[j] = overlap;
4917
4918 return 1;
4919 }
4920
4921 static int
4922 finalize_imm (void)
4923 {
4924 unsigned int j, n;
4925
4926 /* Update the first 2 immediate operands. */
4927 n = i.operands > 2 ? 2 : i.operands;
4928 if (n)
4929 {
4930 for (j = 0; j < n; j++)
4931 if (update_imm (j) == 0)
4932 return 0;
4933
4934 /* The 3rd operand can't be immediate operand. */
4935 gas_assert (operand_type_check (i.types[2], imm) == 0);
4936 }
4937
4938 return 1;
4939 }
4940
4941 static int
4942 bad_implicit_operand (int xmm)
4943 {
4944 const char *ireg = xmm ? "xmm0" : "ymm0";
4945
4946 if (intel_syntax)
4947 as_bad (_("the last operand of `%s' must be `%s%s'"),
4948 i.tm.name, register_prefix, ireg);
4949 else
4950 as_bad (_("the first operand of `%s' must be `%s%s'"),
4951 i.tm.name, register_prefix, ireg);
4952 return 0;
4953 }
4954
4955 static int
4956 process_operands (void)
4957 {
4958 /* Default segment register this instruction will use for memory
4959 accesses. 0 means unknown. This is only for optimizing out
4960 unnecessary segment overrides. */
4961 const seg_entry *default_seg = 0;
4962
4963 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4964 {
4965 unsigned int dupl = i.operands;
4966 unsigned int dest = dupl - 1;
4967 unsigned int j;
4968
4969 /* The destination must be an xmm register. */
4970 gas_assert (i.reg_operands
4971 && MAX_OPERANDS > dupl
4972 && operand_type_equal (&i.types[dest], &regxmm));
4973
4974 if (i.tm.opcode_modifier.firstxmm0)
4975 {
4976 /* The first operand is implicit and must be xmm0. */
4977 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4978 if (i.op[0].regs->reg_num != 0)
4979 return bad_implicit_operand (1);
4980
4981 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4982 {
4983 /* Keep xmm0 for instructions with VEX prefix and 3
4984 sources. */
4985 goto duplicate;
4986 }
4987 else
4988 {
4989 /* We remove the first xmm0 and keep the number of
4990 operands unchanged, which in fact duplicates the
4991 destination. */
4992 for (j = 1; j < i.operands; j++)
4993 {
4994 i.op[j - 1] = i.op[j];
4995 i.types[j - 1] = i.types[j];
4996 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4997 }
4998 }
4999 }
5000 else if (i.tm.opcode_modifier.implicit1stxmm0)
5001 {
5002 gas_assert ((MAX_OPERANDS - 1) > dupl
5003 && (i.tm.opcode_modifier.vexsources
5004 == VEX3SOURCES));
5005
5006 /* Add the implicit xmm0 for instructions with VEX prefix
5007 and 3 sources. */
5008 for (j = i.operands; j > 0; j--)
5009 {
5010 i.op[j] = i.op[j - 1];
5011 i.types[j] = i.types[j - 1];
5012 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5013 }
5014 i.op[0].regs
5015 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5016 i.types[0] = regxmm;
5017 i.tm.operand_types[0] = regxmm;
5018
5019 i.operands += 2;
5020 i.reg_operands += 2;
5021 i.tm.operands += 2;
5022
5023 dupl++;
5024 dest++;
5025 i.op[dupl] = i.op[dest];
5026 i.types[dupl] = i.types[dest];
5027 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5028 }
5029 else
5030 {
5031 duplicate:
5032 i.operands++;
5033 i.reg_operands++;
5034 i.tm.operands++;
5035
5036 i.op[dupl] = i.op[dest];
5037 i.types[dupl] = i.types[dest];
5038 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5039 }
5040
5041 if (i.tm.opcode_modifier.immext)
5042 process_immext ();
5043 }
5044 else if (i.tm.opcode_modifier.firstxmm0)
5045 {
5046 unsigned int j;
5047
5048 /* The first operand is implicit and must be xmm0/ymm0. */
5049 gas_assert (i.reg_operands
5050 && (operand_type_equal (&i.types[0], &regxmm)
5051 || operand_type_equal (&i.types[0], &regymm)));
5052 if (i.op[0].regs->reg_num != 0)
5053 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5054
5055 for (j = 1; j < i.operands; j++)
5056 {
5057 i.op[j - 1] = i.op[j];
5058 i.types[j - 1] = i.types[j];
5059
5060 /* We need to adjust fields in i.tm since they are used by
5061 build_modrm_byte. */
5062 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5063 }
5064
5065 i.operands--;
5066 i.reg_operands--;
5067 i.tm.operands--;
5068 }
5069 else if (i.tm.opcode_modifier.regkludge)
5070 {
5071 /* The imul $imm, %reg instruction is converted into
5072 imul $imm, %reg, %reg, and the clr %reg instruction
5073 is converted into xor %reg, %reg. */
5074
5075 unsigned int first_reg_op;
5076
5077 if (operand_type_check (i.types[0], reg))
5078 first_reg_op = 0;
5079 else
5080 first_reg_op = 1;
5081 /* Pretend we saw the extra register operand. */
5082 gas_assert (i.reg_operands == 1
5083 && i.op[first_reg_op + 1].regs == 0);
5084 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5085 i.types[first_reg_op + 1] = i.types[first_reg_op];
5086 i.operands++;
5087 i.reg_operands++;
5088 }
5089
5090 if (i.tm.opcode_modifier.shortform)
5091 {
5092 if (i.types[0].bitfield.sreg2
5093 || i.types[0].bitfield.sreg3)
5094 {
5095 if (i.tm.base_opcode == POP_SEG_SHORT
5096 && i.op[0].regs->reg_num == 1)
5097 {
5098 as_bad (_("you can't `pop %scs'"), register_prefix);
5099 return 0;
5100 }
5101 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5102 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5103 i.rex |= REX_B;
5104 }
5105 else
5106 {
5107 /* The register or float register operand is in operand
5108 0 or 1. */
5109 unsigned int op;
5110
5111 if (i.types[0].bitfield.floatreg
5112 || operand_type_check (i.types[0], reg))
5113 op = 0;
5114 else
5115 op = 1;
5116 /* Register goes in low 3 bits of opcode. */
5117 i.tm.base_opcode |= i.op[op].regs->reg_num;
5118 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5119 i.rex |= REX_B;
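/* E.g. "push %ebp" becomes 0x50 | 5 = 0x55, and "push %r12" becomes
   0x41 0x54 (REX.B plus 0x50 | 4), matching the reg_num/RegRex logic
   above.  */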
5120 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5121 {
5122 /* Warn about some common errors, but press on regardless.
5123 The first case can be generated by gcc (<= 2.8.1). */
5124 if (i.operands == 2)
5125 {
5126 /* Reversed arguments on faddp, fsubp, etc. */
5127 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5128 register_prefix, i.op[!intel_syntax].regs->reg_name,
5129 register_prefix, i.op[intel_syntax].regs->reg_name);
5130 }
5131 else
5132 {
5133 /* Extraneous `l' suffix on fp insn. */
5134 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5135 register_prefix, i.op[0].regs->reg_name);
5136 }
5137 }
5138 }
5139 }
5140 else if (i.tm.opcode_modifier.modrm)
5141 {
5142 /* The opcode is completed (modulo i.tm.extension_opcode which
5143 must be put into the modrm byte). Now, we make the modrm and
5144 index base bytes based on all the info we've collected. */
5145
5146 default_seg = build_modrm_byte ();
5147 }
5148 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5149 {
5150 default_seg = &ds;
5151 }
5152 else if (i.tm.opcode_modifier.isstring)
5153 {
5154 /* For the string instructions that allow a segment override
5155 on one of their operands, the default segment is ds. */
5156 default_seg = &ds;
5157 }
5158
5159 if (i.tm.base_opcode == 0x8d /* lea */
5160 && i.seg[0]
5161 && !quiet_warnings)
5162 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5163
5164 /* If a segment was explicitly specified, and the specified segment
5165 is not the default, use an opcode prefix to select it. If we
5166 never figured out what the default segment is, then default_seg
5167 will be zero at this point, and the specified segment prefix will
5168 always be used. */
5169 if ((i.seg[0]) && (i.seg[0] != default_seg))
5170 {
5171 if (!add_prefix (i.seg[0]->seg_prefix))
5172 return 0;
5173 }
5174 return 1;
5175 }
5176
5177 static const seg_entry *
5178 build_modrm_byte (void)
5179 {
5180 const seg_entry *default_seg = 0;
5181 unsigned int source, dest;
5182 int vex_3_sources;
5183
5184 /* The first operand of instructions with VEX prefix and 3 sources
5185 must be VEX_Imm4. */
5186 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5187 if (vex_3_sources)
5188 {
5189 unsigned int nds, reg_slot;
5190 expressionS *exp;
5191
5192 if (i.tm.opcode_modifier.veximmext
5193 && i.tm.opcode_modifier.immext)
5194 {
5195 dest = i.operands - 2;
5196 gas_assert (dest == 3);
5197 }
5198 else
5199 dest = i.operands - 1;
5200 nds = dest - 1;
5201
5202 /* There are 2 kinds of instructions:
5203 1. 5 operands: 4 register operands or 3 register operands
5204 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5205 VexW0 or VexW1. The destination must be either XMM or YMM
5206 register.
5207 2. 4 operands: 4 register operands or 3 register operands
5208 plus 1 memory operand, VexXDS, and VexImmExt */
5209 gas_assert ((i.reg_operands == 4
5210 || (i.reg_operands == 3 && i.mem_operands == 1))
5211 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5212 && (i.tm.opcode_modifier.veximmext
5213 || (i.imm_operands == 1
5214 && i.types[0].bitfield.vec_imm4
5215 && (i.tm.opcode_modifier.vexw == VEXW0
5216 || i.tm.opcode_modifier.vexw == VEXW1)
5217 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5218 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5219
5220 if (i.imm_operands == 0)
5221 {
5222 /* When there is no immediate operand, generate an 8bit
5223 immediate operand to encode the first operand. */
5224 exp = &im_expressions[i.imm_operands++];
5225 i.op[i.operands].imms = exp;
5226 i.types[i.operands] = imm8;
5227 i.operands++;
5228 /* If VexW1 is set, the first operand is the source and
5229 the second operand is encoded in the immediate operand. */
5230 if (i.tm.opcode_modifier.vexw == VEXW1)
5231 {
5232 source = 0;
5233 reg_slot = 1;
5234 }
5235 else
5236 {
5237 source = 1;
5238 reg_slot = 0;
5239 }
5240
5241 /* FMA swaps REG and NDS. */
5242 if (i.tm.cpu_flags.bitfield.cpufma)
5243 {
5244 unsigned int tmp;
5245 tmp = reg_slot;
5246 reg_slot = nds;
5247 nds = tmp;
5248 }
5249
5250 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5251 &regxmm)
5252 || operand_type_equal (&i.tm.operand_types[reg_slot],
5253 &regymm));
5254 exp->X_op = O_constant;
5255 exp->X_add_number
5256 = ((i.op[reg_slot].regs->reg_num
5257 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5258 << 4);
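/* E.g. if the register to be encoded in the immediate is %xmm12 (reg_num 4
   with RegRex set), the value stored is (4 + 8) << 4 = 0xc0, i.e. the
   register number lands in imm8[7:4].  */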
5259 }
5260 else
5261 {
5262 unsigned int imm_slot;
5263
5264 if (i.tm.opcode_modifier.vexw == VEXW0)
5265 {
5266 /* If VexW0 is set, the third operand is the source and
5267 the second operand is encoded in the immediate
5268 operand. */
5269 source = 2;
5270 reg_slot = 1;
5271 }
5272 else
5273 {
5274 /* VexW1 is set, the second operand is the source and
5275 the third operand is encoded in the immediate
5276 operand. */
5277 source = 1;
5278 reg_slot = 2;
5279 }
5280
5281 if (i.tm.opcode_modifier.immext)
5282 {
5283 /* When ImmExt is set, the immediate byte is the last
5284 operand. */
5285 imm_slot = i.operands - 1;
5286 source--;
5287 reg_slot--;
5288 }
5289 else
5290 {
5291 imm_slot = 0;
5292
5293 /* Turn on Imm8 so that output_imm will generate it. */
5294 i.types[imm_slot].bitfield.imm8 = 1;
5295 }
5296
5297 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5298 &regxmm)
5299 || operand_type_equal (&i.tm.operand_types[reg_slot],
5300 &regymm));
5301 i.op[imm_slot].imms->X_add_number
5302 |= ((i.op[reg_slot].regs->reg_num
5303 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5304 << 4);
5305 }
5306
5307 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5308 || operand_type_equal (&i.tm.operand_types[nds],
5309 &regymm));
5310 i.vex.register_specifier = i.op[nds].regs;
5311 }
5312 else
5313 source = dest = 0;
5314
5315 /* i.reg_operands MUST be the number of real register operands;
5316 implicit registers do not count. If there are 3 register
5317 operands, it must be an instruction with VexNDS. For an
5318 instruction with VexNDD, the destination register is encoded
5319 in the VEX prefix. If there are 4 register operands, it must be
5320 an instruction with VEX prefix and 3 sources. */
5321 if (i.mem_operands == 0
5322 && ((i.reg_operands == 2
5323 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5324 || (i.reg_operands == 3
5325 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5326 || (i.reg_operands == 4 && vex_3_sources)))
5327 {
5328 switch (i.operands)
5329 {
5330 case 2:
5331 source = 0;
5332 break;
5333 case 3:
5334 /* When there are 3 operands, one of them may be immediate,
5335 which may be the first or the last operand. Otherwise,
5336 the first operand must be the shift count register (cl) or it
5337 is an instruction with VexNDS. */
5338 gas_assert (i.imm_operands == 1
5339 || (i.imm_operands == 0
5340 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5341 || i.types[0].bitfield.shiftcount)));
5342 if (operand_type_check (i.types[0], imm)
5343 || i.types[0].bitfield.shiftcount)
5344 source = 1;
5345 else
5346 source = 0;
5347 break;
5348 case 4:
5349 /* When there are 4 operands, the first two must be 8bit
5350 immediate operands. The source operand will be the 3rd
5351 one.
5352
5353 For instructions with VexNDS, if the first operand is
5354 an imm8, the source operand is the 2nd one. If the last
5355 operand is imm8, the source operand is the first one. */
5356 gas_assert ((i.imm_operands == 2
5357 && i.types[0].bitfield.imm8
5358 && i.types[1].bitfield.imm8)
5359 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5360 && i.imm_operands == 1
5361 && (i.types[0].bitfield.imm8
5362 || i.types[i.operands - 1].bitfield.imm8)));
5363 if (i.imm_operands == 2)
5364 source = 2;
5365 else
5366 {
5367 if (i.types[0].bitfield.imm8)
5368 source = 1;
5369 else
5370 source = 0;
5371 }
5372 break;
5373 case 5:
5374 break;
5375 default:
5376 abort ();
5377 }
5378
5379 if (!vex_3_sources)
5380 {
5381 dest = source + 1;
5382
5383 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5384 {
5385 /* For instructions with VexNDS, the register-only
5386 source operand must be 32/64bit integer, XMM or
5387 YMM register. It is encoded in VEX prefix. We
5388 need to clear RegMem bit before calling
5389 operand_type_equal. */
5390
5391 i386_operand_type op;
5392 unsigned int vvvv;
5393
5394 /* Check register-only source operand when two source
5395 operands are swapped. */
5396 if (!i.tm.operand_types[source].bitfield.baseindex
5397 && i.tm.operand_types[dest].bitfield.baseindex)
5398 {
5399 vvvv = source;
5400 source = dest;
5401 }
5402 else
5403 vvvv = dest;
5404
5405 op = i.tm.operand_types[vvvv];
5406 op.bitfield.regmem = 0;
5407 if ((dest + 1) >= i.operands
5408 || (op.bitfield.reg32 != 1
5409 && op.bitfield.reg64 != 1
5410 && !operand_type_equal (&op, &regxmm)
5411 && !operand_type_equal (&op, &regymm)))
5412 abort ();
5413 i.vex.register_specifier = i.op[vvvv].regs;
5414 dest++;
5415 }
5416 }
5417
5418 i.rm.mode = 3;
5419 /* One of the register operands will be encoded in the i.rm.reg
5420 field, the other in the combined i.rm.mode and i.rm.regmem
5421 fields. If no form of this instruction supports a memory
5422 destination operand, then we assume the source operand may
5423 sometimes be a memory operand and so we need to store the
5424 destination in the i.rm.reg field. */
5425 if (!i.tm.operand_types[dest].bitfield.regmem
5426 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5427 {
5428 i.rm.reg = i.op[dest].regs->reg_num;
5429 i.rm.regmem = i.op[source].regs->reg_num;
5430 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5431 i.rex |= REX_R;
5432 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5433 i.rex |= REX_B;
5434 }
5435 else
5436 {
5437 i.rm.reg = i.op[source].regs->reg_num;
5438 i.rm.regmem = i.op[dest].regs->reg_num;
5439 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5440 i.rex |= REX_B;
5441 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5442 i.rex |= REX_R;
5443 }
5444 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5445 {
5446 if (!i.types[0].bitfield.control
5447 && !i.types[1].bitfield.control)
5448 abort ();
5449 i.rex &= ~(REX_R | REX_B);
5450 add_prefix (LOCK_PREFIX_OPCODE);
5451 }
5452 }
5453 else
5454 { /* If it's not 2 reg operands... */
5455 unsigned int mem;
5456
5457 if (i.mem_operands)
5458 {
5459 unsigned int fake_zero_displacement = 0;
5460 unsigned int op;
5461
5462 for (op = 0; op < i.operands; op++)
5463 if (operand_type_check (i.types[op], anymem))
5464 break;
5465 gas_assert (op < i.operands);
5466
5467 if (i.tm.opcode_modifier.vecsib)
5468 {
5469 if (i.index_reg->reg_num == RegEiz
5470 || i.index_reg->reg_num == RegRiz)
5471 abort ();
5472
5473 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5474 if (!i.base_reg)
5475 {
5476 i.sib.base = NO_BASE_REGISTER;
5477 i.sib.scale = i.log2_scale_factor;
5478 i.types[op].bitfield.disp8 = 0;
5479 i.types[op].bitfield.disp16 = 0;
5480 i.types[op].bitfield.disp64 = 0;
5481 if (flag_code != CODE_64BIT)
5482 {
5483 /* Must be 32 bit */
5484 i.types[op].bitfield.disp32 = 1;
5485 i.types[op].bitfield.disp32s = 0;
5486 }
5487 else
5488 {
5489 i.types[op].bitfield.disp32 = 0;
5490 i.types[op].bitfield.disp32s = 1;
5491 }
5492 }
5493 i.sib.index = i.index_reg->reg_num;
5494 if ((i.index_reg->reg_flags & RegRex) != 0)
5495 i.rex |= REX_X;
5496 }
5497
5498 default_seg = &ds;
5499
5500 if (i.base_reg == 0)
5501 {
5502 i.rm.mode = 0;
5503 if (!i.disp_operands)
5504 {
5505 fake_zero_displacement = 1;
5506 /* Instructions with VSIB byte need 32bit displacement
5507 if there is no base register. */
5508 if (i.tm.opcode_modifier.vecsib)
5509 i.types[op].bitfield.disp32 = 1;
5510 }
5511 if (i.index_reg == 0)
5512 {
5513 gas_assert (!i.tm.opcode_modifier.vecsib);
5514 /* Operand is just <disp> */
5515 if (flag_code == CODE_64BIT)
5516 {
5517 /* 64bit mode replaces 32bit absolute addressing
5518 with RIP relative addressing; absolute
5519 addressing is instead encoded via one of the
5520 redundant SIB forms. */
5521 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5522 i.sib.base = NO_BASE_REGISTER;
5523 i.sib.index = NO_INDEX_REGISTER;
5524 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5525 ? disp32s : disp32);
5526 }
5527 else if ((flag_code == CODE_16BIT)
5528 ^ (i.prefix[ADDR_PREFIX] != 0))
5529 {
5530 i.rm.regmem = NO_BASE_REGISTER_16;
5531 i.types[op] = disp16;
5532 }
5533 else
5534 {
5535 i.rm.regmem = NO_BASE_REGISTER;
5536 i.types[op] = disp32;
5537 }
5538 }
5539 else if (!i.tm.opcode_modifier.vecsib)
5540 {
5541 /* !i.base_reg && i.index_reg */
5542 if (i.index_reg->reg_num == RegEiz
5543 || i.index_reg->reg_num == RegRiz)
5544 i.sib.index = NO_INDEX_REGISTER;
5545 else
5546 i.sib.index = i.index_reg->reg_num;
5547 i.sib.base = NO_BASE_REGISTER;
5548 i.sib.scale = i.log2_scale_factor;
5549 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5550 i.types[op].bitfield.disp8 = 0;
5551 i.types[op].bitfield.disp16 = 0;
5552 i.types[op].bitfield.disp64 = 0;
5553 if (flag_code != CODE_64BIT)
5554 {
5555 /* Must be 32 bit */
5556 i.types[op].bitfield.disp32 = 1;
5557 i.types[op].bitfield.disp32s = 0;
5558 }
5559 else
5560 {
5561 i.types[op].bitfield.disp32 = 0;
5562 i.types[op].bitfield.disp32s = 1;
5563 }
5564 if ((i.index_reg->reg_flags & RegRex) != 0)
5565 i.rex |= REX_X;
5566 }
5567 }
5568 /* RIP addressing for 64bit mode. */
5569 else if (i.base_reg->reg_num == RegRip ||
5570 i.base_reg->reg_num == RegEip)
5571 {
5572 gas_assert (!i.tm.opcode_modifier.vecsib);
5573 i.rm.regmem = NO_BASE_REGISTER;
5574 i.types[op].bitfield.disp8 = 0;
5575 i.types[op].bitfield.disp16 = 0;
5576 i.types[op].bitfield.disp32 = 0;
5577 i.types[op].bitfield.disp32s = 1;
5578 i.types[op].bitfield.disp64 = 0;
5579 i.flags[op] |= Operand_PCrel;
5580 if (! i.disp_operands)
5581 fake_zero_displacement = 1;
5582 }
5583 else if (i.base_reg->reg_type.bitfield.reg16)
5584 {
5585 gas_assert (!i.tm.opcode_modifier.vecsib);
5586 switch (i.base_reg->reg_num)
5587 {
5588 case 3: /* (%bx) */
5589 if (i.index_reg == 0)
5590 i.rm.regmem = 7;
5591 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5592 i.rm.regmem = i.index_reg->reg_num - 6;
5593 break;
5594 case 5: /* (%bp) */
5595 default_seg = &ss;
5596 if (i.index_reg == 0)
5597 {
5598 i.rm.regmem = 6;
5599 if (operand_type_check (i.types[op], disp) == 0)
5600 {
5601 /* fake (%bp) into 0(%bp) */
5602 i.types[op].bitfield.disp8 = 1;
5603 fake_zero_displacement = 1;
5604 }
5605 }
5606 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5607 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5608 break;
5609 default: /* (%si) -> 4 or (%di) -> 5 */
5610 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5611 }
5612 i.rm.mode = mode_from_disp_size (i.types[op]);
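/* The switch above implements the standard 16-bit r/m table:
   0 (%bx,%si)  1 (%bx,%di)  2 (%bp,%si)  3 (%bp,%di)
   4 (%si)      5 (%di)      6 (%bp) or disp16      7 (%bx).  */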
5613 }
5614 else /* i.base_reg and 32/64 bit mode */
5615 {
5616 if (flag_code == CODE_64BIT
5617 && operand_type_check (i.types[op], disp))
5618 {
5619 i386_operand_type temp;
5620 operand_type_set (&temp, 0);
5621 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5622 i.types[op] = temp;
5623 if (i.prefix[ADDR_PREFIX] == 0)
5624 i.types[op].bitfield.disp32s = 1;
5625 else
5626 i.types[op].bitfield.disp32 = 1;
5627 }
5628
5629 if (!i.tm.opcode_modifier.vecsib)
5630 i.rm.regmem = i.base_reg->reg_num;
5631 if ((i.base_reg->reg_flags & RegRex) != 0)
5632 i.rex |= REX_B;
5633 i.sib.base = i.base_reg->reg_num;
5634 /* x86-64 ignores REX prefix bit here to avoid decoder
5635 complications. */
5636 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5637 {
5638 default_seg = &ss;
5639 if (i.disp_operands == 0)
5640 {
5641 fake_zero_displacement = 1;
5642 i.types[op].bitfield.disp8 = 1;
5643 }
5644 }
5645 else if (i.base_reg->reg_num == ESP_REG_NUM)
5646 {
5647 default_seg = &ss;
5648 }
5649 i.sib.scale = i.log2_scale_factor;
5650 if (i.index_reg == 0)
5651 {
5652 gas_assert (!i.tm.opcode_modifier.vecsib);
5653 /* <disp>(%esp) becomes two byte modrm with no index
5654 register. We've already stored the code for esp
5655 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5656 Any base register besides %esp will not use the
5657 extra modrm byte. */
5658 i.sib.index = NO_INDEX_REGISTER;
5659 }
5660 else if (!i.tm.opcode_modifier.vecsib)
5661 {
5662 if (i.index_reg->reg_num == RegEiz
5663 || i.index_reg->reg_num == RegRiz)
5664 i.sib.index = NO_INDEX_REGISTER;
5665 else
5666 i.sib.index = i.index_reg->reg_num;
5667 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5668 if ((i.index_reg->reg_flags & RegRex) != 0)
5669 i.rex |= REX_X;
5670 }
5671
5672 if (i.disp_operands
5673 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5674 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5675 i.rm.mode = 0;
5676 else
5677 i.rm.mode = mode_from_disp_size (i.types[op]);
5678 }
5679
5680 if (fake_zero_displacement)
5681 {
5682 /* Fakes a zero displacement assuming that i.types[op]
5683 holds the correct displacement size. */
5684 expressionS *exp;
5685
5686 gas_assert (i.op[op].disps == 0);
5687 exp = &disp_expressions[i.disp_operands++];
5688 i.op[op].disps = exp;
5689 exp->X_op = O_constant;
5690 exp->X_add_number = 0;
5691 exp->X_add_symbol = (symbolS *) 0;
5692 exp->X_op_symbol = (symbolS *) 0;
5693 }
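/* Example of a faked displacement: "movl (%ebp), %eax" has no displacement
   operand, but mod=00 with r/m=101 means plain disp32 (or RIP-relative in
   64-bit code), so the code above encodes it as 0(%ebp), i.e. 8b 45 00 with
   an explicit zero disp8.  */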
5694
5695 mem = op;
5696 }
5697 else
5698 mem = ~0;
5699
5700 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5701 {
5702 if (operand_type_check (i.types[0], imm))
5703 i.vex.register_specifier = NULL;
5704 else
5705 {
5706 /* VEX.vvvv encodes one of the sources when the first
5707 operand is not an immediate. */
5708 if (i.tm.opcode_modifier.vexw == VEXW0)
5709 i.vex.register_specifier = i.op[0].regs;
5710 else
5711 i.vex.register_specifier = i.op[1].regs;
5712 }
5713
5714 /* Destination is a XMM register encoded in the ModRM.reg
5715 and VEX.R bit. */
5716 i.rm.reg = i.op[2].regs->reg_num;
5717 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5718 i.rex |= REX_R;
5719
5720 /* ModRM.rm and VEX.B encodes the other source. */
5721 if (!i.mem_operands)
5722 {
5723 i.rm.mode = 3;
5724
5725 if (i.tm.opcode_modifier.vexw == VEXW0)
5726 i.rm.regmem = i.op[1].regs->reg_num;
5727 else
5728 i.rm.regmem = i.op[0].regs->reg_num;
5729
5730 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5731 i.rex |= REX_B;
5732 }
5733 }
5734 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5735 {
5736 i.vex.register_specifier = i.op[2].regs;
5737 if (!i.mem_operands)
5738 {
5739 i.rm.mode = 3;
5740 i.rm.regmem = i.op[1].regs->reg_num;
5741 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5742 i.rex |= REX_B;
5743 }
5744 }
5745 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5746 (if any) based on i.tm.extension_opcode. Again, we must be
5747 careful to make sure that segment/control/debug/test/MMX
5748 registers are coded into the i.rm.reg field. */
5749 else if (i.reg_operands)
5750 {
5751 unsigned int op;
5752 unsigned int vex_reg = ~0;
5753
5754 for (op = 0; op < i.operands; op++)
5755 if (i.types[op].bitfield.reg8
5756 || i.types[op].bitfield.reg16
5757 || i.types[op].bitfield.reg32
5758 || i.types[op].bitfield.reg64
5759 || i.types[op].bitfield.regmmx
5760 || i.types[op].bitfield.regxmm
5761 || i.types[op].bitfield.regymm
5762 || i.types[op].bitfield.sreg2
5763 || i.types[op].bitfield.sreg3
5764 || i.types[op].bitfield.control
5765 || i.types[op].bitfield.debug
5766 || i.types[op].bitfield.test)
5767 break;
5768
5769 if (vex_3_sources)
5770 op = dest;
5771 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5772 {
5773 /* For instructions with VexNDS, the register-only
5774 source operand is encoded in VEX prefix. */
5775 gas_assert (mem != (unsigned int) ~0);
5776
5777 if (op > mem)
5778 {
5779 vex_reg = op++;
5780 gas_assert (op < i.operands);
5781 }
5782 else
5783 {
5784 /* Check register-only source operand when two source
5785 operands are swapped. */
5786 if (!i.tm.operand_types[op].bitfield.baseindex
5787 && i.tm.operand_types[op + 1].bitfield.baseindex)
5788 {
5789 vex_reg = op;
5790 op += 2;
5791 gas_assert (mem == (vex_reg + 1)
5792 && op < i.operands);
5793 }
5794 else
5795 {
5796 vex_reg = op + 1;
5797 gas_assert (vex_reg < i.operands);
5798 }
5799 }
5800 }
5801 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5802 {
5803 /* For instructions with VexNDD, the register destination
5804 is encoded in VEX prefix. */
5805 if (i.mem_operands == 0)
5806 {
5807 /* There is no memory operand. */
5808 gas_assert ((op + 2) == i.operands);
5809 vex_reg = op + 1;
5810 }
5811 else
5812 {
5813 /* There are only 2 operands. */
5814 gas_assert (op < 2 && i.operands == 2);
5815 vex_reg = 1;
5816 }
5817 }
5818 else
5819 gas_assert (op < i.operands);
5820
5821 if (vex_reg != (unsigned int) ~0)
5822 {
5823 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5824
5825 if (type->bitfield.reg32 != 1
5826 && type->bitfield.reg64 != 1
5827 && !operand_type_equal (type, &regxmm)
5828 && !operand_type_equal (type, &regymm))
5829 abort ();
5830
5831 i.vex.register_specifier = i.op[vex_reg].regs;
5832 }
5833
5834 /* Don't set OP operand twice. */
5835 if (vex_reg != op)
5836 {
5837 /* If there is an extension opcode to put here, the
5838 register number must be put into the regmem field. */
5839 if (i.tm.extension_opcode != None)
5840 {
5841 i.rm.regmem = i.op[op].regs->reg_num;
5842 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5843 i.rex |= REX_B;
5844 }
5845 else
5846 {
5847 i.rm.reg = i.op[op].regs->reg_num;
5848 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5849 i.rex |= REX_R;
5850 }
5851 }
5852
5853 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5854 must set it to 3 to indicate this is a register operand
5855 in the regmem field. */
5856 if (!i.mem_operands)
5857 i.rm.mode = 3;
5858 }
5859
5860 /* Fill in i.rm.reg field with extension opcode (if any). */
5861 if (i.tm.extension_opcode != None)
5862 i.rm.reg = i.tm.extension_opcode;
5863 }
5864 return default_seg;
5865 }
5866
5867 static void
5868 output_branch (void)
5869 {
5870 char *p;
5871 int size;
5872 int code16;
5873 int prefix;
5874 relax_substateT subtype;
5875 symbolS *sym;
5876 offsetT off;
5877
5878 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5879 size = i.disp32_encoding ? BIG : SMALL;
5880
5881 prefix = 0;
5882 if (i.prefix[DATA_PREFIX] != 0)
5883 {
5884 prefix = 1;
5885 i.prefixes -= 1;
5886 code16 ^= CODE16;
5887 }
5888 /* Pentium4 branch hints. */
5889 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5890 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5891 {
5892 prefix++;
5893 i.prefixes--;
5894 }
5895 if (i.prefix[REX_PREFIX] != 0)
5896 {
5897 prefix++;
5898 i.prefixes--;
5899 }
5900
5901 if (i.prefixes != 0 && !intel_syntax)
5902 as_warn (_("skipping prefixes on this instruction"));
5903
5904 /* It's always a symbol; End frag & setup for relax.
5905 Make sure there is enough room in this frag for the largest
5906 instruction we may generate in md_convert_frag. This is 2
5907 bytes for the opcode and room for the prefix and largest
5908 displacement. */
5909 frag_grow (prefix + 2 + 4);
5910 /* Prefix and 1 opcode byte go in fr_fix. */
5911 p = frag_more (prefix + 1);
5912 if (i.prefix[DATA_PREFIX] != 0)
5913 *p++ = DATA_PREFIX_OPCODE;
5914 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5915 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5916 *p++ = i.prefix[SEG_PREFIX];
5917 if (i.prefix[REX_PREFIX] != 0)
5918 *p++ = i.prefix[REX_PREFIX];
5919 *p = i.tm.base_opcode;
5920
5921 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5922 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5923 else if (cpu_arch_flags.bitfield.cpui386)
5924 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5925 else
5926 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5927 subtype |= code16;
5928
5929 sym = i.op[0].disps->X_add_symbol;
5930 off = i.op[0].disps->X_add_number;
5931
5932 if (i.op[0].disps->X_op != O_constant
5933 && i.op[0].disps->X_op != O_symbol)
5934 {
5935 /* Handle complex expressions. */
5936 sym = make_expr_symbol (i.op[0].disps);
5937 off = 0;
5938 }
5939
5940 /* 1 possible extra opcode + 4 byte displacement go in var part.
5941 Pass reloc in fr_var. */
5942 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5943 }
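/* Sketch of the relaxation set up above: a conditional jump is first emitted
   in its short 2-byte form (e.g. 75 cb for jne) and md_convert_frag later
   rewrites it as the 6-byte near form (0f 85 rel32) if the displacement
   turns out not to fit in a byte; the frag_grow/frag_var sizes reserve room
   for that worst case.  */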
5944
5945 static void
5946 output_jump (void)
5947 {
5948 char *p;
5949 int size;
5950 fixS *fixP;
5951
5952 if (i.tm.opcode_modifier.jumpbyte)
5953 {
5954 /* This is a loop or jecxz type instruction. */
5955 size = 1;
5956 if (i.prefix[ADDR_PREFIX] != 0)
5957 {
5958 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5959 i.prefixes -= 1;
5960 }
5961 /* Pentium4 branch hints. */
5962 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5963 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5964 {
5965 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5966 i.prefixes--;
5967 }
5968 }
5969 else
5970 {
5971 int code16;
5972
5973 code16 = 0;
5974 if (flag_code == CODE_16BIT)
5975 code16 = CODE16;
5976
5977 if (i.prefix[DATA_PREFIX] != 0)
5978 {
5979 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5980 i.prefixes -= 1;
5981 code16 ^= CODE16;
5982 }
5983
5984 size = 4;
5985 if (code16)
5986 size = 2;
5987 }
5988
5989 if (i.prefix[REX_PREFIX] != 0)
5990 {
5991 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5992 i.prefixes -= 1;
5993 }
5994
5995 if (i.prefixes != 0 && !intel_syntax)
5996 as_warn (_("skipping prefixes on this instruction"));
5997
5998 p = frag_more (1 + size);
5999 *p++ = i.tm.base_opcode;
6000
6001 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6002 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6003
6004 /* All jumps handled here are signed, but don't use a signed limit
6005 check for 32 and 16 bit jumps as we want to allow wrap around at
6006 4G and 64k respectively. */
6007 if (size == 1)
6008 fixP->fx_signed = 1;
6009 }
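/* Illustrative example (the specific insns here are arbitrary):
   `jecxz label' and the loop insns are the JumpByte case above, so
   size is 1 and the resulting fixup is marked signed; a direct near
   `call label' in 32-bit code without a data prefix takes the other
   path and gets a 4-byte pc-relative fixup instead.  */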
6010
6011 static void
6012 output_interseg_jump (void)
6013 {
6014 char *p;
6015 int size;
6016 int prefix;
6017 int code16;
6018
6019 code16 = 0;
6020 if (flag_code == CODE_16BIT)
6021 code16 = CODE16;
6022
6023 prefix = 0;
6024 if (i.prefix[DATA_PREFIX] != 0)
6025 {
6026 prefix = 1;
6027 i.prefixes -= 1;
6028 code16 ^= CODE16;
6029 }
6030 if (i.prefix[REX_PREFIX] != 0)
6031 {
6032 prefix++;
6033 i.prefixes -= 1;
6034 }
6035
6036 size = 4;
6037 if (code16)
6038 size = 2;
6039
6040 if (i.prefixes != 0 && !intel_syntax)
6041 as_warn (_("skipping prefixes on this instruction"));
6042
6043 /* 1 opcode; 2 segment; offset */
6044 p = frag_more (prefix + 1 + 2 + size);
6045
6046 if (i.prefix[DATA_PREFIX] != 0)
6047 *p++ = DATA_PREFIX_OPCODE;
6048
6049 if (i.prefix[REX_PREFIX] != 0)
6050 *p++ = i.prefix[REX_PREFIX];
6051
6052 *p++ = i.tm.base_opcode;
6053 if (i.op[1].imms->X_op == O_constant)
6054 {
6055 offsetT n = i.op[1].imms->X_add_number;
6056
6057 if (size == 2
6058 && !fits_in_unsigned_word (n)
6059 && !fits_in_signed_word (n))
6060 {
6061 as_bad (_("16-bit jump out of range"));
6062 return;
6063 }
6064 md_number_to_chars (p, n, size);
6065 }
6066 else
6067 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6068 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6069 if (i.op[0].imms->X_op != O_constant)
6070 as_bad (_("can't handle non absolute segment in `%s'"),
6071 i.tm.name);
6072 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6073 }
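/* Illustrative example (the selector and target here are arbitrary):
   a direct far jump such as

	ljmp	$0x08, $target

   is laid out above as the opcode byte, then the offset (4 bytes in
   32-bit code, 2 bytes when the effective operand size is 16 bits),
   then the 2-byte segment selector.  */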
6074
6075 static void
6076 output_insn (void)
6077 {
6078 fragS *insn_start_frag;
6079 offsetT insn_start_off;
6080
6081 /* Tie dwarf2 debug info to the address at the start of the insn.
6082 We can't do this after the insn has been output as the current
6083	     frag may have been closed off, e.g. by frag_var.  */
6084 dwarf2_emit_insn (0);
6085
6086 insn_start_frag = frag_now;
6087 insn_start_off = frag_now_fix ();
6088
6089 /* Output jumps. */
6090 if (i.tm.opcode_modifier.jump)
6091 output_branch ();
6092 else if (i.tm.opcode_modifier.jumpbyte
6093 || i.tm.opcode_modifier.jumpdword)
6094 output_jump ();
6095 else if (i.tm.opcode_modifier.jumpintersegment)
6096 output_interseg_jump ();
6097 else
6098 {
6099 /* Output normal instructions here. */
6100 char *p;
6101 unsigned char *q;
6102 unsigned int j;
6103 unsigned int prefix;
6104
6105	      /* Since the VEX prefix already encodes the implied opcode
6106		 prefix, we don't need to emit an explicit one.  */
6107 if (!i.tm.opcode_modifier.vex)
6108 {
6109 switch (i.tm.opcode_length)
6110 {
6111 case 3:
6112 if (i.tm.base_opcode & 0xff000000)
6113 {
6114 prefix = (i.tm.base_opcode >> 24) & 0xff;
6115 goto check_prefix;
6116 }
6117 break;
6118 case 2:
6119 if ((i.tm.base_opcode & 0xff0000) != 0)
6120 {
6121 prefix = (i.tm.base_opcode >> 16) & 0xff;
6122 if (i.tm.cpu_flags.bitfield.cpupadlock)
6123 {
6124 check_prefix:
6125 if (prefix != REPE_PREFIX_OPCODE
6126 || (i.prefix[REP_PREFIX]
6127 != REPE_PREFIX_OPCODE))
6128 add_prefix (prefix);
6129 }
6130 else
6131 add_prefix (prefix);
6132 }
6133 break;
6134 case 1:
6135 break;
6136 default:
6137 abort ();
6138 }
6139
6140 /* The prefix bytes. */
6141 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6142 if (*q)
6143 FRAG_APPEND_1_CHAR (*q);
6144 }
6145
6146 if (i.tm.opcode_modifier.vex)
6147 {
6148 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6149 if (*q)
6150 switch (j)
6151 {
6152 case REX_PREFIX:
6153 /* REX byte is encoded in VEX prefix. */
6154 break;
6155 case SEG_PREFIX:
6156 case ADDR_PREFIX:
6157 FRAG_APPEND_1_CHAR (*q);
6158 break;
6159 default:
6160 /* There should be no other prefixes for instructions
6161 with VEX prefix. */
6162 abort ();
6163 }
6164
6165 /* Now the VEX prefix. */
6166 p = frag_more (i.vex.length);
6167 for (j = 0; j < i.vex.length; j++)
6168 p[j] = i.vex.bytes[j];
6169 }
6170
6171 /* Now the opcode; be careful about word order here! */
6172 if (i.tm.opcode_length == 1)
6173 {
6174 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6175 }
6176 else
6177 {
6178 switch (i.tm.opcode_length)
6179 {
6180 case 3:
6181 p = frag_more (3);
6182 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6183 break;
6184 case 2:
6185 p = frag_more (2);
6186 break;
6187 default:
6188 abort ();
6189 break;
6190 }
6191
6192 /* Put out high byte first: can't use md_number_to_chars! */
6193 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6194 *p = i.tm.base_opcode & 0xff;
6195 }
6196
6197 /* Now the modrm byte and sib byte (if present). */
6198 if (i.tm.opcode_modifier.modrm)
6199 {
6200 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6201 | i.rm.reg << 3
6202 | i.rm.mode << 6));
6203 /* If i.rm.regmem == ESP (4)
6204 && i.rm.mode != (Register mode)
6205 && not 16 bit
6206	     ==> need a SIB byte.  */
6207 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6208 && i.rm.mode != 3
6209 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6210 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6211 | i.sib.index << 3
6212 | i.sib.scale << 6));
6213 }
6214
6215 if (i.disp_operands)
6216 output_disp (insn_start_frag, insn_start_off);
6217
6218 if (i.imm_operands)
6219 output_imm (insn_start_frag, insn_start_off);
6220 }
6221
6222 #ifdef DEBUG386
6223 if (flag_debug)
6224 {
6225 pi ("" /*line*/, &i);
6226 }
6227 #endif /* DEBUG386 */
6228 }
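/* Worked example (illustrative; the instruction is arbitrary):
   assuming no prefixes and no VEX encoding,

	addl	$1, 4(%esp)

   comes out of output_insn as

	0x83		one-byte opcode
	0x44		ModRM: mod=01, reg=/0, rm=100 (SIB follows)
	0x24		SIB: base=%esp, no index
	0x04		8-bit displacement, emitted by output_disp
	0x01		8-bit immediate, emitted by output_imm  */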
6229
6230 /* Return the size of the displacement operand N. */
6231
6232 static int
6233 disp_size (unsigned int n)
6234 {
6235 int size = 4;
6236 if (i.types[n].bitfield.disp64)
6237 size = 8;
6238 else if (i.types[n].bitfield.disp8)
6239 size = 1;
6240 else if (i.types[n].bitfield.disp16)
6241 size = 2;
6242 return size;
6243 }
6244
6245 /* Return the size of the immediate operand N. */
6246
6247 static int
6248 imm_size (unsigned int n)
6249 {
6250 int size = 4;
6251 if (i.types[n].bitfield.imm64)
6252 size = 8;
6253 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6254 size = 1;
6255 else if (i.types[n].bitfield.imm16)
6256 size = 2;
6257 return size;
6258 }
6259
6260 static void
6261 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6262 {
6263 char *p;
6264 unsigned int n;
6265
6266 for (n = 0; n < i.operands; n++)
6267 {
6268 if (operand_type_check (i.types[n], disp))
6269 {
6270 if (i.op[n].disps->X_op == O_constant)
6271 {
6272 int size = disp_size (n);
6273 offsetT val;
6274
6275 val = offset_in_range (i.op[n].disps->X_add_number,
6276 size);
6277 p = frag_more (size);
6278 md_number_to_chars (p, val, size);
6279 }
6280 else
6281 {
6282 enum bfd_reloc_code_real reloc_type;
6283 int size = disp_size (n);
6284 int sign = i.types[n].bitfield.disp32s;
6285 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6286
6287 /* We can't have 8 bit displacement here. */
6288 gas_assert (!i.types[n].bitfield.disp8);
6289
6290 /* The PC relative address is computed relative
6291 to the instruction boundary, so in case immediate
6292		     fields follow, we need to adjust the value.  */
6293 if (pcrel && i.imm_operands)
6294 {
6295 unsigned int n1;
6296 int sz = 0;
6297
6298 for (n1 = 0; n1 < i.operands; n1++)
6299 if (operand_type_check (i.types[n1], imm))
6300 {
6301 /* Only one immediate is allowed for PC
6302 relative address. */
6303 gas_assert (sz == 0);
6304 sz = imm_size (n1);
6305 i.op[n].disps->X_add_number -= sz;
6306 }
6307 /* We should find the immediate. */
6308 gas_assert (sz != 0);
6309 }
6310
6311 p = frag_more (size);
6312 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6313 if (GOT_symbol
6314 && GOT_symbol == i.op[n].disps->X_add_symbol
6315 && (((reloc_type == BFD_RELOC_32
6316 || reloc_type == BFD_RELOC_X86_64_32S
6317 || (reloc_type == BFD_RELOC_64
6318 && object_64bit))
6319 && (i.op[n].disps->X_op == O_symbol
6320 || (i.op[n].disps->X_op == O_add
6321 && ((symbol_get_value_expression
6322 (i.op[n].disps->X_op_symbol)->X_op)
6323 == O_subtract))))
6324 || reloc_type == BFD_RELOC_32_PCREL))
6325 {
6326 offsetT add;
6327
6328 if (insn_start_frag == frag_now)
6329 add = (p - frag_now->fr_literal) - insn_start_off;
6330 else
6331 {
6332 fragS *fr;
6333
6334 add = insn_start_frag->fr_fix - insn_start_off;
6335 for (fr = insn_start_frag->fr_next;
6336 fr && fr != frag_now; fr = fr->fr_next)
6337 add += fr->fr_fix;
6338 add += p - frag_now->fr_literal;
6339 }
6340
6341 if (!object_64bit)
6342 {
6343 reloc_type = BFD_RELOC_386_GOTPC;
6344 i.op[n].imms->X_add_number += add;
6345 }
6346 else if (reloc_type == BFD_RELOC_64)
6347 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6348 else
6349 /* Don't do the adjustment for x86-64, as there
6350 the pcrel addressing is relative to the _next_
6351 insn, and that is taken care of in other code. */
6352 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6353 }
6354 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6355 i.op[n].disps, pcrel, reloc_type);
6356 }
6357 }
6358 }
6359 }
6360
6361 static void
6362 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6363 {
6364 char *p;
6365 unsigned int n;
6366
6367 for (n = 0; n < i.operands; n++)
6368 {
6369 if (operand_type_check (i.types[n], imm))
6370 {
6371 if (i.op[n].imms->X_op == O_constant)
6372 {
6373 int size = imm_size (n);
6374 offsetT val;
6375
6376 val = offset_in_range (i.op[n].imms->X_add_number,
6377 size);
6378 p = frag_more (size);
6379 md_number_to_chars (p, val, size);
6380 }
6381 else
6382 {
6383 /* Not absolute_section.
6384 Need a 32-bit fixup (don't support 8bit
6385 non-absolute imms). Try to support other
6386 sizes ... */
6387 enum bfd_reloc_code_real reloc_type;
6388 int size = imm_size (n);
6389 int sign;
6390
6391 if (i.types[n].bitfield.imm32s
6392 && (i.suffix == QWORD_MNEM_SUFFIX
6393 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6394 sign = 1;
6395 else
6396 sign = 0;
6397
6398 p = frag_more (size);
6399 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6400
6401 /* This is tough to explain. We end up with this one if we
6402 * have operands that look like
6403 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6404 * obtain the absolute address of the GOT, and it is strongly
6405 * preferable from a performance point of view to avoid using
6406 * a runtime relocation for this. The actual sequence of
6407	       * instructions often looks something like:
6408 *
6409 * call .L66
6410 * .L66:
6411 * popl %ebx
6412 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6413 *
6414 * The call and pop essentially return the absolute address
6415 * of the label .L66 and store it in %ebx. The linker itself
6416 * will ultimately change the first operand of the addl so
6417 * that %ebx points to the GOT, but to keep things simple, the
6418 * .o file must have this operand set so that it generates not
6419 * the absolute address of .L66, but the absolute address of
6420	       * itself.  This allows the linker itself to simply treat a GOTPC
6421 * relocation as asking for a pcrel offset to the GOT to be
6422 * added in, and the addend of the relocation is stored in the
6423 * operand field for the instruction itself.
6424 *
6425 * Our job here is to fix the operand so that it would add
6426 * the correct offset so that %ebx would point to itself. The
6427 * thing that is tricky is that .-.L66 will point to the
6428 * beginning of the instruction, so we need to further modify
6429 * the operand so that it will point to itself. There are
6430 * other cases where you have something like:
6431 *
6432 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6433 *
6434 * and here no correction would be required. Internally in
6435 * the assembler we treat operands of this form as not being
6436 * pcrel since the '.' is explicitly mentioned, and I wonder
6437 * whether it would simplify matters to do it this way. Who
6438 * knows. In earlier versions of the PIC patches, the
6439 * pcrel_adjust field was used to store the correction, but
6440 * since the expression is not pcrel, I felt it would be
6441 * confusing to do it this way. */
6442
6443 if ((reloc_type == BFD_RELOC_32
6444 || reloc_type == BFD_RELOC_X86_64_32S
6445 || reloc_type == BFD_RELOC_64)
6446 && GOT_symbol
6447 && GOT_symbol == i.op[n].imms->X_add_symbol
6448 && (i.op[n].imms->X_op == O_symbol
6449 || (i.op[n].imms->X_op == O_add
6450 && ((symbol_get_value_expression
6451 (i.op[n].imms->X_op_symbol)->X_op)
6452 == O_subtract))))
6453 {
6454 offsetT add;
6455
6456 if (insn_start_frag == frag_now)
6457 add = (p - frag_now->fr_literal) - insn_start_off;
6458 else
6459 {
6460 fragS *fr;
6461
6462 add = insn_start_frag->fr_fix - insn_start_off;
6463 for (fr = insn_start_frag->fr_next;
6464 fr && fr != frag_now; fr = fr->fr_next)
6465 add += fr->fr_fix;
6466 add += p - frag_now->fr_literal;
6467 }
6468
6469 if (!object_64bit)
6470 reloc_type = BFD_RELOC_386_GOTPC;
6471 else if (size == 4)
6472 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6473 else if (size == 8)
6474 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6475 i.op[n].imms->X_add_number += add;
6476 }
6477 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6478 i.op[n].imms, 0, reloc_type);
6479 }
6480 }
6481 }
6482 }
6483 \f
6484 /* x86_cons_fix_new is called via the expression parsing code when a
6485 reloc is needed. We use this hook to get the correct .got reloc. */
6486 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6487 static int cons_sign = -1;
6488
6489 void
6490 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6491 expressionS *exp)
6492 {
6493 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6494
6495 got_reloc = NO_RELOC;
6496
6497 #ifdef TE_PE
6498 if (exp->X_op == O_secrel)
6499 {
6500 exp->X_op = O_symbol;
6501 r = BFD_RELOC_32_SECREL;
6502 }
6503 #endif
6504
6505 fix_new_exp (frag, off, len, exp, 0, r);
6506 }
6507
6508 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6509 # define lex_got(reloc, adjust, types) NULL
6510 #else
6511 /* Parse operands of the form
6512 <symbol>@GOTOFF+<nnn>
6513 and similar .plt or .got references.
6514
6515 If we find one, set up the correct relocation in RELOC and copy the
6516 input string, minus the `@GOTOFF' into a malloc'd buffer for
6517 parsing by the calling routine. Return this buffer, and if ADJUST
6518 is non-null set it to the length of the string we removed from the
6519 input line. Otherwise return NULL. */
6520 static char *
6521 lex_got (enum bfd_reloc_code_real *rel,
6522 int *adjust,
6523 i386_operand_type *types)
6524 {
6525	  /* Some of the relocations depend on the size of the field to
6526 be relocated. But in our callers i386_immediate and i386_displacement
6527 we don't yet know the operand size (this will be set by insn
6528 matching). Hence we record the word32 relocation here,
6529 and adjust the reloc according to the real size in reloc(). */
6530 static const struct {
6531 const char *str;
6532 int len;
6533 const enum bfd_reloc_code_real rel[2];
6534 const i386_operand_type types64;
6535 } gotrel[] = {
6536 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6537 BFD_RELOC_X86_64_PLTOFF64 },
6538 OPERAND_TYPE_IMM64 },
6539 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6540 BFD_RELOC_X86_64_PLT32 },
6541 OPERAND_TYPE_IMM32_32S_DISP32 },
6542 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6543 BFD_RELOC_X86_64_GOTPLT64 },
6544 OPERAND_TYPE_IMM64_DISP64 },
6545 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6546 BFD_RELOC_X86_64_GOTOFF64 },
6547 OPERAND_TYPE_IMM64_DISP64 },
6548 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6549 BFD_RELOC_X86_64_GOTPCREL },
6550 OPERAND_TYPE_IMM32_32S_DISP32 },
6551 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6552 BFD_RELOC_X86_64_TLSGD },
6553 OPERAND_TYPE_IMM32_32S_DISP32 },
6554 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6555 _dummy_first_bfd_reloc_code_real },
6556 OPERAND_TYPE_NONE },
6557 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6558 BFD_RELOC_X86_64_TLSLD },
6559 OPERAND_TYPE_IMM32_32S_DISP32 },
6560 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6561 BFD_RELOC_X86_64_GOTTPOFF },
6562 OPERAND_TYPE_IMM32_32S_DISP32 },
6563 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6564 BFD_RELOC_X86_64_TPOFF32 },
6565 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6566 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6567 _dummy_first_bfd_reloc_code_real },
6568 OPERAND_TYPE_NONE },
6569 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6570 BFD_RELOC_X86_64_DTPOFF32 },
6571 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6572 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6573 _dummy_first_bfd_reloc_code_real },
6574 OPERAND_TYPE_NONE },
6575 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6576 _dummy_first_bfd_reloc_code_real },
6577 OPERAND_TYPE_NONE },
6578 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6579 BFD_RELOC_X86_64_GOT32 },
6580 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6581 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6582 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6583 OPERAND_TYPE_IMM32_32S_DISP32 },
6584 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6585 BFD_RELOC_X86_64_TLSDESC_CALL },
6586 OPERAND_TYPE_IMM32_32S_DISP32 },
6587 };
6588 char *cp;
6589 unsigned int j;
6590
6591 if (!IS_ELF)
6592 return NULL;
6593
6594 for (cp = input_line_pointer; *cp != '@'; cp++)
6595 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6596 return NULL;
6597
6598 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6599 {
6600 int len = gotrel[j].len;
6601 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6602 {
6603 if (gotrel[j].rel[object_64bit] != 0)
6604 {
6605 int first, second;
6606 char *tmpbuf, *past_reloc;
6607
6608 *rel = gotrel[j].rel[object_64bit];
6609 if (adjust)
6610 *adjust = len;
6611
6612 if (types)
6613 {
6614 if (flag_code != CODE_64BIT)
6615 {
6616 types->bitfield.imm32 = 1;
6617 types->bitfield.disp32 = 1;
6618 }
6619 else
6620 *types = gotrel[j].types64;
6621 }
6622
6623 if (GOT_symbol == NULL)
6624 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6625
6626 /* The length of the first part of our input line. */
6627 first = cp - input_line_pointer;
6628
6629 /* The second part goes from after the reloc token until
6630 (and including) an end_of_line char or comma. */
6631 past_reloc = cp + 1 + len;
6632 cp = past_reloc;
6633 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6634 ++cp;
6635 second = cp + 1 - past_reloc;
6636
6637 /* Allocate and copy string. The trailing NUL shouldn't
6638 be necessary, but be safe. */
6639 tmpbuf = (char *) xmalloc (first + second + 2);
6640 memcpy (tmpbuf, input_line_pointer, first);
6641 if (second != 0 && *past_reloc != ' ')
6642 /* Replace the relocation token with ' ', so that
6643 errors like foo@GOTOFF1 will be detected. */
6644 tmpbuf[first++] = ' ';
6645 memcpy (tmpbuf + first, past_reloc, second);
6646 tmpbuf[first + second] = '\0';
6647 return tmpbuf;
6648 }
6649
6650 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6651 gotrel[j].str, 1 << (5 + object_64bit));
6652 return NULL;
6653 }
6654 }
6655
6656 /* Might be a symbol version string. Don't as_bad here. */
6657 return NULL;
6658 }
6659 #endif
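/* Illustrative example (the symbol and register are arbitrary):
   given the operand `foo@GOTOFF(%ebx)', i386_displacement passes the
   string "foo@GOTOFF" through lex_got, which records
   BFD_RELOC_386_GOTOFF (for 32-bit ELF output) in i.reloc[] and
   hands back a copy with the "@GOTOFF" token replaced by a space, so
   the expression parser only ever sees "foo ".  */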
6660
6661 void
6662 x86_cons (expressionS *exp, int size)
6663 {
6664 intel_syntax = -intel_syntax;
6665
6666 exp->X_md = 0;
6667 if (size == 4 || (object_64bit && size == 8))
6668 {
6669 /* Handle @GOTOFF and the like in an expression. */
6670 char *save;
6671 char *gotfree_input_line;
6672 int adjust = 0;
6673
6674 save = input_line_pointer;
6675 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6676 if (gotfree_input_line)
6677 input_line_pointer = gotfree_input_line;
6678
6679 expression (exp);
6680
6681 if (gotfree_input_line)
6682 {
6683 /* expression () has merrily parsed up to the end of line,
6684 or a comma - in the wrong buffer. Transfer how far
6685 input_line_pointer has moved to the right buffer. */
6686 input_line_pointer = (save
6687 + (input_line_pointer - gotfree_input_line)
6688 + adjust);
6689 free (gotfree_input_line);
6690 if (exp->X_op == O_constant
6691 || exp->X_op == O_absent
6692 || exp->X_op == O_illegal
6693 || exp->X_op == O_register
6694 || exp->X_op == O_big)
6695 {
6696 char c = *input_line_pointer;
6697 *input_line_pointer = 0;
6698 as_bad (_("missing or invalid expression `%s'"), save);
6699 *input_line_pointer = c;
6700 }
6701 }
6702 }
6703 else
6704 expression (exp);
6705
6706 intel_syntax = -intel_syntax;
6707
6708 if (intel_syntax)
6709 i386_intel_simplify (exp);
6710 }
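/* Illustrative example (the symbol is arbitrary): for a data
   directive operand such as `foo@GOTOFF', x86_cons lets lex_got
   strip the relocation token and stash the reloc in got_reloc;
   x86_cons_fix_new then emits the GOT-relative fixup instead of a
   plain absolute one.  */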
6711
6712 static void
6713 signed_cons (int size)
6714 {
6715 if (flag_code == CODE_64BIT)
6716 cons_sign = 1;
6717 cons (size);
6718 cons_sign = -1;
6719 }
6720
6721 #ifdef TE_PE
6722 static void
6723 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6724 {
6725 expressionS exp;
6726
6727 do
6728 {
6729 expression (&exp);
6730 if (exp.X_op == O_symbol)
6731 exp.X_op = O_secrel;
6732
6733 emit_expr (&exp, 4);
6734 }
6735 while (*input_line_pointer++ == ',');
6736
6737 input_line_pointer--;
6738 demand_empty_rest_of_line ();
6739 }
6740 #endif
6741
6742 static int
6743 i386_immediate (char *imm_start)
6744 {
6745 char *save_input_line_pointer;
6746 char *gotfree_input_line;
6747 segT exp_seg = 0;
6748 expressionS *exp;
6749 i386_operand_type types;
6750
6751 operand_type_set (&types, ~0);
6752
6753 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6754 {
6755 as_bad (_("at most %d immediate operands are allowed"),
6756 MAX_IMMEDIATE_OPERANDS);
6757 return 0;
6758 }
6759
6760 exp = &im_expressions[i.imm_operands++];
6761 i.op[this_operand].imms = exp;
6762
6763 if (is_space_char (*imm_start))
6764 ++imm_start;
6765
6766 save_input_line_pointer = input_line_pointer;
6767 input_line_pointer = imm_start;
6768
6769 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6770 if (gotfree_input_line)
6771 input_line_pointer = gotfree_input_line;
6772
6773 exp_seg = expression (exp);
6774
6775 SKIP_WHITESPACE ();
6776 if (*input_line_pointer)
6777 as_bad (_("junk `%s' after expression"), input_line_pointer);
6778
6779 input_line_pointer = save_input_line_pointer;
6780 if (gotfree_input_line)
6781 {
6782 free (gotfree_input_line);
6783
6784 if (exp->X_op == O_constant || exp->X_op == O_register)
6785 exp->X_op = O_illegal;
6786 }
6787
6788 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6789 }
6790
6791 static int
6792 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6793 i386_operand_type types, const char *imm_start)
6794 {
6795 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6796 {
6797 if (imm_start)
6798 as_bad (_("missing or invalid immediate expression `%s'"),
6799 imm_start);
6800 return 0;
6801 }
6802 else if (exp->X_op == O_constant)
6803 {
6804 /* Size it properly later. */
6805 i.types[this_operand].bitfield.imm64 = 1;
6806 /* If not 64bit, sign extend val. */
6807 if (flag_code != CODE_64BIT
6808 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6809 exp->X_add_number
6810 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6811 }
6812 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6813 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6814 && exp_seg != absolute_section
6815 && exp_seg != text_section
6816 && exp_seg != data_section
6817 && exp_seg != bss_section
6818 && exp_seg != undefined_section
6819 && !bfd_is_com_section (exp_seg))
6820 {
6821 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6822 return 0;
6823 }
6824 #endif
6825 else if (!intel_syntax && exp->X_op == O_register)
6826 {
6827 if (imm_start)
6828 as_bad (_("illegal immediate register operand %s"), imm_start);
6829 return 0;
6830 }
6831 else
6832 {
6833 /* This is an address. The size of the address will be
6834 determined later, depending on destination register,
6835 suffix, or the default for the section. */
6836 i.types[this_operand].bitfield.imm8 = 1;
6837 i.types[this_operand].bitfield.imm16 = 1;
6838 i.types[this_operand].bitfield.imm32 = 1;
6839 i.types[this_operand].bitfield.imm32s = 1;
6840 i.types[this_operand].bitfield.imm64 = 1;
6841 i.types[this_operand] = operand_type_and (i.types[this_operand],
6842 types);
6843 }
6844
6845 return 1;
6846 }
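/* Illustrative example (the constant is arbitrary): outside 64-bit
   mode a constant immediate is sign extended above, so `$0xffffffff'
   is stored as -1; a symbolic immediate instead keeps the full
   imm8..imm64 set, possibly restricted by an @-reloc suffix, and is
   narrowed later during template matching.  */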
6847
6848 static char *
6849 i386_scale (char *scale)
6850 {
6851 offsetT val;
6852 char *save = input_line_pointer;
6853
6854 input_line_pointer = scale;
6855 val = get_absolute_expression ();
6856
6857 switch (val)
6858 {
6859 case 1:
6860 i.log2_scale_factor = 0;
6861 break;
6862 case 2:
6863 i.log2_scale_factor = 1;
6864 break;
6865 case 4:
6866 i.log2_scale_factor = 2;
6867 break;
6868 case 8:
6869 i.log2_scale_factor = 3;
6870 break;
6871 default:
6872 {
6873 char sep = *input_line_pointer;
6874
6875 *input_line_pointer = '\0';
6876 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6877 scale);
6878 *input_line_pointer = sep;
6879 input_line_pointer = save;
6880 return NULL;
6881 }
6882 }
6883 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6884 {
6885 as_warn (_("scale factor of %d without an index register"),
6886 1 << i.log2_scale_factor);
6887 i.log2_scale_factor = 0;
6888 }
6889 scale = input_line_pointer;
6890 input_line_pointer = save;
6891 return scale;
6892 }
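/* Illustrative example (the registers are arbitrary): for the
   operand `(%ebx,%esi,4)' the scale string "4" parsed above yields
   i.log2_scale_factor == 2; a scale supplied without an index
   register, as in `(%ebx,2)', only draws a warning and is reset
   to 0.  */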
6893
6894 static int
6895 i386_displacement (char *disp_start, char *disp_end)
6896 {
6897 expressionS *exp;
6898 segT exp_seg = 0;
6899 char *save_input_line_pointer;
6900 char *gotfree_input_line;
6901 int override;
6902 i386_operand_type bigdisp, types = anydisp;
6903 int ret;
6904
6905 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6906 {
6907 as_bad (_("at most %d displacement operands are allowed"),
6908 MAX_MEMORY_OPERANDS);
6909 return 0;
6910 }
6911
6912 operand_type_set (&bigdisp, 0);
6913 if ((i.types[this_operand].bitfield.jumpabsolute)
6914 || (!current_templates->start->opcode_modifier.jump
6915 && !current_templates->start->opcode_modifier.jumpdword))
6916 {
6917 bigdisp.bitfield.disp32 = 1;
6918 override = (i.prefix[ADDR_PREFIX] != 0);
6919 if (flag_code == CODE_64BIT)
6920 {
6921 if (!override)
6922 {
6923 bigdisp.bitfield.disp32s = 1;
6924 bigdisp.bitfield.disp64 = 1;
6925 }
6926 }
6927 else if ((flag_code == CODE_16BIT) ^ override)
6928 {
6929 bigdisp.bitfield.disp32 = 0;
6930 bigdisp.bitfield.disp16 = 1;
6931 }
6932 }
6933 else
6934 {
6935 /* For PC-relative branches, the width of the displacement
6936 is dependent upon data size, not address size. */
6937 override = (i.prefix[DATA_PREFIX] != 0);
6938 if (flag_code == CODE_64BIT)
6939 {
6940 if (override || i.suffix == WORD_MNEM_SUFFIX)
6941 bigdisp.bitfield.disp16 = 1;
6942 else
6943 {
6944 bigdisp.bitfield.disp32 = 1;
6945 bigdisp.bitfield.disp32s = 1;
6946 }
6947 }
6948 else
6949 {
6950 if (!override)
6951 override = (i.suffix == (flag_code != CODE_16BIT
6952 ? WORD_MNEM_SUFFIX
6953 : LONG_MNEM_SUFFIX));
6954 bigdisp.bitfield.disp32 = 1;
6955 if ((flag_code == CODE_16BIT) ^ override)
6956 {
6957 bigdisp.bitfield.disp32 = 0;
6958 bigdisp.bitfield.disp16 = 1;
6959 }
6960 }
6961 }
6962 i.types[this_operand] = operand_type_or (i.types[this_operand],
6963 bigdisp);
6964
6965 exp = &disp_expressions[i.disp_operands];
6966 i.op[this_operand].disps = exp;
6967 i.disp_operands++;
6968 save_input_line_pointer = input_line_pointer;
6969 input_line_pointer = disp_start;
6970 END_STRING_AND_SAVE (disp_end);
6971
6972 #ifndef GCC_ASM_O_HACK
6973 #define GCC_ASM_O_HACK 0
6974 #endif
6975 #if GCC_ASM_O_HACK
6976 END_STRING_AND_SAVE (disp_end + 1);
6977 if (i.types[this_operand].bitfield.baseIndex
6978 && displacement_string_end[-1] == '+')
6979 {
6980 /* This hack is to avoid a warning when using the "o"
6981 constraint within gcc asm statements.
6982 For instance:
6983
6984 #define _set_tssldt_desc(n,addr,limit,type) \
6985 __asm__ __volatile__ ( \
6986 "movw %w2,%0\n\t" \
6987 "movw %w1,2+%0\n\t" \
6988 "rorl $16,%1\n\t" \
6989 "movb %b1,4+%0\n\t" \
6990 "movb %4,5+%0\n\t" \
6991 "movb $0,6+%0\n\t" \
6992 "movb %h1,7+%0\n\t" \
6993 "rorl $16,%1" \
6994 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6995
6996 This works great except that the output assembler ends
6997 up looking a bit weird if it turns out that there is
6998 no offset. You end up producing code that looks like:
6999
7000 #APP
7001 movw $235,(%eax)
7002 movw %dx,2+(%eax)
7003 rorl $16,%edx
7004 movb %dl,4+(%eax)
7005 movb $137,5+(%eax)
7006 movb $0,6+(%eax)
7007 movb %dh,7+(%eax)
7008 rorl $16,%edx
7009 #NO_APP
7010
7011 So here we provide the missing zero. */
7012
7013 *displacement_string_end = '0';
7014 }
7015 #endif
7016 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7017 if (gotfree_input_line)
7018 input_line_pointer = gotfree_input_line;
7019
7020 exp_seg = expression (exp);
7021
7022 SKIP_WHITESPACE ();
7023 if (*input_line_pointer)
7024 as_bad (_("junk `%s' after expression"), input_line_pointer);
7025 #if GCC_ASM_O_HACK
7026 RESTORE_END_STRING (disp_end + 1);
7027 #endif
7028 input_line_pointer = save_input_line_pointer;
7029 if (gotfree_input_line)
7030 {
7031 free (gotfree_input_line);
7032
7033 if (exp->X_op == O_constant || exp->X_op == O_register)
7034 exp->X_op = O_illegal;
7035 }
7036
7037 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7038
7039 RESTORE_END_STRING (disp_end);
7040
7041 return ret;
7042 }
7043
7044 static int
7045 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7046 i386_operand_type types, const char *disp_start)
7047 {
7048 i386_operand_type bigdisp;
7049 int ret = 1;
7050
7051 /* We do this to make sure that the section symbol is in
7052 the symbol table. We will ultimately change the relocation
7053 to be relative to the beginning of the section. */
7054 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7055 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7056 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7057 {
7058 if (exp->X_op != O_symbol)
7059 goto inv_disp;
7060
7061 if (S_IS_LOCAL (exp->X_add_symbol)
7062 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7063 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7064 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7065 exp->X_op = O_subtract;
7066 exp->X_op_symbol = GOT_symbol;
7067 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7068 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7069 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7070 i.reloc[this_operand] = BFD_RELOC_64;
7071 else
7072 i.reloc[this_operand] = BFD_RELOC_32;
7073 }
7074
7075 else if (exp->X_op == O_absent
7076 || exp->X_op == O_illegal
7077 || exp->X_op == O_big)
7078 {
7079 inv_disp:
7080 as_bad (_("missing or invalid displacement expression `%s'"),
7081 disp_start);
7082 ret = 0;
7083 }
7084
7085 else if (flag_code == CODE_64BIT
7086 && !i.prefix[ADDR_PREFIX]
7087 && exp->X_op == O_constant)
7088 {
7089      /* Since the displacement is sign extended to 64bit, don't allow
7090 disp32 and turn off disp32s if they are out of range. */
7091 i.types[this_operand].bitfield.disp32 = 0;
7092 if (!fits_in_signed_long (exp->X_add_number))
7093 {
7094 i.types[this_operand].bitfield.disp32s = 0;
7095 if (i.types[this_operand].bitfield.baseindex)
7096 {
7097	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
7098 (long) exp->X_add_number);
7099 ret = 0;
7100 }
7101 }
7102 }
7103
7104 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7105 else if (exp->X_op != O_constant
7106 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7107 && exp_seg != absolute_section
7108 && exp_seg != text_section
7109 && exp_seg != data_section
7110 && exp_seg != bss_section
7111 && exp_seg != undefined_section
7112 && !bfd_is_com_section (exp_seg))
7113 {
7114 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7115 ret = 0;
7116 }
7117 #endif
7118
7119 /* Check if this is a displacement only operand. */
7120 bigdisp = i.types[this_operand];
7121 bigdisp.bitfield.disp8 = 0;
7122 bigdisp.bitfield.disp16 = 0;
7123 bigdisp.bitfield.disp32 = 0;
7124 bigdisp.bitfield.disp32s = 0;
7125 bigdisp.bitfield.disp64 = 0;
7126 if (operand_type_all_zero (&bigdisp))
7127 i.types[this_operand] = operand_type_and (i.types[this_operand],
7128 types);
7129
7130 return ret;
7131 }
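/* Illustrative example (the constant and register are arbitrary): in
   64-bit mode, and without an address prefix, a constant
   displacement used with a base or index register must fit in a
   signed 32-bit field, so something like `0x123456789(%rbx)' is
   rejected above; a displacement-only operand merely loses its
   Disp32/Disp32S bits and is left for template matching to accept or
   reject.  */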
7132
7133 /* Make sure the memory operand we've been dealt is valid.
7134 Return 1 on success, 0 on a failure. */
7135
7136 static int
7137 i386_index_check (const char *operand_string)
7138 {
7139 int ok;
7140 const char *kind = "base/index";
7141 #if INFER_ADDR_PREFIX
7142 int fudged = 0;
7143
7144 tryprefix:
7145 #endif
7146 ok = 1;
7147 if (current_templates->start->opcode_modifier.isstring
7148 && !current_templates->start->opcode_modifier.immext
7149 && (current_templates->end[-1].opcode_modifier.isstring
7150 || i.mem_operands))
7151 {
7152 /* Memory operands of string insns are special in that they only allow
7153 a single register (rDI, rSI, or rBX) as their memory address. */
7154 unsigned int expected;
7155
7156 kind = "string address";
7157
7158 if (current_templates->start->opcode_modifier.w)
7159 {
7160 i386_operand_type type = current_templates->end[-1].operand_types[0];
7161
7162 if (!type.bitfield.baseindex
7163 || ((!i.mem_operands != !intel_syntax)
7164 && current_templates->end[-1].operand_types[1]
7165 .bitfield.baseindex))
7166 type = current_templates->end[-1].operand_types[1];
7167 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7168 }
7169 else
7170 expected = 3 /* rBX */;
7171
7172 if (!i.base_reg || i.index_reg
7173 || operand_type_check (i.types[this_operand], disp))
7174 ok = -1;
7175 else if (!(flag_code == CODE_64BIT
7176 ? i.prefix[ADDR_PREFIX]
7177 ? i.base_reg->reg_type.bitfield.reg32
7178 : i.base_reg->reg_type.bitfield.reg64
7179 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7180 ? i.base_reg->reg_type.bitfield.reg32
7181 : i.base_reg->reg_type.bitfield.reg16))
7182 ok = 0;
7183 else if (i.base_reg->reg_num != expected)
7184 ok = -1;
7185
7186 if (ok < 0)
7187 {
7188 unsigned int j;
7189
7190 for (j = 0; j < i386_regtab_size; ++j)
7191 if ((flag_code == CODE_64BIT
7192 ? i.prefix[ADDR_PREFIX]
7193 ? i386_regtab[j].reg_type.bitfield.reg32
7194 : i386_regtab[j].reg_type.bitfield.reg64
7195 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7196 ? i386_regtab[j].reg_type.bitfield.reg32
7197 : i386_regtab[j].reg_type.bitfield.reg16)
7198 && i386_regtab[j].reg_num == expected)
7199 break;
7200 gas_assert (j < i386_regtab_size);
7201 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7202 operand_string,
7203 intel_syntax ? '[' : '(',
7204 register_prefix,
7205 i386_regtab[j].reg_name,
7206 intel_syntax ? ']' : ')');
7207 ok = 1;
7208 }
7209 }
7210 else if (flag_code == CODE_64BIT)
7211 {
7212 if ((i.base_reg
7213 && ((i.prefix[ADDR_PREFIX] == 0
7214 && !i.base_reg->reg_type.bitfield.reg64)
7215 || (i.prefix[ADDR_PREFIX]
7216 && !i.base_reg->reg_type.bitfield.reg32))
7217 && (i.index_reg
7218 || i.base_reg->reg_num !=
7219 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7220 || (i.index_reg
7221 && !(i.index_reg->reg_type.bitfield.regxmm
7222 || i.index_reg->reg_type.bitfield.regymm)
7223 && (!i.index_reg->reg_type.bitfield.baseindex
7224 || (i.prefix[ADDR_PREFIX] == 0
7225 && i.index_reg->reg_num != RegRiz
7226 && !i.index_reg->reg_type.bitfield.reg64
7227 )
7228 || (i.prefix[ADDR_PREFIX]
7229 && i.index_reg->reg_num != RegEiz
7230 && !i.index_reg->reg_type.bitfield.reg32))))
7231 ok = 0;
7232 }
7233 else
7234 {
7235 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7236 {
7237 /* 16bit checks. */
7238 if ((i.base_reg
7239 && (!i.base_reg->reg_type.bitfield.reg16
7240 || !i.base_reg->reg_type.bitfield.baseindex))
7241 || (i.index_reg
7242 && (!i.index_reg->reg_type.bitfield.reg16
7243 || !i.index_reg->reg_type.bitfield.baseindex
7244 || !(i.base_reg
7245 && i.base_reg->reg_num < 6
7246 && i.index_reg->reg_num >= 6
7247 && i.log2_scale_factor == 0))))
7248 ok = 0;
7249 }
7250 else
7251 {
7252 /* 32bit checks. */
7253 if ((i.base_reg
7254 && !i.base_reg->reg_type.bitfield.reg32)
7255 || (i.index_reg
7256 && !i.index_reg->reg_type.bitfield.regxmm
7257 && !i.index_reg->reg_type.bitfield.regymm
7258 && ((!i.index_reg->reg_type.bitfield.reg32
7259 && i.index_reg->reg_num != RegEiz)
7260 || !i.index_reg->reg_type.bitfield.baseindex)))
7261 ok = 0;
7262 }
7263 }
7264 if (!ok)
7265 {
7266 #if INFER_ADDR_PREFIX
7267 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7268 {
7269 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7270 i.prefixes += 1;
7271 /* Change the size of any displacement too. At most one of
7272 Disp16 or Disp32 is set.
7273 FIXME. There doesn't seem to be any real need for separate
7274 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7275 Removing them would probably clean up the code quite a lot. */
7276 if (flag_code != CODE_64BIT
7277 && (i.types[this_operand].bitfield.disp16
7278 || i.types[this_operand].bitfield.disp32))
7279 i.types[this_operand]
7280 = operand_type_xor (i.types[this_operand], disp16_32);
7281 fudged = 1;
7282 goto tryprefix;
7283 }
7284 if (fudged)
7285 as_bad (_("`%s' is not a valid %s expression"),
7286 operand_string,
7287 kind);
7288 else
7289 #endif
7290 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7291 operand_string,
7292 flag_code_names[i.prefix[ADDR_PREFIX]
7293 ? flag_code == CODE_32BIT
7294 ? CODE_16BIT
7295 : CODE_32BIT
7296 : flag_code],
7297 kind);
7298 }
7299 return ok;
7300 }
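/* Illustrative examples (the register pairs are arbitrary): with
   16-bit addressing only %bx or %bp can be the base and only %si or
   %di the index, so `(%bx,%si)' passes this check while `(%si,%bx)'
   does not; for string insns the memory operand must be the expected
   rSI/rDI (or rBX) register, and a mismatch is only warned about.  */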
7301
7302 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7303 on error. */
7304
7305 static int
7306 i386_att_operand (char *operand_string)
7307 {
7308 const reg_entry *r;
7309 char *end_op;
7310 char *op_string = operand_string;
7311
7312 if (is_space_char (*op_string))
7313 ++op_string;
7314
7315 /* We check for an absolute prefix (differentiating,
7316     for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7317 if (*op_string == ABSOLUTE_PREFIX)
7318 {
7319 ++op_string;
7320 if (is_space_char (*op_string))
7321 ++op_string;
7322 i.types[this_operand].bitfield.jumpabsolute = 1;
7323 }
7324
7325 /* Check if operand is a register. */
7326 if ((r = parse_register (op_string, &end_op)) != NULL)
7327 {
7328 i386_operand_type temp;
7329
7330 /* Check for a segment override by searching for ':' after a
7331 segment register. */
7332 op_string = end_op;
7333 if (is_space_char (*op_string))
7334 ++op_string;
7335 if (*op_string == ':'
7336 && (r->reg_type.bitfield.sreg2
7337 || r->reg_type.bitfield.sreg3))
7338 {
7339 switch (r->reg_num)
7340 {
7341 case 0:
7342 i.seg[i.mem_operands] = &es;
7343 break;
7344 case 1:
7345 i.seg[i.mem_operands] = &cs;
7346 break;
7347 case 2:
7348 i.seg[i.mem_operands] = &ss;
7349 break;
7350 case 3:
7351 i.seg[i.mem_operands] = &ds;
7352 break;
7353 case 4:
7354 i.seg[i.mem_operands] = &fs;
7355 break;
7356 case 5:
7357 i.seg[i.mem_operands] = &gs;
7358 break;
7359 }
7360
7361 /* Skip the ':' and whitespace. */
7362 ++op_string;
7363 if (is_space_char (*op_string))
7364 ++op_string;
7365
7366 if (!is_digit_char (*op_string)
7367 && !is_identifier_char (*op_string)
7368 && *op_string != '('
7369 && *op_string != ABSOLUTE_PREFIX)
7370 {
7371 as_bad (_("bad memory operand `%s'"), op_string);
7372 return 0;
7373 }
7374 /* Handle case of %es:*foo. */
7375 if (*op_string == ABSOLUTE_PREFIX)
7376 {
7377 ++op_string;
7378 if (is_space_char (*op_string))
7379 ++op_string;
7380 i.types[this_operand].bitfield.jumpabsolute = 1;
7381 }
7382 goto do_memory_reference;
7383 }
7384 if (*op_string)
7385 {
7386 as_bad (_("junk `%s' after register"), op_string);
7387 return 0;
7388 }
7389 temp = r->reg_type;
7390 temp.bitfield.baseindex = 0;
7391 i.types[this_operand] = operand_type_or (i.types[this_operand],
7392 temp);
7393 i.types[this_operand].bitfield.unspecified = 0;
7394 i.op[this_operand].regs = r;
7395 i.reg_operands++;
7396 }
7397 else if (*op_string == REGISTER_PREFIX)
7398 {
7399 as_bad (_("bad register name `%s'"), op_string);
7400 return 0;
7401 }
7402 else if (*op_string == IMMEDIATE_PREFIX)
7403 {
7404 ++op_string;
7405 if (i.types[this_operand].bitfield.jumpabsolute)
7406 {
7407 as_bad (_("immediate operand illegal with absolute jump"));
7408 return 0;
7409 }
7410 if (!i386_immediate (op_string))
7411 return 0;
7412 }
7413 else if (is_digit_char (*op_string)
7414 || is_identifier_char (*op_string)
7415 || *op_string == '(')
7416 {
7417 /* This is a memory reference of some sort. */
7418 char *base_string;
7419
7420 /* Start and end of displacement string expression (if found). */
7421 char *displacement_string_start;
7422 char *displacement_string_end;
7423
7424 do_memory_reference:
7425 if ((i.mem_operands == 1
7426 && !current_templates->start->opcode_modifier.isstring)
7427 || i.mem_operands == 2)
7428 {
7429 as_bad (_("too many memory references for `%s'"),
7430 current_templates->start->name);
7431 return 0;
7432 }
7433
7434 /* Check for base index form. We detect the base index form by
7435 looking for an ')' at the end of the operand, searching
7436 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7437 after the '('. */
7438 base_string = op_string + strlen (op_string);
7439
7440 --base_string;
7441 if (is_space_char (*base_string))
7442 --base_string;
7443
7444       /* If we only have a displacement, set up for it to be parsed later.  */
7445 displacement_string_start = op_string;
7446 displacement_string_end = base_string + 1;
7447
7448 if (*base_string == ')')
7449 {
7450 char *temp_string;
7451 unsigned int parens_balanced = 1;
7452	  /* We've already checked that the numbers of left & right ()'s are
7453 equal, so this loop will not be infinite. */
7454 do
7455 {
7456 base_string--;
7457 if (*base_string == ')')
7458 parens_balanced++;
7459 if (*base_string == '(')
7460 parens_balanced--;
7461 }
7462 while (parens_balanced);
7463
7464 temp_string = base_string;
7465
7466 /* Skip past '(' and whitespace. */
7467 ++base_string;
7468 if (is_space_char (*base_string))
7469 ++base_string;
7470
7471 if (*base_string == ','
7472 || ((i.base_reg = parse_register (base_string, &end_op))
7473 != NULL))
7474 {
7475 displacement_string_end = temp_string;
7476
7477 i.types[this_operand].bitfield.baseindex = 1;
7478
7479 if (i.base_reg)
7480 {
7481 base_string = end_op;
7482 if (is_space_char (*base_string))
7483 ++base_string;
7484 }
7485
7486 /* There may be an index reg or scale factor here. */
7487 if (*base_string == ',')
7488 {
7489 ++base_string;
7490 if (is_space_char (*base_string))
7491 ++base_string;
7492
7493 if ((i.index_reg = parse_register (base_string, &end_op))
7494 != NULL)
7495 {
7496 base_string = end_op;
7497 if (is_space_char (*base_string))
7498 ++base_string;
7499 if (*base_string == ',')
7500 {
7501 ++base_string;
7502 if (is_space_char (*base_string))
7503 ++base_string;
7504 }
7505 else if (*base_string != ')')
7506 {
7507 as_bad (_("expecting `,' or `)' "
7508 "after index register in `%s'"),
7509 operand_string);
7510 return 0;
7511 }
7512 }
7513 else if (*base_string == REGISTER_PREFIX)
7514 {
7515 as_bad (_("bad register name `%s'"), base_string);
7516 return 0;
7517 }
7518
7519 /* Check for scale factor. */
7520 if (*base_string != ')')
7521 {
7522 char *end_scale = i386_scale (base_string);
7523
7524 if (!end_scale)
7525 return 0;
7526
7527 base_string = end_scale;
7528 if (is_space_char (*base_string))
7529 ++base_string;
7530 if (*base_string != ')')
7531 {
7532 as_bad (_("expecting `)' "
7533 "after scale factor in `%s'"),
7534 operand_string);
7535 return 0;
7536 }
7537 }
7538 else if (!i.index_reg)
7539 {
7540 as_bad (_("expecting index register or scale factor "
7541 "after `,'; got '%c'"),
7542 *base_string);
7543 return 0;
7544 }
7545 }
7546 else if (*base_string != ')')
7547 {
7548 as_bad (_("expecting `,' or `)' "
7549 "after base register in `%s'"),
7550 operand_string);
7551 return 0;
7552 }
7553 }
7554 else if (*base_string == REGISTER_PREFIX)
7555 {
7556 as_bad (_("bad register name `%s'"), base_string);
7557 return 0;
7558 }
7559 }
7560
7561 /* If there's an expression beginning the operand, parse it,
7562 assuming displacement_string_start and
7563 displacement_string_end are meaningful. */
7564 if (displacement_string_start != displacement_string_end)
7565 {
7566 if (!i386_displacement (displacement_string_start,
7567 displacement_string_end))
7568 return 0;
7569 }
7570
7571 /* Special case for (%dx) while doing input/output op. */
7572 if (i.base_reg
7573 && operand_type_equal (&i.base_reg->reg_type,
7574 &reg16_inoutportreg)
7575 && i.index_reg == 0
7576 && i.log2_scale_factor == 0
7577 && i.seg[i.mem_operands] == 0
7578 && !operand_type_check (i.types[this_operand], disp))
7579 {
7580 i.types[this_operand] = inoutportreg;
7581 return 1;
7582 }
7583
7584 if (i386_index_check (operand_string) == 0)
7585 return 0;
7586 i.types[this_operand].bitfield.mem = 1;
7587 i.mem_operands++;
7588 }
7589 else
7590 {
7591 /* It's not a memory operand; argh! */
7592 as_bad (_("invalid char %s beginning operand %d `%s'"),
7593 output_invalid (*op_string),
7594 this_operand + 1,
7595 op_string);
7596 return 0;
7597 }
7598 return 1; /* Normal return. */
7599 }
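/* Illustrative example (the symbol and registers are arbitrary): for
   the AT&T operand `foo+8(%ebx,%esi,4)' the routine above splits off
   the displacement string "foo+8", records %ebx as the base register
   and %esi as the index, hands "4" to i386_scale, and finally lets
   i386_index_check validate the combination before counting it as a
   memory operand.  */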
7600 \f
7601 /* md_estimate_size_before_relax()
7602
7603 Called just before relax() for rs_machine_dependent frags. The x86
7604 assembler uses these frags to handle variable size jump
7605 instructions.
7606
7607 Any symbol that is now undefined will not become defined.
7608 Return the correct fr_subtype in the frag.
7609 Return the initial "guess for variable size of frag" to caller.
7610 The guess is actually the growth beyond the fixed part. Whatever
7611 we do to grow the fixed or variable part contributes to our
7612 returned value. */
7613
7614 int
7615 md_estimate_size_before_relax (fragS *fragP, segT segment)
7616 {
7617 /* We've already got fragP->fr_subtype right; all we have to do is
7618 check for un-relaxable symbols. On an ELF system, we can't relax
7619 an externally visible symbol, because it may be overridden by a
7620 shared library. */
7621 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7622 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7623 || (IS_ELF
7624 && (S_IS_EXTERNAL (fragP->fr_symbol)
7625 || S_IS_WEAK (fragP->fr_symbol)
7626 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7627 & BSF_GNU_INDIRECT_FUNCTION))))
7628 #endif
7629 #if defined (OBJ_COFF) && defined (TE_PE)
7630 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7631 && S_IS_WEAK (fragP->fr_symbol))
7632 #endif
7633 )
7634 {
7635 /* Symbol is undefined in this segment, or we need to keep a
7636 reloc so that weak symbols can be overridden. */
7637 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7638 enum bfd_reloc_code_real reloc_type;
7639 unsigned char *opcode;
7640 int old_fr_fix;
7641
7642 if (fragP->fr_var != NO_RELOC)
7643 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7644 else if (size == 2)
7645 reloc_type = BFD_RELOC_16_PCREL;
7646 else
7647 reloc_type = BFD_RELOC_32_PCREL;
7648
7649 old_fr_fix = fragP->fr_fix;
7650 opcode = (unsigned char *) fragP->fr_opcode;
7651
7652 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7653 {
7654 case UNCOND_JUMP:
7655 /* Make jmp (0xeb) a (d)word displacement jump. */
7656 opcode[0] = 0xe9;
7657 fragP->fr_fix += size;
7658 fix_new (fragP, old_fr_fix, size,
7659 fragP->fr_symbol,
7660 fragP->fr_offset, 1,
7661 reloc_type);
7662 break;
7663
7664 case COND_JUMP86:
7665 if (size == 2
7666 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7667 {
7668 /* Negate the condition, and branch past an
7669 unconditional jump. */
7670 opcode[0] ^= 1;
7671 opcode[1] = 3;
7672 /* Insert an unconditional jump. */
7673 opcode[2] = 0xe9;
7674 /* We added two extra opcode bytes, and have a two byte
7675 offset. */
7676 fragP->fr_fix += 2 + 2;
7677 fix_new (fragP, old_fr_fix + 2, 2,
7678 fragP->fr_symbol,
7679 fragP->fr_offset, 1,
7680 reloc_type);
7681 break;
7682 }
7683 /* Fall through. */
7684
7685 case COND_JUMP:
7686 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7687 {
7688 fixS *fixP;
7689
7690 fragP->fr_fix += 1;
7691 fixP = fix_new (fragP, old_fr_fix, 1,
7692 fragP->fr_symbol,
7693 fragP->fr_offset, 1,
7694 BFD_RELOC_8_PCREL);
7695 fixP->fx_signed = 1;
7696 break;
7697 }
7698
7699 /* This changes the byte-displacement jump 0x7N
7700 to the (d)word-displacement jump 0x0f,0x8N. */
7701 opcode[1] = opcode[0] + 0x10;
7702 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7703 /* We've added an opcode byte. */
7704 fragP->fr_fix += 1 + size;
7705 fix_new (fragP, old_fr_fix + 1, size,
7706 fragP->fr_symbol,
7707 fragP->fr_offset, 1,
7708 reloc_type);
7709 break;
7710
7711 default:
7712 BAD_CASE (fragP->fr_subtype);
7713 break;
7714 }
7715 frag_wane (fragP);
7716 return fragP->fr_fix - old_fr_fix;
7717 }
7718
7719 /* Guess size depending on current relax state. Initially the relax
7720 state will correspond to a short jump and we return 1, because
7721 the variable part of the frag (the branch offset) is one byte
7722 long. However, we can relax a section more than once and in that
7723 case we must either set fr_subtype back to the unrelaxed state,
7724 or return the value for the appropriate branch. */
7725 return md_relax_table[fragP->fr_subtype].rlx_length;
7726 }
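/* Illustrative example (the mnemonic is arbitrary): a `jne' to a
   symbol defined in the same section stays relaxable and the initial
   guess of 1 byte is returned; a `jne' to an external or weak symbol
   is widened here and now to the 0x0f 0x8N form with a 32-bit (or,
   under CODE16, 16-bit) pc-relative fix, and the frag is waned.  */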
7727
7728 /* Called after relax() is finished.
7729
7730 In: Address of frag.
7731 fr_type == rs_machine_dependent.
7732 fr_subtype is what the address relaxed to.
7733
7734 Out: Any fixSs and constants are set up.
7735 Caller will turn frag into a ".space 0". */
7736
7737 void
7738 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7739 fragS *fragP)
7740 {
7741 unsigned char *opcode;
7742 unsigned char *where_to_put_displacement = NULL;
7743 offsetT target_address;
7744 offsetT opcode_address;
7745 unsigned int extension = 0;
7746 offsetT displacement_from_opcode_start;
7747
7748 opcode = (unsigned char *) fragP->fr_opcode;
7749
7750 /* Address we want to reach in file space. */
7751 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7752
7753 /* Address opcode resides at in file space. */
7754 opcode_address = fragP->fr_address + fragP->fr_fix;
7755
7756 /* Displacement from opcode start to fill into instruction. */
7757 displacement_from_opcode_start = target_address - opcode_address;
7758
7759 if ((fragP->fr_subtype & BIG) == 0)
7760 {
7761 /* Don't have to change opcode. */
7762 extension = 1; /* 1 opcode + 1 displacement */
7763 where_to_put_displacement = &opcode[1];
7764 }
7765 else
7766 {
7767 if (no_cond_jump_promotion
7768 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7769 as_warn_where (fragP->fr_file, fragP->fr_line,
7770 _("long jump required"));
7771
7772 switch (fragP->fr_subtype)
7773 {
7774 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7775 extension = 4; /* 1 opcode + 4 displacement */
7776 opcode[0] = 0xe9;
7777 where_to_put_displacement = &opcode[1];
7778 break;
7779
7780 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7781 extension = 2; /* 1 opcode + 2 displacement */
7782 opcode[0] = 0xe9;
7783 where_to_put_displacement = &opcode[1];
7784 break;
7785
7786 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7787 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7788 extension = 5; /* 2 opcode + 4 displacement */
7789 opcode[1] = opcode[0] + 0x10;
7790 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7791 where_to_put_displacement = &opcode[2];
7792 break;
7793
7794 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7795 extension = 3; /* 2 opcode + 2 displacement */
7796 opcode[1] = opcode[0] + 0x10;
7797 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7798 where_to_put_displacement = &opcode[2];
7799 break;
7800
7801 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7802 extension = 4;
7803 opcode[0] ^= 1;
7804 opcode[1] = 3;
7805 opcode[2] = 0xe9;
7806 where_to_put_displacement = &opcode[3];
7807 break;
7808
7809 default:
7810 BAD_CASE (fragP->fr_subtype);
7811 break;
7812 }
7813 }
7814
7815   /* If the size is less than four we are sure that the operand fits,
7816      but if it's 4, then it could be that the displacement is larger
7817      than +/- 2GB.  */
7818 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7819 && object_64bit
7820 && ((addressT) (displacement_from_opcode_start - extension
7821 + ((addressT) 1 << 31))
7822 > (((addressT) 2 << 31) - 1)))
7823 {
7824 as_bad_where (fragP->fr_file, fragP->fr_line,
7825 _("jump target out of range"));
7826 /* Make us emit 0. */
7827 displacement_from_opcode_start = extension;
7828 }
7829 /* Now put displacement after opcode. */
7830 md_number_to_chars ((char *) where_to_put_displacement,
7831 (valueT) (displacement_from_opcode_start - extension),
7832 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7833 fragP->fr_fix += extension;
7834 }
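/* Worked example (illustrative; opcode values are the standard x86
   ones): a conditional jump relaxed to
   ENCODE_RELAX_STATE (COND_JUMP, BIG) has its 0x7N opcode rewritten
   as 0x0f,0x8N, grows the frag by extension == 5 bytes, and stores
   as its 32-bit displacement the distance from the end of the
   widened insn to the target.  */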
7835 \f
7836 /* Apply a fixup (fixP) to segment data, once it has been determined
7837 by our caller that we have all the info we need to fix it up.
7838
7839 Parameter valP is the pointer to the value of the bits.
7840
7841 On the 386, immediates, displacements, and data pointers are all in
7842 the same (little-endian) format, so we don't need to care about which
7843 we are handling. */
7844
7845 void
7846 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7847 {
7848 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7849 valueT value = *valP;
7850
7851 #if !defined (TE_Mach)
7852 if (fixP->fx_pcrel)
7853 {
7854 switch (fixP->fx_r_type)
7855 {
7856 default:
7857 break;
7858
7859 case BFD_RELOC_64:
7860 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7861 break;
7862 case BFD_RELOC_32:
7863 case BFD_RELOC_X86_64_32S:
7864 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7865 break;
7866 case BFD_RELOC_16:
7867 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7868 break;
7869 case BFD_RELOC_8:
7870 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7871 break;
7872 }
7873 }
7874
7875 if (fixP->fx_addsy != NULL
7876 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7877 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7878 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7879 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7880 && !use_rela_relocations)
7881 {
7882 /* This is a hack. There should be a better way to handle this.
7883 This covers for the fact that bfd_install_relocation will
7884 subtract the current location (for partial_inplace, PC relative
7885 relocations); see more below. */
7886 #ifndef OBJ_AOUT
7887 if (IS_ELF
7888 #ifdef TE_PE
7889 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7890 #endif
7891 )
7892 value += fixP->fx_where + fixP->fx_frag->fr_address;
7893 #endif
7894 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7895 if (IS_ELF)
7896 {
7897 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7898
7899 if ((sym_seg == seg
7900 || (symbol_section_p (fixP->fx_addsy)
7901 && sym_seg != absolute_section))
7902 && !generic_force_reloc (fixP))
7903 {
7904 /* Yes, we add the values in twice. This is because
7905 bfd_install_relocation subtracts them out again. I think
7906 bfd_install_relocation is broken, but I don't dare change
7907 it. FIXME. */
7908 value += fixP->fx_where + fixP->fx_frag->fr_address;
7909 }
7910 }
7911 #endif
7912 #if defined (OBJ_COFF) && defined (TE_PE)
7913 /* For some reason, the PE format does not store a
7914 section address offset for a PC relative symbol. */
7915 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7916 || S_IS_WEAK (fixP->fx_addsy))
7917 value += md_pcrel_from (fixP);
7918 #endif
7919 }
7920 #if defined (OBJ_COFF) && defined (TE_PE)
7921 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7922 {
7923 value -= S_GET_VALUE (fixP->fx_addsy);
7924 }
7925 #endif
7926
7927 /* Fix a few things - the dynamic linker expects certain values here,
7928 and we must not disappoint it. */
7929 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7930 if (IS_ELF && fixP->fx_addsy)
7931 switch (fixP->fx_r_type)
7932 {
7933 case BFD_RELOC_386_PLT32:
7934 case BFD_RELOC_X86_64_PLT32:
7935 /* Make the jump instruction point to the address of the operand. At
7936 runtime we merely add the offset to the actual PLT entry. */
7937 value = -4;
7938 break;
7939
7940 case BFD_RELOC_386_TLS_GD:
7941 case BFD_RELOC_386_TLS_LDM:
7942 case BFD_RELOC_386_TLS_IE_32:
7943 case BFD_RELOC_386_TLS_IE:
7944 case BFD_RELOC_386_TLS_GOTIE:
7945 case BFD_RELOC_386_TLS_GOTDESC:
7946 case BFD_RELOC_X86_64_TLSGD:
7947 case BFD_RELOC_X86_64_TLSLD:
7948 case BFD_RELOC_X86_64_GOTTPOFF:
7949 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7950 value = 0; /* Fully resolved at runtime. No addend. */
7951 	      /* Fall through.  */
7952 case BFD_RELOC_386_TLS_LE:
7953 case BFD_RELOC_386_TLS_LDO_32:
7954 case BFD_RELOC_386_TLS_LE_32:
7955 case BFD_RELOC_X86_64_DTPOFF32:
7956 case BFD_RELOC_X86_64_DTPOFF64:
7957 case BFD_RELOC_X86_64_TPOFF32:
7958 case BFD_RELOC_X86_64_TPOFF64:
7959 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7960 break;
7961
7962 case BFD_RELOC_386_TLS_DESC_CALL:
7963 case BFD_RELOC_X86_64_TLSDESC_CALL:
7964 value = 0; /* Fully resolved at runtime. No addend. */
7965 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7966 fixP->fx_done = 0;
7967 return;
7968
7969 case BFD_RELOC_386_GOT32:
7970 case BFD_RELOC_X86_64_GOT32:
7971 value = 0; /* Fully resolved at runtime. No addend. */
7972 break;
7973
7974 case BFD_RELOC_VTABLE_INHERIT:
7975 case BFD_RELOC_VTABLE_ENTRY:
7976 fixP->fx_done = 0;
7977 return;
7978
7979 default:
7980 break;
7981 }
7982 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7983 *valP = value;
7984 #endif /* !defined (TE_Mach) */
7985
7986 /* Are we finished with this relocation now? */
7987 if (fixP->fx_addsy == NULL)
7988 fixP->fx_done = 1;
7989 #if defined (OBJ_COFF) && defined (TE_PE)
7990 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7991 {
7992 fixP->fx_done = 0;
7993 /* Remember value for tc_gen_reloc. */
7994 fixP->fx_addnumber = value;
7995 /* Clear out the frag for now. */
7996 value = 0;
7997 }
7998 #endif
7999 else if (use_rela_relocations)
8000 {
8001 fixP->fx_no_overflow = 1;
8002 /* Remember value for tc_gen_reloc. */
8003 fixP->fx_addnumber = value;
8004 value = 0;
8005 }
8006
8007 md_number_to_chars (p, value, fixP->fx_size);
8008 }
8009 \f
8010 char *
8011 md_atof (int type, char *litP, int *sizeP)
8012 {
8013 /* This outputs the LITTLENUMs in REVERSE order;
8014 in accord with the little-endian 386. */
8015 return ieee_md_atof (type, litP, sizeP, FALSE);
8016 }
8017 \f
8018 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8019
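/* Return a printable representation of the invalid character C for use in
   diagnostics: printable characters are quoted, anything else is shown as
   a hex code.  */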
8020 static char *
8021 output_invalid (int c)
8022 {
8023 if (ISPRINT (c))
8024 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8025 "'%c'", c);
8026 else
8027 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8028 "(0x%x)", (unsigned char) c);
8029 return output_invalid_buf;
8030 }
8031
8032 /* REG_STRING starts *before* REGISTER_PREFIX. */
8033
8034 static const reg_entry *
8035 parse_real_register (char *reg_string, char **end_op)
8036 {
8037 char *s = reg_string;
8038 char *p;
8039 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8040 const reg_entry *r;
8041
8042 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8043 if (*s == REGISTER_PREFIX)
8044 ++s;
8045
8046 if (is_space_char (*s))
8047 ++s;
8048
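/* Copy the register name via register_chars[], which folds upper case to
   lower and yields 0 for any character that cannot appear in a register
   name; give up if the name is too long.  */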
8049 p = reg_name_given;
8050 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8051 {
8052 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8053 return (const reg_entry *) NULL;
8054 s++;
8055 }
8056
8057 /* For naked regs, make sure that we are not dealing with an identifier.
8058 This prevents confusing an identifier like `eax_var' with register
8059 `eax'. */
8060 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8061 return (const reg_entry *) NULL;
8062
8063 *end_op = s;
8064
8065 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8066
8067 /* Handle floating point regs, allowing spaces in the (i) part. */
8068 if (r == i386_regtab /* %st is first entry of table */)
8069 {
8070 if (is_space_char (*s))
8071 ++s;
8072 if (*s == '(')
8073 {
8074 ++s;
8075 if (is_space_char (*s))
8076 ++s;
8077 if (*s >= '0' && *s <= '7')
8078 {
8079 int fpr = *s - '0';
8080 ++s;
8081 if (is_space_char (*s))
8082 ++s;
8083 if (*s == ')')
8084 {
8085 *end_op = s + 1;
8086 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8087 know (r);
8088 return r + fpr;
8089 }
8090 }
8091 /* We have "%st(" then garbage. */
8092 return (const reg_entry *) NULL;
8093 }
8094 }
8095
8096 if (r == NULL || allow_pseudo_reg)
8097 return r;
8098
8099 if (operand_type_all_zero (&r->reg_type))
8100 return (const reg_entry *) NULL;
8101
8102 if ((r->reg_type.bitfield.reg32
8103 || r->reg_type.bitfield.sreg3
8104 || r->reg_type.bitfield.control
8105 || r->reg_type.bitfield.debug
8106 || r->reg_type.bitfield.test)
8107 && !cpu_arch_flags.bitfield.cpui386)
8108 return (const reg_entry *) NULL;
8109
8110 if (r->reg_type.bitfield.floatreg
8111 && !cpu_arch_flags.bitfield.cpu8087
8112 && !cpu_arch_flags.bitfield.cpu287
8113 && !cpu_arch_flags.bitfield.cpu387)
8114 return (const reg_entry *) NULL;
8115
8116 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8117 return (const reg_entry *) NULL;
8118
8119 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8120 return (const reg_entry *) NULL;
8121
8122 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8123 return (const reg_entry *) NULL;
8124
8125 /* Don't allow the fake index registers (%eiz, %riz) unless allow_index_reg is non-zero. */
8126 if (!allow_index_reg
8127 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8128 return (const reg_entry *) NULL;
8129
8130 if (((r->reg_flags & (RegRex64 | RegRex))
8131 || r->reg_type.bitfield.reg64)
8132 && (!cpu_arch_flags.bitfield.cpulm
8133 || !operand_type_equal (&r->reg_type, &control))
8134 && flag_code != CODE_64BIT)
8135 return (const reg_entry *) NULL;
8136
8137 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8138 return (const reg_entry *) NULL;
8139
8140 return r;
8141 }
8142
8143 /* REG_STRING starts *before* REGISTER_PREFIX. */
8144
8145 static const reg_entry *
8146 parse_register (char *reg_string, char **end_op)
8147 {
8148 const reg_entry *r;
8149
8150 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8151 r = parse_real_register (reg_string, end_op);
8152 else
8153 r = NULL;
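  /* Not a real register; it may still be a symbol that has been equated to
     a register, in which case its value lives in reg_section.  */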
8154 if (!r)
8155 {
8156 char *save = input_line_pointer;
8157 char c;
8158 symbolS *symbolP;
8159
8160 input_line_pointer = reg_string;
8161 c = get_symbol_end ();
8162 symbolP = symbol_find (reg_string);
8163 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8164 {
8165 const expressionS *e = symbol_get_value_expression (symbolP);
8166
8167 know (e->X_op == O_register);
8168 know (e->X_add_number >= 0
8169 && (valueT) e->X_add_number < i386_regtab_size);
8170 r = i386_regtab + e->X_add_number;
8171 *end_op = input_line_pointer;
8172 }
8173 *input_line_pointer = c;
8174 input_line_pointer = save;
8175 }
8176 return r;
8177 }
8178
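/* Expression-parser hook: try to recognise NAME as a register (with or
   without the `%' prefix); in Intel syntax, fall back to
   i386_intel_parse_name.  */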
8179 int
8180 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8181 {
8182 const reg_entry *r;
8183 char *end = input_line_pointer;
8184
8185 *end = *nextcharP;
8186 r = parse_register (name, &input_line_pointer);
8187 if (r && end <= input_line_pointer)
8188 {
8189 *nextcharP = *input_line_pointer;
8190 *input_line_pointer = 0;
8191 e->X_op = O_register;
8192 e->X_add_number = r - i386_regtab;
8193 return 1;
8194 }
8195 input_line_pointer = end;
8196 *end = 0;
8197 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8198 }
8199
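/* Handle operand syntax that the generic expression code does not know
   about: `%'-prefixed registers and, in Intel syntax, `[' index
   expressions.  */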
8200 void
8201 md_operand (expressionS *e)
8202 {
8203 char *end;
8204 const reg_entry *r;
8205
8206 switch (*input_line_pointer)
8207 {
8208 case REGISTER_PREFIX:
8209 r = parse_real_register (input_line_pointer, &end);
8210 if (r)
8211 {
8212 e->X_op = O_register;
8213 e->X_add_number = r - i386_regtab;
8214 input_line_pointer = end;
8215 }
8216 break;
8217
8218 case '[':
8219 gas_assert (intel_syntax);
8220 end = input_line_pointer++;
8221 expression (e);
8222 if (*input_line_pointer == ']')
8223 {
8224 ++input_line_pointer;
8225 e->X_op_symbol = make_expr_symbol (e);
8226 e->X_add_symbol = NULL;
8227 e->X_add_number = 0;
8228 e->X_op = O_index;
8229 }
8230 else
8231 {
8232 e->X_op = O_absent;
8233 input_line_pointer = end;
8234 }
8235 break;
8236 }
8237 }
8238
8239 \f
8240 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8241 const char *md_shortopts = "kVQ:sqn";
8242 #else
8243 const char *md_shortopts = "qn";
8244 #endif
8245
8246 #define OPTION_32 (OPTION_MD_BASE + 0)
8247 #define OPTION_64 (OPTION_MD_BASE + 1)
8248 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8249 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8250 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8251 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8252 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8253 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8254 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8255 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8256 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8257 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8258 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8259 #define OPTION_X32 (OPTION_MD_BASE + 13)
8260
8261 struct option md_longopts[] =
8262 {
8263 {"32", no_argument, NULL, OPTION_32},
8264 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8265 || defined (TE_PE) || defined (TE_PEP))
8266 {"64", no_argument, NULL, OPTION_64},
8267 #endif
8268 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8269 {"x32", no_argument, NULL, OPTION_X32},
8270 #endif
8271 {"divide", no_argument, NULL, OPTION_DIVIDE},
8272 {"march", required_argument, NULL, OPTION_MARCH},
8273 {"mtune", required_argument, NULL, OPTION_MTUNE},
8274 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8275 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8276 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8277 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8278 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8279 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8280 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8281 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8282 {NULL, no_argument, NULL, 0}
8283 };
8284 size_t md_longopts_size = sizeof (md_longopts);
8285
8286 int
8287 md_parse_option (int c, char *arg)
8288 {
8289 unsigned int j;
8290 char *arch, *next;
8291
8292 switch (c)
8293 {
8294 case 'n':
8295 optimize_align_code = 0;
8296 break;
8297
8298 case 'q':
8299 quiet_warnings = 1;
8300 break;
8301
8302 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8303 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8304 should be emitted or not. FIXME: Not implemented. */
8305 case 'Q':
8306 break;
8307
8308 /* -V: SVR4 argument to print version ID. */
8309 case 'V':
8310 print_version_id ();
8311 break;
8312
8313 /* -k: Ignore for FreeBSD compatibility. */
8314 case 'k':
8315 break;
8316
8317 case 's':
8318 /* -s: On i386 Solaris, this tells the native assembler to use
8319 .stab instead of .stab.excl. We always use .stab anyhow. */
8320 break;
8321 #endif
8322 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8323 || defined (TE_PE) || defined (TE_PEP))
8324 case OPTION_64:
8325 {
8326 const char **list, **l;
8327
8328 list = bfd_target_list ();
8329 for (l = list; *l != NULL; l++)
8330 if (CONST_STRNEQ (*l, "elf64-x86-64")
8331 || strcmp (*l, "coff-x86-64") == 0
8332 || strcmp (*l, "pe-x86-64") == 0
8333 || strcmp (*l, "pei-x86-64") == 0)
8334 {
8335 default_arch = "x86_64";
8336 break;
8337 }
8338 if (*l == NULL)
8339 as_fatal (_("no compiled in support for x86_64"));
8340 free (list);
8341 }
8342 break;
8343 #endif
8344
8345 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8346 case OPTION_X32:
8347 if (IS_ELF)
8348 {
8349 const char **list, **l;
8350
8351 list = bfd_target_list ();
8352 for (l = list; *l != NULL; l++)
8353 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8354 {
8355 default_arch = "x86_64:32";
8356 break;
8357 }
8358 if (*l == NULL)
8359 as_fatal (_("no compiled in support for 32bit x86_64"));
8360 free (list);
8361 }
8362 else
8363 as_fatal (_("32bit x86_64 is only supported for ELF"));
8364 break;
8365 #endif
8366
8367 case OPTION_32:
8368 default_arch = "i386";
8369 break;
8370
8371 case OPTION_DIVIDE:
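      /* --divide: remove `/' from the set of line comment characters so
	 that it can be used as the division operator.  */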
8372 #ifdef SVR4_COMMENT_CHARS
8373 {
8374 char *n, *t;
8375 const char *s;
8376
8377 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8378 t = n;
8379 for (s = i386_comment_chars; *s != '\0'; s++)
8380 if (*s != '/')
8381 *t++ = *s;
8382 *t = '\0';
8383 i386_comment_chars = n;
8384 }
8385 #endif
8386 break;
8387
8388 case OPTION_MARCH:
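      /* The argument is a CPU name optionally followed by `+'-separated
	 ISA extensions, e.g. -march=core2+avx (illustrative).  */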
8389 arch = xstrdup (arg);
8390 do
8391 {
8392 if (*arch == '.')
8393 as_fatal (_("invalid -march= option: `%s'"), arg);
8394 next = strchr (arch, '+');
8395 if (next)
8396 *next++ = '\0';
8397 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8398 {
8399 if (strcmp (arch, cpu_arch [j].name) == 0)
8400 {
8401 /* Processor. */
8402 if (! cpu_arch[j].flags.bitfield.cpui386)
8403 continue;
8404
8405 cpu_arch_name = cpu_arch[j].name;
8406 cpu_sub_arch_name = NULL;
8407 cpu_arch_flags = cpu_arch[j].flags;
8408 cpu_arch_isa = cpu_arch[j].type;
8409 cpu_arch_isa_flags = cpu_arch[j].flags;
8410 if (!cpu_arch_tune_set)
8411 {
8412 cpu_arch_tune = cpu_arch_isa;
8413 cpu_arch_tune_flags = cpu_arch_isa_flags;
8414 }
8415 break;
8416 }
8417 else if (*cpu_arch [j].name == '.'
8418 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8419 {
8420 /* ISA extension. */
8421 i386_cpu_flags flags;
8422
8423 if (!cpu_arch[j].negated)
8424 flags = cpu_flags_or (cpu_arch_flags,
8425 cpu_arch[j].flags);
8426 else
8427 flags = cpu_flags_and_not (cpu_arch_flags,
8428 cpu_arch[j].flags);
8429 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8430 {
8431 if (cpu_sub_arch_name)
8432 {
8433 char *name = cpu_sub_arch_name;
8434 cpu_sub_arch_name = concat (name,
8435 cpu_arch[j].name,
8436 (const char *) NULL);
8437 free (name);
8438 }
8439 else
8440 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8441 cpu_arch_flags = flags;
8442 cpu_arch_isa_flags = flags;
8443 }
8444 break;
8445 }
8446 }
8447
8448 if (j >= ARRAY_SIZE (cpu_arch))
8449 as_fatal (_("invalid -march= option: `%s'"), arg);
8450
8451 arch = next;
8452 }
8453 while (next != NULL);
8454 break;
8455
8456 case OPTION_MTUNE:
8457 if (*arg == '.')
8458 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8459 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8460 {
8461 if (strcmp (arg, cpu_arch [j].name) == 0)
8462 {
8463 cpu_arch_tune_set = 1;
8464 cpu_arch_tune = cpu_arch [j].type;
8465 cpu_arch_tune_flags = cpu_arch[j].flags;
8466 break;
8467 }
8468 }
8469 if (j >= ARRAY_SIZE (cpu_arch))
8470 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8471 break;
8472
8473 case OPTION_MMNEMONIC:
8474 if (strcasecmp (arg, "att") == 0)
8475 intel_mnemonic = 0;
8476 else if (strcasecmp (arg, "intel") == 0)
8477 intel_mnemonic = 1;
8478 else
8479 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8480 break;
8481
8482 case OPTION_MSYNTAX:
8483 if (strcasecmp (arg, "att") == 0)
8484 intel_syntax = 0;
8485 else if (strcasecmp (arg, "intel") == 0)
8486 intel_syntax = 1;
8487 else
8488 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8489 break;
8490
8491 case OPTION_MINDEX_REG:
8492 allow_index_reg = 1;
8493 break;
8494
8495 case OPTION_MNAKED_REG:
8496 allow_naked_reg = 1;
8497 break;
8498
8499 case OPTION_MOLD_GCC:
8500 old_gcc = 1;
8501 break;
8502
8503 case OPTION_MSSE2AVX:
8504 sse2avx = 1;
8505 break;
8506
8507 case OPTION_MSSE_CHECK:
8508 if (strcasecmp (arg, "error") == 0)
8509 sse_check = sse_check_error;
8510 else if (strcasecmp (arg, "warning") == 0)
8511 sse_check = sse_check_warning;
8512 else if (strcasecmp (arg, "none") == 0)
8513 sse_check = sse_check_none;
8514 else
8515 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8516 break;
8517
8518 case OPTION_MAVXSCALAR:
8519 if (strcasecmp (arg, "128") == 0)
8520 avxscalar = vex128;
8521 else if (strcasecmp (arg, "256") == 0)
8522 avxscalar = vex256;
8523 else
8524 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8525 break;
8526
8527 default:
8528 return 0;
8529 }
8530 return 1;
8531 }
8532
8533 #define MESSAGE_TEMPLATE \
8534 " "
8535
8536 static void
8537 show_arch (FILE *stream, int ext, int check)
8538 {
8539 static char message[] = MESSAGE_TEMPLATE;
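  /* Continuation lines are indented so that they roughly line up with the
     option descriptions printed by md_show_usage.  */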
8540 char *start = message + 27;
8541 char *p;
8542 int size = sizeof (MESSAGE_TEMPLATE);
8543 int left;
8544 const char *name;
8545 int len;
8546 unsigned int j;
8547
8548 p = start;
8549 left = size - (start - message);
8550 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8551 {
8552 /* Should it be skipped? */
8553 if (cpu_arch [j].skip)
8554 continue;
8555
8556 name = cpu_arch [j].name;
8557 len = cpu_arch [j].len;
8558 if (*name == '.')
8559 {
8560 /* It is an extension. Skip if we aren't asked to show it. */
8561 if (ext)
8562 {
8563 name++;
8564 len--;
8565 }
8566 else
8567 continue;
8568 }
8569 else if (ext)
8570 {
8571 /* It is a processor.  Skip it if we are only showing extensions. */
8572 continue;
8573 }
8574 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8575 {
8576 /* It is a processor that cannot be selected with -march= - skip it. */
8577 continue;
8578 }
8579
8580 /* Reserve 2 spaces for ", " or ",\0" */
8581 left -= len + 2;
8582
8583 /* Check if there is any room. */
8584 if (left >= 0)
8585 {
8586 if (p != start)
8587 {
8588 *p++ = ',';
8589 *p++ = ' ';
8590 }
8591 p = mempcpy (p, name, len);
8592 }
8593 else
8594 {
8595 /* Output the current message now and start a new one. */
8596 *p++ = ',';
8597 *p = '\0';
8598 fprintf (stream, "%s\n", message);
8599 p = start;
8600 left = size - (start - message) - len - 2;
8601
8602 gas_assert (left >= 0);
8603
8604 p = mempcpy (p, name, len);
8605 }
8606 }
8607
8608 *p = '\0';
8609 fprintf (stream, "%s\n", message);
8610 }
8611
8612 void
8613 md_show_usage (FILE *stream)
8614 {
8615 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8616 fprintf (stream, _("\
8617 -Q ignored\n\
8618 -V print assembler version number\n\
8619 -k ignored\n"));
8620 #endif
8621 fprintf (stream, _("\
8622 -n Do not optimize code alignment\n\
8623 -q quieten some warnings\n"));
8624 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8625 fprintf (stream, _("\
8626 -s ignored\n"));
8627 #endif
8628 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8629 || defined (TE_PE) || defined (TE_PEP))
8630 fprintf (stream, _("\
8631 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8632 #endif
8633 #ifdef SVR4_COMMENT_CHARS
8634 fprintf (stream, _("\
8635 --divide do not treat `/' as a comment character\n"));
8636 #else
8637 fprintf (stream, _("\
8638 --divide ignored\n"));
8639 #endif
8640 fprintf (stream, _("\
8641 -march=CPU[,+EXTENSION...]\n\
8642 generate code for CPU and EXTENSION, CPU is one of:\n"));
8643 show_arch (stream, 0, 1);
8644 fprintf (stream, _("\
8645 EXTENSION is combination of:\n"));
8646 show_arch (stream, 1, 0);
8647 fprintf (stream, _("\
8648 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8649 show_arch (stream, 0, 0);
8650 fprintf (stream, _("\
8651 -msse2avx encode SSE instructions with VEX prefix\n"));
8652 fprintf (stream, _("\
8653 -msse-check=[none|error|warning]\n\
8654 check SSE instructions\n"));
8655 fprintf (stream, _("\
8656 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8657 length\n"));
8658 fprintf (stream, _("\
8659 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8660 fprintf (stream, _("\
8661 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8662 fprintf (stream, _("\
8663 -mindex-reg support pseudo index registers\n"));
8664 fprintf (stream, _("\
8665 -mnaked-reg don't require `%%' prefix for registers\n"));
8666 fprintf (stream, _("\
8667 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8668 }
8669
8670 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8671 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8672 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8673
8674 /* Pick the target format to use. */
8675
8676 const char *
8677 i386_target_format (void)
8678 {
8679 if (!strncmp (default_arch, "x86_64", 6))
8680 {
8681 update_code_flag (CODE_64BIT, 1);
8682 if (default_arch[6] == '\0')
8683 x86_elf_abi = X86_64_ABI;
8684 else
8685 x86_elf_abi = X86_64_X32_ABI;
8686 }
8687 else if (!strcmp (default_arch, "i386"))
8688 update_code_flag (CODE_32BIT, 1);
8689 else
8690 as_fatal (_("unknown architecture"));
8691
8692 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8693 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8694 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8695 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8696
8697 switch (OUTPUT_FLAVOR)
8698 {
8699 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8700 case bfd_target_aout_flavour:
8701 return AOUT_TARGET_FORMAT;
8702 #endif
8703 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8704 # if defined (TE_PE) || defined (TE_PEP)
8705 case bfd_target_coff_flavour:
8706 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8707 # elif defined (TE_GO32)
8708 case bfd_target_coff_flavour:
8709 return "coff-go32";
8710 # else
8711 case bfd_target_coff_flavour:
8712 return "coff-i386";
8713 # endif
8714 #endif
8715 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8716 case bfd_target_elf_flavour:
8717 {
8718 const char *format;
8719
8720 switch (x86_elf_abi)
8721 {
8722 default:
8723 format = ELF_TARGET_FORMAT;
8724 break;
8725 case X86_64_ABI:
8726 use_rela_relocations = 1;
8727 object_64bit = 1;
8728 format = ELF_TARGET_FORMAT64;
8729 break;
8730 case X86_64_X32_ABI:
8731 use_rela_relocations = 1;
8732 object_64bit = 1;
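	    /* x32 objects are ELF32, so 64-bit relocations cannot be
	       represented; tc_gen_reloc rejects them.  */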
8733 disallow_64bit_reloc = 1;
8734 format = ELF_TARGET_FORMAT32;
8735 break;
8736 }
8737 if (cpu_arch_isa == PROCESSOR_L1OM)
8738 {
8739 if (x86_elf_abi != X86_64_ABI)
8740 as_fatal (_("Intel L1OM is 64bit only"));
8741 return ELF_TARGET_L1OM_FORMAT;
8742 }
8743 else
8744 return format;
8745 }
8746 #endif
8747 #if defined (OBJ_MACH_O)
8748 case bfd_target_mach_o_flavour:
8749 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8750 #endif
8751 default:
8752 abort ();
8753 return NULL;
8754 }
8755 }
8756
8757 #endif /* OBJ_MAYBE_ more than one */
8758
8759 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8760 void
8761 i386_elf_emit_arch_note (void)
8762 {
8763 if (IS_ELF && cpu_arch_name != NULL)
8764 {
8765 char *p;
8766 asection *seg = now_seg;
8767 subsegT subseg = now_subseg;
8768 Elf_Internal_Note i_note;
8769 Elf_External_Note e_note;
8770 asection *note_secp;
8771 int len;
8772
8773 /* Create the .note section. */
8774 note_secp = subseg_new (".note", 0);
8775 bfd_set_section_flags (stdoutput,
8776 note_secp,
8777 SEC_HAS_CONTENTS | SEC_READONLY);
8778
8779 /* Process the arch string. */
8780 len = strlen (cpu_arch_name);
8781
8782 i_note.namesz = len + 1;
8783 i_note.descsz = 0;
8784 i_note.type = NT_ARCH;
8785 p = frag_more (sizeof (e_note.namesz));
8786 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8787 p = frag_more (sizeof (e_note.descsz));
8788 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8789 p = frag_more (sizeof (e_note.type));
8790 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8791 p = frag_more (len + 1);
8792 strcpy (p, cpu_arch_name);
8793
8794 frag_align (2, 0, 0);
8795
8796 subseg_set (seg, subseg);
8797 }
8798 }
8799 #endif
8800 \f
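/* Handle an otherwise undefined symbol.  The only one we care about is
   _GLOBAL_OFFSET_TABLE_, which is created lazily on first reference.  */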
8801 symbolS *
8802 md_undefined_symbol (char *name)
8803 {
8804 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8805 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8806 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8807 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8808 {
8809 if (!GOT_symbol)
8810 {
8811 if (symbol_find (name))
8812 as_bad (_("GOT already in symbol table"));
8813 GOT_symbol = symbol_new (name, undefined_section,
8814 (valueT) 0, &zero_address_frag);
8815 }
8816 return GOT_symbol;
8817 }
8818 return 0;
8819 }
8820
8821 /* Round up a section size to the appropriate boundary. */
8822
8823 valueT
8824 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8825 {
8826 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8827 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8828 {
8829 /* For a.out, force the section size to be aligned. If we don't do
8830 this, BFD will align it for us, but it will not write out the
8831 final bytes of the section. This may be a bug in BFD, but it is
8832 easier to fix it here since that is how the other a.out targets
8833 work. */
8834 int align;
8835
8836 align = bfd_get_section_alignment (stdoutput, segment);
8837 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8838 }
8839 #endif
8840
8841 return size;
8842 }
8843
8844 /* On the i386, PC-relative offsets are relative to the start of the
8845 next instruction. That is, the address of the offset, plus its
8846 size, since the offset is always the last part of the insn. */
8847
8848 long
8849 md_pcrel_from (fixS *fixP)
8850 {
8851 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8852 }
8853
8854 #ifndef I386COFF
8855
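/* Handle the .bss pseudo-op: switch output to the given subsegment of the
   bss section.  */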
8856 static void
8857 s_bss (int ignore ATTRIBUTE_UNUSED)
8858 {
8859 int temp;
8860
8861 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8862 if (IS_ELF)
8863 obj_elf_section_change_hook ();
8864 #endif
8865 temp = get_absolute_expression ();
8866 subseg_set (bss_section, (subsegT) temp);
8867 demand_empty_rest_of_line ();
8868 }
8869
8870 #endif
8871
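/* If a fixup subtracts the GOT base symbol (sym - _GLOBAL_OFFSET_TABLE_),
   turn it into the appropriate GOT-relative relocation and drop the
   subtrahend.  */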
8872 void
8873 i386_validate_fix (fixS *fixp)
8874 {
8875 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8876 {
8877 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8878 {
8879 if (!object_64bit)
8880 abort ();
8881 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8882 }
8883 else
8884 {
8885 if (!object_64bit)
8886 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8887 else
8888 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8889 }
8890 fixp->fx_subsy = 0;
8891 }
8892 }
8893
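/* Translate an internal fixup into a BFD relocation entry for the object
   file writer.  */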
8894 arelent *
8895 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8896 {
8897 arelent *rel;
8898 bfd_reloc_code_real_type code;
8899
8900 switch (fixp->fx_r_type)
8901 {
8902 case BFD_RELOC_X86_64_PLT32:
8903 case BFD_RELOC_X86_64_GOT32:
8904 case BFD_RELOC_X86_64_GOTPCREL:
8905 case BFD_RELOC_386_PLT32:
8906 case BFD_RELOC_386_GOT32:
8907 case BFD_RELOC_386_GOTOFF:
8908 case BFD_RELOC_386_GOTPC:
8909 case BFD_RELOC_386_TLS_GD:
8910 case BFD_RELOC_386_TLS_LDM:
8911 case BFD_RELOC_386_TLS_LDO_32:
8912 case BFD_RELOC_386_TLS_IE_32:
8913 case BFD_RELOC_386_TLS_IE:
8914 case BFD_RELOC_386_TLS_GOTIE:
8915 case BFD_RELOC_386_TLS_LE_32:
8916 case BFD_RELOC_386_TLS_LE:
8917 case BFD_RELOC_386_TLS_GOTDESC:
8918 case BFD_RELOC_386_TLS_DESC_CALL:
8919 case BFD_RELOC_X86_64_TLSGD:
8920 case BFD_RELOC_X86_64_TLSLD:
8921 case BFD_RELOC_X86_64_DTPOFF32:
8922 case BFD_RELOC_X86_64_DTPOFF64:
8923 case BFD_RELOC_X86_64_GOTTPOFF:
8924 case BFD_RELOC_X86_64_TPOFF32:
8925 case BFD_RELOC_X86_64_TPOFF64:
8926 case BFD_RELOC_X86_64_GOTOFF64:
8927 case BFD_RELOC_X86_64_GOTPC32:
8928 case BFD_RELOC_X86_64_GOT64:
8929 case BFD_RELOC_X86_64_GOTPCREL64:
8930 case BFD_RELOC_X86_64_GOTPC64:
8931 case BFD_RELOC_X86_64_GOTPLT64:
8932 case BFD_RELOC_X86_64_PLTOFF64:
8933 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8934 case BFD_RELOC_X86_64_TLSDESC_CALL:
8935 case BFD_RELOC_RVA:
8936 case BFD_RELOC_VTABLE_ENTRY:
8937 case BFD_RELOC_VTABLE_INHERIT:
8938 #ifdef TE_PE
8939 case BFD_RELOC_32_SECREL:
8940 #endif
8941 code = fixp->fx_r_type;
8942 break;
8943 case BFD_RELOC_X86_64_32S:
8944 if (!fixp->fx_pcrel)
8945 {
8946 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8947 code = fixp->fx_r_type;
8948 break;
8949 }
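      /* Fall through.  */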
8950 default:
8951 if (fixp->fx_pcrel)
8952 {
8953 switch (fixp->fx_size)
8954 {
8955 default:
8956 as_bad_where (fixp->fx_file, fixp->fx_line,
8957 _("can not do %d byte pc-relative relocation"),
8958 fixp->fx_size);
8959 code = BFD_RELOC_32_PCREL;
8960 break;
8961 case 1: code = BFD_RELOC_8_PCREL; break;
8962 case 2: code = BFD_RELOC_16_PCREL; break;
8963 case 4: code = BFD_RELOC_32_PCREL; break;
8964 #ifdef BFD64
8965 case 8: code = BFD_RELOC_64_PCREL; break;
8966 #endif
8967 }
8968 }
8969 else
8970 {
8971 switch (fixp->fx_size)
8972 {
8973 default:
8974 as_bad_where (fixp->fx_file, fixp->fx_line,
8975 _("can not do %d byte relocation"),
8976 fixp->fx_size);
8977 code = BFD_RELOC_32;
8978 break;
8979 case 1: code = BFD_RELOC_8; break;
8980 case 2: code = BFD_RELOC_16; break;
8981 case 4: code = BFD_RELOC_32; break;
8982 #ifdef BFD64
8983 case 8: code = BFD_RELOC_64; break;
8984 #endif
8985 }
8986 }
8987 break;
8988 }
8989
8990 if ((code == BFD_RELOC_32
8991 || code == BFD_RELOC_32_PCREL
8992 || code == BFD_RELOC_X86_64_32S)
8993 && GOT_symbol
8994 && fixp->fx_addsy == GOT_symbol)
8995 {
8996 if (!object_64bit)
8997 code = BFD_RELOC_386_GOTPC;
8998 else
8999 code = BFD_RELOC_X86_64_GOTPC32;
9000 }
9001 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9002 && GOT_symbol
9003 && fixp->fx_addsy == GOT_symbol)
9004 {
9005 code = BFD_RELOC_X86_64_GOTPC64;
9006 }
9007
9008 rel = (arelent *) xmalloc (sizeof (arelent));
9009 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9010 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9011
9012 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9013
9014 if (!use_rela_relocations)
9015 {
9016 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9017 vtable entry to be used in the relocation's section offset. */
9018 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9019 rel->address = fixp->fx_offset;
9020 #if defined (OBJ_COFF) && defined (TE_PE)
9021 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9022 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9023 else
9024 #endif
9025 rel->addend = 0;
9026 }
9027 /* Use RELA relocations in 64-bit mode. */
9028 else
9029 {
9030 if (disallow_64bit_reloc)
9031 switch (code)
9032 {
9033 case BFD_RELOC_64:
9034 case BFD_RELOC_X86_64_DTPOFF64:
9035 case BFD_RELOC_X86_64_TPOFF64:
9036 case BFD_RELOC_64_PCREL:
9037 case BFD_RELOC_X86_64_GOTOFF64:
9038 case BFD_RELOC_X86_64_GOT64:
9039 case BFD_RELOC_X86_64_GOTPCREL64:
9040 case BFD_RELOC_X86_64_GOTPC64:
9041 case BFD_RELOC_X86_64_GOTPLT64:
9042 case BFD_RELOC_X86_64_PLTOFF64:
9043 as_bad_where (fixp->fx_file, fixp->fx_line,
9044 _("cannot represent relocation type %s in x32 mode"),
9045 bfd_get_reloc_code_name (code));
9046 break;
9047 default:
9048 break;
9049 }
9050
9051 if (!fixp->fx_pcrel)
9052 rel->addend = fixp->fx_offset;
9053 else
9054 switch (code)
9055 {
9056 case BFD_RELOC_X86_64_PLT32:
9057 case BFD_RELOC_X86_64_GOT32:
9058 case BFD_RELOC_X86_64_GOTPCREL:
9059 case BFD_RELOC_X86_64_TLSGD:
9060 case BFD_RELOC_X86_64_TLSLD:
9061 case BFD_RELOC_X86_64_GOTTPOFF:
9062 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9063 case BFD_RELOC_X86_64_TLSDESC_CALL:
9064 rel->addend = fixp->fx_offset - fixp->fx_size;
9065 break;
9066 default:
9067 rel->addend = (section->vma
9068 - fixp->fx_size
9069 + fixp->fx_addnumber
9070 + md_pcrel_from (fixp));
9071 break;
9072 }
9073 }
9074
9075 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9076 if (rel->howto == NULL)
9077 {
9078 as_bad_where (fixp->fx_file, fixp->fx_line,
9079 _("cannot represent relocation type %s"),
9080 bfd_get_reloc_code_name (code));
9081 /* Set howto to a garbage value so that we can keep going. */
9082 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9083 gas_assert (rel->howto != NULL);
9084 }
9085
9086 return rel;
9087 }
9088
9089 #include "tc-i386-intel.c"
9090
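/* Parse a register name in a CFI directive and convert it to its DWARF2
   register number for the current code size.  */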
9091 void
9092 tc_x86_parse_to_dw2regnum (expressionS *exp)
9093 {
9094 int saved_naked_reg;
9095 char saved_register_dot;
9096
9097 saved_naked_reg = allow_naked_reg;
9098 allow_naked_reg = 1;
9099 saved_register_dot = register_chars['.'];
9100 register_chars['.'] = '.';
9101 allow_pseudo_reg = 1;
9102 expression_and_evaluate (exp);
9103 allow_pseudo_reg = 0;
9104 register_chars['.'] = saved_register_dot;
9105 allow_naked_reg = saved_naked_reg;
9106
9107 if (exp->X_op == O_register && exp->X_add_number >= 0)
9108 {
9109 if ((addressT) exp->X_add_number < i386_regtab_size)
9110 {
9111 exp->X_op = O_constant;
9112 exp->X_add_number = i386_regtab[exp->X_add_number]
9113 .dw2_regnum[flag_code >> 1];
9114 }
9115 else
9116 exp->X_op = O_illegal;
9117 }
9118 }
9119
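/* Emit the initial CFI instructions for an FDE: the CFA is the stack
   pointer plus the address size, and the return address is saved just
   below the CFA.  */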
9120 void
9121 tc_x86_frame_initial_instructions (void)
9122 {
9123 static unsigned int sp_regno[2];
9124
9125 if (!sp_regno[flag_code >> 1])
9126 {
9127 char *saved_input = input_line_pointer;
9128 char sp[][4] = {"esp", "rsp"};
9129 expressionS exp;
9130
9131 input_line_pointer = sp[flag_code >> 1];
9132 tc_x86_parse_to_dw2regnum (&exp);
9133 gas_assert (exp.X_op == O_constant);
9134 sp_regno[flag_code >> 1] = exp.X_add_number;
9135 input_line_pointer = saved_input;
9136 }
9137
9138 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9139 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9140 }
9141
9142 int
9143 i386_elf_section_type (const char *str, size_t len)
9144 {
9145 if (flag_code == CODE_64BIT
9146 && len == sizeof ("unwind") - 1
9147 && strncmp (str, "unwind", 6) == 0)
9148 return SHT_X86_64_UNWIND;
9149
9150 return -1;
9151 }
9152
9153 #ifdef TE_SOLARIS
9154 void
9155 i386_solaris_fix_up_eh_frame (segT sec)
9156 {
9157 if (flag_code == CODE_64BIT)
9158 elf_section_type (sec) = SHT_X86_64_UNWIND;
9159 }
9160 #endif
9161
9162 #ifdef TE_PE
9163 void
9164 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9165 {
9166 expressionS exp;
9167
9168 exp.X_op = O_secrel;
9169 exp.X_add_symbol = symbol;
9170 exp.X_add_number = 0;
9171 emit_expr (&exp, size);
9172 }
9173 #endif
9174
9175 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9176 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
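/* The flag is requested with the section letter `l' or the keyword
   "large", e.g. (illustrative):  .section .ldata,"awl",@progbits  */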
9177
9178 bfd_vma
9179 x86_64_section_letter (int letter, char **ptr_msg)
9180 {
9181 if (flag_code == CODE_64BIT)
9182 {
9183 if (letter == 'l')
9184 return SHF_X86_64_LARGE;
9185
9186 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9187 }
9188 else
9189 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9190 return -1;
9191 }
9192
9193 bfd_vma
9194 x86_64_section_word (char *str, size_t len)
9195 {
9196 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9197 return SHF_X86_64_LARGE;
9198
9199 return -1;
9200 }
9201
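/* Handle the .largecomm directive, e.g. `.largecomm buf, 1048576'
   (illustrative): like .comm, but in 64-bit mode the symbol goes into the
   large common section (or .lbss when it ends up local).  */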
9202 static void
9203 handle_large_common (int small ATTRIBUTE_UNUSED)
9204 {
9205 if (flag_code != CODE_64BIT)
9206 {
9207 s_comm_internal (0, elf_common_parse);
9208 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9209 }
9210 else
9211 {
9212 static segT lbss_section;
9213 asection *saved_com_section_ptr = elf_com_section_ptr;
9214 asection *saved_bss_section = bss_section;
9215
9216 if (lbss_section == NULL)
9217 {
9218 flagword applicable;
9219 segT seg = now_seg;
9220 subsegT subseg = now_subseg;
9221
9222 /* The .lbss section is for local .largecomm symbols. */
9223 lbss_section = subseg_new (".lbss", 0);
9224 applicable = bfd_applicable_section_flags (stdoutput);
9225 bfd_set_section_flags (stdoutput, lbss_section,
9226 applicable & SEC_ALLOC);
9227 seg_info (lbss_section)->bss = 1;
9228
9229 subseg_set (seg, subseg);
9230 }
9231
9232 elf_com_section_ptr = &_bfd_elf_large_com_section;
9233 bss_section = lbss_section;
9234
9235 s_comm_internal (0, elf_common_parse);
9236
9237 elf_com_section_ptr = saved_com_section_ptr;
9238 bss_section = saved_bss_section;
9239 }
9240 }
9241
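/* Handle .quad for the x32 ABI: a non-constant value is emitted as a
   32-bit quantity and zero-extended to 64 bits, since 64-bit relocations
   are not available in x32.  */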
9242 static void
9243 handle_quad (int nbytes)
9244 {
9245 expressionS exp;
9246
9247 if (x86_elf_abi != X86_64_X32_ABI)
9248 {
9249 cons (nbytes);
9250 return;
9251 }
9252
9253 if (is_it_end_of_statement ())
9254 {
9255 demand_empty_rest_of_line ();
9256 return;
9257 }
9258
9259 do
9260 {
9261 if (*input_line_pointer == '"')
9262 {
9263 as_bad (_("unexpected `\"' in expression"));
9264 ignore_rest_of_line ();
9265 return;
9266 }
9267 x86_cons (&exp, nbytes);
9268 /* Output 4 bytes if not constant. */
9269 if (exp.X_op != O_constant)
9270 nbytes = 4;
9271 emit_expr (&exp, (unsigned int) nbytes);
9272 /* Zero-extend to 8 bytes if not a constant. */
9273 if (nbytes == 4)
9274 {
9275 memset (&exp, '\0', sizeof (exp));
9276 exp.X_op = O_constant;
9277 emit_expr (&exp, nbytes);
9278 }
9279 nbytes = 8;
9280 }
9281 while (*input_line_pointer++ == ',');
9282
9283 input_line_pointer--; /* Put terminator back into stream. */
9284
9285 demand_empty_rest_of_line ();
9286 }
9287 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */