1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
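/* Illustrative note, not part of the original source: i.prefix[] is indexed
   by the slots above and the bytes are emitted in slot order, so an insn
   carrying both a segment override and a LOCK prefix gets the segment byte
   written before the 0xf0 LOCK byte, and a REX byte, when present, is the
   last prefix before the opcode.  */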
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
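/* Illustrative note, not part of the original source: these three fields
   follow the standard ModRM layout and are packed into a single byte as
   (mode << 6) | (reg << 3) | regmem; mode 3 selects a register operand,
   modes 0..2 select memory with no, 8-bit or full-size displacement.  */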
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
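/* Illustrative note, not part of the original source: these follow the
   standard SIB layout, packed as (scale << 6) | (index << 3) | base, where
   scale is log2 of the scale factor (0..3 for 1, 2, 4, 8).  */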
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 enum processor_type type; /* arch type */
132 i386_cpu_flags flags; /* cpu feature flags */
133 }
134 arch_entry;
135
136 static void set_code_flag (int);
137 static void set_16bit_gcc_code_flag (int);
138 static void set_intel_syntax (int);
139 static void set_intel_mnemonic (int);
140 static void set_allow_index_reg (int);
141 static void set_sse_check (int);
142 static void set_cpu_arch (int);
143 #ifdef TE_PE
144 static void pe_directive_secrel (int);
145 #endif
146 static void signed_cons (int);
147 static char *output_invalid (int c);
148 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
149 const char *);
150 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
151 const char *);
152 static int i386_att_operand (char *);
153 static int i386_intel_operand (char *, int);
154 static int i386_intel_simplify (expressionS *);
155 static int i386_intel_parse_name (const char *, expressionS *);
156 static const reg_entry *parse_register (char *, char **);
157 static char *parse_insn (char *, char *);
158 static char *parse_operands (char *, const char *);
159 static void swap_operands (void);
160 static void swap_2_operands (int, int);
161 static void optimize_imm (void);
162 static void optimize_disp (void);
163 static const insn_template *match_template (void);
164 static int check_string (void);
165 static int process_suffix (void);
166 static int check_byte_reg (void);
167 static int check_long_reg (void);
168 static int check_qword_reg (void);
169 static int check_word_reg (void);
170 static int finalize_imm (void);
171 static int process_operands (void);
172 static const seg_entry *build_modrm_byte (void);
173 static void output_insn (void);
174 static void output_imm (fragS *, offsetT);
175 static void output_disp (fragS *, offsetT);
176 #ifndef I386COFF
177 static void s_bss (int);
178 #endif
179 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
180 static void handle_large_common (int small ATTRIBUTE_UNUSED);
181 #endif
182
183 static const char *default_arch = DEFAULT_ARCH;
184
185 /* VEX prefix. */
186 typedef struct
187 {
188 /* VEX prefix is either 2 byte or 3 byte. */
189 unsigned char bytes[3];
190 unsigned int length;
191 /* Destination or source register specifier. */
192 const reg_entry *register_specifier;
193 } vex_prefix;
194
195 /* 'md_assemble ()' gathers together information and puts it into an
196 i386_insn. */
197
198 union i386_op
199 {
200 expressionS *disps;
201 expressionS *imms;
202 const reg_entry *regs;
203 };
204
205 struct _i386_insn
206 {
207 /* TM holds the template for the insn we're currently assembling. */
208 insn_template tm;
209
210 /* SUFFIX holds the instruction size suffix for byte, word, dword
211 or qword, if given. */
212 char suffix;
213
214 /* OPERANDS gives the number of given operands. */
215 unsigned int operands;
216
217 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
218 of given register, displacement, memory operands and immediate
219 operands. */
220 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
221
222 /* TYPES [i] is the type (see above #defines) which tells us how to
223 use OP[i] for the corresponding operand. */
224 i386_operand_type types[MAX_OPERANDS];
225
226 /* Displacement expression, immediate expression, or register for each
227 operand. */
228 union i386_op op[MAX_OPERANDS];
229
230 /* Flags for operands. */
231 unsigned int flags[MAX_OPERANDS];
232 #define Operand_PCrel 1
233
234 /* Relocation type for operand */
235 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
236
237 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
238 the base index byte below. */
239 const reg_entry *base_reg;
240 const reg_entry *index_reg;
241 unsigned int log2_scale_factor;
242
243 /* SEG gives the seg_entries of this insn. They are zero unless
244 explicit segment overrides are given. */
245 const seg_entry *seg[2];
246
247 /* PREFIX holds all the given prefix opcodes (usually null).
248 PREFIXES is the number of prefix opcodes. */
249 unsigned int prefixes;
250 unsigned char prefix[MAX_PREFIXES];
251
252 /* RM and SIB are the modrm byte and the sib byte where the
253 addressing modes of this insn are encoded. */
254 modrm_byte rm;
255 rex_byte rex;
256 sib_byte sib;
257 vex_prefix vex;
258
259 /* Swap operand in encoding. */
260 unsigned int swap_operand;
261 };
262
263 typedef struct _i386_insn i386_insn;
264
265 /* List of chars besides those in app.c:symbol_chars that can start an
266 operand. Used to prevent the scrubber eating vital white-space. */
267 const char extra_symbol_chars[] = "*%-(["
268 #ifdef LEX_AT
269 "@"
270 #endif
271 #ifdef LEX_QM
272 "?"
273 #endif
274 ;
275
276 #if (defined (TE_I386AIX) \
277 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
278 && !defined (TE_GNU) \
279 && !defined (TE_LINUX) \
280 && !defined (TE_NETWARE) \
281 && !defined (TE_FreeBSD) \
282 && !defined (TE_NetBSD)))
283 /* This array holds the chars that always start a comment. If the
284 pre-processor is disabled, these aren't very useful. The option
285 --divide will remove '/' from this list. */
286 const char *i386_comment_chars = "#/";
287 #define SVR4_COMMENT_CHARS 1
288 #define PREFIX_SEPARATOR '\\'
289
290 #else
291 const char *i386_comment_chars = "#";
292 #define PREFIX_SEPARATOR '/'
293 #endif
294
295 /* This array holds the chars that only start a comment at the beginning of
296 a line. If the line seems to have the form '# 123 filename'
297 .line and .file directives will appear in the pre-processed output.
298 Note that input_file.c hand checks for '#' at the beginning of the
299 first line of the input file. This is because the compiler outputs
300 #NO_APP at the beginning of its output.
301 Also note that comments started like this one will always work if
302 '/' isn't otherwise defined. */
303 const char line_comment_chars[] = "#/";
304
305 const char line_separator_chars[] = ";";
306
307 /* Chars that can be used to separate mant from exp in floating point
308 nums. */
309 const char EXP_CHARS[] = "eE";
310
311 /* Chars that mean this number is a floating point constant
312 As in 0f12.456
313 or 0d1.2345e12. */
314 const char FLT_CHARS[] = "fFdDxX";
315
316 /* Tables for lexical analysis. */
317 static char mnemonic_chars[256];
318 static char register_chars[256];
319 static char operand_chars[256];
320 static char identifier_chars[256];
321 static char digit_chars[256];
322
323 /* Lexical macros. */
324 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
325 #define is_operand_char(x) (operand_chars[(unsigned char) x])
326 #define is_register_char(x) (register_chars[(unsigned char) x])
327 #define is_space_char(x) ((x) == ' ')
328 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
329 #define is_digit_char(x) (digit_chars[(unsigned char) x])
330
331 /* All non-digit non-letter characters that may occur in an operand. */
332 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
333
334 /* md_assemble() always leaves the strings it's passed unaltered. To
335 effect this we maintain a stack of saved characters that we've smashed
336 with '\0's (indicating end of strings for various sub-fields of the
337 assembler instruction). */
338 static char save_stack[32];
339 static char *save_stack_p;
340 #define END_STRING_AND_SAVE(s) \
341 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
342 #define RESTORE_END_STRING(s) \
343 do { *(s) = *--save_stack_p; } while (0)
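/* Usage sketch, not part of the original source; the two macros always
   bracket a temporary NUL termination:

     END_STRING_AND_SAVE (l);    -- *l saved on save_stack, replaced by '\0'
     ... parse the now NUL-terminated substring ending at l ...
     RESTORE_END_STRING (l);     -- the saved character is put back

   Pushes and pops must balance, and save_stack holds at most 32 characters.  */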
344
345 /* The instruction we're assembling. */
346 static i386_insn i;
347
348 /* Possible templates for current insn. */
349 static const templates *current_templates;
350
351 /* Per instruction expressionS buffers: max displacements & immediates. */
352 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
353 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
354
355 /* Current operand we are working on. */
356 static int this_operand = -1;
357
358 /* We support four different modes. FLAG_CODE variable is used to distinguish
359 these. */
360
361 enum flag_code {
362 CODE_32BIT,
363 CODE_16BIT,
364 CODE_64BIT };
365
366 static enum flag_code flag_code;
367 static unsigned int object_64bit;
368 static int use_rela_relocations = 0;
369
370 /* The names used to print error messages. */
371 static const char *flag_code_names[] =
372 {
373 "32",
374 "16",
375 "64"
376 };
377
378 /* 1 for intel syntax,
379 0 if att syntax. */
380 static int intel_syntax = 0;
381
382 /* 1 for intel mnemonic,
383 0 if att mnemonic. */
384 static int intel_mnemonic = !SYSV386_COMPAT;
385
386 /* 1 if support old (<= 2.8.1) versions of gcc. */
387 static int old_gcc = OLDGCC_COMPAT;
388
389 /* 1 if pseudo registers are permitted. */
390 static int allow_pseudo_reg = 0;
391
392 /* 1 if register prefix % not required. */
393 static int allow_naked_reg = 0;
394
395 /* 1 if pseudo index register, eiz/riz, is allowed. */
396 static int allow_index_reg = 0;
397
398 static enum
399 {
400 sse_check_none = 0,
401 sse_check_warning,
402 sse_check_error
403 }
404 sse_check;
405
406 /* Register prefix used for error message. */
407 static const char *register_prefix = "%";
408
409 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
410 leave, push, and pop instructions so that gcc has the same stack
411 frame as in 32 bit mode. */
412 static char stackop_size = '\0';
413
414 /* Non-zero to optimize code alignment. */
415 int optimize_align_code = 1;
416
417 /* Non-zero to quieten some warnings. */
418 static int quiet_warnings = 0;
419
420 /* CPU name. */
421 static const char *cpu_arch_name = NULL;
422 static char *cpu_sub_arch_name = NULL;
423
424 /* CPU feature flags. */
425 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
426
427 /* If we have selected a cpu we are generating instructions for. */
428 static int cpu_arch_tune_set = 0;
429
430 /* Cpu we are generating instructions for. */
431 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
432
433 /* CPU feature flags of cpu we are generating instructions for. */
434 static i386_cpu_flags cpu_arch_tune_flags;
435
436 /* CPU instruction set architecture used. */
437 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
438
439 /* CPU feature flags of instruction set architecture used. */
440 i386_cpu_flags cpu_arch_isa_flags;
441
442 /* If set, conditional jumps are not automatically promoted to handle
443 offsets larger than a byte. */
444 static unsigned int no_cond_jump_promotion = 0;
445
446 /* Encode SSE instructions with VEX prefix. */
447 static unsigned int sse2avx;
448
449 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
450 static symbolS *GOT_symbol;
451
452 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
453 unsigned int x86_dwarf2_return_column;
454
455 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
456 int x86_cie_data_alignment;
457
458 /* Interface to relax_segment.
459 There are 3 major relax states for 386 jump insns because the
460 different types of jumps add different sizes to frags when we're
461 figuring out what sort of jump to choose to reach a given label. */
462
463 /* Types. */
464 #define UNCOND_JUMP 0
465 #define COND_JUMP 1
466 #define COND_JUMP86 2
467
468 /* Sizes. */
469 #define CODE16 1
470 #define SMALL 0
471 #define SMALL16 (SMALL | CODE16)
472 #define BIG 2
473 #define BIG16 (BIG | CODE16)
474
475 #ifndef INLINE
476 #ifdef __GNUC__
477 #define INLINE __inline__
478 #else
479 #define INLINE
480 #endif
481 #endif
482
483 #define ENCODE_RELAX_STATE(type, size) \
484 ((relax_substateT) (((type) << 2) | (size)))
485 #define TYPE_FROM_RELAX_STATE(s) \
486 ((s) >> 2)
487 #define DISP_SIZE_FROM_RELAX_STATE(s) \
488 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
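/* Worked example, not part of the original source:
   ENCODE_RELAX_STATE (COND_JUMP, SMALL) = (1 << 2) | 0 = 4 and
   ENCODE_RELAX_STATE (COND_JUMP, BIG) = (1 << 2) | 2 = 6, which indexes the
   md_relax_table entry below with a 5 byte variable part;
   TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP and
   DISP_SIZE_FROM_RELAX_STATE (6) gives the 4 byte displacement.  */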
489
490 /* This table is used by relax_frag to promote short jumps to long
491 ones where necessary. SMALL (short) jumps may be promoted to BIG
492 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
493 don't allow a short jump in a 32 bit code segment to be promoted to
494 a 16 bit offset jump because it's slower (requires data size
495 prefix), and doesn't work, unless the destination is in the bottom
496 64k of the code segment (The top 16 bits of eip are zeroed). */
497
498 const relax_typeS md_relax_table[] =
499 {
500 /* The fields are:
501 1) most positive reach of this state,
502 2) most negative reach of this state,
503 3) how many bytes this mode will have in the variable part of the frag
504 4) which index into the table to try if we can't fit into this one. */
505
506 /* UNCOND_JUMP states. */
507 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
508 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
509 /* dword jmp adds 4 bytes to frag:
510 0 extra opcode bytes, 4 displacement bytes. */
511 {0, 0, 4, 0},
512 /* word jmp adds 2 bytes to frag:
513 0 extra opcode bytes, 2 displacement bytes. */
514 {0, 0, 2, 0},
515
516 /* COND_JUMP states. */
517 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
518 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
519 /* dword conditionals add 5 bytes to frag:
520 1 extra opcode byte, 4 displacement bytes. */
521 {0, 0, 5, 0},
522 /* word conditionals add 3 bytes to frag:
523 1 extra opcode byte, 2 displacement bytes. */
524 {0, 0, 3, 0},
525
526 /* COND_JUMP86 states. */
527 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
528 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
529 /* dword conditionals add 5 bytes to frag:
530 1 extra opcode byte, 4 displacement bytes. */
531 {0, 0, 5, 0},
532 /* word conditionals add 4 bytes to frag:
533 1 displacement byte and a 3 byte long branch insn. */
534 {0, 0, 4, 0}
535 };
536
537 static const arch_entry cpu_arch[] =
538 {
539 { "generic32", PROCESSOR_GENERIC32,
540 CPU_GENERIC32_FLAGS },
541 { "generic64", PROCESSOR_GENERIC64,
542 CPU_GENERIC64_FLAGS },
543 { "i8086", PROCESSOR_UNKNOWN,
544 CPU_NONE_FLAGS },
545 { "i186", PROCESSOR_UNKNOWN,
546 CPU_I186_FLAGS },
547 { "i286", PROCESSOR_UNKNOWN,
548 CPU_I286_FLAGS },
549 { "i386", PROCESSOR_I386,
550 CPU_I386_FLAGS },
551 { "i486", PROCESSOR_I486,
552 CPU_I486_FLAGS },
553 { "i586", PROCESSOR_PENTIUM,
554 CPU_I586_FLAGS },
555 { "i686", PROCESSOR_PENTIUMPRO,
556 CPU_I686_FLAGS },
557 { "pentium", PROCESSOR_PENTIUM,
558 CPU_I586_FLAGS },
559 { "pentiumpro", PROCESSOR_PENTIUMPRO,
560 CPU_I686_FLAGS },
561 { "pentiumii", PROCESSOR_PENTIUMPRO,
562 CPU_P2_FLAGS },
563 { "pentiumiii",PROCESSOR_PENTIUMPRO,
564 CPU_P3_FLAGS },
565 { "pentium4", PROCESSOR_PENTIUM4,
566 CPU_P4_FLAGS },
567 { "prescott", PROCESSOR_NOCONA,
568 CPU_CORE_FLAGS },
569 { "nocona", PROCESSOR_NOCONA,
570 CPU_NOCONA_FLAGS },
571 { "yonah", PROCESSOR_CORE,
572 CPU_CORE_FLAGS },
573 { "core", PROCESSOR_CORE,
574 CPU_CORE_FLAGS },
575 { "merom", PROCESSOR_CORE2,
576 CPU_CORE2_FLAGS },
577 { "core2", PROCESSOR_CORE2,
578 CPU_CORE2_FLAGS },
579 { "corei7", PROCESSOR_COREI7,
580 CPU_COREI7_FLAGS },
581 { "l1om", PROCESSOR_L1OM,
582 CPU_L1OM_FLAGS },
583 { "k6", PROCESSOR_K6,
584 CPU_K6_FLAGS },
585 { "k6_2", PROCESSOR_K6,
586 CPU_K6_2_FLAGS },
587 { "athlon", PROCESSOR_ATHLON,
588 CPU_ATHLON_FLAGS },
589 { "sledgehammer", PROCESSOR_K8,
590 CPU_K8_FLAGS },
591 { "opteron", PROCESSOR_K8,
592 CPU_K8_FLAGS },
593 { "k8", PROCESSOR_K8,
594 CPU_K8_FLAGS },
595 { "amdfam10", PROCESSOR_AMDFAM10,
596 CPU_AMDFAM10_FLAGS },
597 { ".8087", PROCESSOR_UNKNOWN,
598 CPU_8087_FLAGS },
599 { ".287", PROCESSOR_UNKNOWN,
600 CPU_287_FLAGS },
601 { ".387", PROCESSOR_UNKNOWN,
602 CPU_387_FLAGS },
603 { ".no87", PROCESSOR_UNKNOWN,
604 CPU_ANY87_FLAGS },
605 { ".mmx", PROCESSOR_UNKNOWN,
606 CPU_MMX_FLAGS },
607 { ".nommx", PROCESSOR_UNKNOWN,
608 CPU_3DNOWA_FLAGS },
609 { ".sse", PROCESSOR_UNKNOWN,
610 CPU_SSE_FLAGS },
611 { ".sse2", PROCESSOR_UNKNOWN,
612 CPU_SSE2_FLAGS },
613 { ".sse3", PROCESSOR_UNKNOWN,
614 CPU_SSE3_FLAGS },
615 { ".ssse3", PROCESSOR_UNKNOWN,
616 CPU_SSSE3_FLAGS },
617 { ".sse4.1", PROCESSOR_UNKNOWN,
618 CPU_SSE4_1_FLAGS },
619 { ".sse4.2", PROCESSOR_UNKNOWN,
620 CPU_SSE4_2_FLAGS },
621 { ".sse4", PROCESSOR_UNKNOWN,
622 CPU_SSE4_2_FLAGS },
623 { ".nosse", PROCESSOR_UNKNOWN,
624 CPU_ANY_SSE_FLAGS },
625 { ".avx", PROCESSOR_UNKNOWN,
626 CPU_AVX_FLAGS },
627 { ".noavx", PROCESSOR_UNKNOWN,
628 CPU_ANY_AVX_FLAGS },
629 { ".vmx", PROCESSOR_UNKNOWN,
630 CPU_VMX_FLAGS },
631 { ".smx", PROCESSOR_UNKNOWN,
632 CPU_SMX_FLAGS },
633 { ".xsave", PROCESSOR_UNKNOWN,
634 CPU_XSAVE_FLAGS },
635 { ".aes", PROCESSOR_UNKNOWN,
636 CPU_AES_FLAGS },
637 { ".pclmul", PROCESSOR_UNKNOWN,
638 CPU_PCLMUL_FLAGS },
639 { ".clmul", PROCESSOR_UNKNOWN,
640 CPU_PCLMUL_FLAGS },
641 { ".fma", PROCESSOR_UNKNOWN,
642 CPU_FMA_FLAGS },
643 { ".fma4", PROCESSOR_UNKNOWN,
644 CPU_FMA4_FLAGS },
645 { ".lwp", PROCESSOR_UNKNOWN,
646 CPU_LWP_FLAGS },
647 { ".movbe", PROCESSOR_UNKNOWN,
648 CPU_MOVBE_FLAGS },
649 { ".ept", PROCESSOR_UNKNOWN,
650 CPU_EPT_FLAGS },
651 { ".clflush", PROCESSOR_UNKNOWN,
652 CPU_CLFLUSH_FLAGS },
653 { ".syscall", PROCESSOR_UNKNOWN,
654 CPU_SYSCALL_FLAGS },
655 { ".rdtscp", PROCESSOR_UNKNOWN,
656 CPU_RDTSCP_FLAGS },
657 { ".3dnow", PROCESSOR_UNKNOWN,
658 CPU_3DNOW_FLAGS },
659 { ".3dnowa", PROCESSOR_UNKNOWN,
660 CPU_3DNOWA_FLAGS },
661 { ".padlock", PROCESSOR_UNKNOWN,
662 CPU_PADLOCK_FLAGS },
663 { ".pacifica", PROCESSOR_UNKNOWN,
664 CPU_SVME_FLAGS },
665 { ".svme", PROCESSOR_UNKNOWN,
666 CPU_SVME_FLAGS },
667 { ".sse4a", PROCESSOR_UNKNOWN,
668 CPU_SSE4A_FLAGS },
669 { ".abm", PROCESSOR_UNKNOWN,
670 CPU_ABM_FLAGS },
671 };
672
673 #ifdef I386COFF
674 /* Like s_lcomm_internal in gas/read.c but the alignment string
675 is allowed to be optional. */
676
677 static symbolS *
678 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
679 {
680 addressT align = 0;
681
682 SKIP_WHITESPACE ();
683
684 if (needs_align
685 && *input_line_pointer == ',')
686 {
687 align = parse_align (needs_align - 1);
688
689 if (align == (addressT) -1)
690 return NULL;
691 }
692 else
693 {
694 if (size >= 8)
695 align = 3;
696 else if (size >= 4)
697 align = 2;
698 else if (size >= 2)
699 align = 1;
700 else
701 align = 0;
702 }
703
704 bss_alloc (symbolP, size, align);
705 return symbolP;
706 }
707
708 static void
709 pe_lcomm (int needs_align)
710 {
711 s_comm_internal (needs_align * 2, pe_lcomm_internal);
712 }
713 #endif
714
715 const pseudo_typeS md_pseudo_table[] =
716 {
717 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
718 {"align", s_align_bytes, 0},
719 #else
720 {"align", s_align_ptwo, 0},
721 #endif
722 {"arch", set_cpu_arch, 0},
723 #ifndef I386COFF
724 {"bss", s_bss, 0},
725 #else
726 {"lcomm", pe_lcomm, 1},
727 #endif
728 {"ffloat", float_cons, 'f'},
729 {"dfloat", float_cons, 'd'},
730 {"tfloat", float_cons, 'x'},
731 {"value", cons, 2},
732 {"slong", signed_cons, 4},
733 {"noopt", s_ignore, 0},
734 {"optim", s_ignore, 0},
735 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
736 {"code16", set_code_flag, CODE_16BIT},
737 {"code32", set_code_flag, CODE_32BIT},
738 {"code64", set_code_flag, CODE_64BIT},
739 {"intel_syntax", set_intel_syntax, 1},
740 {"att_syntax", set_intel_syntax, 0},
741 {"intel_mnemonic", set_intel_mnemonic, 1},
742 {"att_mnemonic", set_intel_mnemonic, 0},
743 {"allow_index_reg", set_allow_index_reg, 1},
744 {"disallow_index_reg", set_allow_index_reg, 0},
745 {"sse_check", set_sse_check, 0},
746 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
747 {"largecomm", handle_large_common, 0},
748 #else
749 {"file", (void (*) (int)) dwarf2_directive_file, 0},
750 {"loc", dwarf2_directive_loc, 0},
751 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
752 #endif
753 #ifdef TE_PE
754 {"secrel32", pe_directive_secrel, 0},
755 #endif
756 {0, 0, 0}
757 };
758
759 /* For interface with expression (). */
760 extern char *input_line_pointer;
761
762 /* Hash table for instruction mnemonic lookup. */
763 static struct hash_control *op_hash;
764
765 /* Hash table for register lookup. */
766 static struct hash_control *reg_hash;
767 \f
768 void
769 i386_align_code (fragS *fragP, int count)
770 {
771 /* Various efficient no-op patterns for aligning code labels.
772 Note: Don't try to assemble the instructions in the comments.
773 0L and 0w are not legal. */
774 static const char f32_1[] =
775 {0x90}; /* nop */
776 static const char f32_2[] =
777 {0x66,0x90}; /* xchg %ax,%ax */
778 static const char f32_3[] =
779 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
780 static const char f32_4[] =
781 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
782 static const char f32_5[] =
783 {0x90, /* nop */
784 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
785 static const char f32_6[] =
786 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
787 static const char f32_7[] =
788 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
789 static const char f32_8[] =
790 {0x90, /* nop */
791 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
792 static const char f32_9[] =
793 {0x89,0xf6, /* movl %esi,%esi */
794 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
795 static const char f32_10[] =
796 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
797 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
798 static const char f32_11[] =
799 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
800 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
801 static const char f32_12[] =
802 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
803 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
804 static const char f32_13[] =
805 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
806 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
807 static const char f32_14[] =
808 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
809 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
810 static const char f16_3[] =
811 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
812 static const char f16_4[] =
813 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
814 static const char f16_5[] =
815 {0x90, /* nop */
816 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
817 static const char f16_6[] =
818 {0x89,0xf6, /* mov %si,%si */
819 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
820 static const char f16_7[] =
821 {0x8d,0x74,0x00, /* lea 0(%si),%si */
822 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
823 static const char f16_8[] =
824 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
825 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
826 static const char jump_31[] =
827 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
828 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
829 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
830 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
831 static const char *const f32_patt[] = {
832 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
833 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
834 };
835 static const char *const f16_patt[] = {
836 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
837 };
838 /* nopl (%[re]ax) */
839 static const char alt_3[] =
840 {0x0f,0x1f,0x00};
841 /* nopl 0(%[re]ax) */
842 static const char alt_4[] =
843 {0x0f,0x1f,0x40,0x00};
844 /* nopl 0(%[re]ax,%[re]ax,1) */
845 static const char alt_5[] =
846 {0x0f,0x1f,0x44,0x00,0x00};
847 /* nopw 0(%[re]ax,%[re]ax,1) */
848 static const char alt_6[] =
849 {0x66,0x0f,0x1f,0x44,0x00,0x00};
850 /* nopl 0L(%[re]ax) */
851 static const char alt_7[] =
852 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
853 /* nopl 0L(%[re]ax,%[re]ax,1) */
854 static const char alt_8[] =
855 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
856 /* nopw 0L(%[re]ax,%[re]ax,1) */
857 static const char alt_9[] =
858 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
859 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
860 static const char alt_10[] =
861 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
862 /* data16
863 nopw %cs:0L(%[re]ax,%[re]ax,1) */
864 static const char alt_long_11[] =
865 {0x66,
866 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
867 /* data16
868 data16
869 nopw %cs:0L(%[re]ax,%[re]ax,1) */
870 static const char alt_long_12[] =
871 {0x66,
872 0x66,
873 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
874 /* data16
875 data16
876 data16
877 nopw %cs:0L(%[re]ax,%[re]ax,1) */
878 static const char alt_long_13[] =
879 {0x66,
880 0x66,
881 0x66,
882 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
883 /* data16
884 data16
885 data16
886 data16
887 nopw %cs:0L(%[re]ax,%[re]ax,1) */
888 static const char alt_long_14[] =
889 {0x66,
890 0x66,
891 0x66,
892 0x66,
893 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
894 /* data16
895 data16
896 data16
897 data16
898 data16
899 nopw %cs:0L(%[re]ax,%[re]ax,1) */
900 static const char alt_long_15[] =
901 {0x66,
902 0x66,
903 0x66,
904 0x66,
905 0x66,
906 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
907 /* nopl 0(%[re]ax,%[re]ax,1)
908 nopw 0(%[re]ax,%[re]ax,1) */
909 static const char alt_short_11[] =
910 {0x0f,0x1f,0x44,0x00,0x00,
911 0x66,0x0f,0x1f,0x44,0x00,0x00};
912 /* nopw 0(%[re]ax,%[re]ax,1)
913 nopw 0(%[re]ax,%[re]ax,1) */
914 static const char alt_short_12[] =
915 {0x66,0x0f,0x1f,0x44,0x00,0x00,
916 0x66,0x0f,0x1f,0x44,0x00,0x00};
917 /* nopw 0(%[re]ax,%[re]ax,1)
918 nopl 0L(%[re]ax) */
919 static const char alt_short_13[] =
920 {0x66,0x0f,0x1f,0x44,0x00,0x00,
921 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
922 /* nopl 0L(%[re]ax)
923 nopl 0L(%[re]ax) */
924 static const char alt_short_14[] =
925 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
926 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
927 /* nopl 0L(%[re]ax)
928 nopl 0L(%[re]ax,%[re]ax,1) */
929 static const char alt_short_15[] =
930 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
931 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
932 static const char *const alt_short_patt[] = {
933 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
934 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
935 alt_short_14, alt_short_15
936 };
937 static const char *const alt_long_patt[] = {
938 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
939 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
940 alt_long_14, alt_long_15
941 };
942
943 /* Only align if the requested byte count is positive and within range. */
944 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
945 return;
946
947 /* We need to decide which NOP sequence to use for 32bit and
948 64bit. When -mtune= is used:
949
950 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
951 PROCESSOR_GENERIC32, f32_patt will be used.
952 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
953 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
954 PROCESSOR_GENERIC64, alt_long_patt will be used.
955 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
956 PROCESSOR_AMDFAM10, alt_short_patt will be used.
957
958 When -mtune= isn't used, alt_long_patt will be used if
959 cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
960 be used.
961
962 When -march= or .arch is used, we can't use anything beyond
963 cpu_arch_isa_flags. */
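/* Worked example, not part of the original source: with alt_long_patt
   selected, a 6 byte padding request copies alt_6 (66 0f 1f 44 00 00,
   nopw 0(%[re]ax,%[re]ax,1)), while a 20 byte request is split by the loop
   below into the 5 byte alt_5 nopl followed by the 15 byte alt_long_15
   sequence.  */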
964
965 if (flag_code == CODE_16BIT)
966 {
967 if (count > 8)
968 {
969 memcpy (fragP->fr_literal + fragP->fr_fix,
970 jump_31, count);
971 /* Adjust jump offset. */
972 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
973 }
974 else
975 memcpy (fragP->fr_literal + fragP->fr_fix,
976 f16_patt[count - 1], count);
977 }
978 else
979 {
980 const char *const *patt = NULL;
981
982 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
983 {
984 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
985 switch (cpu_arch_tune)
986 {
987 case PROCESSOR_UNKNOWN:
988 /* We use cpu_arch_isa_flags to check if we SHOULD
989 optimize for Cpu686. */
990 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
991 patt = alt_long_patt;
992 else
993 patt = f32_patt;
994 break;
995 case PROCESSOR_PENTIUMPRO:
996 case PROCESSOR_PENTIUM4:
997 case PROCESSOR_NOCONA:
998 case PROCESSOR_CORE:
999 case PROCESSOR_CORE2:
1000 case PROCESSOR_COREI7:
1001 case PROCESSOR_L1OM:
1002 case PROCESSOR_GENERIC64:
1003 patt = alt_long_patt;
1004 break;
1005 case PROCESSOR_K6:
1006 case PROCESSOR_ATHLON:
1007 case PROCESSOR_K8:
1008 case PROCESSOR_AMDFAM10:
1009 patt = alt_short_patt;
1010 break;
1011 case PROCESSOR_I386:
1012 case PROCESSOR_I486:
1013 case PROCESSOR_PENTIUM:
1014 case PROCESSOR_GENERIC32:
1015 patt = f32_patt;
1016 break;
1017 }
1018 }
1019 else
1020 {
1021 switch (fragP->tc_frag_data.tune)
1022 {
1023 case PROCESSOR_UNKNOWN:
1024 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1025 PROCESSOR_UNKNOWN. */
1026 abort ();
1027 break;
1028
1029 case PROCESSOR_I386:
1030 case PROCESSOR_I486:
1031 case PROCESSOR_PENTIUM:
1032 case PROCESSOR_K6:
1033 case PROCESSOR_ATHLON:
1034 case PROCESSOR_K8:
1035 case PROCESSOR_AMDFAM10:
1036 case PROCESSOR_GENERIC32:
1037 /* We use cpu_arch_isa_flags to check if we CAN optimize
1038 for Cpu686. */
1039 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1040 patt = alt_short_patt;
1041 else
1042 patt = f32_patt;
1043 break;
1044 case PROCESSOR_PENTIUMPRO:
1045 case PROCESSOR_PENTIUM4:
1046 case PROCESSOR_NOCONA:
1047 case PROCESSOR_CORE:
1048 case PROCESSOR_CORE2:
1049 case PROCESSOR_COREI7:
1050 case PROCESSOR_L1OM:
1051 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1052 patt = alt_long_patt;
1053 else
1054 patt = f32_patt;
1055 break;
1056 case PROCESSOR_GENERIC64:
1057 patt = alt_long_patt;
1058 break;
1059 }
1060 }
1061
1062 if (patt == f32_patt)
1063 {
1064 /* If the padding is less than 15 bytes, we use the normal
1065 ones. Otherwise, we use a jump instruction and adjust
1066 its offset. */
1067 int limit;
1068
1069 /* For 64bit, the limit is 3 bytes. */
1070 if (flag_code == CODE_64BIT
1071 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1072 limit = 3;
1073 else
1074 limit = 15;
1075 if (count < limit)
1076 memcpy (fragP->fr_literal + fragP->fr_fix,
1077 patt[count - 1], count);
1078 else
1079 {
1080 memcpy (fragP->fr_literal + fragP->fr_fix,
1081 jump_31, count);
1082 /* Adjust jump offset. */
1083 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1084 }
1085 }
1086 else
1087 {
1088 /* The maximum length of an instruction is 15 bytes. If the
1089 padding is greater than 15 bytes and we don't use a jump,
1090 we have to break it into smaller pieces. */
1091 int padding = count;
1092 while (padding > 15)
1093 {
1094 padding -= 15;
1095 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1096 patt [14], 15);
1097 }
1098
1099 if (padding)
1100 memcpy (fragP->fr_literal + fragP->fr_fix,
1101 patt [padding - 1], padding);
1102 }
1103 }
1104 fragP->fr_var = count;
1105 }
1106
1107 static INLINE int
1108 operand_type_all_zero (const union i386_operand_type *x)
1109 {
1110 switch (ARRAY_SIZE(x->array))
1111 {
1112 case 3:
1113 if (x->array[2])
1114 return 0;
1115 case 2:
1116 if (x->array[1])
1117 return 0;
1118 case 1:
1119 return !x->array[0];
1120 default:
1121 abort ();
1122 }
1123 }
1124
1125 static INLINE void
1126 operand_type_set (union i386_operand_type *x, unsigned int v)
1127 {
1128 switch (ARRAY_SIZE(x->array))
1129 {
1130 case 3:
1131 x->array[2] = v;
1132 case 2:
1133 x->array[1] = v;
1134 case 1:
1135 x->array[0] = v;
1136 break;
1137 default:
1138 abort ();
1139 }
1140 }
1141
1142 static INLINE int
1143 operand_type_equal (const union i386_operand_type *x,
1144 const union i386_operand_type *y)
1145 {
1146 switch (ARRAY_SIZE(x->array))
1147 {
1148 case 3:
1149 if (x->array[2] != y->array[2])
1150 return 0;
1151 case 2:
1152 if (x->array[1] != y->array[1])
1153 return 0;
1154 case 1:
1155 return x->array[0] == y->array[0];
1156 break;
1157 default:
1158 abort ();
1159 }
1160 }
1161
1162 static INLINE int
1163 cpu_flags_all_zero (const union i386_cpu_flags *x)
1164 {
1165 switch (ARRAY_SIZE(x->array))
1166 {
1167 case 3:
1168 if (x->array[2])
1169 return 0;
1170 case 2:
1171 if (x->array[1])
1172 return 0;
1173 case 1:
1174 return !x->array[0];
1175 default:
1176 abort ();
1177 }
1178 }
1179
1180 static INLINE void
1181 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1182 {
1183 switch (ARRAY_SIZE(x->array))
1184 {
1185 case 3:
1186 x->array[2] = v;
1187 case 2:
1188 x->array[1] = v;
1189 case 1:
1190 x->array[0] = v;
1191 break;
1192 default:
1193 abort ();
1194 }
1195 }
1196
1197 static INLINE int
1198 cpu_flags_equal (const union i386_cpu_flags *x,
1199 const union i386_cpu_flags *y)
1200 {
1201 switch (ARRAY_SIZE(x->array))
1202 {
1203 case 3:
1204 if (x->array[2] != y->array[2])
1205 return 0;
1206 case 2:
1207 if (x->array[1] != y->array[1])
1208 return 0;
1209 case 1:
1210 return x->array[0] == y->array[0];
1211 break;
1212 default:
1213 abort ();
1214 }
1215 }
1216
1217 static INLINE int
1218 cpu_flags_check_cpu64 (i386_cpu_flags f)
1219 {
1220 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1221 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1222 }
1223
1224 static INLINE i386_cpu_flags
1225 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1226 {
1227 switch (ARRAY_SIZE (x.array))
1228 {
1229 case 3:
1230 x.array [2] &= y.array [2];
1231 case 2:
1232 x.array [1] &= y.array [1];
1233 case 1:
1234 x.array [0] &= y.array [0];
1235 break;
1236 default:
1237 abort ();
1238 }
1239 return x;
1240 }
1241
1242 static INLINE i386_cpu_flags
1243 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1244 {
1245 switch (ARRAY_SIZE (x.array))
1246 {
1247 case 3:
1248 x.array [2] |= y.array [2];
1249 case 2:
1250 x.array [1] |= y.array [1];
1251 case 1:
1252 x.array [0] |= y.array [0];
1253 break;
1254 default:
1255 abort ();
1256 }
1257 return x;
1258 }
1259
1260 static INLINE i386_cpu_flags
1261 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1262 {
1263 switch (ARRAY_SIZE (x.array))
1264 {
1265 case 3:
1266 x.array [2] &= ~y.array [2];
1267 case 2:
1268 x.array [1] &= ~y.array [1];
1269 case 1:
1270 x.array [0] &= ~y.array [0];
1271 break;
1272 default:
1273 abort ();
1274 }
1275 return x;
1276 }
1277
1278 #define CPU_FLAGS_ARCH_MATCH 0x1
1279 #define CPU_FLAGS_64BIT_MATCH 0x2
1280 #define CPU_FLAGS_AES_MATCH 0x4
1281 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1282 #define CPU_FLAGS_AVX_MATCH 0x10
1283
1284 #define CPU_FLAGS_32BIT_MATCH \
1285 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1286 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1287 #define CPU_FLAGS_PERFECT_MATCH \
1288 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
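/* Worked values, not part of the original source:
   CPU_FLAGS_32BIT_MATCH = 0x1 | 0x4 | 0x8 | 0x10 = 0x1d and
   CPU_FLAGS_PERFECT_MATCH = 0x1d | 0x2 = 0x1f, i.e. every match bit set.  */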
1289
1290 /* Return CPU flags match bits. */
1291
1292 static int
1293 cpu_flags_match (const insn_template *t)
1294 {
1295 i386_cpu_flags x = t->cpu_flags;
1296 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1297
1298 x.bitfield.cpu64 = 0;
1299 x.bitfield.cpuno64 = 0;
1300
1301 if (cpu_flags_all_zero (&x))
1302 {
1303 /* This instruction is available on all archs. */
1304 match |= CPU_FLAGS_32BIT_MATCH;
1305 }
1306 else
1307 {
1308 /* This instruction is available only on some archs. */
1309 i386_cpu_flags cpu = cpu_arch_flags;
1310
1311 cpu.bitfield.cpu64 = 0;
1312 cpu.bitfield.cpuno64 = 0;
1313 cpu = cpu_flags_and (x, cpu);
1314 if (!cpu_flags_all_zero (&cpu))
1315 {
1316 if (x.bitfield.cpuavx)
1317 {
1318 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1319 if (cpu.bitfield.cpuavx)
1320 {
1321 /* Check SSE2AVX. */
1322 if (!t->opcode_modifier.sse2avx || sse2avx)
1323 {
1324 match |= (CPU_FLAGS_ARCH_MATCH
1325 | CPU_FLAGS_AVX_MATCH);
1326 /* Check AES. */
1327 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1328 match |= CPU_FLAGS_AES_MATCH;
1329 /* Check PCLMUL. */
1330 if (!x.bitfield.cpupclmul
1331 || cpu.bitfield.cpupclmul)
1332 match |= CPU_FLAGS_PCLMUL_MATCH;
1333 }
1334 }
1335 else
1336 match |= CPU_FLAGS_ARCH_MATCH;
1337 }
1338 else
1339 match |= CPU_FLAGS_32BIT_MATCH;
1340 }
1341 }
1342 return match;
1343 }
1344
1345 static INLINE i386_operand_type
1346 operand_type_and (i386_operand_type x, i386_operand_type y)
1347 {
1348 switch (ARRAY_SIZE (x.array))
1349 {
1350 case 3:
1351 x.array [2] &= y.array [2];
1352 case 2:
1353 x.array [1] &= y.array [1];
1354 case 1:
1355 x.array [0] &= y.array [0];
1356 break;
1357 default:
1358 abort ();
1359 }
1360 return x;
1361 }
1362
1363 static INLINE i386_operand_type
1364 operand_type_or (i386_operand_type x, i386_operand_type y)
1365 {
1366 switch (ARRAY_SIZE (x.array))
1367 {
1368 case 3:
1369 x.array [2] |= y.array [2];
1370 case 2:
1371 x.array [1] |= y.array [1];
1372 case 1:
1373 x.array [0] |= y.array [0];
1374 break;
1375 default:
1376 abort ();
1377 }
1378 return x;
1379 }
1380
1381 static INLINE i386_operand_type
1382 operand_type_xor (i386_operand_type x, i386_operand_type y)
1383 {
1384 switch (ARRAY_SIZE (x.array))
1385 {
1386 case 3:
1387 x.array [2] ^= y.array [2];
1388 case 2:
1389 x.array [1] ^= y.array [1];
1390 case 1:
1391 x.array [0] ^= y.array [0];
1392 break;
1393 default:
1394 abort ();
1395 }
1396 return x;
1397 }
1398
1399 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1400 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1401 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1402 static const i386_operand_type inoutportreg
1403 = OPERAND_TYPE_INOUTPORTREG;
1404 static const i386_operand_type reg16_inoutportreg
1405 = OPERAND_TYPE_REG16_INOUTPORTREG;
1406 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1407 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1408 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1409 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1410 static const i386_operand_type anydisp
1411 = OPERAND_TYPE_ANYDISP;
1412 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1413 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1414 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1415 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1416 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1417 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1418 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1419 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1420 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1421 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1422 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1423
1424 enum operand_type
1425 {
1426 reg,
1427 imm,
1428 disp,
1429 anymem
1430 };
1431
1432 static INLINE int
1433 operand_type_check (i386_operand_type t, enum operand_type c)
1434 {
1435 switch (c)
1436 {
1437 case reg:
1438 return (t.bitfield.reg8
1439 || t.bitfield.reg16
1440 || t.bitfield.reg32
1441 || t.bitfield.reg64);
1442
1443 case imm:
1444 return (t.bitfield.imm8
1445 || t.bitfield.imm8s
1446 || t.bitfield.imm16
1447 || t.bitfield.imm32
1448 || t.bitfield.imm32s
1449 || t.bitfield.imm64);
1450
1451 case disp:
1452 return (t.bitfield.disp8
1453 || t.bitfield.disp16
1454 || t.bitfield.disp32
1455 || t.bitfield.disp32s
1456 || t.bitfield.disp64);
1457
1458 case anymem:
1459 return (t.bitfield.disp8
1460 || t.bitfield.disp16
1461 || t.bitfield.disp32
1462 || t.bitfield.disp32s
1463 || t.bitfield.disp64
1464 || t.bitfield.baseindex);
1465
1466 default:
1467 abort ();
1468 }
1469
1470 return 0;
1471 }
1472
1473 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1474 operand J for instruction template T. */
1475
1476 static INLINE int
1477 match_reg_size (const insn_template *t, unsigned int j)
1478 {
1479 return !((i.types[j].bitfield.byte
1480 && !t->operand_types[j].bitfield.byte)
1481 || (i.types[j].bitfield.word
1482 && !t->operand_types[j].bitfield.word)
1483 || (i.types[j].bitfield.dword
1484 && !t->operand_types[j].bitfield.dword)
1485 || (i.types[j].bitfield.qword
1486 && !t->operand_types[j].bitfield.qword));
1487 }
1488
1489 /* Return 1 if there is no conflict in any size on operand J for
1490 instruction template T. */
1491
1492 static INLINE int
1493 match_mem_size (const insn_template *t, unsigned int j)
1494 {
1495 return (match_reg_size (t, j)
1496 && !((i.types[j].bitfield.unspecified
1497 && !t->operand_types[j].bitfield.unspecified)
1498 || (i.types[j].bitfield.fword
1499 && !t->operand_types[j].bitfield.fword)
1500 || (i.types[j].bitfield.tbyte
1501 && !t->operand_types[j].bitfield.tbyte)
1502 || (i.types[j].bitfield.xmmword
1503 && !t->operand_types[j].bitfield.xmmword)
1504 || (i.types[j].bitfield.ymmword
1505 && !t->operand_types[j].bitfield.ymmword)));
1506 }
1507
1508 /* Return 1 if there is no size conflict on any operands for
1509 instruction template T. */
1510
1511 static INLINE int
1512 operand_size_match (const insn_template *t)
1513 {
1514 unsigned int j;
1515 int match = 1;
1516
1517 /* Don't check jump instructions. */
1518 if (t->opcode_modifier.jump
1519 || t->opcode_modifier.jumpbyte
1520 || t->opcode_modifier.jumpdword
1521 || t->opcode_modifier.jumpintersegment)
1522 return match;
1523
1524 /* Check memory and accumulator operand size. */
1525 for (j = 0; j < i.operands; j++)
1526 {
1527 if (t->operand_types[j].bitfield.anysize)
1528 continue;
1529
1530 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1531 {
1532 match = 0;
1533 break;
1534 }
1535
1536 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1537 {
1538 match = 0;
1539 break;
1540 }
1541 }
1542
1543 if (match
1544 || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
1545 return match;
1546
1547 /* Check reverse. */
1548 gas_assert (i.operands == 2);
1549
1550 match = 1;
1551 for (j = 0; j < 2; j++)
1552 {
1553 if (t->operand_types[j].bitfield.acc
1554 && !match_reg_size (t, j ? 0 : 1))
1555 {
1556 match = 0;
1557 break;
1558 }
1559
1560 if (i.types[j].bitfield.mem
1561 && !match_mem_size (t, j ? 0 : 1))
1562 {
1563 match = 0;
1564 break;
1565 }
1566 }
1567
1568 return match;
1569 }
1570
1571 static INLINE int
1572 operand_type_match (i386_operand_type overlap,
1573 i386_operand_type given)
1574 {
1575 i386_operand_type temp = overlap;
1576
1577 temp.bitfield.jumpabsolute = 0;
1578 temp.bitfield.unspecified = 0;
1579 temp.bitfield.byte = 0;
1580 temp.bitfield.word = 0;
1581 temp.bitfield.dword = 0;
1582 temp.bitfield.fword = 0;
1583 temp.bitfield.qword = 0;
1584 temp.bitfield.tbyte = 0;
1585 temp.bitfield.xmmword = 0;
1586 temp.bitfield.ymmword = 0;
1587 if (operand_type_all_zero (&temp))
1588 return 0;
1589
1590 return (given.bitfield.baseindex == overlap.bitfield.baseindex
1591 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
1592 }
1593
1594 /* If given types g0 and g1 are registers they must be of the same type
1595 unless the expected operand type register overlap is null.
1596 Note that Acc in a template matches every size of reg. */
1597
1598 static INLINE int
1599 operand_type_register_match (i386_operand_type m0,
1600 i386_operand_type g0,
1601 i386_operand_type t0,
1602 i386_operand_type m1,
1603 i386_operand_type g1,
1604 i386_operand_type t1)
1605 {
1606 if (!operand_type_check (g0, reg))
1607 return 1;
1608
1609 if (!operand_type_check (g1, reg))
1610 return 1;
1611
1612 if (g0.bitfield.reg8 == g1.bitfield.reg8
1613 && g0.bitfield.reg16 == g1.bitfield.reg16
1614 && g0.bitfield.reg32 == g1.bitfield.reg32
1615 && g0.bitfield.reg64 == g1.bitfield.reg64)
1616 return 1;
1617
1618 if (m0.bitfield.acc)
1619 {
1620 t0.bitfield.reg8 = 1;
1621 t0.bitfield.reg16 = 1;
1622 t0.bitfield.reg32 = 1;
1623 t0.bitfield.reg64 = 1;
1624 }
1625
1626 if (m1.bitfield.acc)
1627 {
1628 t1.bitfield.reg8 = 1;
1629 t1.bitfield.reg16 = 1;
1630 t1.bitfield.reg32 = 1;
1631 t1.bitfield.reg64 = 1;
1632 }
1633
1634 return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1635 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1636 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1637 && !(t0.bitfield.reg64 & t1.bitfield.reg64));
1638 }
1639
1640 static INLINE unsigned int
1641 mode_from_disp_size (i386_operand_type t)
1642 {
1643 if (t.bitfield.disp8)
1644 return 1;
1645 else if (t.bitfield.disp16
1646 || t.bitfield.disp32
1647 || t.bitfield.disp32s)
1648 return 2;
1649 else
1650 return 0;
1651 }
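/* Illustrative note, not part of the original source: the value returned
   here is used as the ModRM mode field for a memory operand carrying a
   displacement -- 1 for an 8-bit displacement, 2 for a 16/32-bit one, 0 when
   no displacement type is set.  */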
1652
1653 static INLINE int
1654 fits_in_signed_byte (offsetT num)
1655 {
1656 return (num >= -128) && (num <= 127);
1657 }
1658
1659 static INLINE int
1660 fits_in_unsigned_byte (offsetT num)
1661 {
1662 return (num & 0xff) == num;
1663 }
1664
1665 static INLINE int
1666 fits_in_unsigned_word (offsetT num)
1667 {
1668 return (num & 0xffff) == num;
1669 }
1670
1671 static INLINE int
1672 fits_in_signed_word (offsetT num)
1673 {
1674 return (-32768 <= num) && (num <= 32767);
1675 }
1676
1677 static INLINE int
1678 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1679 {
1680 #ifndef BFD64
1681 return 1;
1682 #else
1683 return (!(((offsetT) -1 << 31) & num)
1684 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1685 #endif
1686 } /* fits_in_signed_long() */
1687
1688 static INLINE int
1689 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1690 {
1691 #ifndef BFD64
1692 return 1;
1693 #else
1694 return (num & (((offsetT) 2 << 31) - 1)) == num;
1695 #endif
1696 } /* fits_in_unsigned_long() */
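/* Worked example, not part of the original source, assuming BFD64:
   fits_in_signed_long (0x7fffffff) is 1, fits_in_signed_long (0x80000000)
   is 0 (bit 31 set but the upper bits are clear), and
   fits_in_unsigned_long (0x80000000) is 1 since the value is below 2^32.  */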
1697
1698 static i386_operand_type
1699 smallest_imm_type (offsetT num)
1700 {
1701 i386_operand_type t;
1702
1703 operand_type_set (&t, 0);
1704 t.bitfield.imm64 = 1;
1705
1706 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1707 {
1708 /* This code is disabled on the 486 because all the Imm1 forms
1709 in the opcode table are slower on the i486. They're the
1710 versions with the implicitly specified single-position
1711 displacement, which has another syntax if you really want to
1712 use that form. */
1713 t.bitfield.imm1 = 1;
1714 t.bitfield.imm8 = 1;
1715 t.bitfield.imm8s = 1;
1716 t.bitfield.imm16 = 1;
1717 t.bitfield.imm32 = 1;
1718 t.bitfield.imm32s = 1;
1719 }
1720 else if (fits_in_signed_byte (num))
1721 {
1722 t.bitfield.imm8 = 1;
1723 t.bitfield.imm8s = 1;
1724 t.bitfield.imm16 = 1;
1725 t.bitfield.imm32 = 1;
1726 t.bitfield.imm32s = 1;
1727 }
1728 else if (fits_in_unsigned_byte (num))
1729 {
1730 t.bitfield.imm8 = 1;
1731 t.bitfield.imm16 = 1;
1732 t.bitfield.imm32 = 1;
1733 t.bitfield.imm32s = 1;
1734 }
1735 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1736 {
1737 t.bitfield.imm16 = 1;
1738 t.bitfield.imm32 = 1;
1739 t.bitfield.imm32s = 1;
1740 }
1741 else if (fits_in_signed_long (num))
1742 {
1743 t.bitfield.imm32 = 1;
1744 t.bitfield.imm32s = 1;
1745 }
1746 else if (fits_in_unsigned_long (num))
1747 t.bitfield.imm32 = 1;
1748
1749 return t;
1750 }
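/* Worked example, not part of the original source: smallest_imm_type (-5)
   sets imm8, imm8s, imm16, imm32 and imm32s (imm64 is always set), while
   smallest_imm_type (200) drops imm8s because 200 does not fit in a signed
   byte; unless tuning for the i486, smallest_imm_type (1) additionally sets
   imm1.  */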
1751
1752 static offsetT
1753 offset_in_range (offsetT val, int size)
1754 {
1755 addressT mask;
1756
1757 switch (size)
1758 {
1759 case 1: mask = ((addressT) 1 << 8) - 1; break;
1760 case 2: mask = ((addressT) 1 << 16) - 1; break;
1761 case 4: mask = ((addressT) 2 << 31) - 1; break;
1762 #ifdef BFD64
1763 case 8: mask = ((addressT) 2 << 63) - 1; break;
1764 #endif
1765 default: abort ();
1766 }
1767
1768 #ifdef BFD64
1769 /* If BFD64, sign extend val for 32bit address mode. */
1770 if (flag_code != CODE_64BIT
1771 || i.prefix[ADDR_PREFIX])
1772 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1773 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1774 #endif
1775
1776 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1777 {
1778 char buf1[40], buf2[40];
1779
1780 sprint_value (buf1, val);
1781 sprint_value (buf2, val & mask);
1782 as_warn (_("%s shortened to %s"), buf1, buf2);
1783 }
1784 return val & mask;
1785 }
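/* Worked example, not part of the original source: offset_in_range (0x1234, 1)
   warns that the value was shortened and returns 0x34, while
   offset_in_range (-1, 2) is accepted silently and returns 0xffff.  */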
1786
1787 enum PREFIX_GROUP
1788 {
1789 PREFIX_EXIST = 0,
1790 PREFIX_LOCK,
1791 PREFIX_REP,
1792 PREFIX_OTHER
1793 };
1794
1795 /* Returns
1796 a. PREFIX_EXIST if attempting to add a prefix where one from the
1797 same class already exists.
1798 b. PREFIX_LOCK if lock prefix is added.
1799 c. PREFIX_REP if rep/repne prefix is added.
1800 d. PREFIX_OTHER if other prefix is added.
1801 */
1802
1803 static enum PREFIX_GROUP
1804 add_prefix (unsigned int prefix)
1805 {
1806 enum PREFIX_GROUP ret = PREFIX_OTHER;
1807 unsigned int q;
1808
1809 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1810 && flag_code == CODE_64BIT)
1811 {
1812 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1813 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1814 && (prefix & (REX_R | REX_X | REX_B))))
1815 ret = PREFIX_EXIST;
1816 q = REX_PREFIX;
1817 }
1818 else
1819 {
1820 switch (prefix)
1821 {
1822 default:
1823 abort ();
1824
1825 case CS_PREFIX_OPCODE:
1826 case DS_PREFIX_OPCODE:
1827 case ES_PREFIX_OPCODE:
1828 case FS_PREFIX_OPCODE:
1829 case GS_PREFIX_OPCODE:
1830 case SS_PREFIX_OPCODE:
1831 q = SEG_PREFIX;
1832 break;
1833
1834 case REPNE_PREFIX_OPCODE:
1835 case REPE_PREFIX_OPCODE:
1836 q = REP_PREFIX;
1837 ret = PREFIX_REP;
1838 break;
1839
1840 case LOCK_PREFIX_OPCODE:
1841 q = LOCK_PREFIX;
1842 ret = PREFIX_LOCK;
1843 break;
1844
1845 case FWAIT_OPCODE:
1846 q = WAIT_PREFIX;
1847 break;
1848
1849 case ADDR_PREFIX_OPCODE:
1850 q = ADDR_PREFIX;
1851 break;
1852
1853 case DATA_PREFIX_OPCODE:
1854 q = DATA_PREFIX;
1855 break;
1856 }
1857 if (i.prefix[q] != 0)
1858 ret = PREFIX_EXIST;
1859 }
1860
1861 if (ret)
1862 {
1863 if (!i.prefix[q])
1864 ++i.prefixes;
1865 i.prefix[q] |= prefix;
1866 }
1867 else
1868 as_bad (_("same type of prefix used twice"));
1869
1870 return ret;
1871 }
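/* Usage sketch, not part of the original source: add_prefix (LOCK_PREFIX_OPCODE)
   records 0xf0 in i.prefix[LOCK_PREFIX], bumps i.prefixes and returns
   PREFIX_LOCK; a second identical call returns PREFIX_EXIST and reports
   "same type of prefix used twice".  */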
1872
1873 static void
1874 set_code_flag (int value)
1875 {
1876 flag_code = (enum flag_code) value;
1877 if (flag_code == CODE_64BIT)
1878 {
1879 cpu_arch_flags.bitfield.cpu64 = 1;
1880 cpu_arch_flags.bitfield.cpuno64 = 0;
1881 }
1882 else
1883 {
1884 cpu_arch_flags.bitfield.cpu64 = 0;
1885 cpu_arch_flags.bitfield.cpuno64 = 1;
1886 }
1887 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1888 {
1889 as_bad (_("64bit mode not supported on this CPU."));
1890 }
1891 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1892 {
1893 as_bad (_("32bit mode not supported on this CPU."));
1894 }
1895 stackop_size = '\0';
1896 }
1897
1898 static void
1899 set_16bit_gcc_code_flag (int new_code_flag)
1900 {
1901 flag_code = (enum flag_code) new_code_flag;
1902 if (flag_code != CODE_16BIT)
1903 abort ();
1904 cpu_arch_flags.bitfield.cpu64 = 0;
1905 cpu_arch_flags.bitfield.cpuno64 = 1;
1906 stackop_size = LONG_MNEM_SUFFIX;
1907 }
1908
1909 static void
1910 set_intel_syntax (int syntax_flag)
1911 {
1912 /* Find out if register prefixing is specified. */
1913 int ask_naked_reg = 0;
1914
1915 SKIP_WHITESPACE ();
1916 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1917 {
1918 char *string = input_line_pointer;
1919 int e = get_symbol_end ();
1920
1921 if (strcmp (string, "prefix") == 0)
1922 ask_naked_reg = 1;
1923 else if (strcmp (string, "noprefix") == 0)
1924 ask_naked_reg = -1;
1925 else
1926 as_bad (_("bad argument to syntax directive."));
1927 *input_line_pointer = e;
1928 }
1929 demand_empty_rest_of_line ();
1930
1931 intel_syntax = syntax_flag;
1932
1933 if (ask_naked_reg == 0)
1934 allow_naked_reg = (intel_syntax
1935 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
1936 else
1937 allow_naked_reg = (ask_naked_reg < 0);
1938
1939 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
1940
1941 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
1942 identifier_chars['$'] = intel_syntax ? '$' : 0;
1943 register_prefix = allow_naked_reg ? "" : "%";
1944 }
1945
1946 static void
1947 set_intel_mnemonic (int mnemonic_flag)
1948 {
1949 intel_mnemonic = mnemonic_flag;
1950 }
1951
1952 static void
1953 set_allow_index_reg (int flag)
1954 {
1955 allow_index_reg = flag;
1956 }
1957
1958 static void
1959 set_sse_check (int dummy ATTRIBUTE_UNUSED)
1960 {
1961 SKIP_WHITESPACE ();
1962
1963 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1964 {
1965 char *string = input_line_pointer;
1966 int e = get_symbol_end ();
1967
1968 if (strcmp (string, "none") == 0)
1969 sse_check = sse_check_none;
1970 else if (strcmp (string, "warning") == 0)
1971 sse_check = sse_check_warning;
1972 else if (strcmp (string, "error") == 0)
1973 sse_check = sse_check_error;
1974 else
1975 as_bad (_("bad argument to sse_check directive."));
1976 *input_line_pointer = e;
1977 }
1978 else
1979 as_bad (_("missing argument for sse_check directive"));
1980
1981 demand_empty_rest_of_line ();
1982 }
1983
1984 static void
1985 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
1986 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
1987 {
1988 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1989 static const char *arch;
1990
1991 /* Intel L1OM is only supported on ELF. */
1992 if (!IS_ELF)
1993 return;
1994
1995 if (!arch)
1996 {
1997 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
1998 use default_arch. */
1999 arch = cpu_arch_name;
2000 if (!arch)
2001 arch = default_arch;
2002 }
2003
2004 /* If we are targeting Intel L1OM, we must enable it. */
2005 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2006 || new_flag.bitfield.cpul1om)
2007 return;
2008
2009 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2010 #endif
2011 }
2012
2013 static void
2014 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2015 {
2016 SKIP_WHITESPACE ();
2017
2018 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2019 {
2020 char *string = input_line_pointer;
2021 int e = get_symbol_end ();
2022 unsigned int i;
2023 i386_cpu_flags flags;
2024
2025 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
2026 {
2027 if (strcmp (string, cpu_arch[i].name) == 0)
2028 {
2029 check_cpu_arch_compatible (string, cpu_arch[i].flags);
2030
2031 if (*string != '.')
2032 {
2033 cpu_arch_name = cpu_arch[i].name;
2034 cpu_sub_arch_name = NULL;
2035 cpu_arch_flags = cpu_arch[i].flags;
2036 if (flag_code == CODE_64BIT)
2037 {
2038 cpu_arch_flags.bitfield.cpu64 = 1;
2039 cpu_arch_flags.bitfield.cpuno64 = 0;
2040 }
2041 else
2042 {
2043 cpu_arch_flags.bitfield.cpu64 = 0;
2044 cpu_arch_flags.bitfield.cpuno64 = 1;
2045 }
2046 cpu_arch_isa = cpu_arch[i].type;
2047 cpu_arch_isa_flags = cpu_arch[i].flags;
2048 if (!cpu_arch_tune_set)
2049 {
2050 cpu_arch_tune = cpu_arch_isa;
2051 cpu_arch_tune_flags = cpu_arch_isa_flags;
2052 }
2053 break;
2054 }
2055
2056 if (strncmp (string + 1, "no", 2))
2057 flags = cpu_flags_or (cpu_arch_flags,
2058 cpu_arch[i].flags);
2059 else
2060 flags = cpu_flags_and_not (cpu_arch_flags,
2061 cpu_arch[i].flags);
2062 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2063 {
2064 if (cpu_sub_arch_name)
2065 {
2066 char *name = cpu_sub_arch_name;
2067 cpu_sub_arch_name = concat (name,
2068 cpu_arch[i].name,
2069 (const char *) NULL);
2070 free (name);
2071 }
2072 else
2073 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
2074 cpu_arch_flags = flags;
2075 }
2076 *input_line_pointer = e;
2077 demand_empty_rest_of_line ();
2078 return;
2079 }
2080 }
2081 if (i >= ARRAY_SIZE (cpu_arch))
2082 as_bad (_("no such architecture: `%s'"), string);
2083
2084 *input_line_pointer = e;
2085 }
2086 else
2087 as_bad (_("missing cpu architecture"));
2088
2089 no_cond_jump_promotion = 0;
2090 if (*input_line_pointer == ','
2091 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2092 {
2093 char *string = ++input_line_pointer;
2094 int e = get_symbol_end ();
2095
2096 if (strcmp (string, "nojumps") == 0)
2097 no_cond_jump_promotion = 1;
2098 else if (strcmp (string, "jumps") == 0)
2099 ;
2100 else
2101 as_bad (_("no such architecture modifier: `%s'"), string);
2102
2103 *input_line_pointer = e;
2104 }
2105
2106 demand_empty_rest_of_line ();
2107 }
2108
2109 enum bfd_architecture
2110 i386_arch (void)
2111 {
2112 if (cpu_arch_isa == PROCESSOR_L1OM)
2113 {
2114 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2115 || flag_code != CODE_64BIT)
2116 as_fatal (_("Intel L1OM is 64bit ELF only"));
2117 return bfd_arch_l1om;
2118 }
2119 else
2120 return bfd_arch_i386;
2121 }
2122
2123 unsigned long
2124 i386_mach ()
2125 {
2126 if (!strcmp (default_arch, "x86_64"))
2127 {
2128 if (cpu_arch_isa == PROCESSOR_L1OM)
2129 {
2130 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2131 as_fatal (_("Intel L1OM is 64bit ELF only"));
2132 return bfd_mach_l1om;
2133 }
2134 else
2135 return bfd_mach_x86_64;
2136 }
2137 else if (!strcmp (default_arch, "i386"))
2138 return bfd_mach_i386_i386;
2139 else
2140 as_fatal (_("Unknown architecture"));
2141 }
2142 \f
2143 void
2144 md_begin ()
2145 {
2146 const char *hash_err;
2147
2148 /* Initialize op_hash hash table. */
2149 op_hash = hash_new ();
2150
2151 {
2152 const insn_template *optab;
2153 templates *core_optab;
2154
2155 /* Setup for loop. */
2156 optab = i386_optab;
2157 core_optab = (templates *) xmalloc (sizeof (templates));
2158 core_optab->start = optab;
2159
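/* i386_optab keeps all templates for the same mnemonic adjacent, so the
linear scan below can slice the table into one start/end range per name. */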
2160 while (1)
2161 {
2162 ++optab;
2163 if (optab->name == NULL
2164 || strcmp (optab->name, (optab - 1)->name) != 0)
2165 {
2166 /* different name --> ship out current template list;
2167 add to hash table; & begin anew. */
2168 core_optab->end = optab;
2169 hash_err = hash_insert (op_hash,
2170 (optab - 1)->name,
2171 (void *) core_optab);
2172 if (hash_err)
2173 {
2174 as_fatal (_("Internal Error: Can't hash %s: %s"),
2175 (optab - 1)->name,
2176 hash_err);
2177 }
2178 if (optab->name == NULL)
2179 break;
2180 core_optab = (templates *) xmalloc (sizeof (templates));
2181 core_optab->start = optab;
2182 }
2183 }
2184 }
2185
2186 /* Initialize reg_hash hash table. */
2187 reg_hash = hash_new ();
2188 {
2189 const reg_entry *regtab;
2190 unsigned int regtab_size = i386_regtab_size;
2191
2192 for (regtab = i386_regtab; regtab_size--; regtab++)
2193 {
2194 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2195 if (hash_err)
2196 as_fatal (_("Internal Error: Can't hash %s: %s"),
2197 regtab->reg_name,
2198 hash_err);
2199 }
2200 }
2201
2202 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2203 {
2204 int c;
2205 char *p;
2206
2207 for (c = 0; c < 256; c++)
2208 {
2209 if (ISDIGIT (c))
2210 {
2211 digit_chars[c] = c;
2212 mnemonic_chars[c] = c;
2213 register_chars[c] = c;
2214 operand_chars[c] = c;
2215 }
2216 else if (ISLOWER (c))
2217 {
2218 mnemonic_chars[c] = c;
2219 register_chars[c] = c;
2220 operand_chars[c] = c;
2221 }
2222 else if (ISUPPER (c))
2223 {
2224 mnemonic_chars[c] = TOLOWER (c);
2225 register_chars[c] = mnemonic_chars[c];
2226 operand_chars[c] = c;
2227 }
2228
2229 if (ISALPHA (c) || ISDIGIT (c))
2230 identifier_chars[c] = c;
2231 else if (c >= 128)
2232 {
2233 identifier_chars[c] = c;
2234 operand_chars[c] = c;
2235 }
2236 }
2237
2238 #ifdef LEX_AT
2239 identifier_chars['@'] = '@';
2240 #endif
2241 #ifdef LEX_QM
2242 identifier_chars['?'] = '?';
2243 operand_chars['?'] = '?';
2244 #endif
2245 digit_chars['-'] = '-';
2246 mnemonic_chars['_'] = '_';
2247 mnemonic_chars['-'] = '-';
2248 mnemonic_chars['.'] = '.';
2249 identifier_chars['_'] = '_';
2250 identifier_chars['.'] = '.';
2251
2252 for (p = operand_special_chars; *p != '\0'; p++)
2253 operand_chars[(unsigned char) *p] = *p;
2254 }
2255
2256 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2257 if (IS_ELF)
2258 {
2259 record_alignment (text_section, 2);
2260 record_alignment (data_section, 2);
2261 record_alignment (bss_section, 2);
2262 }
2263 #endif
2264
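/* DWARF return-address column 16 is %rip in 64-bit mode and column 8 is
%eip otherwise; the CIE data alignment factor matches the negative word
size so frame offsets encode compactly. */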
2265 if (flag_code == CODE_64BIT)
2266 {
2267 x86_dwarf2_return_column = 16;
2268 x86_cie_data_alignment = -8;
2269 }
2270 else
2271 {
2272 x86_dwarf2_return_column = 8;
2273 x86_cie_data_alignment = -4;
2274 }
2275 }
2276
2277 void
2278 i386_print_statistics (FILE *file)
2279 {
2280 hash_print_statistics (file, "i386 opcode", op_hash);
2281 hash_print_statistics (file, "i386 register", reg_hash);
2282 }
2283 \f
2284 #ifdef DEBUG386
2285
2286 /* Debugging routines for md_assemble. */
2287 static void pte (insn_template *);
2288 static void pt (i386_operand_type);
2289 static void pe (expressionS *);
2290 static void ps (symbolS *);
2291
2292 static void
2293 pi (char *line, i386_insn *x)
2294 {
2295 unsigned int i;
2296
2297 fprintf (stdout, "%s: template ", line);
2298 pte (&x->tm);
2299 fprintf (stdout, " address: base %s index %s scale %x\n",
2300 x->base_reg ? x->base_reg->reg_name : "none",
2301 x->index_reg ? x->index_reg->reg_name : "none",
2302 x->log2_scale_factor);
2303 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2304 x->rm.mode, x->rm.reg, x->rm.regmem);
2305 fprintf (stdout, " sib: base %x index %x scale %x\n",
2306 x->sib.base, x->sib.index, x->sib.scale);
2307 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2308 (x->rex & REX_W) != 0,
2309 (x->rex & REX_R) != 0,
2310 (x->rex & REX_X) != 0,
2311 (x->rex & REX_B) != 0);
2312 for (i = 0; i < x->operands; i++)
2313 {
2314 fprintf (stdout, " #%d: ", i + 1);
2315 pt (x->types[i]);
2316 fprintf (stdout, "\n");
2317 if (x->types[i].bitfield.reg8
2318 || x->types[i].bitfield.reg16
2319 || x->types[i].bitfield.reg32
2320 || x->types[i].bitfield.reg64
2321 || x->types[i].bitfield.regmmx
2322 || x->types[i].bitfield.regxmm
2323 || x->types[i].bitfield.regymm
2324 || x->types[i].bitfield.sreg2
2325 || x->types[i].bitfield.sreg3
2326 || x->types[i].bitfield.control
2327 || x->types[i].bitfield.debug
2328 || x->types[i].bitfield.test)
2329 fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
2330 if (operand_type_check (x->types[i], imm))
2331 pe (x->op[i].imms);
2332 if (operand_type_check (x->types[i], disp))
2333 pe (x->op[i].disps);
2334 }
2335 }
2336
2337 static void
2338 pte (insn_template *t)
2339 {
2340 unsigned int i;
2341 fprintf (stdout, " %d operands ", t->operands);
2342 fprintf (stdout, "opcode %x ", t->base_opcode);
2343 if (t->extension_opcode != None)
2344 fprintf (stdout, "ext %x ", t->extension_opcode);
2345 if (t->opcode_modifier.d)
2346 fprintf (stdout, "D");
2347 if (t->opcode_modifier.w)
2348 fprintf (stdout, "W");
2349 fprintf (stdout, "\n");
2350 for (i = 0; i < t->operands; i++)
2351 {
2352 fprintf (stdout, " #%d type ", i + 1);
2353 pt (t->operand_types[i]);
2354 fprintf (stdout, "\n");
2355 }
2356 }
2357
2358 static void
2359 pe (expressionS *e)
2360 {
2361 fprintf (stdout, " operation %d\n", e->X_op);
2362 fprintf (stdout, " add_number %ld (%lx)\n",
2363 (long) e->X_add_number, (long) e->X_add_number);
2364 if (e->X_add_symbol)
2365 {
2366 fprintf (stdout, " add_symbol ");
2367 ps (e->X_add_symbol);
2368 fprintf (stdout, "\n");
2369 }
2370 if (e->X_op_symbol)
2371 {
2372 fprintf (stdout, " op_symbol ");
2373 ps (e->X_op_symbol);
2374 fprintf (stdout, "\n");
2375 }
2376 }
2377
2378 static void
2379 ps (symbolS *s)
2380 {
2381 fprintf (stdout, "%s type %s%s",
2382 S_GET_NAME (s),
2383 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2384 segment_name (S_GET_SEGMENT (s)));
2385 }
2386
2387 static struct type_name
2388 {
2389 i386_operand_type mask;
2390 const char *name;
2391 }
2392 const type_names[] =
2393 {
2394 { OPERAND_TYPE_REG8, "r8" },
2395 { OPERAND_TYPE_REG16, "r16" },
2396 { OPERAND_TYPE_REG32, "r32" },
2397 { OPERAND_TYPE_REG64, "r64" },
2398 { OPERAND_TYPE_IMM8, "i8" },
2399 { OPERAND_TYPE_IMM8S, "i8s" },
2400 { OPERAND_TYPE_IMM16, "i16" },
2401 { OPERAND_TYPE_IMM32, "i32" },
2402 { OPERAND_TYPE_IMM32S, "i32s" },
2403 { OPERAND_TYPE_IMM64, "i64" },
2404 { OPERAND_TYPE_IMM1, "i1" },
2405 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2406 { OPERAND_TYPE_DISP8, "d8" },
2407 { OPERAND_TYPE_DISP16, "d16" },
2408 { OPERAND_TYPE_DISP32, "d32" },
2409 { OPERAND_TYPE_DISP32S, "d32s" },
2410 { OPERAND_TYPE_DISP64, "d64" },
2411 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2412 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2413 { OPERAND_TYPE_CONTROL, "control reg" },
2414 { OPERAND_TYPE_TEST, "test reg" },
2415 { OPERAND_TYPE_DEBUG, "debug reg" },
2416 { OPERAND_TYPE_FLOATREG, "FReg" },
2417 { OPERAND_TYPE_FLOATACC, "FAcc" },
2418 { OPERAND_TYPE_SREG2, "SReg2" },
2419 { OPERAND_TYPE_SREG3, "SReg3" },
2420 { OPERAND_TYPE_ACC, "Acc" },
2421 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2422 { OPERAND_TYPE_REGMMX, "rMMX" },
2423 { OPERAND_TYPE_REGXMM, "rXMM" },
2424 { OPERAND_TYPE_REGYMM, "rYMM" },
2425 { OPERAND_TYPE_ESSEG, "es" },
2426 };
2427
2428 static void
2429 pt (i386_operand_type t)
2430 {
2431 unsigned int j;
2432 i386_operand_type a;
2433
2434 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2435 {
2436 a = operand_type_and (t, type_names[j].mask);
2437 if (!operand_type_all_zero (&a))
2438 fprintf (stdout, "%s, ", type_names[j].name);
2439 }
2440 fflush (stdout);
2441 }
2442
2443 #endif /* DEBUG386 */
2444 \f
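/* Translate a fixup of SIZE bytes into a BFD relocation type. PCREL is
non-zero for pc-relative fields, SIGN is > 0 for signed, 0 for unsigned
and < 0 for "don't care", and OTHER carries any relocation operator
given explicitly in the source. */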
2445 static bfd_reloc_code_real_type
2446 reloc (unsigned int size,
2447 int pcrel,
2448 int sign,
2449 bfd_reloc_code_real_type other)
2450 {
2451 if (other != NO_RELOC)
2452 {
2453 reloc_howto_type *reloc;
2454
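/* An 8-byte field promotes the corresponding 32-bit relocation
operators to their 64-bit forms. */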
2455 if (size == 8)
2456 switch (other)
2457 {
2458 case BFD_RELOC_X86_64_GOT32:
2459 return BFD_RELOC_X86_64_GOT64;
2460 break;
2461 case BFD_RELOC_X86_64_PLTOFF64:
2462 return BFD_RELOC_X86_64_PLTOFF64;
2463 break;
2464 case BFD_RELOC_X86_64_GOTPC32:
2465 other = BFD_RELOC_X86_64_GOTPC64;
2466 break;
2467 case BFD_RELOC_X86_64_GOTPCREL:
2468 other = BFD_RELOC_X86_64_GOTPCREL64;
2469 break;
2470 case BFD_RELOC_X86_64_TPOFF32:
2471 other = BFD_RELOC_X86_64_TPOFF64;
2472 break;
2473 case BFD_RELOC_X86_64_DTPOFF32:
2474 other = BFD_RELOC_X86_64_DTPOFF64;
2475 break;
2476 default:
2477 break;
2478 }
2479
2480 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2481 if (size == 4 && flag_code != CODE_64BIT)
2482 sign = -1;
2483
2484 reloc = bfd_reloc_type_lookup (stdoutput, other);
2485 if (!reloc)
2486 as_bad (_("unknown relocation (%u)"), other);
2487 else if (size != bfd_get_reloc_size (reloc))
2488 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2489 bfd_get_reloc_size (reloc),
2490 size);
2491 else if (pcrel && !reloc->pc_relative)
2492 as_bad (_("non-pc-relative relocation for pc-relative field"));
2493 else if ((reloc->complain_on_overflow == complain_overflow_signed
2494 && !sign)
2495 || (reloc->complain_on_overflow == complain_overflow_unsigned
2496 && sign > 0))
2497 as_bad (_("relocated field and relocation type differ in signedness"));
2498 else
2499 return other;
2500 return NO_RELOC;
2501 }
2502
2503 if (pcrel)
2504 {
2505 if (!sign)
2506 as_bad (_("there are no unsigned pc-relative relocations"));
2507 switch (size)
2508 {
2509 case 1: return BFD_RELOC_8_PCREL;
2510 case 2: return BFD_RELOC_16_PCREL;
2511 case 4: return BFD_RELOC_32_PCREL;
2512 case 8: return BFD_RELOC_64_PCREL;
2513 }
2514 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2515 }
2516 else
2517 {
2518 if (sign > 0)
2519 switch (size)
2520 {
2521 case 4: return BFD_RELOC_X86_64_32S;
2522 }
2523 else
2524 switch (size)
2525 {
2526 case 1: return BFD_RELOC_8;
2527 case 2: return BFD_RELOC_16;
2528 case 4: return BFD_RELOC_32;
2529 case 8: return BFD_RELOC_64;
2530 }
2531 as_bad (_("cannot do %s %u byte relocation"),
2532 sign > 0 ? "signed" : "unsigned", size);
2533 }
2534
2535 return NO_RELOC;
2536 }
2537
2538 /* Here we decide which fixups can be adjusted to make them relative to
2539 the beginning of the section instead of the symbol. Basically we need
2540 to make sure that the dynamic relocations are done correctly, so in
2541 some cases we force the original symbol to be used. */
2542
2543 int
2544 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2545 {
2546 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2547 if (!IS_ELF)
2548 return 1;
2549
2550 /* Don't adjust pc-relative references to merge sections in 64-bit
2551 mode. */
2552 if (use_rela_relocations
2553 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2554 && fixP->fx_pcrel)
2555 return 0;
2556
2557 /* The x86_64 GOTPCREL relocations are represented as 32bit PCrel
2558 relocations and changed later by validate_fix. */
2559 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2560 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2561 return 0;
2562
2563 /* adjust_reloc_syms doesn't know about the GOT. */
2564 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2565 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2566 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2567 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2568 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2569 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2570 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2571 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2572 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2573 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2574 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2575 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2576 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2577 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2578 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2579 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2580 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2581 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2582 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2583 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2584 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2585 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2586 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2587 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2588 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2589 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2590 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2591 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2592 return 0;
2593 #endif
2594 return 1;
2595 }
2596
2597 static int
2598 intel_float_operand (const char *mnemonic)
2599 {
2600 /* Note that the value returned is meaningful only for opcodes with (memory)
2601 operands, hence the code here is free to improperly handle opcodes that
2602 have no operands (for better performance and smaller code). */
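/* Return value: 0 = not an FPU operation (or fxsave/fxrstor), 1 = ordinary
FPU operation, 2 = FPU integer operation, 3 = FPU control/state operation
(fldcw, fldenv, fnsave, frstor, fstcw, fstenv, ...). */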
2603
2604 if (mnemonic[0] != 'f')
2605 return 0; /* non-math */
2606
2607 switch (mnemonic[1])
2608 {
2609 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2610 the fs segment override prefix not currently handled because no
2611 call path can make opcodes without operands get here */
2612 case 'i':
2613 return 2 /* integer op */;
2614 case 'l':
2615 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2616 return 3; /* fldcw/fldenv */
2617 break;
2618 case 'n':
2619 if (mnemonic[2] != 'o' /* fnop */)
2620 return 3; /* non-waiting control op */
2621 break;
2622 case 'r':
2623 if (mnemonic[2] == 's')
2624 return 3; /* frstor/frstpm */
2625 break;
2626 case 's':
2627 if (mnemonic[2] == 'a')
2628 return 3; /* fsave */
2629 if (mnemonic[2] == 't')
2630 {
2631 switch (mnemonic[3])
2632 {
2633 case 'c': /* fstcw */
2634 case 'd': /* fstdw */
2635 case 'e': /* fstenv */
2636 case 's': /* fsts[gw] */
2637 return 3;
2638 }
2639 }
2640 break;
2641 case 'x':
2642 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2643 return 0; /* fxsave/fxrstor are not really math ops */
2644 break;
2645 }
2646
2647 return 1;
2648 }
2649
2650 /* Build the VEX prefix. */
2651
2652 static void
2653 build_vex_prefix (const insn_template *t)
2654 {
2655 unsigned int register_specifier;
2656 unsigned int implied_prefix;
2657 unsigned int vector_length;
2658
2659 /* Check register specifier. */
2660 if (i.vex.register_specifier)
2661 {
2662 register_specifier = i.vex.register_specifier->reg_num;
2663 if ((i.vex.register_specifier->reg_flags & RegRex))
2664 register_specifier += 8;
2665 register_specifier = ~register_specifier & 0xf;
2666 }
2667 else
2668 register_specifier = 0xf;
2669
2670 /* Use 2-byte VEX prefix by swapping destination and source
2671 operand. */
2672 if (!i.swap_operand
2673 && i.operands == i.reg_operands
2674 && i.tm.opcode_modifier.vex0f
2675 && i.tm.opcode_modifier.s
2676 && i.rex == REX_B)
2677 {
2678 unsigned int xchg = i.operands - 1;
2679 union i386_op temp_op;
2680 i386_operand_type temp_type;
2681
2682 temp_type = i.types[xchg];
2683 i.types[xchg] = i.types[0];
2684 i.types[0] = temp_type;
2685 temp_op = i.op[xchg];
2686 i.op[xchg] = i.op[0];
2687 i.op[0] = temp_op;
2688
2689 gas_assert (i.rm.mode == 3);
2690
2691 i.rex = REX_R;
2692 xchg = i.rm.regmem;
2693 i.rm.regmem = i.rm.reg;
2694 i.rm.reg = xchg;
2695
2696 /* Use the next insn. */
2697 i.tm = t[1];
2698 }
2699
2700 vector_length = i.tm.opcode_modifier.vex == 2 ? 1 : 0;
2701
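/* The second opcode byte of SSE/AVX templates holds the mandatory legacy
prefix, if any; map it onto the VEX "pp" field: 0 = none, 1 = 0x66,
2 = 0xf3, 3 = 0xf2. */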
2702 switch ((i.tm.base_opcode >> 8) & 0xff)
2703 {
2704 case 0:
2705 implied_prefix = 0;
2706 break;
2707 case DATA_PREFIX_OPCODE:
2708 implied_prefix = 1;
2709 break;
2710 case REPE_PREFIX_OPCODE:
2711 implied_prefix = 2;
2712 break;
2713 case REPNE_PREFIX_OPCODE:
2714 implied_prefix = 3;
2715 break;
2716 default:
2717 abort ();
2718 }
2719
2720 /* Use 2-byte VEX prefix if possible. */
2721 if (i.tm.opcode_modifier.vex0f
2722 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2723 {
2724 /* 2-byte VEX prefix. */
2725 unsigned int r;
2726
2727 i.vex.length = 2;
2728 i.vex.bytes[0] = 0xc5;
2729
2730 /* Check the REX.R bit. */
2731 r = (i.rex & REX_R) ? 0 : 1;
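/* Second (and last) byte of the 2-byte VEX prefix: inverted REX.R, the
vvvv register specifier (complemented above), vector length L and the
implied prefix pp. */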
2732 i.vex.bytes[1] = (r << 7
2733 | register_specifier << 3
2734 | vector_length << 2
2735 | implied_prefix);
2736 }
2737 else
2738 {
2739 /* 3-byte VEX prefix. */
2740 unsigned int m, w;
2741
2742 i.vex.length = 3;
2743 i.vex.bytes[0] = 0xc4;
2744
2745 if (i.tm.opcode_modifier.vex0f)
2746 m = 0x1;
2747 else if (i.tm.opcode_modifier.vex0f38)
2748 m = 0x2;
2749 else if (i.tm.opcode_modifier.vex0f3a)
2750 m = 0x3;
2751 else if (i.tm.opcode_modifier.xop09)
2752 {
2753 m = 0x9;
2754 i.vex.bytes[0] = 0x8f;
2755 }
2756 else if (i.tm.opcode_modifier.xop0a)
2757 {
2758 m = 0xa;
2759 i.vex.bytes[0] = 0x8f;
2760 }
2761 else
2762 abort ();
2763
2764 /* The high 3 bits of the second VEX byte are the one's complement
2765 of the RXB bits from REX. */
2766 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2767
2768 /* Check the REX.W bit. */
2769 w = (i.rex & REX_W) ? 1 : 0;
2770 if (i.tm.opcode_modifier.vexw0 || i.tm.opcode_modifier.vexw1)
2771 {
2772 if (w)
2773 abort ();
2774
2775 if (i.tm.opcode_modifier.vexw1)
2776 w = 1;
2777 }
2778
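/* Third byte of the 3-byte VEX prefix: W, the vvvv register specifier
(complemented above), vector length L and the implied prefix pp. */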
2779 i.vex.bytes[2] = (w << 7
2780 | register_specifier << 3
2781 | vector_length << 2
2782 | implied_prefix);
2783 }
2784 }
2785
2786 static void
2787 process_immext (void)
2788 {
2789 expressionS *exp;
2790
2791 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2792 {
2793 /* SSE3 instructions have fixed operands with an opcode
2794 suffix which is coded in the same place as an 8-bit immediate
2795 field would be. Here we check those operands and remove them
2796 afterwards. */
2797 unsigned int x;
2798
2799 for (x = 0; x < i.operands; x++)
2800 if (i.op[x].regs->reg_num != x)
2801 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2802 register_prefix, i.op[x].regs->reg_name, x + 1,
2803 i.tm.name);
2804
2805 i.operands = 0;
2806 }
2807
2808 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2809 which is coded in the same place as an 8-bit immediate field
2810 would be. Here we fake an 8-bit immediate operand from the
2811 opcode suffix stored in tm.extension_opcode.
2812
2813 AVX instructions also use this encoding for some
2814 3-operand instructions. */
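/* For example, the AMD 3DNow! instruction "pfadd" is 0x0f 0x0f /r with
suffix byte 0x9e; that byte is emitted below via the fake imm8 operand. */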
2815
2816 gas_assert (i.imm_operands == 0
2817 && (i.operands <= 2
2818 || (i.tm.opcode_modifier.vex
2819 && i.operands <= 4)));
2820
2821 exp = &im_expressions[i.imm_operands++];
2822 i.op[i.operands].imms = exp;
2823 i.types[i.operands] = imm8;
2824 i.operands++;
2825 exp->X_op = O_constant;
2826 exp->X_add_number = i.tm.extension_opcode;
2827 i.tm.extension_opcode = None;
2828 }
2829
2830 /* This is the guts of the machine-dependent assembler. LINE points to a
2831 machine dependent instruction. This function is supposed to emit
2832 the frags/bytes it assembles to. */
2833
2834 void
2835 md_assemble (char *line)
2836 {
2837 unsigned int j;
2838 char mnemonic[MAX_MNEM_SIZE];
2839 const insn_template *t;
2840
2841 /* Initialize globals. */
2842 memset (&i, '\0', sizeof (i));
2843 for (j = 0; j < MAX_OPERANDS; j++)
2844 i.reloc[j] = NO_RELOC;
2845 memset (disp_expressions, '\0', sizeof (disp_expressions));
2846 memset (im_expressions, '\0', sizeof (im_expressions));
2847 save_stack_p = save_stack;
2848
2849 /* First parse an instruction mnemonic & call i386_operand for the operands.
2850 We assume that the scrubber has arranged it so that line[0] is the valid
2851 start of a (possibly prefixed) mnemonic. */
2852
2853 line = parse_insn (line, mnemonic);
2854 if (line == NULL)
2855 return;
2856
2857 line = parse_operands (line, mnemonic);
2858 this_operand = -1;
2859 if (line == NULL)
2860 return;
2861
2862 /* Now we've parsed the mnemonic into a set of templates, and have the
2863 operands at hand. */
2864
2865 /* All Intel opcodes have reversed operands except for "bound" and
2866 "invlpga". We also don't reverse instructions whose first two operands
2867 are immediates ("enter", intersegment "jmp" and "call") so that the
2868 immediate segment precedes the offset, as it does when in AT&T mode. */
2869 if (intel_syntax
2870 && i.operands > 1
2871 && (strcmp (mnemonic, "bound") != 0)
2872 && (strcmp (mnemonic, "invlpga") != 0)
2873 && !(operand_type_check (i.types[0], imm)
2874 && operand_type_check (i.types[1], imm)))
2875 swap_operands ();
2876
2877 /* The order of the immediates should be reversed
2878 for 2 immediates extrq and insertq instructions */
2879 if (i.imm_operands == 2
2880 && (strcmp (mnemonic, "extrq") == 0
2881 || strcmp (mnemonic, "insertq") == 0))
2882 swap_2_operands (0, 1);
2883
2884 if (i.imm_operands)
2885 optimize_imm ();
2886
2887 /* Don't optimize displacement for movabs since it only takes 64bit
2888 displacement. */
2889 if (i.disp_operands
2890 && (flag_code != CODE_64BIT
2891 || strcmp (mnemonic, "movabs") != 0))
2892 optimize_disp ();
2893
2894 /* Next, we find a template that matches the given insn,
2895 making sure the overlap of the given operands types is consistent
2896 with the template operand types. */
2897
2898 if (!(t = match_template ()))
2899 return;
2900
2901 if (sse_check != sse_check_none
2902 && !i.tm.opcode_modifier.noavx
2903 && (i.tm.cpu_flags.bitfield.cpusse
2904 || i.tm.cpu_flags.bitfield.cpusse2
2905 || i.tm.cpu_flags.bitfield.cpusse3
2906 || i.tm.cpu_flags.bitfield.cpussse3
2907 || i.tm.cpu_flags.bitfield.cpusse4_1
2908 || i.tm.cpu_flags.bitfield.cpusse4_2))
2909 {
2910 (sse_check == sse_check_warning
2911 ? as_warn
2912 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
2913 }
2914
2915 /* Zap movzx and movsx suffix. The suffix has been set from
2916 "word ptr" or "byte ptr" on the source operand in Intel syntax
2917 or extracted from mnemonic in AT&T syntax. But we'll use
2918 the destination register to choose the suffix for encoding. */
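/* Masking with ~9 maps movzx (0f b6 / 0f b7) and movsx (0f be / 0f bf)
onto the same value. */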
2919 if ((i.tm.base_opcode & ~9) == 0x0fb6)
2920 {
2921 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
2922 there is no suffix, the default will be byte extension. */
2923 if (i.reg_operands != 2
2924 && !i.suffix
2925 && intel_syntax)
2926 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
2927
2928 i.suffix = 0;
2929 }
2930
2931 if (i.tm.opcode_modifier.fwait)
2932 if (!add_prefix (FWAIT_OPCODE))
2933 return;
2934
2935 /* Check for lock without a lockable instruction. */
2936 if (i.prefix[LOCK_PREFIX]
2937 && (!i.tm.opcode_modifier.islockable
2938 || i.mem_operands == 0))
2939 {
2940 as_bad (_("expecting lockable instruction after `lock'"));
2941 return;
2942 }
2943
2944 /* Check string instruction segment overrides. */
2945 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
2946 {
2947 if (!check_string ())
2948 return;
2949 i.disp_operands = 0;
2950 }
2951
2952 if (!process_suffix ())
2953 return;
2954
2955 /* Update operand types. */
2956 for (j = 0; j < i.operands; j++)
2957 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
2958
2959 /* Make still unresolved immediate matches conform to size of immediate
2960 given in i.suffix. */
2961 if (!finalize_imm ())
2962 return;
2963
2964 if (i.types[0].bitfield.imm1)
2965 i.imm_operands = 0; /* kludge for shift insns. */
2966
2967 /* We only need to check those implicit registers for instructions
2968 with 3 operands or less. */
2969 if (i.operands <= 3)
2970 for (j = 0; j < i.operands; j++)
2971 if (i.types[j].bitfield.inoutportreg
2972 || i.types[j].bitfield.shiftcount
2973 || i.types[j].bitfield.acc
2974 || i.types[j].bitfield.floatacc)
2975 i.reg_operands--;
2976
2977 /* ImmExt should be processed after SSE2AVX. */
2978 if (!i.tm.opcode_modifier.sse2avx
2979 && i.tm.opcode_modifier.immext)
2980 process_immext ();
2981
2982 /* For insns with operands there are more diddles to do to the opcode. */
2983 if (i.operands)
2984 {
2985 if (!process_operands ())
2986 return;
2987 }
2988 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
2989 {
2990 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
2991 as_warn (_("translating to `%sp'"), i.tm.name);
2992 }
2993
2994 if (i.tm.opcode_modifier.vex)
2995 build_vex_prefix (t);
2996
2997 /* Handle conversion of 'int $3' --> special int3 insn. */
2998 if (i.tm.base_opcode == INT_OPCODE && i.op[0].imms->X_add_number == 3)
2999 {
3000 i.tm.base_opcode = INT3_OPCODE;
3001 i.imm_operands = 0;
3002 }
3003
3004 if ((i.tm.opcode_modifier.jump
3005 || i.tm.opcode_modifier.jumpbyte
3006 || i.tm.opcode_modifier.jumpdword)
3007 && i.op[0].disps->X_op == O_constant)
3008 {
3009 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3010 the absolute address given by the constant. Since ix86 jumps and
3011 calls are pc relative, we need to generate a reloc. */
3012 i.op[0].disps->X_add_symbol = &abs_symbol;
3013 i.op[0].disps->X_op = O_symbol;
3014 }
3015
3016 if (i.tm.opcode_modifier.rex64)
3017 i.rex |= REX_W;
3018
3019 /* For 8 bit registers we need an empty rex prefix. Also if the
3020 instruction already has a prefix, we need to convert old
3021 registers to new ones. */
3022
3023 if ((i.types[0].bitfield.reg8
3024 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3025 || (i.types[1].bitfield.reg8
3026 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3027 || ((i.types[0].bitfield.reg8
3028 || i.types[1].bitfield.reg8)
3029 && i.rex != 0))
3030 {
3031 int x;
3032
3033 i.rex |= REX_OPCODE;
3034 for (x = 0; x < 2; x++)
3035 {
3036 /* Look for 8 bit operand that uses old registers. */
3037 if (i.types[x].bitfield.reg8
3038 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3039 {
3040 /* If it is a "hi" register (%ah, %ch, %dh, %bh), give up. */
3041 if (i.op[x].regs->reg_num > 3)
3042 as_bad (_("can't encode register '%s%s' in an "
3043 "instruction requiring REX prefix."),
3044 register_prefix, i.op[x].regs->reg_name);
3045
3046 /* Otherwise it is equivalent to the extended register.
3047 Since the encoding doesn't change this is merely
3048 cosmetic cleanup for debug output. */
3049
3050 i.op[x].regs = i.op[x].regs + 8;
3051 }
3052 }
3053 }
3054
3055 if (i.rex != 0)
3056 add_prefix (REX_OPCODE | i.rex);
3057
3058 /* We are ready to output the insn. */
3059 output_insn ();
3060 }
3061
3062 static char *
3063 parse_insn (char *line, char *mnemonic)
3064 {
3065 char *l = line;
3066 char *token_start = l;
3067 char *mnem_p;
3068 int supported;
3069 const insn_template *t;
3070 char *dot_p = NULL;
3071
3072 /* Non-zero if we found a prefix only acceptable with string insns. */
3073 const char *expecting_string_instruction = NULL;
3074
3075 while (1)
3076 {
3077 mnem_p = mnemonic;
3078 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3079 {
3080 if (*mnem_p == '.')
3081 dot_p = mnem_p;
3082 mnem_p++;
3083 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3084 {
3085 as_bad (_("no such instruction: `%s'"), token_start);
3086 return NULL;
3087 }
3088 l++;
3089 }
3090 if (!is_space_char (*l)
3091 && *l != END_OF_INSN
3092 && (intel_syntax
3093 || (*l != PREFIX_SEPARATOR
3094 && *l != ',')))
3095 {
3096 as_bad (_("invalid character %s in mnemonic"),
3097 output_invalid (*l));
3098 return NULL;
3099 }
3100 if (token_start == l)
3101 {
3102 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3103 as_bad (_("expecting prefix; got nothing"));
3104 else
3105 as_bad (_("expecting mnemonic; got nothing"));
3106 return NULL;
3107 }
3108
3109 /* Look up instruction (or prefix) via hash table. */
3110 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3111
3112 if (*l != END_OF_INSN
3113 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3114 && current_templates
3115 && current_templates->start->opcode_modifier.isprefix)
3116 {
3117 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3118 {
3119 as_bad ((flag_code != CODE_64BIT
3120 ? _("`%s' is only supported in 64-bit mode")
3121 : _("`%s' is not supported in 64-bit mode")),
3122 current_templates->start->name);
3123 return NULL;
3124 }
3125 /* If we are in 16-bit mode, do not allow addr16 or data16.
3126 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3127 if ((current_templates->start->opcode_modifier.size16
3128 || current_templates->start->opcode_modifier.size32)
3129 && flag_code != CODE_64BIT
3130 && (current_templates->start->opcode_modifier.size32
3131 ^ (flag_code == CODE_16BIT)))
3132 {
3133 as_bad (_("redundant %s prefix"),
3134 current_templates->start->name);
3135 return NULL;
3136 }
3137 /* Add prefix, checking for repeated prefixes. */
3138 switch (add_prefix (current_templates->start->base_opcode))
3139 {
3140 case PREFIX_EXIST:
3141 return NULL;
3142 case PREFIX_REP:
3143 expecting_string_instruction = current_templates->start->name;
3144 break;
3145 default:
3146 break;
3147 }
3148 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3149 token_start = ++l;
3150 }
3151 else
3152 break;
3153 }
3154
3155 if (!current_templates)
3156 {
3157 /* Check if we should swap operands in the encoding. */
3158 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3159 i.swap_operand = 1;
3160 else
3161 goto check_suffix;
3162 mnem_p = dot_p;
3163 *dot_p = '\0';
3164 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3165 }
3166
3167 if (!current_templates)
3168 {
3169 check_suffix:
3170 /* See if we can get a match by trimming off a suffix. */
3171 switch (mnem_p[-1])
3172 {
3173 case WORD_MNEM_SUFFIX:
3174 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3175 i.suffix = SHORT_MNEM_SUFFIX;
3176 else
3177 case BYTE_MNEM_SUFFIX:
3178 case QWORD_MNEM_SUFFIX:
3179 i.suffix = mnem_p[-1];
3180 mnem_p[-1] = '\0';
3181 current_templates = (const templates *) hash_find (op_hash,
3182 mnemonic);
3183 break;
3184 case SHORT_MNEM_SUFFIX:
3185 case LONG_MNEM_SUFFIX:
3186 if (!intel_syntax)
3187 {
3188 i.suffix = mnem_p[-1];
3189 mnem_p[-1] = '\0';
3190 current_templates = (const templates *) hash_find (op_hash,
3191 mnemonic);
3192 }
3193 break;
3194
3195 /* Intel Syntax. */
3196 case 'd':
3197 if (intel_syntax)
3198 {
3199 if (intel_float_operand (mnemonic) == 1)
3200 i.suffix = SHORT_MNEM_SUFFIX;
3201 else
3202 i.suffix = LONG_MNEM_SUFFIX;
3203 mnem_p[-1] = '\0';
3204 current_templates = (const templates *) hash_find (op_hash,
3205 mnemonic);
3206 }
3207 break;
3208 }
3209 if (!current_templates)
3210 {
3211 as_bad (_("no such instruction: `%s'"), token_start);
3212 return NULL;
3213 }
3214 }
3215
3216 if (current_templates->start->opcode_modifier.jump
3217 || current_templates->start->opcode_modifier.jumpbyte)
3218 {
3219 /* Check for a branch hint. We allow ",pt" and ",pn" for
3220 predict taken and predict not taken respectively.
3221 I'm not sure that branch hints actually do anything on loop
3222 and jcxz insns (JumpByte) for current Pentium4 chips. They
3223 may work in the future and it doesn't hurt to accept them
3224 now. */
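/* The hints are encoded as segment override prefixes: DS (0x3e) for
predict taken, CS (0x2e) for predict not taken. */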
3225 if (l[0] == ',' && l[1] == 'p')
3226 {
3227 if (l[2] == 't')
3228 {
3229 if (!add_prefix (DS_PREFIX_OPCODE))
3230 return NULL;
3231 l += 3;
3232 }
3233 else if (l[2] == 'n')
3234 {
3235 if (!add_prefix (CS_PREFIX_OPCODE))
3236 return NULL;
3237 l += 3;
3238 }
3239 }
3240 }
3241 /* Any other comma loses. */
3242 if (*l == ',')
3243 {
3244 as_bad (_("invalid character %s in mnemonic"),
3245 output_invalid (*l));
3246 return NULL;
3247 }
3248
3249 /* Check if instruction is supported on specified architecture. */
3250 supported = 0;
3251 for (t = current_templates->start; t < current_templates->end; ++t)
3252 {
3253 supported |= cpu_flags_match (t);
3254 if (supported == CPU_FLAGS_PERFECT_MATCH)
3255 goto skip;
3256 }
3257
3258 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3259 {
3260 as_bad (flag_code == CODE_64BIT
3261 ? _("`%s' is not supported in 64-bit mode")
3262 : _("`%s' is only supported in 64-bit mode"),
3263 current_templates->start->name);
3264 return NULL;
3265 }
3266 if (supported != CPU_FLAGS_PERFECT_MATCH)
3267 {
3268 as_bad (_("`%s' is not supported on `%s%s'"),
3269 current_templates->start->name,
3270 cpu_arch_name ? cpu_arch_name : default_arch,
3271 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3272 return NULL;
3273 }
3274
3275 skip:
3276 if (!cpu_arch_flags.bitfield.cpui386
3277 && (flag_code != CODE_16BIT))
3278 {
3279 as_warn (_("use .code16 to ensure correct addressing mode"));
3280 }
3281
3282 /* Check for rep/repne without a string instruction. */
3283 if (expecting_string_instruction)
3284 {
3285 static templates override;
3286
3287 for (t = current_templates->start; t < current_templates->end; ++t)
3288 if (t->opcode_modifier.isstring)
3289 break;
3290 if (t >= current_templates->end)
3291 {
3292 as_bad (_("expecting string instruction after `%s'"),
3293 expecting_string_instruction);
3294 return NULL;
3295 }
3296 for (override.start = t; t < current_templates->end; ++t)
3297 if (!t->opcode_modifier.isstring)
3298 break;
3299 override.end = t;
3300 current_templates = &override;
3301 }
3302
3303 return l;
3304 }
3305
3306 static char *
3307 parse_operands (char *l, const char *mnemonic)
3308 {
3309 char *token_start;
3310
3311 /* 1 if operand is pending after ','. */
3312 unsigned int expecting_operand = 0;
3313
3314 /* Non-zero if operand parens not balanced. */
3315 unsigned int paren_not_balanced;
3316
3317 while (*l != END_OF_INSN)
3318 {
3319 /* Skip optional white space before operand. */
3320 if (is_space_char (*l))
3321 ++l;
3322 if (!is_operand_char (*l) && *l != END_OF_INSN)
3323 {
3324 as_bad (_("invalid character %s before operand %d"),
3325 output_invalid (*l),
3326 i.operands + 1);
3327 return NULL;
3328 }
3329 token_start = l; /* after white space */
3330 paren_not_balanced = 0;
3331 while (paren_not_balanced || *l != ',')
3332 {
3333 if (*l == END_OF_INSN)
3334 {
3335 if (paren_not_balanced)
3336 {
3337 if (!intel_syntax)
3338 as_bad (_("unbalanced parenthesis in operand %d."),
3339 i.operands + 1);
3340 else
3341 as_bad (_("unbalanced brackets in operand %d."),
3342 i.operands + 1);
3343 return NULL;
3344 }
3345 else
3346 break; /* we are done */
3347 }
3348 else if (!is_operand_char (*l) && !is_space_char (*l))
3349 {
3350 as_bad (_("invalid character %s in operand %d"),
3351 output_invalid (*l),
3352 i.operands + 1);
3353 return NULL;
3354 }
3355 if (!intel_syntax)
3356 {
3357 if (*l == '(')
3358 ++paren_not_balanced;
3359 if (*l == ')')
3360 --paren_not_balanced;
3361 }
3362 else
3363 {
3364 if (*l == '[')
3365 ++paren_not_balanced;
3366 if (*l == ']')
3367 --paren_not_balanced;
3368 }
3369 l++;
3370 }
3371 if (l != token_start)
3372 { /* Yes, we've read in another operand. */
3373 unsigned int operand_ok;
3374 this_operand = i.operands++;
3375 i.types[this_operand].bitfield.unspecified = 1;
3376 if (i.operands > MAX_OPERANDS)
3377 {
3378 as_bad (_("spurious operands; (%d operands/instruction max)"),
3379 MAX_OPERANDS);
3380 return NULL;
3381 }
3382 /* Now parse operand adding info to 'i' as we go along. */
3383 END_STRING_AND_SAVE (l);
3384
3385 if (intel_syntax)
3386 operand_ok =
3387 i386_intel_operand (token_start,
3388 intel_float_operand (mnemonic));
3389 else
3390 operand_ok = i386_att_operand (token_start);
3391
3392 RESTORE_END_STRING (l);
3393 if (!operand_ok)
3394 return NULL;
3395 }
3396 else
3397 {
3398 if (expecting_operand)
3399 {
3400 expecting_operand_after_comma:
3401 as_bad (_("expecting operand after ','; got nothing"));
3402 return NULL;
3403 }
3404 if (*l == ',')
3405 {
3406 as_bad (_("expecting operand before ','; got nothing"));
3407 return NULL;
3408 }
3409 }
3410
3411 /* Now *l must be either ',' or END_OF_INSN. */
3412 if (*l == ',')
3413 {
3414 if (*++l == END_OF_INSN)
3415 {
3416 /* Just skip it, if it's \n complain. */
3417 goto expecting_operand_after_comma;
3418 }
3419 expecting_operand = 1;
3420 }
3421 }
3422 return l;
3423 }
3424
3425 static void
3426 swap_2_operands (int xchg1, int xchg2)
3427 {
3428 union i386_op temp_op;
3429 i386_operand_type temp_type;
3430 enum bfd_reloc_code_real temp_reloc;
3431
3432 temp_type = i.types[xchg2];
3433 i.types[xchg2] = i.types[xchg1];
3434 i.types[xchg1] = temp_type;
3435 temp_op = i.op[xchg2];
3436 i.op[xchg2] = i.op[xchg1];
3437 i.op[xchg1] = temp_op;
3438 temp_reloc = i.reloc[xchg2];
3439 i.reloc[xchg2] = i.reloc[xchg1];
3440 i.reloc[xchg1] = temp_reloc;
3441 }
3442
3443 static void
3444 swap_operands (void)
3445 {
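/* Reverse the operand order; for 4 or 5 operands the inner pair is
swapped first (note the fall-through), then the outermost pair. */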
3446 switch (i.operands)
3447 {
3448 case 5:
3449 case 4:
3450 swap_2_operands (1, i.operands - 2);
3451 case 3:
3452 case 2:
3453 swap_2_operands (0, i.operands - 1);
3454 break;
3455 default:
3456 abort ();
3457 }
3458
3459 if (i.mem_operands == 2)
3460 {
3461 const seg_entry *temp_seg;
3462 temp_seg = i.seg[0];
3463 i.seg[0] = i.seg[1];
3464 i.seg[1] = temp_seg;
3465 }
3466 }
3467
3468 /* Try to ensure constant immediates are represented in the smallest
3469 opcode possible. */
3470 static void
3471 optimize_imm (void)
3472 {
3473 char guess_suffix = 0;
3474 int op;
3475
3476 if (i.suffix)
3477 guess_suffix = i.suffix;
3478 else if (i.reg_operands)
3479 {
3480 /* Figure out a suffix from the last register operand specified.
3481 We can't do this properly yet, i.e. excluding InOutPortReg,
3482 but the following works for instructions with immediates.
3483 In any case, we can't set i.suffix yet. */
3484 for (op = i.operands; --op >= 0;)
3485 if (i.types[op].bitfield.reg8)
3486 {
3487 guess_suffix = BYTE_MNEM_SUFFIX;
3488 break;
3489 }
3490 else if (i.types[op].bitfield.reg16)
3491 {
3492 guess_suffix = WORD_MNEM_SUFFIX;
3493 break;
3494 }
3495 else if (i.types[op].bitfield.reg32)
3496 {
3497 guess_suffix = LONG_MNEM_SUFFIX;
3498 break;
3499 }
3500 else if (i.types[op].bitfield.reg64)
3501 {
3502 guess_suffix = QWORD_MNEM_SUFFIX;
3503 break;
3504 }
3505 }
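/* No register operands: a 16-bit effective operand size (16-bit mode
without a data prefix, or another mode with one) suggests word-sized
immediates. */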
3506 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3507 guess_suffix = WORD_MNEM_SUFFIX;
3508
3509 for (op = i.operands; --op >= 0;)
3510 if (operand_type_check (i.types[op], imm))
3511 {
3512 switch (i.op[op].imms->X_op)
3513 {
3514 case O_constant:
3515 /* If a suffix is given, this operand may be shortened. */
3516 switch (guess_suffix)
3517 {
3518 case LONG_MNEM_SUFFIX:
3519 i.types[op].bitfield.imm32 = 1;
3520 i.types[op].bitfield.imm64 = 1;
3521 break;
3522 case WORD_MNEM_SUFFIX:
3523 i.types[op].bitfield.imm16 = 1;
3524 i.types[op].bitfield.imm32 = 1;
3525 i.types[op].bitfield.imm32s = 1;
3526 i.types[op].bitfield.imm64 = 1;
3527 break;
3528 case BYTE_MNEM_SUFFIX:
3529 i.types[op].bitfield.imm8 = 1;
3530 i.types[op].bitfield.imm8s = 1;
3531 i.types[op].bitfield.imm16 = 1;
3532 i.types[op].bitfield.imm32 = 1;
3533 i.types[op].bitfield.imm32s = 1;
3534 i.types[op].bitfield.imm64 = 1;
3535 break;
3536 }
3537
3538 /* If this operand is at most 16 bits, convert it
3539 to a signed 16 bit number before trying to see
3540 whether it will fit in an even smaller size.
3541 This allows a 16-bit operand such as $0xffe0 to
3542 be recognised as within Imm8S range. */
3543 if ((i.types[op].bitfield.imm16)
3544 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3545 {
3546 i.op[op].imms->X_add_number =
3547 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3548 }
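/* Likewise sign-extend a constant that fits in 32 bits, so that e.g.
$0xffffffff is treated as -1 and can match Imm8S/Imm32S templates. */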
3549 if ((i.types[op].bitfield.imm32)
3550 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3551 == 0))
3552 {
3553 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3554 ^ ((offsetT) 1 << 31))
3555 - ((offsetT) 1 << 31));
3556 }
3557 i.types[op]
3558 = operand_type_or (i.types[op],
3559 smallest_imm_type (i.op[op].imms->X_add_number));
3560
3561 /* We must avoid matching of Imm32 templates when 64bit
3562 only immediate is available. */
3563 if (guess_suffix == QWORD_MNEM_SUFFIX)
3564 i.types[op].bitfield.imm32 = 0;
3565 break;
3566
3567 case O_absent:
3568 case O_register:
3569 abort ();
3570
3571 /* Symbols and expressions. */
3572 default:
3573 /* Convert symbolic operand to proper sizes for matching, but don't
3574 prevent matching a set of insns that only supports sizes other
3575 than those matching the insn suffix. */
3576 {
3577 i386_operand_type mask, allowed;
3578 const insn_template *t;
3579
3580 operand_type_set (&mask, 0);
3581 operand_type_set (&allowed, 0);
3582
3583 for (t = current_templates->start;
3584 t < current_templates->end;
3585 ++t)
3586 allowed = operand_type_or (allowed,
3587 t->operand_types[op]);
3588 switch (guess_suffix)
3589 {
3590 case QWORD_MNEM_SUFFIX:
3591 mask.bitfield.imm64 = 1;
3592 mask.bitfield.imm32s = 1;
3593 break;
3594 case LONG_MNEM_SUFFIX:
3595 mask.bitfield.imm32 = 1;
3596 break;
3597 case WORD_MNEM_SUFFIX:
3598 mask.bitfield.imm16 = 1;
3599 break;
3600 case BYTE_MNEM_SUFFIX:
3601 mask.bitfield.imm8 = 1;
3602 break;
3603 default:
3604 break;
3605 }
3606 allowed = operand_type_and (mask, allowed);
3607 if (!operand_type_all_zero (&allowed))
3608 i.types[op] = operand_type_and (i.types[op], mask);
3609 }
3610 break;
3611 }
3612 }
3613 }
3614
3615 /* Try to use the smallest displacement type too. */
3616 static void
3617 optimize_disp (void)
3618 {
3619 int op;
3620
3621 for (op = i.operands; --op >= 0;)
3622 if (operand_type_check (i.types[op], disp))
3623 {
3624 if (i.op[op].disps->X_op == O_constant)
3625 {
3626 offsetT disp = i.op[op].disps->X_add_number;
3627
3628 if (i.types[op].bitfield.disp16
3629 && (disp & ~(offsetT) 0xffff) == 0)
3630 {
3631 /* If this operand is at most 16 bits, convert
3632 to a signed 16 bit number and don't use 64bit
3633 displacement. */
3634 disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
3635 i.types[op].bitfield.disp64 = 0;
3636 }
3637 if (i.types[op].bitfield.disp32
3638 && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3639 {
3640 /* If this operand is at most 32 bits, convert
3641 to a signed 32 bit number and don't use 64bit
3642 displacement. */
3643 disp &= (((offsetT) 2 << 31) - 1);
3644 disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3645 i.types[op].bitfield.disp64 = 0;
3646 }
3647 if (!disp && i.types[op].bitfield.baseindex)
3648 {
3649 i.types[op].bitfield.disp8 = 0;
3650 i.types[op].bitfield.disp16 = 0;
3651 i.types[op].bitfield.disp32 = 0;
3652 i.types[op].bitfield.disp32s = 0;
3653 i.types[op].bitfield.disp64 = 0;
3654 i.op[op].disps = 0;
3655 i.disp_operands--;
3656 }
3657 else if (flag_code == CODE_64BIT)
3658 {
3659 if (fits_in_signed_long (disp))
3660 {
3661 i.types[op].bitfield.disp64 = 0;
3662 i.types[op].bitfield.disp32s = 1;
3663 }
3664 if (i.prefix[ADDR_PREFIX]
3665 && fits_in_unsigned_long (disp))
3666 i.types[op].bitfield.disp32 = 1;
3667 }
3668 if ((i.types[op].bitfield.disp32
3669 || i.types[op].bitfield.disp32s
3670 || i.types[op].bitfield.disp16)
3671 && fits_in_signed_byte (disp))
3672 i.types[op].bitfield.disp8 = 1;
3673 }
3674 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3675 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3676 {
3677 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3678 i.op[op].disps, 0, i.reloc[op]);
3679 i.types[op].bitfield.disp8 = 0;
3680 i.types[op].bitfield.disp16 = 0;
3681 i.types[op].bitfield.disp32 = 0;
3682 i.types[op].bitfield.disp32s = 0;
3683 i.types[op].bitfield.disp64 = 0;
3684 }
3685 else
3686 /* We only support 64bit displacement on constants. */
3687 i.types[op].bitfield.disp64 = 0;
3688 }
3689 }
3690
3691 static const insn_template *
3692 match_template (void)
3693 {
3694 /* Points to template once we've found it. */
3695 const insn_template *t;
3696 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3697 i386_operand_type overlap4;
3698 unsigned int found_reverse_match;
3699 i386_opcode_modifier suffix_check;
3700 i386_operand_type operand_types [MAX_OPERANDS];
3701 int addr_prefix_disp;
3702 unsigned int j;
3703 unsigned int found_cpu_match;
3704 unsigned int check_register;
3705
3706 #if MAX_OPERANDS != 5
3707 # error "MAX_OPERANDS must be 5."
3708 #endif
3709
3710 found_reverse_match = 0;
3711 addr_prefix_disp = -1;
3712
3713 memset (&suffix_check, 0, sizeof (suffix_check));
3714 if (i.suffix == BYTE_MNEM_SUFFIX)
3715 suffix_check.no_bsuf = 1;
3716 else if (i.suffix == WORD_MNEM_SUFFIX)
3717 suffix_check.no_wsuf = 1;
3718 else if (i.suffix == SHORT_MNEM_SUFFIX)
3719 suffix_check.no_ssuf = 1;
3720 else if (i.suffix == LONG_MNEM_SUFFIX)
3721 suffix_check.no_lsuf = 1;
3722 else if (i.suffix == QWORD_MNEM_SUFFIX)
3723 suffix_check.no_qsuf = 1;
3724 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3725 suffix_check.no_ldsuf = 1;
3726
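/* suffix_check sets the one No_?Suf flag matching the suffix we parsed;
templates carrying the same flag cannot encode that operand size and
are rejected below. */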
3727 for (t = current_templates->start; t < current_templates->end; t++)
3728 {
3729 addr_prefix_disp = -1;
3730
3731 /* Must have right number of operands. */
3732 if (i.operands != t->operands)
3733 continue;
3734
3735 /* Check processor support. */
3736 found_cpu_match = (cpu_flags_match (t)
3737 == CPU_FLAGS_PERFECT_MATCH);
3738 if (!found_cpu_match)
3739 continue;
3740
3741 /* Check old gcc support. */
3742 if (!old_gcc && t->opcode_modifier.oldgcc)
3743 continue;
3744
3745 /* Check AT&T mnemonic. */
3746 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3747 continue;
3748
3749 /* Skip templates restricted to the other syntax (AT&T-only or Intel-only). */
3750 if ((intel_syntax && t->opcode_modifier.attsyntax)
3751 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3752 continue;
3753
3754 /* Check the suffix, except for some instructions in intel mode. */
3755 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3756 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3757 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3758 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3759 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3760 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3761 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3762 continue;
3763
3764 if (!operand_size_match (t))
3765 continue;
3766
3767 for (j = 0; j < MAX_OPERANDS; j++)
3768 operand_types[j] = t->operand_types[j];
3769
3770 /* In general, don't allow 64-bit operands in 32-bit mode. */
3771 if (i.suffix == QWORD_MNEM_SUFFIX
3772 && flag_code != CODE_64BIT
3773 && (intel_syntax
3774 ? (!t->opcode_modifier.ignoresize
3775 && !intel_float_operand (t->name))
3776 : intel_float_operand (t->name) != 2)
3777 && ((!operand_types[0].bitfield.regmmx
3778 && !operand_types[0].bitfield.regxmm
3779 && !operand_types[0].bitfield.regymm)
3780 || (!operand_types[t->operands > 1].bitfield.regmmx
3781 && !!operand_types[t->operands > 1].bitfield.regxmm
3782 && !!operand_types[t->operands > 1].bitfield.regymm))
3783 && (t->base_opcode != 0x0fc7
3784 || t->extension_opcode != 1 /* cmpxchg8b */))
3785 continue;
3786
3787 /* In general, don't allow 32-bit operands on pre-386. */
3788 else if (i.suffix == LONG_MNEM_SUFFIX
3789 && !cpu_arch_flags.bitfield.cpui386
3790 && (intel_syntax
3791 ? (!t->opcode_modifier.ignoresize
3792 && !intel_float_operand (t->name))
3793 : intel_float_operand (t->name) != 2)
3794 && ((!operand_types[0].bitfield.regmmx
3795 && !operand_types[0].bitfield.regxmm)
3796 || (!operand_types[t->operands > 1].bitfield.regmmx
3797 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3798 continue;
3799
3800 /* Do not verify operands when there are none. */
3801 else
3802 {
3803 if (!t->operands)
3804 /* We've found a match; break out of loop. */
3805 break;
3806 }
3807
3808 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3809 into Disp32/Disp16/Disp32 operand. */
3810 if (i.prefix[ADDR_PREFIX] != 0)
3811 {
3812 /* There should be only one Disp operand. */
3813 switch (flag_code)
3814 {
3815 case CODE_16BIT:
3816 for (j = 0; j < MAX_OPERANDS; j++)
3817 {
3818 if (operand_types[j].bitfield.disp16)
3819 {
3820 addr_prefix_disp = j;
3821 operand_types[j].bitfield.disp32 = 1;
3822 operand_types[j].bitfield.disp16 = 0;
3823 break;
3824 }
3825 }
3826 break;
3827 case CODE_32BIT:
3828 for (j = 0; j < MAX_OPERANDS; j++)
3829 {
3830 if (operand_types[j].bitfield.disp32)
3831 {
3832 addr_prefix_disp = j;
3833 operand_types[j].bitfield.disp32 = 0;
3834 operand_types[j].bitfield.disp16 = 1;
3835 break;
3836 }
3837 }
3838 break;
3839 case CODE_64BIT:
3840 for (j = 0; j < MAX_OPERANDS; j++)
3841 {
3842 if (operand_types[j].bitfield.disp64)
3843 {
3844 addr_prefix_disp = j;
3845 operand_types[j].bitfield.disp64 = 0;
3846 operand_types[j].bitfield.disp32 = 1;
3847 break;
3848 }
3849 }
3850 break;
3851 }
3852 }
3853
3854 /* We check register size only if size of operands can be
3855 encoded the canonical way. */
3856 check_register = t->opcode_modifier.w;
3857 overlap0 = operand_type_and (i.types[0], operand_types[0]);
3858 switch (t->operands)
3859 {
3860 case 1:
3861 if (!operand_type_match (overlap0, i.types[0]))
3862 continue;
3863 break;
3864 case 2:
3865 /* xchg %eax, %eax is a special case. It is an alias for nop
3866 only in 32bit mode and we can use opcode 0x90. In 64bit
3867 mode, we can't use 0x90 for xchg %eax, %eax since it should
3868 zero-extend %eax to %rax. */
3869 if (flag_code == CODE_64BIT
3870 && t->base_opcode == 0x90
3871 && operand_type_equal (&i.types [0], &acc32)
3872 && operand_type_equal (&i.types [1], &acc32))
3873 continue;
3874 if (i.swap_operand)
3875 {
3876 /* If we swap operands in the encoding, we either match
3877 the next template or reverse the direction of operands. */
3878 if (t->opcode_modifier.s)
3879 continue;
3880 else if (t->opcode_modifier.d)
3881 goto check_reverse;
3882 }
3883
3884 case 3:
3885 /* If we swap operands in the encoding, we match the next template. */
3886 if (i.swap_operand && t->opcode_modifier.s)
3887 continue;
3888 case 4:
3889 case 5:
3890 overlap1 = operand_type_and (i.types[1], operand_types[1]);
3891 if (!operand_type_match (overlap0, i.types[0])
3892 || !operand_type_match (overlap1, i.types[1])
3893 || (check_register
3894 && !operand_type_register_match (overlap0, i.types[0],
3895 operand_types[0],
3896 overlap1, i.types[1],
3897 operand_types[1])))
3898 {
3899 /* Check if other direction is valid ... */
3900 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
3901 continue;
3902
3903 check_reverse:
3904 /* Try reversing direction of operands. */
3905 overlap0 = operand_type_and (i.types[0], operand_types[1]);
3906 overlap1 = operand_type_and (i.types[1], operand_types[0]);
3907 if (!operand_type_match (overlap0, i.types[0])
3908 || !operand_type_match (overlap1, i.types[1])
3909 || (check_register
3910 && !operand_type_register_match (overlap0,
3911 i.types[0],
3912 operand_types[1],
3913 overlap1,
3914 i.types[1],
3915 operand_types[0])))
3916 {
3917 /* Does not match either direction. */
3918 continue;
3919 }
3920 /* found_reverse_match holds which of D or FloatDR
3921 we've found. */
3922 if (t->opcode_modifier.d)
3923 found_reverse_match = Opcode_D;
3924 else if (t->opcode_modifier.floatd)
3925 found_reverse_match = Opcode_FloatD;
3926 else
3927 found_reverse_match = 0;
3928 if (t->opcode_modifier.floatr)
3929 found_reverse_match |= Opcode_FloatR;
3930 }
3931 else
3932 {
3933 /* Found a forward 2 operand match here. */
3934 switch (t->operands)
3935 {
3936 case 5:
3937 overlap4 = operand_type_and (i.types[4],
3938 operand_types[4]);
3939 case 4:
3940 overlap3 = operand_type_and (i.types[3],
3941 operand_types[3]);
3942 case 3:
3943 overlap2 = operand_type_and (i.types[2],
3944 operand_types[2]);
3945 break;
3946 }
3947
3948 switch (t->operands)
3949 {
3950 case 5:
3951 if (!operand_type_match (overlap4, i.types[4])
3952 || !operand_type_register_match (overlap3,
3953 i.types[3],
3954 operand_types[3],
3955 overlap4,
3956 i.types[4],
3957 operand_types[4]))
3958 continue;
3959 case 4:
3960 if (!operand_type_match (overlap3, i.types[3])
3961 || (check_register
3962 && !operand_type_register_match (overlap2,
3963 i.types[2],
3964 operand_types[2],
3965 overlap3,
3966 i.types[3],
3967 operand_types[3])))
3968 continue;
3969 case 3:
3970 /* Here we make use of the fact that there are no
3971 reverse match 3 operand instructions, and all 3
3972 operand instructions only need to be checked for
3973 register consistency between operands 2 and 3. */
3974 if (!operand_type_match (overlap2, i.types[2])
3975 || (check_register
3976 && !operand_type_register_match (overlap1,
3977 i.types[1],
3978 operand_types[1],
3979 overlap2,
3980 i.types[2],
3981 operand_types[2])))
3982 continue;
3983 break;
3984 }
3985 }
3986 /* Found either forward/reverse 2, 3 or 4 operand match here:
3987 slip through to break. */
3988 }
3989 if (!found_cpu_match)
3990 {
3991 found_reverse_match = 0;
3992 continue;
3993 }
3994
3995 /* We've found a match; break out of loop. */
3996 break;
3997 }
3998
3999 if (t == current_templates->end)
4000 {
4001 /* We found no match. */
4002 if (intel_syntax)
4003 as_bad (_("ambiguous operand size or operands invalid for `%s'"),
4004 current_templates->start->name);
4005 else
4006 as_bad (_("suffix or operands invalid for `%s'"),
4007 current_templates->start->name);
4008 return NULL;
4009 }
4010
4011 if (!quiet_warnings)
4012 {
4013 if (!intel_syntax
4014 && (i.types[0].bitfield.jumpabsolute
4015 != operand_types[0].bitfield.jumpabsolute))
4016 {
4017 as_warn (_("indirect %s without `*'"), t->name);
4018 }
4019
4020 if (t->opcode_modifier.isprefix
4021 && t->opcode_modifier.ignoresize)
4022 {
4023 /* Warn them that a data or address size prefix doesn't
4024 affect assembly of the next line of code. */
4025 as_warn (_("stand-alone `%s' prefix"), t->name);
4026 }
4027 }
4028
4029 /* Copy the template we found. */
4030 i.tm = *t;
4031
4032 if (addr_prefix_disp != -1)
4033 i.tm.operand_types[addr_prefix_disp]
4034 = operand_types[addr_prefix_disp];
4035
4036 if (found_reverse_match)
4037 {
4038 /* If we found a reverse match we must alter the opcode
4039 direction bit. found_reverse_match holds bits to change
4040 (different for int & float insns). */
4041
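       /* Toggling the direction bit swaps which operand is encoded in the
 	 ModRM reg field and which in the r/m field; the FloatD/FloatR bits
 	 do the corresponding swap for two-operand FPU insns.  */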
4042 i.tm.base_opcode ^= found_reverse_match;
4043
4044 i.tm.operand_types[0] = operand_types[1];
4045 i.tm.operand_types[1] = operand_types[0];
4046 }
4047
4048 return t;
4049 }
4050
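 /* Check the segment overrides of a string insn: the operand whose
    template carries EsSeg must use the %es segment, and, when needed, a
    legal override on the other operand is copied into i.seg[0] where the
    common code expects it.  Return 0 on error, 1 on success.  */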
4051 static int
4052 check_string (void)
4053 {
4054 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4055 if (i.tm.operand_types[mem_op].bitfield.esseg)
4056 {
4057 if (i.seg[0] != NULL && i.seg[0] != &es)
4058 {
4059 as_bad (_("`%s' operand %d must use `%ses' segment"),
4060 i.tm.name,
4061 mem_op + 1,
4062 register_prefix);
4063 return 0;
4064 }
4065 /* There's only ever one segment override allowed per instruction.
4066 This instruction possibly has a legal segment override on the
4067 second operand, so copy the segment to where non-string
4068 instructions store it, allowing common code. */
4069 i.seg[0] = i.seg[1];
4070 }
4071 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4072 {
4073 if (i.seg[1] != NULL && i.seg[1] != &es)
4074 {
4075 as_bad (_("`%s' operand %d must use `%ses' segment"),
4076 i.tm.name,
4077 mem_op + 2,
4078 register_prefix);
4079 return 0;
4080 }
4081 }
4082 return 1;
4083 }
4084
4085 static int
4086 process_suffix (void)
4087 {
4088 /* If matched instruction specifies an explicit instruction mnemonic
4089 suffix, use it. */
4090 if (i.tm.opcode_modifier.size16)
4091 i.suffix = WORD_MNEM_SUFFIX;
4092 else if (i.tm.opcode_modifier.size32)
4093 i.suffix = LONG_MNEM_SUFFIX;
4094 else if (i.tm.opcode_modifier.size64)
4095 i.suffix = QWORD_MNEM_SUFFIX;
4096 else if (i.reg_operands)
4097 {
4098 /* If there's no instruction mnemonic suffix we try to invent one
4099 based on register operands. */
4100 if (!i.suffix)
4101 {
4102 	  /* We take i.suffix from the last register operand specified.
4103 	     The destination register type is more significant than the
4104 	     source register type.  crc32 in SSE4.2 prefers the source
4105 	     register type.  */
4106 if (i.tm.base_opcode == 0xf20f38f1)
4107 {
4108 if (i.types[0].bitfield.reg16)
4109 i.suffix = WORD_MNEM_SUFFIX;
4110 else if (i.types[0].bitfield.reg32)
4111 i.suffix = LONG_MNEM_SUFFIX;
4112 else if (i.types[0].bitfield.reg64)
4113 i.suffix = QWORD_MNEM_SUFFIX;
4114 }
4115 else if (i.tm.base_opcode == 0xf20f38f0)
4116 {
4117 if (i.types[0].bitfield.reg8)
4118 i.suffix = BYTE_MNEM_SUFFIX;
4119 }
4120
4121 if (!i.suffix)
4122 {
4123 int op;
4124
4125 if (i.tm.base_opcode == 0xf20f38f1
4126 || i.tm.base_opcode == 0xf20f38f0)
4127 {
4128 /* We have to know the operand size for crc32. */
4129 as_bad (_("ambiguous memory operand size for `%s`"),
4130 i.tm.name);
4131 return 0;
4132 }
4133
4134 for (op = i.operands; --op >= 0;)
4135 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4136 {
4137 if (i.types[op].bitfield.reg8)
4138 {
4139 i.suffix = BYTE_MNEM_SUFFIX;
4140 break;
4141 }
4142 else if (i.types[op].bitfield.reg16)
4143 {
4144 i.suffix = WORD_MNEM_SUFFIX;
4145 break;
4146 }
4147 else if (i.types[op].bitfield.reg32)
4148 {
4149 i.suffix = LONG_MNEM_SUFFIX;
4150 break;
4151 }
4152 else if (i.types[op].bitfield.reg64)
4153 {
4154 i.suffix = QWORD_MNEM_SUFFIX;
4155 break;
4156 }
4157 }
4158 }
4159 }
4160 else if (i.suffix == BYTE_MNEM_SUFFIX)
4161 {
4162 if (!check_byte_reg ())
4163 return 0;
4164 }
4165 else if (i.suffix == LONG_MNEM_SUFFIX)
4166 {
4167 if (!check_long_reg ())
4168 return 0;
4169 }
4170 else if (i.suffix == QWORD_MNEM_SUFFIX)
4171 {
4172 if (intel_syntax
4173 && i.tm.opcode_modifier.ignoresize
4174 && i.tm.opcode_modifier.no_qsuf)
4175 i.suffix = 0;
4176 else if (!check_qword_reg ())
4177 return 0;
4178 }
4179 else if (i.suffix == WORD_MNEM_SUFFIX)
4180 {
4181 if (!check_word_reg ())
4182 return 0;
4183 }
4184 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4185 || i.suffix == YMMWORD_MNEM_SUFFIX)
4186 {
4187 /* Skip if the instruction has x/y suffix. match_template
4188 should check if it is a valid suffix. */
4189 }
4190 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4191 /* Do nothing if the instruction is going to ignore the prefix. */
4192 ;
4193 else
4194 abort ();
4195 }
4196 else if (i.tm.opcode_modifier.defaultsize
4197 && !i.suffix
4198 /* exclude fldenv/frstor/fsave/fstenv */
4199 && i.tm.opcode_modifier.no_ssuf)
4200 {
4201 i.suffix = stackop_size;
4202 }
4203 else if (intel_syntax
4204 && !i.suffix
4205 && (i.tm.operand_types[0].bitfield.jumpabsolute
4206 || i.tm.opcode_modifier.jumpbyte
4207 || i.tm.opcode_modifier.jumpintersegment
4208 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4209 && i.tm.extension_opcode <= 3)))
4210 {
4211 switch (flag_code)
4212 {
4213 case CODE_64BIT:
4214 if (!i.tm.opcode_modifier.no_qsuf)
4215 {
4216 i.suffix = QWORD_MNEM_SUFFIX;
4217 break;
4218 }
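 	      /* Fall through: with no_qsuf set, try the 32-bit suffix instead.  */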
4219 case CODE_32BIT:
4220 if (!i.tm.opcode_modifier.no_lsuf)
4221 i.suffix = LONG_MNEM_SUFFIX;
4222 break;
4223 case CODE_16BIT:
4224 if (!i.tm.opcode_modifier.no_wsuf)
4225 i.suffix = WORD_MNEM_SUFFIX;
4226 break;
4227 }
4228 }
4229
4230 if (!i.suffix)
4231 {
4232 if (!intel_syntax)
4233 {
4234 if (i.tm.opcode_modifier.w)
4235 {
4236 as_bad (_("no instruction mnemonic suffix given and "
4237 "no register operands; can't size instruction"));
4238 return 0;
4239 }
4240 }
4241 else
4242 {
4243 unsigned int suffixes;
4244
4245 suffixes = !i.tm.opcode_modifier.no_bsuf;
4246 if (!i.tm.opcode_modifier.no_wsuf)
4247 suffixes |= 1 << 1;
4248 if (!i.tm.opcode_modifier.no_lsuf)
4249 suffixes |= 1 << 2;
4250 if (!i.tm.opcode_modifier.no_ldsuf)
4251 suffixes |= 1 << 3;
4252 if (!i.tm.opcode_modifier.no_ssuf)
4253 suffixes |= 1 << 4;
4254 if (!i.tm.opcode_modifier.no_qsuf)
4255 suffixes |= 1 << 5;
4256
4257 	  /* There is more than one possible suffix match.  */
4258 if (i.tm.opcode_modifier.w
4259 || ((suffixes & (suffixes - 1))
4260 && !i.tm.opcode_modifier.defaultsize
4261 && !i.tm.opcode_modifier.ignoresize))
4262 {
4263 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4264 return 0;
4265 }
4266 }
4267 }
4268
4269   /* Change the opcode based on the operand size given by i.suffix.
4270      We don't need to change things for byte insns.  */
4271
4272 if (i.suffix
4273 && i.suffix != BYTE_MNEM_SUFFIX
4274 && i.suffix != XMMWORD_MNEM_SUFFIX
4275 && i.suffix != YMMWORD_MNEM_SUFFIX)
4276 {
4277 /* It's not a byte, select word/dword operation. */
4278 if (i.tm.opcode_modifier.w)
4279 {
4280 if (i.tm.opcode_modifier.shortform)
4281 i.tm.base_opcode |= 8;
4282 else
4283 i.tm.base_opcode |= 1;
4284 }
4285
4286 /* Now select between word & dword operations via the operand
4287 size prefix, except for instructions that will ignore this
4288 prefix anyway. */
4289 if (i.tm.opcode_modifier.addrprefixop0)
4290 {
4291 /* The address size override prefix changes the size of the
4292 first operand. */
4293 if ((flag_code == CODE_32BIT
4294 && i.op->regs[0].reg_type.bitfield.reg16)
4295 || (flag_code != CODE_32BIT
4296 && i.op->regs[0].reg_type.bitfield.reg32))
4297 if (!add_prefix (ADDR_PREFIX_OPCODE))
4298 return 0;
4299 }
4300 else if (i.suffix != QWORD_MNEM_SUFFIX
4301 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4302 && !i.tm.opcode_modifier.ignoresize
4303 && !i.tm.opcode_modifier.floatmf
4304 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4305 || (flag_code == CODE_64BIT
4306 && i.tm.opcode_modifier.jumpbyte)))
4307 {
4308 unsigned int prefix = DATA_PREFIX_OPCODE;
4309
4310 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4311 prefix = ADDR_PREFIX_OPCODE;
4312
4313 if (!add_prefix (prefix))
4314 return 0;
4315 }
4316
4317 /* Set mode64 for an operand. */
4318 if (i.suffix == QWORD_MNEM_SUFFIX
4319 && flag_code == CODE_64BIT
4320 && !i.tm.opcode_modifier.norex64)
4321 {
4322 	  /* Special case for xchg %rax,%rax.  It is a NOP and doesn't
4323 need rex64. cmpxchg8b is also a special case. */
4324 if (! (i.operands == 2
4325 && i.tm.base_opcode == 0x90
4326 && i.tm.extension_opcode == None
4327 && operand_type_equal (&i.types [0], &acc64)
4328 && operand_type_equal (&i.types [1], &acc64))
4329 && ! (i.operands == 1
4330 && i.tm.base_opcode == 0xfc7
4331 && i.tm.extension_opcode == 1
4332 && !operand_type_check (i.types [0], reg)
4333 && operand_type_check (i.types [0], anymem)))
4334 i.rex |= REX_W;
4335 }
4336
4337 /* Size floating point instruction. */
4338 if (i.suffix == LONG_MNEM_SUFFIX)
4339 if (i.tm.opcode_modifier.floatmf)
4340 i.tm.base_opcode ^= 4;
4341 }
4342
4343 return 1;
4344 }
4345
4346 static int
4347 check_byte_reg (void)
4348 {
4349 int op;
4350
4351 for (op = i.operands; --op >= 0;)
4352 {
4353 /* If this is an eight bit register, it's OK. If it's the 16 or
4354 32 bit version of an eight bit register, we will just use the
4355 low portion, and that's OK too. */
4356 if (i.types[op].bitfield.reg8)
4357 continue;
4358
4359 /* Don't generate this warning if not needed. */
4360 if (intel_syntax && i.tm.opcode_modifier.byteokintel)
4361 continue;
4362
4363 /* crc32 doesn't generate this warning. */
4364 if (i.tm.base_opcode == 0xf20f38f0)
4365 continue;
4366
4367 if ((i.types[op].bitfield.reg16
4368 || i.types[op].bitfield.reg32
4369 || i.types[op].bitfield.reg64)
4370 && i.op[op].regs->reg_num < 4)
4371 {
4372 /* Prohibit these changes in the 64bit mode, since the
4373 lowering is more complicated. */
4374 if (flag_code == CODE_64BIT
4375 && !i.tm.operand_types[op].bitfield.inoutportreg)
4376 {
4377 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4378 register_prefix, i.op[op].regs->reg_name,
4379 i.suffix);
4380 return 0;
4381 }
4382 #if REGISTER_WARNINGS
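 	  /* The REGNAM_* offsets rely on the layout of the register table to
 	     map the 16- or 32-bit register entry to the 8-bit register of
 	     the same number for the warning message below.  */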
4383 if (!quiet_warnings
4384 && !i.tm.operand_types[op].bitfield.inoutportreg)
4385 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4386 register_prefix,
4387 (i.op[op].regs + (i.types[op].bitfield.reg16
4388 ? REGNAM_AL - REGNAM_AX
4389 : REGNAM_AL - REGNAM_EAX))->reg_name,
4390 register_prefix,
4391 i.op[op].regs->reg_name,
4392 i.suffix);
4393 #endif
4394 continue;
4395 }
4396 /* Any other register is bad. */
4397 if (i.types[op].bitfield.reg16
4398 || i.types[op].bitfield.reg32
4399 || i.types[op].bitfield.reg64
4400 || i.types[op].bitfield.regmmx
4401 || i.types[op].bitfield.regxmm
4402 || i.types[op].bitfield.regymm
4403 || i.types[op].bitfield.sreg2
4404 || i.types[op].bitfield.sreg3
4405 || i.types[op].bitfield.control
4406 || i.types[op].bitfield.debug
4407 || i.types[op].bitfield.test
4408 || i.types[op].bitfield.floatreg
4409 || i.types[op].bitfield.floatacc)
4410 {
4411 as_bad (_("`%s%s' not allowed with `%s%c'"),
4412 register_prefix,
4413 i.op[op].regs->reg_name,
4414 i.tm.name,
4415 i.suffix);
4416 return 0;
4417 }
4418 }
4419 return 1;
4420 }
4421
4422 static int
4423 check_long_reg (void)
4424 {
4425 int op;
4426
4427 for (op = i.operands; --op >= 0;)
4428 /* Reject eight bit registers, except where the template requires
4429        them (e.g. movzb).  */
4430 if (i.types[op].bitfield.reg8
4431 && (i.tm.operand_types[op].bitfield.reg16
4432 || i.tm.operand_types[op].bitfield.reg32
4433 || i.tm.operand_types[op].bitfield.acc))
4434 {
4435 as_bad (_("`%s%s' not allowed with `%s%c'"),
4436 register_prefix,
4437 i.op[op].regs->reg_name,
4438 i.tm.name,
4439 i.suffix);
4440 return 0;
4441 }
4442 /* Warn if the e prefix on a general reg is missing. */
4443 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4444 && i.types[op].bitfield.reg16
4445 && (i.tm.operand_types[op].bitfield.reg32
4446 || i.tm.operand_types[op].bitfield.acc))
4447 {
4448 /* Prohibit these changes in the 64bit mode, since the
4449 lowering is more complicated. */
4450 if (flag_code == CODE_64BIT)
4451 {
4452 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4453 register_prefix, i.op[op].regs->reg_name,
4454 i.suffix);
4455 return 0;
4456 }
4457 #if REGISTER_WARNINGS
4458 else
4459 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4460 register_prefix,
4461 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4462 register_prefix,
4463 i.op[op].regs->reg_name,
4464 i.suffix);
4465 #endif
4466 }
4467 /* Warn if the r prefix on a general reg is missing. */
4468 else if (i.types[op].bitfield.reg64
4469 && (i.tm.operand_types[op].bitfield.reg32
4470 || i.tm.operand_types[op].bitfield.acc))
4471 {
4472 if (intel_syntax
4473 && i.tm.opcode_modifier.toqword
4474 && !i.types[0].bitfield.regxmm)
4475 {
4476 /* Convert to QWORD. We want REX byte. */
4477 i.suffix = QWORD_MNEM_SUFFIX;
4478 }
4479 else
4480 {
4481 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4482 register_prefix, i.op[op].regs->reg_name,
4483 i.suffix);
4484 return 0;
4485 }
4486 }
4487 return 1;
4488 }
4489
4490 static int
4491 check_qword_reg (void)
4492 {
4493 int op;
4494
4495 for (op = i.operands; --op >= 0; )
4496 /* Reject eight bit registers, except where the template requires
4497        them (e.g. movzb).  */
4498 if (i.types[op].bitfield.reg8
4499 && (i.tm.operand_types[op].bitfield.reg16
4500 || i.tm.operand_types[op].bitfield.reg32
4501 || i.tm.operand_types[op].bitfield.acc))
4502 {
4503 as_bad (_("`%s%s' not allowed with `%s%c'"),
4504 register_prefix,
4505 i.op[op].regs->reg_name,
4506 i.tm.name,
4507 i.suffix);
4508 return 0;
4509 }
4510     /* Warn if the r prefix on a general reg is missing.  */
4511 else if ((i.types[op].bitfield.reg16
4512 || i.types[op].bitfield.reg32)
4513 && (i.tm.operand_types[op].bitfield.reg32
4514 || i.tm.operand_types[op].bitfield.acc))
4515 {
4516 	/* Intel syntax may allow converting the operation to dword here;
4517 	   otherwise this register does not fit the `q' suffix.  */
4518 if (intel_syntax
4519 && i.tm.opcode_modifier.todword
4520 && !i.types[0].bitfield.regxmm)
4521 {
4522 /* Convert to DWORD. We don't want REX byte. */
4523 i.suffix = LONG_MNEM_SUFFIX;
4524 }
4525 else
4526 {
4527 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4528 register_prefix, i.op[op].regs->reg_name,
4529 i.suffix);
4530 return 0;
4531 }
4532 }
4533 return 1;
4534 }
4535
4536 static int
4537 check_word_reg (void)
4538 {
4539 int op;
4540 for (op = i.operands; --op >= 0;)
4541 /* Reject eight bit registers, except where the template requires
4542        them (e.g. movzb).  */
4543 if (i.types[op].bitfield.reg8
4544 && (i.tm.operand_types[op].bitfield.reg16
4545 || i.tm.operand_types[op].bitfield.reg32
4546 || i.tm.operand_types[op].bitfield.acc))
4547 {
4548 as_bad (_("`%s%s' not allowed with `%s%c'"),
4549 register_prefix,
4550 i.op[op].regs->reg_name,
4551 i.tm.name,
4552 i.suffix);
4553 return 0;
4554 }
4555 /* Warn if the e prefix on a general reg is present. */
4556 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4557 && i.types[op].bitfield.reg32
4558 && (i.tm.operand_types[op].bitfield.reg16
4559 || i.tm.operand_types[op].bitfield.acc))
4560 {
4561 /* Prohibit these changes in the 64bit mode, since the
4562 lowering is more complicated. */
4563 if (flag_code == CODE_64BIT)
4564 {
4565 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4566 register_prefix, i.op[op].regs->reg_name,
4567 i.suffix);
4568 return 0;
4569 }
4570 else
4571 #if REGISTER_WARNINGS
4572 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4573 register_prefix,
4574 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4575 register_prefix,
4576 i.op[op].regs->reg_name,
4577 i.suffix);
4578 #endif
4579 }
4580 return 1;
4581 }
4582
4583 static int
4584 update_imm (unsigned int j)
4585 {
4586 i386_operand_type overlap = i.types[j];
4587 if ((overlap.bitfield.imm8
4588 || overlap.bitfield.imm8s
4589 || overlap.bitfield.imm16
4590 || overlap.bitfield.imm32
4591 || overlap.bitfield.imm32s
4592 || overlap.bitfield.imm64)
4593 && !operand_type_equal (&overlap, &imm8)
4594 && !operand_type_equal (&overlap, &imm8s)
4595 && !operand_type_equal (&overlap, &imm16)
4596 && !operand_type_equal (&overlap, &imm32)
4597 && !operand_type_equal (&overlap, &imm32s)
4598 && !operand_type_equal (&overlap, &imm64))
4599 {
4600 if (i.suffix)
4601 {
4602 i386_operand_type temp;
4603
4604 operand_type_set (&temp, 0);
4605 if (i.suffix == BYTE_MNEM_SUFFIX)
4606 {
4607 temp.bitfield.imm8 = overlap.bitfield.imm8;
4608 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4609 }
4610 else if (i.suffix == WORD_MNEM_SUFFIX)
4611 temp.bitfield.imm16 = overlap.bitfield.imm16;
4612 else if (i.suffix == QWORD_MNEM_SUFFIX)
4613 {
4614 temp.bitfield.imm64 = overlap.bitfield.imm64;
4615 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4616 }
4617 else
4618 temp.bitfield.imm32 = overlap.bitfield.imm32;
4619 overlap = temp;
4620 }
4621 else if (operand_type_equal (&overlap, &imm16_32_32s)
4622 || operand_type_equal (&overlap, &imm16_32)
4623 || operand_type_equal (&overlap, &imm16_32s))
4624 {
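 	  /* The immediate is 16 bits exactly when one, but not both, of
 	     16-bit mode and the operand size prefix is in effect, hence
 	     the XOR below.  */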
4625 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4626 overlap = imm16;
4627 else
4628 overlap = imm32s;
4629 }
4630 if (!operand_type_equal (&overlap, &imm8)
4631 && !operand_type_equal (&overlap, &imm8s)
4632 && !operand_type_equal (&overlap, &imm16)
4633 && !operand_type_equal (&overlap, &imm32)
4634 && !operand_type_equal (&overlap, &imm32s)
4635 && !operand_type_equal (&overlap, &imm64))
4636 {
4637 as_bad (_("no instruction mnemonic suffix given; "
4638 "can't determine immediate size"));
4639 return 0;
4640 }
4641 }
4642 i.types[j] = overlap;
4643
4644 return 1;
4645 }
4646
4647 static int
4648 finalize_imm (void)
4649 {
4650 unsigned int j, n;
4651
4652 /* Update the first 2 immediate operands. */
4653 n = i.operands > 2 ? 2 : i.operands;
4654 if (n)
4655 {
4656 for (j = 0; j < n; j++)
4657 if (update_imm (j) == 0)
4658 return 0;
4659
4660       /* The 3rd operand can't be an immediate operand.  */
4661 gas_assert (operand_type_check (i.types[2], imm) == 0);
4662 }
4663
4664 return 1;
4665 }
4666
4667 static int
4668 bad_implicit_operand (int xmm)
4669 {
4670 const char *reg = xmm ? "xmm0" : "ymm0";
4671 if (intel_syntax)
4672 as_bad (_("the last operand of `%s' must be `%s%s'"),
4673 i.tm.name, register_prefix, reg);
4674 else
4675 as_bad (_("the first operand of `%s' must be `%s%s'"),
4676 i.tm.name, register_prefix, reg);
4677 return 0;
4678 }
4679
4680 static int
4681 process_operands (void)
4682 {
4683 /* Default segment register this instruction will use for memory
4684 accesses. 0 means unknown. This is only for optimizing out
4685 unnecessary segment overrides. */
4686 const seg_entry *default_seg = 0;
4687
4688 if (i.tm.opcode_modifier.sse2avx
4689 && (i.tm.opcode_modifier.vexnds
4690 || i.tm.opcode_modifier.vexndd))
4691 {
4692 unsigned int dup = i.operands;
4693 unsigned int dest = dup - 1;
4694 unsigned int j;
4695
4696 /* The destination must be an xmm register. */
4697 gas_assert (i.reg_operands
4698 && MAX_OPERANDS > dup
4699 && operand_type_equal (&i.types[dest], &regxmm));
4700
4701 if (i.tm.opcode_modifier.firstxmm0)
4702 {
4703 /* The first operand is implicit and must be xmm0. */
4704 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4705 if (i.op[0].regs->reg_num != 0)
4706 return bad_implicit_operand (1);
4707
4708 if (i.tm.opcode_modifier.vex3sources)
4709 {
4710 /* Keep xmm0 for instructions with VEX prefix and 3
4711 sources. */
4712 goto duplicate;
4713 }
4714 else
4715 {
4716 /* We remove the first xmm0 and keep the number of
4717 operands unchanged, which in fact duplicates the
4718 destination. */
4719 for (j = 1; j < i.operands; j++)
4720 {
4721 i.op[j - 1] = i.op[j];
4722 i.types[j - 1] = i.types[j];
4723 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4724 }
4725 }
4726 }
4727 else if (i.tm.opcode_modifier.implicit1stxmm0)
4728 {
4729 gas_assert ((MAX_OPERANDS - 1) > dup
4730 && i.tm.opcode_modifier.vex3sources);
4731
4732 /* Add the implicit xmm0 for instructions with VEX prefix
4733 and 3 sources. */
4734 for (j = i.operands; j > 0; j--)
4735 {
4736 i.op[j] = i.op[j - 1];
4737 i.types[j] = i.types[j - 1];
4738 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4739 }
4740 i.op[0].regs
4741 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4742 i.types[0] = regxmm;
4743 i.tm.operand_types[0] = regxmm;
4744
4745 i.operands += 2;
4746 i.reg_operands += 2;
4747 i.tm.operands += 2;
4748
4749 dup++;
4750 dest++;
4751 i.op[dup] = i.op[dest];
4752 i.types[dup] = i.types[dest];
4753 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4754 }
4755 else
4756 {
4757 duplicate:
4758 i.operands++;
4759 i.reg_operands++;
4760 i.tm.operands++;
4761
4762 i.op[dup] = i.op[dest];
4763 i.types[dup] = i.types[dest];
4764 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4765 }
4766
4767 if (i.tm.opcode_modifier.immext)
4768 process_immext ();
4769 }
4770 else if (i.tm.opcode_modifier.firstxmm0)
4771 {
4772 unsigned int j;
4773
4774 /* The first operand is implicit and must be xmm0/ymm0. */
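       /* E.g. the SSE4.1 blendvps/blendvpd/pblendvb forms use %xmm0 as an
 	 implicit selector operand.  */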
4775 gas_assert (i.reg_operands
4776 && (operand_type_equal (&i.types[0], &regxmm)
4777 || operand_type_equal (&i.types[0], &regymm)));
4778 if (i.op[0].regs->reg_num != 0)
4779 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4780
4781 for (j = 1; j < i.operands; j++)
4782 {
4783 i.op[j - 1] = i.op[j];
4784 i.types[j - 1] = i.types[j];
4785
4786 /* We need to adjust fields in i.tm since they are used by
4787 build_modrm_byte. */
4788 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4789 }
4790
4791 i.operands--;
4792 i.reg_operands--;
4793 i.tm.operands--;
4794 }
4795 else if (i.tm.opcode_modifier.regkludge)
4796 {
4797 /* The imul $imm, %reg instruction is converted into
4798 imul $imm, %reg, %reg, and the clr %reg instruction
4799 is converted into xor %reg, %reg. */
4800
4801 unsigned int first_reg_op;
4802
4803 if (operand_type_check (i.types[0], reg))
4804 first_reg_op = 0;
4805 else
4806 first_reg_op = 1;
4807 /* Pretend we saw the extra register operand. */
4808 gas_assert (i.reg_operands == 1
4809 && i.op[first_reg_op + 1].regs == 0);
4810 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
4811 i.types[first_reg_op + 1] = i.types[first_reg_op];
4812 i.operands++;
4813 i.reg_operands++;
4814 }
4815
4816 if (i.tm.opcode_modifier.shortform)
4817 {
4818 if (i.types[0].bitfield.sreg2
4819 || i.types[0].bitfield.sreg3)
4820 {
4821 if (i.tm.base_opcode == POP_SEG_SHORT
4822 && i.op[0].regs->reg_num == 1)
4823 {
4824 as_bad (_("you can't `pop %scs'"), register_prefix);
4825 return 0;
4826 }
4827 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
4828 if ((i.op[0].regs->reg_flags & RegRex) != 0)
4829 i.rex |= REX_B;
4830 }
4831 else
4832 {
4833 /* The register or float register operand is in operand
4834 0 or 1. */
4835 unsigned int op;
4836
4837 if (i.types[0].bitfield.floatreg
4838 || operand_type_check (i.types[0], reg))
4839 op = 0;
4840 else
4841 op = 1;
4842 /* Register goes in low 3 bits of opcode. */
4843 i.tm.base_opcode |= i.op[op].regs->reg_num;
4844 if ((i.op[op].regs->reg_flags & RegRex) != 0)
4845 i.rex |= REX_B;
4846 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4847 {
4848 /* Warn about some common errors, but press on regardless.
4849 The first case can be generated by gcc (<= 2.8.1). */
4850 if (i.operands == 2)
4851 {
4852 /* Reversed arguments on faddp, fsubp, etc. */
4853 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
4854 register_prefix, i.op[!intel_syntax].regs->reg_name,
4855 register_prefix, i.op[intel_syntax].regs->reg_name);
4856 }
4857 else
4858 {
4859 /* Extraneous `l' suffix on fp insn. */
4860 as_warn (_("translating to `%s %s%s'"), i.tm.name,
4861 register_prefix, i.op[0].regs->reg_name);
4862 }
4863 }
4864 }
4865 }
4866 else if (i.tm.opcode_modifier.modrm)
4867 {
4868 /* The opcode is completed (modulo i.tm.extension_opcode which
4869 must be put into the modrm byte). Now, we make the modrm and
4870 index base bytes based on all the info we've collected. */
4871
4872 default_seg = build_modrm_byte ();
4873 }
4874 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
4875 {
4876 default_seg = &ds;
4877 }
4878 else if (i.tm.opcode_modifier.isstring)
4879 {
4880 /* For the string instructions that allow a segment override
4881 on one of their operands, the default segment is ds. */
4882 default_seg = &ds;
4883 }
4884
4885 if (i.tm.base_opcode == 0x8d /* lea */
4886 && i.seg[0]
4887 && !quiet_warnings)
4888 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
4889
4890 /* If a segment was explicitly specified, and the specified segment
4891 is not the default, use an opcode prefix to select it. If we
4892 never figured out what the default segment is, then default_seg
4893 will be zero at this point, and the specified segment prefix will
4894 always be used. */
4895 if ((i.seg[0]) && (i.seg[0] != default_seg))
4896 {
4897 if (!add_prefix (i.seg[0]->seg_prefix))
4898 return 0;
4899 }
4900 return 1;
4901 }
4902
4903 static const seg_entry *
4904 build_modrm_byte (void)
4905 {
4906 const seg_entry *default_seg = 0;
4907 unsigned int source, dest;
4908 int vex_3_sources;
4909
4910 /* The first operand of instructions with VEX prefix and 3 sources
4911 must be VEX_Imm4. */
4912 vex_3_sources = i.tm.opcode_modifier.vex3sources;
4913 if (vex_3_sources)
4914 {
4915 unsigned int nds, reg;
4916 expressionS *exp;
4917
4918 if (i.tm.opcode_modifier.veximmext
4919 && i.tm.opcode_modifier.immext)
4920 {
4921 dest = i.operands - 2;
4922 gas_assert (dest == 3);
4923 }
4924 else
4925 dest = i.operands - 1;
4926 nds = dest - 1;
4927
4928 /* This instruction must have 4 register operands
4929 or 3 register operands plus 1 memory operand.
4930 It must have VexNDS and VexImmExt. */
4931 gas_assert ((i.reg_operands == 4
4932 || (i.reg_operands == 3 && i.mem_operands == 1))
4933 && i.tm.opcode_modifier.vexnds
4934 && i.tm.opcode_modifier.veximmext
4935 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
4936 || operand_type_equal (&i.tm.operand_types[dest], &regymm)));
4937
4938 /* Generate an 8bit immediate operand to encode the register
4939 operand. */
4940 exp = &im_expressions[i.imm_operands++];
4941 i.op[i.operands].imms = exp;
4942 i.types[i.operands] = imm8;
4943 i.operands++;
4944 /* If VexW1 is set, the first operand is the source and
4945 the second operand is encoded in the immediate operand. */
4946 if (i.tm.opcode_modifier.vexw1)
4947 {
4948 source = 0;
4949 reg = 1;
4950 }
4951 else
4952 {
4953 source = 1;
4954 reg = 0;
4955 }
4956 gas_assert ((operand_type_equal (&i.tm.operand_types[reg], &regxmm)
4957 || operand_type_equal (&i.tm.operand_types[reg],
4958 &regymm))
4959 && (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
4960 || operand_type_equal (&i.tm.operand_types[nds],
4961 &regymm)));
4962 exp->X_op = O_constant;
4963 exp->X_add_number
4964 = ((i.op[reg].regs->reg_num
4965 + ((i.op[reg].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
4966 i.vex.register_specifier = i.op[nds].regs;
4967 }
4968 else
4969 source = dest = 0;
4970
4971   /* i.reg_operands MUST be the number of real register operands;
4972      implicit registers do not count.  If there are 3 register
4973      operands, it must be an instruction with VexNDS.  For an
4974      instruction with VexNDD, the destination register is encoded
4975      in the VEX prefix.  If there are 4 register operands, it must
4976      be an instruction with a VEX prefix and 3 sources.  */
4977 if (i.mem_operands == 0
4978 && ((i.reg_operands == 2
4979 && !i.tm.opcode_modifier.vexndd
4980 && !i.tm.opcode_modifier.vexlwp)
4981 || (i.reg_operands == 3
4982 && i.tm.opcode_modifier.vexnds)
4983 || (i.reg_operands == 4 && vex_3_sources)))
4984 {
4985 switch (i.operands)
4986 {
4987 case 2:
4988 source = 0;
4989 break;
4990 case 3:
4991 /* When there are 3 operands, one of them may be immediate,
4992 which may be the first or the last operand. Otherwise,
4993 	     the first operand must be the shift count register (%cl) or it
4994 is an instruction with VexNDS. */
4995 gas_assert (i.imm_operands == 1
4996 || (i.imm_operands == 0
4997 && (i.tm.opcode_modifier.vexnds
4998 || i.types[0].bitfield.shiftcount)));
4999 if (operand_type_check (i.types[0], imm)
5000 || i.types[0].bitfield.shiftcount)
5001 source = 1;
5002 else
5003 source = 0;
5004 break;
5005 case 4:
5006 /* When there are 4 operands, the first two must be 8bit
5007 immediate operands. The source operand will be the 3rd
5008 one.
5009
5010 	     For instructions with VexNDS, if the first operand is
5011 	     an imm8, the source operand is the 2nd one.  If the last
5012 operand is imm8, the source operand is the first one. */
5013 gas_assert ((i.imm_operands == 2
5014 && i.types[0].bitfield.imm8
5015 && i.types[1].bitfield.imm8)
5016 || (i.tm.opcode_modifier.vexnds
5017 && i.imm_operands == 1
5018 && (i.types[0].bitfield.imm8
5019 || i.types[i.operands - 1].bitfield.imm8)));
5020 if (i.tm.opcode_modifier.vexnds)
5021 {
5022 if (i.types[0].bitfield.imm8)
5023 source = 1;
5024 else
5025 source = 0;
5026 }
5027 else
5028 source = 2;
5029 break;
5030 case 5:
5031 break;
5032 default:
5033 abort ();
5034 }
5035
5036 if (!vex_3_sources)
5037 {
5038 dest = source + 1;
5039
5040 if (i.tm.opcode_modifier.vexnds)
5041 {
5042 	      /* For instructions with VexNDS, the register-only
5043 		 source operand must be an XMM or YMM register.  It is
5044 		 encoded in the VEX prefix.  We need to clear the RegMem
5045 		 bit before calling operand_type_equal.  */
5046 i386_operand_type op = i.tm.operand_types[dest];
5047 op.bitfield.regmem = 0;
5048 if ((dest + 1) >= i.operands
5049 || (!operand_type_equal (&op, &regxmm)
5050 && !operand_type_equal (&op, &regymm)))
5051 abort ();
5052 i.vex.register_specifier = i.op[dest].regs;
5053 dest++;
5054 }
5055 }
5056
5057 i.rm.mode = 3;
5058       /* One of the register operands will be encoded in the i.rm.reg
5059 	 field, the other in the combined i.rm.mode and i.rm.regmem
5060 	 fields.  If no form of this instruction supports a memory
5061 	 destination operand, then we assume the source operand may
5062 	 sometimes be a memory operand and so we need to store the
5063 	 destination in the i.rm.reg field.  */
5064 if (!i.tm.operand_types[dest].bitfield.regmem
5065 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5066 {
5067 i.rm.reg = i.op[dest].regs->reg_num;
5068 i.rm.regmem = i.op[source].regs->reg_num;
5069 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5070 i.rex |= REX_R;
5071 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5072 i.rex |= REX_B;
5073 }
5074 else
5075 {
5076 i.rm.reg = i.op[source].regs->reg_num;
5077 i.rm.regmem = i.op[dest].regs->reg_num;
5078 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5079 i.rex |= REX_B;
5080 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5081 i.rex |= REX_R;
5082 }
5083 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5084 {
5085 if (!i.types[0].bitfield.control
5086 && !i.types[1].bitfield.control)
5087 abort ();
5088 i.rex &= ~(REX_R | REX_B);
5089 add_prefix (LOCK_PREFIX_OPCODE);
5090 }
5091 }
5092 else
5093 { /* If it's not 2 reg operands... */
5094 unsigned int mem;
5095
5096 if (i.mem_operands)
5097 {
5098 unsigned int fake_zero_displacement = 0;
5099 unsigned int op;
5100
5101 for (op = 0; op < i.operands; op++)
5102 if (operand_type_check (i.types[op], anymem))
5103 break;
5104 gas_assert (op < i.operands);
5105
5106 default_seg = &ds;
5107
5108 if (i.base_reg == 0)
5109 {
5110 i.rm.mode = 0;
5111 if (!i.disp_operands)
5112 fake_zero_displacement = 1;
5113 if (i.index_reg == 0)
5114 {
5115 /* Operand is just <disp> */
5116 if (flag_code == CODE_64BIT)
5117 {
5118 		  /* 64bit mode replaces 32bit absolute addressing
5119 		     with RIP relative addressing; 32bit absolute
5120 		     addressing has to be encoded via one of the
5121 		     redundant SIB forms instead.  */
5122 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5123 i.sib.base = NO_BASE_REGISTER;
5124 i.sib.index = NO_INDEX_REGISTER;
5125 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5126 ? disp32s : disp32);
5127 }
5128 else if ((flag_code == CODE_16BIT)
5129 ^ (i.prefix[ADDR_PREFIX] != 0))
5130 {
5131 i.rm.regmem = NO_BASE_REGISTER_16;
5132 i.types[op] = disp16;
5133 }
5134 else
5135 {
5136 i.rm.regmem = NO_BASE_REGISTER;
5137 i.types[op] = disp32;
5138 }
5139 }
5140 else /* !i.base_reg && i.index_reg */
5141 {
5142 if (i.index_reg->reg_num == RegEiz
5143 || i.index_reg->reg_num == RegRiz)
5144 i.sib.index = NO_INDEX_REGISTER;
5145 else
5146 i.sib.index = i.index_reg->reg_num;
5147 i.sib.base = NO_BASE_REGISTER;
5148 i.sib.scale = i.log2_scale_factor;
5149 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5150 i.types[op].bitfield.disp8 = 0;
5151 i.types[op].bitfield.disp16 = 0;
5152 i.types[op].bitfield.disp64 = 0;
5153 if (flag_code != CODE_64BIT)
5154 {
5155 /* Must be 32 bit */
5156 i.types[op].bitfield.disp32 = 1;
5157 i.types[op].bitfield.disp32s = 0;
5158 }
5159 else
5160 {
5161 i.types[op].bitfield.disp32 = 0;
5162 i.types[op].bitfield.disp32s = 1;
5163 }
5164 if ((i.index_reg->reg_flags & RegRex) != 0)
5165 i.rex |= REX_X;
5166 }
5167 }
5168 /* RIP addressing for 64bit mode. */
5169 else if (i.base_reg->reg_num == RegRip ||
5170 i.base_reg->reg_num == RegEip)
5171 {
5172 i.rm.regmem = NO_BASE_REGISTER;
5173 i.types[op].bitfield.disp8 = 0;
5174 i.types[op].bitfield.disp16 = 0;
5175 i.types[op].bitfield.disp32 = 0;
5176 i.types[op].bitfield.disp32s = 1;
5177 i.types[op].bitfield.disp64 = 0;
5178 i.flags[op] |= Operand_PCrel;
5179 if (! i.disp_operands)
5180 fake_zero_displacement = 1;
5181 }
5182 else if (i.base_reg->reg_type.bitfield.reg16)
5183 {
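 	      /* 16-bit r/m encodings: 0 = (%bx,%si), 1 = (%bx,%di),
 		 2 = (%bp,%si), 3 = (%bp,%di), 4 = (%si), 5 = (%di),
 		 6 = (%bp) or bare disp16, 7 = (%bx).  */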
5184 switch (i.base_reg->reg_num)
5185 {
5186 case 3: /* (%bx) */
5187 if (i.index_reg == 0)
5188 i.rm.regmem = 7;
5189 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5190 i.rm.regmem = i.index_reg->reg_num - 6;
5191 break;
5192 case 5: /* (%bp) */
5193 default_seg = &ss;
5194 if (i.index_reg == 0)
5195 {
5196 i.rm.regmem = 6;
5197 if (operand_type_check (i.types[op], disp) == 0)
5198 {
5199 /* fake (%bp) into 0(%bp) */
5200 i.types[op].bitfield.disp8 = 1;
5201 fake_zero_displacement = 1;
5202 }
5203 }
5204 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5205 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5206 break;
5207 default: /* (%si) -> 4 or (%di) -> 5 */
5208 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5209 }
5210 i.rm.mode = mode_from_disp_size (i.types[op]);
5211 }
5212 else /* i.base_reg and 32/64 bit mode */
5213 {
5214 if (flag_code == CODE_64BIT
5215 && operand_type_check (i.types[op], disp))
5216 {
5217 i386_operand_type temp;
5218 operand_type_set (&temp, 0);
5219 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5220 i.types[op] = temp;
5221 if (i.prefix[ADDR_PREFIX] == 0)
5222 i.types[op].bitfield.disp32s = 1;
5223 else
5224 i.types[op].bitfield.disp32 = 1;
5225 }
5226
5227 i.rm.regmem = i.base_reg->reg_num;
5228 if ((i.base_reg->reg_flags & RegRex) != 0)
5229 i.rex |= REX_B;
5230 i.sib.base = i.base_reg->reg_num;
5231 	      /* x86-64 ignores the REX.B bit when recognising this special
5232 		 case, so %r13 needs the fake displacement just like %rbp.  */
5233 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5234 {
5235 default_seg = &ss;
5236 if (i.disp_operands == 0)
5237 {
5238 fake_zero_displacement = 1;
5239 i.types[op].bitfield.disp8 = 1;
5240 }
5241 }
5242 else if (i.base_reg->reg_num == ESP_REG_NUM)
5243 {
5244 default_seg = &ss;
5245 }
5246 i.sib.scale = i.log2_scale_factor;
5247 if (i.index_reg == 0)
5248 {
5249 /* <disp>(%esp) becomes two byte modrm with no index
5250 register. We've already stored the code for esp
5251 		     in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5252 Any base register besides %esp will not use the
5253 extra modrm byte. */
5254 i.sib.index = NO_INDEX_REGISTER;
5255 }
5256 else
5257 {
5258 if (i.index_reg->reg_num == RegEiz
5259 || i.index_reg->reg_num == RegRiz)
5260 i.sib.index = NO_INDEX_REGISTER;
5261 else
5262 i.sib.index = i.index_reg->reg_num;
5263 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5264 if ((i.index_reg->reg_flags & RegRex) != 0)
5265 i.rex |= REX_X;
5266 }
5267
5268 if (i.disp_operands
5269 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5270 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5271 i.rm.mode = 0;
5272 else
5273 i.rm.mode = mode_from_disp_size (i.types[op]);
5274 }
5275
5276 if (fake_zero_displacement)
5277 {
5278 /* Fakes a zero displacement assuming that i.types[op]
5279 holds the correct displacement size. */
5280 expressionS *exp;
5281
5282 gas_assert (i.op[op].disps == 0);
5283 exp = &disp_expressions[i.disp_operands++];
5284 i.op[op].disps = exp;
5285 exp->X_op = O_constant;
5286 exp->X_add_number = 0;
5287 exp->X_add_symbol = (symbolS *) 0;
5288 exp->X_op_symbol = (symbolS *) 0;
5289 }
5290
5291 mem = op;
5292 }
5293 else
5294 mem = ~0;
5295
5296 if (i.tm.opcode_modifier.vexlwp)
5297 {
5298 i.vex.register_specifier = i.op[2].regs;
5299 if (!i.mem_operands)
5300 {
5301 i.rm.mode = 3;
5302 i.rm.regmem = i.op[1].regs->reg_num;
5303 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5304 i.rex |= REX_B;
5305 }
5306 }
5307 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5308 (if any) based on i.tm.extension_opcode. Again, we must be
5309 careful to make sure that segment/control/debug/test/MMX
5310 registers are coded into the i.rm.reg field. */
5311 else if (i.reg_operands)
5312 {
5313 unsigned int op;
5314 unsigned int vex_reg = ~0;
5315
5316 for (op = 0; op < i.operands; op++)
5317 if (i.types[op].bitfield.reg8
5318 || i.types[op].bitfield.reg16
5319 || i.types[op].bitfield.reg32
5320 || i.types[op].bitfield.reg64
5321 || i.types[op].bitfield.regmmx
5322 || i.types[op].bitfield.regxmm
5323 || i.types[op].bitfield.regymm
5324 || i.types[op].bitfield.sreg2
5325 || i.types[op].bitfield.sreg3
5326 || i.types[op].bitfield.control
5327 || i.types[op].bitfield.debug
5328 || i.types[op].bitfield.test)
5329 break;
5330
5331 if (vex_3_sources)
5332 op = dest;
5333 else if (i.tm.opcode_modifier.vexnds)
5334 {
5335 /* For instructions with VexNDS, the register-only
5336 source operand is encoded in VEX prefix. */
5337 gas_assert (mem != (unsigned int) ~0);
5338
5339 if (op > mem)
5340 {
5341 vex_reg = op++;
5342 gas_assert (op < i.operands);
5343 }
5344 else
5345 {
5346 vex_reg = op + 1;
5347 gas_assert (vex_reg < i.operands);
5348 }
5349 }
5350 else if (i.tm.opcode_modifier.vexndd)
5351 {
5352 /* For instructions with VexNDD, there should be
5353 no memory operand and the register destination
5354 is encoded in VEX prefix. */
5355 gas_assert (i.mem_operands == 0
5356 && (op + 2) == i.operands);
5357 vex_reg = op + 1;
5358 }
5359 else
5360 gas_assert (op < i.operands);
5361
5362 if (vex_reg != (unsigned int) ~0)
5363 {
5364 gas_assert (i.reg_operands == 2);
5365
5366 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5367 &regxmm)
5368 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5369 &regymm))
5370 abort ();
5371
5372 i.vex.register_specifier = i.op[vex_reg].regs;
5373 }
5374
5375 /* Don't set OP operand twice. */
5376 if (vex_reg != op)
5377 {
5378 /* If there is an extension opcode to put here, the
5379 register number must be put into the regmem field. */
5380 if (i.tm.extension_opcode != None)
5381 {
5382 i.rm.regmem = i.op[op].regs->reg_num;
5383 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5384 i.rex |= REX_B;
5385 }
5386 else
5387 {
5388 i.rm.reg = i.op[op].regs->reg_num;
5389 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5390 i.rex |= REX_R;
5391 }
5392 }
5393
5394 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5395 must set it to 3 to indicate this is a register operand
5396 in the regmem field. */
5397 if (!i.mem_operands)
5398 i.rm.mode = 3;
5399 }
5400
5401 /* Fill in i.rm.reg field with extension opcode (if any). */
5402 if (i.tm.extension_opcode != None)
5403 i.rm.reg = i.tm.extension_opcode;
5404 }
5405 return default_seg;
5406 }
5407
5408 static void
5409 output_branch (void)
5410 {
5411 char *p;
5412 int code16;
5413 int prefix;
5414 relax_substateT subtype;
5415 symbolS *sym;
5416 offsetT off;
5417
5418 code16 = 0;
5419 if (flag_code == CODE_16BIT)
5420 code16 = CODE16;
5421
5422 prefix = 0;
5423 if (i.prefix[DATA_PREFIX] != 0)
5424 {
5425 prefix = 1;
5426 i.prefixes -= 1;
5427 code16 ^= CODE16;
5428 }
5429 /* Pentium4 branch hints. */
5430 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5431 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5432 {
5433 prefix++;
5434 i.prefixes--;
5435 }
5436 if (i.prefix[REX_PREFIX] != 0)
5437 {
5438 prefix++;
5439 i.prefixes--;
5440 }
5441
5442 if (i.prefixes != 0 && !intel_syntax)
5443 as_warn (_("skipping prefixes on this instruction"));
5444
5445 /* It's always a symbol; End frag & setup for relax.
5446 Make sure there is enough room in this frag for the largest
5447 instruction we may generate in md_convert_frag. This is 2
5448 bytes for the opcode and room for the prefix and largest
5449 displacement. */
5450 frag_grow (prefix + 2 + 4);
5451 /* Prefix and 1 opcode byte go in fr_fix. */
5452 p = frag_more (prefix + 1);
5453 if (i.prefix[DATA_PREFIX] != 0)
5454 *p++ = DATA_PREFIX_OPCODE;
5455 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5456 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5457 *p++ = i.prefix[SEG_PREFIX];
5458 if (i.prefix[REX_PREFIX] != 0)
5459 *p++ = i.prefix[REX_PREFIX];
5460 *p = i.tm.base_opcode;
5461
5462 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5463 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
5464 else if (cpu_arch_flags.bitfield.cpui386)
5465 subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
5466 else
5467 subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
5468 subtype |= code16;
5469
5470 sym = i.op[0].disps->X_add_symbol;
5471 off = i.op[0].disps->X_add_number;
5472
5473 if (i.op[0].disps->X_op != O_constant
5474 && i.op[0].disps->X_op != O_symbol)
5475 {
5476 /* Handle complex expressions. */
5477 sym = make_expr_symbol (i.op[0].disps);
5478 off = 0;
5479 }
5480
5481 /* 1 possible extra opcode + 4 byte displacement go in var part.
5482 Pass reloc in fr_var. */
5483 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5484 }
5485
5486 static void
5487 output_jump (void)
5488 {
5489 char *p;
5490 int size;
5491 fixS *fixP;
5492
5493 if (i.tm.opcode_modifier.jumpbyte)
5494 {
5495 /* This is a loop or jecxz type instruction. */
5496 size = 1;
5497 if (i.prefix[ADDR_PREFIX] != 0)
5498 {
5499 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5500 i.prefixes -= 1;
5501 }
5502 /* Pentium4 branch hints. */
5503 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5504 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5505 {
5506 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5507 i.prefixes--;
5508 }
5509 }
5510 else
5511 {
5512 int code16;
5513
5514 code16 = 0;
5515 if (flag_code == CODE_16BIT)
5516 code16 = CODE16;
5517
5518 if (i.prefix[DATA_PREFIX] != 0)
5519 {
5520 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5521 i.prefixes -= 1;
5522 code16 ^= CODE16;
5523 }
5524
5525 size = 4;
5526 if (code16)
5527 size = 2;
5528 }
5529
5530 if (i.prefix[REX_PREFIX] != 0)
5531 {
5532 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5533 i.prefixes -= 1;
5534 }
5535
5536 if (i.prefixes != 0 && !intel_syntax)
5537 as_warn (_("skipping prefixes on this instruction"));
5538
5539 p = frag_more (1 + size);
5540 *p++ = i.tm.base_opcode;
5541
5542 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5543 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5544
5545 /* All jumps handled here are signed, but don't use a signed limit
5546 check for 32 and 16 bit jumps as we want to allow wrap around at
5547 4G and 64k respectively. */
5548 if (size == 1)
5549 fixP->fx_signed = 1;
5550 }
5551
5552 static void
5553 output_interseg_jump (void)
5554 {
5555 char *p;
5556 int size;
5557 int prefix;
5558 int code16;
5559
5560 code16 = 0;
5561 if (flag_code == CODE_16BIT)
5562 code16 = CODE16;
5563
5564 prefix = 0;
5565 if (i.prefix[DATA_PREFIX] != 0)
5566 {
5567 prefix = 1;
5568 i.prefixes -= 1;
5569 code16 ^= CODE16;
5570 }
5571 if (i.prefix[REX_PREFIX] != 0)
5572 {
5573 prefix++;
5574 i.prefixes -= 1;
5575 }
5576
5577 size = 4;
5578 if (code16)
5579 size = 2;
5580
5581 if (i.prefixes != 0 && !intel_syntax)
5582 as_warn (_("skipping prefixes on this instruction"));
5583
5584 /* 1 opcode; 2 segment; offset */
5585 p = frag_more (prefix + 1 + 2 + size);
5586
5587 if (i.prefix[DATA_PREFIX] != 0)
5588 *p++ = DATA_PREFIX_OPCODE;
5589
5590 if (i.prefix[REX_PREFIX] != 0)
5591 *p++ = i.prefix[REX_PREFIX];
5592
5593 *p++ = i.tm.base_opcode;
5594 if (i.op[1].imms->X_op == O_constant)
5595 {
5596 offsetT n = i.op[1].imms->X_add_number;
5597
5598 if (size == 2
5599 && !fits_in_unsigned_word (n)
5600 && !fits_in_signed_word (n))
5601 {
5602 as_bad (_("16-bit jump out of range"));
5603 return;
5604 }
5605 md_number_to_chars (p, n, size);
5606 }
5607 else
5608 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5609 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5610 if (i.op[0].imms->X_op != O_constant)
5611 as_bad (_("can't handle non absolute segment in `%s'"),
5612 i.tm.name);
5613 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5614 }
5615
5616 static void
5617 output_insn (void)
5618 {
5619 fragS *insn_start_frag;
5620 offsetT insn_start_off;
5621
5622 /* Tie dwarf2 debug info to the address at the start of the insn.
5623 We can't do this after the insn has been output as the current
5624      frag may have been closed off, e.g. by frag_var.  */
5625 dwarf2_emit_insn (0);
5626
5627 insn_start_frag = frag_now;
5628 insn_start_off = frag_now_fix ();
5629
5630 /* Output jumps. */
5631 if (i.tm.opcode_modifier.jump)
5632 output_branch ();
5633 else if (i.tm.opcode_modifier.jumpbyte
5634 || i.tm.opcode_modifier.jumpdword)
5635 output_jump ();
5636 else if (i.tm.opcode_modifier.jumpintersegment)
5637 output_interseg_jump ();
5638 else
5639 {
5640 /* Output normal instructions here. */
5641 char *p;
5642 unsigned char *q;
5643 unsigned int j;
5644 unsigned int prefix;
5645
5646 /* Since the VEX prefix contains the implicit prefix, we don't
5647 need the explicit prefix. */
5648 if (!i.tm.opcode_modifier.vex)
5649 {
5650 switch (i.tm.opcode_length)
5651 {
5652 case 3:
5653 if (i.tm.base_opcode & 0xff000000)
5654 {
5655 prefix = (i.tm.base_opcode >> 24) & 0xff;
5656 goto check_prefix;
5657 }
5658 break;
5659 case 2:
5660 if ((i.tm.base_opcode & 0xff0000) != 0)
5661 {
5662 prefix = (i.tm.base_opcode >> 16) & 0xff;
5663 if (i.tm.cpu_flags.bitfield.cpupadlock)
5664 {
5665 check_prefix:
5666 if (prefix != REPE_PREFIX_OPCODE
5667 || (i.prefix[REP_PREFIX]
5668 != REPE_PREFIX_OPCODE))
5669 add_prefix (prefix);
5670 }
5671 else
5672 add_prefix (prefix);
5673 }
5674 break;
5675 case 1:
5676 break;
5677 default:
5678 abort ();
5679 }
5680
5681 /* The prefix bytes. */
5682 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5683 if (*q)
5684 FRAG_APPEND_1_CHAR (*q);
5685 }
5686
5687 if (i.tm.opcode_modifier.vex)
5688 {
5689 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5690 if (*q)
5691 switch (j)
5692 {
5693 case REX_PREFIX:
5694 /* REX byte is encoded in VEX prefix. */
5695 break;
5696 case SEG_PREFIX:
5697 case ADDR_PREFIX:
5698 FRAG_APPEND_1_CHAR (*q);
5699 break;
5700 default:
5701 /* There should be no other prefixes for instructions
5702 with VEX prefix. */
5703 abort ();
5704 }
5705
5706 /* Now the VEX prefix. */
5707 p = frag_more (i.vex.length);
5708 for (j = 0; j < i.vex.length; j++)
5709 p[j] = i.vex.bytes[j];
5710 }
5711
5712 /* Now the opcode; be careful about word order here! */
5713 if (i.tm.opcode_length == 1)
5714 {
5715 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
5716 }
5717 else
5718 {
5719 switch (i.tm.opcode_length)
5720 {
5721 case 3:
5722 p = frag_more (3);
5723 *p++ = (i.tm.base_opcode >> 16) & 0xff;
5724 break;
5725 case 2:
5726 p = frag_more (2);
5727 break;
5728 default:
5729 abort ();
5730 break;
5731 }
5732
5733 /* Put out high byte first: can't use md_number_to_chars! */
5734 *p++ = (i.tm.base_opcode >> 8) & 0xff;
5735 *p = i.tm.base_opcode & 0xff;
5736 }
5737
5738 /* Now the modrm byte and sib byte (if present). */
5739 if (i.tm.opcode_modifier.modrm)
5740 {
5741 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
5742 | i.rm.reg << 3
5743 | i.rm.mode << 6));
5744 /* If i.rm.regmem == ESP (4)
5745 && i.rm.mode != (Register mode)
5746 && not 16 bit
5747 ==> need second modrm byte. */
5748 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
5749 && i.rm.mode != 3
5750 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
5751 FRAG_APPEND_1_CHAR ((i.sib.base << 0
5752 | i.sib.index << 3
5753 | i.sib.scale << 6));
5754 }
5755
5756 if (i.disp_operands)
5757 output_disp (insn_start_frag, insn_start_off);
5758
5759 if (i.imm_operands)
5760 output_imm (insn_start_frag, insn_start_off);
5761 }
5762
5763 #ifdef DEBUG386
5764 if (flag_debug)
5765 {
5766 pi ("" /*line*/, &i);
5767 }
5768 #endif /* DEBUG386 */
5769 }
5770
5771 /* Return the size of the displacement operand N. */
5772
5773 static int
5774 disp_size (unsigned int n)
5775 {
5776 int size = 4;
5777 if (i.types[n].bitfield.disp64)
5778 size = 8;
5779 else if (i.types[n].bitfield.disp8)
5780 size = 1;
5781 else if (i.types[n].bitfield.disp16)
5782 size = 2;
5783 return size;
5784 }
5785
5786 /* Return the size of the immediate operand N. */
5787
5788 static int
5789 imm_size (unsigned int n)
5790 {
5791 int size = 4;
5792 if (i.types[n].bitfield.imm64)
5793 size = 8;
5794 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
5795 size = 1;
5796 else if (i.types[n].bitfield.imm16)
5797 size = 2;
5798 return size;
5799 }
5800
5801 static void
5802 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
5803 {
5804 char *p;
5805 unsigned int n;
5806
5807 for (n = 0; n < i.operands; n++)
5808 {
5809 if (operand_type_check (i.types[n], disp))
5810 {
5811 if (i.op[n].disps->X_op == O_constant)
5812 {
5813 int size = disp_size (n);
5814 offsetT val;
5815
5816 val = offset_in_range (i.op[n].disps->X_add_number,
5817 size);
5818 p = frag_more (size);
5819 md_number_to_chars (p, val, size);
5820 }
5821 else
5822 {
5823 enum bfd_reloc_code_real reloc_type;
5824 int size = disp_size (n);
5825 int sign = i.types[n].bitfield.disp32s;
5826 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
5827
5828 /* We can't have 8 bit displacement here. */
5829 gas_assert (!i.types[n].bitfield.disp8);
5830
5831 /* The PC relative address is computed relative
5832 to the instruction boundary, so in case immediate
5833 		 fields follow, we need to adjust the value.  */
5834 if (pcrel && i.imm_operands)
5835 {
5836 unsigned int n1;
5837 int sz = 0;
5838
5839 for (n1 = 0; n1 < i.operands; n1++)
5840 if (operand_type_check (i.types[n1], imm))
5841 {
5842 /* Only one immediate is allowed for PC
5843 relative address. */
5844 gas_assert (sz == 0);
5845 sz = imm_size (n1);
5846 i.op[n].disps->X_add_number -= sz;
5847 }
5848 /* We should find the immediate. */
5849 gas_assert (sz != 0);
5850 }
5851
5852 p = frag_more (size);
5853 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
5854 if (GOT_symbol
5855 && GOT_symbol == i.op[n].disps->X_add_symbol
5856 && (((reloc_type == BFD_RELOC_32
5857 || reloc_type == BFD_RELOC_X86_64_32S
5858 || (reloc_type == BFD_RELOC_64
5859 && object_64bit))
5860 && (i.op[n].disps->X_op == O_symbol
5861 || (i.op[n].disps->X_op == O_add
5862 && ((symbol_get_value_expression
5863 (i.op[n].disps->X_op_symbol)->X_op)
5864 == O_subtract))))
5865 || reloc_type == BFD_RELOC_32_PCREL))
5866 {
5867 offsetT add;
5868
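 		  /* ADD is the number of bytes from the start of the insn to
 		     this fixup; it is folded into the addend so the GOTPC
 		     value compensates for where the reloc is applied (see
 		     the long explanation in output_imm).  */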
5869 if (insn_start_frag == frag_now)
5870 add = (p - frag_now->fr_literal) - insn_start_off;
5871 else
5872 {
5873 fragS *fr;
5874
5875 add = insn_start_frag->fr_fix - insn_start_off;
5876 for (fr = insn_start_frag->fr_next;
5877 fr && fr != frag_now; fr = fr->fr_next)
5878 add += fr->fr_fix;
5879 add += p - frag_now->fr_literal;
5880 }
5881
5882 if (!object_64bit)
5883 {
5884 reloc_type = BFD_RELOC_386_GOTPC;
5885 i.op[n].imms->X_add_number += add;
5886 }
5887 else if (reloc_type == BFD_RELOC_64)
5888 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5889 else
5890 /* Don't do the adjustment for x86-64, as there
5891 the pcrel addressing is relative to the _next_
5892 insn, and that is taken care of in other code. */
5893 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5894 }
5895 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5896 i.op[n].disps, pcrel, reloc_type);
5897 }
5898 }
5899 }
5900 }
5901
5902 static void
5903 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
5904 {
5905 char *p;
5906 unsigned int n;
5907
5908 for (n = 0; n < i.operands; n++)
5909 {
5910 if (operand_type_check (i.types[n], imm))
5911 {
5912 if (i.op[n].imms->X_op == O_constant)
5913 {
5914 int size = imm_size (n);
5915 offsetT val;
5916
5917 val = offset_in_range (i.op[n].imms->X_add_number,
5918 size);
5919 p = frag_more (size);
5920 md_number_to_chars (p, val, size);
5921 }
5922 else
5923 {
5924 /* Not absolute_section.
5925 Need a 32-bit fixup (don't support 8bit
5926 non-absolute imms). Try to support other
5927 sizes ... */
5928 enum bfd_reloc_code_real reloc_type;
5929 int size = imm_size (n);
5930 int sign;
5931
5932 if (i.types[n].bitfield.imm32s
5933 && (i.suffix == QWORD_MNEM_SUFFIX
5934 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
5935 sign = 1;
5936 else
5937 sign = 0;
5938
5939 p = frag_more (size);
5940 reloc_type = reloc (size, 0, sign, i.reloc[n]);
5941
5942 /* This is tough to explain. We end up with this one if we
5943 * have operands that look like
5944 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
5945 * obtain the absolute address of the GOT, and it is strongly
5946 * preferable from a performance point of view to avoid using
5947 * a runtime relocation for this. The actual sequence of
5948 * instructions often look something like:
5949 *
5950 * call .L66
5951 * .L66:
5952 * popl %ebx
5953 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
5954 *
5955 * The call and pop essentially return the absolute address
5956 * of the label .L66 and store it in %ebx. The linker itself
5957 * will ultimately change the first operand of the addl so
5958 * that %ebx points to the GOT, but to keep things simple, the
5959 * .o file must have this operand set so that it generates not
5960 * the absolute address of .L66, but the absolute address of
5961 	       * itself.  This allows the linker to simply treat a GOTPC
5962 * relocation as asking for a pcrel offset to the GOT to be
5963 * added in, and the addend of the relocation is stored in the
5964 * operand field for the instruction itself.
5965 *
5966 * Our job here is to fix the operand so that it would add
5967 * the correct offset so that %ebx would point to itself. The
5968 * thing that is tricky is that .-.L66 will point to the
5969 * beginning of the instruction, so we need to further modify
5970 * the operand so that it will point to itself. There are
5971 * other cases where you have something like:
5972 *
5973 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
5974 *
5975 * and here no correction would be required. Internally in
5976 * the assembler we treat operands of this form as not being
5977 * pcrel since the '.' is explicitly mentioned, and I wonder
5978 * whether it would simplify matters to do it this way. Who
5979 * knows. In earlier versions of the PIC patches, the
5980 * pcrel_adjust field was used to store the correction, but
5981 * since the expression is not pcrel, I felt it would be
5982 * confusing to do it this way. */
5983
5984 if ((reloc_type == BFD_RELOC_32
5985 || reloc_type == BFD_RELOC_X86_64_32S
5986 || reloc_type == BFD_RELOC_64)
5987 && GOT_symbol
5988 && GOT_symbol == i.op[n].imms->X_add_symbol
5989 && (i.op[n].imms->X_op == O_symbol
5990 || (i.op[n].imms->X_op == O_add
5991 && ((symbol_get_value_expression
5992 (i.op[n].imms->X_op_symbol)->X_op)
5993 == O_subtract))))
5994 {
5995 offsetT add;
5996
5997 if (insn_start_frag == frag_now)
5998 add = (p - frag_now->fr_literal) - insn_start_off;
5999 else
6000 {
6001 fragS *fr;
6002
6003 add = insn_start_frag->fr_fix - insn_start_off;
6004 for (fr = insn_start_frag->fr_next;
6005 fr && fr != frag_now; fr = fr->fr_next)
6006 add += fr->fr_fix;
6007 add += p - frag_now->fr_literal;
6008 }
6009
6010 if (!object_64bit)
6011 reloc_type = BFD_RELOC_386_GOTPC;
6012 else if (size == 4)
6013 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6014 else if (size == 8)
6015 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6016 i.op[n].imms->X_add_number += add;
6017 }
6018 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6019 i.op[n].imms, 0, reloc_type);
6020 }
6021 }
6022 }
6023 }
6024 \f
6025 /* x86_cons_fix_new is called via the expression parsing code when a
6026 reloc is needed. We use this hook to get the correct .got reloc. */
6027 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6028 static int cons_sign = -1;
6029
6030 void
6031 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6032 expressionS *exp)
6033 {
6034 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6035
6036 got_reloc = NO_RELOC;
6037
6038 #ifdef TE_PE
6039 if (exp->X_op == O_secrel)
6040 {
6041 exp->X_op = O_symbol;
6042 r = BFD_RELOC_32_SECREL;
6043 }
6044 #endif
6045
6046 fix_new_exp (frag, off, len, exp, 0, r);
6047 }
6048
6049 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6050 # define lex_got(reloc, adjust, types) NULL
6051 #else
6052 /* Parse operands of the form
6053 <symbol>@GOTOFF+<nnn>
6054 and similar .plt or .got references.
6055
6056 If we find one, set up the correct relocation in RELOC and copy the
6057 input string, minus the `@GOTOFF' into a malloc'd buffer for
6058 parsing by the calling routine. Return this buffer, and if ADJUST
6059 is non-null set it to the length of the string we removed from the
6060 input line. Otherwise return NULL. */
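/* For example, given `foo@GOTOFF+4' in 32-bit mode, *RELOC is set to
   BFD_RELOC_386_GOTOFF and the returned buffer contains roughly
   `foo +4': the reloc token is replaced by a single space so that
   trailing junk such as `foo@GOTOFF1' is still diagnosed.  */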
6061 static char *
6062 lex_got (enum bfd_reloc_code_real *reloc,
6063 int *adjust,
6064 i386_operand_type *types)
6065 {
6066 /* Some of the relocations depend on the size of the field to be
6067 relocated. But in our callers i386_immediate and i386_displacement
6068 we don't yet know the operand size (this will be set by insn
6069 matching). Hence we record the word32 relocation here,
6070 and adjust the reloc according to the real size in reloc(). */
6071 static const struct {
6072 const char *str;
6073 const enum bfd_reloc_code_real rel[2];
6074 const i386_operand_type types64;
6075 } gotrel[] = {
6076 { "PLTOFF", { _dummy_first_bfd_reloc_code_real,
6077 BFD_RELOC_X86_64_PLTOFF64 },
6078 OPERAND_TYPE_IMM64 },
6079 { "PLT", { BFD_RELOC_386_PLT32,
6080 BFD_RELOC_X86_64_PLT32 },
6081 OPERAND_TYPE_IMM32_32S_DISP32 },
6082 { "GOTPLT", { _dummy_first_bfd_reloc_code_real,
6083 BFD_RELOC_X86_64_GOTPLT64 },
6084 OPERAND_TYPE_IMM64_DISP64 },
6085 { "GOTOFF", { BFD_RELOC_386_GOTOFF,
6086 BFD_RELOC_X86_64_GOTOFF64 },
6087 OPERAND_TYPE_IMM64_DISP64 },
6088 { "GOTPCREL", { _dummy_first_bfd_reloc_code_real,
6089 BFD_RELOC_X86_64_GOTPCREL },
6090 OPERAND_TYPE_IMM32_32S_DISP32 },
6091 { "TLSGD", { BFD_RELOC_386_TLS_GD,
6092 BFD_RELOC_X86_64_TLSGD },
6093 OPERAND_TYPE_IMM32_32S_DISP32 },
6094 { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
6095 _dummy_first_bfd_reloc_code_real },
6096 OPERAND_TYPE_NONE },
6097 { "TLSLD", { _dummy_first_bfd_reloc_code_real,
6098 BFD_RELOC_X86_64_TLSLD },
6099 OPERAND_TYPE_IMM32_32S_DISP32 },
6100 { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
6101 BFD_RELOC_X86_64_GOTTPOFF },
6102 OPERAND_TYPE_IMM32_32S_DISP32 },
6103 { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
6104 BFD_RELOC_X86_64_TPOFF32 },
6105 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6106 { "NTPOFF", { BFD_RELOC_386_TLS_LE,
6107 _dummy_first_bfd_reloc_code_real },
6108 OPERAND_TYPE_NONE },
6109 { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
6110 BFD_RELOC_X86_64_DTPOFF32 },
6112 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6113 { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
6114 _dummy_first_bfd_reloc_code_real },
6115 OPERAND_TYPE_NONE },
6116 { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
6117 _dummy_first_bfd_reloc_code_real },
6118 OPERAND_TYPE_NONE },
6119 { "GOT", { BFD_RELOC_386_GOT32,
6120 BFD_RELOC_X86_64_GOT32 },
6121 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6122 { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
6123 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6124 OPERAND_TYPE_IMM32_32S_DISP32 },
6125 { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
6126 BFD_RELOC_X86_64_TLSDESC_CALL },
6127 OPERAND_TYPE_IMM32_32S_DISP32 },
6128 };
6129 char *cp;
6130 unsigned int j;
6131
6132 if (!IS_ELF)
6133 return NULL;
6134
6135 for (cp = input_line_pointer; *cp != '@'; cp++)
6136 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6137 return NULL;
6138
6139 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6140 {
6141 int len;
6142
6143 len = strlen (gotrel[j].str);
6144 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6145 {
6146 if (gotrel[j].rel[object_64bit] != 0)
6147 {
6148 int first, second;
6149 char *tmpbuf, *past_reloc;
6150
6151 *reloc = gotrel[j].rel[object_64bit];
6152 if (adjust)
6153 *adjust = len;
6154
6155 if (types)
6156 {
6157 if (flag_code != CODE_64BIT)
6158 {
6159 types->bitfield.imm32 = 1;
6160 types->bitfield.disp32 = 1;
6161 }
6162 else
6163 *types = gotrel[j].types64;
6164 }
6165
6166 if (GOT_symbol == NULL)
6167 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6168
6169 /* The length of the first part of our input line. */
6170 first = cp - input_line_pointer;
6171
6172 /* The second part goes from after the reloc token until
6173 (and including) an end_of_line char or comma. */
6174 past_reloc = cp + 1 + len;
6175 cp = past_reloc;
6176 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6177 ++cp;
6178 second = cp + 1 - past_reloc;
6179
6180 /* Allocate and copy string. The trailing NUL shouldn't
6181 be necessary, but be safe. */
6182 tmpbuf = (char *) xmalloc (first + second + 2);
6183 memcpy (tmpbuf, input_line_pointer, first);
6184 if (second != 0 && *past_reloc != ' ')
6185 /* Replace the relocation token with ' ', so that
6186 errors like foo@GOTOFF1 will be detected. */
6187 tmpbuf[first++] = ' ';
6188 memcpy (tmpbuf + first, past_reloc, second);
6189 tmpbuf[first + second] = '\0';
6190 return tmpbuf;
6191 }
6192
6193 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6194 gotrel[j].str, 1 << (5 + object_64bit));
6195 return NULL;
6196 }
6197 }
6198
6199 /* Might be a symbol version string. Don't as_bad here. */
6200 return NULL;
6201 }
6202
6203 void
6204 x86_cons (expressionS *exp, int size)
6205 {
6206 intel_syntax = -intel_syntax;
6207
6208 if (size == 4 || (object_64bit && size == 8))
6209 {
6210 /* Handle @GOTOFF and the like in an expression. */
6211 char *save;
6212 char *gotfree_input_line;
6213 int adjust;
6214
6215 save = input_line_pointer;
6216 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6217 if (gotfree_input_line)
6218 input_line_pointer = gotfree_input_line;
6219
6220 expression (exp);
6221
6222 if (gotfree_input_line)
6223 {
6224 /* expression () has merrily parsed up to the end of line,
6225 or a comma - in the wrong buffer. Transfer how far
6226 input_line_pointer has moved to the right buffer. */
6227 input_line_pointer = (save
6228 + (input_line_pointer - gotfree_input_line)
6229 + adjust);
6230 free (gotfree_input_line);
6231 if (exp->X_op == O_constant
6232 || exp->X_op == O_absent
6233 || exp->X_op == O_illegal
6234 || exp->X_op == O_register
6235 || exp->X_op == O_big)
6236 {
6237 char c = *input_line_pointer;
6238 *input_line_pointer = 0;
6239 as_bad (_("missing or invalid expression `%s'"), save);
6240 *input_line_pointer = c;
6241 }
6242 }
6243 }
6244 else
6245 expression (exp);
6246
6247 intel_syntax = -intel_syntax;
6248
6249 if (intel_syntax)
6250 i386_intel_simplify (exp);
6251 }
6252 #endif
6253
6254 static void
6255 signed_cons (int size)
6256 {
6257 if (flag_code == CODE_64BIT)
6258 cons_sign = 1;
6259 cons (size);
6260 cons_sign = -1;
6261 }
6262
6263 #ifdef TE_PE
6264 static void
6265 pe_directive_secrel (dummy)
6266 int dummy ATTRIBUTE_UNUSED;
6267 {
6268 expressionS exp;
6269
6270 do
6271 {
6272 expression (&exp);
6273 if (exp.X_op == O_symbol)
6274 exp.X_op = O_secrel;
6275
6276 emit_expr (&exp, 4);
6277 }
6278 while (*input_line_pointer++ == ',');
6279
6280 input_line_pointer--;
6281 demand_empty_rest_of_line ();
6282 }
6283 #endif
6284
6285 static int
6286 i386_immediate (char *imm_start)
6287 {
6288 char *save_input_line_pointer;
6289 char *gotfree_input_line;
6290 segT exp_seg = 0;
6291 expressionS *exp;
6292 i386_operand_type types;
6293
6294 operand_type_set (&types, ~0);
6295
6296 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6297 {
6298 as_bad (_("at most %d immediate operands are allowed"),
6299 MAX_IMMEDIATE_OPERANDS);
6300 return 0;
6301 }
6302
6303 exp = &im_expressions[i.imm_operands++];
6304 i.op[this_operand].imms = exp;
6305
6306 if (is_space_char (*imm_start))
6307 ++imm_start;
6308
6309 save_input_line_pointer = input_line_pointer;
6310 input_line_pointer = imm_start;
6311
6312 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6313 if (gotfree_input_line)
6314 input_line_pointer = gotfree_input_line;
6315
6316 exp_seg = expression (exp);
6317
6318 SKIP_WHITESPACE ();
6319 if (*input_line_pointer)
6320 as_bad (_("junk `%s' after expression"), input_line_pointer);
6321
6322 input_line_pointer = save_input_line_pointer;
6323 if (gotfree_input_line)
6324 {
6325 free (gotfree_input_line);
6326
6327 if (exp->X_op == O_constant || exp->X_op == O_register)
6328 exp->X_op = O_illegal;
6329 }
6330
6331 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6332 }
6333
6334 static int
6335 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6336 i386_operand_type types, const char *imm_start)
6337 {
6338 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6339 {
6340 if (imm_start)
6341 as_bad (_("missing or invalid immediate expression `%s'"),
6342 imm_start);
6343 return 0;
6344 }
6345 else if (exp->X_op == O_constant)
6346 {
6347 /* Size it properly later. */
6348 i.types[this_operand].bitfield.imm64 = 1;
6349 /* If BFD64, sign extend val. */
6350 if (!use_rela_relocations
6351 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6352 exp->X_add_number
6353 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6354 }
6355 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6356 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6357 && exp_seg != absolute_section
6358 && exp_seg != text_section
6359 && exp_seg != data_section
6360 && exp_seg != bss_section
6361 && exp_seg != undefined_section
6362 && !bfd_is_com_section (exp_seg))
6363 {
6364 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6365 return 0;
6366 }
6367 #endif
6368 else if (!intel_syntax && exp->X_op == O_register)
6369 {
6370 if (imm_start)
6371 as_bad (_("illegal immediate register operand %s"), imm_start);
6372 return 0;
6373 }
6374 else
6375 {
6376 /* This is an address. The size of the address will be
6377 determined later, depending on destination register,
6378 suffix, or the default for the section. */
6379 i.types[this_operand].bitfield.imm8 = 1;
6380 i.types[this_operand].bitfield.imm16 = 1;
6381 i.types[this_operand].bitfield.imm32 = 1;
6382 i.types[this_operand].bitfield.imm32s = 1;
6383 i.types[this_operand].bitfield.imm64 = 1;
6384 i.types[this_operand] = operand_type_and (i.types[this_operand],
6385 types);
6386 }
6387
6388 return 1;
6389 }
6390
6391 static char *
6392 i386_scale (char *scale)
6393 {
6394 offsetT val;
6395 char *save = input_line_pointer;
6396
6397 input_line_pointer = scale;
6398 val = get_absolute_expression ();
6399
6400 switch (val)
6401 {
6402 case 1:
6403 i.log2_scale_factor = 0;
6404 break;
6405 case 2:
6406 i.log2_scale_factor = 1;
6407 break;
6408 case 4:
6409 i.log2_scale_factor = 2;
6410 break;
6411 case 8:
6412 i.log2_scale_factor = 3;
6413 break;
6414 default:
6415 {
6416 char sep = *input_line_pointer;
6417
6418 *input_line_pointer = '\0';
6419 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6420 scale);
6421 *input_line_pointer = sep;
6422 input_line_pointer = save;
6423 return NULL;
6424 }
6425 }
6426 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6427 {
6428 as_warn (_("scale factor of %d without an index register"),
6429 1 << i.log2_scale_factor);
6430 i.log2_scale_factor = 0;
6431 }
6432 scale = input_line_pointer;
6433 input_line_pointer = save;
6434 return scale;
6435 }
6436
6437 static int
6438 i386_displacement (char *disp_start, char *disp_end)
6439 {
6440 expressionS *exp;
6441 segT exp_seg = 0;
6442 char *save_input_line_pointer;
6443 char *gotfree_input_line;
6444 int override;
6445 i386_operand_type bigdisp, types = anydisp;
6446 int ret;
6447
6448 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6449 {
6450 as_bad (_("at most %d displacement operands are allowed"),
6451 MAX_MEMORY_OPERANDS);
6452 return 0;
6453 }
6454
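/* Decide which displacement widths this operand may use.  Memory
   operands and absolute jumps follow the address size (possibly
   changed by an address-size prefix); PC-relative branches follow
   the data size instead, as handled in the else branch below.  */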
6455 operand_type_set (&bigdisp, 0);
6456 if ((i.types[this_operand].bitfield.jumpabsolute)
6457 || (!current_templates->start->opcode_modifier.jump
6458 && !current_templates->start->opcode_modifier.jumpdword))
6459 {
6460 bigdisp.bitfield.disp32 = 1;
6461 override = (i.prefix[ADDR_PREFIX] != 0);
6462 if (flag_code == CODE_64BIT)
6463 {
6464 if (!override)
6465 {
6466 bigdisp.bitfield.disp32s = 1;
6467 bigdisp.bitfield.disp64 = 1;
6468 }
6469 }
6470 else if ((flag_code == CODE_16BIT) ^ override)
6471 {
6472 bigdisp.bitfield.disp32 = 0;
6473 bigdisp.bitfield.disp16 = 1;
6474 }
6475 }
6476 else
6477 {
6478 /* For PC-relative branches, the width of the displacement
6479 is dependent upon data size, not address size. */
6480 override = (i.prefix[DATA_PREFIX] != 0);
6481 if (flag_code == CODE_64BIT)
6482 {
6483 if (override || i.suffix == WORD_MNEM_SUFFIX)
6484 bigdisp.bitfield.disp16 = 1;
6485 else
6486 {
6487 bigdisp.bitfield.disp32 = 1;
6488 bigdisp.bitfield.disp32s = 1;
6489 }
6490 }
6491 else
6492 {
6493 if (!override)
6494 override = (i.suffix == (flag_code != CODE_16BIT
6495 ? WORD_MNEM_SUFFIX
6496 : LONG_MNEM_SUFFIX));
6497 bigdisp.bitfield.disp32 = 1;
6498 if ((flag_code == CODE_16BIT) ^ override)
6499 {
6500 bigdisp.bitfield.disp32 = 0;
6501 bigdisp.bitfield.disp16 = 1;
6502 }
6503 }
6504 }
6505 i.types[this_operand] = operand_type_or (i.types[this_operand],
6506 bigdisp);
6507
6508 exp = &disp_expressions[i.disp_operands];
6509 i.op[this_operand].disps = exp;
6510 i.disp_operands++;
6511 save_input_line_pointer = input_line_pointer;
6512 input_line_pointer = disp_start;
6513 END_STRING_AND_SAVE (disp_end);
6514
6515 #ifndef GCC_ASM_O_HACK
6516 #define GCC_ASM_O_HACK 0
6517 #endif
6518 #if GCC_ASM_O_HACK
6519 END_STRING_AND_SAVE (disp_end + 1);
6520 if (i.types[this_operand].bitfield.baseIndex
6521 && displacement_string_end[-1] == '+')
6522 {
6523 /* This hack is to avoid a warning when using the "o"
6524 constraint within gcc asm statements.
6525 For instance:
6526
6527 #define _set_tssldt_desc(n,addr,limit,type) \
6528 __asm__ __volatile__ ( \
6529 "movw %w2,%0\n\t" \
6530 "movw %w1,2+%0\n\t" \
6531 "rorl $16,%1\n\t" \
6532 "movb %b1,4+%0\n\t" \
6533 "movb %4,5+%0\n\t" \
6534 "movb $0,6+%0\n\t" \
6535 "movb %h1,7+%0\n\t" \
6536 "rorl $16,%1" \
6537 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6538
6539 This works great except that the output assembler ends
6540 up looking a bit weird if it turns out that there is
6541 no offset. You end up producing code that looks like:
6542
6543 #APP
6544 movw $235,(%eax)
6545 movw %dx,2+(%eax)
6546 rorl $16,%edx
6547 movb %dl,4+(%eax)
6548 movb $137,5+(%eax)
6549 movb $0,6+(%eax)
6550 movb %dh,7+(%eax)
6551 rorl $16,%edx
6552 #NO_APP
6553
6554 So here we provide the missing zero. */
6555
6556 *displacement_string_end = '0';
6557 }
6558 #endif
6559 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6560 if (gotfree_input_line)
6561 input_line_pointer = gotfree_input_line;
6562
6563 exp_seg = expression (exp);
6564
6565 SKIP_WHITESPACE ();
6566 if (*input_line_pointer)
6567 as_bad (_("junk `%s' after expression"), input_line_pointer);
6568 #if GCC_ASM_O_HACK
6569 RESTORE_END_STRING (disp_end + 1);
6570 #endif
6571 input_line_pointer = save_input_line_pointer;
6572 if (gotfree_input_line)
6573 {
6574 free (gotfree_input_line);
6575
6576 if (exp->X_op == O_constant || exp->X_op == O_register)
6577 exp->X_op = O_illegal;
6578 }
6579
6580 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6581
6582 RESTORE_END_STRING (disp_end);
6583
6584 return ret;
6585 }
6586
6587 static int
6588 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6589 i386_operand_type types, const char *disp_start)
6590 {
6591 i386_operand_type bigdisp;
6592 int ret = 1;
6593
6594 /* We do this to make sure that the section symbol is in
6595 the symbol table. We will ultimately change the relocation
6596 to be relative to the beginning of the section. */
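/* The expression is rewritten below as SYM - GOT_symbol with a plain
   32/64-bit or PC-relative reloc; the subtraction of GOT_symbol marks
   the fixup so that the proper GOT-relative relocation can be
   reinstated later.  */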
6597 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6598 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6599 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6600 {
6601 if (exp->X_op != O_symbol)
6602 goto inv_disp;
6603
6604 if (S_IS_LOCAL (exp->X_add_symbol)
6605 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
6606 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6607 exp->X_op = O_subtract;
6608 exp->X_op_symbol = GOT_symbol;
6609 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6610 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6611 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6612 i.reloc[this_operand] = BFD_RELOC_64;
6613 else
6614 i.reloc[this_operand] = BFD_RELOC_32;
6615 }
6616
6617 else if (exp->X_op == O_absent
6618 || exp->X_op == O_illegal
6619 || exp->X_op == O_big)
6620 {
6621 inv_disp:
6622 as_bad (_("missing or invalid displacement expression `%s'"),
6623 disp_start);
6624 ret = 0;
6625 }
6626
6627 else if (flag_code == CODE_64BIT
6628 && !i.prefix[ADDR_PREFIX]
6629 && exp->X_op == O_constant)
6630 {
6631 /* Since the displacement is sign-extended to 64bit, don't allow
6632 disp32, and turn off disp32s if the value is out of range. */
6633 i.types[this_operand].bitfield.disp32 = 0;
6634 if (!fits_in_signed_long (exp->X_add_number))
6635 {
6636 i.types[this_operand].bitfield.disp32s = 0;
6637 if (i.types[this_operand].bitfield.baseindex)
6638 {
6639 as_bad (_("0x%lx out range of signed 32bit displacement"),
6640 (long) exp->X_add_number);
6641 ret = 0;
6642 }
6643 }
6644 }
6645
6646 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6647 else if (exp->X_op != O_constant
6648 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6649 && exp_seg != absolute_section
6650 && exp_seg != text_section
6651 && exp_seg != data_section
6652 && exp_seg != bss_section
6653 && exp_seg != undefined_section
6654 && !bfd_is_com_section (exp_seg))
6655 {
6656 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6657 ret = 0;
6658 }
6659 #endif
6660
6661 /* Check if this is a displacement only operand. */
6662 bigdisp = i.types[this_operand];
6663 bigdisp.bitfield.disp8 = 0;
6664 bigdisp.bitfield.disp16 = 0;
6665 bigdisp.bitfield.disp32 = 0;
6666 bigdisp.bitfield.disp32s = 0;
6667 bigdisp.bitfield.disp64 = 0;
6668 if (operand_type_all_zero (&bigdisp))
6669 i.types[this_operand] = operand_type_and (i.types[this_operand],
6670 types);
6671
6672 return ret;
6673 }
6674
6675 /* Make sure the memory operand we've been dealt is valid.
6676 Return 1 on success, 0 on a failure. */
6677
6678 static int
6679 i386_index_check (const char *operand_string)
6680 {
6681 int ok;
6682 const char *kind = "base/index";
6683 #if INFER_ADDR_PREFIX
6684 int fudged = 0;
6685
6686 tryprefix:
6687 #endif
6688 ok = 1;
6689 if (current_templates->start->opcode_modifier.isstring
6690 && !current_templates->start->opcode_modifier.immext
6691 && (current_templates->end[-1].opcode_modifier.isstring
6692 || i.mem_operands))
6693 {
6694 /* Memory operands of string insns are special in that they only allow
6695 a single register (rDI, rSI, or rBX) as their memory address. */
6696 unsigned int expected;
6697
6698 kind = "string address";
6699
6700 if (current_templates->start->opcode_modifier.w)
6701 {
6702 i386_operand_type type = current_templates->end[-1].operand_types[0];
6703
6704 if (!type.bitfield.baseindex
6705 || ((!i.mem_operands != !intel_syntax)
6706 && current_templates->end[-1].operand_types[1]
6707 .bitfield.baseindex))
6708 type = current_templates->end[-1].operand_types[1];
6709 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
6710 }
6711 else
6712 expected = 3 /* rBX */;
6713
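/* `ok' drops to 0 when the base register does not match the current
   address size (a hard error below), and to -1 when the operand is
   not the single register this string insn expects (only a warning,
   after which the operand is accepted).  */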
6714 if (!i.base_reg || i.index_reg
6715 || operand_type_check (i.types[this_operand], disp))
6716 ok = -1;
6717 else if (!(flag_code == CODE_64BIT
6718 ? i.prefix[ADDR_PREFIX]
6719 ? i.base_reg->reg_type.bitfield.reg32
6720 : i.base_reg->reg_type.bitfield.reg64
6721 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6722 ? i.base_reg->reg_type.bitfield.reg32
6723 : i.base_reg->reg_type.bitfield.reg16))
6724 ok = 0;
6725 else if (i.base_reg->reg_num != expected)
6726 ok = -1;
6727
6728 if (ok < 0)
6729 {
6730 unsigned int j;
6731
6732 for (j = 0; j < i386_regtab_size; ++j)
6733 if ((flag_code == CODE_64BIT
6734 ? i.prefix[ADDR_PREFIX]
6735 ? i386_regtab[j].reg_type.bitfield.reg32
6736 : i386_regtab[j].reg_type.bitfield.reg64
6737 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6738 ? i386_regtab[j].reg_type.bitfield.reg32
6739 : i386_regtab[j].reg_type.bitfield.reg16)
6740 && i386_regtab[j].reg_num == expected)
6741 break;
6742 gas_assert (j < i386_regtab_size);
6743 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
6744 operand_string,
6745 intel_syntax ? '[' : '(',
6746 register_prefix,
6747 i386_regtab[j].reg_name,
6748 intel_syntax ? ']' : ')');
6749 ok = 1;
6750 }
6751 }
6752 else if (flag_code == CODE_64BIT)
6753 {
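/* 64-bit addressing: the base must be a 64-bit register, or 32-bit
   with an address-size prefix, except that %rip (%eip with a prefix)
   may be the base when there is no index; the index must likewise
   match the address size and be usable as an index, with %riz/%eiz
   accepted as well.  */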
6754 if ((i.base_reg
6755 && ((i.prefix[ADDR_PREFIX] == 0
6756 && !i.base_reg->reg_type.bitfield.reg64)
6757 || (i.prefix[ADDR_PREFIX]
6758 && !i.base_reg->reg_type.bitfield.reg32))
6759 && (i.index_reg
6760 || i.base_reg->reg_num !=
6761 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
6762 || (i.index_reg
6763 && (!i.index_reg->reg_type.bitfield.baseindex
6764 || (i.prefix[ADDR_PREFIX] == 0
6765 && i.index_reg->reg_num != RegRiz
6766 && !i.index_reg->reg_type.bitfield.reg64
6767 )
6768 || (i.prefix[ADDR_PREFIX]
6769 && i.index_reg->reg_num != RegEiz
6770 && !i.index_reg->reg_type.bitfield.reg32))))
6771 ok = 0;
6772 }
6773 else
6774 {
6775 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
6776 {
6777 /* 16bit checks. */
6778 if ((i.base_reg
6779 && (!i.base_reg->reg_type.bitfield.reg16
6780 || !i.base_reg->reg_type.bitfield.baseindex))
6781 || (i.index_reg
6782 && (!i.index_reg->reg_type.bitfield.reg16
6783 || !i.index_reg->reg_type.bitfield.baseindex
6784 || !(i.base_reg
6785 && i.base_reg->reg_num < 6
6786 && i.index_reg->reg_num >= 6
6787 && i.log2_scale_factor == 0))))
6788 ok = 0;
6789 }
6790 else
6791 {
6792 /* 32bit checks. */
6793 if ((i.base_reg
6794 && !i.base_reg->reg_type.bitfield.reg32)
6795 || (i.index_reg
6796 && ((!i.index_reg->reg_type.bitfield.reg32
6797 && i.index_reg->reg_num != RegEiz)
6798 || !i.index_reg->reg_type.bitfield.baseindex)))
6799 ok = 0;
6800 }
6801 }
6802 if (!ok)
6803 {
6804 #if INFER_ADDR_PREFIX
6805 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
6806 {
6807 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
6808 i.prefixes += 1;
6809 /* Change the size of any displacement too. At most one of
6810 Disp16 or Disp32 is set.
6811 FIXME. There doesn't seem to be any real need for separate
6812 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
6813 Removing them would probably clean up the code quite a lot. */
6814 if (flag_code != CODE_64BIT
6815 && (i.types[this_operand].bitfield.disp16
6816 || i.types[this_operand].bitfield.disp32))
6817 i.types[this_operand]
6818 = operand_type_xor (i.types[this_operand], disp16_32);
6819 fudged = 1;
6820 goto tryprefix;
6821 }
6822 if (fudged)
6823 as_bad (_("`%s' is not a valid %s expression"),
6824 operand_string,
6825 kind);
6826 else
6827 #endif
6828 as_bad (_("`%s' is not a valid %s-bit %s expression"),
6829 operand_string,
6830 flag_code_names[i.prefix[ADDR_PREFIX]
6831 ? flag_code == CODE_32BIT
6832 ? CODE_16BIT
6833 : CODE_32BIT
6834 : flag_code],
6835 kind);
6836 }
6837 return ok;
6838 }
6839
6840 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
6841 on error. */
6842
6843 static int
6844 i386_att_operand (char *operand_string)
6845 {
6846 const reg_entry *r;
6847 char *end_op;
6848 char *op_string = operand_string;
6849
6850 if (is_space_char (*op_string))
6851 ++op_string;
6852
6853 /* We check for an absolute prefix (differentiating,
6854 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
6855 if (*op_string == ABSOLUTE_PREFIX)
6856 {
6857 ++op_string;
6858 if (is_space_char (*op_string))
6859 ++op_string;
6860 i.types[this_operand].bitfield.jumpabsolute = 1;
6861 }
6862
6863 /* Check if operand is a register. */
6864 if ((r = parse_register (op_string, &end_op)) != NULL)
6865 {
6866 i386_operand_type temp;
6867
6868 /* Check for a segment override by searching for ':' after a
6869 segment register. */
6870 op_string = end_op;
6871 if (is_space_char (*op_string))
6872 ++op_string;
6873 if (*op_string == ':'
6874 && (r->reg_type.bitfield.sreg2
6875 || r->reg_type.bitfield.sreg3))
6876 {
6877 switch (r->reg_num)
6878 {
6879 case 0:
6880 i.seg[i.mem_operands] = &es;
6881 break;
6882 case 1:
6883 i.seg[i.mem_operands] = &cs;
6884 break;
6885 case 2:
6886 i.seg[i.mem_operands] = &ss;
6887 break;
6888 case 3:
6889 i.seg[i.mem_operands] = &ds;
6890 break;
6891 case 4:
6892 i.seg[i.mem_operands] = &fs;
6893 break;
6894 case 5:
6895 i.seg[i.mem_operands] = &gs;
6896 break;
6897 }
6898
6899 /* Skip the ':' and whitespace. */
6900 ++op_string;
6901 if (is_space_char (*op_string))
6902 ++op_string;
6903
6904 if (!is_digit_char (*op_string)
6905 && !is_identifier_char (*op_string)
6906 && *op_string != '('
6907 && *op_string != ABSOLUTE_PREFIX)
6908 {
6909 as_bad (_("bad memory operand `%s'"), op_string);
6910 return 0;
6911 }
6912 /* Handle case of %es:*foo. */
6913 if (*op_string == ABSOLUTE_PREFIX)
6914 {
6915 ++op_string;
6916 if (is_space_char (*op_string))
6917 ++op_string;
6918 i.types[this_operand].bitfield.jumpabsolute = 1;
6919 }
6920 goto do_memory_reference;
6921 }
6922 if (*op_string)
6923 {
6924 as_bad (_("junk `%s' after register"), op_string);
6925 return 0;
6926 }
6927 temp = r->reg_type;
6928 temp.bitfield.baseindex = 0;
6929 i.types[this_operand] = operand_type_or (i.types[this_operand],
6930 temp);
6931 i.types[this_operand].bitfield.unspecified = 0;
6932 i.op[this_operand].regs = r;
6933 i.reg_operands++;
6934 }
6935 else if (*op_string == REGISTER_PREFIX)
6936 {
6937 as_bad (_("bad register name `%s'"), op_string);
6938 return 0;
6939 }
6940 else if (*op_string == IMMEDIATE_PREFIX)
6941 {
6942 ++op_string;
6943 if (i.types[this_operand].bitfield.jumpabsolute)
6944 {
6945 as_bad (_("immediate operand illegal with absolute jump"));
6946 return 0;
6947 }
6948 if (!i386_immediate (op_string))
6949 return 0;
6950 }
6951 else if (is_digit_char (*op_string)
6952 || is_identifier_char (*op_string)
6953 || *op_string == '(')
6954 {
6955 /* This is a memory reference of some sort. */
6956 char *base_string;
6957
6958 /* Start and end of displacement string expression (if found). */
6959 char *displacement_string_start;
6960 char *displacement_string_end;
6961
6962 do_memory_reference:
6963 if ((i.mem_operands == 1
6964 && !current_templates->start->opcode_modifier.isstring)
6965 || i.mem_operands == 2)
6966 {
6967 as_bad (_("too many memory references for `%s'"),
6968 current_templates->start->name);
6969 return 0;
6970 }
6971
6972 /* Check for base index form. We detect the base index form by
6973 looking for an ')' at the end of the operand, searching
6974 for the '(' matching it, and finding a REGISTER_PREFIX or ','
6975 after the '('. */
6976 base_string = op_string + strlen (op_string);
6977
6978 --base_string;
6979 if (is_space_char (*base_string))
6980 --base_string;
6981
6982 /* If we only have a displacement, set up for it to be parsed later. */
6983 displacement_string_start = op_string;
6984 displacement_string_end = base_string + 1;
6985
6986 if (*base_string == ')')
6987 {
6988 char *temp_string;
6989 unsigned int parens_balanced = 1;
6990 /* We've already checked that the numbers of left and right ()'s
6991 are equal, so this loop will not be infinite. */
6992 do
6993 {
6994 base_string--;
6995 if (*base_string == ')')
6996 parens_balanced++;
6997 if (*base_string == '(')
6998 parens_balanced--;
6999 }
7000 while (parens_balanced);
7001
7002 temp_string = base_string;
7003
7004 /* Skip past '(' and whitespace. */
7005 ++base_string;
7006 if (is_space_char (*base_string))
7007 ++base_string;
7008
7009 if (*base_string == ','
7010 || ((i.base_reg = parse_register (base_string, &end_op))
7011 != NULL))
7012 {
7013 displacement_string_end = temp_string;
7014
7015 i.types[this_operand].bitfield.baseindex = 1;
7016
7017 if (i.base_reg)
7018 {
7019 base_string = end_op;
7020 if (is_space_char (*base_string))
7021 ++base_string;
7022 }
7023
7024 /* There may be an index reg or scale factor here. */
7025 if (*base_string == ',')
7026 {
7027 ++base_string;
7028 if (is_space_char (*base_string))
7029 ++base_string;
7030
7031 if ((i.index_reg = parse_register (base_string, &end_op))
7032 != NULL)
7033 {
7034 base_string = end_op;
7035 if (is_space_char (*base_string))
7036 ++base_string;
7037 if (*base_string == ',')
7038 {
7039 ++base_string;
7040 if (is_space_char (*base_string))
7041 ++base_string;
7042 }
7043 else if (*base_string != ')')
7044 {
7045 as_bad (_("expecting `,' or `)' "
7046 "after index register in `%s'"),
7047 operand_string);
7048 return 0;
7049 }
7050 }
7051 else if (*base_string == REGISTER_PREFIX)
7052 {
7053 as_bad (_("bad register name `%s'"), base_string);
7054 return 0;
7055 }
7056
7057 /* Check for scale factor. */
7058 if (*base_string != ')')
7059 {
7060 char *end_scale = i386_scale (base_string);
7061
7062 if (!end_scale)
7063 return 0;
7064
7065 base_string = end_scale;
7066 if (is_space_char (*base_string))
7067 ++base_string;
7068 if (*base_string != ')')
7069 {
7070 as_bad (_("expecting `)' "
7071 "after scale factor in `%s'"),
7072 operand_string);
7073 return 0;
7074 }
7075 }
7076 else if (!i.index_reg)
7077 {
7078 as_bad (_("expecting index register or scale factor "
7079 "after `,'; got '%c'"),
7080 *base_string);
7081 return 0;
7082 }
7083 }
7084 else if (*base_string != ')')
7085 {
7086 as_bad (_("expecting `,' or `)' "
7087 "after base register in `%s'"),
7088 operand_string);
7089 return 0;
7090 }
7091 }
7092 else if (*base_string == REGISTER_PREFIX)
7093 {
7094 as_bad (_("bad register name `%s'"), base_string);
7095 return 0;
7096 }
7097 }
7098
7099 /* If there's an expression beginning the operand, parse it,
7100 assuming displacement_string_start and
7101 displacement_string_end are meaningful. */
7102 if (displacement_string_start != displacement_string_end)
7103 {
7104 if (!i386_displacement (displacement_string_start,
7105 displacement_string_end))
7106 return 0;
7107 }
7108
7109 /* Special case for (%dx) while doing input/output op. */
7110 if (i.base_reg
7111 && operand_type_equal (&i.base_reg->reg_type,
7112 &reg16_inoutportreg)
7113 && i.index_reg == 0
7114 && i.log2_scale_factor == 0
7115 && i.seg[i.mem_operands] == 0
7116 && !operand_type_check (i.types[this_operand], disp))
7117 {
7118 i.types[this_operand] = inoutportreg;
7119 return 1;
7120 }
7121
7122 if (i386_index_check (operand_string) == 0)
7123 return 0;
7124 i.types[this_operand].bitfield.mem = 1;
7125 i.mem_operands++;
7126 }
7127 else
7128 {
7129 /* It's not a memory operand; argh! */
7130 as_bad (_("invalid char %s beginning operand %d `%s'"),
7131 output_invalid (*op_string),
7132 this_operand + 1,
7133 op_string);
7134 return 0;
7135 }
7136 return 1; /* Normal return. */
7137 }
7138 \f
7139 /* md_estimate_size_before_relax()
7140
7141 Called just before relax() for rs_machine_dependent frags. The x86
7142 assembler uses these frags to handle variable size jump
7143 instructions.
7144
7145 Any symbol that is now undefined will not become defined.
7146 Return the correct fr_subtype in the frag.
7147 Return the initial "guess for variable size of frag" to caller.
7148 The guess is actually the growth beyond the fixed part. Whatever
7149 we do to grow the fixed or variable part contributes to our
7150 returned value. */
7151
7152 int
7153 md_estimate_size_before_relax (fragS *fragP, segT segment)
7156 {
7157 /* We've already got fragP->fr_subtype right; all we have to do is
7158 check for un-relaxable symbols. On an ELF system, we can't relax
7159 an externally visible symbol, because it may be overridden by a
7160 shared library. */
7161 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7162 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7163 || (IS_ELF
7164 && (S_IS_EXTERNAL (fragP->fr_symbol)
7165 || S_IS_WEAK (fragP->fr_symbol)
7166 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7167 & BSF_GNU_INDIRECT_FUNCTION))))
7168 #endif
7169 #if defined (OBJ_COFF) && defined (TE_PE)
7170 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7171 && S_IS_WEAK (fragP->fr_symbol))
7172 #endif
7173 )
7174 {
7175 /* Symbol is undefined in this segment, or we need to keep a
7176 reloc so that weak symbols can be overridden. */
7177 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7178 enum bfd_reloc_code_real reloc_type;
7179 unsigned char *opcode;
7180 int old_fr_fix;
7181
7182 if (fragP->fr_var != NO_RELOC)
7183 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7184 else if (size == 2)
7185 reloc_type = BFD_RELOC_16_PCREL;
7186 else
7187 reloc_type = BFD_RELOC_32_PCREL;
7188
7189 old_fr_fix = fragP->fr_fix;
7190 opcode = (unsigned char *) fragP->fr_opcode;
7191
7192 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7193 {
7194 case UNCOND_JUMP:
7195 /* Make jmp (0xeb) a (d)word displacement jump. */
7196 opcode[0] = 0xe9;
7197 fragP->fr_fix += size;
7198 fix_new (fragP, old_fr_fix, size,
7199 fragP->fr_symbol,
7200 fragP->fr_offset, 1,
7201 reloc_type);
7202 break;
7203
7204 case COND_JUMP86:
7205 if (size == 2
7206 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7207 {
7208 /* Negate the condition, and branch past an
7209 unconditional jump. */
7210 opcode[0] ^= 1;
7211 opcode[1] = 3;
7212 /* Insert an unconditional jump. */
7213 opcode[2] = 0xe9;
7214 /* We added two extra opcode bytes, and have a two byte
7215 offset. */
7216 fragP->fr_fix += 2 + 2;
7217 fix_new (fragP, old_fr_fix + 2, 2,
7218 fragP->fr_symbol,
7219 fragP->fr_offset, 1,
7220 reloc_type);
7221 break;
7222 }
7223 /* Fall through. */
7224
7225 case COND_JUMP:
7226 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7227 {
7228 fixS *fixP;
7229
7230 fragP->fr_fix += 1;
7231 fixP = fix_new (fragP, old_fr_fix, 1,
7232 fragP->fr_symbol,
7233 fragP->fr_offset, 1,
7234 BFD_RELOC_8_PCREL);
7235 fixP->fx_signed = 1;
7236 break;
7237 }
7238
7239 /* This changes the byte-displacement jump 0x7N
7240 to the (d)word-displacement jump 0x0f,0x8N. */
7241 opcode[1] = opcode[0] + 0x10;
7242 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7243 /* We've added an opcode byte. */
7244 fragP->fr_fix += 1 + size;
7245 fix_new (fragP, old_fr_fix + 1, size,
7246 fragP->fr_symbol,
7247 fragP->fr_offset, 1,
7248 reloc_type);
7249 break;
7250
7251 default:
7252 BAD_CASE (fragP->fr_subtype);
7253 break;
7254 }
7255 frag_wane (fragP);
7256 return fragP->fr_fix - old_fr_fix;
7257 }
7258
7259 /* Guess size depending on current relax state. Initially the relax
7260 state will correspond to a short jump and we return 1, because
7261 the variable part of the frag (the branch offset) is one byte
7262 long. However, we can relax a section more than once and in that
7263 case we must either set fr_subtype back to the unrelaxed state,
7264 or return the value for the appropriate branch. */
7265 return md_relax_table[fragP->fr_subtype].rlx_length;
7266 }
7267
7268 /* Called after relax() is finished.
7269
7270 In: Address of frag.
7271 fr_type == rs_machine_dependent.
7272 fr_subtype is what the address relaxed to.
7273
7274 Out: Any fixSs and constants are set up.
7275 Caller will turn frag into a ".space 0". */
7276
7277 void
7278 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7279 fragS *fragP)
7282 {
7283 unsigned char *opcode;
7284 unsigned char *where_to_put_displacement = NULL;
7285 offsetT target_address;
7286 offsetT opcode_address;
7287 unsigned int extension = 0;
7288 offsetT displacement_from_opcode_start;
7289
7290 opcode = (unsigned char *) fragP->fr_opcode;
7291
7292 /* Address we want to reach in file space. */
7293 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7294
7295 /* Address opcode resides at in file space. */
7296 opcode_address = fragP->fr_address + fragP->fr_fix;
7297
7298 /* Displacement from opcode start to fill into instruction. */
7299 displacement_from_opcode_start = target_address - opcode_address;
7300
7301 if ((fragP->fr_subtype & BIG) == 0)
7302 {
7303 /* Don't have to change opcode. */
7304 extension = 1; /* 1 opcode + 1 displacement */
7305 where_to_put_displacement = &opcode[1];
7306 }
7307 else
7308 {
7309 if (no_cond_jump_promotion
7310 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7311 as_warn_where (fragP->fr_file, fragP->fr_line,
7312 _("long jump required"));
7313
7314 switch (fragP->fr_subtype)
7315 {
7316 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7317 extension = 4; /* 1 opcode + 4 displacement */
7318 opcode[0] = 0xe9;
7319 where_to_put_displacement = &opcode[1];
7320 break;
7321
7322 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7323 extension = 2; /* 1 opcode + 2 displacement */
7324 opcode[0] = 0xe9;
7325 where_to_put_displacement = &opcode[1];
7326 break;
7327
7328 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7329 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7330 extension = 5; /* 2 opcode + 4 displacement */
7331 opcode[1] = opcode[0] + 0x10;
7332 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7333 where_to_put_displacement = &opcode[2];
7334 break;
7335
7336 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7337 extension = 3; /* 2 opcode + 2 displacement */
7338 opcode[1] = opcode[0] + 0x10;
7339 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7340 where_to_put_displacement = &opcode[2];
7341 break;
7342
7343 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7344 extension = 4;
7345 opcode[0] ^= 1;
7346 opcode[1] = 3;
7347 opcode[2] = 0xe9;
7348 where_to_put_displacement = &opcode[3];
7349 break;
7350
7351 default:
7352 BAD_CASE (fragP->fr_subtype);
7353 break;
7354 }
7355 }
7356
7357 /* If size is less than four we are sure that the operand fits,
7358 but if it's 4, then it could be that the displacement is larger
7359 than +/- 2GB. */
7360 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7361 && object_64bit
7362 && ((addressT) (displacement_from_opcode_start - extension
7363 + ((addressT) 1 << 31))
7364 > (((addressT) 2 << 31) - 1)))
7365 {
7366 as_bad_where (fragP->fr_file, fragP->fr_line,
7367 _("jump target out of range"));
7368 /* Make us emit 0. */
7369 displacement_from_opcode_start = extension;
7370 }
7371 /* Now put displacement after opcode. */
7372 md_number_to_chars ((char *) where_to_put_displacement,
7373 (valueT) (displacement_from_opcode_start - extension),
7374 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7375 fragP->fr_fix += extension;
7376 }
7377 \f
7378 /* Apply a fixup (fixS) to segment data, once it has been determined
7379 by our caller that we have all the info we need to fix it up.
7380
7381 On the 386, immediates, displacements, and data pointers are all in
7382 the same (little-endian) format, so we don't need to care about which
7383 we are handling. */
7384
7385 void
7386 md_apply_fix (fixS *fixP, /* The fix we're to put in. */
7387 valueT *valP, /* Pointer to the value of the bits. */
7388 segT seg ATTRIBUTE_UNUSED /* Segment fix is from. */)
7393 {
7394 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7395 valueT value = *valP;
7396
7397 #if !defined (TE_Mach)
7398 if (fixP->fx_pcrel)
7399 {
7400 switch (fixP->fx_r_type)
7401 {
7402 default:
7403 break;
7404
7405 case BFD_RELOC_64:
7406 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7407 break;
7408 case BFD_RELOC_32:
7409 case BFD_RELOC_X86_64_32S:
7410 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7411 break;
7412 case BFD_RELOC_16:
7413 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7414 break;
7415 case BFD_RELOC_8:
7416 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7417 break;
7418 }
7419 }
7420
7421 if (fixP->fx_addsy != NULL
7422 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7423 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7424 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7425 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7426 && !use_rela_relocations)
7427 {
7428 /* This is a hack. There should be a better way to handle this.
7429 This covers for the fact that bfd_install_relocation will
7430 subtract the current location (for partial_inplace, PC relative
7431 relocations); see more below. */
7432 #ifndef OBJ_AOUT
7433 if (IS_ELF
7434 #ifdef TE_PE
7435 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7436 #endif
7437 )
7438 value += fixP->fx_where + fixP->fx_frag->fr_address;
7439 #endif
7440 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7441 if (IS_ELF)
7442 {
7443 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7444
7445 if ((sym_seg == seg
7446 || (symbol_section_p (fixP->fx_addsy)
7447 && sym_seg != absolute_section))
7448 && !generic_force_reloc (fixP))
7449 {
7450 /* Yes, we add the values in twice. This is because
7451 bfd_install_relocation subtracts them out again. I think
7452 bfd_install_relocation is broken, but I don't dare change
7453 it. FIXME. */
7454 value += fixP->fx_where + fixP->fx_frag->fr_address;
7455 }
7456 }
7457 #endif
7458 #if defined (OBJ_COFF) && defined (TE_PE)
7459 /* For some reason, the PE format does not store a
7460 section address offset for a PC relative symbol. */
7461 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7462 || S_IS_WEAK (fixP->fx_addsy))
7463 value += md_pcrel_from (fixP);
7464 #endif
7465 }
7466 #if defined (OBJ_COFF) && defined (TE_PE)
7467 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7468 {
7469 value -= S_GET_VALUE (fixP->fx_addsy);
7470 }
7471 #endif
7472
7473 /* Fix a few things - the dynamic linker expects certain values here,
7474 and we must not disappoint it. */
7475 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7476 if (IS_ELF && fixP->fx_addsy)
7477 switch (fixP->fx_r_type)
7478 {
7479 case BFD_RELOC_386_PLT32:
7480 case BFD_RELOC_X86_64_PLT32:
7481 /* Make the jump instruction point to the address of the operand. At
7482 runtime we merely add the offset to the actual PLT entry. */
7483 value = -4;
7484 break;
7485
7486 case BFD_RELOC_386_TLS_GD:
7487 case BFD_RELOC_386_TLS_LDM:
7488 case BFD_RELOC_386_TLS_IE_32:
7489 case BFD_RELOC_386_TLS_IE:
7490 case BFD_RELOC_386_TLS_GOTIE:
7491 case BFD_RELOC_386_TLS_GOTDESC:
7492 case BFD_RELOC_X86_64_TLSGD:
7493 case BFD_RELOC_X86_64_TLSLD:
7494 case BFD_RELOC_X86_64_GOTTPOFF:
7495 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7496 value = 0; /* Fully resolved at runtime. No addend. */
7497 /* Fallthrough */
7498 case BFD_RELOC_386_TLS_LE:
7499 case BFD_RELOC_386_TLS_LDO_32:
7500 case BFD_RELOC_386_TLS_LE_32:
7501 case BFD_RELOC_X86_64_DTPOFF32:
7502 case BFD_RELOC_X86_64_DTPOFF64:
7503 case BFD_RELOC_X86_64_TPOFF32:
7504 case BFD_RELOC_X86_64_TPOFF64:
7505 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7506 break;
7507
7508 case BFD_RELOC_386_TLS_DESC_CALL:
7509 case BFD_RELOC_X86_64_TLSDESC_CALL:
7510 value = 0; /* Fully resolved at runtime. No addend. */
7511 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7512 fixP->fx_done = 0;
7513 return;
7514
7515 case BFD_RELOC_386_GOT32:
7516 case BFD_RELOC_X86_64_GOT32:
7517 value = 0; /* Fully resolved at runtime. No addend. */
7518 break;
7519
7520 case BFD_RELOC_VTABLE_INHERIT:
7521 case BFD_RELOC_VTABLE_ENTRY:
7522 fixP->fx_done = 0;
7523 return;
7524
7525 default:
7526 break;
7527 }
7528 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7529 *valP = value;
7530 #endif /* !defined (TE_Mach) */
7531
7532 /* Are we finished with this relocation now? */
7533 if (fixP->fx_addsy == NULL)
7534 fixP->fx_done = 1;
7535 #if defined (OBJ_COFF) && defined (TE_PE)
7536 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7537 {
7538 fixP->fx_done = 0;
7539 /* Remember value for tc_gen_reloc. */
7540 fixP->fx_addnumber = value;
7541 /* Clear out the frag for now. */
7542 value = 0;
7543 }
7544 #endif
7545 else if (use_rela_relocations)
7546 {
7547 fixP->fx_no_overflow = 1;
7548 /* Remember value for tc_gen_reloc. */
7549 fixP->fx_addnumber = value;
7550 value = 0;
7551 }
7552
7553 md_number_to_chars (p, value, fixP->fx_size);
7554 }
7555 \f
7556 char *
7557 md_atof (int type, char *litP, int *sizeP)
7558 {
7559 /* This outputs the LITTLENUMs in REVERSE order;
7560 in accord with the little-endian 386. */
7561 return ieee_md_atof (type, litP, sizeP, FALSE);
7562 }
7563 \f
7564 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7565
7566 static char *
7567 output_invalid (int c)
7568 {
7569 if (ISPRINT (c))
7570 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7571 "'%c'", c);
7572 else
7573 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7574 "(0x%x)", (unsigned char) c);
7575 return output_invalid_buf;
7576 }
7577
7578 /* REG_STRING starts *before* REGISTER_PREFIX. */
7579
7580 static const reg_entry *
7581 parse_real_register (char *reg_string, char **end_op)
7582 {
7583 char *s = reg_string;
7584 char *p;
7585 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7586 const reg_entry *r;
7587
7588 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7589 if (*s == REGISTER_PREFIX)
7590 ++s;
7591
7592 if (is_space_char (*s))
7593 ++s;
7594
7595 p = reg_name_given;
7596 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7597 {
7598 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7599 return (const reg_entry *) NULL;
7600 s++;
7601 }
7602
7603 /* For naked regs, make sure that we are not dealing with an identifier.
7604 This prevents confusing an identifier like `eax_var' with register
7605 `eax'. */
7606 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7607 return (const reg_entry *) NULL;
7608
7609 *end_op = s;
7610
7611 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7612
7613 /* Handle floating point regs, allowing spaces in the (i) part. */
7614 if (r == i386_regtab /* %st is first entry of table */)
7615 {
7616 if (is_space_char (*s))
7617 ++s;
7618 if (*s == '(')
7619 {
7620 ++s;
7621 if (is_space_char (*s))
7622 ++s;
7623 if (*s >= '0' && *s <= '7')
7624 {
7625 int fpr = *s - '0';
7626 ++s;
7627 if (is_space_char (*s))
7628 ++s;
7629 if (*s == ')')
7630 {
7631 *end_op = s + 1;
7632 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7633 know (r);
7634 return r + fpr;
7635 }
7636 }
7637 /* We have "%st(" then garbage. */
7638 return (const reg_entry *) NULL;
7639 }
7640 }
7641
7642 if (r == NULL || allow_pseudo_reg)
7643 return r;
7644
7645 if (operand_type_all_zero (&r->reg_type))
7646 return (const reg_entry *) NULL;
7647
7648 if ((r->reg_type.bitfield.reg32
7649 || r->reg_type.bitfield.sreg3
7650 || r->reg_type.bitfield.control
7651 || r->reg_type.bitfield.debug
7652 || r->reg_type.bitfield.test)
7653 && !cpu_arch_flags.bitfield.cpui386)
7654 return (const reg_entry *) NULL;
7655
7656 if (r->reg_type.bitfield.floatreg
7657 && !cpu_arch_flags.bitfield.cpu8087
7658 && !cpu_arch_flags.bitfield.cpu287
7659 && !cpu_arch_flags.bitfield.cpu387)
7660 return (const reg_entry *) NULL;
7661
7662 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7663 return (const reg_entry *) NULL;
7664
7665 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7666 return (const reg_entry *) NULL;
7667
7668 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7669 return (const reg_entry *) NULL;
7670
7671 /* Don't allow fake index registers unless allow_index_reg is non-zero. */
7672 if (!allow_index_reg
7673 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7674 return (const reg_entry *) NULL;
7675
7676 if (((r->reg_flags & (RegRex64 | RegRex))
7677 || r->reg_type.bitfield.reg64)
7678 && (!cpu_arch_flags.bitfield.cpulm
7679 || !operand_type_equal (&r->reg_type, &control))
7680 && flag_code != CODE_64BIT)
7681 return (const reg_entry *) NULL;
7682
7683 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7684 return (const reg_entry *) NULL;
7685
7686 return r;
7687 }
7688
7689 /* REG_STRING starts *before* REGISTER_PREFIX. */
7690
7691 static const reg_entry *
7692 parse_register (char *reg_string, char **end_op)
7693 {
7694 const reg_entry *r;
7695
7696 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7697 r = parse_real_register (reg_string, end_op);
7698 else
7699 r = NULL;
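/* Not a literal register name; it may still be a symbol that has
   been equated to a register, in which case it lives in reg_section
   and its value expression is an O_register.  */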
7700 if (!r)
7701 {
7702 char *save = input_line_pointer;
7703 char c;
7704 symbolS *symbolP;
7705
7706 input_line_pointer = reg_string;
7707 c = get_symbol_end ();
7708 symbolP = symbol_find (reg_string);
7709 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
7710 {
7711 const expressionS *e = symbol_get_value_expression (symbolP);
7712
7713 know (e->X_op == O_register);
7714 know (e->X_add_number >= 0
7715 && (valueT) e->X_add_number < i386_regtab_size);
7716 r = i386_regtab + e->X_add_number;
7717 *end_op = input_line_pointer;
7718 }
7719 *input_line_pointer = c;
7720 input_line_pointer = save;
7721 }
7722 return r;
7723 }
7724
7725 int
7726 i386_parse_name (char *name, expressionS *e, char *nextcharP)
7727 {
7728 const reg_entry *r;
7729 char *end = input_line_pointer;
7730
7731 *end = *nextcharP;
7732 r = parse_register (name, &input_line_pointer);
7733 if (r && end <= input_line_pointer)
7734 {
7735 *nextcharP = *input_line_pointer;
7736 *input_line_pointer = 0;
7737 e->X_op = O_register;
7738 e->X_add_number = r - i386_regtab;
7739 return 1;
7740 }
7741 input_line_pointer = end;
7742 *end = 0;
7743 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
7744 }
7745
7746 void
7747 md_operand (expressionS *e)
7748 {
7749 char *end;
7750 const reg_entry *r;
7751
7752 switch (*input_line_pointer)
7753 {
7754 case REGISTER_PREFIX:
7755 r = parse_real_register (input_line_pointer, &end);
7756 if (r)
7757 {
7758 e->X_op = O_register;
7759 e->X_add_number = r - i386_regtab;
7760 input_line_pointer = end;
7761 }
7762 break;
7763
7764 case '[':
7765 gas_assert (intel_syntax);
7766 end = input_line_pointer++;
7767 expression (e);
7768 if (*input_line_pointer == ']')
7769 {
7770 ++input_line_pointer;
7771 e->X_op_symbol = make_expr_symbol (e);
7772 e->X_add_symbol = NULL;
7773 e->X_add_number = 0;
7774 e->X_op = O_index;
7775 }
7776 else
7777 {
7778 e->X_op = O_absent;
7779 input_line_pointer = end;
7780 }
7781 break;
7782 }
7783 }
7784
7785 \f
7786 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7787 const char *md_shortopts = "kVQ:sqn";
7788 #else
7789 const char *md_shortopts = "qn";
7790 #endif
7791
7792 #define OPTION_32 (OPTION_MD_BASE + 0)
7793 #define OPTION_64 (OPTION_MD_BASE + 1)
7794 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
7795 #define OPTION_MARCH (OPTION_MD_BASE + 3)
7796 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
7797 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
7798 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
7799 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
7800 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
7801 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
7802 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
7803 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
7804
7805 struct option md_longopts[] =
7806 {
7807 {"32", no_argument, NULL, OPTION_32},
7808 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7809 || defined (TE_PE) || defined (TE_PEP))
7810 {"64", no_argument, NULL, OPTION_64},
7811 #endif
7812 {"divide", no_argument, NULL, OPTION_DIVIDE},
7813 {"march", required_argument, NULL, OPTION_MARCH},
7814 {"mtune", required_argument, NULL, OPTION_MTUNE},
7815 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
7816 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
7817 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
7818 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
7819 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
7820 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
7821 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
7822 {NULL, no_argument, NULL, 0}
7823 };
7824 size_t md_longopts_size = sizeof (md_longopts);
7825
7826 int
7827 md_parse_option (int c, char *arg)
7828 {
7829 unsigned int i;
7830 char *arch, *next;
7831
7832 switch (c)
7833 {
7834 case 'n':
7835 optimize_align_code = 0;
7836 break;
7837
7838 case 'q':
7839 quiet_warnings = 1;
7840 break;
7841
7842 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7843 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
7844 should be emitted or not. FIXME: Not implemented. */
7845 case 'Q':
7846 break;
7847
7848 /* -V: SVR4 argument to print version ID. */
7849 case 'V':
7850 print_version_id ();
7851 break;
7852
7853 /* -k: Ignore for FreeBSD compatibility. */
7854 case 'k':
7855 break;
7856
7857 case 's':
7858 /* -s: On i386 Solaris, this tells the native assembler to use
7859 .stab instead of .stab.excl. We always use .stab anyhow. */
7860 break;
7861 #endif
7862 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7863 || defined (TE_PE) || defined (TE_PEP))
7864 case OPTION_64:
7865 {
7866 const char **list, **l;
7867
7868 list = bfd_target_list ();
7869 for (l = list; *l != NULL; l++)
7870 if (CONST_STRNEQ (*l, "elf64-x86-64")
7871 || strcmp (*l, "coff-x86-64") == 0
7872 || strcmp (*l, "pe-x86-64") == 0
7873 || strcmp (*l, "pei-x86-64") == 0)
7874 {
7875 default_arch = "x86_64";
7876 break;
7877 }
7878 if (*l == NULL)
7879 as_fatal (_("No compiled in support for x86_64"));
7880 free (list);
7881 }
7882 break;
7883 #endif
7884
7885 case OPTION_32:
7886 default_arch = "i386";
7887 break;
7888
7889 case OPTION_DIVIDE:
7890 #ifdef SVR4_COMMENT_CHARS
7891 {
7892 char *n, *t;
7893 const char *s;
7894
7895 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
7896 t = n;
7897 for (s = i386_comment_chars; *s != '\0'; s++)
7898 if (*s != '/')
7899 *t++ = *s;
7900 *t = '\0';
7901 i386_comment_chars = n;
7902 }
7903 #endif
7904 break;
7905
7906 case OPTION_MARCH:
7907 arch = xstrdup (arg);
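/* -march= takes CPU[+EXTENSION...]; process each `+'-separated piece,
   which either names a processor or enables an ISA extension (or
   disables one when prefixed with "no").  */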
7908 do
7909 {
7910 if (*arch == '.')
7911 as_fatal (_("Invalid -march= option: `%s'"), arg);
7912 next = strchr (arch, '+');
7913 if (next)
7914 *next++ = '\0';
7915 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7916 {
7917 if (strcmp (arch, cpu_arch [i].name) == 0)
7918 {
7919 /* Processor. */
7920 cpu_arch_name = cpu_arch[i].name;
7921 cpu_sub_arch_name = NULL;
7922 cpu_arch_flags = cpu_arch[i].flags;
7923 cpu_arch_isa = cpu_arch[i].type;
7924 cpu_arch_isa_flags = cpu_arch[i].flags;
7925 if (!cpu_arch_tune_set)
7926 {
7927 cpu_arch_tune = cpu_arch_isa;
7928 cpu_arch_tune_flags = cpu_arch_isa_flags;
7929 }
7930 break;
7931 }
7932 else if (*cpu_arch [i].name == '.'
7933 && strcmp (arch, cpu_arch [i].name + 1) == 0)
7934 {
7935 /* ISA extension. */
7936 i386_cpu_flags flags;
7937
7938 if (strncmp (arch, "no", 2))
7939 flags = cpu_flags_or (cpu_arch_flags,
7940 cpu_arch[i].flags);
7941 else
7942 flags = cpu_flags_and_not (cpu_arch_flags,
7943 cpu_arch[i].flags);
7944 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
7945 {
7946 if (cpu_sub_arch_name)
7947 {
7948 char *name = cpu_sub_arch_name;
7949 cpu_sub_arch_name = concat (name,
7950 cpu_arch[i].name,
7951 (const char *) NULL);
7952 free (name);
7953 }
7954 else
7955 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
7956 cpu_arch_flags = flags;
7957 }
7958 break;
7959 }
7960 }
7961
7962 if (i >= ARRAY_SIZE (cpu_arch))
7963 as_fatal (_("Invalid -march= option: `%s'"), arg);
7964
7965 arch = next;
7966 }
7967 while (next != NULL);
7968 break;
7969
7970 case OPTION_MTUNE:
7971 if (*arg == '.')
7972 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7973 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7974 {
7975 if (strcmp (arg, cpu_arch [i].name) == 0)
7976 {
7977 cpu_arch_tune_set = 1;
7978 cpu_arch_tune = cpu_arch [i].type;
7979 cpu_arch_tune_flags = cpu_arch[i].flags;
7980 break;
7981 }
7982 }
7983 if (i >= ARRAY_SIZE (cpu_arch))
7984 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7985 break;
7986
7987 case OPTION_MMNEMONIC:
7988 if (strcasecmp (arg, "att") == 0)
7989 intel_mnemonic = 0;
7990 else if (strcasecmp (arg, "intel") == 0)
7991 intel_mnemonic = 1;
7992 else
7993 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
7994 break;
7995
7996 case OPTION_MSYNTAX:
7997 if (strcasecmp (arg, "att") == 0)
7998 intel_syntax = 0;
7999 else if (strcasecmp (arg, "intel") == 0)
8000 intel_syntax = 1;
8001 else
8002 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8003 break;
8004
8005 case OPTION_MINDEX_REG:
8006 allow_index_reg = 1;
8007 break;
8008
8009 case OPTION_MNAKED_REG:
8010 allow_naked_reg = 1;
8011 break;
8012
8013 case OPTION_MOLD_GCC:
8014 old_gcc = 1;
8015 break;
8016
8017 case OPTION_MSSE2AVX:
8018 sse2avx = 1;
8019 break;
8020
8021 case OPTION_MSSE_CHECK:
8022 if (strcasecmp (arg, "error") == 0)
8023 sse_check = sse_check_error;
8024 else if (strcasecmp (arg, "warning") == 0)
8025 sse_check = sse_check_warning;
8026 else if (strcasecmp (arg, "none") == 0)
8027 sse_check = sse_check_none;
8028 else
8029 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8030 break;
8031
8032 default:
8033 return 0;
8034 }
8035 return 1;
8036 }
8037
8038 void
8039 md_show_usage (FILE *stream)
8041 {
8042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8043 fprintf (stream, _("\
8044 -Q ignored\n\
8045 -V print assembler version number\n\
8046 -k ignored\n"));
8047 #endif
8048 fprintf (stream, _("\
8049 -n Do not optimize code alignment\n\
8050 -q quieten some warnings\n"));
8051 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8052 fprintf (stream, _("\
8053 -s ignored\n"));
8054 #endif
8055 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8056 || defined (TE_PE) || defined (TE_PEP))
8057 fprintf (stream, _("\
8058 --32/--64 generate 32bit/64bit code\n"));
8059 #endif
8060 #ifdef SVR4_COMMENT_CHARS
8061 fprintf (stream, _("\
8062 --divide do not treat `/' as a comment character\n"));
8063 #else
8064 fprintf (stream, _("\
8065 --divide ignored\n"));
8066 #endif
8067 fprintf (stream, _("\
8068 -march=CPU[,+EXTENSION...]\n\
8069 generate code for CPU and EXTENSION, CPU is one of:\n\
8070 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
8071 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
8072 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
8073 amdfam10, generic32, generic64\n\
8074 EXTENSION is combination of:\n\
8075 8087, 287, 387, no87, mmx, nommx, sse, sse2, sse3,\n\
8076 ssse3, sse4.1, sse4.2, sse4, nosse, avx, noavx,\n\
8077 vmx, smx, xsave, movbe, ept, aes, pclmul, fma,\n\
8078 clflush, syscall, rdtscp, 3dnow, 3dnowa, sse4a,\n\
8079 svme, abm, padlock, fma4, lwp\n"));
8080 fprintf (stream, _("\
8081 -mtune=CPU optimize for CPU, CPU is one of:\n\
8082 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
8083 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
8084 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
8085 amdfam10, generic32, generic64\n"));
8086 fprintf (stream, _("\
8087 -msse2avx encode SSE instructions with VEX prefix\n"));
8088 fprintf (stream, _("\
8089 -msse-check=[none|error|warning]\n\
8090 check SSE instructions\n"));
8091 fprintf (stream, _("\
8092 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8093 fprintf (stream, _("\
8094 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8095 fprintf (stream, _("\
8096 -mindex-reg support pseudo index registers\n"));
8097 fprintf (stream, _("\
8098 -mnaked-reg don't require `%%' prefix for registers\n"));
8099 fprintf (stream, _("\
8100 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8101 }
8102
8103 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8104 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8105 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8106
8107 /* Pick the target format to use. */
8108
8109 const char *
8110 i386_target_format (void)
8111 {
8112 if (!strcmp (default_arch, "x86_64"))
8113 {
8114 set_code_flag (CODE_64BIT);
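/* If no ISA was selected (e.g. via -march=), default to the x86-64
   baseline: everything up through SSE2 plus CLFLUSH and long mode.  */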
8115 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8116 {
8117 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8118 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8119 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8120 cpu_arch_isa_flags.bitfield.cpui486 = 1;
8121 cpu_arch_isa_flags.bitfield.cpui586 = 1;
8122 cpu_arch_isa_flags.bitfield.cpui686 = 1;
8123 cpu_arch_isa_flags.bitfield.cpuclflush = 1;
8124 cpu_arch_isa_flags.bitfield.cpummx = 1;
8125 cpu_arch_isa_flags.bitfield.cpusse = 1;
8126 cpu_arch_isa_flags.bitfield.cpusse2 = 1;
8127 cpu_arch_isa_flags.bitfield.cpulm = 1;
8128 }
8129 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8130 {
8131 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8132 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8133 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8134 cpu_arch_tune_flags.bitfield.cpui486 = 1;
8135 cpu_arch_tune_flags.bitfield.cpui586 = 1;
8136 cpu_arch_tune_flags.bitfield.cpui686 = 1;
8137 cpu_arch_tune_flags.bitfield.cpuclflush = 1;
8138 cpu_arch_tune_flags.bitfield.cpummx = 1;
8139 cpu_arch_tune_flags.bitfield.cpusse = 1;
8140 cpu_arch_tune_flags.bitfield.cpusse2 = 1;
8141 }
8142 }
8143 else if (!strcmp (default_arch, "i386"))
8144 {
8145 set_code_flag (CODE_32BIT);
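/* If no ISA was selected, default to a plain i386.  */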
8146 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8147 {
8148 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8149 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8150 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8151 }
8152 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8153 {
8154 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8155 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8156 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8157 }
8158 }
8159 else
8160 as_fatal (_("Unknown architecture"));
8161 switch (OUTPUT_FLAVOR)
8162 {
8163 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8164 case bfd_target_aout_flavour:
8165 return AOUT_TARGET_FORMAT;
8166 #endif
8167 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8168 # if defined (TE_PE) || defined (TE_PEP)
8169 case bfd_target_coff_flavour:
8170 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8171 # elif defined (TE_GO32)
8172 case bfd_target_coff_flavour:
8173 return "coff-go32";
8174 # else
8175 case bfd_target_coff_flavour:
8176 return "coff-i386";
8177 # endif
8178 #endif
8179 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8180 case bfd_target_elf_flavour:
8181 {
8182 if (flag_code == CODE_64BIT)
8183 {
8184 object_64bit = 1;
8185 use_rela_relocations = 1;
8186 }
8187 if (cpu_arch_isa == PROCESSOR_L1OM)
8188 {
8189 if (flag_code != CODE_64BIT)
8190 as_fatal (_("Intel L1OM is 64bit only"));
8191 return ELF_TARGET_L1OM_FORMAT;
8192 }
8193 else
8194 return (flag_code == CODE_64BIT
8195 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8196 }
8197 #endif
8198 #if defined (OBJ_MACH_O)
8199 case bfd_target_mach_o_flavour:
8200 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8201 #endif
8202 default:
8203 abort ();
8204 return NULL;
8205 }
8206 }
8207
8208 #endif /* OBJ_MAYBE_ more than one */
8209
8210 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8211 void
8212 i386_elf_emit_arch_note (void)
8213 {
8214 if (IS_ELF && cpu_arch_name != NULL)
8215 {
8216 char *p;
8217 asection *seg = now_seg;
8218 subsegT subseg = now_subseg;
8219 Elf_Internal_Note i_note;
8220 Elf_External_Note e_note;
8221 asection *note_secp;
8222 int len;
8223
8224 /* Create the .note section. */
8225 note_secp = subseg_new (".note", 0);
8226 bfd_set_section_flags (stdoutput,
8227 note_secp,
8228 SEC_HAS_CONTENTS | SEC_READONLY);
8229
8230 /* Process the arch string. */
8231 len = strlen (cpu_arch_name);
8232
8233 i_note.namesz = len + 1;
8234 i_note.descsz = 0;
8235 i_note.type = NT_ARCH;
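/* Emit the note header (namesz, descsz, type) followed by the
   NUL-terminated architecture name, then pad to a 4-byte boundary.  */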
8236 p = frag_more (sizeof (e_note.namesz));
8237 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8238 p = frag_more (sizeof (e_note.descsz));
8239 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8240 p = frag_more (sizeof (e_note.type));
8241 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8242 p = frag_more (len + 1);
8243 strcpy (p, cpu_arch_name);
8244
8245 frag_align (2, 0, 0);
8246
8247 subseg_set (seg, subseg);
8248 }
8249 }
8250 #endif
8251 \f
8252 symbolS *
8253 md_undefined_symbol (char *name)
8254 {
8256 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8257 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8258 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8259 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8260 {
8261 if (!GOT_symbol)
8262 {
8263 if (symbol_find (name))
8264 as_bad (_("GOT already in symbol table"));
8265 GOT_symbol = symbol_new (name, undefined_section,
8266 (valueT) 0, &zero_address_frag);
8267 }
8268 return GOT_symbol;
8269 }
8270 return 0;
8271 }
8272
8273 /* Round up a section size to the appropriate boundary. */
8274
8275 valueT
8276 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8277 {
8280 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8281 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8282 {
8283 /* For a.out, force the section size to be aligned. If we don't do
8284 this, BFD will align it for us, but it will not write out the
8285 final bytes of the section. This may be a bug in BFD, but it is
8286 easier to fix it here since that is how the other a.out targets
8287 work. */
8288 int align;
8289
8290 align = bfd_get_section_alignment (stdoutput, segment);
8291 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8292 }
8293 #endif
8294
8295 return size;
8296 }
8297
8298 /* On the i386, PC-relative offsets are relative to the start of the
8299 next instruction. That is, the address of the offset, plus its
8300 size, since the offset is always the last part of the insn. */
8301
8302 long
8303 md_pcrel_from (fixS *fixP)
8304 {
8305 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8306 }
8307
8308 #ifndef I386COFF
8309
8310 static void
8311 s_bss (int ignore ATTRIBUTE_UNUSED)
8312 {
8313 int temp;
8314
8315 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8316 if (IS_ELF)
8317 obj_elf_section_change_hook ();
8318 #endif
8319 temp = get_absolute_expression ();
8320 subseg_set (bss_section, (subsegT) temp);
8321 demand_empty_rest_of_line ();
8322 }
8323
8324 #endif
8325
8326 void
8327 i386_validate_fix (fixS *fixp)
8328 {
8329 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8330 {
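/* If the fixup subtracts _GLOBAL_OFFSET_TABLE_, it is really a
   GOT-relative reference: rewrite the relocation type accordingly
   and drop the subtracted symbol.  */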
8331 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8332 {
8333 if (!object_64bit)
8334 abort ();
8335 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8336 }
8337 else
8338 {
8339 if (!object_64bit)
8340 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8341 else
8342 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8343 }
8344 fixp->fx_subsy = 0;
8345 }
8346 }
8347
8348 arelent *
8349 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8350 {
8353 arelent *rel;
8354 bfd_reloc_code_real_type code;
8355
8356 switch (fixp->fx_r_type)
8357 {
8358 case BFD_RELOC_X86_64_PLT32:
8359 case BFD_RELOC_X86_64_GOT32:
8360 case BFD_RELOC_X86_64_GOTPCREL:
8361 case BFD_RELOC_386_PLT32:
8362 case BFD_RELOC_386_GOT32:
8363 case BFD_RELOC_386_GOTOFF:
8364 case BFD_RELOC_386_GOTPC:
8365 case BFD_RELOC_386_TLS_GD:
8366 case BFD_RELOC_386_TLS_LDM:
8367 case BFD_RELOC_386_TLS_LDO_32:
8368 case BFD_RELOC_386_TLS_IE_32:
8369 case BFD_RELOC_386_TLS_IE:
8370 case BFD_RELOC_386_TLS_GOTIE:
8371 case BFD_RELOC_386_TLS_LE_32:
8372 case BFD_RELOC_386_TLS_LE:
8373 case BFD_RELOC_386_TLS_GOTDESC:
8374 case BFD_RELOC_386_TLS_DESC_CALL:
8375 case BFD_RELOC_X86_64_TLSGD:
8376 case BFD_RELOC_X86_64_TLSLD:
8377 case BFD_RELOC_X86_64_DTPOFF32:
8378 case BFD_RELOC_X86_64_DTPOFF64:
8379 case BFD_RELOC_X86_64_GOTTPOFF:
8380 case BFD_RELOC_X86_64_TPOFF32:
8381 case BFD_RELOC_X86_64_TPOFF64:
8382 case BFD_RELOC_X86_64_GOTOFF64:
8383 case BFD_RELOC_X86_64_GOTPC32:
8384 case BFD_RELOC_X86_64_GOT64:
8385 case BFD_RELOC_X86_64_GOTPCREL64:
8386 case BFD_RELOC_X86_64_GOTPC64:
8387 case BFD_RELOC_X86_64_GOTPLT64:
8388 case BFD_RELOC_X86_64_PLTOFF64:
8389 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8390 case BFD_RELOC_X86_64_TLSDESC_CALL:
8391 case BFD_RELOC_RVA:
8392 case BFD_RELOC_VTABLE_ENTRY:
8393 case BFD_RELOC_VTABLE_INHERIT:
8394 #ifdef TE_PE
8395 case BFD_RELOC_32_SECREL:
8396 #endif
8397 code = fixp->fx_r_type;
8398 break;
8399 case BFD_RELOC_X86_64_32S:
8400 if (!fixp->fx_pcrel)
8401 {
8402 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8403 code = fixp->fx_r_type;
8404 break;
8405 }
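/* Fall through.  */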
8406 default:
8407 if (fixp->fx_pcrel)
8408 {
8409 switch (fixp->fx_size)
8410 {
8411 default:
8412 as_bad_where (fixp->fx_file, fixp->fx_line,
8413 _("can not do %d byte pc-relative relocation"),
8414 fixp->fx_size);
8415 code = BFD_RELOC_32_PCREL;
8416 break;
8417 case 1: code = BFD_RELOC_8_PCREL; break;
8418 case 2: code = BFD_RELOC_16_PCREL; break;
8419 case 4: code = BFD_RELOC_32_PCREL; break;
8420 #ifdef BFD64
8421 case 8: code = BFD_RELOC_64_PCREL; break;
8422 #endif
8423 }
8424 }
8425 else
8426 {
8427 switch (fixp->fx_size)
8428 {
8429 default:
8430 as_bad_where (fixp->fx_file, fixp->fx_line,
8431 _("can not do %d byte relocation"),
8432 fixp->fx_size);
8433 code = BFD_RELOC_32;
8434 break;
8435 case 1: code = BFD_RELOC_8; break;
8436 case 2: code = BFD_RELOC_16; break;
8437 case 4: code = BFD_RELOC_32; break;
8438 #ifdef BFD64
8439 case 8: code = BFD_RELOC_64; break;
8440 #endif
8441 }
8442 }
8443 break;
8444 }
8445
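/* A direct reference to _GLOBAL_OFFSET_TABLE_ itself becomes a
   GOTPC-style relocation, which resolves to the GOT base relative
   to the current instruction.  */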
8446 if ((code == BFD_RELOC_32
8447 || code == BFD_RELOC_32_PCREL
8448 || code == BFD_RELOC_X86_64_32S)
8449 && GOT_symbol
8450 && fixp->fx_addsy == GOT_symbol)
8451 {
8452 if (!object_64bit)
8453 code = BFD_RELOC_386_GOTPC;
8454 else
8455 code = BFD_RELOC_X86_64_GOTPC32;
8456 }
8457 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8458 && GOT_symbol
8459 && fixp->fx_addsy == GOT_symbol)
8460 {
8461 code = BFD_RELOC_X86_64_GOTPC64;
8462 }
8463
8464 rel = (arelent *) xmalloc (sizeof (arelent));
8465 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8466 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8467
8468 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8469
8470 if (!use_rela_relocations)
8471 {
8472 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8473 vtable entry to be used in the relocation's section offset. */
8474 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8475 rel->address = fixp->fx_offset;
8476 #if defined (OBJ_COFF) && defined (TE_PE)
8477 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8478 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8479 else
8480 #endif
8481 rel->addend = 0;
8482 }
8483 /* Use rela-style relocations in 64-bit mode. */
8484 else
8485 {
8486 if (!fixp->fx_pcrel)
8487 rel->addend = fixp->fx_offset;
8488 else
8489 switch (code)
8490 {
8491 case BFD_RELOC_X86_64_PLT32:
8492 case BFD_RELOC_X86_64_GOT32:
8493 case BFD_RELOC_X86_64_GOTPCREL:
8494 case BFD_RELOC_X86_64_TLSGD:
8495 case BFD_RELOC_X86_64_TLSLD:
8496 case BFD_RELOC_X86_64_GOTTPOFF:
8497 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8498 case BFD_RELOC_X86_64_TLSDESC_CALL:
8499 rel->addend = fixp->fx_offset - fixp->fx_size;
8500 break;
8501 default:
8502 rel->addend = (section->vma
8503 - fixp->fx_size
8504 + fixp->fx_addnumber
8505 + md_pcrel_from (fixp));
8506 break;
8507 }
8508 }
8509
8510 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8511 if (rel->howto == NULL)
8512 {
8513 as_bad_where (fixp->fx_file, fixp->fx_line,
8514 _("cannot represent relocation type %s"),
8515 bfd_get_reloc_code_name (code));
8516 /* Set howto to a garbage value so that we can keep going. */
8517 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8518 gas_assert (rel->howto != NULL);
8519 }
8520
8521 return rel;
8522 }
8523
8524 #include "tc-i386-intel.c"
8525
8526 void
8527 tc_x86_parse_to_dw2regnum (expressionS *exp)
8528 {
8529 int saved_naked_reg;
8530 char saved_register_dot;
8531
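/* Temporarily accept bare register names (no '%' prefix), '.' as a
   register-name character, and the pseudo registers so that .cfi_*
   operands parse; then restore the previous settings.  */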
8532 saved_naked_reg = allow_naked_reg;
8533 allow_naked_reg = 1;
8534 saved_register_dot = register_chars['.'];
8535 register_chars['.'] = '.';
8536 allow_pseudo_reg = 1;
8537 expression_and_evaluate (exp);
8538 allow_pseudo_reg = 0;
8539 register_chars['.'] = saved_register_dot;
8540 allow_naked_reg = saved_naked_reg;
8541
8542 if (exp->X_op == O_register && exp->X_add_number >= 0)
8543 {
8544 if ((addressT) exp->X_add_number < i386_regtab_size)
8545 {
8546 exp->X_op = O_constant;
8547 exp->X_add_number = i386_regtab[exp->X_add_number]
8548 .dw2_regnum[flag_code >> 1];
8549 }
8550 else
8551 exp->X_op = O_illegal;
8552 }
8553 }
8554
8555 void
8556 tc_x86_frame_initial_instructions (void)
8557 {
8558 static unsigned int sp_regno[2];
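/* Cache the DWARF register number of the stack pointer (esp or rsp),
   one slot for 32-bit and one for 64-bit code, by parsing the name
   once through tc_x86_parse_to_dw2regnum.  */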
8559
8560 if (!sp_regno[flag_code >> 1])
8561 {
8562 char *saved_input = input_line_pointer;
8563 char sp[][4] = {"esp", "rsp"};
8564 expressionS exp;
8565
8566 input_line_pointer = sp[flag_code >> 1];
8567 tc_x86_parse_to_dw2regnum (&exp);
8568 gas_assert (exp.X_op == O_constant);
8569 sp_regno[flag_code >> 1] = exp.X_add_number;
8570 input_line_pointer = saved_input;
8571 }
8572
8573 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8574 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8575 }
8576
8577 int
8578 i386_elf_section_type (const char *str, size_t len)
8579 {
8580 if (flag_code == CODE_64BIT
8581 && len == sizeof ("unwind") - 1
8582 && strncmp (str, "unwind", 6) == 0)
8583 return SHT_X86_64_UNWIND;
8584
8585 return -1;
8586 }
8587
8588 #ifdef TE_SOLARIS
8589 void
8590 i386_solaris_fix_up_eh_frame (segT sec)
8591 {
8592 if (flag_code == CODE_64BIT)
8593 elf_section_type (sec) = SHT_X86_64_UNWIND;
8594 }
8595 #endif
8596
8597 #ifdef TE_PE
8598 void
8599 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8600 {
8601 expressionS expr;
8602
8603 expr.X_op = O_secrel;
8604 expr.X_add_symbol = symbol;
8605 expr.X_add_number = 0;
8606 emit_expr (&expr, size);
8607 }
8608 #endif
8609
8610 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8611 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
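/* Both the 'l' flag letter (e.g. a .section directive using "awl")
   and the section word "large" map to SHF_X86_64_LARGE in 64-bit
   mode.  */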
8612
8613 bfd_vma
8614 x86_64_section_letter (int letter, char **ptr_msg)
8615 {
8616 if (flag_code == CODE_64BIT)
8617 {
8618 if (letter == 'l')
8619 return SHF_X86_64_LARGE;
8620
8621 *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
8622 }
8623 else
8624 *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
8625 return -1;
8626 }
8627
8628 bfd_vma
8629 x86_64_section_word (char *str, size_t len)
8630 {
8631 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8632 return SHF_X86_64_LARGE;
8633
8634 return -1;
8635 }
8636
8637 static void
8638 handle_large_common (int small ATTRIBUTE_UNUSED)
8639 {
8640 if (flag_code != CODE_64BIT)
8641 {
8642 s_comm_internal (0, elf_common_parse);
8643 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8644 }
8645 else
8646 {
8647 static segT lbss_section;
8648 asection *saved_com_section_ptr = elf_com_section_ptr;
8649 asection *saved_bss_section = bss_section;
8650
8651 if (lbss_section == NULL)
8652 {
8653 flagword applicable;
8654 segT seg = now_seg;
8655 subsegT subseg = now_subseg;
8656
8657 /* The .lbss section is for local .largecomm symbols. */
8658 lbss_section = subseg_new (".lbss", 0);
8659 applicable = bfd_applicable_section_flags (stdoutput);
8660 bfd_set_section_flags (stdoutput, lbss_section,
8661 applicable & SEC_ALLOC);
8662 seg_info (lbss_section)->bss = 1;
8663
8664 subseg_set (seg, subseg);
8665 }
8666
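/* Redirect common symbols to the large common section and .lbss
   while the directive is parsed, then restore the normal sections.  */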
8667 elf_com_section_ptr = &_bfd_elf_large_com_section;
8668 bss_section = lbss_section;
8669
8670 s_comm_internal (0, elf_common_parse);
8671
8672 elf_com_section_ptr = saved_com_section_ptr;
8673 bss_section = saved_bss_section;
8674 }
8675 }
8676 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */