Allow VL=1 on AVX scalar instructions.
gas/config/tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT is really an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ASCII letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 }
136 arch_entry;
137
138 static void set_code_flag (int);
139 static void set_16bit_gcc_code_flag (int);
140 static void set_intel_syntax (int);
141 static void set_intel_mnemonic (int);
142 static void set_allow_index_reg (int);
143 static void set_sse_check (int);
144 static void set_cpu_arch (int);
145 #ifdef TE_PE
146 static void pe_directive_secrel (int);
147 #endif
148 static void signed_cons (int);
149 static char *output_invalid (int c);
150 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
151 const char *);
152 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_att_operand (char *);
155 static int i386_intel_operand (char *, int);
156 static int i386_intel_simplify (expressionS *);
157 static int i386_intel_parse_name (const char *, expressionS *);
158 static const reg_entry *parse_register (char *, char **);
159 static char *parse_insn (char *, char *);
160 static char *parse_operands (char *, const char *);
161 static void swap_operands (void);
162 static void swap_2_operands (int, int);
163 static void optimize_imm (void);
164 static void optimize_disp (void);
165 static const insn_template *match_template (void);
166 static int check_string (void);
167 static int process_suffix (void);
168 static int check_byte_reg (void);
169 static int check_long_reg (void);
170 static int check_qword_reg (void);
171 static int check_word_reg (void);
172 static int finalize_imm (void);
173 static int process_operands (void);
174 static const seg_entry *build_modrm_byte (void);
175 static void output_insn (void);
176 static void output_imm (fragS *, offsetT);
177 static void output_disp (fragS *, offsetT);
178 #ifndef I386COFF
179 static void s_bss (int);
180 #endif
181 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
182 static void handle_large_common (int small ATTRIBUTE_UNUSED);
183 #endif
184
185 static const char *default_arch = DEFAULT_ARCH;
186
187 /* VEX prefix. */
188 typedef struct
189 {
190 /* VEX prefix is either 2 byte or 3 byte. */
191 unsigned char bytes[3];
192 unsigned int length;
193 /* Destination or source register specifier. */
194 const reg_entry *register_specifier;
195 } vex_prefix;
196
197 /* 'md_assemble ()' gathers together information and puts it into an
198 i386_insn. */
199
200 union i386_op
201 {
202 expressionS *disps;
203 expressionS *imms;
204 const reg_entry *regs;
205 };
206
207 struct _i386_insn
208 {
209 /* TM holds the template for the insn we're currently assembling. */
210 insn_template tm;
211
212 /* SUFFIX holds the instruction size suffix for byte, word, dword
213 or qword, if given. */
214 char suffix;
215
216 /* OPERANDS gives the number of given operands. */
217 unsigned int operands;
218
219 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
220 of given register, displacement, memory operands and immediate
221 operands. */
222 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
223
224 /* TYPES [i] is the type (see above #defines) which tells us how to
225 use OP[i] for the corresponding operand. */
226 i386_operand_type types[MAX_OPERANDS];
227
228 /* Displacement expression, immediate expression, or register for each
229 operand. */
230 union i386_op op[MAX_OPERANDS];
231
232 /* Flags for operands. */
233 unsigned int flags[MAX_OPERANDS];
234 #define Operand_PCrel 1
235
236 /* Relocation type for operand */
237 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
238
239 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
240 the base index byte below. */
241 const reg_entry *base_reg;
242 const reg_entry *index_reg;
243 unsigned int log2_scale_factor;
244
245 /* SEG gives the seg_entries of this insn. They are zero unless
246 explicit segment overrides are given. */
247 const seg_entry *seg[2];
248
249 /* PREFIX holds all the given prefix opcodes (usually null).
250 PREFIXES is the number of prefix opcodes. */
251 unsigned int prefixes;
252 unsigned char prefix[MAX_PREFIXES];
253
254 /* RM and SIB are the modrm byte and the sib byte where the
255 addressing modes of this insn are encoded. */
256 modrm_byte rm;
257 rex_byte rex;
258 sib_byte sib;
259 vex_prefix vex;
260
261 /* Swap operand in encoding. */
262 unsigned int swap_operand;
263 };
264
265 typedef struct _i386_insn i386_insn;
266
267 /* List of chars besides those in app.c:symbol_chars that can start an
268 operand. Used to prevent the scrubber eating vital white-space. */
269 const char extra_symbol_chars[] = "*%-(["
270 #ifdef LEX_AT
271 "@"
272 #endif
273 #ifdef LEX_QM
274 "?"
275 #endif
276 ;
277
278 #if (defined (TE_I386AIX) \
279 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
280 && !defined (TE_GNU) \
281 && !defined (TE_LINUX) \
282 && !defined (TE_NETWARE) \
283 && !defined (TE_FreeBSD) \
284 && !defined (TE_NetBSD)))
285 /* This array holds the chars that always start a comment. If the
286 pre-processor is disabled, these aren't very useful. The option
287 --divide will remove '/' from this list. */
288 const char *i386_comment_chars = "#/";
289 #define SVR4_COMMENT_CHARS 1
290 #define PREFIX_SEPARATOR '\\'
291
292 #else
293 const char *i386_comment_chars = "#";
294 #define PREFIX_SEPARATOR '/'
295 #endif
296
297 /* This array holds the chars that only start a comment at the beginning of
298 a line. If the line seems to have the form '# 123 filename'
299 .line and .file directives will appear in the pre-processed output.
300 Note that input_file.c hand checks for '#' at the beginning of the
301 first line of the input file. This is because the compiler outputs
302 #NO_APP at the beginning of its output.
303 Also note that comments started like this one will always work if
304 '/' isn't otherwise defined. */
305 const char line_comment_chars[] = "#/";
306
307 const char line_separator_chars[] = ";";
308
309 /* Chars that can be used to separate mant from exp in floating point
310 nums. */
311 const char EXP_CHARS[] = "eE";
312
313 /* Chars that mean this number is a floating point constant
314 As in 0f12.456
315 or 0d1.2345e12. */
316 const char FLT_CHARS[] = "fFdDxX";
317
318 /* Tables for lexical analysis. */
319 static char mnemonic_chars[256];
320 static char register_chars[256];
321 static char operand_chars[256];
322 static char identifier_chars[256];
323 static char digit_chars[256];
324
325 /* Lexical macros. */
326 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
327 #define is_operand_char(x) (operand_chars[(unsigned char) x])
328 #define is_register_char(x) (register_chars[(unsigned char) x])
329 #define is_space_char(x) ((x) == ' ')
330 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
331 #define is_digit_char(x) (digit_chars[(unsigned char) x])
332
333 /* All non-digit non-letter characters that may occur in an operand. */
334 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
335
336 /* md_assemble() always leaves the strings it's passed unaltered. To
337 effect this we maintain a stack of saved characters that we've smashed
338 with '\0's (indicating end of strings for various sub-fields of the
339 assembler instruction). */
340 static char save_stack[32];
341 static char *save_stack_p;
342 #define END_STRING_AND_SAVE(s) \
343 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
344 #define RESTORE_END_STRING(s) \
345 do { *(s) = *--save_stack_p; } while (0)
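
/* A minimal usage sketch (editorial illustration only, not called anywhere):
   parsing code brackets a sub-field by temporarily NUL-terminating it,
   performs its lookup, then restores the clobbered character:

       END_STRING_AND_SAVE (l);
       ... e.g. a hash_find (op_hash, ...) lookup on the token ...
       RESTORE_END_STRING (l);

   The 32-entry save_stack above bounds how deeply such saves may nest.  */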
346
347 /* The instruction we're assembling. */
348 static i386_insn i;
349
350 /* Possible templates for current insn. */
351 static const templates *current_templates;
352
353 /* Per instruction expressionS buffers: max displacements & immediates. */
354 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
355 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
356
357 /* Current operand we are working on. */
358 static int this_operand = -1;
359
360 /* We support three code modes, 16-, 32- and 64-bit, distinguished by the
361 FLAG_CODE variable below (.code16gcc is handled as a variant of 16-bit mode). */
362
363 enum flag_code {
364 CODE_32BIT,
365 CODE_16BIT,
366 CODE_64BIT };
367
368 static enum flag_code flag_code;
369 static unsigned int object_64bit;
370 static int use_rela_relocations = 0;
371
372 /* The names used to print error messages. */
373 static const char *flag_code_names[] =
374 {
375 "32",
376 "16",
377 "64"
378 };
379
380 /* 1 for intel syntax,
381 0 if att syntax. */
382 static int intel_syntax = 0;
383
384 /* 1 for intel mnemonic,
385 0 if att mnemonic. */
386 static int intel_mnemonic = !SYSV386_COMPAT;
387
388 /* 1 if support old (<= 2.8.1) versions of gcc. */
389 static int old_gcc = OLDGCC_COMPAT;
390
391 /* 1 if pseudo registers are permitted. */
392 static int allow_pseudo_reg = 0;
393
394 /* 1 if register prefix % not required. */
395 static int allow_naked_reg = 0;
396
397 /* 1 if pseudo index register, eiz/riz, is allowed. */
398 static int allow_index_reg = 0;
399
400 static enum
401 {
402 sse_check_none = 0,
403 sse_check_warning,
404 sse_check_error
405 }
406 sse_check;
407
408 /* Register prefix used for error message. */
409 static const char *register_prefix = "%";
410
411 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
412 leave, push, and pop instructions so that gcc has the same stack
413 frame as in 32 bit mode. */
414 static char stackop_size = '\0';
415
416 /* Non-zero to optimize code alignment. */
417 int optimize_align_code = 1;
418
419 /* Non-zero to quieten some warnings. */
420 static int quiet_warnings = 0;
421
422 /* CPU name. */
423 static const char *cpu_arch_name = NULL;
424 static char *cpu_sub_arch_name = NULL;
425
426 /* CPU feature flags. */
427 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
428
429 /* If we have selected a cpu we are generating instructions for. */
430 static int cpu_arch_tune_set = 0;
431
432 /* Cpu we are generating instructions for. */
433 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
434
435 /* CPU feature flags of cpu we are generating instructions for. */
436 static i386_cpu_flags cpu_arch_tune_flags;
437
438 /* CPU instruction set architecture used. */
439 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
440
441 /* CPU feature flags of instruction set architecture used. */
442 i386_cpu_flags cpu_arch_isa_flags;
443
444 /* If set, conditional jumps are not automatically promoted to handle
445 offsets larger than a byte. */
446 static unsigned int no_cond_jump_promotion = 0;
447
448 /* Encode SSE instructions with VEX prefix. */
449 static unsigned int sse2avx;
450
451 /* Encode scalar AVX instructions with specific vector length. */
452 static enum
453 {
454 vex128 = 0,
455 vex256
456 } avxscalar;
457
458 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
459 static symbolS *GOT_symbol;
460
461 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
462 unsigned int x86_dwarf2_return_column;
463
464 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
465 int x86_cie_data_alignment;
466
467 /* Interface to relax_segment.
468 There are 3 major relax states for 386 jump insns because the
469 different types of jumps add different sizes to frags when we're
470 figuring out what sort of jump to choose to reach a given label. */
471
472 /* Types. */
473 #define UNCOND_JUMP 0
474 #define COND_JUMP 1
475 #define COND_JUMP86 2
476
477 /* Sizes. */
478 #define CODE16 1
479 #define SMALL 0
480 #define SMALL16 (SMALL | CODE16)
481 #define BIG 2
482 #define BIG16 (BIG | CODE16)
483
484 #ifndef INLINE
485 #ifdef __GNUC__
486 #define INLINE __inline__
487 #else
488 #define INLINE
489 #endif
490 #endif
491
492 #define ENCODE_RELAX_STATE(type, size) \
493 ((relax_substateT) (((type) << 2) | (size)))
494 #define TYPE_FROM_RELAX_STATE(s) \
495 ((s) >> 2)
496 #define DISP_SIZE_FROM_RELAX_STATE(s) \
497 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
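
/* Worked example (editorial illustration): ENCODE_RELAX_STATE (COND_JUMP, BIG)
   is (1 << 2) | 2 == 6, indexing entry 6 (zero-based) of md_relax_table below,
   the conditional jump grown to a 4-byte displacement; TYPE_FROM_RELAX_STATE (6)
   recovers COND_JUMP and DISP_SIZE_FROM_RELAX_STATE (6) yields 4.  */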
498
499 /* This table is used by relax_frag to promote short jumps to long
500 ones where necessary. SMALL (short) jumps may be promoted to BIG
501 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
502 don't allow a short jump in a 32 bit code segment to be promoted to
503 a 16 bit offset jump because it's slower (requires data size
504 prefix), and doesn't work, unless the destination is in the bottom
505 64k of the code segment (The top 16 bits of eip are zeroed). */
506
507 const relax_typeS md_relax_table[] =
508 {
509 /* The fields are:
510 1) most positive reach of this state,
511 2) most negative reach of this state,
512 3) how many bytes this mode will have in the variable part of the frag
513 4) which index into the table to try if we can't fit into this one. */
514
515 /* UNCOND_JUMP states. */
516 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
517 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
518 /* dword jmp adds 4 bytes to frag:
519 0 extra opcode bytes, 4 displacement bytes. */
520 {0, 0, 4, 0},
521 /* word jmp adds 2 bytes to frag:
522 0 extra opcode bytes, 2 displacement bytes. */
523 {0, 0, 2, 0},
524
525 /* COND_JUMP states. */
526 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
527 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
528 /* dword conditionals add 5 bytes to frag:
529 1 extra opcode byte, 4 displacement bytes. */
530 {0, 0, 5, 0},
531 /* word conditionals add 3 bytes to frag:
532 1 extra opcode byte, 2 displacement bytes. */
533 {0, 0, 3, 0},
534
535 /* COND_JUMP86 states. */
536 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
537 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
538 /* dword conditionals add 5 bytes to frag:
539 1 extra opcode byte, 4 displacement bytes. */
540 {0, 0, 5, 0},
541 /* word conditionals add 4 bytes to frag:
542 1 displacement byte and a 3 byte long branch insn. */
543 {0, 0, 4, 0}
544 };
545
546 static const arch_entry cpu_arch[] =
547 {
548 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
549 CPU_GENERIC32_FLAGS, 0 },
550 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
551 CPU_GENERIC64_FLAGS, 0 },
552 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
553 CPU_NONE_FLAGS, 0 },
554 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
555 CPU_I186_FLAGS, 0 },
556 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
557 CPU_I286_FLAGS, 0 },
558 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
559 CPU_I386_FLAGS, 0 },
560 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
561 CPU_I486_FLAGS, 0 },
562 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
563 CPU_I586_FLAGS, 0 },
564 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
565 CPU_I686_FLAGS, 0 },
566 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
567 CPU_I586_FLAGS, 0 },
568 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
569 CPU_I686_FLAGS, 0 },
570 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
571 CPU_P2_FLAGS, 0 },
572 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
573 CPU_P3_FLAGS, 0 },
574 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
575 CPU_P4_FLAGS, 0 },
576 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
577 CPU_CORE_FLAGS, 0 },
578 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
579 CPU_NOCONA_FLAGS, 0 },
580 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
581 CPU_CORE_FLAGS, 1 },
582 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
583 CPU_CORE_FLAGS, 0 },
584 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
585 CPU_CORE2_FLAGS, 1 },
586 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
587 CPU_CORE2_FLAGS, 0 },
588 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
589 CPU_COREI7_FLAGS, 0 },
590 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
591 CPU_L1OM_FLAGS, 0 },
592 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
593 CPU_K6_FLAGS, 0 },
594 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
595 CPU_K6_2_FLAGS, 0 },
596 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
597 CPU_ATHLON_FLAGS, 0 },
598 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
599 CPU_K8_FLAGS, 1 },
600 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
601 CPU_K8_FLAGS, 0 },
602 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
603 CPU_K8_FLAGS, 0 },
604 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
605 CPU_AMDFAM10_FLAGS, 0 },
606 { STRING_COMMA_LEN ("amdfam15"), PROCESSOR_AMDFAM15,
607 CPU_AMDFAM15_FLAGS, 0 },
608 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
609 CPU_8087_FLAGS, 0 },
610 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
611 CPU_287_FLAGS, 0 },
612 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
613 CPU_387_FLAGS, 0 },
614 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
615 CPU_ANY87_FLAGS, 0 },
616 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
617 CPU_MMX_FLAGS, 0 },
618 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
619 CPU_3DNOWA_FLAGS, 0 },
620 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
621 CPU_SSE_FLAGS, 0 },
622 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
623 CPU_SSE2_FLAGS, 0 },
624 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
625 CPU_SSE3_FLAGS, 0 },
626 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
627 CPU_SSSE3_FLAGS, 0 },
628 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
629 CPU_SSE4_1_FLAGS, 0 },
630 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
631 CPU_SSE4_2_FLAGS, 0 },
632 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
633 CPU_SSE4_2_FLAGS, 0 },
634 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
635 CPU_ANY_SSE_FLAGS, 0 },
636 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
637 CPU_AVX_FLAGS, 0 },
638 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
639 CPU_ANY_AVX_FLAGS, 0 },
640 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
641 CPU_VMX_FLAGS, 0 },
642 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
643 CPU_SMX_FLAGS, 0 },
644 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
645 CPU_XSAVE_FLAGS, 0 },
646 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
647 CPU_AES_FLAGS, 0 },
648 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
649 CPU_PCLMUL_FLAGS, 0 },
650 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
651 CPU_PCLMUL_FLAGS, 1 },
652 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
653 CPU_FMA_FLAGS, 0 },
654 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
655 CPU_FMA4_FLAGS, 0 },
656 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
657 CPU_XOP_FLAGS, 0 },
658 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
659 CPU_LWP_FLAGS, 0 },
660 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
661 CPU_MOVBE_FLAGS, 0 },
662 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
663 CPU_EPT_FLAGS, 0 },
664 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
665 CPU_CLFLUSH_FLAGS, 0 },
666 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
667 CPU_SYSCALL_FLAGS, 0 },
668 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
669 CPU_RDTSCP_FLAGS, 0 },
670 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
671 CPU_3DNOW_FLAGS, 0 },
672 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
673 CPU_3DNOWA_FLAGS, 0 },
674 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
675 CPU_PADLOCK_FLAGS, 0 },
676 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
677 CPU_SVME_FLAGS, 1 },
678 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
679 CPU_SVME_FLAGS, 0 },
680 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
681 CPU_SSE4A_FLAGS, 0 },
682 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
683 CPU_ABM_FLAGS, 0 },
684 };
685
686 #ifdef I386COFF
687 /* Like s_lcomm_internal in gas/read.c but the alignment string
688 is allowed to be optional. */
689
690 static symbolS *
691 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
692 {
693 addressT align = 0;
694
695 SKIP_WHITESPACE ();
696
697 if (needs_align
698 && *input_line_pointer == ',')
699 {
700 align = parse_align (needs_align - 1);
701
702 if (align == (addressT) -1)
703 return NULL;
704 }
705 else
706 {
707 if (size >= 8)
708 align = 3;
709 else if (size >= 4)
710 align = 2;
711 else if (size >= 2)
712 align = 1;
713 else
714 align = 0;
715 }
716
717 bss_alloc (symbolP, size, align);
718 return symbolP;
719 }
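
/* For instance (editorial illustration): an .lcomm of 6 bytes with no
   explicit alignment takes the "else" arm above and gets align = 2,
   i.e. bss_alloc places the symbol on a 4-byte boundary.  */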
720
721 static void
722 pe_lcomm (int needs_align)
723 {
724 s_comm_internal (needs_align * 2, pe_lcomm_internal);
725 }
726 #endif
727
728 const pseudo_typeS md_pseudo_table[] =
729 {
730 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
731 {"align", s_align_bytes, 0},
732 #else
733 {"align", s_align_ptwo, 0},
734 #endif
735 {"arch", set_cpu_arch, 0},
736 #ifndef I386COFF
737 {"bss", s_bss, 0},
738 #else
739 {"lcomm", pe_lcomm, 1},
740 #endif
741 {"ffloat", float_cons, 'f'},
742 {"dfloat", float_cons, 'd'},
743 {"tfloat", float_cons, 'x'},
744 {"value", cons, 2},
745 {"slong", signed_cons, 4},
746 {"noopt", s_ignore, 0},
747 {"optim", s_ignore, 0},
748 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
749 {"code16", set_code_flag, CODE_16BIT},
750 {"code32", set_code_flag, CODE_32BIT},
751 {"code64", set_code_flag, CODE_64BIT},
752 {"intel_syntax", set_intel_syntax, 1},
753 {"att_syntax", set_intel_syntax, 0},
754 {"intel_mnemonic", set_intel_mnemonic, 1},
755 {"att_mnemonic", set_intel_mnemonic, 0},
756 {"allow_index_reg", set_allow_index_reg, 1},
757 {"disallow_index_reg", set_allow_index_reg, 0},
758 {"sse_check", set_sse_check, 0},
759 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
760 {"largecomm", handle_large_common, 0},
761 #else
762 {"file", (void (*) (int)) dwarf2_directive_file, 0},
763 {"loc", dwarf2_directive_loc, 0},
764 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
765 #endif
766 #ifdef TE_PE
767 {"secrel32", pe_directive_secrel, 0},
768 #endif
769 {0, 0, 0}
770 };
771
772 /* For interface with expression (). */
773 extern char *input_line_pointer;
774
775 /* Hash table for instruction mnemonic lookup. */
776 static struct hash_control *op_hash;
777
778 /* Hash table for register lookup. */
779 static struct hash_control *reg_hash;
780 \f
781 void
782 i386_align_code (fragS *fragP, int count)
783 {
784 /* Various efficient no-op patterns for aligning code labels.
785 Note: Don't try to assemble the instructions in the comments.
786 0L and 0w are not legal. */
787 static const char f32_1[] =
788 {0x90}; /* nop */
789 static const char f32_2[] =
790 {0x66,0x90}; /* xchg %ax,%ax */
791 static const char f32_3[] =
792 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
793 static const char f32_4[] =
794 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
795 static const char f32_5[] =
796 {0x90, /* nop */
797 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
798 static const char f32_6[] =
799 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
800 static const char f32_7[] =
801 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
802 static const char f32_8[] =
803 {0x90, /* nop */
804 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
805 static const char f32_9[] =
806 {0x89,0xf6, /* movl %esi,%esi */
807 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
808 static const char f32_10[] =
809 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
810 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
811 static const char f32_11[] =
812 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
813 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
814 static const char f32_12[] =
815 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
816 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
817 static const char f32_13[] =
818 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
819 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
820 static const char f32_14[] =
821 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
822 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
823 static const char f16_3[] =
824 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
825 static const char f16_4[] =
826 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
827 static const char f16_5[] =
828 {0x90, /* nop */
829 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
830 static const char f16_6[] =
831 {0x89,0xf6, /* mov %si,%si */
832 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
833 static const char f16_7[] =
834 {0x8d,0x74,0x00, /* lea 0(%si),%si */
835 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
836 static const char f16_8[] =
837 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
838 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
839 static const char jump_31[] =
840 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
841 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
842 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
843 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
844 static const char *const f32_patt[] = {
845 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
846 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
847 };
848 static const char *const f16_patt[] = {
849 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
850 };
851 /* nopl (%[re]ax) */
852 static const char alt_3[] =
853 {0x0f,0x1f,0x00};
854 /* nopl 0(%[re]ax) */
855 static const char alt_4[] =
856 {0x0f,0x1f,0x40,0x00};
857 /* nopl 0(%[re]ax,%[re]ax,1) */
858 static const char alt_5[] =
859 {0x0f,0x1f,0x44,0x00,0x00};
860 /* nopw 0(%[re]ax,%[re]ax,1) */
861 static const char alt_6[] =
862 {0x66,0x0f,0x1f,0x44,0x00,0x00};
863 /* nopl 0L(%[re]ax) */
864 static const char alt_7[] =
865 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
866 /* nopl 0L(%[re]ax,%[re]ax,1) */
867 static const char alt_8[] =
868 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
869 /* nopw 0L(%[re]ax,%[re]ax,1) */
870 static const char alt_9[] =
871 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
872 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
873 static const char alt_10[] =
874 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
875 /* data16
876 nopw %cs:0L(%[re]ax,%[re]ax,1) */
877 static const char alt_long_11[] =
878 {0x66,
879 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
880 /* data16
881 data16
882 nopw %cs:0L(%[re]ax,%[re]ax,1) */
883 static const char alt_long_12[] =
884 {0x66,
885 0x66,
886 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
887 /* data16
888 data16
889 data16
890 nopw %cs:0L(%[re]ax,%[re]ax,1) */
891 static const char alt_long_13[] =
892 {0x66,
893 0x66,
894 0x66,
895 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
896 /* data16
897 data16
898 data16
899 data16
900 nopw %cs:0L(%[re]ax,%[re]ax,1) */
901 static const char alt_long_14[] =
902 {0x66,
903 0x66,
904 0x66,
905 0x66,
906 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
907 /* data16
908 data16
909 data16
910 data16
911 data16
912 nopw %cs:0L(%[re]ax,%[re]ax,1) */
913 static const char alt_long_15[] =
914 {0x66,
915 0x66,
916 0x66,
917 0x66,
918 0x66,
919 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
920 /* nopl 0(%[re]ax,%[re]ax,1)
921 nopw 0(%[re]ax,%[re]ax,1) */
922 static const char alt_short_11[] =
923 {0x0f,0x1f,0x44,0x00,0x00,
924 0x66,0x0f,0x1f,0x44,0x00,0x00};
925 /* nopw 0(%[re]ax,%[re]ax,1)
926 nopw 0(%[re]ax,%[re]ax,1) */
927 static const char alt_short_12[] =
928 {0x66,0x0f,0x1f,0x44,0x00,0x00,
929 0x66,0x0f,0x1f,0x44,0x00,0x00};
930 /* nopw 0(%[re]ax,%[re]ax,1)
931 nopl 0L(%[re]ax) */
932 static const char alt_short_13[] =
933 {0x66,0x0f,0x1f,0x44,0x00,0x00,
934 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
935 /* nopl 0L(%[re]ax)
936 nopl 0L(%[re]ax) */
937 static const char alt_short_14[] =
938 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
939 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
940 /* nopl 0L(%[re]ax)
941 nopl 0L(%[re]ax,%[re]ax,1) */
942 static const char alt_short_15[] =
943 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
944 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
945 static const char *const alt_short_patt[] = {
946 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
947 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
948 alt_short_14, alt_short_15
949 };
950 static const char *const alt_long_patt[] = {
951 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
952 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
953 alt_long_14, alt_long_15
954 };
955
956 /* Only align if the requested count is positive and within the supported maximum. */
957 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
958 return;
959
960 /* We need to decide which NOP sequence to use for 32bit and
961 64bit. When -mtune= is used:
962
963 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
964 PROCESSOR_GENERIC32, f32_patt will be used.
965 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
966 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
967 PROCESSOR_GENERIC64, alt_long_patt will be used.
968 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
969 PROCESSOR_AMDFAM10, and PROCESSOR_AMDFAM15, alt_short_patt
970 will be used.
971
972 When -mtune= isn't used, alt_long_patt will be used if
973 cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
974 be used.
975
976 When -march= or .arch is used, we can't use anything beyond
977 cpu_arch_isa_flags. */
978
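  /* For instance (editorial illustration): a 9-byte padding request with
     alt_long_patt selected is satisfied in one piece by patt[8], i.e.
     alt_9, the 9-byte "nopw 0L(%[re]ax,%[re]ax,1)".  */
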
979 if (flag_code == CODE_16BIT)
980 {
981 if (count > 8)
982 {
983 memcpy (fragP->fr_literal + fragP->fr_fix,
984 jump_31, count);
985 /* Adjust jump offset. */
986 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
987 }
988 else
989 memcpy (fragP->fr_literal + fragP->fr_fix,
990 f16_patt[count - 1], count);
991 }
992 else
993 {
994 const char *const *patt = NULL;
995
996 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
997 {
998 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
999 switch (cpu_arch_tune)
1000 {
1001 case PROCESSOR_UNKNOWN:
1002 /* We use cpu_arch_isa_flags to check if we SHOULD
1003 optimize for Cpu686. */
1004 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1005 patt = alt_long_patt;
1006 else
1007 patt = f32_patt;
1008 break;
1009 case PROCESSOR_PENTIUMPRO:
1010 case PROCESSOR_PENTIUM4:
1011 case PROCESSOR_NOCONA:
1012 case PROCESSOR_CORE:
1013 case PROCESSOR_CORE2:
1014 case PROCESSOR_COREI7:
1015 case PROCESSOR_L1OM:
1016 case PROCESSOR_GENERIC64:
1017 patt = alt_long_patt;
1018 break;
1019 case PROCESSOR_K6:
1020 case PROCESSOR_ATHLON:
1021 case PROCESSOR_K8:
1022 case PROCESSOR_AMDFAM10:
1023 case PROCESSOR_AMDFAM15:
1024 patt = alt_short_patt;
1025 break;
1026 case PROCESSOR_I386:
1027 case PROCESSOR_I486:
1028 case PROCESSOR_PENTIUM:
1029 case PROCESSOR_GENERIC32:
1030 patt = f32_patt;
1031 break;
1032 }
1033 }
1034 else
1035 {
1036 switch (fragP->tc_frag_data.tune)
1037 {
1038 case PROCESSOR_UNKNOWN:
1039 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1040 PROCESSOR_UNKNOWN. */
1041 abort ();
1042 break;
1043
1044 case PROCESSOR_I386:
1045 case PROCESSOR_I486:
1046 case PROCESSOR_PENTIUM:
1047 case PROCESSOR_K6:
1048 case PROCESSOR_ATHLON:
1049 case PROCESSOR_K8:
1050 case PROCESSOR_AMDFAM10:
1051 case PROCESSOR_AMDFAM15:
1052 case PROCESSOR_GENERIC32:
1053 /* We use cpu_arch_isa_flags to check if we CAN optimize
1054 for Cpu686. */
1055 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1056 patt = alt_short_patt;
1057 else
1058 patt = f32_patt;
1059 break;
1060 case PROCESSOR_PENTIUMPRO:
1061 case PROCESSOR_PENTIUM4:
1062 case PROCESSOR_NOCONA:
1063 case PROCESSOR_CORE:
1064 case PROCESSOR_CORE2:
1065 case PROCESSOR_COREI7:
1066 case PROCESSOR_L1OM:
1067 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1068 patt = alt_long_patt;
1069 else
1070 patt = f32_patt;
1071 break;
1072 case PROCESSOR_GENERIC64:
1073 patt = alt_long_patt;
1074 break;
1075 }
1076 }
1077
1078 if (patt == f32_patt)
1079 {
1080 /* If the padding is less than 15 bytes, we use the normal
1081 ones. Otherwise, we use a jump instruction and adjust
1082 its offset. */
1083 int limit;
1084
1085 /* For 64bit, the limit is 3 bytes. */
1086 if (flag_code == CODE_64BIT
1087 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1088 limit = 3;
1089 else
1090 limit = 15;
1091 if (count < limit)
1092 memcpy (fragP->fr_literal + fragP->fr_fix,
1093 patt[count - 1], count);
1094 else
1095 {
1096 memcpy (fragP->fr_literal + fragP->fr_fix,
1097 jump_31, count);
1098 /* Adjust jump offset. */
1099 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1100 }
1101 }
1102 else
1103 {
1104 /* Maximum length of an instruction is 15 bytes. If the
1105 padding is greater than 15 bytes and we don't use jump,
1106 we have to break it into smaller pieces. */
1107 int padding = count;
1108 while (padding > 15)
1109 {
1110 padding -= 15;
1111 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1112 patt [14], 15);
1113 }
1114
1115 if (padding)
1116 memcpy (fragP->fr_literal + fragP->fr_fix,
1117 patt [padding - 1], padding);
1118 }
1119 }
1120 fragP->fr_var = count;
1121 }
1122
1123 static INLINE int
1124 operand_type_all_zero (const union i386_operand_type *x)
1125 {
1126 switch (ARRAY_SIZE(x->array))
1127 {
1128 case 3:
1129 if (x->array[2])
1130 return 0;
1131 case 2:
1132 if (x->array[1])
1133 return 0;
1134 case 1:
1135 return !x->array[0];
1136 default:
1137 abort ();
1138 }
1139 }
1140
1141 static INLINE void
1142 operand_type_set (union i386_operand_type *x, unsigned int v)
1143 {
1144 switch (ARRAY_SIZE(x->array))
1145 {
1146 case 3:
1147 x->array[2] = v;
1148 case 2:
1149 x->array[1] = v;
1150 case 1:
1151 x->array[0] = v;
1152 break;
1153 default:
1154 abort ();
1155 }
1156 }
1157
1158 static INLINE int
1159 operand_type_equal (const union i386_operand_type *x,
1160 const union i386_operand_type *y)
1161 {
1162 switch (ARRAY_SIZE(x->array))
1163 {
1164 case 3:
1165 if (x->array[2] != y->array[2])
1166 return 0;
1167 case 2:
1168 if (x->array[1] != y->array[1])
1169 return 0;
1170 case 1:
1171 return x->array[0] == y->array[0];
1172 break;
1173 default:
1174 abort ();
1175 }
1176 }
1177
1178 static INLINE int
1179 cpu_flags_all_zero (const union i386_cpu_flags *x)
1180 {
1181 switch (ARRAY_SIZE(x->array))
1182 {
1183 case 3:
1184 if (x->array[2])
1185 return 0;
1186 case 2:
1187 if (x->array[1])
1188 return 0;
1189 case 1:
1190 return !x->array[0];
1191 default:
1192 abort ();
1193 }
1194 }
1195
1196 static INLINE void
1197 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1198 {
1199 switch (ARRAY_SIZE(x->array))
1200 {
1201 case 3:
1202 x->array[2] = v;
1203 case 2:
1204 x->array[1] = v;
1205 case 1:
1206 x->array[0] = v;
1207 break;
1208 default:
1209 abort ();
1210 }
1211 }
1212
1213 static INLINE int
1214 cpu_flags_equal (const union i386_cpu_flags *x,
1215 const union i386_cpu_flags *y)
1216 {
1217 switch (ARRAY_SIZE(x->array))
1218 {
1219 case 3:
1220 if (x->array[2] != y->array[2])
1221 return 0;
1222 case 2:
1223 if (x->array[1] != y->array[1])
1224 return 0;
1225 case 1:
1226 return x->array[0] == y->array[0];
1227 break;
1228 default:
1229 abort ();
1230 }
1231 }
1232
1233 static INLINE int
1234 cpu_flags_check_cpu64 (i386_cpu_flags f)
1235 {
1236 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1237 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1238 }
1239
1240 static INLINE i386_cpu_flags
1241 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1242 {
1243 switch (ARRAY_SIZE (x.array))
1244 {
1245 case 3:
1246 x.array [2] &= y.array [2];
1247 case 2:
1248 x.array [1] &= y.array [1];
1249 case 1:
1250 x.array [0] &= y.array [0];
1251 break;
1252 default:
1253 abort ();
1254 }
1255 return x;
1256 }
1257
1258 static INLINE i386_cpu_flags
1259 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1260 {
1261 switch (ARRAY_SIZE (x.array))
1262 {
1263 case 3:
1264 x.array [2] |= y.array [2];
1265 case 2:
1266 x.array [1] |= y.array [1];
1267 case 1:
1268 x.array [0] |= y.array [0];
1269 break;
1270 default:
1271 abort ();
1272 }
1273 return x;
1274 }
1275
1276 static INLINE i386_cpu_flags
1277 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1278 {
1279 switch (ARRAY_SIZE (x.array))
1280 {
1281 case 3:
1282 x.array [2] &= ~y.array [2];
1283 case 2:
1284 x.array [1] &= ~y.array [1];
1285 case 1:
1286 x.array [0] &= ~y.array [0];
1287 break;
1288 default:
1289 abort ();
1290 }
1291 return x;
1292 }
1293
1294 #define CPU_FLAGS_ARCH_MATCH 0x1
1295 #define CPU_FLAGS_64BIT_MATCH 0x2
1296 #define CPU_FLAGS_AES_MATCH 0x4
1297 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1298 #define CPU_FLAGS_AVX_MATCH 0x10
1299
1300 #define CPU_FLAGS_32BIT_MATCH \
1301 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1302 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1303 #define CPU_FLAGS_PERFECT_MATCH \
1304 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1305
1306 /* Return CPU flags match bits. */
1307
1308 static int
1309 cpu_flags_match (const insn_template *t)
1310 {
1311 i386_cpu_flags x = t->cpu_flags;
1312 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1313
1314 x.bitfield.cpu64 = 0;
1315 x.bitfield.cpuno64 = 0;
1316
1317 if (cpu_flags_all_zero (&x))
1318 {
1319 /* This instruction is available on all archs. */
1320 match |= CPU_FLAGS_32BIT_MATCH;
1321 }
1322 else
1323 {
1324 /* This instruction is available only on some archs. */
1325 i386_cpu_flags cpu = cpu_arch_flags;
1326
1327 cpu.bitfield.cpu64 = 0;
1328 cpu.bitfield.cpuno64 = 0;
1329 cpu = cpu_flags_and (x, cpu);
1330 if (!cpu_flags_all_zero (&cpu))
1331 {
1332 if (x.bitfield.cpuavx)
1333 {
1334 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1335 if (cpu.bitfield.cpuavx)
1336 {
1337 /* Check SSE2AVX. */
1338 if (!t->opcode_modifier.sse2avx || sse2avx)
1339 {
1340 match |= (CPU_FLAGS_ARCH_MATCH
1341 | CPU_FLAGS_AVX_MATCH);
1342 /* Check AES. */
1343 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1344 match |= CPU_FLAGS_AES_MATCH;
1345 /* Check PCLMUL. */
1346 if (!x.bitfield.cpupclmul
1347 || cpu.bitfield.cpupclmul)
1348 match |= CPU_FLAGS_PCLMUL_MATCH;
1349 }
1350 }
1351 else
1352 match |= CPU_FLAGS_ARCH_MATCH;
1353 }
1354 else
1355 match |= CPU_FLAGS_32BIT_MATCH;
1356 }
1357 }
1358 return match;
1359 }
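
/* Example of the result (editorial illustration): a non-AVX template such as
   one flagged CpuSSE4_1, assembled while .sse4.1 is enabled, has a non-empty
   intersection with cpu_arch_flags and takes the plain CPU_FLAGS_32BIT_MATCH
   path above; when the 64-bit check also passes this amounts to
   CPU_FLAGS_PERFECT_MATCH.  */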
1360
1361 static INLINE i386_operand_type
1362 operand_type_and (i386_operand_type x, i386_operand_type y)
1363 {
1364 switch (ARRAY_SIZE (x.array))
1365 {
1366 case 3:
1367 x.array [2] &= y.array [2];
1368 case 2:
1369 x.array [1] &= y.array [1];
1370 case 1:
1371 x.array [0] &= y.array [0];
1372 break;
1373 default:
1374 abort ();
1375 }
1376 return x;
1377 }
1378
1379 static INLINE i386_operand_type
1380 operand_type_or (i386_operand_type x, i386_operand_type y)
1381 {
1382 switch (ARRAY_SIZE (x.array))
1383 {
1384 case 3:
1385 x.array [2] |= y.array [2];
1386 case 2:
1387 x.array [1] |= y.array [1];
1388 case 1:
1389 x.array [0] |= y.array [0];
1390 break;
1391 default:
1392 abort ();
1393 }
1394 return x;
1395 }
1396
1397 static INLINE i386_operand_type
1398 operand_type_xor (i386_operand_type x, i386_operand_type y)
1399 {
1400 switch (ARRAY_SIZE (x.array))
1401 {
1402 case 3:
1403 x.array [2] ^= y.array [2];
1404 case 2:
1405 x.array [1] ^= y.array [1];
1406 case 1:
1407 x.array [0] ^= y.array [0];
1408 break;
1409 default:
1410 abort ();
1411 }
1412 return x;
1413 }
1414
1415 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1416 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1417 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1418 static const i386_operand_type inoutportreg
1419 = OPERAND_TYPE_INOUTPORTREG;
1420 static const i386_operand_type reg16_inoutportreg
1421 = OPERAND_TYPE_REG16_INOUTPORTREG;
1422 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1423 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1424 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1425 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1426 static const i386_operand_type anydisp
1427 = OPERAND_TYPE_ANYDISP;
1428 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1429 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1430 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1431 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1432 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1433 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1434 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1435 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1436 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1437 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1438 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1439
1440 enum operand_type
1441 {
1442 reg,
1443 imm,
1444 disp,
1445 anymem
1446 };
1447
1448 static INLINE int
1449 operand_type_check (i386_operand_type t, enum operand_type c)
1450 {
1451 switch (c)
1452 {
1453 case reg:
1454 return (t.bitfield.reg8
1455 || t.bitfield.reg16
1456 || t.bitfield.reg32
1457 || t.bitfield.reg64);
1458
1459 case imm:
1460 return (t.bitfield.imm8
1461 || t.bitfield.imm8s
1462 || t.bitfield.imm16
1463 || t.bitfield.imm32
1464 || t.bitfield.imm32s
1465 || t.bitfield.imm64);
1466
1467 case disp:
1468 return (t.bitfield.disp8
1469 || t.bitfield.disp16
1470 || t.bitfield.disp32
1471 || t.bitfield.disp32s
1472 || t.bitfield.disp64);
1473
1474 case anymem:
1475 return (t.bitfield.disp8
1476 || t.bitfield.disp16
1477 || t.bitfield.disp32
1478 || t.bitfield.disp32s
1479 || t.bitfield.disp64
1480 || t.bitfield.baseindex);
1481
1482 default:
1483 abort ();
1484 }
1485
1486 return 0;
1487 }
1488
1489 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1490 operand J for instruction template T. */
1491
1492 static INLINE int
1493 match_reg_size (const insn_template *t, unsigned int j)
1494 {
1495 return !((i.types[j].bitfield.byte
1496 && !t->operand_types[j].bitfield.byte)
1497 || (i.types[j].bitfield.word
1498 && !t->operand_types[j].bitfield.word)
1499 || (i.types[j].bitfield.dword
1500 && !t->operand_types[j].bitfield.dword)
1501 || (i.types[j].bitfield.qword
1502 && !t->operand_types[j].bitfield.qword));
1503 }
1504
1505 /* Return 1 if there is no conflict in any size on operand J for
1506 instruction template T. */
1507
1508 static INLINE int
1509 match_mem_size (const insn_template *t, unsigned int j)
1510 {
1511 return (match_reg_size (t, j)
1512 && !((i.types[j].bitfield.unspecified
1513 && !t->operand_types[j].bitfield.unspecified)
1514 || (i.types[j].bitfield.fword
1515 && !t->operand_types[j].bitfield.fword)
1516 || (i.types[j].bitfield.tbyte
1517 && !t->operand_types[j].bitfield.tbyte)
1518 || (i.types[j].bitfield.xmmword
1519 && !t->operand_types[j].bitfield.xmmword)
1520 || (i.types[j].bitfield.ymmword
1521 && !t->operand_types[j].bitfield.ymmword)));
1522 }
1523
1524 /* Return 1 if there is no size conflict on any operands for
1525 instruction template T. */
1526
1527 static INLINE int
1528 operand_size_match (const insn_template *t)
1529 {
1530 unsigned int j;
1531 int match = 1;
1532
1533 /* Don't check jump instructions. */
1534 if (t->opcode_modifier.jump
1535 || t->opcode_modifier.jumpbyte
1536 || t->opcode_modifier.jumpdword
1537 || t->opcode_modifier.jumpintersegment)
1538 return match;
1539
1540 /* Check memory and accumulator operand size. */
1541 for (j = 0; j < i.operands; j++)
1542 {
1543 if (t->operand_types[j].bitfield.anysize)
1544 continue;
1545
1546 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1547 {
1548 match = 0;
1549 break;
1550 }
1551
1552 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1553 {
1554 match = 0;
1555 break;
1556 }
1557 }
1558
1559 if (match
1560 || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
1561 return match;
1562
1563 /* Check reverse. */
1564 gas_assert (i.operands == 2);
1565
1566 match = 1;
1567 for (j = 0; j < 2; j++)
1568 {
1569 if (t->operand_types[j].bitfield.acc
1570 && !match_reg_size (t, j ? 0 : 1))
1571 {
1572 match = 0;
1573 break;
1574 }
1575
1576 if (i.types[j].bitfield.mem
1577 && !match_mem_size (t, j ? 0 : 1))
1578 {
1579 match = 0;
1580 break;
1581 }
1582 }
1583
1584 return match;
1585 }
1586
1587 static INLINE int
1588 operand_type_match (i386_operand_type overlap,
1589 i386_operand_type given)
1590 {
1591 i386_operand_type temp = overlap;
1592
1593 temp.bitfield.jumpabsolute = 0;
1594 temp.bitfield.unspecified = 0;
1595 temp.bitfield.byte = 0;
1596 temp.bitfield.word = 0;
1597 temp.bitfield.dword = 0;
1598 temp.bitfield.fword = 0;
1599 temp.bitfield.qword = 0;
1600 temp.bitfield.tbyte = 0;
1601 temp.bitfield.xmmword = 0;
1602 temp.bitfield.ymmword = 0;
1603 if (operand_type_all_zero (&temp))
1604 return 0;
1605
1606 return (given.bitfield.baseindex == overlap.bitfield.baseindex
1607 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
1608 }
1609
1610 /* If given types g0 and g1 are registers they must be of the same type
1611 unless the expected operand type register overlap is null.
1612 Note that Acc in a template matches every size of reg. */
1613
1614 static INLINE int
1615 operand_type_register_match (i386_operand_type m0,
1616 i386_operand_type g0,
1617 i386_operand_type t0,
1618 i386_operand_type m1,
1619 i386_operand_type g1,
1620 i386_operand_type t1)
1621 {
1622 if (!operand_type_check (g0, reg))
1623 return 1;
1624
1625 if (!operand_type_check (g1, reg))
1626 return 1;
1627
1628 if (g0.bitfield.reg8 == g1.bitfield.reg8
1629 && g0.bitfield.reg16 == g1.bitfield.reg16
1630 && g0.bitfield.reg32 == g1.bitfield.reg32
1631 && g0.bitfield.reg64 == g1.bitfield.reg64)
1632 return 1;
1633
1634 if (m0.bitfield.acc)
1635 {
1636 t0.bitfield.reg8 = 1;
1637 t0.bitfield.reg16 = 1;
1638 t0.bitfield.reg32 = 1;
1639 t0.bitfield.reg64 = 1;
1640 }
1641
1642 if (m1.bitfield.acc)
1643 {
1644 t1.bitfield.reg8 = 1;
1645 t1.bitfield.reg16 = 1;
1646 t1.bitfield.reg32 = 1;
1647 t1.bitfield.reg64 = 1;
1648 }
1649
1650 return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1651 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1652 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1653 && !(t0.bitfield.reg64 & t1.bitfield.reg64));
1654 }
1655
1656 static INLINE unsigned int
1657 mode_from_disp_size (i386_operand_type t)
1658 {
1659 if (t.bitfield.disp8)
1660 return 1;
1661 else if (t.bitfield.disp16
1662 || t.bitfield.disp32
1663 || t.bitfield.disp32s)
1664 return 2;
1665 else
1666 return 0;
1667 }
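
/* Informal note (editorial): the 1 and 2 returned here correspond to the
   ModRM "mod" values used later by build_modrm_byte when a base register
   plus an 8-bit or 16/32-bit displacement is encoded.  */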
1668
1669 static INLINE int
1670 fits_in_signed_byte (offsetT num)
1671 {
1672 return (num >= -128) && (num <= 127);
1673 }
1674
1675 static INLINE int
1676 fits_in_unsigned_byte (offsetT num)
1677 {
1678 return (num & 0xff) == num;
1679 }
1680
1681 static INLINE int
1682 fits_in_unsigned_word (offsetT num)
1683 {
1684 return (num & 0xffff) == num;
1685 }
1686
1687 static INLINE int
1688 fits_in_signed_word (offsetT num)
1689 {
1690 return (-32768 <= num) && (num <= 32767);
1691 }
1692
1693 static INLINE int
1694 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1695 {
1696 #ifndef BFD64
1697 return 1;
1698 #else
1699 return (!(((offsetT) -1 << 31) & num)
1700 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1701 #endif
1702 } /* fits_in_signed_long() */
1703
1704 static INLINE int
1705 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1706 {
1707 #ifndef BFD64
1708 return 1;
1709 #else
1710 return (num & (((offsetT) 2 << 31) - 1)) == num;
1711 #endif
1712 } /* fits_in_unsigned_long() */
1713
1714 static i386_operand_type
1715 smallest_imm_type (offsetT num)
1716 {
1717 i386_operand_type t;
1718
1719 operand_type_set (&t, 0);
1720 t.bitfield.imm64 = 1;
1721
1722 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1723 {
1724 /* This code is disabled on the 486 because all the Imm1 forms
1725 in the opcode table are slower on the i486. They're the
1726 versions with the implicitly specified single-position
1727 displacement, which has another syntax if you really want to
1728 use that form. */
1729 t.bitfield.imm1 = 1;
1730 t.bitfield.imm8 = 1;
1731 t.bitfield.imm8s = 1;
1732 t.bitfield.imm16 = 1;
1733 t.bitfield.imm32 = 1;
1734 t.bitfield.imm32s = 1;
1735 }
1736 else if (fits_in_signed_byte (num))
1737 {
1738 t.bitfield.imm8 = 1;
1739 t.bitfield.imm8s = 1;
1740 t.bitfield.imm16 = 1;
1741 t.bitfield.imm32 = 1;
1742 t.bitfield.imm32s = 1;
1743 }
1744 else if (fits_in_unsigned_byte (num))
1745 {
1746 t.bitfield.imm8 = 1;
1747 t.bitfield.imm16 = 1;
1748 t.bitfield.imm32 = 1;
1749 t.bitfield.imm32s = 1;
1750 }
1751 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1752 {
1753 t.bitfield.imm16 = 1;
1754 t.bitfield.imm32 = 1;
1755 t.bitfield.imm32s = 1;
1756 }
1757 else if (fits_in_signed_long (num))
1758 {
1759 t.bitfield.imm32 = 1;
1760 t.bitfield.imm32s = 1;
1761 }
1762 else if (fits_in_unsigned_long (num))
1763 t.bitfield.imm32 = 1;
1764
1765 return t;
1766 }
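
/* Worked example (editorial illustration): smallest_imm_type (200) leaves
   imm8s clear, since 200 does not fit in a signed byte, but sets imm8,
   imm16, imm32, imm32s and the always-set imm64, letting template matching
   pick the narrowest encoding a template allows.  */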
1767
1768 static offsetT
1769 offset_in_range (offsetT val, int size)
1770 {
1771 addressT mask;
1772
1773 switch (size)
1774 {
1775 case 1: mask = ((addressT) 1 << 8) - 1; break;
1776 case 2: mask = ((addressT) 1 << 16) - 1; break;
1777 case 4: mask = ((addressT) 2 << 31) - 1; break;
1778 #ifdef BFD64
1779 case 8: mask = ((addressT) 2 << 63) - 1; break;
1780 #endif
1781 default: abort ();
1782 }
1783
1784 #ifdef BFD64
1785 /* If BFD64, sign extend val for 32bit address mode. */
1786 if (flag_code != CODE_64BIT
1787 || i.prefix[ADDR_PREFIX])
1788 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1789 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1790 #endif
1791
1792 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1793 {
1794 char buf1[40], buf2[40];
1795
1796 sprint_value (buf1, val);
1797 sprint_value (buf2, val & mask);
1798 as_warn (_("%s shortened to %s"), buf1, buf2);
1799 }
1800 return val & mask;
1801 }
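
/* Worked example (editorial illustration): offset_in_range (0x12345, 2)
   masks the value down to 0x2345, and because the discarded high bits are
   neither all zeroes nor all ones it also emits a "shortened to" warning.  */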
1802
1803 enum PREFIX_GROUP
1804 {
1805 PREFIX_EXIST = 0,
1806 PREFIX_LOCK,
1807 PREFIX_REP,
1808 PREFIX_OTHER
1809 };
1810
1811 /* Returns
1812 a. PREFIX_EXIST if attempting to add a prefix where one from the
1813 same class already exists.
1814 b. PREFIX_LOCK if lock prefix is added.
1815 c. PREFIX_REP if rep/repne prefix is added.
1816 d. PREFIX_OTHER if other prefix is added.
1817 */
1818
1819 static enum PREFIX_GROUP
1820 add_prefix (unsigned int prefix)
1821 {
1822 enum PREFIX_GROUP ret = PREFIX_OTHER;
1823 unsigned int q;
1824
1825 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1826 && flag_code == CODE_64BIT)
1827 {
1828 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1829 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1830 && (prefix & (REX_R | REX_X | REX_B))))
1831 ret = PREFIX_EXIST;
1832 q = REX_PREFIX;
1833 }
1834 else
1835 {
1836 switch (prefix)
1837 {
1838 default:
1839 abort ();
1840
1841 case CS_PREFIX_OPCODE:
1842 case DS_PREFIX_OPCODE:
1843 case ES_PREFIX_OPCODE:
1844 case FS_PREFIX_OPCODE:
1845 case GS_PREFIX_OPCODE:
1846 case SS_PREFIX_OPCODE:
1847 q = SEG_PREFIX;
1848 break;
1849
1850 case REPNE_PREFIX_OPCODE:
1851 case REPE_PREFIX_OPCODE:
1852 q = REP_PREFIX;
1853 ret = PREFIX_REP;
1854 break;
1855
1856 case LOCK_PREFIX_OPCODE:
1857 q = LOCK_PREFIX;
1858 ret = PREFIX_LOCK;
1859 break;
1860
1861 case FWAIT_OPCODE:
1862 q = WAIT_PREFIX;
1863 break;
1864
1865 case ADDR_PREFIX_OPCODE:
1866 q = ADDR_PREFIX;
1867 break;
1868
1869 case DATA_PREFIX_OPCODE:
1870 q = DATA_PREFIX;
1871 break;
1872 }
1873 if (i.prefix[q] != 0)
1874 ret = PREFIX_EXIST;
1875 }
1876
1877 if (ret)
1878 {
1879 if (!i.prefix[q])
1880 ++i.prefixes;
1881 i.prefix[q] |= prefix;
1882 }
1883 else
1884 as_bad (_("same type of prefix used twice"));
1885
1886 return ret;
1887 }
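
/* Usage sketch (editorial illustration): when the "lock" prefix is parsed,
   add_prefix (LOCK_PREFIX_OPCODE) records the byte in i.prefix[LOCK_PREFIX],
   increments i.prefixes and returns PREFIX_LOCK; a second identical call
   finds the slot occupied, reports "same type of prefix used twice" and
   returns PREFIX_EXIST.  */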
1888
1889 static void
1890 set_code_flag (int value)
1891 {
1892 flag_code = (enum flag_code) value;
1893 if (flag_code == CODE_64BIT)
1894 {
1895 cpu_arch_flags.bitfield.cpu64 = 1;
1896 cpu_arch_flags.bitfield.cpuno64 = 0;
1897 }
1898 else
1899 {
1900 cpu_arch_flags.bitfield.cpu64 = 0;
1901 cpu_arch_flags.bitfield.cpuno64 = 1;
1902 }
1903 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1904 {
1905 as_bad (_("64bit mode not supported on this CPU."));
1906 }
1907 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1908 {
1909 as_bad (_("32bit mode not supported on this CPU."));
1910 }
1911 stackop_size = '\0';
1912 }
1913
1914 static void
1915 set_16bit_gcc_code_flag (int new_code_flag)
1916 {
1917 flag_code = (enum flag_code) new_code_flag;
1918 if (flag_code != CODE_16BIT)
1919 abort ();
1920 cpu_arch_flags.bitfield.cpu64 = 0;
1921 cpu_arch_flags.bitfield.cpuno64 = 1;
1922 stackop_size = LONG_MNEM_SUFFIX;
1923 }
1924
1925 static void
1926 set_intel_syntax (int syntax_flag)
1927 {
1928 /* Find out if register prefixing is specified. */
1929 int ask_naked_reg = 0;
1930
1931 SKIP_WHITESPACE ();
1932 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1933 {
1934 char *string = input_line_pointer;
1935 int e = get_symbol_end ();
1936
1937 if (strcmp (string, "prefix") == 0)
1938 ask_naked_reg = 1;
1939 else if (strcmp (string, "noprefix") == 0)
1940 ask_naked_reg = -1;
1941 else
1942 as_bad (_("bad argument to syntax directive."));
1943 *input_line_pointer = e;
1944 }
1945 demand_empty_rest_of_line ();
1946
1947 intel_syntax = syntax_flag;
1948
1949 if (ask_naked_reg == 0)
1950 allow_naked_reg = (intel_syntax
1951 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
1952 else
1953 allow_naked_reg = (ask_naked_reg < 0);
1954
1955 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
1956
1957 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
1958 identifier_chars['$'] = intel_syntax ? '$' : 0;
1959 register_prefix = allow_naked_reg ? "" : "%";
1960 }
1961
1962 static void
1963 set_intel_mnemonic (int mnemonic_flag)
1964 {
1965 intel_mnemonic = mnemonic_flag;
1966 }
1967
1968 static void
1969 set_allow_index_reg (int flag)
1970 {
1971 allow_index_reg = flag;
1972 }
1973
1974 static void
1975 set_sse_check (int dummy ATTRIBUTE_UNUSED)
1976 {
1977 SKIP_WHITESPACE ();
1978
1979 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1980 {
1981 char *string = input_line_pointer;
1982 int e = get_symbol_end ();
1983
1984 if (strcmp (string, "none") == 0)
1985 sse_check = sse_check_none;
1986 else if (strcmp (string, "warning") == 0)
1987 sse_check = sse_check_warning;
1988 else if (strcmp (string, "error") == 0)
1989 sse_check = sse_check_error;
1990 else
1991 as_bad (_("bad argument to sse_check directive."));
1992 *input_line_pointer = e;
1993 }
1994 else
1995 as_bad (_("missing argument for sse_check directive"));
1996
1997 demand_empty_rest_of_line ();
1998 }
1999
2000 static void
2001 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2002 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2003 {
2004 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2005 static const char *arch;
2006
2007 /* Intel L1OM is only supported on ELF. */
2008 if (!IS_ELF)
2009 return;
2010
2011 if (!arch)
2012 {
2013 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2014 use default_arch. */
2015 arch = cpu_arch_name;
2016 if (!arch)
2017 arch = default_arch;
2018 }
2019
2020 /* When targeting Intel L1OM, only features that include L1OM support may be enabled. */
2021 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2022 || new_flag.bitfield.cpul1om)
2023 return;
2024
2025 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2026 #endif
2027 }
2028
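     /* Handle the .arch directive: a bare name such as ".arch i686" selects
        a base architecture, ".arch .sse4.1" / ".arch .nosse4.1" turn a
        single extension on or off on top of it, and an optional ",nojumps"
        or ",jumps" modifier controls promotion of out-of-range conditional
        jumps.  */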
2029 static void
2030 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2031 {
2032 SKIP_WHITESPACE ();
2033
2034 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2035 {
2036 char *string = input_line_pointer;
2037 int e = get_symbol_end ();
2038 unsigned int j;
2039 i386_cpu_flags flags;
2040
2041 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2042 {
2043 if (strcmp (string, cpu_arch[j].name) == 0)
2044 {
2045 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2046
2047 if (*string != '.')
2048 {
2049 cpu_arch_name = cpu_arch[j].name;
2050 cpu_sub_arch_name = NULL;
2051 cpu_arch_flags = cpu_arch[j].flags;
2052 if (flag_code == CODE_64BIT)
2053 {
2054 cpu_arch_flags.bitfield.cpu64 = 1;
2055 cpu_arch_flags.bitfield.cpuno64 = 0;
2056 }
2057 else
2058 {
2059 cpu_arch_flags.bitfield.cpu64 = 0;
2060 cpu_arch_flags.bitfield.cpuno64 = 1;
2061 }
2062 cpu_arch_isa = cpu_arch[j].type;
2063 cpu_arch_isa_flags = cpu_arch[j].flags;
2064 if (!cpu_arch_tune_set)
2065 {
2066 cpu_arch_tune = cpu_arch_isa;
2067 cpu_arch_tune_flags = cpu_arch_isa_flags;
2068 }
2069 break;
2070 }
2071
2072 if (strncmp (string + 1, "no", 2))
2073 flags = cpu_flags_or (cpu_arch_flags,
2074 cpu_arch[j].flags);
2075 else
2076 flags = cpu_flags_and_not (cpu_arch_flags,
2077 cpu_arch[j].flags);
2078 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2079 {
2080 if (cpu_sub_arch_name)
2081 {
2082 char *name = cpu_sub_arch_name;
2083 cpu_sub_arch_name = concat (name,
2084 cpu_arch[j].name,
2085 (const char *) NULL);
2086 free (name);
2087 }
2088 else
2089 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2090 cpu_arch_flags = flags;
2091 }
2092 *input_line_pointer = e;
2093 demand_empty_rest_of_line ();
2094 return;
2095 }
2096 }
2097 if (j >= ARRAY_SIZE (cpu_arch))
2098 as_bad (_("no such architecture: `%s'"), string);
2099
2100 *input_line_pointer = e;
2101 }
2102 else
2103 as_bad (_("missing cpu architecture"));
2104
2105 no_cond_jump_promotion = 0;
2106 if (*input_line_pointer == ','
2107 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2108 {
2109 char *string = ++input_line_pointer;
2110 int e = get_symbol_end ();
2111
2112 if (strcmp (string, "nojumps") == 0)
2113 no_cond_jump_promotion = 1;
2114 else if (strcmp (string, "jumps") == 0)
2115 ;
2116 else
2117 as_bad (_("no such architecture modifier: `%s'"), string);
2118
2119 *input_line_pointer = e;
2120 }
2121
2122 demand_empty_rest_of_line ();
2123 }
2124
2125 enum bfd_architecture
2126 i386_arch (void)
2127 {
2128 if (cpu_arch_isa == PROCESSOR_L1OM)
2129 {
2130 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2131 || flag_code != CODE_64BIT)
2132 as_fatal (_("Intel L1OM is 64bit ELF only"));
2133 return bfd_arch_l1om;
2134 }
2135 else
2136 return bfd_arch_i386;
2137 }
2138
2139 unsigned long
2140 i386_mach ()
2141 {
2142 if (!strcmp (default_arch, "x86_64"))
2143 {
2144 if (cpu_arch_isa == PROCESSOR_L1OM)
2145 {
2146 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2147 as_fatal (_("Intel L1OM is 64bit ELF only"));
2148 return bfd_mach_l1om;
2149 }
2150 else
2151 return bfd_mach_x86_64;
2152 }
2153 else if (!strcmp (default_arch, "i386"))
2154 return bfd_mach_i386_i386;
2155 else
2156 as_fatal (_("Unknown architecture"));
2157 }
2158 \f
2159 void
2160 md_begin ()
2161 {
2162 const char *hash_err;
2163
2164 /* Initialize op_hash hash table. */
2165 op_hash = hash_new ();
2166
2167 {
2168 const insn_template *optab;
2169 templates *core_optab;
2170
2171 /* Setup for loop. */
2172 optab = i386_optab;
2173 core_optab = (templates *) xmalloc (sizeof (templates));
2174 core_optab->start = optab;
2175
2176 while (1)
2177 {
2178 ++optab;
2179 if (optab->name == NULL
2180 || strcmp (optab->name, (optab - 1)->name) != 0)
2181 {
2182 /* different name --> ship out current template list;
2183 add to hash table; & begin anew. */
2184 core_optab->end = optab;
2185 hash_err = hash_insert (op_hash,
2186 (optab - 1)->name,
2187 (void *) core_optab);
2188 if (hash_err)
2189 {
2190 as_fatal (_("Internal Error: Can't hash %s: %s"),
2191 (optab - 1)->name,
2192 hash_err);
2193 }
2194 if (optab->name == NULL)
2195 break;
2196 core_optab = (templates *) xmalloc (sizeof (templates));
2197 core_optab->start = optab;
2198 }
2199 }
2200 }
2201
2202 /* Initialize reg_hash hash table. */
2203 reg_hash = hash_new ();
2204 {
2205 const reg_entry *regtab;
2206 unsigned int regtab_size = i386_regtab_size;
2207
2208 for (regtab = i386_regtab; regtab_size--; regtab++)
2209 {
2210 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2211 if (hash_err)
2212 as_fatal (_("Internal Error: Can't hash %s: %s"),
2213 regtab->reg_name,
2214 hash_err);
2215 }
2216 }
2217
2218 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2219 {
2220 int c;
2221 char *p;
2222
2223 for (c = 0; c < 256; c++)
2224 {
2225 if (ISDIGIT (c))
2226 {
2227 digit_chars[c] = c;
2228 mnemonic_chars[c] = c;
2229 register_chars[c] = c;
2230 operand_chars[c] = c;
2231 }
2232 else if (ISLOWER (c))
2233 {
2234 mnemonic_chars[c] = c;
2235 register_chars[c] = c;
2236 operand_chars[c] = c;
2237 }
2238 else if (ISUPPER (c))
2239 {
2240 mnemonic_chars[c] = TOLOWER (c);
2241 register_chars[c] = mnemonic_chars[c];
2242 operand_chars[c] = c;
2243 }
2244
2245 if (ISALPHA (c) || ISDIGIT (c))
2246 identifier_chars[c] = c;
2247 else if (c >= 128)
2248 {
2249 identifier_chars[c] = c;
2250 operand_chars[c] = c;
2251 }
2252 }
2253
2254 #ifdef LEX_AT
2255 identifier_chars['@'] = '@';
2256 #endif
2257 #ifdef LEX_QM
2258 identifier_chars['?'] = '?';
2259 operand_chars['?'] = '?';
2260 #endif
2261 digit_chars['-'] = '-';
2262 mnemonic_chars['_'] = '_';
2263 mnemonic_chars['-'] = '-';
2264 mnemonic_chars['.'] = '.';
2265 identifier_chars['_'] = '_';
2266 identifier_chars['.'] = '.';
2267
2268 for (p = operand_special_chars; *p != '\0'; p++)
2269 operand_chars[(unsigned char) *p] = *p;
2270 }
2271
2272 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2273 if (IS_ELF)
2274 {
2275 record_alignment (text_section, 2);
2276 record_alignment (data_section, 2);
2277 record_alignment (bss_section, 2);
2278 }
2279 #endif
2280
2281 if (flag_code == CODE_64BIT)
2282 {
2283 x86_dwarf2_return_column = 16;
2284 x86_cie_data_alignment = -8;
2285 }
2286 else
2287 {
2288 x86_dwarf2_return_column = 8;
2289 x86_cie_data_alignment = -4;
2290 }
2291 }
2292
2293 void
2294 i386_print_statistics (FILE *file)
2295 {
2296 hash_print_statistics (file, "i386 opcode", op_hash);
2297 hash_print_statistics (file, "i386 register", reg_hash);
2298 }
2299 \f
2300 #ifdef DEBUG386
2301
2302 /* Debugging routines for md_assemble. */
2303 static void pte (insn_template *);
2304 static void pt (i386_operand_type);
2305 static void pe (expressionS *);
2306 static void ps (symbolS *);
2307
2308 static void
2309 pi (char *line, i386_insn *x)
2310 {
2311 unsigned int i;
2312
2313 fprintf (stdout, "%s: template ", line);
2314 pte (&x->tm);
2315 fprintf (stdout, " address: base %s index %s scale %x\n",
2316 x->base_reg ? x->base_reg->reg_name : "none",
2317 x->index_reg ? x->index_reg->reg_name : "none",
2318 x->log2_scale_factor);
2319 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2320 x->rm.mode, x->rm.reg, x->rm.regmem);
2321 fprintf (stdout, " sib: base %x index %x scale %x\n",
2322 x->sib.base, x->sib.index, x->sib.scale);
2323 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2324 (x->rex & REX_W) != 0,
2325 (x->rex & REX_R) != 0,
2326 (x->rex & REX_X) != 0,
2327 (x->rex & REX_B) != 0);
2328 for (i = 0; i < x->operands; i++)
2329 {
2330 fprintf (stdout, " #%d: ", i + 1);
2331 pt (x->types[i]);
2332 fprintf (stdout, "\n");
2333 if (x->types[i].bitfield.reg8
2334 || x->types[i].bitfield.reg16
2335 || x->types[i].bitfield.reg32
2336 || x->types[i].bitfield.reg64
2337 || x->types[i].bitfield.regmmx
2338 || x->types[i].bitfield.regxmm
2339 || x->types[i].bitfield.regymm
2340 || x->types[i].bitfield.sreg2
2341 || x->types[i].bitfield.sreg3
2342 || x->types[i].bitfield.control
2343 || x->types[i].bitfield.debug
2344 || x->types[i].bitfield.test)
2345 fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
2346 if (operand_type_check (x->types[i], imm))
2347 pe (x->op[i].imms);
2348 if (operand_type_check (x->types[i], disp))
2349 pe (x->op[i].disps);
2350 }
2351 }
2352
2353 static void
2354 pte (insn_template *t)
2355 {
2356 unsigned int i;
2357 fprintf (stdout, " %d operands ", t->operands);
2358 fprintf (stdout, "opcode %x ", t->base_opcode);
2359 if (t->extension_opcode != None)
2360 fprintf (stdout, "ext %x ", t->extension_opcode);
2361 if (t->opcode_modifier.d)
2362 fprintf (stdout, "D");
2363 if (t->opcode_modifier.w)
2364 fprintf (stdout, "W");
2365 fprintf (stdout, "\n");
2366 for (i = 0; i < t->operands; i++)
2367 {
2368 fprintf (stdout, " #%d type ", i + 1);
2369 pt (t->operand_types[i]);
2370 fprintf (stdout, "\n");
2371 }
2372 }
2373
2374 static void
2375 pe (expressionS *e)
2376 {
2377 fprintf (stdout, " operation %d\n", e->X_op);
2378 fprintf (stdout, " add_number %ld (%lx)\n",
2379 (long) e->X_add_number, (long) e->X_add_number);
2380 if (e->X_add_symbol)
2381 {
2382 fprintf (stdout, " add_symbol ");
2383 ps (e->X_add_symbol);
2384 fprintf (stdout, "\n");
2385 }
2386 if (e->X_op_symbol)
2387 {
2388 fprintf (stdout, " op_symbol ");
2389 ps (e->X_op_symbol);
2390 fprintf (stdout, "\n");
2391 }
2392 }
2393
2394 static void
2395 ps (symbolS *s)
2396 {
2397 fprintf (stdout, "%s type %s%s",
2398 S_GET_NAME (s),
2399 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2400 segment_name (S_GET_SEGMENT (s)));
2401 }
2402
2403 static struct type_name
2404 {
2405 i386_operand_type mask;
2406 const char *name;
2407 }
2408 const type_names[] =
2409 {
2410 { OPERAND_TYPE_REG8, "r8" },
2411 { OPERAND_TYPE_REG16, "r16" },
2412 { OPERAND_TYPE_REG32, "r32" },
2413 { OPERAND_TYPE_REG64, "r64" },
2414 { OPERAND_TYPE_IMM8, "i8" },
2415 { OPERAND_TYPE_IMM8, "i8s" },
2416 { OPERAND_TYPE_IMM16, "i16" },
2417 { OPERAND_TYPE_IMM32, "i32" },
2418 { OPERAND_TYPE_IMM32S, "i32s" },
2419 { OPERAND_TYPE_IMM64, "i64" },
2420 { OPERAND_TYPE_IMM1, "i1" },
2421 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2422 { OPERAND_TYPE_DISP8, "d8" },
2423 { OPERAND_TYPE_DISP16, "d16" },
2424 { OPERAND_TYPE_DISP32, "d32" },
2425 { OPERAND_TYPE_DISP32S, "d32s" },
2426 { OPERAND_TYPE_DISP64, "d64" },
2427 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2428 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2429 { OPERAND_TYPE_CONTROL, "control reg" },
2430 { OPERAND_TYPE_TEST, "test reg" },
2431 { OPERAND_TYPE_DEBUG, "debug reg" },
2432 { OPERAND_TYPE_FLOATREG, "FReg" },
2433 { OPERAND_TYPE_FLOATACC, "FAcc" },
2434 { OPERAND_TYPE_SREG2, "SReg2" },
2435 { OPERAND_TYPE_SREG3, "SReg3" },
2436 { OPERAND_TYPE_ACC, "Acc" },
2437 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2438 { OPERAND_TYPE_REGMMX, "rMMX" },
2439 { OPERAND_TYPE_REGXMM, "rXMM" },
2440 { OPERAND_TYPE_REGYMM, "rYMM" },
2441 { OPERAND_TYPE_ESSEG, "es" },
2442 };
2443
2444 static void
2445 pt (i386_operand_type t)
2446 {
2447 unsigned int j;
2448 i386_operand_type a;
2449
2450 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2451 {
2452 a = operand_type_and (t, type_names[j].mask);
2453 if (!operand_type_all_zero (&a))
2454 fprintf (stdout, "%s, ", type_names[j].name);
2455 }
2456 fflush (stdout);
2457 }
2458
2459 #endif /* DEBUG386 */
2460 \f
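     /* Map a fixup of SIZE bytes, with pc-relativity PCREL and signedness
        SIGN, onto a BFD relocation type, honouring an explicitly requested
        relocation OTHER when one was given.  For example a plain 4-byte
        pc-relative fixup becomes BFD_RELOC_32_PCREL, while a 4-byte signed
        absolute fixup yields BFD_RELOC_X86_64_32S.  */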
2461 static bfd_reloc_code_real_type
2462 reloc (unsigned int size,
2463 int pcrel,
2464 int sign,
2465 bfd_reloc_code_real_type other)
2466 {
2467 if (other != NO_RELOC)
2468 {
2469 reloc_howto_type *rel;
2470
2471 if (size == 8)
2472 switch (other)
2473 {
2474 case BFD_RELOC_X86_64_GOT32:
2475 return BFD_RELOC_X86_64_GOT64;
2476 break;
2477 case BFD_RELOC_X86_64_PLTOFF64:
2478 return BFD_RELOC_X86_64_PLTOFF64;
2479 break;
2480 case BFD_RELOC_X86_64_GOTPC32:
2481 other = BFD_RELOC_X86_64_GOTPC64;
2482 break;
2483 case BFD_RELOC_X86_64_GOTPCREL:
2484 other = BFD_RELOC_X86_64_GOTPCREL64;
2485 break;
2486 case BFD_RELOC_X86_64_TPOFF32:
2487 other = BFD_RELOC_X86_64_TPOFF64;
2488 break;
2489 case BFD_RELOC_X86_64_DTPOFF32:
2490 other = BFD_RELOC_X86_64_DTPOFF64;
2491 break;
2492 default:
2493 break;
2494 }
2495
2496 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2497 if (size == 4 && flag_code != CODE_64BIT)
2498 sign = -1;
2499
2500 rel = bfd_reloc_type_lookup (stdoutput, other);
2501 if (!rel)
2502 as_bad (_("unknown relocation (%u)"), other);
2503 else if (size != bfd_get_reloc_size (rel))
2504 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2505 bfd_get_reloc_size (rel),
2506 size);
2507 else if (pcrel && !rel->pc_relative)
2508 as_bad (_("non-pc-relative relocation for pc-relative field"));
2509 else if ((rel->complain_on_overflow == complain_overflow_signed
2510 && !sign)
2511 || (rel->complain_on_overflow == complain_overflow_unsigned
2512 && sign > 0))
2513 as_bad (_("relocated field and relocation type differ in signedness"));
2514 else
2515 return other;
2516 return NO_RELOC;
2517 }
2518
2519 if (pcrel)
2520 {
2521 if (!sign)
2522 as_bad (_("there are no unsigned pc-relative relocations"));
2523 switch (size)
2524 {
2525 case 1: return BFD_RELOC_8_PCREL;
2526 case 2: return BFD_RELOC_16_PCREL;
2527 case 4: return BFD_RELOC_32_PCREL;
2528 case 8: return BFD_RELOC_64_PCREL;
2529 }
2530 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2531 }
2532 else
2533 {
2534 if (sign > 0)
2535 switch (size)
2536 {
2537 case 4: return BFD_RELOC_X86_64_32S;
2538 }
2539 else
2540 switch (size)
2541 {
2542 case 1: return BFD_RELOC_8;
2543 case 2: return BFD_RELOC_16;
2544 case 4: return BFD_RELOC_32;
2545 case 8: return BFD_RELOC_64;
2546 }
2547 as_bad (_("cannot do %s %u byte relocation"),
2548 sign > 0 ? "signed" : "unsigned", size);
2549 }
2550
2551 return NO_RELOC;
2552 }
2553
2554 /* Here we decide which fixups can be adjusted to make them relative to
2555 the beginning of the section instead of the symbol. Basically we need
2556 to make sure that the dynamic relocations are done correctly, so in
2557 some cases we force the original symbol to be used. */
2558
2559 int
2560 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2561 {
2562 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2563 if (!IS_ELF)
2564 return 1;
2565
2566 /* Don't adjust pc-relative references to merge sections in 64-bit
2567 mode. */
2568 if (use_rela_relocations
2569 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2570 && fixP->fx_pcrel)
2571 return 0;
2572
2573 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2574 and changed later by validate_fix. */
2575 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2576 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2577 return 0;
2578
2579 /* adjust_reloc_syms doesn't know about the GOT. */
2580 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2581 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2582 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2583 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2584 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2585 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2586 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2587 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2588 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2589 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2590 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2591 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2592 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2593 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2594 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2595 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2596 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2597 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2598 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2599 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2600 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2601 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2602 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2603 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2604 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2605 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2606 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2607 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2608 return 0;
2609 #endif
2610 return 1;
2611 }
2612
2613 static int
2614 intel_float_operand (const char *mnemonic)
2615 {
2616 /* Note that the value returned is meaningful only for opcodes with (memory)
2617 operands, hence the code here is free to improperly handle opcodes that
2618 have no operands (for better performance and smaller code). */
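     /* Return values: 0 for non-float ops and fxsave/fxrstor, 1 for
        ordinary float ops (fadd, fsqrt, ...), 2 for integer float ops
        (fiadd, fild, ...), 3 for control/state ops (fldcw, fnstenv, fsave,
        frstor, ...).  */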
2619
2620 if (mnemonic[0] != 'f')
2621 return 0; /* non-math */
2622
2623 switch (mnemonic[1])
2624 {
2625 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2626 the fs segment override prefix are not currently handled because no
2627 call path can make opcodes without operands get here. */
2628 case 'i':
2629 return 2 /* integer op */;
2630 case 'l':
2631 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2632 return 3; /* fldcw/fldenv */
2633 break;
2634 case 'n':
2635 if (mnemonic[2] != 'o' /* fnop */)
2636 return 3; /* non-waiting control op */
2637 break;
2638 case 'r':
2639 if (mnemonic[2] == 's')
2640 return 3; /* frstor/frstpm */
2641 break;
2642 case 's':
2643 if (mnemonic[2] == 'a')
2644 return 3; /* fsave */
2645 if (mnemonic[2] == 't')
2646 {
2647 switch (mnemonic[3])
2648 {
2649 case 'c': /* fstcw */
2650 case 'd': /* fstdw */
2651 case 'e': /* fstenv */
2652 case 's': /* fsts[gw] */
2653 return 3;
2654 }
2655 }
2656 break;
2657 case 'x':
2658 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2659 return 0; /* fxsave/fxrstor are not really math ops */
2660 break;
2661 }
2662
2663 return 1;
2664 }
2665
2666 /* Build the VEX prefix. */
2667
2668 static void
2669 build_vex_prefix (const insn_template *t)
2670 {
2671 unsigned int register_specifier;
2672 unsigned int implied_prefix;
2673 unsigned int vector_length;
2674
2675 /* Check register specifier. */
2676 if (i.vex.register_specifier)
2677 {
2678 register_specifier = i.vex.register_specifier->reg_num;
2679 if ((i.vex.register_specifier->reg_flags & RegRex))
2680 register_specifier += 8;
2681 register_specifier = ~register_specifier & 0xf;
2682 }
2683 else
2684 register_specifier = 0xf;
2685
2686 /* Use 2-byte VEX prefix by swapping destination and source
2687 operands. */
2688 if (!i.swap_operand
2689 && i.operands == i.reg_operands
2690 && i.tm.opcode_modifier.vexopcode == VEX0F
2691 && i.tm.opcode_modifier.s
2692 && i.rex == REX_B)
2693 {
2694 unsigned int xchg = i.operands - 1;
2695 union i386_op temp_op;
2696 i386_operand_type temp_type;
2697
2698 temp_type = i.types[xchg];
2699 i.types[xchg] = i.types[0];
2700 i.types[0] = temp_type;
2701 temp_op = i.op[xchg];
2702 i.op[xchg] = i.op[0];
2703 i.op[0] = temp_op;
2704
2705 gas_assert (i.rm.mode == 3);
2706
2707 i.rex = REX_R;
2708 xchg = i.rm.regmem;
2709 i.rm.regmem = i.rm.reg;
2710 i.rm.reg = xchg;
2711
2712 /* Use the next insn. */
2713 i.tm = t[1];
2714 }
2715
2716 if (i.tm.opcode_modifier.vex == VEXScalar)
2717 vector_length = avxscalar;
2718 else
2719 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2720
2721 switch ((i.tm.base_opcode >> 8) & 0xff)
2722 {
2723 case 0:
2724 implied_prefix = 0;
2725 break;
2726 case DATA_PREFIX_OPCODE:
2727 implied_prefix = 1;
2728 break;
2729 case REPE_PREFIX_OPCODE:
2730 implied_prefix = 2;
2731 break;
2732 case REPNE_PREFIX_OPCODE:
2733 implied_prefix = 3;
2734 break;
2735 default:
2736 abort ();
2737 }
2738
2739 /* Use 2-byte VEX prefix if possible. */
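     /* Byte 1 of a 2-byte VEX prefix (after the 0xc5) is laid out as
        ~R | ~vvvv | L | pp: bit 7 is the inverted REX.R, bits 6:3 the
        inverted register specifier, bit 2 the vector length and bits 1:0
        the implied prefix.  For instance "vaddps %ymm2, %ymm1, %ymm0"
        comes out as c5 f4 58 c2.  */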
2740 if (i.tm.opcode_modifier.vexopcode == VEX0F
2741 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2742 {
2743 /* 2-byte VEX prefix. */
2744 unsigned int r;
2745
2746 i.vex.length = 2;
2747 i.vex.bytes[0] = 0xc5;
2748
2749 /* Check the REX.R bit. */
2750 r = (i.rex & REX_R) ? 0 : 1;
2751 i.vex.bytes[1] = (r << 7
2752 | register_specifier << 3
2753 | vector_length << 2
2754 | implied_prefix);
2755 }
2756 else
2757 {
2758 /* 3-byte VEX prefix. */
2759 unsigned int m, w;
2760
2761 i.vex.length = 3;
2762
2763 switch (i.tm.opcode_modifier.vexopcode)
2764 {
2765 case VEX0F:
2766 m = 0x1;
2767 i.vex.bytes[0] = 0xc4;
2768 break;
2769 case VEX0F38:
2770 m = 0x2;
2771 i.vex.bytes[0] = 0xc4;
2772 break;
2773 case VEX0F3A:
2774 m = 0x3;
2775 i.vex.bytes[0] = 0xc4;
2776 break;
2777 case XOP08:
2778 m = 0x8;
2779 i.vex.bytes[0] = 0x8f;
2780 break;
2781 case XOP09:
2782 m = 0x9;
2783 i.vex.bytes[0] = 0x8f;
2784 break;
2785 case XOP0A:
2786 m = 0xa;
2787 i.vex.bytes[0] = 0x8f;
2788 break;
2789 default:
2790 abort ();
2791 }
2792
2793 /* The high 3 bits of the second VEX byte are the one's complement
2794 of the RXB bits from REX. */
2795 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2796
2797 /* Check the REX.W bit. */
2798 w = (i.rex & REX_W) ? 1 : 0;
2799 if (i.tm.opcode_modifier.vexw)
2800 {
2801 if (w)
2802 abort ();
2803
2804 if (i.tm.opcode_modifier.vexw == VEXW1)
2805 w = 1;
2806 }
2807
2808 i.vex.bytes[2] = (w << 7
2809 | register_specifier << 3
2810 | vector_length << 2
2811 | implied_prefix);
2812 }
2813 }
2814
2815 static void
2816 process_immext (void)
2817 {
2818 expressionS *exp;
2819
2820 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2821 {
2822 /* SSE3 instructions have fixed register operands, plus an opcode
2823 suffix which is coded in the same place as an 8-bit immediate
2824 field would be. Here we check those operands and remove them
2825 afterwards. */
2826 unsigned int x;
2827
2828 for (x = 0; x < i.operands; x++)
2829 if (i.op[x].regs->reg_num != x)
2830 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2831 register_prefix, i.op[x].regs->reg_name, x + 1,
2832 i.tm.name);
2833
2834 i.operands = 0;
2835 }
2836
2837 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2838 which is coded in the same place as an 8-bit immediate field
2839 would be. Here we fake an 8-bit immediate operand from the
2840 opcode suffix stored in tm.extension_opcode.
2841
2842 AVX instructions also use this encoding for some of the
2843 3-operand instructions. */
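     /* For example "monitor %eax, %ecx, %edx" reaches here with three
        fixed register operands that are checked and then dropped, while a
        3DNow! insn such as pfadd has its opcode suffix byte attached as a
        fake 8-bit immediate operand so that it is emitted where an imm8
        would go.  */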
2844
2845 gas_assert (i.imm_operands == 0
2846 && (i.operands <= 2
2847 || (i.tm.opcode_modifier.vex
2848 && i.operands <= 4)));
2849
2850 exp = &im_expressions[i.imm_operands++];
2851 i.op[i.operands].imms = exp;
2852 i.types[i.operands] = imm8;
2853 i.operands++;
2854 exp->X_op = O_constant;
2855 exp->X_add_number = i.tm.extension_opcode;
2856 i.tm.extension_opcode = None;
2857 }
2858
2859 /* This is the guts of the machine-dependent assembler. LINE points to a
2860 machine dependent instruction. This function is supposed to emit
2861 the frags/bytes it assembles to. */
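     /* Roughly: parse_insn picks up prefixes and the mnemonic,
        parse_operands fills in i.op[]/i.types[], optimize_imm and
        optimize_disp shrink constant operands, match_template selects the
        insn template, and the remaining steps below fix up suffixes,
        prefixes and the VEX/REX encoding before output_insn emits the
        bytes.  */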
2862
2863 void
2864 md_assemble (char *line)
2865 {
2866 unsigned int j;
2867 char mnemonic[MAX_MNEM_SIZE];
2868 const insn_template *t;
2869
2870 /* Initialize globals. */
2871 memset (&i, '\0', sizeof (i));
2872 for (j = 0; j < MAX_OPERANDS; j++)
2873 i.reloc[j] = NO_RELOC;
2874 memset (disp_expressions, '\0', sizeof (disp_expressions));
2875 memset (im_expressions, '\0', sizeof (im_expressions));
2876 save_stack_p = save_stack;
2877
2878 /* First parse an instruction mnemonic & call i386_operand for the operands.
2879 We assume that the scrubber has arranged it so that line[0] is the valid
2880 start of a (possibly prefixed) mnemonic. */
2881
2882 line = parse_insn (line, mnemonic);
2883 if (line == NULL)
2884 return;
2885
2886 line = parse_operands (line, mnemonic);
2887 this_operand = -1;
2888 if (line == NULL)
2889 return;
2890
2891 /* Now we've parsed the mnemonic into a set of templates, and have the
2892 operands at hand. */
2893
2894 /* All intel opcodes have reversed operands except for "bound" and
2895 "enter". We also don't reverse intersegment "jmp" and "call"
2896 instructions with 2 immediate operands so that the immediate segment
2897 precedes the offset, as it does when in AT&T mode. */
2898 if (intel_syntax
2899 && i.operands > 1
2900 && (strcmp (mnemonic, "bound") != 0)
2901 && (strcmp (mnemonic, "invlpga") != 0)
2902 && !(operand_type_check (i.types[0], imm)
2903 && operand_type_check (i.types[1], imm)))
2904 swap_operands ();
2905
2906 /* The order of the immediates should be reversed
2907 for 2 immediates extrq and insertq instructions */
2908 if (i.imm_operands == 2
2909 && (strcmp (mnemonic, "extrq") == 0
2910 || strcmp (mnemonic, "insertq") == 0))
2911 swap_2_operands (0, 1);
2912
2913 if (i.imm_operands)
2914 optimize_imm ();
2915
2916 /* Don't optimize displacement for movabs since it only takes 64bit
2917 displacement. */
2918 if (i.disp_operands
2919 && (flag_code != CODE_64BIT
2920 || strcmp (mnemonic, "movabs") != 0))
2921 optimize_disp ();
2922
2923 /* Next, we find a template that matches the given insn,
2924 making sure the overlap of the given operands types is consistent
2925 with the template operand types. */
2926
2927 if (!(t = match_template ()))
2928 return;
2929
2930 if (sse_check != sse_check_none
2931 && !i.tm.opcode_modifier.noavx
2932 && (i.tm.cpu_flags.bitfield.cpusse
2933 || i.tm.cpu_flags.bitfield.cpusse2
2934 || i.tm.cpu_flags.bitfield.cpusse3
2935 || i.tm.cpu_flags.bitfield.cpussse3
2936 || i.tm.cpu_flags.bitfield.cpusse4_1
2937 || i.tm.cpu_flags.bitfield.cpusse4_2))
2938 {
2939 (sse_check == sse_check_warning
2940 ? as_warn
2941 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
2942 }
2943
2944 /* Zap movzx and movsx suffix. The suffix has been set from
2945 "word ptr" or "byte ptr" on the source operand in Intel syntax
2946 or extracted from mnemonic in AT&T syntax. But we'll use
2947 the destination register to choose the suffix for encoding. */
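     /* Masking with ~9 folds opcodes 0x0fb6/0x0fb7 (movzx) and
        0x0fbe/0x0fbf (movsx) onto the single value 0x0fb6.  */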
2948 if ((i.tm.base_opcode & ~9) == 0x0fb6)
2949 {
2950 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
2951 there is no suffix, the default will be byte extension. */
2952 if (i.reg_operands != 2
2953 && !i.suffix
2954 && intel_syntax)
2955 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
2956
2957 i.suffix = 0;
2958 }
2959
2960 if (i.tm.opcode_modifier.fwait)
2961 if (!add_prefix (FWAIT_OPCODE))
2962 return;
2963
2964 /* Check for lock without a lockable instruction. Destination operand
2965 must be memory unless it is xchg (0x86). */
2966 if (i.prefix[LOCK_PREFIX]
2967 && (!i.tm.opcode_modifier.islockable
2968 || i.mem_operands == 0
2969 || (i.tm.base_opcode != 0x86
2970 && !operand_type_check (i.types[i.operands - 1], anymem))))
2971 {
2972 as_bad (_("expecting lockable instruction after `lock'"));
2973 return;
2974 }
2975
2976 /* Check string instruction segment overrides. */
2977 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
2978 {
2979 if (!check_string ())
2980 return;
2981 i.disp_operands = 0;
2982 }
2983
2984 if (!process_suffix ())
2985 return;
2986
2987 /* Update operand types. */
2988 for (j = 0; j < i.operands; j++)
2989 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
2990
2991 /* Make still unresolved immediate matches conform to size of immediate
2992 given in i.suffix. */
2993 if (!finalize_imm ())
2994 return;
2995
2996 if (i.types[0].bitfield.imm1)
2997 i.imm_operands = 0; /* kludge for shift insns. */
2998
2999 /* We only need to check those implicit registers for instructions
3000 with 3 operands or less. */
3001 if (i.operands <= 3)
3002 for (j = 0; j < i.operands; j++)
3003 if (i.types[j].bitfield.inoutportreg
3004 || i.types[j].bitfield.shiftcount
3005 || i.types[j].bitfield.acc
3006 || i.types[j].bitfield.floatacc)
3007 i.reg_operands--;
3008
3009 /* ImmExt should be processed after SSE2AVX. */
3010 if (!i.tm.opcode_modifier.sse2avx
3011 && i.tm.opcode_modifier.immext)
3012 process_immext ();
3013
3014 /* For insns with operands there are more diddles to do to the opcode. */
3015 if (i.operands)
3016 {
3017 if (!process_operands ())
3018 return;
3019 }
3020 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3021 {
3022 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3023 as_warn (_("translating to `%sp'"), i.tm.name);
3024 }
3025
3026 if (i.tm.opcode_modifier.vex)
3027 build_vex_prefix (t);
3028
3029 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3030 instructions may define INT_OPCODE as well, so avoid this corner
3031 case for those instructions that use MODRM. */
3032 if (i.tm.base_opcode == INT_OPCODE
3033 && !i.tm.opcode_modifier.modrm
3034 && i.op[0].imms->X_add_number == 3)
3035 {
3036 i.tm.base_opcode = INT3_OPCODE;
3037 i.imm_operands = 0;
3038 }
3039
3040 if ((i.tm.opcode_modifier.jump
3041 || i.tm.opcode_modifier.jumpbyte
3042 || i.tm.opcode_modifier.jumpdword)
3043 && i.op[0].disps->X_op == O_constant)
3044 {
3045 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3046 the absolute address given by the constant. Since ix86 jumps and
3047 calls are pc relative, we need to generate a reloc. */
3048 i.op[0].disps->X_add_symbol = &abs_symbol;
3049 i.op[0].disps->X_op = O_symbol;
3050 }
3051
3052 if (i.tm.opcode_modifier.rex64)
3053 i.rex |= REX_W;
3054
3055 /* For 8 bit registers we need an empty rex prefix. Also if the
3056 instruction already has a prefix, we need to convert old
3057 registers to new ones. */
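     /* For example "movb %sil, %al" needs a bare REX prefix so that %sil
        is encodable; %al is then mapped to its REX-aware table entry.  By
        contrast %ah/%ch/%dh/%bh cannot be encoded at all once a REX prefix
        is present, hence the error below.  */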
3058
3059 if ((i.types[0].bitfield.reg8
3060 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3061 || (i.types[1].bitfield.reg8
3062 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3063 || ((i.types[0].bitfield.reg8
3064 || i.types[1].bitfield.reg8)
3065 && i.rex != 0))
3066 {
3067 int x;
3068
3069 i.rex |= REX_OPCODE;
3070 for (x = 0; x < 2; x++)
3071 {
3072 /* Look for 8 bit operand that uses old registers. */
3073 if (i.types[x].bitfield.reg8
3074 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3075 {
3076 /* In case it is "hi" register, give up. */
3077 if (i.op[x].regs->reg_num > 3)
3078 as_bad (_("can't encode register '%s%s' in an "
3079 "instruction requiring REX prefix."),
3080 register_prefix, i.op[x].regs->reg_name);
3081
3082 /* Otherwise it is equivalent to the extended register.
3083 Since the encoding doesn't change this is merely
3084 cosmetic cleanup for debug output. */
3085
3086 i.op[x].regs = i.op[x].regs + 8;
3087 }
3088 }
3089 }
3090
3091 if (i.rex != 0)
3092 add_prefix (REX_OPCODE | i.rex);
3093
3094 /* We are ready to output the insn. */
3095 output_insn ();
3096 }
3097
3098 static char *
3099 parse_insn (char *line, char *mnemonic)
3100 {
3101 char *l = line;
3102 char *token_start = l;
3103 char *mnem_p;
3104 int supported;
3105 const insn_template *t;
3106 char *dot_p = NULL;
3107
3108 /* Non-zero if we found a prefix only acceptable with string insns. */
3109 const char *expecting_string_instruction = NULL;
3110
3111 while (1)
3112 {
3113 mnem_p = mnemonic;
3114 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3115 {
3116 if (*mnem_p == '.')
3117 dot_p = mnem_p;
3118 mnem_p++;
3119 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3120 {
3121 as_bad (_("no such instruction: `%s'"), token_start);
3122 return NULL;
3123 }
3124 l++;
3125 }
3126 if (!is_space_char (*l)
3127 && *l != END_OF_INSN
3128 && (intel_syntax
3129 || (*l != PREFIX_SEPARATOR
3130 && *l != ',')))
3131 {
3132 as_bad (_("invalid character %s in mnemonic"),
3133 output_invalid (*l));
3134 return NULL;
3135 }
3136 if (token_start == l)
3137 {
3138 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3139 as_bad (_("expecting prefix; got nothing"));
3140 else
3141 as_bad (_("expecting mnemonic; got nothing"));
3142 return NULL;
3143 }
3144
3145 /* Look up instruction (or prefix) via hash table. */
3146 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3147
3148 if (*l != END_OF_INSN
3149 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3150 && current_templates
3151 && current_templates->start->opcode_modifier.isprefix)
3152 {
3153 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3154 {
3155 as_bad ((flag_code != CODE_64BIT
3156 ? _("`%s' is only supported in 64-bit mode")
3157 : _("`%s' is not supported in 64-bit mode")),
3158 current_templates->start->name);
3159 return NULL;
3160 }
3161 /* If we are in 16-bit mode, do not allow addr16 or data16.
3162 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3163 if ((current_templates->start->opcode_modifier.size16
3164 || current_templates->start->opcode_modifier.size32)
3165 && flag_code != CODE_64BIT
3166 && (current_templates->start->opcode_modifier.size32
3167 ^ (flag_code == CODE_16BIT)))
3168 {
3169 as_bad (_("redundant %s prefix"),
3170 current_templates->start->name);
3171 return NULL;
3172 }
3173 /* Add prefix, checking for repeated prefixes. */
3174 switch (add_prefix (current_templates->start->base_opcode))
3175 {
3176 case PREFIX_EXIST:
3177 return NULL;
3178 case PREFIX_REP:
3179 expecting_string_instruction = current_templates->start->name;
3180 break;
3181 default:
3182 break;
3183 }
3184 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3185 token_start = ++l;
3186 }
3187 else
3188 break;
3189 }
3190
3191 if (!current_templates)
3192 {
3193 /* Check if we should swap operand in encoding. */
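     /* A trailing ".s" (e.g. a mnemonic such as "movaps.s") asks for the
        operand-swapped alternative encoding; strip it and retry the lookup
        with the bare mnemonic.  */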
3194 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3195 i.swap_operand = 1;
3196 else
3197 goto check_suffix;
3198 mnem_p = dot_p;
3199 *dot_p = '\0';
3200 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3201 }
3202
3203 if (!current_templates)
3204 {
3205 check_suffix:
3206 /* See if we can get a match by trimming off a suffix. */
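     /* E.g. "movl" is not itself in the opcode table: the trailing 'l' is
        recorded in i.suffix and the "mov" templates are looked up
        instead.  */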
3207 switch (mnem_p[-1])
3208 {
3209 case WORD_MNEM_SUFFIX:
3210 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3211 i.suffix = SHORT_MNEM_SUFFIX;
3212 else
3213 case BYTE_MNEM_SUFFIX:
3214 case QWORD_MNEM_SUFFIX:
3215 i.suffix = mnem_p[-1];
3216 mnem_p[-1] = '\0';
3217 current_templates = (const templates *) hash_find (op_hash,
3218 mnemonic);
3219 break;
3220 case SHORT_MNEM_SUFFIX:
3221 case LONG_MNEM_SUFFIX:
3222 if (!intel_syntax)
3223 {
3224 i.suffix = mnem_p[-1];
3225 mnem_p[-1] = '\0';
3226 current_templates = (const templates *) hash_find (op_hash,
3227 mnemonic);
3228 }
3229 break;
3230
3231 /* Intel Syntax. */
3232 case 'd':
3233 if (intel_syntax)
3234 {
3235 if (intel_float_operand (mnemonic) == 1)
3236 i.suffix = SHORT_MNEM_SUFFIX;
3237 else
3238 i.suffix = LONG_MNEM_SUFFIX;
3239 mnem_p[-1] = '\0';
3240 current_templates = (const templates *) hash_find (op_hash,
3241 mnemonic);
3242 }
3243 break;
3244 }
3245 if (!current_templates)
3246 {
3247 as_bad (_("no such instruction: `%s'"), token_start);
3248 return NULL;
3249 }
3250 }
3251
3252 if (current_templates->start->opcode_modifier.jump
3253 || current_templates->start->opcode_modifier.jumpbyte)
3254 {
3255 /* Check for a branch hint. We allow ",pt" and ",pn" for
3256 predict taken and predict not taken respectively.
3257 I'm not sure that branch hints actually do anything on loop
3258 and jcxz insns (JumpByte) for current Pentium4 chips. They
3259 may work in the future and it doesn't hurt to accept them
3260 now. */
3261 if (l[0] == ',' && l[1] == 'p')
3262 {
3263 if (l[2] == 't')
3264 {
3265 if (!add_prefix (DS_PREFIX_OPCODE))
3266 return NULL;
3267 l += 3;
3268 }
3269 else if (l[2] == 'n')
3270 {
3271 if (!add_prefix (CS_PREFIX_OPCODE))
3272 return NULL;
3273 l += 3;
3274 }
3275 }
3276 }
3277 /* Any other comma loses. */
3278 if (*l == ',')
3279 {
3280 as_bad (_("invalid character %s in mnemonic"),
3281 output_invalid (*l));
3282 return NULL;
3283 }
3284
3285 /* Check if instruction is supported on specified architecture. */
3286 supported = 0;
3287 for (t = current_templates->start; t < current_templates->end; ++t)
3288 {
3289 supported |= cpu_flags_match (t);
3290 if (supported == CPU_FLAGS_PERFECT_MATCH)
3291 goto skip;
3292 }
3293
3294 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3295 {
3296 as_bad (flag_code == CODE_64BIT
3297 ? _("`%s' is not supported in 64-bit mode")
3298 : _("`%s' is only supported in 64-bit mode"),
3299 current_templates->start->name);
3300 return NULL;
3301 }
3302 if (supported != CPU_FLAGS_PERFECT_MATCH)
3303 {
3304 as_bad (_("`%s' is not supported on `%s%s'"),
3305 current_templates->start->name,
3306 cpu_arch_name ? cpu_arch_name : default_arch,
3307 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3308 return NULL;
3309 }
3310
3311 skip:
3312 if (!cpu_arch_flags.bitfield.cpui386
3313 && (flag_code != CODE_16BIT))
3314 {
3315 as_warn (_("use .code16 to ensure correct addressing mode"));
3316 }
3317
3318 /* Check for rep/repne without a string instruction. */
3319 if (expecting_string_instruction)
3320 {
3321 static templates override;
3322
3323 for (t = current_templates->start; t < current_templates->end; ++t)
3324 if (t->opcode_modifier.isstring)
3325 break;
3326 if (t >= current_templates->end)
3327 {
3328 as_bad (_("expecting string instruction after `%s'"),
3329 expecting_string_instruction);
3330 return NULL;
3331 }
3332 for (override.start = t; t < current_templates->end; ++t)
3333 if (!t->opcode_modifier.isstring)
3334 break;
3335 override.end = t;
3336 current_templates = &override;
3337 }
3338
3339 return l;
3340 }
3341
3342 static char *
3343 parse_operands (char *l, const char *mnemonic)
3344 {
3345 char *token_start;
3346
3347 /* 1 if operand is pending after ','. */
3348 unsigned int expecting_operand = 0;
3349
3350 /* Non-zero if operand parens not balanced. */
3351 unsigned int paren_not_balanced;
3352
3353 while (*l != END_OF_INSN)
3354 {
3355 /* Skip optional white space before operand. */
3356 if (is_space_char (*l))
3357 ++l;
3358 if (!is_operand_char (*l) && *l != END_OF_INSN)
3359 {
3360 as_bad (_("invalid character %s before operand %d"),
3361 output_invalid (*l),
3362 i.operands + 1);
3363 return NULL;
3364 }
3365 token_start = l; /* after white space */
3366 paren_not_balanced = 0;
3367 while (paren_not_balanced || *l != ',')
3368 {
3369 if (*l == END_OF_INSN)
3370 {
3371 if (paren_not_balanced)
3372 {
3373 if (!intel_syntax)
3374 as_bad (_("unbalanced parenthesis in operand %d."),
3375 i.operands + 1);
3376 else
3377 as_bad (_("unbalanced brackets in operand %d."),
3378 i.operands + 1);
3379 return NULL;
3380 }
3381 else
3382 break; /* we are done */
3383 }
3384 else if (!is_operand_char (*l) && !is_space_char (*l))
3385 {
3386 as_bad (_("invalid character %s in operand %d"),
3387 output_invalid (*l),
3388 i.operands + 1);
3389 return NULL;
3390 }
3391 if (!intel_syntax)
3392 {
3393 if (*l == '(')
3394 ++paren_not_balanced;
3395 if (*l == ')')
3396 --paren_not_balanced;
3397 }
3398 else
3399 {
3400 if (*l == '[')
3401 ++paren_not_balanced;
3402 if (*l == ']')
3403 --paren_not_balanced;
3404 }
3405 l++;
3406 }
3407 if (l != token_start)
3408 { /* Yes, we've read in another operand. */
3409 unsigned int operand_ok;
3410 this_operand = i.operands++;
3411 i.types[this_operand].bitfield.unspecified = 1;
3412 if (i.operands > MAX_OPERANDS)
3413 {
3414 as_bad (_("spurious operands; (%d operands/instruction max)"),
3415 MAX_OPERANDS);
3416 return NULL;
3417 }
3418 /* Now parse operand adding info to 'i' as we go along. */
3419 END_STRING_AND_SAVE (l);
3420
3421 if (intel_syntax)
3422 operand_ok =
3423 i386_intel_operand (token_start,
3424 intel_float_operand (mnemonic));
3425 else
3426 operand_ok = i386_att_operand (token_start);
3427
3428 RESTORE_END_STRING (l);
3429 if (!operand_ok)
3430 return NULL;
3431 }
3432 else
3433 {
3434 if (expecting_operand)
3435 {
3436 expecting_operand_after_comma:
3437 as_bad (_("expecting operand after ','; got nothing"));
3438 return NULL;
3439 }
3440 if (*l == ',')
3441 {
3442 as_bad (_("expecting operand before ','; got nothing"));
3443 return NULL;
3444 }
3445 }
3446
3447 /* Now *l must be either ',' or END_OF_INSN. */
3448 if (*l == ',')
3449 {
3450 if (*++l == END_OF_INSN)
3451 {
3452 /* Just skip it, if it's \n complain. */
3453 goto expecting_operand_after_comma;
3454 }
3455 expecting_operand = 1;
3456 }
3457 }
3458 return l;
3459 }
3460
3461 static void
3462 swap_2_operands (int xchg1, int xchg2)
3463 {
3464 union i386_op temp_op;
3465 i386_operand_type temp_type;
3466 enum bfd_reloc_code_real temp_reloc;
3467
3468 temp_type = i.types[xchg2];
3469 i.types[xchg2] = i.types[xchg1];
3470 i.types[xchg1] = temp_type;
3471 temp_op = i.op[xchg2];
3472 i.op[xchg2] = i.op[xchg1];
3473 i.op[xchg1] = temp_op;
3474 temp_reloc = i.reloc[xchg2];
3475 i.reloc[xchg2] = i.reloc[xchg1];
3476 i.reloc[xchg1] = temp_reloc;
3477 }
3478
3479 static void
3480 swap_operands (void)
3481 {
3482 switch (i.operands)
3483 {
3484 case 5:
3485 case 4:
3486 swap_2_operands (1, i.operands - 2);
3487 case 3:
3488 case 2:
3489 swap_2_operands (0, i.operands - 1);
3490 break;
3491 default:
3492 abort ();
3493 }
3494
3495 if (i.mem_operands == 2)
3496 {
3497 const seg_entry *temp_seg;
3498 temp_seg = i.seg[0];
3499 i.seg[0] = i.seg[1];
3500 i.seg[1] = temp_seg;
3501 }
3502 }
3503
3504 /* Try to ensure constant immediates are represented in the smallest
3505 opcode possible. */
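     /* For instance the constant in "addl $3, %ebx" is classified as Imm8S
        here so that match_template can pick the sign-extended one-byte
        immediate form instead of a full 32-bit immediate.  */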
3506 static void
3507 optimize_imm (void)
3508 {
3509 char guess_suffix = 0;
3510 int op;
3511
3512 if (i.suffix)
3513 guess_suffix = i.suffix;
3514 else if (i.reg_operands)
3515 {
3516 /* Figure out a suffix from the last register operand specified.
3517 We can't do this properly yet, i.e. excluding InOutPortReg,
3518 but the following works for instructions with immediates.
3519 In any case, we can't set i.suffix yet. */
3520 for (op = i.operands; --op >= 0;)
3521 if (i.types[op].bitfield.reg8)
3522 {
3523 guess_suffix = BYTE_MNEM_SUFFIX;
3524 break;
3525 }
3526 else if (i.types[op].bitfield.reg16)
3527 {
3528 guess_suffix = WORD_MNEM_SUFFIX;
3529 break;
3530 }
3531 else if (i.types[op].bitfield.reg32)
3532 {
3533 guess_suffix = LONG_MNEM_SUFFIX;
3534 break;
3535 }
3536 else if (i.types[op].bitfield.reg64)
3537 {
3538 guess_suffix = QWORD_MNEM_SUFFIX;
3539 break;
3540 }
3541 }
3542 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3543 guess_suffix = WORD_MNEM_SUFFIX;
3544
3545 for (op = i.operands; --op >= 0;)
3546 if (operand_type_check (i.types[op], imm))
3547 {
3548 switch (i.op[op].imms->X_op)
3549 {
3550 case O_constant:
3551 /* If a suffix is given, this operand may be shortened. */
3552 switch (guess_suffix)
3553 {
3554 case LONG_MNEM_SUFFIX:
3555 i.types[op].bitfield.imm32 = 1;
3556 i.types[op].bitfield.imm64 = 1;
3557 break;
3558 case WORD_MNEM_SUFFIX:
3559 i.types[op].bitfield.imm16 = 1;
3560 i.types[op].bitfield.imm32 = 1;
3561 i.types[op].bitfield.imm32s = 1;
3562 i.types[op].bitfield.imm64 = 1;
3563 break;
3564 case BYTE_MNEM_SUFFIX:
3565 i.types[op].bitfield.imm8 = 1;
3566 i.types[op].bitfield.imm8s = 1;
3567 i.types[op].bitfield.imm16 = 1;
3568 i.types[op].bitfield.imm32 = 1;
3569 i.types[op].bitfield.imm32s = 1;
3570 i.types[op].bitfield.imm64 = 1;
3571 break;
3572 }
3573
3574 /* If this operand is at most 16 bits, convert it
3575 to a signed 16 bit number before trying to see
3576 whether it will fit in an even smaller size.
3577 This allows a 16-bit operand such as $0xffe0 to
3578 be recognised as within Imm8S range. */
3579 if ((i.types[op].bitfield.imm16)
3580 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3581 {
3582 i.op[op].imms->X_add_number =
3583 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3584 }
3585 if ((i.types[op].bitfield.imm32)
3586 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3587 == 0))
3588 {
3589 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3590 ^ ((offsetT) 1 << 31))
3591 - ((offsetT) 1 << 31));
3592 }
3593 i.types[op]
3594 = operand_type_or (i.types[op],
3595 smallest_imm_type (i.op[op].imms->X_add_number));
3596
3597 /* We must avoid matching of Imm32 templates when 64bit
3598 only immediate is available. */
3599 if (guess_suffix == QWORD_MNEM_SUFFIX)
3600 i.types[op].bitfield.imm32 = 0;
3601 break;
3602
3603 case O_absent:
3604 case O_register:
3605 abort ();
3606
3607 /* Symbols and expressions. */
3608 default:
3609 /* Convert symbolic operand to proper sizes for matching, but don't
3610 prevent matching a set of insns that only supports sizes other
3611 than those matching the insn suffix. */
3612 {
3613 i386_operand_type mask, allowed;
3614 const insn_template *t;
3615
3616 operand_type_set (&mask, 0);
3617 operand_type_set (&allowed, 0);
3618
3619 for (t = current_templates->start;
3620 t < current_templates->end;
3621 ++t)
3622 allowed = operand_type_or (allowed,
3623 t->operand_types[op]);
3624 switch (guess_suffix)
3625 {
3626 case QWORD_MNEM_SUFFIX:
3627 mask.bitfield.imm64 = 1;
3628 mask.bitfield.imm32s = 1;
3629 break;
3630 case LONG_MNEM_SUFFIX:
3631 mask.bitfield.imm32 = 1;
3632 break;
3633 case WORD_MNEM_SUFFIX:
3634 mask.bitfield.imm16 = 1;
3635 break;
3636 case BYTE_MNEM_SUFFIX:
3637 mask.bitfield.imm8 = 1;
3638 break;
3639 default:
3640 break;
3641 }
3642 allowed = operand_type_and (mask, allowed);
3643 if (!operand_type_all_zero (&allowed))
3644 i.types[op] = operand_type_and (i.types[op], mask);
3645 }
3646 break;
3647 }
3648 }
3649 }
3650
3651 /* Try to use the smallest displacement type too. */
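     /* For instance the 4 in "movl %eax, 4(%ebp)" gains the Disp8 bit so
        the one-byte displacement encoding can be chosen, and an explicit
        zero displacement with a base or index register ("0(%ebp)") is
        dropped from the operand list altogether.  */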
3652 static void
3653 optimize_disp (void)
3654 {
3655 int op;
3656
3657 for (op = i.operands; --op >= 0;)
3658 if (operand_type_check (i.types[op], disp))
3659 {
3660 if (i.op[op].disps->X_op == O_constant)
3661 {
3662 offsetT op_disp = i.op[op].disps->X_add_number;
3663
3664 if (i.types[op].bitfield.disp16
3665 && (op_disp & ~(offsetT) 0xffff) == 0)
3666 {
3667 /* If this operand is at most 16 bits, convert
3668 to a signed 16 bit number and don't use 64bit
3669 displacement. */
3670 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3671 i.types[op].bitfield.disp64 = 0;
3672 }
3673 if (i.types[op].bitfield.disp32
3674 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3675 {
3676 /* If this operand is at most 32 bits, convert
3677 to a signed 32 bit number and don't use 64bit
3678 displacement. */
3679 op_disp &= (((offsetT) 2 << 31) - 1);
3680 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3681 i.types[op].bitfield.disp64 = 0;
3682 }
3683 if (!op_disp && i.types[op].bitfield.baseindex)
3684 {
3685 i.types[op].bitfield.disp8 = 0;
3686 i.types[op].bitfield.disp16 = 0;
3687 i.types[op].bitfield.disp32 = 0;
3688 i.types[op].bitfield.disp32s = 0;
3689 i.types[op].bitfield.disp64 = 0;
3690 i.op[op].disps = 0;
3691 i.disp_operands--;
3692 }
3693 else if (flag_code == CODE_64BIT)
3694 {
3695 if (fits_in_signed_long (op_disp))
3696 {
3697 i.types[op].bitfield.disp64 = 0;
3698 i.types[op].bitfield.disp32s = 1;
3699 }
3700 if (i.prefix[ADDR_PREFIX]
3701 && fits_in_unsigned_long (op_disp))
3702 i.types[op].bitfield.disp32 = 1;
3703 }
3704 if ((i.types[op].bitfield.disp32
3705 || i.types[op].bitfield.disp32s
3706 || i.types[op].bitfield.disp16)
3707 && fits_in_signed_byte (op_disp))
3708 i.types[op].bitfield.disp8 = 1;
3709 }
3710 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3711 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3712 {
3713 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3714 i.op[op].disps, 0, i.reloc[op]);
3715 i.types[op].bitfield.disp8 = 0;
3716 i.types[op].bitfield.disp16 = 0;
3717 i.types[op].bitfield.disp32 = 0;
3718 i.types[op].bitfield.disp32s = 0;
3719 i.types[op].bitfield.disp64 = 0;
3720 }
3721 else
3722 /* We only support 64bit displacement on constants. */
3723 i.types[op].bitfield.disp64 = 0;
3724 }
3725 }
3726
3727 static const insn_template *
3728 match_template (void)
3729 {
3730 /* Points to template once we've found it. */
3731 const insn_template *t;
3732 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3733 i386_operand_type overlap4;
3734 unsigned int found_reverse_match;
3735 i386_opcode_modifier suffix_check;
3736 i386_operand_type operand_types [MAX_OPERANDS];
3737 int addr_prefix_disp;
3738 unsigned int j;
3739 unsigned int found_cpu_match;
3740 unsigned int check_register;
3741
3742 #if MAX_OPERANDS != 5
3743 # error "MAX_OPERANDS must be 5."
3744 #endif
3745
3746 found_reverse_match = 0;
3747 addr_prefix_disp = -1;
3748
3749 memset (&suffix_check, 0, sizeof (suffix_check));
3750 if (i.suffix == BYTE_MNEM_SUFFIX)
3751 suffix_check.no_bsuf = 1;
3752 else if (i.suffix == WORD_MNEM_SUFFIX)
3753 suffix_check.no_wsuf = 1;
3754 else if (i.suffix == SHORT_MNEM_SUFFIX)
3755 suffix_check.no_ssuf = 1;
3756 else if (i.suffix == LONG_MNEM_SUFFIX)
3757 suffix_check.no_lsuf = 1;
3758 else if (i.suffix == QWORD_MNEM_SUFFIX)
3759 suffix_check.no_qsuf = 1;
3760 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3761 suffix_check.no_ldsuf = 1;
3762
3763 for (t = current_templates->start; t < current_templates->end; t++)
3764 {
3765 addr_prefix_disp = -1;
3766
3767 /* Must have right number of operands. */
3768 if (i.operands != t->operands)
3769 continue;
3770
3771 /* Check processor support. */
3772 found_cpu_match = (cpu_flags_match (t)
3773 == CPU_FLAGS_PERFECT_MATCH);
3774 if (!found_cpu_match)
3775 continue;
3776
3777 /* Check old gcc support. */
3778 if (!old_gcc && t->opcode_modifier.oldgcc)
3779 continue;
3780
3781 /* Check AT&T mnemonic. */
3782 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3783 continue;
3784
3785 /* Check AT&T syntax Intel syntax. */
3786 if ((intel_syntax && t->opcode_modifier.attsyntax)
3787 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3788 continue;
3789
3790 /* Check the suffix, except for some instructions in intel mode. */
3791 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3792 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3793 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3794 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3795 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3796 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3797 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3798 continue;
3799
3800 if (!operand_size_match (t))
3801 continue;
3802
3803 for (j = 0; j < MAX_OPERANDS; j++)
3804 operand_types[j] = t->operand_types[j];
3805
3806 /* In general, don't allow 64-bit operands in 32-bit mode. */
3807 if (i.suffix == QWORD_MNEM_SUFFIX
3808 && flag_code != CODE_64BIT
3809 && (intel_syntax
3810 ? (!t->opcode_modifier.ignoresize
3811 && !intel_float_operand (t->name))
3812 : intel_float_operand (t->name) != 2)
3813 && ((!operand_types[0].bitfield.regmmx
3814 && !operand_types[0].bitfield.regxmm
3815 && !operand_types[0].bitfield.regymm)
3816 || (!operand_types[t->operands > 1].bitfield.regmmx
3817 && !!operand_types[t->operands > 1].bitfield.regxmm
3818 && !!operand_types[t->operands > 1].bitfield.regymm))
3819 && (t->base_opcode != 0x0fc7
3820 || t->extension_opcode != 1 /* cmpxchg8b */))
3821 continue;
3822
3823 /* In general, don't allow 32-bit operands on pre-386. */
3824 else if (i.suffix == LONG_MNEM_SUFFIX
3825 && !cpu_arch_flags.bitfield.cpui386
3826 && (intel_syntax
3827 ? (!t->opcode_modifier.ignoresize
3828 && !intel_float_operand (t->name))
3829 : intel_float_operand (t->name) != 2)
3830 && ((!operand_types[0].bitfield.regmmx
3831 && !operand_types[0].bitfield.regxmm)
3832 || (!operand_types[t->operands > 1].bitfield.regmmx
3833 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3834 continue;
3835
3836 /* Do not verify operands when there are none. */
3837 else
3838 {
3839 if (!t->operands)
3840 /* We've found a match; break out of loop. */
3841 break;
3842 }
3843
3844 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3845 into Disp32/Disp16/Disp32 operand. */
3846 if (i.prefix[ADDR_PREFIX] != 0)
3847 {
3848 /* There should be only one Disp operand. */
3849 switch (flag_code)
3850 {
3851 case CODE_16BIT:
3852 for (j = 0; j < MAX_OPERANDS; j++)
3853 {
3854 if (operand_types[j].bitfield.disp16)
3855 {
3856 addr_prefix_disp = j;
3857 operand_types[j].bitfield.disp32 = 1;
3858 operand_types[j].bitfield.disp16 = 0;
3859 break;
3860 }
3861 }
3862 break;
3863 case CODE_32BIT:
3864 for (j = 0; j < MAX_OPERANDS; j++)
3865 {
3866 if (operand_types[j].bitfield.disp32)
3867 {
3868 addr_prefix_disp = j;
3869 operand_types[j].bitfield.disp32 = 0;
3870 operand_types[j].bitfield.disp16 = 1;
3871 break;
3872 }
3873 }
3874 break;
3875 case CODE_64BIT:
3876 for (j = 0; j < MAX_OPERANDS; j++)
3877 {
3878 if (operand_types[j].bitfield.disp64)
3879 {
3880 addr_prefix_disp = j;
3881 operand_types[j].bitfield.disp64 = 0;
3882 operand_types[j].bitfield.disp32 = 1;
3883 break;
3884 }
3885 }
3886 break;
3887 }
3888 }
3889
3890 /* We check register size only if size of operands can be
3891 encoded the canonical way. */
3892 check_register = t->opcode_modifier.w;
3893 overlap0 = operand_type_and (i.types[0], operand_types[0]);
3894 switch (t->operands)
3895 {
3896 case 1:
3897 if (!operand_type_match (overlap0, i.types[0]))
3898 continue;
3899 break;
3900 case 2:
3901 /* xchg %eax, %eax is a special case. It is an alias for nop
3902 only in 32bit mode and we can use opcode 0x90. In 64bit
3903 mode, we can't use 0x90 for xchg %eax, %eax since it should
3904 zero-extend %eax to %rax. */
3905 if (flag_code == CODE_64BIT
3906 && t->base_opcode == 0x90
3907 && operand_type_equal (&i.types [0], &acc32)
3908 && operand_type_equal (&i.types [1], &acc32))
3909 continue;
3910 if (i.swap_operand)
3911 {
3912 /* If we swap operand in encoding, we either match
3913 the next one or reverse direction of operands. */
3914 if (t->opcode_modifier.s)
3915 continue;
3916 else if (t->opcode_modifier.d)
3917 goto check_reverse;
3918 }
3919
3920 case 3:
3921 /* If we swap operand in encoding, we match the next one. */
3922 if (i.swap_operand && t->opcode_modifier.s)
3923 continue;
3924 case 4:
3925 case 5:
3926 overlap1 = operand_type_and (i.types[1], operand_types[1]);
3927 if (!operand_type_match (overlap0, i.types[0])
3928 || !operand_type_match (overlap1, i.types[1])
3929 || (check_register
3930 && !operand_type_register_match (overlap0, i.types[0],
3931 operand_types[0],
3932 overlap1, i.types[1],
3933 operand_types[1])))
3934 {
3935 /* Check if other direction is valid ... */
3936 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
3937 continue;
3938
3939 check_reverse:
3940 /* Try reversing direction of operands. */
3941 overlap0 = operand_type_and (i.types[0], operand_types[1]);
3942 overlap1 = operand_type_and (i.types[1], operand_types[0]);
3943 if (!operand_type_match (overlap0, i.types[0])
3944 || !operand_type_match (overlap1, i.types[1])
3945 || (check_register
3946 && !operand_type_register_match (overlap0,
3947 i.types[0],
3948 operand_types[1],
3949 overlap1,
3950 i.types[1],
3951 operand_types[0])))
3952 {
3953 /* Does not match either direction. */
3954 continue;
3955 }
3956 /* found_reverse_match holds which of D or FloatDR
3957 we've found. */
3958 if (t->opcode_modifier.d)
3959 found_reverse_match = Opcode_D;
3960 else if (t->opcode_modifier.floatd)
3961 found_reverse_match = Opcode_FloatD;
3962 else
3963 found_reverse_match = 0;
3964 if (t->opcode_modifier.floatr)
3965 found_reverse_match |= Opcode_FloatR;
3966 }
3967 else
3968 {
3969 /* Found a forward 2 operand match here. */
3970 switch (t->operands)
3971 {
3972 case 5:
3973 overlap4 = operand_type_and (i.types[4],
3974 operand_types[4]);
3975 case 4:
3976 overlap3 = operand_type_and (i.types[3],
3977 operand_types[3]);
3978 case 3:
3979 overlap2 = operand_type_and (i.types[2],
3980 operand_types[2]);
3981 break;
3982 }
3983
3984 switch (t->operands)
3985 {
3986 case 5:
3987 if (!operand_type_match (overlap4, i.types[4])
3988 || !operand_type_register_match (overlap3,
3989 i.types[3],
3990 operand_types[3],
3991 overlap4,
3992 i.types[4],
3993 operand_types[4]))
3994 continue;
3995 case 4:
3996 if (!operand_type_match (overlap3, i.types[3])
3997 || (check_register
3998 && !operand_type_register_match (overlap2,
3999 i.types[2],
4000 operand_types[2],
4001 overlap3,
4002 i.types[3],
4003 operand_types[3])))
4004 continue;
4005 case 3:
4006 /* Here we make use of the fact that there are no
4007 reverse match 3 operand instructions, and all 3
4008 operand instructions only need to be checked for
4009 register consistency between operands 2 and 3. */
4010 if (!operand_type_match (overlap2, i.types[2])
4011 || (check_register
4012 && !operand_type_register_match (overlap1,
4013 i.types[1],
4014 operand_types[1],
4015 overlap2,
4016 i.types[2],
4017 operand_types[2])))
4018 continue;
4019 break;
4020 }
4021 }
4022 /* Found either forward/reverse 2, 3 or 4 operand match here:
4023 slip through to break. */
4024 }
4025 if (!found_cpu_match)
4026 {
4027 found_reverse_match = 0;
4028 continue;
4029 }
4030
4031 /* We've found a match; break out of loop. */
4032 break;
4033 }
4034
4035 if (t == current_templates->end)
4036 {
4037 /* We found no match. */
4038 if (intel_syntax)
4039 as_bad (_("ambiguous operand size or operands invalid for `%s'"),
4040 current_templates->start->name);
4041 else
4042 as_bad (_("suffix or operands invalid for `%s'"),
4043 current_templates->start->name);
4044 return NULL;
4045 }
4046
4047 if (!quiet_warnings)
4048 {
4049 if (!intel_syntax
4050 && (i.types[0].bitfield.jumpabsolute
4051 != operand_types[0].bitfield.jumpabsolute))
4052 {
4053 as_warn (_("indirect %s without `*'"), t->name);
4054 }
4055
4056 if (t->opcode_modifier.isprefix
4057 && t->opcode_modifier.ignoresize)
4058 {
4059 /* Warn them that a data or address size prefix doesn't
4060 affect assembly of the next line of code. */
4061 as_warn (_("stand-alone `%s' prefix"), t->name);
4062 }
4063 }
4064
4065 /* Copy the template we found. */
4066 i.tm = *t;
4067
4068 if (addr_prefix_disp != -1)
4069 i.tm.operand_types[addr_prefix_disp]
4070 = operand_types[addr_prefix_disp];
4071
4072 if (found_reverse_match)
4073 {
4074 /* If we found a reverse match we must alter the opcode
4075 direction bit. found_reverse_match holds bits to change
4076 (different for int & float insns). */
4077
4078 i.tm.base_opcode ^= found_reverse_match;
4079
4080 i.tm.operand_types[0] = operand_types[1];
4081 i.tm.operand_types[1] = operand_types[0];
4082 }
4083
4084 return t;
4085 }
4086
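/* Check the segment override rules for string instructions: the operand
   that is required to use %es may not carry a different segment override;
   a legal override on the other operand is copied into i.seg[0] so later
   code can treat it like any other instruction.  Returns 0 on error. */
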
4087 static int
4088 check_string (void)
4089 {
4090 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4091 if (i.tm.operand_types[mem_op].bitfield.esseg)
4092 {
4093 if (i.seg[0] != NULL && i.seg[0] != &es)
4094 {
4095 as_bad (_("`%s' operand %d must use `%ses' segment"),
4096 i.tm.name,
4097 mem_op + 1,
4098 register_prefix);
4099 return 0;
4100 }
4101 /* There's only ever one segment override allowed per instruction.
4102 This instruction possibly has a legal segment override on the
4103 second operand, so copy the segment to where non-string
4104 instructions store it, allowing common code. */
4105 i.seg[0] = i.seg[1];
4106 }
4107 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4108 {
4109 if (i.seg[1] != NULL && i.seg[1] != &es)
4110 {
4111 as_bad (_("`%s' operand %d must use `%ses' segment"),
4112 i.tm.name,
4113 mem_op + 2,
4114 register_prefix);
4115 return 0;
4116 }
4117 }
4118 return 1;
4119 }
4120
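/* Work out the operand-size suffix: use an explicit or template-supplied
   suffix, otherwise derive one from the register operands or the current
   mode, then adjust the opcode, the operand/address-size prefixes and
   REX.W to match.  Returns 0 on error. */
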
4121 static int
4122 process_suffix (void)
4123 {
4124 /* If matched instruction specifies an explicit instruction mnemonic
4125 suffix, use it. */
4126 if (i.tm.opcode_modifier.size16)
4127 i.suffix = WORD_MNEM_SUFFIX;
4128 else if (i.tm.opcode_modifier.size32)
4129 i.suffix = LONG_MNEM_SUFFIX;
4130 else if (i.tm.opcode_modifier.size64)
4131 i.suffix = QWORD_MNEM_SUFFIX;
4132 else if (i.reg_operands)
4133 {
4134 /* If there's no instruction mnemonic suffix we try to invent one
4135 based on register operands. */
4136 if (!i.suffix)
4137 {
4138 	      /* We take i.suffix from the last register operand specified.
4139 Destination register type is more significant than source
4140 register type. crc32 in SSE4.2 prefers source register
4141 type. */
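	      /* For example, AT&T `crc32 %ax, %ecx' takes the `w' suffix
		 from its 16-bit source even though the destination is
		 32-bit. */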
4142 if (i.tm.base_opcode == 0xf20f38f1)
4143 {
4144 if (i.types[0].bitfield.reg16)
4145 i.suffix = WORD_MNEM_SUFFIX;
4146 else if (i.types[0].bitfield.reg32)
4147 i.suffix = LONG_MNEM_SUFFIX;
4148 else if (i.types[0].bitfield.reg64)
4149 i.suffix = QWORD_MNEM_SUFFIX;
4150 }
4151 else if (i.tm.base_opcode == 0xf20f38f0)
4152 {
4153 if (i.types[0].bitfield.reg8)
4154 i.suffix = BYTE_MNEM_SUFFIX;
4155 }
4156
4157 if (!i.suffix)
4158 {
4159 int op;
4160
4161 if (i.tm.base_opcode == 0xf20f38f1
4162 || i.tm.base_opcode == 0xf20f38f0)
4163 {
4164 /* We have to know the operand size for crc32. */
4165 		  as_bad (_("ambiguous memory operand size for `%s'"),
4166 i.tm.name);
4167 return 0;
4168 }
4169
4170 for (op = i.operands; --op >= 0;)
4171 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4172 {
4173 if (i.types[op].bitfield.reg8)
4174 {
4175 i.suffix = BYTE_MNEM_SUFFIX;
4176 break;
4177 }
4178 else if (i.types[op].bitfield.reg16)
4179 {
4180 i.suffix = WORD_MNEM_SUFFIX;
4181 break;
4182 }
4183 else if (i.types[op].bitfield.reg32)
4184 {
4185 i.suffix = LONG_MNEM_SUFFIX;
4186 break;
4187 }
4188 else if (i.types[op].bitfield.reg64)
4189 {
4190 i.suffix = QWORD_MNEM_SUFFIX;
4191 break;
4192 }
4193 }
4194 }
4195 }
4196 else if (i.suffix == BYTE_MNEM_SUFFIX)
4197 {
4198 if (intel_syntax
4199 && i.tm.opcode_modifier.ignoresize
4200 && i.tm.opcode_modifier.no_bsuf)
4201 i.suffix = 0;
4202 else if (!check_byte_reg ())
4203 return 0;
4204 }
4205 else if (i.suffix == LONG_MNEM_SUFFIX)
4206 {
4207 if (intel_syntax
4208 && i.tm.opcode_modifier.ignoresize
4209 && i.tm.opcode_modifier.no_lsuf)
4210 i.suffix = 0;
4211 else if (!check_long_reg ())
4212 return 0;
4213 }
4214 else if (i.suffix == QWORD_MNEM_SUFFIX)
4215 {
4216 if (intel_syntax
4217 && i.tm.opcode_modifier.ignoresize
4218 && i.tm.opcode_modifier.no_qsuf)
4219 i.suffix = 0;
4220 else if (!check_qword_reg ())
4221 return 0;
4222 }
4223 else if (i.suffix == WORD_MNEM_SUFFIX)
4224 {
4225 if (intel_syntax
4226 && i.tm.opcode_modifier.ignoresize
4227 && i.tm.opcode_modifier.no_wsuf)
4228 i.suffix = 0;
4229 else if (!check_word_reg ())
4230 return 0;
4231 }
4232 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4233 || i.suffix == YMMWORD_MNEM_SUFFIX)
4234 {
4235 /* Skip if the instruction has x/y suffix. match_template
4236 should check if it is a valid suffix. */
4237 }
4238 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4239 /* Do nothing if the instruction is going to ignore the prefix. */
4240 ;
4241 else
4242 abort ();
4243 }
4244 else if (i.tm.opcode_modifier.defaultsize
4245 && !i.suffix
4246 /* exclude fldenv/frstor/fsave/fstenv */
4247 && i.tm.opcode_modifier.no_ssuf)
4248 {
4249 i.suffix = stackop_size;
4250 }
4251 else if (intel_syntax
4252 && !i.suffix
4253 && (i.tm.operand_types[0].bitfield.jumpabsolute
4254 || i.tm.opcode_modifier.jumpbyte
4255 || i.tm.opcode_modifier.jumpintersegment
4256 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4257 && i.tm.extension_opcode <= 3)))
4258 {
4259 switch (flag_code)
4260 {
4261 case CODE_64BIT:
4262 if (!i.tm.opcode_modifier.no_qsuf)
4263 {
4264 i.suffix = QWORD_MNEM_SUFFIX;
4265 break;
4266 }
4267 case CODE_32BIT:
4268 if (!i.tm.opcode_modifier.no_lsuf)
4269 i.suffix = LONG_MNEM_SUFFIX;
4270 break;
4271 case CODE_16BIT:
4272 if (!i.tm.opcode_modifier.no_wsuf)
4273 i.suffix = WORD_MNEM_SUFFIX;
4274 break;
4275 }
4276 }
4277
4278 if (!i.suffix)
4279 {
4280 if (!intel_syntax)
4281 {
4282 if (i.tm.opcode_modifier.w)
4283 {
4284 as_bad (_("no instruction mnemonic suffix given and "
4285 "no register operands; can't size instruction"));
4286 return 0;
4287 }
4288 }
4289 else
4290 {
4291 unsigned int suffixes;
4292
4293 suffixes = !i.tm.opcode_modifier.no_bsuf;
4294 if (!i.tm.opcode_modifier.no_wsuf)
4295 suffixes |= 1 << 1;
4296 if (!i.tm.opcode_modifier.no_lsuf)
4297 suffixes |= 1 << 2;
4298 if (!i.tm.opcode_modifier.no_ldsuf)
4299 suffixes |= 1 << 3;
4300 if (!i.tm.opcode_modifier.no_ssuf)
4301 suffixes |= 1 << 4;
4302 if (!i.tm.opcode_modifier.no_qsuf)
4303 suffixes |= 1 << 5;
4304
4305 	  /* There is more than one suffix match. */
4306 if (i.tm.opcode_modifier.w
4307 || ((suffixes & (suffixes - 1))
4308 && !i.tm.opcode_modifier.defaultsize
4309 && !i.tm.opcode_modifier.ignoresize))
4310 {
4311 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4312 return 0;
4313 }
4314 }
4315 }
4316
4317 /* Change the opcode based on the operand size given by i.suffix;
4318 We don't need to change things for byte insns. */
4319
4320 if (i.suffix
4321 && i.suffix != BYTE_MNEM_SUFFIX
4322 && i.suffix != XMMWORD_MNEM_SUFFIX
4323 && i.suffix != YMMWORD_MNEM_SUFFIX)
4324 {
4325 /* It's not a byte, select word/dword operation. */
4326 if (i.tm.opcode_modifier.w)
4327 {
4328 if (i.tm.opcode_modifier.shortform)
4329 i.tm.base_opcode |= 8;
4330 else
4331 i.tm.base_opcode |= 1;
4332 }
4333
4334 /* Now select between word & dword operations via the operand
4335 size prefix, except for instructions that will ignore this
4336 prefix anyway. */
4337 if (i.tm.opcode_modifier.addrprefixop0)
4338 {
4339 /* The address size override prefix changes the size of the
4340 first operand. */
4341 if ((flag_code == CODE_32BIT
4342 && i.op->regs[0].reg_type.bitfield.reg16)
4343 || (flag_code != CODE_32BIT
4344 && i.op->regs[0].reg_type.bitfield.reg32))
4345 if (!add_prefix (ADDR_PREFIX_OPCODE))
4346 return 0;
4347 }
4348 else if (i.suffix != QWORD_MNEM_SUFFIX
4349 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4350 && !i.tm.opcode_modifier.ignoresize
4351 && !i.tm.opcode_modifier.floatmf
4352 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4353 || (flag_code == CODE_64BIT
4354 && i.tm.opcode_modifier.jumpbyte)))
4355 {
4356 unsigned int prefix = DATA_PREFIX_OPCODE;
4357
4358 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4359 prefix = ADDR_PREFIX_OPCODE;
4360
4361 if (!add_prefix (prefix))
4362 return 0;
4363 }
4364
4365 /* Set mode64 for an operand. */
4366 if (i.suffix == QWORD_MNEM_SUFFIX
4367 && flag_code == CODE_64BIT
4368 && !i.tm.opcode_modifier.norex64)
4369 {
4370 	  /* Special case for xchg %rax,%rax. It is a NOP and doesn't
4371 need rex64. cmpxchg8b is also a special case. */
4372 if (! (i.operands == 2
4373 && i.tm.base_opcode == 0x90
4374 && i.tm.extension_opcode == None
4375 && operand_type_equal (&i.types [0], &acc64)
4376 && operand_type_equal (&i.types [1], &acc64))
4377 && ! (i.operands == 1
4378 && i.tm.base_opcode == 0xfc7
4379 && i.tm.extension_opcode == 1
4380 && !operand_type_check (i.types [0], reg)
4381 && operand_type_check (i.types [0], anymem)))
4382 i.rex |= REX_W;
4383 }
4384
4385 /* Size floating point instruction. */
4386 if (i.suffix == LONG_MNEM_SUFFIX)
4387 if (i.tm.opcode_modifier.floatmf)
4388 i.tm.base_opcode ^= 4;
4389 }
4390
4391 return 1;
4392 }
4393
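/* Check that the register operands are consistent with a `b' suffix:
   8-bit registers are fine; wider registers whose low byte is directly
   addressable (%ax..%bx, %eax..%ebx, ...) are accepted with a warning
   outside 64-bit mode by using their low byte; anything else is an
   error. */
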
4394 static int
4395 check_byte_reg (void)
4396 {
4397 int op;
4398
4399 for (op = i.operands; --op >= 0;)
4400 {
4401 /* If this is an eight bit register, it's OK. If it's the 16 or
4402 32 bit version of an eight bit register, we will just use the
4403 low portion, and that's OK too. */
4404 if (i.types[op].bitfield.reg8)
4405 continue;
4406
4407 /* crc32 doesn't generate this warning. */
4408 if (i.tm.base_opcode == 0xf20f38f0)
4409 continue;
4410
4411 if ((i.types[op].bitfield.reg16
4412 || i.types[op].bitfield.reg32
4413 || i.types[op].bitfield.reg64)
4414 && i.op[op].regs->reg_num < 4)
4415 {
4416 /* Prohibit these changes in the 64bit mode, since the
4417 lowering is more complicated. */
4418 if (flag_code == CODE_64BIT
4419 && !i.tm.operand_types[op].bitfield.inoutportreg)
4420 {
4421 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4422 register_prefix, i.op[op].regs->reg_name,
4423 i.suffix);
4424 return 0;
4425 }
4426 #if REGISTER_WARNINGS
4427 if (!quiet_warnings
4428 && !i.tm.operand_types[op].bitfield.inoutportreg)
4429 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4430 register_prefix,
4431 (i.op[op].regs + (i.types[op].bitfield.reg16
4432 ? REGNAM_AL - REGNAM_AX
4433 : REGNAM_AL - REGNAM_EAX))->reg_name,
4434 register_prefix,
4435 i.op[op].regs->reg_name,
4436 i.suffix);
4437 #endif
4438 continue;
4439 }
4440 /* Any other register is bad. */
4441 if (i.types[op].bitfield.reg16
4442 || i.types[op].bitfield.reg32
4443 || i.types[op].bitfield.reg64
4444 || i.types[op].bitfield.regmmx
4445 || i.types[op].bitfield.regxmm
4446 || i.types[op].bitfield.regymm
4447 || i.types[op].bitfield.sreg2
4448 || i.types[op].bitfield.sreg3
4449 || i.types[op].bitfield.control
4450 || i.types[op].bitfield.debug
4451 || i.types[op].bitfield.test
4452 || i.types[op].bitfield.floatreg
4453 || i.types[op].bitfield.floatacc)
4454 {
4455 as_bad (_("`%s%s' not allowed with `%s%c'"),
4456 register_prefix,
4457 i.op[op].regs->reg_name,
4458 i.tm.name,
4459 i.suffix);
4460 return 0;
4461 }
4462 }
4463 return 1;
4464 }
4465
4466 static int
4467 check_long_reg (void)
4468 {
4469 int op;
4470
4471 for (op = i.operands; --op >= 0;)
4472 /* Reject eight bit registers, except where the template requires
4473 them. (eg. movzb) */
4474 if (i.types[op].bitfield.reg8
4475 && (i.tm.operand_types[op].bitfield.reg16
4476 || i.tm.operand_types[op].bitfield.reg32
4477 || i.tm.operand_types[op].bitfield.acc))
4478 {
4479 as_bad (_("`%s%s' not allowed with `%s%c'"),
4480 register_prefix,
4481 i.op[op].regs->reg_name,
4482 i.tm.name,
4483 i.suffix);
4484 return 0;
4485 }
4486 /* Warn if the e prefix on a general reg is missing. */
4487 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4488 && i.types[op].bitfield.reg16
4489 && (i.tm.operand_types[op].bitfield.reg32
4490 || i.tm.operand_types[op].bitfield.acc))
4491 {
4492 /* Prohibit these changes in the 64bit mode, since the
4493 lowering is more complicated. */
4494 if (flag_code == CODE_64BIT)
4495 {
4496 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4497 register_prefix, i.op[op].regs->reg_name,
4498 i.suffix);
4499 return 0;
4500 }
4501 #if REGISTER_WARNINGS
4502 else
4503 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4504 register_prefix,
4505 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4506 register_prefix,
4507 i.op[op].regs->reg_name,
4508 i.suffix);
4509 #endif
4510 }
4511 	    /* Error if the r prefix on a general reg is present. */
4512 else if (i.types[op].bitfield.reg64
4513 && (i.tm.operand_types[op].bitfield.reg32
4514 || i.tm.operand_types[op].bitfield.acc))
4515 {
4516 if (intel_syntax
4517 && i.tm.opcode_modifier.toqword
4518 && !i.types[0].bitfield.regxmm)
4519 {
4520 /* Convert to QWORD. We want REX byte. */
4521 i.suffix = QWORD_MNEM_SUFFIX;
4522 }
4523 else
4524 {
4525 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4526 register_prefix, i.op[op].regs->reg_name,
4527 i.suffix);
4528 return 0;
4529 }
4530 }
4531 return 1;
4532 }
4533
4534 static int
4535 check_qword_reg (void)
4536 {
4537 int op;
4538
4539 for (op = i.operands; --op >= 0; )
4540 /* Reject eight bit registers, except where the template requires
4541 them. (eg. movzb) */
4542 if (i.types[op].bitfield.reg8
4543 && (i.tm.operand_types[op].bitfield.reg16
4544 || i.tm.operand_types[op].bitfield.reg32
4545 || i.tm.operand_types[op].bitfield.acc))
4546 {
4547 as_bad (_("`%s%s' not allowed with `%s%c'"),
4548 register_prefix,
4549 i.op[op].regs->reg_name,
4550 i.tm.name,
4551 i.suffix);
4552 return 0;
4553 }
4554 	    /* Error if the r prefix on a general reg is missing. */
4555 else if ((i.types[op].bitfield.reg16
4556 || i.types[op].bitfield.reg32)
4557 && (i.tm.operand_types[op].bitfield.reg32
4558 || i.tm.operand_types[op].bitfield.acc))
4559 {
4560 /* Prohibit these changes in the 64bit mode, since the
4561 lowering is more complicated. */
4562 if (intel_syntax
4563 && i.tm.opcode_modifier.todword
4564 && !i.types[0].bitfield.regxmm)
4565 {
4566 /* Convert to DWORD. We don't want REX byte. */
4567 i.suffix = LONG_MNEM_SUFFIX;
4568 }
4569 else
4570 {
4571 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4572 register_prefix, i.op[op].regs->reg_name,
4573 i.suffix);
4574 return 0;
4575 }
4576 }
4577 return 1;
4578 }
4579
4580 static int
4581 check_word_reg (void)
4582 {
4583 int op;
4584 for (op = i.operands; --op >= 0;)
4585 /* Reject eight bit registers, except where the template requires
4586 them. (eg. movzb) */
4587 if (i.types[op].bitfield.reg8
4588 && (i.tm.operand_types[op].bitfield.reg16
4589 || i.tm.operand_types[op].bitfield.reg32
4590 || i.tm.operand_types[op].bitfield.acc))
4591 {
4592 as_bad (_("`%s%s' not allowed with `%s%c'"),
4593 register_prefix,
4594 i.op[op].regs->reg_name,
4595 i.tm.name,
4596 i.suffix);
4597 return 0;
4598 }
4599 /* Warn if the e prefix on a general reg is present. */
4600 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4601 && i.types[op].bitfield.reg32
4602 && (i.tm.operand_types[op].bitfield.reg16
4603 || i.tm.operand_types[op].bitfield.acc))
4604 {
4605 /* Prohibit these changes in the 64bit mode, since the
4606 lowering is more complicated. */
4607 if (flag_code == CODE_64BIT)
4608 {
4609 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4610 register_prefix, i.op[op].regs->reg_name,
4611 i.suffix);
4612 return 0;
4613 }
4614 #if REGISTER_WARNINGS
4615 	    else
4616 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4617 register_prefix,
4618 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4619 register_prefix,
4620 i.op[op].regs->reg_name,
4621 i.suffix);
4622 #endif
4623 }
4624 return 1;
4625 }
4626
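/* Narrow the type of immediate operand J to a single width, using the
   instruction suffix or, failing that, the operand size in effect.
   Returns 0 if the immediate size is still ambiguous. */
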
4627 static int
4628 update_imm (unsigned int j)
4629 {
4630 i386_operand_type overlap = i.types[j];
4631 if ((overlap.bitfield.imm8
4632 || overlap.bitfield.imm8s
4633 || overlap.bitfield.imm16
4634 || overlap.bitfield.imm32
4635 || overlap.bitfield.imm32s
4636 || overlap.bitfield.imm64)
4637 && !operand_type_equal (&overlap, &imm8)
4638 && !operand_type_equal (&overlap, &imm8s)
4639 && !operand_type_equal (&overlap, &imm16)
4640 && !operand_type_equal (&overlap, &imm32)
4641 && !operand_type_equal (&overlap, &imm32s)
4642 && !operand_type_equal (&overlap, &imm64))
4643 {
4644 if (i.suffix)
4645 {
4646 i386_operand_type temp;
4647
4648 operand_type_set (&temp, 0);
4649 if (i.suffix == BYTE_MNEM_SUFFIX)
4650 {
4651 temp.bitfield.imm8 = overlap.bitfield.imm8;
4652 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4653 }
4654 else if (i.suffix == WORD_MNEM_SUFFIX)
4655 temp.bitfield.imm16 = overlap.bitfield.imm16;
4656 else if (i.suffix == QWORD_MNEM_SUFFIX)
4657 {
4658 temp.bitfield.imm64 = overlap.bitfield.imm64;
4659 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4660 }
4661 else
4662 temp.bitfield.imm32 = overlap.bitfield.imm32;
4663 overlap = temp;
4664 }
4665 else if (operand_type_equal (&overlap, &imm16_32_32s)
4666 || operand_type_equal (&overlap, &imm16_32)
4667 || operand_type_equal (&overlap, &imm16_32s))
4668 {
4669 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4670 overlap = imm16;
4671 else
4672 overlap = imm32s;
4673 }
4674 if (!operand_type_equal (&overlap, &imm8)
4675 && !operand_type_equal (&overlap, &imm8s)
4676 && !operand_type_equal (&overlap, &imm16)
4677 && !operand_type_equal (&overlap, &imm32)
4678 && !operand_type_equal (&overlap, &imm32s)
4679 && !operand_type_equal (&overlap, &imm64))
4680 {
4681 as_bad (_("no instruction mnemonic suffix given; "
4682 "can't determine immediate size"));
4683 return 0;
4684 }
4685 }
4686 i.types[j] = overlap;
4687
4688 return 1;
4689 }
4690
4691 static int
4692 finalize_imm (void)
4693 {
4694 unsigned int j, n;
4695
4696 /* Update the first 2 immediate operands. */
4697 n = i.operands > 2 ? 2 : i.operands;
4698 if (n)
4699 {
4700 for (j = 0; j < n; j++)
4701 if (update_imm (j) == 0)
4702 return 0;
4703
4704 	      /* The 3rd operand can't be an immediate operand. */
4705 gas_assert (operand_type_check (i.types[2], imm) == 0);
4706 }
4707
4708 return 1;
4709 }
4710
4711 static int
4712 bad_implicit_operand (int xmm)
4713 {
4714 const char *ireg = xmm ? "xmm0" : "ymm0";
4715
4716 if (intel_syntax)
4717 as_bad (_("the last operand of `%s' must be `%s%s'"),
4718 i.tm.name, register_prefix, ireg);
4719 else
4720 as_bad (_("the first operand of `%s' must be `%s%s'"),
4721 i.tm.name, register_prefix, ireg);
4722 return 0;
4723 }
4724
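/* Post-process the operands of the chosen template: add or remove the
   implicit xmm0/ymm0 operand (SSE2AVX, FirstXmm0), duplicate the register
   for RegKludge forms, encode short-form register opcodes, build the
   ModRM/SIB bytes, and emit a segment prefix if the specified segment is
   not the default one. */
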
4725 static int
4726 process_operands (void)
4727 {
4728 /* Default segment register this instruction will use for memory
4729 accesses. 0 means unknown. This is only for optimizing out
4730 unnecessary segment overrides. */
4731 const seg_entry *default_seg = 0;
4732
4733 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4734 {
4735 unsigned int dupl = i.operands;
4736 unsigned int dest = dupl - 1;
4737 unsigned int j;
4738
4739 /* The destination must be an xmm register. */
4740 gas_assert (i.reg_operands
4741 && MAX_OPERANDS > dupl
4742 && operand_type_equal (&i.types[dest], &regxmm));
4743
4744 if (i.tm.opcode_modifier.firstxmm0)
4745 {
4746 /* The first operand is implicit and must be xmm0. */
4747 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4748 if (i.op[0].regs->reg_num != 0)
4749 return bad_implicit_operand (1);
4750
4751 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4752 {
4753 /* Keep xmm0 for instructions with VEX prefix and 3
4754 sources. */
4755 goto duplicate;
4756 }
4757 else
4758 {
4759 /* We remove the first xmm0 and keep the number of
4760 operands unchanged, which in fact duplicates the
4761 destination. */
4762 for (j = 1; j < i.operands; j++)
4763 {
4764 i.op[j - 1] = i.op[j];
4765 i.types[j - 1] = i.types[j];
4766 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4767 }
4768 }
4769 }
4770 else if (i.tm.opcode_modifier.implicit1stxmm0)
4771 {
4772 gas_assert ((MAX_OPERANDS - 1) > dupl
4773 && (i.tm.opcode_modifier.vexsources
4774 == VEX3SOURCES));
4775
4776 /* Add the implicit xmm0 for instructions with VEX prefix
4777 and 3 sources. */
4778 for (j = i.operands; j > 0; j--)
4779 {
4780 i.op[j] = i.op[j - 1];
4781 i.types[j] = i.types[j - 1];
4782 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4783 }
4784 i.op[0].regs
4785 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4786 i.types[0] = regxmm;
4787 i.tm.operand_types[0] = regxmm;
4788
4789 i.operands += 2;
4790 i.reg_operands += 2;
4791 i.tm.operands += 2;
4792
4793 dupl++;
4794 dest++;
4795 i.op[dupl] = i.op[dest];
4796 i.types[dupl] = i.types[dest];
4797 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4798 }
4799 else
4800 {
4801 duplicate:
4802 i.operands++;
4803 i.reg_operands++;
4804 i.tm.operands++;
4805
4806 i.op[dupl] = i.op[dest];
4807 i.types[dupl] = i.types[dest];
4808 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4809 }
4810
4811 if (i.tm.opcode_modifier.immext)
4812 process_immext ();
4813 }
4814 else if (i.tm.opcode_modifier.firstxmm0)
4815 {
4816 unsigned int j;
4817
4818 /* The first operand is implicit and must be xmm0/ymm0. */
4819 gas_assert (i.reg_operands
4820 && (operand_type_equal (&i.types[0], &regxmm)
4821 || operand_type_equal (&i.types[0], &regymm)));
4822 if (i.op[0].regs->reg_num != 0)
4823 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4824
4825 for (j = 1; j < i.operands; j++)
4826 {
4827 i.op[j - 1] = i.op[j];
4828 i.types[j - 1] = i.types[j];
4829
4830 /* We need to adjust fields in i.tm since they are used by
4831 build_modrm_byte. */
4832 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4833 }
4834
4835 i.operands--;
4836 i.reg_operands--;
4837 i.tm.operands--;
4838 }
4839 else if (i.tm.opcode_modifier.regkludge)
4840 {
4841 /* The imul $imm, %reg instruction is converted into
4842 imul $imm, %reg, %reg, and the clr %reg instruction
4843 is converted into xor %reg, %reg. */
4844
4845 unsigned int first_reg_op;
4846
4847 if (operand_type_check (i.types[0], reg))
4848 first_reg_op = 0;
4849 else
4850 first_reg_op = 1;
4851 /* Pretend we saw the extra register operand. */
4852 gas_assert (i.reg_operands == 1
4853 && i.op[first_reg_op + 1].regs == 0);
4854 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
4855 i.types[first_reg_op + 1] = i.types[first_reg_op];
4856 i.operands++;
4857 i.reg_operands++;
4858 }
4859
4860 if (i.tm.opcode_modifier.shortform)
4861 {
4862 if (i.types[0].bitfield.sreg2
4863 || i.types[0].bitfield.sreg3)
4864 {
4865 if (i.tm.base_opcode == POP_SEG_SHORT
4866 && i.op[0].regs->reg_num == 1)
4867 {
4868 as_bad (_("you can't `pop %scs'"), register_prefix);
4869 return 0;
4870 }
4871 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
4872 if ((i.op[0].regs->reg_flags & RegRex) != 0)
4873 i.rex |= REX_B;
4874 }
4875 else
4876 {
4877 /* The register or float register operand is in operand
4878 0 or 1. */
4879 unsigned int op;
4880
4881 if (i.types[0].bitfield.floatreg
4882 || operand_type_check (i.types[0], reg))
4883 op = 0;
4884 else
4885 op = 1;
4886 /* Register goes in low 3 bits of opcode. */
4887 i.tm.base_opcode |= i.op[op].regs->reg_num;
4888 if ((i.op[op].regs->reg_flags & RegRex) != 0)
4889 i.rex |= REX_B;
4890 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4891 {
4892 /* Warn about some common errors, but press on regardless.
4893 The first case can be generated by gcc (<= 2.8.1). */
4894 if (i.operands == 2)
4895 {
4896 /* Reversed arguments on faddp, fsubp, etc. */
4897 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
4898 register_prefix, i.op[!intel_syntax].regs->reg_name,
4899 register_prefix, i.op[intel_syntax].regs->reg_name);
4900 }
4901 else
4902 {
4903 /* Extraneous `l' suffix on fp insn. */
4904 as_warn (_("translating to `%s %s%s'"), i.tm.name,
4905 register_prefix, i.op[0].regs->reg_name);
4906 }
4907 }
4908 }
4909 }
4910 else if (i.tm.opcode_modifier.modrm)
4911 {
4912 /* The opcode is completed (modulo i.tm.extension_opcode which
4913 must be put into the modrm byte). Now, we make the modrm and
4914 index base bytes based on all the info we've collected. */
4915
4916 default_seg = build_modrm_byte ();
4917 }
4918 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
4919 {
4920 default_seg = &ds;
4921 }
4922 else if (i.tm.opcode_modifier.isstring)
4923 {
4924 /* For the string instructions that allow a segment override
4925 on one of their operands, the default segment is ds. */
4926 default_seg = &ds;
4927 }
4928
4929 if (i.tm.base_opcode == 0x8d /* lea */
4930 && i.seg[0]
4931 && !quiet_warnings)
4932 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
4933
4934 /* If a segment was explicitly specified, and the specified segment
4935 is not the default, use an opcode prefix to select it. If we
4936 never figured out what the default segment is, then default_seg
4937 will be zero at this point, and the specified segment prefix will
4938 always be used. */
4939 if ((i.seg[0]) && (i.seg[0] != default_seg))
4940 {
4941 if (!add_prefix (i.seg[0]->seg_prefix))
4942 return 0;
4943 }
4944 return 1;
4945 }
4946
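/* Encode the register and memory operands into the ModRM byte, the
   optional SIB byte and the VEX.vvvv field, setting REX bits as needed.
   Returns the default segment implied by the addressing form, or 0 if it
   is unknown. */
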
4947 static const seg_entry *
4948 build_modrm_byte (void)
4949 {
4950 const seg_entry *default_seg = 0;
4951 unsigned int source, dest;
4952 int vex_3_sources;
4953
4954 /* The first operand of instructions with VEX prefix and 3 sources
4955 must be VEX_Imm4. */
4956 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
4957 if (vex_3_sources)
4958 {
4959 unsigned int nds, reg_slot;
4960 expressionS *exp;
4961
4962 if (i.tm.opcode_modifier.veximmext
4963 && i.tm.opcode_modifier.immext)
4964 {
4965 dest = i.operands - 2;
4966 gas_assert (dest == 3);
4967 }
4968 else
4969 dest = i.operands - 1;
4970 nds = dest - 1;
4971
4972 /* This instruction must have 4 register operands
4973 or 3 register operands plus 1 memory operand.
4974 It must have VexNDS and VexImmExt. */
4975 gas_assert ((i.reg_operands == 4
4976 || (i.reg_operands == 3 && i.mem_operands == 1))
4977 && i.tm.opcode_modifier.vexvvvv == VEXXDS
4978 && i.tm.opcode_modifier.veximmext
4979 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
4980 || operand_type_equal (&i.tm.operand_types[dest], &regymm)));
4981
4982 /* Generate an 8bit immediate operand to encode the register
4983 operand. */
4984 exp = &im_expressions[i.imm_operands++];
4985 i.op[i.operands].imms = exp;
4986 i.types[i.operands] = imm8;
4987 i.operands++;
4988 /* If VexW1 is set, the first operand is the source and
4989 the second operand is encoded in the immediate operand. */
4990 if (i.tm.opcode_modifier.vexw == VEXW1)
4991 {
4992 source = 0;
4993 reg_slot = 1;
4994 }
4995 else
4996 {
4997 source = 1;
4998 reg_slot = 0;
4999 }
5000 gas_assert ((operand_type_equal (&i.tm.operand_types[reg_slot], &regxmm)
5001 || operand_type_equal (&i.tm.operand_types[reg_slot],
5002 &regymm))
5003 && (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5004 || operand_type_equal (&i.tm.operand_types[nds],
5005 &regymm)));
5006 exp->X_op = O_constant;
5007 exp->X_add_number
5008 = ((i.op[reg_slot].regs->reg_num
5009 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
5010 i.vex.register_specifier = i.op[nds].regs;
5011 }
5012 else
5013 source = dest = 0;
5014
5015 /* i.reg_operands MUST be the number of real register operands;
5016 implicit registers do not count. If there are 3 register
5017 	     operands, it must be an instruction with VexNDS. For an
5018 	     instruction with VexNDD, the destination register is encoded
5019 	     in the VEX prefix. If there are 4 register operands, it must be
5020 	     an instruction with VEX prefix and 3 sources.
5021 if (i.mem_operands == 0
5022 && ((i.reg_operands == 2
5023 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5024 || (i.reg_operands == 3
5025 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5026 || (i.reg_operands == 4 && vex_3_sources)))
5027 {
5028 switch (i.operands)
5029 {
5030 case 2:
5031 source = 0;
5032 break;
5033 case 3:
5034 /* When there are 3 operands, one of them may be immediate,
5035 which may be the first or the last operand. Otherwise,
5036 the first operand must be shift count register (cl) or it
5037 is an instruction with VexNDS. */
5038 gas_assert (i.imm_operands == 1
5039 || (i.imm_operands == 0
5040 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5041 || i.types[0].bitfield.shiftcount)));
5042 if (operand_type_check (i.types[0], imm)
5043 || i.types[0].bitfield.shiftcount)
5044 source = 1;
5045 else
5046 source = 0;
5047 break;
5048 case 4:
5049 /* When there are 4 operands, the first two must be 8bit
5050 immediate operands. The source operand will be the 3rd
5051 one.
5052
5053 	     For instructions with VexNDS, if the first operand is
5054 an imm8, the source operand is the 2nd one. If the last
5055 operand is imm8, the source operand is the first one. */
5056 gas_assert ((i.imm_operands == 2
5057 && i.types[0].bitfield.imm8
5058 && i.types[1].bitfield.imm8)
5059 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5060 && i.imm_operands == 1
5061 && (i.types[0].bitfield.imm8
5062 || i.types[i.operands - 1].bitfield.imm8)));
5063 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5064 {
5065 if (i.types[0].bitfield.imm8)
5066 source = 1;
5067 else
5068 source = 0;
5069 }
5070 else
5071 source = 2;
5072 break;
5073 case 5:
5074 break;
5075 default:
5076 abort ();
5077 }
5078
5079 if (!vex_3_sources)
5080 {
5081 dest = source + 1;
5082
5083 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5084 {
5085 /* For instructions with VexNDS, the register-only
5086 source operand must be XMM or YMM register. It is
5087 encoded in VEX prefix. We need to clear RegMem bit
5088 before calling operand_type_equal. */
5089 i386_operand_type op = i.tm.operand_types[dest];
5090 op.bitfield.regmem = 0;
5091 if ((dest + 1) >= i.operands
5092 || (!operand_type_equal (&op, &regxmm)
5093 && !operand_type_equal (&op, &regymm)))
5094 abort ();
5095 i.vex.register_specifier = i.op[dest].regs;
5096 dest++;
5097 }
5098 }
5099
5100 i.rm.mode = 3;
5101 	  /* One of the register operands will be encoded in the i.rm.reg
5102 	     field, the other in the combined i.rm.mode and i.rm.regmem
5103 fields. If no form of this instruction supports a memory
5104 destination operand, then we assume the source operand may
5105 sometimes be a memory operand and so we need to store the
5106 destination in the i.rm.reg field. */
5107 if (!i.tm.operand_types[dest].bitfield.regmem
5108 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5109 {
5110 i.rm.reg = i.op[dest].regs->reg_num;
5111 i.rm.regmem = i.op[source].regs->reg_num;
5112 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5113 i.rex |= REX_R;
5114 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5115 i.rex |= REX_B;
5116 }
5117 else
5118 {
5119 i.rm.reg = i.op[source].regs->reg_num;
5120 i.rm.regmem = i.op[dest].regs->reg_num;
5121 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5122 i.rex |= REX_B;
5123 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5124 i.rex |= REX_R;
5125 }
5126 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5127 {
5128 if (!i.types[0].bitfield.control
5129 && !i.types[1].bitfield.control)
5130 abort ();
5131 i.rex &= ~(REX_R | REX_B);
5132 add_prefix (LOCK_PREFIX_OPCODE);
5133 }
5134 }
5135 else
5136 { /* If it's not 2 reg operands... */
5137 unsigned int mem;
5138
5139 if (i.mem_operands)
5140 {
5141 unsigned int fake_zero_displacement = 0;
5142 unsigned int op;
5143
5144 for (op = 0; op < i.operands; op++)
5145 if (operand_type_check (i.types[op], anymem))
5146 break;
5147 gas_assert (op < i.operands);
5148
5149 default_seg = &ds;
5150
5151 if (i.base_reg == 0)
5152 {
5153 i.rm.mode = 0;
5154 if (!i.disp_operands)
5155 fake_zero_displacement = 1;
5156 if (i.index_reg == 0)
5157 {
5158 /* Operand is just <disp> */
5159 if (flag_code == CODE_64BIT)
5160 {
5161 		      /* 64bit mode replaces 32bit absolute
5162 			 addressing with RIP relative addressing;
5163 			 absolute addressing is encoded by one of the
5164 			 redundant SIB forms. */
5165 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5166 i.sib.base = NO_BASE_REGISTER;
5167 i.sib.index = NO_INDEX_REGISTER;
5168 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5169 ? disp32s : disp32);
5170 }
5171 else if ((flag_code == CODE_16BIT)
5172 ^ (i.prefix[ADDR_PREFIX] != 0))
5173 {
5174 i.rm.regmem = NO_BASE_REGISTER_16;
5175 i.types[op] = disp16;
5176 }
5177 else
5178 {
5179 i.rm.regmem = NO_BASE_REGISTER;
5180 i.types[op] = disp32;
5181 }
5182 }
5183 else /* !i.base_reg && i.index_reg */
5184 {
5185 if (i.index_reg->reg_num == RegEiz
5186 || i.index_reg->reg_num == RegRiz)
5187 i.sib.index = NO_INDEX_REGISTER;
5188 else
5189 i.sib.index = i.index_reg->reg_num;
5190 i.sib.base = NO_BASE_REGISTER;
5191 i.sib.scale = i.log2_scale_factor;
5192 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5193 i.types[op].bitfield.disp8 = 0;
5194 i.types[op].bitfield.disp16 = 0;
5195 i.types[op].bitfield.disp64 = 0;
5196 if (flag_code != CODE_64BIT)
5197 {
5198 /* Must be 32 bit */
5199 i.types[op].bitfield.disp32 = 1;
5200 i.types[op].bitfield.disp32s = 0;
5201 }
5202 else
5203 {
5204 i.types[op].bitfield.disp32 = 0;
5205 i.types[op].bitfield.disp32s = 1;
5206 }
5207 if ((i.index_reg->reg_flags & RegRex) != 0)
5208 i.rex |= REX_X;
5209 }
5210 }
5211 /* RIP addressing for 64bit mode. */
5212 else if (i.base_reg->reg_num == RegRip ||
5213 i.base_reg->reg_num == RegEip)
5214 {
5215 i.rm.regmem = NO_BASE_REGISTER;
5216 i.types[op].bitfield.disp8 = 0;
5217 i.types[op].bitfield.disp16 = 0;
5218 i.types[op].bitfield.disp32 = 0;
5219 i.types[op].bitfield.disp32s = 1;
5220 i.types[op].bitfield.disp64 = 0;
5221 i.flags[op] |= Operand_PCrel;
5222 if (! i.disp_operands)
5223 fake_zero_displacement = 1;
5224 }
5225 else if (i.base_reg->reg_type.bitfield.reg16)
5226 {
5227 switch (i.base_reg->reg_num)
5228 {
5229 case 3: /* (%bx) */
5230 if (i.index_reg == 0)
5231 i.rm.regmem = 7;
5232 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5233 i.rm.regmem = i.index_reg->reg_num - 6;
5234 break;
5235 case 5: /* (%bp) */
5236 default_seg = &ss;
5237 if (i.index_reg == 0)
5238 {
5239 i.rm.regmem = 6;
5240 if (operand_type_check (i.types[op], disp) == 0)
5241 {
5242 /* fake (%bp) into 0(%bp) */
5243 i.types[op].bitfield.disp8 = 1;
5244 fake_zero_displacement = 1;
5245 }
5246 }
5247 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5248 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5249 break;
5250 default: /* (%si) -> 4 or (%di) -> 5 */
5251 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5252 }
5253 i.rm.mode = mode_from_disp_size (i.types[op]);
5254 }
5255 else /* i.base_reg and 32/64 bit mode */
5256 {
5257 if (flag_code == CODE_64BIT
5258 && operand_type_check (i.types[op], disp))
5259 {
5260 i386_operand_type temp;
5261 operand_type_set (&temp, 0);
5262 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5263 i.types[op] = temp;
5264 if (i.prefix[ADDR_PREFIX] == 0)
5265 i.types[op].bitfield.disp32s = 1;
5266 else
5267 i.types[op].bitfield.disp32 = 1;
5268 }
5269
5270 i.rm.regmem = i.base_reg->reg_num;
5271 if ((i.base_reg->reg_flags & RegRex) != 0)
5272 i.rex |= REX_B;
5273 i.sib.base = i.base_reg->reg_num;
5274 /* x86-64 ignores REX prefix bit here to avoid decoder
5275 complications. */
5276 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5277 {
5278 default_seg = &ss;
5279 if (i.disp_operands == 0)
5280 {
5281 fake_zero_displacement = 1;
5282 i.types[op].bitfield.disp8 = 1;
5283 }
5284 }
5285 else if (i.base_reg->reg_num == ESP_REG_NUM)
5286 {
5287 default_seg = &ss;
5288 }
5289 i.sib.scale = i.log2_scale_factor;
5290 if (i.index_reg == 0)
5291 {
5292 /* <disp>(%esp) becomes two byte modrm with no index
5293 register. We've already stored the code for esp
5294 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5295 Any base register besides %esp will not use the
5296 extra modrm byte. */
5297 i.sib.index = NO_INDEX_REGISTER;
5298 }
5299 else
5300 {
5301 if (i.index_reg->reg_num == RegEiz
5302 || i.index_reg->reg_num == RegRiz)
5303 i.sib.index = NO_INDEX_REGISTER;
5304 else
5305 i.sib.index = i.index_reg->reg_num;
5306 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5307 if ((i.index_reg->reg_flags & RegRex) != 0)
5308 i.rex |= REX_X;
5309 }
5310
5311 if (i.disp_operands
5312 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5313 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5314 i.rm.mode = 0;
5315 else
5316 i.rm.mode = mode_from_disp_size (i.types[op]);
5317 }
5318
5319 if (fake_zero_displacement)
5320 {
5321 /* Fakes a zero displacement assuming that i.types[op]
5322 holds the correct displacement size. */
5323 expressionS *exp;
5324
5325 gas_assert (i.op[op].disps == 0);
5326 exp = &disp_expressions[i.disp_operands++];
5327 i.op[op].disps = exp;
5328 exp->X_op = O_constant;
5329 exp->X_add_number = 0;
5330 exp->X_add_symbol = (symbolS *) 0;
5331 exp->X_op_symbol = (symbolS *) 0;
5332 }
5333
5334 mem = op;
5335 }
5336 else
5337 mem = ~0;
5338
5339 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5340 {
5341 if (operand_type_check (i.types[0], imm))
5342 i.vex.register_specifier = NULL;
5343 else
5344 {
5345 /* VEX.vvvv encodes one of the sources when the first
5346 operand is not an immediate. */
5347 if (i.tm.opcode_modifier.vexw == VEXW0)
5348 i.vex.register_specifier = i.op[0].regs;
5349 else
5350 i.vex.register_specifier = i.op[1].regs;
5351 }
5352
5353 /* Destination is a XMM register encoded in the ModRM.reg
5354 and VEX.R bit. */
5355 i.rm.reg = i.op[2].regs->reg_num;
5356 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5357 i.rex |= REX_R;
5358
5359 /* ModRM.rm and VEX.B encodes the other source. */
5360 if (!i.mem_operands)
5361 {
5362 i.rm.mode = 3;
5363
5364 if (i.tm.opcode_modifier.vexw == VEXW0)
5365 i.rm.regmem = i.op[1].regs->reg_num;
5366 else
5367 i.rm.regmem = i.op[0].regs->reg_num;
5368
5369 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5370 i.rex |= REX_B;
5371 }
5372 }
5373 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5374 {
5375 i.vex.register_specifier = i.op[2].regs;
5376 if (!i.mem_operands)
5377 {
5378 i.rm.mode = 3;
5379 i.rm.regmem = i.op[1].regs->reg_num;
5380 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5381 i.rex |= REX_B;
5382 }
5383 }
5384 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5385 (if any) based on i.tm.extension_opcode. Again, we must be
5386 careful to make sure that segment/control/debug/test/MMX
5387 registers are coded into the i.rm.reg field. */
5388 else if (i.reg_operands)
5389 {
5390 unsigned int op;
5391 unsigned int vex_reg = ~0;
5392
5393 for (op = 0; op < i.operands; op++)
5394 if (i.types[op].bitfield.reg8
5395 || i.types[op].bitfield.reg16
5396 || i.types[op].bitfield.reg32
5397 || i.types[op].bitfield.reg64
5398 || i.types[op].bitfield.regmmx
5399 || i.types[op].bitfield.regxmm
5400 || i.types[op].bitfield.regymm
5401 || i.types[op].bitfield.sreg2
5402 || i.types[op].bitfield.sreg3
5403 || i.types[op].bitfield.control
5404 || i.types[op].bitfield.debug
5405 || i.types[op].bitfield.test)
5406 break;
5407
5408 if (vex_3_sources)
5409 op = dest;
5410 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5411 {
5412 /* For instructions with VexNDS, the register-only
5413 source operand is encoded in VEX prefix. */
5414 gas_assert (mem != (unsigned int) ~0);
5415
5416 if (op > mem)
5417 {
5418 vex_reg = op++;
5419 gas_assert (op < i.operands);
5420 }
5421 else
5422 {
5423 vex_reg = op + 1;
5424 gas_assert (vex_reg < i.operands);
5425 }
5426 }
5427 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5428 {
5429 /* For instructions with VexNDD, there should be
5430 no memory operand and the register destination
5431 is encoded in VEX prefix. */
5432 gas_assert (i.mem_operands == 0
5433 && (op + 2) == i.operands);
5434 vex_reg = op + 1;
5435 }
5436 else
5437 gas_assert (op < i.operands);
5438
5439 if (vex_reg != (unsigned int) ~0)
5440 {
5441 gas_assert (i.reg_operands == 2);
5442
5443 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5444 &regxmm)
5445 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5446 &regymm))
5447 abort ();
5448
5449 i.vex.register_specifier = i.op[vex_reg].regs;
5450 }
5451
5452 /* Don't set OP operand twice. */
5453 if (vex_reg != op)
5454 {
5455 /* If there is an extension opcode to put here, the
5456 register number must be put into the regmem field. */
5457 if (i.tm.extension_opcode != None)
5458 {
5459 i.rm.regmem = i.op[op].regs->reg_num;
5460 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5461 i.rex |= REX_B;
5462 }
5463 else
5464 {
5465 i.rm.reg = i.op[op].regs->reg_num;
5466 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5467 i.rex |= REX_R;
5468 }
5469 }
5470
5471 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5472 must set it to 3 to indicate this is a register operand
5473 in the regmem field. */
5474 if (!i.mem_operands)
5475 i.rm.mode = 3;
5476 }
5477
5478 /* Fill in i.rm.reg field with extension opcode (if any). */
5479 if (i.tm.extension_opcode != None)
5480 i.rm.reg = i.tm.extension_opcode;
5481 }
5482 return default_seg;
5483 }
5484
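/* Emit a relaxable jump: the prefixes and one opcode byte go into the
   fixed part of an rs_machine_dependent frag, and md_convert_frag later
   chooses the final displacement size. */
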
5485 static void
5486 output_branch (void)
5487 {
5488 char *p;
5489 int code16;
5490 int prefix;
5491 relax_substateT subtype;
5492 symbolS *sym;
5493 offsetT off;
5494
5495 code16 = 0;
5496 if (flag_code == CODE_16BIT)
5497 code16 = CODE16;
5498
5499 prefix = 0;
5500 if (i.prefix[DATA_PREFIX] != 0)
5501 {
5502 prefix = 1;
5503 i.prefixes -= 1;
5504 code16 ^= CODE16;
5505 }
5506 /* Pentium4 branch hints. */
5507 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5508 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5509 {
5510 prefix++;
5511 i.prefixes--;
5512 }
5513 if (i.prefix[REX_PREFIX] != 0)
5514 {
5515 prefix++;
5516 i.prefixes--;
5517 }
5518
5519 if (i.prefixes != 0 && !intel_syntax)
5520 as_warn (_("skipping prefixes on this instruction"));
5521
5522 	  /* It's always a symbol; end the frag and set up for relaxation.
5523 Make sure there is enough room in this frag for the largest
5524 instruction we may generate in md_convert_frag. This is 2
5525 bytes for the opcode and room for the prefix and largest
5526 displacement. */
5527 frag_grow (prefix + 2 + 4);
5528 /* Prefix and 1 opcode byte go in fr_fix. */
5529 p = frag_more (prefix + 1);
5530 if (i.prefix[DATA_PREFIX] != 0)
5531 *p++ = DATA_PREFIX_OPCODE;
5532 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5533 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5534 *p++ = i.prefix[SEG_PREFIX];
5535 if (i.prefix[REX_PREFIX] != 0)
5536 *p++ = i.prefix[REX_PREFIX];
5537 *p = i.tm.base_opcode;
5538
5539 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5540 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
5541 else if (cpu_arch_flags.bitfield.cpui386)
5542 subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
5543 else
5544 subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
5545 subtype |= code16;
5546
5547 sym = i.op[0].disps->X_add_symbol;
5548 off = i.op[0].disps->X_add_number;
5549
5550 if (i.op[0].disps->X_op != O_constant
5551 && i.op[0].disps->X_op != O_symbol)
5552 {
5553 /* Handle complex expressions. */
5554 sym = make_expr_symbol (i.op[0].disps);
5555 off = 0;
5556 }
5557
5558 /* 1 possible extra opcode + 4 byte displacement go in var part.
5559 Pass reloc in fr_var. */
5560 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5561 }
5562
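/* Emit a jump whose displacement size is already known: one byte for
   jcxz/loop style instructions, otherwise two or four bytes, with a
   fixup for the target. */
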
5563 static void
5564 output_jump (void)
5565 {
5566 char *p;
5567 int size;
5568 fixS *fixP;
5569
5570 if (i.tm.opcode_modifier.jumpbyte)
5571 {
5572 /* This is a loop or jecxz type instruction. */
5573 size = 1;
5574 if (i.prefix[ADDR_PREFIX] != 0)
5575 {
5576 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5577 i.prefixes -= 1;
5578 }
5579 /* Pentium4 branch hints. */
5580 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5581 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5582 {
5583 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5584 i.prefixes--;
5585 }
5586 }
5587 else
5588 {
5589 int code16;
5590
5591 code16 = 0;
5592 if (flag_code == CODE_16BIT)
5593 code16 = CODE16;
5594
5595 if (i.prefix[DATA_PREFIX] != 0)
5596 {
5597 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5598 i.prefixes -= 1;
5599 code16 ^= CODE16;
5600 }
5601
5602 size = 4;
5603 if (code16)
5604 size = 2;
5605 }
5606
5607 if (i.prefix[REX_PREFIX] != 0)
5608 {
5609 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5610 i.prefixes -= 1;
5611 }
5612
5613 if (i.prefixes != 0 && !intel_syntax)
5614 as_warn (_("skipping prefixes on this instruction"));
5615
5616 p = frag_more (1 + size);
5617 *p++ = i.tm.base_opcode;
5618
5619 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5620 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5621
5622 /* All jumps handled here are signed, but don't use a signed limit
5623 check for 32 and 16 bit jumps as we want to allow wrap around at
5624 4G and 64k respectively. */
5625 if (size == 1)
5626 fixP->fx_signed = 1;
5627 }
5628
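/* Emit a direct intersegment (far) jump or call: prefixes, the opcode,
   a 2- or 4-byte offset, then the 2-byte segment selector. */
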
5629 static void
5630 output_interseg_jump (void)
5631 {
5632 char *p;
5633 int size;
5634 int prefix;
5635 int code16;
5636
5637 code16 = 0;
5638 if (flag_code == CODE_16BIT)
5639 code16 = CODE16;
5640
5641 prefix = 0;
5642 if (i.prefix[DATA_PREFIX] != 0)
5643 {
5644 prefix = 1;
5645 i.prefixes -= 1;
5646 code16 ^= CODE16;
5647 }
5648 if (i.prefix[REX_PREFIX] != 0)
5649 {
5650 prefix++;
5651 i.prefixes -= 1;
5652 }
5653
5654 size = 4;
5655 if (code16)
5656 size = 2;
5657
5658 if (i.prefixes != 0 && !intel_syntax)
5659 as_warn (_("skipping prefixes on this instruction"));
5660
5661 /* 1 opcode; 2 segment; offset */
5662 p = frag_more (prefix + 1 + 2 + size);
5663
5664 if (i.prefix[DATA_PREFIX] != 0)
5665 *p++ = DATA_PREFIX_OPCODE;
5666
5667 if (i.prefix[REX_PREFIX] != 0)
5668 *p++ = i.prefix[REX_PREFIX];
5669
5670 *p++ = i.tm.base_opcode;
5671 if (i.op[1].imms->X_op == O_constant)
5672 {
5673 offsetT n = i.op[1].imms->X_add_number;
5674
5675 if (size == 2
5676 && !fits_in_unsigned_word (n)
5677 && !fits_in_signed_word (n))
5678 {
5679 as_bad (_("16-bit jump out of range"));
5680 return;
5681 }
5682 md_number_to_chars (p, n, size);
5683 }
5684 else
5685 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5686 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5687 if (i.op[0].imms->X_op != O_constant)
5688 as_bad (_("can't handle non absolute segment in `%s'"),
5689 i.tm.name);
5690 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5691 }
5692
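/* Emit the assembled instruction: jumps are dispatched to the routines
   above; everything else gets its prefixes (or VEX prefix), opcode,
   ModRM/SIB bytes and any displacement and immediate fields written out
   here. */
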
5693 static void
5694 output_insn (void)
5695 {
5696 fragS *insn_start_frag;
5697 offsetT insn_start_off;
5698
5699 /* Tie dwarf2 debug info to the address at the start of the insn.
5700 We can't do this after the insn has been output as the current
5701 frag may have been closed off. eg. by frag_var. */
5702 dwarf2_emit_insn (0);
5703
5704 insn_start_frag = frag_now;
5705 insn_start_off = frag_now_fix ();
5706
5707 /* Output jumps. */
5708 if (i.tm.opcode_modifier.jump)
5709 output_branch ();
5710 else if (i.tm.opcode_modifier.jumpbyte
5711 || i.tm.opcode_modifier.jumpdword)
5712 output_jump ();
5713 else if (i.tm.opcode_modifier.jumpintersegment)
5714 output_interseg_jump ();
5715 else
5716 {
5717 /* Output normal instructions here. */
5718 char *p;
5719 unsigned char *q;
5720 unsigned int j;
5721 unsigned int prefix;
5722
5723 /* Since the VEX prefix contains the implicit prefix, we don't
5724 need the explicit prefix. */
5725 if (!i.tm.opcode_modifier.vex)
5726 {
5727 switch (i.tm.opcode_length)
5728 {
5729 case 3:
5730 if (i.tm.base_opcode & 0xff000000)
5731 {
5732 prefix = (i.tm.base_opcode >> 24) & 0xff;
5733 goto check_prefix;
5734 }
5735 break;
5736 case 2:
5737 if ((i.tm.base_opcode & 0xff0000) != 0)
5738 {
5739 prefix = (i.tm.base_opcode >> 16) & 0xff;
5740 if (i.tm.cpu_flags.bitfield.cpupadlock)
5741 {
5742 check_prefix:
5743 if (prefix != REPE_PREFIX_OPCODE
5744 || (i.prefix[REP_PREFIX]
5745 != REPE_PREFIX_OPCODE))
5746 add_prefix (prefix);
5747 }
5748 else
5749 add_prefix (prefix);
5750 }
5751 break;
5752 case 1:
5753 break;
5754 default:
5755 abort ();
5756 }
5757
5758 /* The prefix bytes. */
5759 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5760 if (*q)
5761 FRAG_APPEND_1_CHAR (*q);
5762 }
5763
5764 if (i.tm.opcode_modifier.vex)
5765 {
5766 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5767 if (*q)
5768 switch (j)
5769 {
5770 case REX_PREFIX:
5771 /* REX byte is encoded in VEX prefix. */
5772 break;
5773 case SEG_PREFIX:
5774 case ADDR_PREFIX:
5775 FRAG_APPEND_1_CHAR (*q);
5776 break;
5777 default:
5778 /* There should be no other prefixes for instructions
5779 with VEX prefix. */
5780 abort ();
5781 }
5782
5783 /* Now the VEX prefix. */
5784 p = frag_more (i.vex.length);
5785 for (j = 0; j < i.vex.length; j++)
5786 p[j] = i.vex.bytes[j];
5787 }
5788
5789 /* Now the opcode; be careful about word order here! */
5790 if (i.tm.opcode_length == 1)
5791 {
5792 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
5793 }
5794 else
5795 {
5796 switch (i.tm.opcode_length)
5797 {
5798 case 3:
5799 p = frag_more (3);
5800 *p++ = (i.tm.base_opcode >> 16) & 0xff;
5801 break;
5802 case 2:
5803 p = frag_more (2);
5804 break;
5805 default:
5806 abort ();
5807 break;
5808 }
5809
5810 /* Put out high byte first: can't use md_number_to_chars! */
5811 *p++ = (i.tm.base_opcode >> 8) & 0xff;
5812 *p = i.tm.base_opcode & 0xff;
5813 }
5814
5815 /* Now the modrm byte and sib byte (if present). */
5816 if (i.tm.opcode_modifier.modrm)
5817 {
5818 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
5819 | i.rm.reg << 3
5820 | i.rm.mode << 6));
5821 /* If i.rm.regmem == ESP (4)
5822 && i.rm.mode != (Register mode)
5823 && not 16 bit
5824 ==> need second modrm byte. */
5825 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
5826 && i.rm.mode != 3
5827 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
5828 FRAG_APPEND_1_CHAR ((i.sib.base << 0
5829 | i.sib.index << 3
5830 | i.sib.scale << 6));
5831 }
5832
5833 if (i.disp_operands)
5834 output_disp (insn_start_frag, insn_start_off);
5835
5836 if (i.imm_operands)
5837 output_imm (insn_start_frag, insn_start_off);
5838 }
5839
5840 #ifdef DEBUG386
5841 if (flag_debug)
5842 {
5843 pi ("" /*line*/, &i);
5844 }
5845 #endif /* DEBUG386 */
5846 }
5847
5848 /* Return the size of the displacement operand N. */
5849
5850 static int
5851 disp_size (unsigned int n)
5852 {
5853 int size = 4;
5854 if (i.types[n].bitfield.disp64)
5855 size = 8;
5856 else if (i.types[n].bitfield.disp8)
5857 size = 1;
5858 else if (i.types[n].bitfield.disp16)
5859 size = 2;
5860 return size;
5861 }
5862
5863 /* Return the size of the immediate operand N. */
5864
5865 static int
5866 imm_size (unsigned int n)
5867 {
5868 int size = 4;
5869 if (i.types[n].bitfield.imm64)
5870 size = 8;
5871 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
5872 size = 1;
5873 else if (i.types[n].bitfield.imm16)
5874 size = 2;
5875 return size;
5876 }
5877
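/* Emit the displacement bytes: constants are written directly, other
   expressions get a fixup, with special handling for GOTPC relocations
   and for PC-relative displacements that are followed by immediates. */
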
5878 static void
5879 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
5880 {
5881 char *p;
5882 unsigned int n;
5883
5884 for (n = 0; n < i.operands; n++)
5885 {
5886 if (operand_type_check (i.types[n], disp))
5887 {
5888 if (i.op[n].disps->X_op == O_constant)
5889 {
5890 int size = disp_size (n);
5891 offsetT val;
5892
5893 val = offset_in_range (i.op[n].disps->X_add_number,
5894 size);
5895 p = frag_more (size);
5896 md_number_to_chars (p, val, size);
5897 }
5898 else
5899 {
5900 enum bfd_reloc_code_real reloc_type;
5901 int size = disp_size (n);
5902 int sign = i.types[n].bitfield.disp32s;
5903 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
5904
5905 /* We can't have 8 bit displacement here. */
5906 gas_assert (!i.types[n].bitfield.disp8);
5907
5908 /* The PC relative address is computed relative
5909 to the instruction boundary, so in case immediate
5910 			fields follow, we need to adjust the value. */
5911 if (pcrel && i.imm_operands)
5912 {
5913 unsigned int n1;
5914 int sz = 0;
5915
5916 for (n1 = 0; n1 < i.operands; n1++)
5917 if (operand_type_check (i.types[n1], imm))
5918 {
5919 /* Only one immediate is allowed for PC
5920 relative address. */
5921 gas_assert (sz == 0);
5922 sz = imm_size (n1);
5923 i.op[n].disps->X_add_number -= sz;
5924 }
5925 /* We should find the immediate. */
5926 gas_assert (sz != 0);
5927 }
5928
5929 p = frag_more (size);
5930 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
5931 if (GOT_symbol
5932 && GOT_symbol == i.op[n].disps->X_add_symbol
5933 && (((reloc_type == BFD_RELOC_32
5934 || reloc_type == BFD_RELOC_X86_64_32S
5935 || (reloc_type == BFD_RELOC_64
5936 && object_64bit))
5937 && (i.op[n].disps->X_op == O_symbol
5938 || (i.op[n].disps->X_op == O_add
5939 && ((symbol_get_value_expression
5940 (i.op[n].disps->X_op_symbol)->X_op)
5941 == O_subtract))))
5942 || reloc_type == BFD_RELOC_32_PCREL))
5943 {
5944 offsetT add;
5945
5946 if (insn_start_frag == frag_now)
5947 add = (p - frag_now->fr_literal) - insn_start_off;
5948 else
5949 {
5950 fragS *fr;
5951
5952 add = insn_start_frag->fr_fix - insn_start_off;
5953 for (fr = insn_start_frag->fr_next;
5954 fr && fr != frag_now; fr = fr->fr_next)
5955 add += fr->fr_fix;
5956 add += p - frag_now->fr_literal;
5957 }
5958
5959 if (!object_64bit)
5960 {
5961 reloc_type = BFD_RELOC_386_GOTPC;
5962 i.op[n].imms->X_add_number += add;
5963 }
5964 else if (reloc_type == BFD_RELOC_64)
5965 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5966 else
5967 /* Don't do the adjustment for x86-64, as there
5968 the pcrel addressing is relative to the _next_
5969 insn, and that is taken care of in other code. */
5970 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5971 }
5972 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5973 i.op[n].disps, pcrel, reloc_type);
5974 }
5975 }
5976 }
5977 }
5978
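/* Emit the immediate bytes: constants are written directly, other
   expressions get a fixup, again adjusting _GLOBAL_OFFSET_TABLE_ operands
   so that the GOTPC relocation is computed against the field itself. */
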
5979 static void
5980 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
5981 {
5982 char *p;
5983 unsigned int n;
5984
5985 for (n = 0; n < i.operands; n++)
5986 {
5987 if (operand_type_check (i.types[n], imm))
5988 {
5989 if (i.op[n].imms->X_op == O_constant)
5990 {
5991 int size = imm_size (n);
5992 offsetT val;
5993
5994 val = offset_in_range (i.op[n].imms->X_add_number,
5995 size);
5996 p = frag_more (size);
5997 md_number_to_chars (p, val, size);
5998 }
5999 else
6000 {
6001 /* Not absolute_section.
6002 Need a 32-bit fixup (don't support 8bit
6003 non-absolute imms). Try to support other
6004 sizes ... */
6005 enum bfd_reloc_code_real reloc_type;
6006 int size = imm_size (n);
6007 int sign;
6008
6009 if (i.types[n].bitfield.imm32s
6010 && (i.suffix == QWORD_MNEM_SUFFIX
6011 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6012 sign = 1;
6013 else
6014 sign = 0;
6015
6016 p = frag_more (size);
6017 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6018
6019 /* This is tough to explain. We end up with this one if we
6020 * have operands that look like
6021 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6022 * obtain the absolute address of the GOT, and it is strongly
6023 * preferable from a performance point of view to avoid using
6024 * a runtime relocation for this. The actual sequence of
6025 	       * instructions often looks something like:
6026 *
6027 * call .L66
6028 * .L66:
6029 * popl %ebx
6030 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6031 *
6032 * The call and pop essentially return the absolute address
6033 * of the label .L66 and store it in %ebx. The linker itself
6034 * will ultimately change the first operand of the addl so
6035 * that %ebx points to the GOT, but to keep things simple, the
6036 * .o file must have this operand set so that it generates not
6037 * the absolute address of .L66, but the absolute address of
6038 	       * itself.  This allows the linker itself to simply treat a GOTPC
6039 * relocation as asking for a pcrel offset to the GOT to be
6040 * added in, and the addend of the relocation is stored in the
6041 * operand field for the instruction itself.
6042 *
6043 * Our job here is to fix the operand so that it would add
6044 * the correct offset so that %ebx would point to itself. The
6045 * thing that is tricky is that .-.L66 will point to the
6046 * beginning of the instruction, so we need to further modify
6047 * the operand so that it will point to itself. There are
6048 * other cases where you have something like:
6049 *
6050 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6051 *
6052 * and here no correction would be required. Internally in
6053 * the assembler we treat operands of this form as not being
6054 * pcrel since the '.' is explicitly mentioned, and I wonder
6055 * whether it would simplify matters to do it this way. Who
6056 * knows. In earlier versions of the PIC patches, the
6057 * pcrel_adjust field was used to store the correction, but
6058 * since the expression is not pcrel, I felt it would be
6059 * confusing to do it this way. */
6060
6061 if ((reloc_type == BFD_RELOC_32
6062 || reloc_type == BFD_RELOC_X86_64_32S
6063 || reloc_type == BFD_RELOC_64)
6064 && GOT_symbol
6065 && GOT_symbol == i.op[n].imms->X_add_symbol
6066 && (i.op[n].imms->X_op == O_symbol
6067 || (i.op[n].imms->X_op == O_add
6068 && ((symbol_get_value_expression
6069 (i.op[n].imms->X_op_symbol)->X_op)
6070 == O_subtract))))
6071 {
6072 offsetT add;
6073
6074 if (insn_start_frag == frag_now)
6075 add = (p - frag_now->fr_literal) - insn_start_off;
6076 else
6077 {
6078 fragS *fr;
6079
6080 add = insn_start_frag->fr_fix - insn_start_off;
6081 for (fr = insn_start_frag->fr_next;
6082 fr && fr != frag_now; fr = fr->fr_next)
6083 add += fr->fr_fix;
6084 add += p - frag_now->fr_literal;
6085 }
6086
6087 if (!object_64bit)
6088 reloc_type = BFD_RELOC_386_GOTPC;
6089 else if (size == 4)
6090 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6091 else if (size == 8)
6092 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6093 i.op[n].imms->X_add_number += add;
6094 }
6095 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6096 i.op[n].imms, 0, reloc_type);
6097 }
6098 }
6099 }
6100 }
6101 \f
6102 /* x86_cons_fix_new is called via the expression parsing code when a
6103 reloc is needed. We use this hook to get the correct .got reloc. */
6104 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6105 static int cons_sign = -1;
6106
6107 void
6108 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6109 expressionS *exp)
6110 {
6111 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6112
6113 got_reloc = NO_RELOC;
6114
6115 #ifdef TE_PE
6116 if (exp->X_op == O_secrel)
6117 {
6118 exp->X_op = O_symbol;
6119 r = BFD_RELOC_32_SECREL;
6120 }
6121 #endif
6122
6123 fix_new_exp (frag, off, len, exp, 0, r);
6124 }
6125
6126 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6127 # define lex_got(reloc, adjust, types) NULL
6128 #else
6129 /* Parse operands of the form
6130 <symbol>@GOTOFF+<nnn>
6131 and similar .plt or .got references.
6132
6133 If we find one, set up the correct relocation in RELOC and copy the
6134 input string, minus the `@GOTOFF' into a malloc'd buffer for
6135 parsing by the calling routine. Return this buffer, and if ADJUST
6136 is non-null set it to the length of the string we removed from the
6137 input line. Otherwise return NULL. */
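/* Editor's illustration, not part of the original source.  Assuming a
   hypothetical symbol `foo' in 32-bit ELF output, an operand such as

	movl foo@GOTOFF+4(%ebx), %eax

   reaches lex_got() with the displacement text `foo@GOTOFF+4'.  The
   routine sets *rel to BFD_RELOC_386_GOTOFF, sets *adjust to 6 (the
   length of "GOTOFF"), creates GOT_symbol if needed, and returns a
   malloc'd copy of the operand with the reloc token replaced by a
   space -- roughly "foo +4" -- which the caller then parses as an
   ordinary expression.  */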
6138 static char *
6139 lex_got (enum bfd_reloc_code_real *rel,
6140 int *adjust,
6141 i386_operand_type *types)
6142 {
6143   /* Some of the relocations depend on the size of the field to
6144 be relocated. But in our callers i386_immediate and i386_displacement
6145 we don't yet know the operand size (this will be set by insn
6146 matching). Hence we record the word32 relocation here,
6147 and adjust the reloc according to the real size in reloc(). */
6148 static const struct {
6149 const char *str;
6150 const enum bfd_reloc_code_real rel[2];
6151 const i386_operand_type types64;
6152 } gotrel[] = {
6153 { "PLTOFF", { _dummy_first_bfd_reloc_code_real,
6154 BFD_RELOC_X86_64_PLTOFF64 },
6155 OPERAND_TYPE_IMM64 },
6156 { "PLT", { BFD_RELOC_386_PLT32,
6157 BFD_RELOC_X86_64_PLT32 },
6158 OPERAND_TYPE_IMM32_32S_DISP32 },
6159 { "GOTPLT", { _dummy_first_bfd_reloc_code_real,
6160 BFD_RELOC_X86_64_GOTPLT64 },
6161 OPERAND_TYPE_IMM64_DISP64 },
6162 { "GOTOFF", { BFD_RELOC_386_GOTOFF,
6163 BFD_RELOC_X86_64_GOTOFF64 },
6164 OPERAND_TYPE_IMM64_DISP64 },
6165 { "GOTPCREL", { _dummy_first_bfd_reloc_code_real,
6166 BFD_RELOC_X86_64_GOTPCREL },
6167 OPERAND_TYPE_IMM32_32S_DISP32 },
6168 { "TLSGD", { BFD_RELOC_386_TLS_GD,
6169 BFD_RELOC_X86_64_TLSGD },
6170 OPERAND_TYPE_IMM32_32S_DISP32 },
6171 { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
6172 _dummy_first_bfd_reloc_code_real },
6173 OPERAND_TYPE_NONE },
6174 { "TLSLD", { _dummy_first_bfd_reloc_code_real,
6175 BFD_RELOC_X86_64_TLSLD },
6176 OPERAND_TYPE_IMM32_32S_DISP32 },
6177 { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
6178 BFD_RELOC_X86_64_GOTTPOFF },
6179 OPERAND_TYPE_IMM32_32S_DISP32 },
6180 { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
6181 BFD_RELOC_X86_64_TPOFF32 },
6182 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6183 { "NTPOFF", { BFD_RELOC_386_TLS_LE,
6184 _dummy_first_bfd_reloc_code_real },
6185 OPERAND_TYPE_NONE },
6186 { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
6187 BFD_RELOC_X86_64_DTPOFF32 },
6189 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6190 { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
6191 _dummy_first_bfd_reloc_code_real },
6192 OPERAND_TYPE_NONE },
6193 { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
6194 _dummy_first_bfd_reloc_code_real },
6195 OPERAND_TYPE_NONE },
6196 { "GOT", { BFD_RELOC_386_GOT32,
6197 BFD_RELOC_X86_64_GOT32 },
6198 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6199 { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
6200 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6201 OPERAND_TYPE_IMM32_32S_DISP32 },
6202 { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
6203 BFD_RELOC_X86_64_TLSDESC_CALL },
6204 OPERAND_TYPE_IMM32_32S_DISP32 },
6205 };
6206 char *cp;
6207 unsigned int j;
6208
6209 if (!IS_ELF)
6210 return NULL;
6211
6212 for (cp = input_line_pointer; *cp != '@'; cp++)
6213 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6214 return NULL;
6215
6216 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6217 {
6218 int len;
6219
6220 len = strlen (gotrel[j].str);
6221 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6222 {
6223 if (gotrel[j].rel[object_64bit] != 0)
6224 {
6225 int first, second;
6226 char *tmpbuf, *past_reloc;
6227
6228 *rel = gotrel[j].rel[object_64bit];
6229 if (adjust)
6230 *adjust = len;
6231
6232 if (types)
6233 {
6234 if (flag_code != CODE_64BIT)
6235 {
6236 types->bitfield.imm32 = 1;
6237 types->bitfield.disp32 = 1;
6238 }
6239 else
6240 *types = gotrel[j].types64;
6241 }
6242
6243 if (GOT_symbol == NULL)
6244 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6245
6246 /* The length of the first part of our input line. */
6247 first = cp - input_line_pointer;
6248
6249 /* The second part goes from after the reloc token until
6250 (and including) an end_of_line char or comma. */
6251 past_reloc = cp + 1 + len;
6252 cp = past_reloc;
6253 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6254 ++cp;
6255 second = cp + 1 - past_reloc;
6256
6257 /* Allocate and copy string. The trailing NUL shouldn't
6258 be necessary, but be safe. */
6259 tmpbuf = (char *) xmalloc (first + second + 2);
6260 memcpy (tmpbuf, input_line_pointer, first);
6261 if (second != 0 && *past_reloc != ' ')
6262 /* Replace the relocation token with ' ', so that
6263 errors like foo@GOTOFF1 will be detected. */
6264 tmpbuf[first++] = ' ';
6265 memcpy (tmpbuf + first, past_reloc, second);
6266 tmpbuf[first + second] = '\0';
6267 return tmpbuf;
6268 }
6269
6270 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6271 gotrel[j].str, 1 << (5 + object_64bit));
6272 return NULL;
6273 }
6274 }
6275
6276 /* Might be a symbol version string. Don't as_bad here. */
6277 return NULL;
6278 }
6279
6280 void
6281 x86_cons (expressionS *exp, int size)
6282 {
6283 intel_syntax = -intel_syntax;
6284
6285 if (size == 4 || (object_64bit && size == 8))
6286 {
6287 /* Handle @GOTOFF and the like in an expression. */
6288 char *save;
6289 char *gotfree_input_line;
6290 int adjust;
6291
6292 save = input_line_pointer;
6293 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6294 if (gotfree_input_line)
6295 input_line_pointer = gotfree_input_line;
6296
6297 expression (exp);
6298
6299 if (gotfree_input_line)
6300 {
6301 /* expression () has merrily parsed up to the end of line,
6302 or a comma - in the wrong buffer. Transfer how far
6303 input_line_pointer has moved to the right buffer. */
6304 input_line_pointer = (save
6305 + (input_line_pointer - gotfree_input_line)
6306 + adjust);
6307 free (gotfree_input_line);
6308 if (exp->X_op == O_constant
6309 || exp->X_op == O_absent
6310 || exp->X_op == O_illegal
6311 || exp->X_op == O_register
6312 || exp->X_op == O_big)
6313 {
6314 char c = *input_line_pointer;
6315 *input_line_pointer = 0;
6316 as_bad (_("missing or invalid expression `%s'"), save);
6317 *input_line_pointer = c;
6318 }
6319 }
6320 }
6321 else
6322 expression (exp);
6323
6324 intel_syntax = -intel_syntax;
6325
6326 if (intel_syntax)
6327 i386_intel_simplify (exp);
6328 }
6329 #endif
6330
6331 static void
6332 signed_cons (int size)
6333 {
6334 if (flag_code == CODE_64BIT)
6335 cons_sign = 1;
6336 cons (size);
6337 cons_sign = -1;
6338 }
6339
6340 #ifdef TE_PE
6341 static void
6342 pe_directive_secrel (dummy)
6343 int dummy ATTRIBUTE_UNUSED;
6344 {
6345 expressionS exp;
6346
6347 do
6348 {
6349 expression (&exp);
6350 if (exp.X_op == O_symbol)
6351 exp.X_op = O_secrel;
6352
6353 emit_expr (&exp, 4);
6354 }
6355 while (*input_line_pointer++ == ',');
6356
6357 input_line_pointer--;
6358 demand_empty_rest_of_line ();
6359 }
6360 #endif
6361
6362 static int
6363 i386_immediate (char *imm_start)
6364 {
6365 char *save_input_line_pointer;
6366 char *gotfree_input_line;
6367 segT exp_seg = 0;
6368 expressionS *exp;
6369 i386_operand_type types;
6370
6371 operand_type_set (&types, ~0);
6372
6373 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6374 {
6375 as_bad (_("at most %d immediate operands are allowed"),
6376 MAX_IMMEDIATE_OPERANDS);
6377 return 0;
6378 }
6379
6380 exp = &im_expressions[i.imm_operands++];
6381 i.op[this_operand].imms = exp;
6382
6383 if (is_space_char (*imm_start))
6384 ++imm_start;
6385
6386 save_input_line_pointer = input_line_pointer;
6387 input_line_pointer = imm_start;
6388
6389 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6390 if (gotfree_input_line)
6391 input_line_pointer = gotfree_input_line;
6392
6393 exp_seg = expression (exp);
6394
6395 SKIP_WHITESPACE ();
6396 if (*input_line_pointer)
6397 as_bad (_("junk `%s' after expression"), input_line_pointer);
6398
6399 input_line_pointer = save_input_line_pointer;
6400 if (gotfree_input_line)
6401 {
6402 free (gotfree_input_line);
6403
6404 if (exp->X_op == O_constant || exp->X_op == O_register)
6405 exp->X_op = O_illegal;
6406 }
6407
6408 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6409 }
6410
6411 static int
6412 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6413 i386_operand_type types, const char *imm_start)
6414 {
6415 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6416 {
6417 if (imm_start)
6418 as_bad (_("missing or invalid immediate expression `%s'"),
6419 imm_start);
6420 return 0;
6421 }
6422 else if (exp->X_op == O_constant)
6423 {
6424 /* Size it properly later. */
6425 i.types[this_operand].bitfield.imm64 = 1;
6426 /* If BFD64, sign extend val. */
6427 if (!use_rela_relocations
6428 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6429 exp->X_add_number
6430 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6431 }
6432 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6433 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6434 && exp_seg != absolute_section
6435 && exp_seg != text_section
6436 && exp_seg != data_section
6437 && exp_seg != bss_section
6438 && exp_seg != undefined_section
6439 && !bfd_is_com_section (exp_seg))
6440 {
6441 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6442 return 0;
6443 }
6444 #endif
6445 else if (!intel_syntax && exp->X_op == O_register)
6446 {
6447 if (imm_start)
6448 as_bad (_("illegal immediate register operand %s"), imm_start);
6449 return 0;
6450 }
6451 else
6452 {
6453 /* This is an address. The size of the address will be
6454 determined later, depending on destination register,
6455 suffix, or the default for the section. */
6456 i.types[this_operand].bitfield.imm8 = 1;
6457 i.types[this_operand].bitfield.imm16 = 1;
6458 i.types[this_operand].bitfield.imm32 = 1;
6459 i.types[this_operand].bitfield.imm32s = 1;
6460 i.types[this_operand].bitfield.imm64 = 1;
6461 i.types[this_operand] = operand_type_and (i.types[this_operand],
6462 types);
6463 }
6464
6465 return 1;
6466 }
6467
6468 static char *
6469 i386_scale (char *scale)
6470 {
6471 offsetT val;
6472 char *save = input_line_pointer;
6473
6474 input_line_pointer = scale;
6475 val = get_absolute_expression ();
6476
6477 switch (val)
6478 {
6479 case 1:
6480 i.log2_scale_factor = 0;
6481 break;
6482 case 2:
6483 i.log2_scale_factor = 1;
6484 break;
6485 case 4:
6486 i.log2_scale_factor = 2;
6487 break;
6488 case 8:
6489 i.log2_scale_factor = 3;
6490 break;
6491 default:
6492 {
6493 char sep = *input_line_pointer;
6494
6495 *input_line_pointer = '\0';
6496 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6497 scale);
6498 *input_line_pointer = sep;
6499 input_line_pointer = save;
6500 return NULL;
6501 }
6502 }
6503 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6504 {
6505 as_warn (_("scale factor of %d without an index register"),
6506 1 << i.log2_scale_factor);
6507 i.log2_scale_factor = 0;
6508 }
6509 scale = input_line_pointer;
6510 input_line_pointer = save;
6511 return scale;
6512 }
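/* Editor's illustration, not part of the original source: for an operand
   such as `(%ebx,%esi,4)', i386_scale() is handed the text starting at
   the `4'; it sets i.log2_scale_factor to 2 and returns a pointer just
   past the digit.  A scale factor supplied without an index register
   only draws the warning above and is reset to 0.  */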
6513
6514 static int
6515 i386_displacement (char *disp_start, char *disp_end)
6516 {
6517 expressionS *exp;
6518 segT exp_seg = 0;
6519 char *save_input_line_pointer;
6520 char *gotfree_input_line;
6521 int override;
6522 i386_operand_type bigdisp, types = anydisp;
6523 int ret;
6524
6525 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6526 {
6527 as_bad (_("at most %d displacement operands are allowed"),
6528 MAX_MEMORY_OPERANDS);
6529 return 0;
6530 }
6531
6532 operand_type_set (&bigdisp, 0);
6533 if ((i.types[this_operand].bitfield.jumpabsolute)
6534 || (!current_templates->start->opcode_modifier.jump
6535 && !current_templates->start->opcode_modifier.jumpdword))
6536 {
6537 bigdisp.bitfield.disp32 = 1;
6538 override = (i.prefix[ADDR_PREFIX] != 0);
6539 if (flag_code == CODE_64BIT)
6540 {
6541 if (!override)
6542 {
6543 bigdisp.bitfield.disp32s = 1;
6544 bigdisp.bitfield.disp64 = 1;
6545 }
6546 }
6547 else if ((flag_code == CODE_16BIT) ^ override)
6548 {
6549 bigdisp.bitfield.disp32 = 0;
6550 bigdisp.bitfield.disp16 = 1;
6551 }
6552 }
6553 else
6554 {
6555 /* For PC-relative branches, the width of the displacement
6556 is dependent upon data size, not address size. */
6557 override = (i.prefix[DATA_PREFIX] != 0);
6558 if (flag_code == CODE_64BIT)
6559 {
6560 if (override || i.suffix == WORD_MNEM_SUFFIX)
6561 bigdisp.bitfield.disp16 = 1;
6562 else
6563 {
6564 bigdisp.bitfield.disp32 = 1;
6565 bigdisp.bitfield.disp32s = 1;
6566 }
6567 }
6568 else
6569 {
6570 if (!override)
6571 override = (i.suffix == (flag_code != CODE_16BIT
6572 ? WORD_MNEM_SUFFIX
6573 : LONG_MNEM_SUFFIX));
6574 bigdisp.bitfield.disp32 = 1;
6575 if ((flag_code == CODE_16BIT) ^ override)
6576 {
6577 bigdisp.bitfield.disp32 = 0;
6578 bigdisp.bitfield.disp16 = 1;
6579 }
6580 }
6581 }
6582 i.types[this_operand] = operand_type_or (i.types[this_operand],
6583 bigdisp);
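  /* Editor's note, not part of the original source: to summarize the
     widths chosen above -- a plain memory operand in 64-bit mode with no
     address-size prefix may use Disp32, Disp32S or Disp64, while a 0x67
     prefix restricts it to Disp32; for PC-relative branches the data
     size decides instead, so a 64-bit `jmp' gets Disp32/Disp32S and a
     `w' suffix or data-size prefix narrows it to Disp16.  */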
6584
6585 exp = &disp_expressions[i.disp_operands];
6586 i.op[this_operand].disps = exp;
6587 i.disp_operands++;
6588 save_input_line_pointer = input_line_pointer;
6589 input_line_pointer = disp_start;
6590 END_STRING_AND_SAVE (disp_end);
6591
6592 #ifndef GCC_ASM_O_HACK
6593 #define GCC_ASM_O_HACK 0
6594 #endif
6595 #if GCC_ASM_O_HACK
6596 END_STRING_AND_SAVE (disp_end + 1);
6597 if (i.types[this_operand].bitfield.baseIndex
6598 && displacement_string_end[-1] == '+')
6599 {
6600 /* This hack is to avoid a warning when using the "o"
6601 constraint within gcc asm statements.
6602 For instance:
6603
6604 #define _set_tssldt_desc(n,addr,limit,type) \
6605 __asm__ __volatile__ ( \
6606 "movw %w2,%0\n\t" \
6607 "movw %w1,2+%0\n\t" \
6608 "rorl $16,%1\n\t" \
6609 "movb %b1,4+%0\n\t" \
6610 "movb %4,5+%0\n\t" \
6611 "movb $0,6+%0\n\t" \
6612 "movb %h1,7+%0\n\t" \
6613 "rorl $16,%1" \
6614 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6615
6616 This works great except that the output assembler ends
6617 up looking a bit weird if it turns out that there is
6618 no offset. You end up producing code that looks like:
6619
6620 #APP
6621 movw $235,(%eax)
6622 movw %dx,2+(%eax)
6623 rorl $16,%edx
6624 movb %dl,4+(%eax)
6625 movb $137,5+(%eax)
6626 movb $0,6+(%eax)
6627 movb %dh,7+(%eax)
6628 rorl $16,%edx
6629 #NO_APP
6630
6631 So here we provide the missing zero. */
6632
6633 *displacement_string_end = '0';
6634 }
6635 #endif
6636 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6637 if (gotfree_input_line)
6638 input_line_pointer = gotfree_input_line;
6639
6640 exp_seg = expression (exp);
6641
6642 SKIP_WHITESPACE ();
6643 if (*input_line_pointer)
6644 as_bad (_("junk `%s' after expression"), input_line_pointer);
6645 #if GCC_ASM_O_HACK
6646 RESTORE_END_STRING (disp_end + 1);
6647 #endif
6648 input_line_pointer = save_input_line_pointer;
6649 if (gotfree_input_line)
6650 {
6651 free (gotfree_input_line);
6652
6653 if (exp->X_op == O_constant || exp->X_op == O_register)
6654 exp->X_op = O_illegal;
6655 }
6656
6657 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6658
6659 RESTORE_END_STRING (disp_end);
6660
6661 return ret;
6662 }
6663
6664 static int
6665 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6666 i386_operand_type types, const char *disp_start)
6667 {
6668 i386_operand_type bigdisp;
6669 int ret = 1;
6670
6671 /* We do this to make sure that the section symbol is in
6672 the symbol table. We will ultimately change the relocation
6673 to be relative to the beginning of the section. */
6674 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6675 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6676 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6677 {
6678 if (exp->X_op != O_symbol)
6679 goto inv_disp;
6680
6681 if (S_IS_LOCAL (exp->X_add_symbol)
6682 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
6683 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6684 exp->X_op = O_subtract;
6685 exp->X_op_symbol = GOT_symbol;
6686 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6687 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6688 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6689 i.reloc[this_operand] = BFD_RELOC_64;
6690 else
6691 i.reloc[this_operand] = BFD_RELOC_32;
6692 }
6693
6694 else if (exp->X_op == O_absent
6695 || exp->X_op == O_illegal
6696 || exp->X_op == O_big)
6697 {
6698 inv_disp:
6699 as_bad (_("missing or invalid displacement expression `%s'"),
6700 disp_start);
6701 ret = 0;
6702 }
6703
6704 else if (flag_code == CODE_64BIT
6705 && !i.prefix[ADDR_PREFIX]
6706 && exp->X_op == O_constant)
6707 {
6708 	   /* Since the displacement is sign-extended to 64 bits, don't allow
6709 	      disp32, and turn off disp32s if it is out of range.  */
6710 i.types[this_operand].bitfield.disp32 = 0;
6711 if (!fits_in_signed_long (exp->X_add_number))
6712 {
6713 i.types[this_operand].bitfield.disp32s = 0;
6714 if (i.types[this_operand].bitfield.baseindex)
6715 {
6716 		  as_bad (_("0x%lx out of range of signed 32bit displacement"),
6717 (long) exp->X_add_number);
6718 ret = 0;
6719 }
6720 }
6721 }
6722
6723 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6724 else if (exp->X_op != O_constant
6725 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6726 && exp_seg != absolute_section
6727 && exp_seg != text_section
6728 && exp_seg != data_section
6729 && exp_seg != bss_section
6730 && exp_seg != undefined_section
6731 && !bfd_is_com_section (exp_seg))
6732 {
6733 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6734 ret = 0;
6735 }
6736 #endif
6737
6738 /* Check if this is a displacement only operand. */
6739 bigdisp = i.types[this_operand];
6740 bigdisp.bitfield.disp8 = 0;
6741 bigdisp.bitfield.disp16 = 0;
6742 bigdisp.bitfield.disp32 = 0;
6743 bigdisp.bitfield.disp32s = 0;
6744 bigdisp.bitfield.disp64 = 0;
6745 if (operand_type_all_zero (&bigdisp))
6746 i.types[this_operand] = operand_type_and (i.types[this_operand],
6747 types);
6748
6749 return ret;
6750 }
6751
6752 /* Make sure the memory operand we've been dealt is valid.
6753 Return 1 on success, 0 on a failure. */
6754
6755 static int
6756 i386_index_check (const char *operand_string)
6757 {
6758 int ok;
6759 const char *kind = "base/index";
6760 #if INFER_ADDR_PREFIX
6761 int fudged = 0;
6762
6763 tryprefix:
6764 #endif
6765 ok = 1;
6766 if (current_templates->start->opcode_modifier.isstring
6767 && !current_templates->start->opcode_modifier.immext
6768 && (current_templates->end[-1].opcode_modifier.isstring
6769 || i.mem_operands))
6770 {
6771 /* Memory operands of string insns are special in that they only allow
6772 a single register (rDI, rSI, or rBX) as their memory address. */
6773 unsigned int expected;
6774
6775 kind = "string address";
6776
6777 if (current_templates->start->opcode_modifier.w)
6778 {
6779 i386_operand_type type = current_templates->end[-1].operand_types[0];
6780
6781 if (!type.bitfield.baseindex
6782 || ((!i.mem_operands != !intel_syntax)
6783 && current_templates->end[-1].operand_types[1]
6784 .bitfield.baseindex))
6785 type = current_templates->end[-1].operand_types[1];
6786 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
6787 }
6788 else
6789 expected = 3 /* rBX */;
6790
6791 if (!i.base_reg || i.index_reg
6792 || operand_type_check (i.types[this_operand], disp))
6793 ok = -1;
6794 else if (!(flag_code == CODE_64BIT
6795 ? i.prefix[ADDR_PREFIX]
6796 ? i.base_reg->reg_type.bitfield.reg32
6797 : i.base_reg->reg_type.bitfield.reg64
6798 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6799 ? i.base_reg->reg_type.bitfield.reg32
6800 : i.base_reg->reg_type.bitfield.reg16))
6801 ok = 0;
6802 else if (i.base_reg->reg_num != expected)
6803 ok = -1;
6804
6805 if (ok < 0)
6806 {
6807 unsigned int j;
6808
6809 for (j = 0; j < i386_regtab_size; ++j)
6810 if ((flag_code == CODE_64BIT
6811 ? i.prefix[ADDR_PREFIX]
6812 ? i386_regtab[j].reg_type.bitfield.reg32
6813 : i386_regtab[j].reg_type.bitfield.reg64
6814 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6815 ? i386_regtab[j].reg_type.bitfield.reg32
6816 : i386_regtab[j].reg_type.bitfield.reg16)
6817 && i386_regtab[j].reg_num == expected)
6818 break;
6819 gas_assert (j < i386_regtab_size);
6820 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
6821 operand_string,
6822 intel_syntax ? '[' : '(',
6823 register_prefix,
6824 i386_regtab[j].reg_name,
6825 intel_syntax ? ']' : ')');
6826 ok = 1;
6827 }
6828 }
6829 else if (flag_code == CODE_64BIT)
6830 {
6831 if ((i.base_reg
6832 && ((i.prefix[ADDR_PREFIX] == 0
6833 && !i.base_reg->reg_type.bitfield.reg64)
6834 || (i.prefix[ADDR_PREFIX]
6835 && !i.base_reg->reg_type.bitfield.reg32))
6836 && (i.index_reg
6837 || i.base_reg->reg_num !=
6838 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
6839 || (i.index_reg
6840 && (!i.index_reg->reg_type.bitfield.baseindex
6841 || (i.prefix[ADDR_PREFIX] == 0
6842 && i.index_reg->reg_num != RegRiz
6843 && !i.index_reg->reg_type.bitfield.reg64
6844 )
6845 || (i.prefix[ADDR_PREFIX]
6846 && i.index_reg->reg_num != RegEiz
6847 && !i.index_reg->reg_type.bitfield.reg32))))
6848 ok = 0;
6849 }
6850 else
6851 {
6852 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
6853 {
6854 /* 16bit checks. */
6855 if ((i.base_reg
6856 && (!i.base_reg->reg_type.bitfield.reg16
6857 || !i.base_reg->reg_type.bitfield.baseindex))
6858 || (i.index_reg
6859 && (!i.index_reg->reg_type.bitfield.reg16
6860 || !i.index_reg->reg_type.bitfield.baseindex
6861 || !(i.base_reg
6862 && i.base_reg->reg_num < 6
6863 && i.index_reg->reg_num >= 6
6864 && i.log2_scale_factor == 0))))
6865 ok = 0;
6866 }
6867 else
6868 {
6869 /* 32bit checks. */
6870 if ((i.base_reg
6871 && !i.base_reg->reg_type.bitfield.reg32)
6872 || (i.index_reg
6873 && ((!i.index_reg->reg_type.bitfield.reg32
6874 && i.index_reg->reg_num != RegEiz)
6875 || !i.index_reg->reg_type.bitfield.baseindex)))
6876 ok = 0;
6877 }
6878 }
6879 if (!ok)
6880 {
6881 #if INFER_ADDR_PREFIX
6882 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
6883 {
6884 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
6885 i.prefixes += 1;
6886 /* Change the size of any displacement too. At most one of
6887 Disp16 or Disp32 is set.
6888 FIXME. There doesn't seem to be any real need for separate
6889 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
6890 Removing them would probably clean up the code quite a lot. */
6891 if (flag_code != CODE_64BIT
6892 && (i.types[this_operand].bitfield.disp16
6893 || i.types[this_operand].bitfield.disp32))
6894 i.types[this_operand]
6895 = operand_type_xor (i.types[this_operand], disp16_32);
6896 fudged = 1;
6897 goto tryprefix;
6898 }
6899 if (fudged)
6900 as_bad (_("`%s' is not a valid %s expression"),
6901 operand_string,
6902 kind);
6903 else
6904 #endif
6905 as_bad (_("`%s' is not a valid %s-bit %s expression"),
6906 operand_string,
6907 flag_code_names[i.prefix[ADDR_PREFIX]
6908 ? flag_code == CODE_32BIT
6909 ? CODE_16BIT
6910 : CODE_32BIT
6911 : flag_code],
6912 kind);
6913 }
6914 return ok;
6915 }
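/* Editor's illustration, not part of the original source: the string-insn
   branch above is what warns about, say, `stos %al, (%ebx)' in 32-bit code
   and suggests `(%edi)' instead, while `xlat (%ecx)' is steered towards
   `(%ebx)'.  The INFER_ADDR_PREFIX retry is what lets 16-bit addressing
   such as `movw (%bx,%si), %ax' assemble in 32-bit code by silently
   adding the 0x67 address-size prefix.  */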
6916
6917 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
6918 on error. */
6919
6920 static int
6921 i386_att_operand (char *operand_string)
6922 {
6923 const reg_entry *r;
6924 char *end_op;
6925 char *op_string = operand_string;
6926
6927 if (is_space_char (*op_string))
6928 ++op_string;
6929
6930 /* We check for an absolute prefix (differentiating,
6931      for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
6932 if (*op_string == ABSOLUTE_PREFIX)
6933 {
6934 ++op_string;
6935 if (is_space_char (*op_string))
6936 ++op_string;
6937 i.types[this_operand].bitfield.jumpabsolute = 1;
6938 }
6939
6940 /* Check if operand is a register. */
6941 if ((r = parse_register (op_string, &end_op)) != NULL)
6942 {
6943 i386_operand_type temp;
6944
6945 /* Check for a segment override by searching for ':' after a
6946 segment register. */
6947 op_string = end_op;
6948 if (is_space_char (*op_string))
6949 ++op_string;
6950 if (*op_string == ':'
6951 && (r->reg_type.bitfield.sreg2
6952 || r->reg_type.bitfield.sreg3))
6953 {
6954 switch (r->reg_num)
6955 {
6956 case 0:
6957 i.seg[i.mem_operands] = &es;
6958 break;
6959 case 1:
6960 i.seg[i.mem_operands] = &cs;
6961 break;
6962 case 2:
6963 i.seg[i.mem_operands] = &ss;
6964 break;
6965 case 3:
6966 i.seg[i.mem_operands] = &ds;
6967 break;
6968 case 4:
6969 i.seg[i.mem_operands] = &fs;
6970 break;
6971 case 5:
6972 i.seg[i.mem_operands] = &gs;
6973 break;
6974 }
6975
6976 /* Skip the ':' and whitespace. */
6977 ++op_string;
6978 if (is_space_char (*op_string))
6979 ++op_string;
6980
6981 if (!is_digit_char (*op_string)
6982 && !is_identifier_char (*op_string)
6983 && *op_string != '('
6984 && *op_string != ABSOLUTE_PREFIX)
6985 {
6986 as_bad (_("bad memory operand `%s'"), op_string);
6987 return 0;
6988 }
6989 /* Handle case of %es:*foo. */
6990 if (*op_string == ABSOLUTE_PREFIX)
6991 {
6992 ++op_string;
6993 if (is_space_char (*op_string))
6994 ++op_string;
6995 i.types[this_operand].bitfield.jumpabsolute = 1;
6996 }
6997 goto do_memory_reference;
6998 }
6999 if (*op_string)
7000 {
7001 as_bad (_("junk `%s' after register"), op_string);
7002 return 0;
7003 }
7004 temp = r->reg_type;
7005 temp.bitfield.baseindex = 0;
7006 i.types[this_operand] = operand_type_or (i.types[this_operand],
7007 temp);
7008 i.types[this_operand].bitfield.unspecified = 0;
7009 i.op[this_operand].regs = r;
7010 i.reg_operands++;
7011 }
7012 else if (*op_string == REGISTER_PREFIX)
7013 {
7014 as_bad (_("bad register name `%s'"), op_string);
7015 return 0;
7016 }
7017 else if (*op_string == IMMEDIATE_PREFIX)
7018 {
7019 ++op_string;
7020 if (i.types[this_operand].bitfield.jumpabsolute)
7021 {
7022 as_bad (_("immediate operand illegal with absolute jump"));
7023 return 0;
7024 }
7025 if (!i386_immediate (op_string))
7026 return 0;
7027 }
7028 else if (is_digit_char (*op_string)
7029 || is_identifier_char (*op_string)
7030 || *op_string == '(')
7031 {
7032 /* This is a memory reference of some sort. */
7033 char *base_string;
7034
7035 /* Start and end of displacement string expression (if found). */
7036 char *displacement_string_start;
7037 char *displacement_string_end;
7038
7039 do_memory_reference:
7040 if ((i.mem_operands == 1
7041 && !current_templates->start->opcode_modifier.isstring)
7042 || i.mem_operands == 2)
7043 {
7044 as_bad (_("too many memory references for `%s'"),
7045 current_templates->start->name);
7046 return 0;
7047 }
7048
7049 /* Check for base index form. We detect the base index form by
7050 looking for an ')' at the end of the operand, searching
7051 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7052 after the '('. */
7053 base_string = op_string + strlen (op_string);
7054
7055 --base_string;
7056 if (is_space_char (*base_string))
7057 --base_string;
7058
7059       /* If we only have a displacement, set up for it to be parsed later.  */
7060 displacement_string_start = op_string;
7061 displacement_string_end = base_string + 1;
7062
7063 if (*base_string == ')')
7064 {
7065 char *temp_string;
7066 unsigned int parens_balanced = 1;
7067 	  /* We've already checked that the numbers of left & right ()'s are
7068 equal, so this loop will not be infinite. */
7069 do
7070 {
7071 base_string--;
7072 if (*base_string == ')')
7073 parens_balanced++;
7074 if (*base_string == '(')
7075 parens_balanced--;
7076 }
7077 while (parens_balanced);
7078
7079 temp_string = base_string;
7080
7081 /* Skip past '(' and whitespace. */
7082 ++base_string;
7083 if (is_space_char (*base_string))
7084 ++base_string;
7085
7086 if (*base_string == ','
7087 || ((i.base_reg = parse_register (base_string, &end_op))
7088 != NULL))
7089 {
7090 displacement_string_end = temp_string;
7091
7092 i.types[this_operand].bitfield.baseindex = 1;
7093
7094 if (i.base_reg)
7095 {
7096 base_string = end_op;
7097 if (is_space_char (*base_string))
7098 ++base_string;
7099 }
7100
7101 /* There may be an index reg or scale factor here. */
7102 if (*base_string == ',')
7103 {
7104 ++base_string;
7105 if (is_space_char (*base_string))
7106 ++base_string;
7107
7108 if ((i.index_reg = parse_register (base_string, &end_op))
7109 != NULL)
7110 {
7111 base_string = end_op;
7112 if (is_space_char (*base_string))
7113 ++base_string;
7114 if (*base_string == ',')
7115 {
7116 ++base_string;
7117 if (is_space_char (*base_string))
7118 ++base_string;
7119 }
7120 else if (*base_string != ')')
7121 {
7122 as_bad (_("expecting `,' or `)' "
7123 "after index register in `%s'"),
7124 operand_string);
7125 return 0;
7126 }
7127 }
7128 else if (*base_string == REGISTER_PREFIX)
7129 {
7130 as_bad (_("bad register name `%s'"), base_string);
7131 return 0;
7132 }
7133
7134 /* Check for scale factor. */
7135 if (*base_string != ')')
7136 {
7137 char *end_scale = i386_scale (base_string);
7138
7139 if (!end_scale)
7140 return 0;
7141
7142 base_string = end_scale;
7143 if (is_space_char (*base_string))
7144 ++base_string;
7145 if (*base_string != ')')
7146 {
7147 as_bad (_("expecting `)' "
7148 "after scale factor in `%s'"),
7149 operand_string);
7150 return 0;
7151 }
7152 }
7153 else if (!i.index_reg)
7154 {
7155 as_bad (_("expecting index register or scale factor "
7156 "after `,'; got '%c'"),
7157 *base_string);
7158 return 0;
7159 }
7160 }
7161 else if (*base_string != ')')
7162 {
7163 as_bad (_("expecting `,' or `)' "
7164 "after base register in `%s'"),
7165 operand_string);
7166 return 0;
7167 }
7168 }
7169 else if (*base_string == REGISTER_PREFIX)
7170 {
7171 as_bad (_("bad register name `%s'"), base_string);
7172 return 0;
7173 }
7174 }
7175
7176 /* If there's an expression beginning the operand, parse it,
7177 assuming displacement_string_start and
7178 displacement_string_end are meaningful. */
7179 if (displacement_string_start != displacement_string_end)
7180 {
7181 if (!i386_displacement (displacement_string_start,
7182 displacement_string_end))
7183 return 0;
7184 }
7185
7186 /* Special case for (%dx) while doing input/output op. */
7187 if (i.base_reg
7188 && operand_type_equal (&i.base_reg->reg_type,
7189 &reg16_inoutportreg)
7190 && i.index_reg == 0
7191 && i.log2_scale_factor == 0
7192 && i.seg[i.mem_operands] == 0
7193 && !operand_type_check (i.types[this_operand], disp))
7194 {
7195 i.types[this_operand] = inoutportreg;
7196 return 1;
7197 }
7198
7199 if (i386_index_check (operand_string) == 0)
7200 return 0;
7201 i.types[this_operand].bitfield.mem = 1;
7202 i.mem_operands++;
7203 }
7204 else
7205 {
7206 /* It's not a memory operand; argh! */
7207 as_bad (_("invalid char %s beginning operand %d `%s'"),
7208 output_invalid (*op_string),
7209 this_operand + 1,
7210 op_string);
7211 return 0;
7212 }
7213 return 1; /* Normal return. */
7214 }
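/* Editor's illustration, not part of the original source: for an AT&T
   operand such as `-4(%ebp,%eax,4)' the code above locates the trailing
   `)', walks back to the matching `(', and splits the text so that the
   displacement expression is `-4', i.base_reg is %ebp, i.index_reg is
   %eax and i.log2_scale_factor becomes 2.  A bare `(%dx)' operand of the
   in/out instructions is the one special case that skips
   i386_index_check() entirely.  */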
7215 \f
7216 /* md_estimate_size_before_relax()
7217
7218 Called just before relax() for rs_machine_dependent frags. The x86
7219 assembler uses these frags to handle variable size jump
7220 instructions.
7221
7222 Any symbol that is now undefined will not become defined.
7223 Return the correct fr_subtype in the frag.
7224 Return the initial "guess for variable size of frag" to caller.
7225 The guess is actually the growth beyond the fixed part. Whatever
7226 we do to grow the fixed or variable part contributes to our
7227 returned value. */
7228
7229 int
7230 md_estimate_size_before_relax (fragP, segment)
7231 fragS *fragP;
7232 segT segment;
7233 {
7234 /* We've already got fragP->fr_subtype right; all we have to do is
7235 check for un-relaxable symbols. On an ELF system, we can't relax
7236 an externally visible symbol, because it may be overridden by a
7237 shared library. */
7238 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7239 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7240 || (IS_ELF
7241 && (S_IS_EXTERNAL (fragP->fr_symbol)
7242 || S_IS_WEAK (fragP->fr_symbol)
7243 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7244 & BSF_GNU_INDIRECT_FUNCTION))))
7245 #endif
7246 #if defined (OBJ_COFF) && defined (TE_PE)
7247 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7248 && S_IS_WEAK (fragP->fr_symbol))
7249 #endif
7250 )
7251 {
7252 /* Symbol is undefined in this segment, or we need to keep a
7253 reloc so that weak symbols can be overridden. */
7254 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7255 enum bfd_reloc_code_real reloc_type;
7256 unsigned char *opcode;
7257 int old_fr_fix;
7258
7259 if (fragP->fr_var != NO_RELOC)
7260 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7261 else if (size == 2)
7262 reloc_type = BFD_RELOC_16_PCREL;
7263 else
7264 reloc_type = BFD_RELOC_32_PCREL;
7265
7266 old_fr_fix = fragP->fr_fix;
7267 opcode = (unsigned char *) fragP->fr_opcode;
7268
7269 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7270 {
7271 case UNCOND_JUMP:
7272 /* Make jmp (0xeb) a (d)word displacement jump. */
7273 opcode[0] = 0xe9;
7274 fragP->fr_fix += size;
7275 fix_new (fragP, old_fr_fix, size,
7276 fragP->fr_symbol,
7277 fragP->fr_offset, 1,
7278 reloc_type);
7279 break;
7280
7281 case COND_JUMP86:
7282 if (size == 2
7283 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7284 {
7285 /* Negate the condition, and branch past an
7286 unconditional jump. */
7287 opcode[0] ^= 1;
7288 opcode[1] = 3;
7289 /* Insert an unconditional jump. */
7290 opcode[2] = 0xe9;
7291 /* We added two extra opcode bytes, and have a two byte
7292 offset. */
7293 fragP->fr_fix += 2 + 2;
7294 fix_new (fragP, old_fr_fix + 2, 2,
7295 fragP->fr_symbol,
7296 fragP->fr_offset, 1,
7297 reloc_type);
7298 break;
7299 }
7300 /* Fall through. */
7301
7302 case COND_JUMP:
7303 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7304 {
7305 fixS *fixP;
7306
7307 fragP->fr_fix += 1;
7308 fixP = fix_new (fragP, old_fr_fix, 1,
7309 fragP->fr_symbol,
7310 fragP->fr_offset, 1,
7311 BFD_RELOC_8_PCREL);
7312 fixP->fx_signed = 1;
7313 break;
7314 }
7315
7316 /* This changes the byte-displacement jump 0x7N
7317 to the (d)word-displacement jump 0x0f,0x8N. */
7318 opcode[1] = opcode[0] + 0x10;
7319 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7320 /* We've added an opcode byte. */
7321 fragP->fr_fix += 1 + size;
7322 fix_new (fragP, old_fr_fix + 1, size,
7323 fragP->fr_symbol,
7324 fragP->fr_offset, 1,
7325 reloc_type);
7326 break;
7327
7328 default:
7329 BAD_CASE (fragP->fr_subtype);
7330 break;
7331 }
7332 frag_wane (fragP);
7333 return fragP->fr_fix - old_fr_fix;
7334 }
7335
7336 /* Guess size depending on current relax state. Initially the relax
7337 state will correspond to a short jump and we return 1, because
7338 the variable part of the frag (the branch offset) is one byte
7339 long. However, we can relax a section more than once and in that
7340 case we must either set fr_subtype back to the unrelaxed state,
7341 or return the value for the appropriate branch. */
7342 return md_relax_table[fragP->fr_subtype].rlx_length;
7343 }
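/* Editor's illustration, not part of the original source: if a conditional
   jump such as `jne foo' targets an ELF global (hypothetical symbol `foo'
   that this pass cannot resolve locally), the COND_JUMP case above rewrites
   the byte-displacement form 0x75 <disp8> as 0x0f 0x85 <disp32>, grows
   fr_fix by 5 and emits a BFD_RELOC_32_PCREL fixup; a locally defined
   target instead keeps the short form, leaving relax() and
   md_convert_frag() to widen it only if it proves to be out of range.  */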
7344
7345 /* Called after relax() is finished.
7346
7347 In: Address of frag.
7348 fr_type == rs_machine_dependent.
7349 fr_subtype is what the address relaxed to.
7350
7351 Out: Any fixSs and constants are set up.
7352 Caller will turn frag into a ".space 0". */
7353
7354 void
7355 md_convert_frag (abfd, sec, fragP)
7356 bfd *abfd ATTRIBUTE_UNUSED;
7357 segT sec ATTRIBUTE_UNUSED;
7358 fragS *fragP;
7359 {
7360 unsigned char *opcode;
7361 unsigned char *where_to_put_displacement = NULL;
7362 offsetT target_address;
7363 offsetT opcode_address;
7364 unsigned int extension = 0;
7365 offsetT displacement_from_opcode_start;
7366
7367 opcode = (unsigned char *) fragP->fr_opcode;
7368
7369 /* Address we want to reach in file space. */
7370 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7371
7372 /* Address opcode resides at in file space. */
7373 opcode_address = fragP->fr_address + fragP->fr_fix;
7374
7375 /* Displacement from opcode start to fill into instruction. */
7376 displacement_from_opcode_start = target_address - opcode_address;
7377
7378 if ((fragP->fr_subtype & BIG) == 0)
7379 {
7380 /* Don't have to change opcode. */
7381 extension = 1; /* 1 opcode + 1 displacement */
7382 where_to_put_displacement = &opcode[1];
7383 }
7384 else
7385 {
7386 if (no_cond_jump_promotion
7387 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7388 as_warn_where (fragP->fr_file, fragP->fr_line,
7389 _("long jump required"));
7390
7391 switch (fragP->fr_subtype)
7392 {
7393 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7394 extension = 4; /* 1 opcode + 4 displacement */
7395 opcode[0] = 0xe9;
7396 where_to_put_displacement = &opcode[1];
7397 break;
7398
7399 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7400 extension = 2; /* 1 opcode + 2 displacement */
7401 opcode[0] = 0xe9;
7402 where_to_put_displacement = &opcode[1];
7403 break;
7404
7405 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7406 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7407 extension = 5; /* 2 opcode + 4 displacement */
7408 opcode[1] = opcode[0] + 0x10;
7409 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7410 where_to_put_displacement = &opcode[2];
7411 break;
7412
7413 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7414 extension = 3; /* 2 opcode + 2 displacement */
7415 opcode[1] = opcode[0] + 0x10;
7416 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7417 where_to_put_displacement = &opcode[2];
7418 break;
7419
7420 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7421 extension = 4;
7422 opcode[0] ^= 1;
7423 opcode[1] = 3;
7424 opcode[2] = 0xe9;
7425 where_to_put_displacement = &opcode[3];
7426 break;
7427
7428 default:
7429 BAD_CASE (fragP->fr_subtype);
7430 break;
7431 }
7432 }
7433
7434   /* If the size is less than four we are sure that the operand fits,
7435      but if it's 4, then it could be that the displacement is larger
7436      than +/- 2GB.  */
7437 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7438 && object_64bit
7439 && ((addressT) (displacement_from_opcode_start - extension
7440 + ((addressT) 1 << 31))
7441 > (((addressT) 2 << 31) - 1)))
7442 {
7443 as_bad_where (fragP->fr_file, fragP->fr_line,
7444 _("jump target out of range"));
7445 /* Make us emit 0. */
7446 displacement_from_opcode_start = extension;
7447 }
7448 /* Now put displacement after opcode. */
7449 md_number_to_chars ((char *) where_to_put_displacement,
7450 (valueT) (displacement_from_opcode_start - extension),
7451 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7452 fragP->fr_fix += extension;
7453 }
7454 \f
7455 /* Apply a fixup (fixS) to segment data, once it has been determined
7456 by our caller that we have all the info we need to fix it up.
7457
7458 On the 386, immediates, displacements, and data pointers are all in
7459 the same (little-endian) format, so we don't need to care about which
7460 we are handling. */
7461
7462 void
7463 md_apply_fix (fixP, valP, seg)
7464 /* The fix we're to put in. */
7465 fixS *fixP;
7466 /* Pointer to the value of the bits. */
7467 valueT *valP;
7468 /* Segment fix is from. */
7469 segT seg ATTRIBUTE_UNUSED;
7470 {
7471 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7472 valueT value = *valP;
7473
7474 #if !defined (TE_Mach)
7475 if (fixP->fx_pcrel)
7476 {
7477 switch (fixP->fx_r_type)
7478 {
7479 default:
7480 break;
7481
7482 case BFD_RELOC_64:
7483 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7484 break;
7485 case BFD_RELOC_32:
7486 case BFD_RELOC_X86_64_32S:
7487 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7488 break;
7489 case BFD_RELOC_16:
7490 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7491 break;
7492 case BFD_RELOC_8:
7493 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7494 break;
7495 }
7496 }
7497
7498 if (fixP->fx_addsy != NULL
7499 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7500 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7501 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7502 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7503 && !use_rela_relocations)
7504 {
7505 /* This is a hack. There should be a better way to handle this.
7506 This covers for the fact that bfd_install_relocation will
7507 subtract the current location (for partial_inplace, PC relative
7508 relocations); see more below. */
7509 #ifndef OBJ_AOUT
7510 if (IS_ELF
7511 #ifdef TE_PE
7512 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7513 #endif
7514 )
7515 value += fixP->fx_where + fixP->fx_frag->fr_address;
7516 #endif
7517 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7518 if (IS_ELF)
7519 {
7520 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7521
7522 if ((sym_seg == seg
7523 || (symbol_section_p (fixP->fx_addsy)
7524 && sym_seg != absolute_section))
7525 && !generic_force_reloc (fixP))
7526 {
7527 /* Yes, we add the values in twice. This is because
7528 bfd_install_relocation subtracts them out again. I think
7529 bfd_install_relocation is broken, but I don't dare change
7530 it. FIXME. */
7531 value += fixP->fx_where + fixP->fx_frag->fr_address;
7532 }
7533 }
7534 #endif
7535 #if defined (OBJ_COFF) && defined (TE_PE)
7536 /* For some reason, the PE format does not store a
7537 section address offset for a PC relative symbol. */
7538 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7539 || S_IS_WEAK (fixP->fx_addsy))
7540 value += md_pcrel_from (fixP);
7541 #endif
7542 }
7543 #if defined (OBJ_COFF) && defined (TE_PE)
7544 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7545 {
7546 value -= S_GET_VALUE (fixP->fx_addsy);
7547 }
7548 #endif
7549
7550 /* Fix a few things - the dynamic linker expects certain values here,
7551 and we must not disappoint it. */
7552 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7553 if (IS_ELF && fixP->fx_addsy)
7554 switch (fixP->fx_r_type)
7555 {
7556 case BFD_RELOC_386_PLT32:
7557 case BFD_RELOC_X86_64_PLT32:
7558 /* Make the jump instruction point to the address of the operand. At
7559 runtime we merely add the offset to the actual PLT entry. */
7560 value = -4;
7561 break;
7562
7563 case BFD_RELOC_386_TLS_GD:
7564 case BFD_RELOC_386_TLS_LDM:
7565 case BFD_RELOC_386_TLS_IE_32:
7566 case BFD_RELOC_386_TLS_IE:
7567 case BFD_RELOC_386_TLS_GOTIE:
7568 case BFD_RELOC_386_TLS_GOTDESC:
7569 case BFD_RELOC_X86_64_TLSGD:
7570 case BFD_RELOC_X86_64_TLSLD:
7571 case BFD_RELOC_X86_64_GOTTPOFF:
7572 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7573 value = 0; /* Fully resolved at runtime. No addend. */
7574 /* Fallthrough */
7575 case BFD_RELOC_386_TLS_LE:
7576 case BFD_RELOC_386_TLS_LDO_32:
7577 case BFD_RELOC_386_TLS_LE_32:
7578 case BFD_RELOC_X86_64_DTPOFF32:
7579 case BFD_RELOC_X86_64_DTPOFF64:
7580 case BFD_RELOC_X86_64_TPOFF32:
7581 case BFD_RELOC_X86_64_TPOFF64:
7582 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7583 break;
7584
7585 case BFD_RELOC_386_TLS_DESC_CALL:
7586 case BFD_RELOC_X86_64_TLSDESC_CALL:
7587 value = 0; /* Fully resolved at runtime. No addend. */
7588 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7589 fixP->fx_done = 0;
7590 return;
7591
7592 case BFD_RELOC_386_GOT32:
7593 case BFD_RELOC_X86_64_GOT32:
7594 value = 0; /* Fully resolved at runtime. No addend. */
7595 break;
7596
7597 case BFD_RELOC_VTABLE_INHERIT:
7598 case BFD_RELOC_VTABLE_ENTRY:
7599 fixP->fx_done = 0;
7600 return;
7601
7602 default:
7603 break;
7604 }
7605 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7606 *valP = value;
7607 #endif /* !defined (TE_Mach) */
7608
7609 /* Are we finished with this relocation now? */
7610 if (fixP->fx_addsy == NULL)
7611 fixP->fx_done = 1;
7612 #if defined (OBJ_COFF) && defined (TE_PE)
7613 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7614 {
7615 fixP->fx_done = 0;
7616 /* Remember value for tc_gen_reloc. */
7617 fixP->fx_addnumber = value;
7618 /* Clear out the frag for now. */
7619 value = 0;
7620 }
7621 #endif
7622 else if (use_rela_relocations)
7623 {
7624 fixP->fx_no_overflow = 1;
7625 /* Remember value for tc_gen_reloc. */
7626 fixP->fx_addnumber = value;
7627 value = 0;
7628 }
7629
7630 md_number_to_chars (p, value, fixP->fx_size);
7631 }
7632 \f
7633 char *
7634 md_atof (int type, char *litP, int *sizeP)
7635 {
7636 /* This outputs the LITTLENUMs in REVERSE order;
7637      in accord with the little-endian 386.  */
7638 return ieee_md_atof (type, litP, sizeP, FALSE);
7639 }
7640 \f
7641 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7642
7643 static char *
7644 output_invalid (int c)
7645 {
7646 if (ISPRINT (c))
7647 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7648 "'%c'", c);
7649 else
7650 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7651 "(0x%x)", (unsigned char) c);
7652 return output_invalid_buf;
7653 }
7654
7655 /* REG_STRING starts *before* REGISTER_PREFIX. */
7656
7657 static const reg_entry *
7658 parse_real_register (char *reg_string, char **end_op)
7659 {
7660 char *s = reg_string;
7661 char *p;
7662 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7663 const reg_entry *r;
7664
7665 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7666 if (*s == REGISTER_PREFIX)
7667 ++s;
7668
7669 if (is_space_char (*s))
7670 ++s;
7671
7672 p = reg_name_given;
7673 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7674 {
7675 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7676 return (const reg_entry *) NULL;
7677 s++;
7678 }
7679
7680 /* For naked regs, make sure that we are not dealing with an identifier.
7681 This prevents confusing an identifier like `eax_var' with register
7682 `eax'. */
7683 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7684 return (const reg_entry *) NULL;
7685
7686 *end_op = s;
7687
7688 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7689
7690 /* Handle floating point regs, allowing spaces in the (i) part. */
7691 if (r == i386_regtab /* %st is first entry of table */)
7692 {
7693 if (is_space_char (*s))
7694 ++s;
7695 if (*s == '(')
7696 {
7697 ++s;
7698 if (is_space_char (*s))
7699 ++s;
7700 if (*s >= '0' && *s <= '7')
7701 {
7702 int fpr = *s - '0';
7703 ++s;
7704 if (is_space_char (*s))
7705 ++s;
7706 if (*s == ')')
7707 {
7708 *end_op = s + 1;
7709 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7710 know (r);
7711 return r + fpr;
7712 }
7713 }
7714 /* We have "%st(" then garbage. */
7715 return (const reg_entry *) NULL;
7716 }
7717 }
7718
7719 if (r == NULL || allow_pseudo_reg)
7720 return r;
7721
7722 if (operand_type_all_zero (&r->reg_type))
7723 return (const reg_entry *) NULL;
7724
7725 if ((r->reg_type.bitfield.reg32
7726 || r->reg_type.bitfield.sreg3
7727 || r->reg_type.bitfield.control
7728 || r->reg_type.bitfield.debug
7729 || r->reg_type.bitfield.test)
7730 && !cpu_arch_flags.bitfield.cpui386)
7731 return (const reg_entry *) NULL;
7732
7733 if (r->reg_type.bitfield.floatreg
7734 && !cpu_arch_flags.bitfield.cpu8087
7735 && !cpu_arch_flags.bitfield.cpu287
7736 && !cpu_arch_flags.bitfield.cpu387)
7737 return (const reg_entry *) NULL;
7738
7739 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7740 return (const reg_entry *) NULL;
7741
7742 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7743 return (const reg_entry *) NULL;
7744
7745 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7746 return (const reg_entry *) NULL;
7747
7748   /* Don't allow the fake index registers unless allow_index_reg is non-zero.  */
7749 if (!allow_index_reg
7750 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7751 return (const reg_entry *) NULL;
7752
7753 if (((r->reg_flags & (RegRex64 | RegRex))
7754 || r->reg_type.bitfield.reg64)
7755 && (!cpu_arch_flags.bitfield.cpulm
7756 || !operand_type_equal (&r->reg_type, &control))
7757 && flag_code != CODE_64BIT)
7758 return (const reg_entry *) NULL;
7759
7760 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7761 return (const reg_entry *) NULL;
7762
7763 return r;
7764 }
7765
7766 /* REG_STRING starts *before* REGISTER_PREFIX. */
7767
7768 static const reg_entry *
7769 parse_register (char *reg_string, char **end_op)
7770 {
7771 const reg_entry *r;
7772
7773 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7774 r = parse_real_register (reg_string, end_op);
7775 else
7776 r = NULL;
7777 if (!r)
7778 {
7779 char *save = input_line_pointer;
7780 char c;
7781 symbolS *symbolP;
7782
7783 input_line_pointer = reg_string;
7784 c = get_symbol_end ();
7785 symbolP = symbol_find (reg_string);
7786 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
7787 {
7788 const expressionS *e = symbol_get_value_expression (symbolP);
7789
7790 know (e->X_op == O_register);
7791 know (e->X_add_number >= 0
7792 && (valueT) e->X_add_number < i386_regtab_size);
7793 r = i386_regtab + e->X_add_number;
7794 *end_op = input_line_pointer;
7795 }
7796 *input_line_pointer = c;
7797 input_line_pointer = save;
7798 }
7799 return r;
7800 }
7801
7802 int
7803 i386_parse_name (char *name, expressionS *e, char *nextcharP)
7804 {
7805 const reg_entry *r;
7806 char *end = input_line_pointer;
7807
7808 *end = *nextcharP;
7809 r = parse_register (name, &input_line_pointer);
7810 if (r && end <= input_line_pointer)
7811 {
7812 *nextcharP = *input_line_pointer;
7813 *input_line_pointer = 0;
7814 e->X_op = O_register;
7815 e->X_add_number = r - i386_regtab;
7816 return 1;
7817 }
7818 input_line_pointer = end;
7819 *end = 0;
7820 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
7821 }
7822
7823 void
7824 md_operand (expressionS *e)
7825 {
7826 char *end;
7827 const reg_entry *r;
7828
7829 switch (*input_line_pointer)
7830 {
7831 case REGISTER_PREFIX:
7832 r = parse_real_register (input_line_pointer, &end);
7833 if (r)
7834 {
7835 e->X_op = O_register;
7836 e->X_add_number = r - i386_regtab;
7837 input_line_pointer = end;
7838 }
7839 break;
7840
7841 case '[':
7842 gas_assert (intel_syntax);
7843 end = input_line_pointer++;
7844 expression (e);
7845 if (*input_line_pointer == ']')
7846 {
7847 ++input_line_pointer;
7848 e->X_op_symbol = make_expr_symbol (e);
7849 e->X_add_symbol = NULL;
7850 e->X_add_number = 0;
7851 e->X_op = O_index;
7852 }
7853 else
7854 {
7855 e->X_op = O_absent;
7856 input_line_pointer = end;
7857 }
7858 break;
7859 }
7860 }
7861
7862 \f
7863 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7864 const char *md_shortopts = "kVQ:sqn";
7865 #else
7866 const char *md_shortopts = "qn";
7867 #endif
7868
7869 #define OPTION_32 (OPTION_MD_BASE + 0)
7870 #define OPTION_64 (OPTION_MD_BASE + 1)
7871 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
7872 #define OPTION_MARCH (OPTION_MD_BASE + 3)
7873 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
7874 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
7875 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
7876 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
7877 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
7878 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
7879 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
7880 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
7881 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
7882
7883 struct option md_longopts[] =
7884 {
7885 {"32", no_argument, NULL, OPTION_32},
7886 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7887 || defined (TE_PE) || defined (TE_PEP))
7888 {"64", no_argument, NULL, OPTION_64},
7889 #endif
7890 {"divide", no_argument, NULL, OPTION_DIVIDE},
7891 {"march", required_argument, NULL, OPTION_MARCH},
7892 {"mtune", required_argument, NULL, OPTION_MTUNE},
7893 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
7894 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
7895 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
7896 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
7897 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
7898 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
7899 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
7900 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
7901 {NULL, no_argument, NULL, 0}
7902 };
7903 size_t md_longopts_size = sizeof (md_longopts);
7904
7905 int
7906 md_parse_option (int c, char *arg)
7907 {
7908 unsigned int j;
7909 char *arch, *next;
7910
7911 switch (c)
7912 {
7913 case 'n':
7914 optimize_align_code = 0;
7915 break;
7916
7917 case 'q':
7918 quiet_warnings = 1;
7919 break;
7920
7921 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7922 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
7923 should be emitted or not. FIXME: Not implemented. */
7924 case 'Q':
7925 break;
7926
7927 /* -V: SVR4 argument to print version ID. */
7928 case 'V':
7929 print_version_id ();
7930 break;
7931
7932 /* -k: Ignore for FreeBSD compatibility. */
7933 case 'k':
7934 break;
7935
7936 case 's':
7937 /* -s: On i386 Solaris, this tells the native assembler to use
7938 .stab instead of .stab.excl. We always use .stab anyhow. */
7939 break;
7940 #endif
7941 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7942 || defined (TE_PE) || defined (TE_PEP))
7943 case OPTION_64:
7944 {
7945 const char **list, **l;
7946
7947 list = bfd_target_list ();
7948 for (l = list; *l != NULL; l++)
7949 if (CONST_STRNEQ (*l, "elf64-x86-64")
7950 || strcmp (*l, "coff-x86-64") == 0
7951 || strcmp (*l, "pe-x86-64") == 0
7952 || strcmp (*l, "pei-x86-64") == 0)
7953 {
7954 default_arch = "x86_64";
7955 break;
7956 }
7957 if (*l == NULL)
7958 as_fatal (_("No compiled in support for x86_64"));
7959 free (list);
7960 }
7961 break;
7962 #endif
7963
7964 case OPTION_32:
7965 default_arch = "i386";
7966 break;
7967
7968 case OPTION_DIVIDE:
7969 #ifdef SVR4_COMMENT_CHARS
7970 {
7971 char *n, *t;
7972 const char *s;
7973
7974 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
7975 t = n;
7976 for (s = i386_comment_chars; *s != '\0'; s++)
7977 if (*s != '/')
7978 *t++ = *s;
7979 *t = '\0';
7980 i386_comment_chars = n;
7981 }
7982 #endif
7983 break;
7984
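    /* -march=ARG: ARG is split on '+'; each token is matched against
       the cpu_arch[] table.  A processor name selects the base
       architecture, while an extension name ("ext" / "noext") enables
       or disables that ISA feature.  */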
7985 case OPTION_MARCH:
7986 arch = xstrdup (arg);
7987 do
7988 {
7989 if (*arch == '.')
7990 as_fatal (_("Invalid -march= option: `%s'"), arg);
7991 next = strchr (arch, '+');
7992 if (next)
7993 *next++ = '\0';
7994 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
7995 {
7996 if (strcmp (arch, cpu_arch [j].name) == 0)
7997 {
7998 /* Processor. */
7999 cpu_arch_name = cpu_arch[j].name;
8000 cpu_sub_arch_name = NULL;
8001 cpu_arch_flags = cpu_arch[j].flags;
8002 cpu_arch_isa = cpu_arch[j].type;
8003 cpu_arch_isa_flags = cpu_arch[j].flags;
8004 if (!cpu_arch_tune_set)
8005 {
8006 cpu_arch_tune = cpu_arch_isa;
8007 cpu_arch_tune_flags = cpu_arch_isa_flags;
8008 }
8009 break;
8010 }
8011 else if (*cpu_arch [j].name == '.'
8012 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8013 {
8014 		  /* ISA extension.  */
8015 i386_cpu_flags flags;
8016
8017 if (strncmp (arch, "no", 2))
8018 flags = cpu_flags_or (cpu_arch_flags,
8019 cpu_arch[j].flags);
8020 else
8021 flags = cpu_flags_and_not (cpu_arch_flags,
8022 cpu_arch[j].flags);
8023 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8024 {
8025 if (cpu_sub_arch_name)
8026 {
8027 char *name = cpu_sub_arch_name;
8028 cpu_sub_arch_name = concat (name,
8029 cpu_arch[j].name,
8030 (const char *) NULL);
8031 free (name);
8032 }
8033 else
8034 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8035 cpu_arch_flags = flags;
8036 }
8037 break;
8038 }
8039 }
8040
8041 if (j >= ARRAY_SIZE (cpu_arch))
8042 as_fatal (_("Invalid -march= option: `%s'"), arg);
8043
8044 arch = next;
8045 }
8046       while (next != NULL);
8047 break;
8048
8049 case OPTION_MTUNE:
8050 if (*arg == '.')
8051 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8052 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8053 {
8054 if (strcmp (arg, cpu_arch [j].name) == 0)
8055 {
8056 cpu_arch_tune_set = 1;
8057 cpu_arch_tune = cpu_arch [j].type;
8058 cpu_arch_tune_flags = cpu_arch[j].flags;
8059 break;
8060 }
8061 }
8062 if (j >= ARRAY_SIZE (cpu_arch))
8063 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8064 break;
8065
8066 case OPTION_MMNEMONIC:
8067 if (strcasecmp (arg, "att") == 0)
8068 intel_mnemonic = 0;
8069 else if (strcasecmp (arg, "intel") == 0)
8070 intel_mnemonic = 1;
8071 else
8072 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8073 break;
8074
8075 case OPTION_MSYNTAX:
8076 if (strcasecmp (arg, "att") == 0)
8077 intel_syntax = 0;
8078 else if (strcasecmp (arg, "intel") == 0)
8079 intel_syntax = 1;
8080 else
8081 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8082 break;
8083
8084 case OPTION_MINDEX_REG:
8085 allow_index_reg = 1;
8086 break;
8087
8088 case OPTION_MNAKED_REG:
8089 allow_naked_reg = 1;
8090 break;
8091
8092 case OPTION_MOLD_GCC:
8093 old_gcc = 1;
8094 break;
8095
8096 case OPTION_MSSE2AVX:
8097 sse2avx = 1;
8098 break;
8099
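    /* -msse-check=none|warning|error: set the level of diagnostics
       issued by the SSE instruction check.  */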
8100 case OPTION_MSSE_CHECK:
8101 if (strcasecmp (arg, "error") == 0)
8102 sse_check = sse_check_error;
8103 else if (strcasecmp (arg, "warning") == 0)
8104 sse_check = sse_check_warning;
8105 else if (strcasecmp (arg, "none") == 0)
8106 sse_check = sse_check_none;
8107 else
8108 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8109 break;
8110
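    /* -mavxscalar=128|256: vector length used to encode scalar AVX
       instructions (VEX.L = 0 or VEX.L = 1 respectively).  */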
8111 case OPTION_MAVXSCALAR:
8112 if (strcasecmp (arg, "128") == 0)
8113 avxscalar = vex128;
8114 else if (strcasecmp (arg, "256") == 0)
8115 avxscalar = vex256;
8116 else
8117 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8118 break;
8119
8120 default:
8121 return 0;
8122 }
8123 return 1;
8124 }
8125
8126 #define MESSAGE_TEMPLATE \
8127 " "
8128
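/* Print the supported processor names (EXT == 0) or ISA extension
   names (EXT != 0) to STREAM, wrapping the output so that no line is
   wider than MESSAGE_TEMPLATE.  */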
8129 static void
8130 show_arch (FILE *stream, int ext)
8131 {
8132 static char message[] = MESSAGE_TEMPLATE;
8133 char *start = message + 27;
8134 char *p;
8135 int size = sizeof (MESSAGE_TEMPLATE);
8136 int left;
8137 const char *name;
8138 int len;
8139 unsigned int j;
8140
8141 p = start;
8142 left = size - (start - message);
8143 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8144 {
8145 /* Should it be skipped? */
8146 if (cpu_arch [j].skip)
8147 continue;
8148
8149 name = cpu_arch [j].name;
8150 len = cpu_arch [j].len;
8151 if (*name == '.')
8152 {
8153 /* It is an extension. Skip if we aren't asked to show it. */
8154 if (ext)
8155 {
8156 name++;
8157 len--;
8158 }
8159 else
8160 continue;
8161 }
8162 else if (ext)
8163 {
8164 	  /* It is a processor.  Skip it if we only show extensions.  */
8165 continue;
8166 }
8167
8168 /* Reserve 2 spaces for ", " or ",\0" */
8169 left -= len + 2;
8170
8171 /* Check if there is any room. */
8172 if (left >= 0)
8173 {
8174 if (p != start)
8175 {
8176 *p++ = ',';
8177 *p++ = ' ';
8178 }
8179 p = mempcpy (p, name, len);
8180 }
8181 else
8182 {
8183 /* Output the current message now and start a new one. */
8184 *p++ = ',';
8185 *p = '\0';
8186 fprintf (stream, "%s\n", message);
8187 p = start;
8188 left = size - (start - message) - len - 2;
8189
8190 gas_assert (left >= 0);
8191
8192 p = mempcpy (p, name, len);
8193 }
8194 }
8195
8196 *p = '\0';
8197 fprintf (stream, "%s\n", message);
8198 }
8199
8200 void
8201 md_show_usage (FILE *stream)
8202 {
8203 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8204 fprintf (stream, _("\
8205 -Q ignored\n\
8206 -V print assembler version number\n\
8207 -k ignored\n"));
8208 #endif
8209 fprintf (stream, _("\
8210 -n Do not optimize code alignment\n\
8211 -q quieten some warnings\n"));
8212 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8213 fprintf (stream, _("\
8214 -s ignored\n"));
8215 #endif
8216 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8217 || defined (TE_PE) || defined (TE_PEP))
8218 fprintf (stream, _("\
8219 --32/--64 generate 32bit/64bit code\n"));
8220 #endif
8221 #ifdef SVR4_COMMENT_CHARS
8222 fprintf (stream, _("\
8223 --divide do not treat `/' as a comment character\n"));
8224 #else
8225 fprintf (stream, _("\
8226 --divide ignored\n"));
8227 #endif
8228 fprintf (stream, _("\
8229 -march=CPU[,+EXTENSION...]\n\
8230 generate code for CPU and EXTENSION, CPU is one of:\n"));
8231 show_arch (stream, 0);
8232 fprintf (stream, _("\
8233 EXTENSION is combination of:\n"));
8234 show_arch (stream, 1);
8235 fprintf (stream, _("\
8236 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8237 show_arch (stream, 0);
8238 fprintf (stream, _("\
8239 -msse2avx encode SSE instructions with VEX prefix\n"));
8240 fprintf (stream, _("\
8241 -msse-check=[none|error|warning]\n\
8242 check SSE instructions\n"));
8243 fprintf (stream, _("\
8244 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8245 length\n"));
8246 fprintf (stream, _("\
8247 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8248 fprintf (stream, _("\
8249 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8250 fprintf (stream, _("\
8251 -mindex-reg support pseudo index registers\n"));
8252 fprintf (stream, _("\
8253 -mnaked-reg don't require `%%' prefix for registers\n"));
8254 fprintf (stream, _("\
8255 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8256 }
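/* Illustrative invocations (hypothetical file names; the i686 and
   sse2 names are assumed to be entries in the cpu_arch[] table):

     as --32 -march=i686+sse2 -msse-check=warning foo.s
     as --64 -msse2avx -mavxscalar=256 bar.s  */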
8257
8258 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8259 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8260 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8261
8262 /* Pick the target format to use. */
8263
8264 const char *
8265 i386_target_format (void)
8266 {
8267 if (!strcmp (default_arch, "x86_64"))
8268 {
8269 set_code_flag (CODE_64BIT);
8270 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8271 {
8272 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8273 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8274 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8275 cpu_arch_isa_flags.bitfield.cpui486 = 1;
8276 cpu_arch_isa_flags.bitfield.cpui586 = 1;
8277 cpu_arch_isa_flags.bitfield.cpui686 = 1;
8278 cpu_arch_isa_flags.bitfield.cpuclflush = 1;
8279 	  cpu_arch_isa_flags.bitfield.cpummx = 1;
8280 cpu_arch_isa_flags.bitfield.cpusse = 1;
8281 cpu_arch_isa_flags.bitfield.cpusse2 = 1;
8282 cpu_arch_isa_flags.bitfield.cpulm = 1;
8283 }
8284 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8285 {
8286 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8287 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8288 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8289 cpu_arch_tune_flags.bitfield.cpui486 = 1;
8290 cpu_arch_tune_flags.bitfield.cpui586 = 1;
8291 cpu_arch_tune_flags.bitfield.cpui686 = 1;
8292 cpu_arch_tune_flags.bitfield.cpuclflush = 1;
8293 	  cpu_arch_tune_flags.bitfield.cpummx = 1;
8294 cpu_arch_tune_flags.bitfield.cpusse = 1;
8295 cpu_arch_tune_flags.bitfield.cpusse2 = 1;
8296 }
8297 }
8298 else if (!strcmp (default_arch, "i386"))
8299 {
8300 set_code_flag (CODE_32BIT);
8301 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8302 {
8303 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8304 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8305 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8306 }
8307 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8308 {
8309 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8310 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8311 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8312 }
8313 }
8314 else
8315 as_fatal (_("Unknown architecture"));
8316 switch (OUTPUT_FLAVOR)
8317 {
8318 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8319 case bfd_target_aout_flavour:
8320 return AOUT_TARGET_FORMAT;
8321 #endif
8322 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8323 # if defined (TE_PE) || defined (TE_PEP)
8324 case bfd_target_coff_flavour:
8325 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8326 # elif defined (TE_GO32)
8327 case bfd_target_coff_flavour:
8328 return "coff-go32";
8329 # else
8330 case bfd_target_coff_flavour:
8331 return "coff-i386";
8332 # endif
8333 #endif
8334 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8335 case bfd_target_elf_flavour:
8336 {
8337 if (flag_code == CODE_64BIT)
8338 {
8339 object_64bit = 1;
8340 use_rela_relocations = 1;
8341 }
8342 if (cpu_arch_isa == PROCESSOR_L1OM)
8343 {
8344 if (flag_code != CODE_64BIT)
8345 as_fatal (_("Intel L1OM is 64bit only"));
8346 return ELF_TARGET_L1OM_FORMAT;
8347 }
8348 else
8349 return (flag_code == CODE_64BIT
8350 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8351 }
8352 #endif
8353 #if defined (OBJ_MACH_O)
8354 case bfd_target_mach_o_flavour:
8355 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8356 #endif
8357 default:
8358 abort ();
8359 return NULL;
8360 }
8361 }
8362
8363 #endif /* OBJ_MAYBE_ more than one */
8364
8365 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8366 void
8367 i386_elf_emit_arch_note (void)
8368 {
8369 if (IS_ELF && cpu_arch_name != NULL)
8370 {
8371 char *p;
8372 asection *seg = now_seg;
8373 subsegT subseg = now_subseg;
8374 Elf_Internal_Note i_note;
8375 Elf_External_Note e_note;
8376 asection *note_secp;
8377 int len;
8378
8379 /* Create the .note section. */
8380 note_secp = subseg_new (".note", 0);
8381 bfd_set_section_flags (stdoutput,
8382 note_secp,
8383 SEC_HAS_CONTENTS | SEC_READONLY);
8384
8385 /* Process the arch string. */
8386 len = strlen (cpu_arch_name);
8387
8388 i_note.namesz = len + 1;
8389 i_note.descsz = 0;
8390 i_note.type = NT_ARCH;
8391 p = frag_more (sizeof (e_note.namesz));
8392 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8393 p = frag_more (sizeof (e_note.descsz));
8394 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8395 p = frag_more (sizeof (e_note.type));
8396 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8397 p = frag_more (len + 1);
8398 strcpy (p, cpu_arch_name);
8399
8400 frag_align (2, 0, 0);
8401
8402 subseg_set (seg, subseg);
8403 }
8404 }
8405 #endif
8406 \f
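/* Handle an undefined symbol reference: create the special
   GLOBAL_OFFSET_TABLE_NAME symbol on first use so that GOT-relative
   relocations can refer to it; all other symbols are left alone.  */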
8407 symbolS *
8408 md_undefined_symbol (name)
8409 char *name;
8410 {
8411 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8412 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8413 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8414 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8415 {
8416 if (!GOT_symbol)
8417 {
8418 if (symbol_find (name))
8419 as_bad (_("GOT already in symbol table"));
8420 GOT_symbol = symbol_new (name, undefined_section,
8421 (valueT) 0, &zero_address_frag);
8422 	}
8423 return GOT_symbol;
8424 }
8425 return 0;
8426 }
8427
8428 /* Round up a section size to the appropriate boundary. */
8429
8430 valueT
8431 md_section_align (segment, size)
8432 segT segment ATTRIBUTE_UNUSED;
8433 valueT size;
8434 {
8435 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8436 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8437 {
8438 /* For a.out, force the section size to be aligned. If we don't do
8439 this, BFD will align it for us, but it will not write out the
8440 final bytes of the section. This may be a bug in BFD, but it is
8441 easier to fix it here since that is how the other a.out targets
8442 work. */
8443 int align;
8444
8445 align = bfd_get_section_alignment (stdoutput, segment);
8446 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8447 }
8448 #endif
8449
8450 return size;
8451 }
8452
8453 /* On the i386, PC-relative offsets are relative to the start of the
8454 next instruction. That is, the address of the offset, plus its
8455 size, since the offset is always the last part of the insn. */
8456
8457 long
8458 md_pcrel_from (fixS *fixP)
8459 {
8460 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8461 }
8462
8463 #ifndef I386COFF
8464
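/* Handle the .bss pseudo-op: switch to the requested subsection of
   the bss section.  */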
8465 static void
8466 s_bss (int ignore ATTRIBUTE_UNUSED)
8467 {
8468 int temp;
8469
8470 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8471 if (IS_ELF)
8472 obj_elf_section_change_hook ();
8473 #endif
8474 temp = get_absolute_expression ();
8475 subseg_set (bss_section, (subsegT) temp);
8476 demand_empty_rest_of_line ();
8477 }
8478
8479 #endif
8480
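/* If a fixup subtracts the special GOT symbol, turn it into the
   appropriate GOT-relative relocation and drop the subtracted symbol
   before the relocation is generated.  */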
8481 void
8482 i386_validate_fix (fixS *fixp)
8483 {
8484 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8485 {
8486 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8487 {
8488 if (!object_64bit)
8489 abort ();
8490 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8491 }
8492 else
8493 {
8494 if (!object_64bit)
8495 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8496 else
8497 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8498 }
8499 fixp->fx_subsy = 0;
8500 }
8501 }
8502
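/* Translate an internal fixup into a BFD relocation entry, choosing
   the relocation code and computing the addend.  */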
8503 arelent *
8504 tc_gen_reloc (section, fixp)
8505 asection *section ATTRIBUTE_UNUSED;
8506 fixS *fixp;
8507 {
8508 arelent *rel;
8509 bfd_reloc_code_real_type code;
8510
8511 switch (fixp->fx_r_type)
8512 {
8513 case BFD_RELOC_X86_64_PLT32:
8514 case BFD_RELOC_X86_64_GOT32:
8515 case BFD_RELOC_X86_64_GOTPCREL:
8516 case BFD_RELOC_386_PLT32:
8517 case BFD_RELOC_386_GOT32:
8518 case BFD_RELOC_386_GOTOFF:
8519 case BFD_RELOC_386_GOTPC:
8520 case BFD_RELOC_386_TLS_GD:
8521 case BFD_RELOC_386_TLS_LDM:
8522 case BFD_RELOC_386_TLS_LDO_32:
8523 case BFD_RELOC_386_TLS_IE_32:
8524 case BFD_RELOC_386_TLS_IE:
8525 case BFD_RELOC_386_TLS_GOTIE:
8526 case BFD_RELOC_386_TLS_LE_32:
8527 case BFD_RELOC_386_TLS_LE:
8528 case BFD_RELOC_386_TLS_GOTDESC:
8529 case BFD_RELOC_386_TLS_DESC_CALL:
8530 case BFD_RELOC_X86_64_TLSGD:
8531 case BFD_RELOC_X86_64_TLSLD:
8532 case BFD_RELOC_X86_64_DTPOFF32:
8533 case BFD_RELOC_X86_64_DTPOFF64:
8534 case BFD_RELOC_X86_64_GOTTPOFF:
8535 case BFD_RELOC_X86_64_TPOFF32:
8536 case BFD_RELOC_X86_64_TPOFF64:
8537 case BFD_RELOC_X86_64_GOTOFF64:
8538 case BFD_RELOC_X86_64_GOTPC32:
8539 case BFD_RELOC_X86_64_GOT64:
8540 case BFD_RELOC_X86_64_GOTPCREL64:
8541 case BFD_RELOC_X86_64_GOTPC64:
8542 case BFD_RELOC_X86_64_GOTPLT64:
8543 case BFD_RELOC_X86_64_PLTOFF64:
8544 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8545 case BFD_RELOC_X86_64_TLSDESC_CALL:
8546 case BFD_RELOC_RVA:
8547 case BFD_RELOC_VTABLE_ENTRY:
8548 case BFD_RELOC_VTABLE_INHERIT:
8549 #ifdef TE_PE
8550 case BFD_RELOC_32_SECREL:
8551 #endif
8552 code = fixp->fx_r_type;
8553 break;
8554 case BFD_RELOC_X86_64_32S:
8555 if (!fixp->fx_pcrel)
8556 {
8557 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8558 code = fixp->fx_r_type;
8559 break;
8560 }
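      /* Fall through.  */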
8561 default:
8562 if (fixp->fx_pcrel)
8563 {
8564 switch (fixp->fx_size)
8565 {
8566 default:
8567 as_bad_where (fixp->fx_file, fixp->fx_line,
8568 _("can not do %d byte pc-relative relocation"),
8569 fixp->fx_size);
8570 code = BFD_RELOC_32_PCREL;
8571 break;
8572 case 1: code = BFD_RELOC_8_PCREL; break;
8573 case 2: code = BFD_RELOC_16_PCREL; break;
8574 case 4: code = BFD_RELOC_32_PCREL; break;
8575 #ifdef BFD64
8576 case 8: code = BFD_RELOC_64_PCREL; break;
8577 #endif
8578 }
8579 }
8580 else
8581 {
8582 switch (fixp->fx_size)
8583 {
8584 default:
8585 as_bad_where (fixp->fx_file, fixp->fx_line,
8586 _("can not do %d byte relocation"),
8587 fixp->fx_size);
8588 code = BFD_RELOC_32;
8589 break;
8590 case 1: code = BFD_RELOC_8; break;
8591 case 2: code = BFD_RELOC_16; break;
8592 case 4: code = BFD_RELOC_32; break;
8593 #ifdef BFD64
8594 case 8: code = BFD_RELOC_64; break;
8595 #endif
8596 }
8597 }
8598 break;
8599 }
8600
8601 if ((code == BFD_RELOC_32
8602 || code == BFD_RELOC_32_PCREL
8603 || code == BFD_RELOC_X86_64_32S)
8604 && GOT_symbol
8605 && fixp->fx_addsy == GOT_symbol)
8606 {
8607 if (!object_64bit)
8608 code = BFD_RELOC_386_GOTPC;
8609 else
8610 code = BFD_RELOC_X86_64_GOTPC32;
8611 }
8612 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8613 && GOT_symbol
8614 && fixp->fx_addsy == GOT_symbol)
8615 {
8616 code = BFD_RELOC_X86_64_GOTPC64;
8617 }
8618
8619 rel = (arelent *) xmalloc (sizeof (arelent));
8620 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8621 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8622
8623 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8624
8625 if (!use_rela_relocations)
8626 {
8627 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8628 vtable entry to be used in the relocation's section offset. */
8629 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8630 rel->address = fixp->fx_offset;
8631 #if defined (OBJ_COFF) && defined (TE_PE)
8632 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8633 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8634 else
8635 #endif
8636 rel->addend = 0;
8637 }
8638 /* Use the rela in 64bit mode. */
8639 else
8640 {
8641 if (!fixp->fx_pcrel)
8642 rel->addend = fixp->fx_offset;
8643 else
8644 switch (code)
8645 {
8646 case BFD_RELOC_X86_64_PLT32:
8647 case BFD_RELOC_X86_64_GOT32:
8648 case BFD_RELOC_X86_64_GOTPCREL:
8649 case BFD_RELOC_X86_64_TLSGD:
8650 case BFD_RELOC_X86_64_TLSLD:
8651 case BFD_RELOC_X86_64_GOTTPOFF:
8652 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8653 case BFD_RELOC_X86_64_TLSDESC_CALL:
8654 rel->addend = fixp->fx_offset - fixp->fx_size;
8655 break;
8656 default:
8657 rel->addend = (section->vma
8658 - fixp->fx_size
8659 + fixp->fx_addnumber
8660 + md_pcrel_from (fixp));
8661 break;
8662 }
8663 }
8664
8665 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8666 if (rel->howto == NULL)
8667 {
8668 as_bad_where (fixp->fx_file, fixp->fx_line,
8669 _("cannot represent relocation type %s"),
8670 bfd_get_reloc_code_name (code));
8671 /* Set howto to a garbage value so that we can keep going. */
8672 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8673 gas_assert (rel->howto != NULL);
8674 }
8675
8676 return rel;
8677 }
8678
8679 #include "tc-i386-intel.c"
8680
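/* Parse a register name used in a CFI directive and convert it to
   the corresponding DWARF2 register number.  */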
8681 void
8682 tc_x86_parse_to_dw2regnum (expressionS *exp)
8683 {
8684 int saved_naked_reg;
8685 char saved_register_dot;
8686
8687 saved_naked_reg = allow_naked_reg;
8688 allow_naked_reg = 1;
8689 saved_register_dot = register_chars['.'];
8690 register_chars['.'] = '.';
8691 allow_pseudo_reg = 1;
8692 expression_and_evaluate (exp);
8693 allow_pseudo_reg = 0;
8694 register_chars['.'] = saved_register_dot;
8695 allow_naked_reg = saved_naked_reg;
8696
8697 if (exp->X_op == O_register && exp->X_add_number >= 0)
8698 {
8699 if ((addressT) exp->X_add_number < i386_regtab_size)
8700 {
8701 exp->X_op = O_constant;
8702 exp->X_add_number = i386_regtab[exp->X_add_number]
8703 .dw2_regnum[flag_code >> 1];
8704 }
8705 else
8706 exp->X_op = O_illegal;
8707 }
8708 }
8709
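/* Emit the initial CFI instructions of each FDE: define the CFA
   relative to the stack pointer (%esp or %rsp) and record where the
   return address is saved relative to the CFA.  */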
8710 void
8711 tc_x86_frame_initial_instructions (void)
8712 {
8713 static unsigned int sp_regno[2];
8714
8715 if (!sp_regno[flag_code >> 1])
8716 {
8717 char *saved_input = input_line_pointer;
8718 char sp[][4] = {"esp", "rsp"};
8719 expressionS exp;
8720
8721 input_line_pointer = sp[flag_code >> 1];
8722 tc_x86_parse_to_dw2regnum (&exp);
8723 gas_assert (exp.X_op == O_constant);
8724 sp_regno[flag_code >> 1] = exp.X_add_number;
8725 input_line_pointer = saved_input;
8726 }
8727
8728 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8729 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8730 }
8731
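/* Map the section type keyword "unwind" to SHT_X86_64_UNWIND when
   assembling 64-bit code; return -1 for anything else.  */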
8732 int
8733 i386_elf_section_type (const char *str, size_t len)
8734 {
8735 if (flag_code == CODE_64BIT
8736 && len == sizeof ("unwind") - 1
8737 && strncmp (str, "unwind", 6) == 0)
8738 return SHT_X86_64_UNWIND;
8739
8740 return -1;
8741 }
8742
8743 #ifdef TE_SOLARIS
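/* On Solaris, give .eh_frame the x86-64 unwind section type when
   assembling 64-bit code.  */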
8744 void
8745 i386_solaris_fix_up_eh_frame (segT sec)
8746 {
8747 if (flag_code == CODE_64BIT)
8748 elf_section_type (sec) = SHT_X86_64_UNWIND;
8749 }
8750 #endif
8751
8752 #ifdef TE_PE
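/* Emit a section-relative (secrel) offset to SYMBOL of the given
   SIZE, used for DWARF2 debug information on PE targets.  */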
8753 void
8754 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8755 {
8756 expressionS exp;
8757
8758 exp.X_op = O_secrel;
8759 exp.X_add_symbol = symbol;
8760 exp.X_add_number = 0;
8761 emit_expr (&exp, size);
8762 }
8763 #endif
8764
8765 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8766 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
8767
8768 bfd_vma
8769 x86_64_section_letter (int letter, char **ptr_msg)
8770 {
8771 if (flag_code == CODE_64BIT)
8772 {
8773 if (letter == 'l')
8774 return SHF_X86_64_LARGE;
8775
8776 *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
8777 }
8778 else
8779 *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
8780 return -1;
8781 }
8782
8783 bfd_vma
8784 x86_64_section_word (char *str, size_t len)
8785 {
8786 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8787 return SHF_X86_64_LARGE;
8788
8789 return -1;
8790 }
8791
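/* Implement the .largecomm pseudo-op: in 64-bit mode the symbol is
   allocated against the large common section (with local symbols
   going into .lbss); otherwise fall back to a plain .comm.  */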
8792 static void
8793 handle_large_common (int small ATTRIBUTE_UNUSED)
8794 {
8795 if (flag_code != CODE_64BIT)
8796 {
8797 s_comm_internal (0, elf_common_parse);
8798 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8799 }
8800 else
8801 {
8802 static segT lbss_section;
8803 asection *saved_com_section_ptr = elf_com_section_ptr;
8804 asection *saved_bss_section = bss_section;
8805
8806 if (lbss_section == NULL)
8807 {
8808 flagword applicable;
8809 segT seg = now_seg;
8810 subsegT subseg = now_subseg;
8811
8812 /* The .lbss section is for local .largecomm symbols. */
8813 lbss_section = subseg_new (".lbss", 0);
8814 applicable = bfd_applicable_section_flags (stdoutput);
8815 bfd_set_section_flags (stdoutput, lbss_section,
8816 applicable & SEC_ALLOC);
8817 seg_info (lbss_section)->bss = 1;
8818
8819 subseg_set (seg, subseg);
8820 }
8821
8822 elf_com_section_ptr = &_bfd_elf_large_com_section;
8823 bss_section = lbss_section;
8824
8825 s_comm_internal (0, elf_common_parse);
8826
8827 elf_com_section_ptr = saved_com_section_ptr;
8828 bss_section = saved_bss_section;
8829 }
8830 }
8831 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */