1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
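/* Illustrative note (not part of the original source): these indices
   select slots in i.prefix[] below, and the prefix bytes are written
   out in slot order.  For example, "rep movsw" assembled in 32-bit
   mode puts the 0x66 operand-size byte in the DATA_PREFIX slot and
   the 0xf3 byte in the REP_PREFIX slot, so the data prefix is emitted
   before the rep prefix, matching the preferred order above.  */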
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ASCII letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 }
136 arch_entry;
137
138 static void set_code_flag (int);
139 static void set_16bit_gcc_code_flag (int);
140 static void set_intel_syntax (int);
141 static void set_intel_mnemonic (int);
142 static void set_allow_index_reg (int);
143 static void set_sse_check (int);
144 static void set_cpu_arch (int);
145 #ifdef TE_PE
146 static void pe_directive_secrel (int);
147 #endif
148 static void signed_cons (int);
149 static char *output_invalid (int c);
150 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
151 const char *);
152 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_att_operand (char *);
155 static int i386_intel_operand (char *, int);
156 static int i386_intel_simplify (expressionS *);
157 static int i386_intel_parse_name (const char *, expressionS *);
158 static const reg_entry *parse_register (char *, char **);
159 static char *parse_insn (char *, char *);
160 static char *parse_operands (char *, const char *);
161 static void swap_operands (void);
162 static void swap_2_operands (int, int);
163 static void optimize_imm (void);
164 static void optimize_disp (void);
165 static const insn_template *match_template (void);
166 static int check_string (void);
167 static int process_suffix (void);
168 static int check_byte_reg (void);
169 static int check_long_reg (void);
170 static int check_qword_reg (void);
171 static int check_word_reg (void);
172 static int finalize_imm (void);
173 static int process_operands (void);
174 static const seg_entry *build_modrm_byte (void);
175 static void output_insn (void);
176 static void output_imm (fragS *, offsetT);
177 static void output_disp (fragS *, offsetT);
178 #ifndef I386COFF
179 static void s_bss (int);
180 #endif
181 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
182 static void handle_large_common (int small ATTRIBUTE_UNUSED);
183 #endif
184
185 static const char *default_arch = DEFAULT_ARCH;
186
187 /* VEX prefix. */
188 typedef struct
189 {
190 /* VEX prefix is either 2 or 3 bytes long. */
191 unsigned char bytes[3];
192 unsigned int length;
193 /* Destination or source register specifier. */
194 const reg_entry *register_specifier;
195 } vex_prefix;
196
197 /* 'md_assemble ()' gathers together information and puts it into an
198 i386_insn. */
199
200 union i386_op
201 {
202 expressionS *disps;
203 expressionS *imms;
204 const reg_entry *regs;
205 };
206
207 enum i386_error
208 {
209 operand_size_mismatch,
210 operand_type_mismatch,
211 register_type_mismatch,
212 number_of_operands_mismatch,
213 invalid_instruction_suffix,
214 bad_imm4,
215 old_gcc_only,
216 unsupported_with_intel_mnemonic,
217 unsupported_syntax,
218 unsupported
219 };
220
221 struct _i386_insn
222 {
223 /* TM holds the template for the insn we're currently assembling. */
224 insn_template tm;
225
226 /* SUFFIX holds the instruction size suffix for byte, word, dword
227 or qword, if given. */
228 char suffix;
229
230 /* OPERANDS gives the number of given operands. */
231 unsigned int operands;
232
233 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
234 of given register, displacement, memory operands and immediate
235 operands. */
236 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
237
238 /* TYPES [i] is the type (see above #defines) which tells us how to
239 use OP[i] for the corresponding operand. */
240 i386_operand_type types[MAX_OPERANDS];
241
242 /* Displacement expression, immediate expression, or register for each
243 operand. */
244 union i386_op op[MAX_OPERANDS];
245
246 /* Flags for operands. */
247 unsigned int flags[MAX_OPERANDS];
248 #define Operand_PCrel 1
249
250 /* Relocation type for operand */
251 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
252
253 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
254 the base index byte below. */
255 const reg_entry *base_reg;
256 const reg_entry *index_reg;
257 unsigned int log2_scale_factor;
258
259 /* SEG gives the seg_entries of this insn. They are zero unless
260 explicit segment overrides are given. */
261 const seg_entry *seg[2];
262
263 /* PREFIX holds all the given prefix opcodes (usually null).
264 PREFIXES is the number of prefix opcodes. */
265 unsigned int prefixes;
266 unsigned char prefix[MAX_PREFIXES];
267
268 /* RM and SIB are the modrm byte and the sib byte where the
269 addressing modes of this insn are encoded. */
270 modrm_byte rm;
271 rex_byte rex;
272 sib_byte sib;
273 vex_prefix vex;
274
275 /* Swap operand in encoding. */
276 unsigned int swap_operand;
277
278 /* Error message. */
279 enum i386_error error;
280 };
281
282 typedef struct _i386_insn i386_insn;
283
284 /* List of chars besides those in app.c:symbol_chars that can start an
285 operand. Used to prevent the scrubber eating vital white-space. */
286 const char extra_symbol_chars[] = "*%-(["
287 #ifdef LEX_AT
288 "@"
289 #endif
290 #ifdef LEX_QM
291 "?"
292 #endif
293 ;
294
295 #if (defined (TE_I386AIX) \
296 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
297 && !defined (TE_GNU) \
298 && !defined (TE_LINUX) \
299 && !defined (TE_NETWARE) \
300 && !defined (TE_FreeBSD) \
301 && !defined (TE_NetBSD)))
302 /* This array holds the chars that always start a comment. If the
303 pre-processor is disabled, these aren't very useful. The option
304 --divide will remove '/' from this list. */
305 const char *i386_comment_chars = "#/";
306 #define SVR4_COMMENT_CHARS 1
307 #define PREFIX_SEPARATOR '\\'
308
309 #else
310 const char *i386_comment_chars = "#";
311 #define PREFIX_SEPARATOR '/'
312 #endif
313
314 /* This array holds the chars that only start a comment at the beginning of
315 a line. If the line seems to have the form '# 123 filename'
316 .line and .file directives will appear in the pre-processed output.
317 Note that input_file.c hand checks for '#' at the beginning of the
318 first line of the input file. This is because the compiler outputs
319 #NO_APP at the beginning of its output.
320 Also note that comments started like this one will always work if
321 '/' isn't otherwise defined. */
322 const char line_comment_chars[] = "#/";
323
324 const char line_separator_chars[] = ";";
325
326 /* Chars that can be used to separate mant from exp in floating point
327 nums. */
328 const char EXP_CHARS[] = "eE";
329
330 /* Chars that mean this number is a floating point constant
331 As in 0f12.456
332 or 0d1.2345e12. */
333 const char FLT_CHARS[] = "fFdDxX";
334
335 /* Tables for lexical analysis. */
336 static char mnemonic_chars[256];
337 static char register_chars[256];
338 static char operand_chars[256];
339 static char identifier_chars[256];
340 static char digit_chars[256];
341
342 /* Lexical macros. */
343 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
344 #define is_operand_char(x) (operand_chars[(unsigned char) x])
345 #define is_register_char(x) (register_chars[(unsigned char) x])
346 #define is_space_char(x) ((x) == ' ')
347 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
348 #define is_digit_char(x) (digit_chars[(unsigned char) x])
349
350 /* All non-digit non-letter characters that may occur in an operand. */
351 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
352
353 /* md_assemble() always leaves the strings it's passed unaltered. To
354 effect this we maintain a stack of saved characters that we've smashed
355 with '\0's (indicating end of strings for various sub-fields of the
356 assembler instruction). */
357 static char save_stack[32];
358 static char *save_stack_p;
359 #define END_STRING_AND_SAVE(s) \
360 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
361 #define RESTORE_END_STRING(s) \
362 do { *(s) = *--save_stack_p; } while (0)
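/* Illustrative usage sketch (not part of the original source): the
   parser temporarily NUL-terminates a sub-field in place, works on it
   as a C string, then restores the overwritten byte:

     END_STRING_AND_SAVE (l);    // push *l onto save_stack, *l = '\0'
     ... parse the text ending at L ...
     RESTORE_END_STRING (l);     // pop save_stack back into *l

   Calls must be balanced so save_stack_p never runs past save_stack[].  */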
363
364 /* The instruction we're assembling. */
365 static i386_insn i;
366
367 /* Possible templates for current insn. */
368 static const templates *current_templates;
369
370 /* Per instruction expressionS buffers: max displacements & immediates. */
371 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
372 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
373
374 /* Current operand we are working on. */
375 static int this_operand = -1;
376
377 /* We support four different modes. The FLAG_CODE variable is used to
378 distinguish between them. */
379
380 enum flag_code {
381 CODE_32BIT,
382 CODE_16BIT,
383 CODE_64BIT };
384
385 static enum flag_code flag_code;
386 static unsigned int object_64bit;
387 static int use_rela_relocations = 0;
388
389 /* The names used to print error messages. */
390 static const char *flag_code_names[] =
391 {
392 "32",
393 "16",
394 "64"
395 };
396
397 /* 1 for intel syntax,
398 0 if att syntax. */
399 static int intel_syntax = 0;
400
401 /* 1 for intel mnemonic,
402 0 if att mnemonic. */
403 static int intel_mnemonic = !SYSV386_COMPAT;
404
405 /* 1 if support old (<= 2.8.1) versions of gcc. */
406 static int old_gcc = OLDGCC_COMPAT;
407
408 /* 1 if pseudo registers are permitted. */
409 static int allow_pseudo_reg = 0;
410
411 /* 1 if register prefix % not required. */
412 static int allow_naked_reg = 0;
413
414 /* 1 if pseudo index register, eiz/riz, is allowed. */
415 static int allow_index_reg = 0;
416
417 static enum
418 {
419 sse_check_none = 0,
420 sse_check_warning,
421 sse_check_error
422 }
423 sse_check;
424
425 /* Register prefix used for error message. */
426 static const char *register_prefix = "%";
427
428 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
429 leave, push, and pop instructions so that gcc has the same stack
430 frame as in 32 bit mode. */
431 static char stackop_size = '\0';
432
433 /* Non-zero to optimize code alignment. */
434 int optimize_align_code = 1;
435
436 /* Non-zero to quieten some warnings. */
437 static int quiet_warnings = 0;
438
439 /* CPU name. */
440 static const char *cpu_arch_name = NULL;
441 static char *cpu_sub_arch_name = NULL;
442
443 /* CPU feature flags. */
444 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
445
446 /* If we have selected a cpu we are generating instructions for. */
447 static int cpu_arch_tune_set = 0;
448
449 /* Cpu we are generating instructions for. */
450 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
451
452 /* CPU feature flags of cpu we are generating instructions for. */
453 static i386_cpu_flags cpu_arch_tune_flags;
454
455 /* CPU instruction set architecture used. */
456 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
457
458 /* CPU feature flags of instruction set architecture used. */
459 i386_cpu_flags cpu_arch_isa_flags;
460
461 /* If set, conditional jumps are not automatically promoted to handle
462 larger than a byte offset. */
463 static unsigned int no_cond_jump_promotion = 0;
464
465 /* Encode SSE instructions with VEX prefix. */
466 static unsigned int sse2avx;
467
468 /* Encode scalar AVX instructions with specific vector length. */
469 static enum
470 {
471 vex128 = 0,
472 vex256
473 } avxscalar;
474
475 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
476 static symbolS *GOT_symbol;
477
478 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
479 unsigned int x86_dwarf2_return_column;
480
481 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
482 int x86_cie_data_alignment;
483
484 /* Interface to relax_segment.
485 There are 3 major relax states for 386 jump insns because the
486 different types of jumps add different sizes to frags when we're
487 figuring out what sort of jump to choose to reach a given label. */
488
489 /* Types. */
490 #define UNCOND_JUMP 0
491 #define COND_JUMP 1
492 #define COND_JUMP86 2
493
494 /* Sizes. */
495 #define CODE16 1
496 #define SMALL 0
497 #define SMALL16 (SMALL | CODE16)
498 #define BIG 2
499 #define BIG16 (BIG | CODE16)
500
501 #ifndef INLINE
502 #ifdef __GNUC__
503 #define INLINE __inline__
504 #else
505 #define INLINE
506 #endif
507 #endif
508
509 #define ENCODE_RELAX_STATE(type, size) \
510 ((relax_substateT) (((type) << 2) | (size)))
511 #define TYPE_FROM_RELAX_STATE(s) \
512 ((s) >> 2)
513 #define DISP_SIZE_FROM_RELAX_STATE(s) \
514 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
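/* Worked example (illustrative, not part of the original source):
   ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2 == 6;
   TYPE_FROM_RELAX_STATE (6) gives back 1 (COND_JUMP) and
   DISP_SIZE_FROM_RELAX_STATE (6) gives 4, i.e. a 32-bit displacement.  */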
515
516 /* This table is used by relax_frag to promote short jumps to long
517 ones where necessary. SMALL (short) jumps may be promoted to BIG
518 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
519 don't allow a short jump in a 32 bit code segment to be promoted to
520 a 16 bit offset jump because it's slower (requires data size
521 prefix), and doesn't work, unless the destination is in the bottom
522 64k of the code segment (The top 16 bits of eip are zeroed). */
523
524 const relax_typeS md_relax_table[] =
525 {
526 /* The fields are:
527 1) most positive reach of this state,
528 2) most negative reach of this state,
529 3) how many bytes this mode will have in the variable part of the frag
530 4) which index into the table to try if we can't fit into this one. */
531
532 /* UNCOND_JUMP states. */
533 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
534 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
535 /* dword jmp adds 4 bytes to frag:
536 0 extra opcode bytes, 4 displacement bytes. */
537 {0, 0, 4, 0},
538 /* word jmp adds 2 bytes to frag:
539 0 extra opcode bytes, 2 displacement bytes. */
540 {0, 0, 2, 0},
541
542 /* COND_JUMP states. */
543 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
544 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
545 /* dword conditionals add 5 bytes to frag:
546 1 extra opcode byte, 4 displacement bytes. */
547 {0, 0, 5, 0},
548 /* word conditionals add 3 bytes to frag:
549 1 extra opcode byte, 2 displacement bytes. */
550 {0, 0, 3, 0},
551
552 /* COND_JUMP86 states. */
553 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
554 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
555 /* dword conditionals add 5 bytes to frag:
556 1 extra opcode byte, 4 displacement bytes. */
557 {0, 0, 5, 0},
558 /* word conditionals add 4 bytes to frag:
559 1 displacement byte and a 3 byte long branch insn. */
560 {0, 0, 4, 0}
561 };
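/* Illustrative reading of one row (not part of the original source):
   md_relax_table[ENCODE_RELAX_STATE (COND_JUMP, SMALL)], entry 4 above,
   allows a forward reach of 128 and a backward reach of -127 with a
   1-byte displacement; if the target is further away, relax_frag moves
   on to ENCODE_RELAX_STATE (COND_JUMP, BIG), whose row reserves 5 bytes
   (1 extra opcode byte plus a 4-byte displacement).  */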
562
563 static const arch_entry cpu_arch[] =
564 {
565 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
566 CPU_GENERIC32_FLAGS, 0 },
567 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
568 CPU_GENERIC64_FLAGS, 0 },
569 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
570 CPU_NONE_FLAGS, 0 },
571 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
572 CPU_I186_FLAGS, 0 },
573 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
574 CPU_I286_FLAGS, 0 },
575 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
576 CPU_I386_FLAGS, 0 },
577 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
578 CPU_I486_FLAGS, 0 },
579 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
580 CPU_I586_FLAGS, 0 },
581 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
582 CPU_I686_FLAGS, 0 },
583 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
584 CPU_I586_FLAGS, 0 },
585 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
586 CPU_I686_FLAGS, 0 },
587 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
588 CPU_P2_FLAGS, 0 },
589 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
590 CPU_P3_FLAGS, 0 },
591 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
592 CPU_P4_FLAGS, 0 },
593 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
594 CPU_CORE_FLAGS, 0 },
595 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
596 CPU_NOCONA_FLAGS, 0 },
597 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
598 CPU_CORE_FLAGS, 1 },
599 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
600 CPU_CORE_FLAGS, 0 },
601 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
602 CPU_CORE2_FLAGS, 1 },
603 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
604 CPU_CORE2_FLAGS, 0 },
605 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
606 CPU_COREI7_FLAGS, 0 },
607 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
608 CPU_L1OM_FLAGS, 0 },
609 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
610 CPU_K6_FLAGS, 0 },
611 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
612 CPU_K6_2_FLAGS, 0 },
613 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
614 CPU_ATHLON_FLAGS, 0 },
615 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
616 CPU_K8_FLAGS, 1 },
617 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
618 CPU_K8_FLAGS, 0 },
619 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
620 CPU_K8_FLAGS, 0 },
621 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
622 CPU_AMDFAM10_FLAGS, 0 },
623 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
624 CPU_BDVER1_FLAGS, 0 },
625 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
626 CPU_8087_FLAGS, 0 },
627 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
628 CPU_287_FLAGS, 0 },
629 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
630 CPU_387_FLAGS, 0 },
631 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
632 CPU_ANY87_FLAGS, 0 },
633 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
634 CPU_MMX_FLAGS, 0 },
635 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
636 CPU_3DNOWA_FLAGS, 0 },
637 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
638 CPU_SSE_FLAGS, 0 },
639 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
640 CPU_SSE2_FLAGS, 0 },
641 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
642 CPU_SSE3_FLAGS, 0 },
643 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
644 CPU_SSSE3_FLAGS, 0 },
645 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
646 CPU_SSE4_1_FLAGS, 0 },
647 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
648 CPU_SSE4_2_FLAGS, 0 },
649 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
650 CPU_SSE4_2_FLAGS, 0 },
651 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
652 CPU_ANY_SSE_FLAGS, 0 },
653 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
654 CPU_AVX_FLAGS, 0 },
655 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
656 CPU_ANY_AVX_FLAGS, 0 },
657 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
658 CPU_VMX_FLAGS, 0 },
659 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
660 CPU_SMX_FLAGS, 0 },
661 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
662 CPU_XSAVE_FLAGS, 0 },
663 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
664 CPU_AES_FLAGS, 0 },
665 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
666 CPU_PCLMUL_FLAGS, 0 },
667 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
668 CPU_PCLMUL_FLAGS, 1 },
669 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
670 CPU_FMA_FLAGS, 0 },
671 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
672 CPU_FMA4_FLAGS, 0 },
673 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
674 CPU_XOP_FLAGS, 0 },
675 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
676 CPU_LWP_FLAGS, 0 },
677 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
678 CPU_MOVBE_FLAGS, 0 },
679 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
680 CPU_EPT_FLAGS, 0 },
681 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
682 CPU_CLFLUSH_FLAGS, 0 },
683 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
684 CPU_SYSCALL_FLAGS, 0 },
685 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
686 CPU_RDTSCP_FLAGS, 0 },
687 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
688 CPU_3DNOW_FLAGS, 0 },
689 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
690 CPU_3DNOWA_FLAGS, 0 },
691 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
692 CPU_PADLOCK_FLAGS, 0 },
693 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
694 CPU_SVME_FLAGS, 1 },
695 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
696 CPU_SVME_FLAGS, 0 },
697 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
698 CPU_SSE4A_FLAGS, 0 },
699 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
700 CPU_ABM_FLAGS, 0 },
701 };
702
703 #ifdef I386COFF
704 /* Like s_lcomm_internal in gas/read.c but the alignment string
705 is allowed to be optional. */
706
707 static symbolS *
708 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
709 {
710 addressT align = 0;
711
712 SKIP_WHITESPACE ();
713
714 if (needs_align
715 && *input_line_pointer == ',')
716 {
717 align = parse_align (needs_align - 1);
718
719 if (align == (addressT) -1)
720 return NULL;
721 }
722 else
723 {
724 if (size >= 8)
725 align = 3;
726 else if (size >= 4)
727 align = 2;
728 else if (size >= 2)
729 align = 1;
730 else
731 align = 0;
732 }
733
734 bss_alloc (symbolP, size, align);
735 return symbolP;
736 }
737
738 static void
739 pe_lcomm (int needs_align)
740 {
741 s_comm_internal (needs_align * 2, pe_lcomm_internal);
742 }
743 #endif
744
745 const pseudo_typeS md_pseudo_table[] =
746 {
747 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
748 {"align", s_align_bytes, 0},
749 #else
750 {"align", s_align_ptwo, 0},
751 #endif
752 {"arch", set_cpu_arch, 0},
753 #ifndef I386COFF
754 {"bss", s_bss, 0},
755 #else
756 {"lcomm", pe_lcomm, 1},
757 #endif
758 {"ffloat", float_cons, 'f'},
759 {"dfloat", float_cons, 'd'},
760 {"tfloat", float_cons, 'x'},
761 {"value", cons, 2},
762 {"slong", signed_cons, 4},
763 {"noopt", s_ignore, 0},
764 {"optim", s_ignore, 0},
765 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
766 {"code16", set_code_flag, CODE_16BIT},
767 {"code32", set_code_flag, CODE_32BIT},
768 {"code64", set_code_flag, CODE_64BIT},
769 {"intel_syntax", set_intel_syntax, 1},
770 {"att_syntax", set_intel_syntax, 0},
771 {"intel_mnemonic", set_intel_mnemonic, 1},
772 {"att_mnemonic", set_intel_mnemonic, 0},
773 {"allow_index_reg", set_allow_index_reg, 1},
774 {"disallow_index_reg", set_allow_index_reg, 0},
775 {"sse_check", set_sse_check, 0},
776 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
777 {"largecomm", handle_large_common, 0},
778 #else
779 {"file", (void (*) (int)) dwarf2_directive_file, 0},
780 {"loc", dwarf2_directive_loc, 0},
781 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
782 #endif
783 #ifdef TE_PE
784 {"secrel32", pe_directive_secrel, 0},
785 #endif
786 {0, 0, 0}
787 };
788
789 /* For interface with expression (). */
790 extern char *input_line_pointer;
791
792 /* Hash table for instruction mnemonic lookup. */
793 static struct hash_control *op_hash;
794
795 /* Hash table for register lookup. */
796 static struct hash_control *reg_hash;
797 \f
798 void
799 i386_align_code (fragS *fragP, int count)
800 {
801 /* Various efficient no-op patterns for aligning code labels.
802 Note: Don't try to assemble the instructions in the comments.
803 0L and 0w are not legal. */
804 static const char f32_1[] =
805 {0x90}; /* nop */
806 static const char f32_2[] =
807 {0x66,0x90}; /* xchg %ax,%ax */
808 static const char f32_3[] =
809 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
810 static const char f32_4[] =
811 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
812 static const char f32_5[] =
813 {0x90, /* nop */
814 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
815 static const char f32_6[] =
816 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
817 static const char f32_7[] =
818 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
819 static const char f32_8[] =
820 {0x90, /* nop */
821 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
822 static const char f32_9[] =
823 {0x89,0xf6, /* movl %esi,%esi */
824 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
825 static const char f32_10[] =
826 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
827 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
828 static const char f32_11[] =
829 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
830 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
831 static const char f32_12[] =
832 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
833 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
834 static const char f32_13[] =
835 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
836 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
837 static const char f32_14[] =
838 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
839 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
840 static const char f16_3[] =
841 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
842 static const char f16_4[] =
843 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
844 static const char f16_5[] =
845 {0x90, /* nop */
846 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
847 static const char f16_6[] =
848 {0x89,0xf6, /* mov %si,%si */
849 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
850 static const char f16_7[] =
851 {0x8d,0x74,0x00, /* lea 0(%si),%si */
852 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
853 static const char f16_8[] =
854 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
855 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
856 static const char jump_31[] =
857 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
858 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
859 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
860 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
861 static const char *const f32_patt[] = {
862 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
863 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
864 };
865 static const char *const f16_patt[] = {
866 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
867 };
868 /* nopl (%[re]ax) */
869 static const char alt_3[] =
870 {0x0f,0x1f,0x00};
871 /* nopl 0(%[re]ax) */
872 static const char alt_4[] =
873 {0x0f,0x1f,0x40,0x00};
874 /* nopl 0(%[re]ax,%[re]ax,1) */
875 static const char alt_5[] =
876 {0x0f,0x1f,0x44,0x00,0x00};
877 /* nopw 0(%[re]ax,%[re]ax,1) */
878 static const char alt_6[] =
879 {0x66,0x0f,0x1f,0x44,0x00,0x00};
880 /* nopl 0L(%[re]ax) */
881 static const char alt_7[] =
882 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
883 /* nopl 0L(%[re]ax,%[re]ax,1) */
884 static const char alt_8[] =
885 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
886 /* nopw 0L(%[re]ax,%[re]ax,1) */
887 static const char alt_9[] =
888 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
889 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
890 static const char alt_10[] =
891 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
892 /* data16
893 nopw %cs:0L(%[re]ax,%[re]ax,1) */
894 static const char alt_long_11[] =
895 {0x66,
896 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
897 /* data16
898 data16
899 nopw %cs:0L(%[re]ax,%[re]ax,1) */
900 static const char alt_long_12[] =
901 {0x66,
902 0x66,
903 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
904 /* data16
905 data16
906 data16
907 nopw %cs:0L(%[re]ax,%[re]ax,1) */
908 static const char alt_long_13[] =
909 {0x66,
910 0x66,
911 0x66,
912 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
913 /* data16
914 data16
915 data16
916 data16
917 nopw %cs:0L(%[re]ax,%[re]ax,1) */
918 static const char alt_long_14[] =
919 {0x66,
920 0x66,
921 0x66,
922 0x66,
923 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
924 /* data16
925 data16
926 data16
927 data16
928 data16
929 nopw %cs:0L(%[re]ax,%[re]ax,1) */
930 static const char alt_long_15[] =
931 {0x66,
932 0x66,
933 0x66,
934 0x66,
935 0x66,
936 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
937 /* nopl 0(%[re]ax,%[re]ax,1)
938 nopw 0(%[re]ax,%[re]ax,1) */
939 static const char alt_short_11[] =
940 {0x0f,0x1f,0x44,0x00,0x00,
941 0x66,0x0f,0x1f,0x44,0x00,0x00};
942 /* nopw 0(%[re]ax,%[re]ax,1)
943 nopw 0(%[re]ax,%[re]ax,1) */
944 static const char alt_short_12[] =
945 {0x66,0x0f,0x1f,0x44,0x00,0x00,
946 0x66,0x0f,0x1f,0x44,0x00,0x00};
947 /* nopw 0(%[re]ax,%[re]ax,1)
948 nopl 0L(%[re]ax) */
949 static const char alt_short_13[] =
950 {0x66,0x0f,0x1f,0x44,0x00,0x00,
951 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
952 /* nopl 0L(%[re]ax)
953 nopl 0L(%[re]ax) */
954 static const char alt_short_14[] =
955 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
956 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
957 /* nopl 0L(%[re]ax)
958 nopl 0L(%[re]ax,%[re]ax,1) */
959 static const char alt_short_15[] =
960 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
961 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
962 static const char *const alt_short_patt[] = {
963 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
964 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
965 alt_short_14, alt_short_15
966 };
967 static const char *const alt_long_patt[] = {
968 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
969 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
970 alt_long_14, alt_long_15
971 };
972
973 /* Only align if COUNT is positive and no larger than the supported maximum. */
974 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
975 return;
976
977 /* We need to decide which NOP sequence to use for 32bit and
978 64bit. When -mtune= is used:
979
980 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
981 PROCESSOR_GENERIC32, f32_patt will be used.
982 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
983 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
984 PROCESSOR_GENERIC64, alt_long_patt will be used.
985 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
986 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
987 will be used.
988
989 When -mtune= isn't used, alt_long_patt will be used if
990 cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
991 be used.
992
993 When -march= or .arch is used, we can't use anything beyond
994 cpu_arch_isa_flags. */
995
996 if (flag_code == CODE_16BIT)
997 {
998 if (count > 8)
999 {
1000 memcpy (fragP->fr_literal + fragP->fr_fix,
1001 jump_31, count);
1002 /* Adjust jump offset. */
1003 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1004 }
1005 else
1006 memcpy (fragP->fr_literal + fragP->fr_fix,
1007 f16_patt[count - 1], count);
1008 }
1009 else
1010 {
1011 const char *const *patt = NULL;
1012
1013 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1014 {
1015 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1016 switch (cpu_arch_tune)
1017 {
1018 case PROCESSOR_UNKNOWN:
1019 /* We use cpu_arch_isa_flags to check if we SHOULD
1020 optimize for Cpu686. */
1021 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1022 patt = alt_long_patt;
1023 else
1024 patt = f32_patt;
1025 break;
1026 case PROCESSOR_PENTIUMPRO:
1027 case PROCESSOR_PENTIUM4:
1028 case PROCESSOR_NOCONA:
1029 case PROCESSOR_CORE:
1030 case PROCESSOR_CORE2:
1031 case PROCESSOR_COREI7:
1032 case PROCESSOR_L1OM:
1033 case PROCESSOR_GENERIC64:
1034 patt = alt_long_patt;
1035 break;
1036 case PROCESSOR_K6:
1037 case PROCESSOR_ATHLON:
1038 case PROCESSOR_K8:
1039 case PROCESSOR_AMDFAM10:
1040 case PROCESSOR_BDVER1:
1041 patt = alt_short_patt;
1042 break;
1043 case PROCESSOR_I386:
1044 case PROCESSOR_I486:
1045 case PROCESSOR_PENTIUM:
1046 case PROCESSOR_GENERIC32:
1047 patt = f32_patt;
1048 break;
1049 }
1050 }
1051 else
1052 {
1053 switch (fragP->tc_frag_data.tune)
1054 {
1055 case PROCESSOR_UNKNOWN:
1056 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1057 PROCESSOR_UNKNOWN. */
1058 abort ();
1059 break;
1060
1061 case PROCESSOR_I386:
1062 case PROCESSOR_I486:
1063 case PROCESSOR_PENTIUM:
1064 case PROCESSOR_K6:
1065 case PROCESSOR_ATHLON:
1066 case PROCESSOR_K8:
1067 case PROCESSOR_AMDFAM10:
1068 case PROCESSOR_BDVER1:
1069 case PROCESSOR_GENERIC32:
1070 /* We use cpu_arch_isa_flags to check if we CAN optimize
1071 for Cpu686. */
1072 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1073 patt = alt_short_patt;
1074 else
1075 patt = f32_patt;
1076 break;
1077 case PROCESSOR_PENTIUMPRO:
1078 case PROCESSOR_PENTIUM4:
1079 case PROCESSOR_NOCONA:
1080 case PROCESSOR_CORE:
1081 case PROCESSOR_CORE2:
1082 case PROCESSOR_COREI7:
1083 case PROCESSOR_L1OM:
1084 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1085 patt = alt_long_patt;
1086 else
1087 patt = f32_patt;
1088 break;
1089 case PROCESSOR_GENERIC64:
1090 patt = alt_long_patt;
1091 break;
1092 }
1093 }
1094
1095 if (patt == f32_patt)
1096 {
1097 /* If the padding is less than 15 bytes, we use the normal
1098 ones. Otherwise, we use a jump instruction and adjust
1099 its offset. */
1100 int limit;
1101
1102 /* For 64bit, the limit is 3 bytes. */
1103 if (flag_code == CODE_64BIT
1104 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1105 limit = 3;
1106 else
1107 limit = 15;
1108 if (count < limit)
1109 memcpy (fragP->fr_literal + fragP->fr_fix,
1110 patt[count - 1], count);
1111 else
1112 {
1113 memcpy (fragP->fr_literal + fragP->fr_fix,
1114 jump_31, count);
1115 /* Adjust jump offset. */
1116 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1117 }
1118 }
1119 else
1120 {
1121 /* The maximum length of an instruction is 15 bytes. If the
1122 padding is greater than 15 bytes and we don't use jump,
1123 we have to break it into smaller pieces. */
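/* Illustrative example (not part of the original source): with one of
   the alt patterns and count == 20, the loop below first copies the
   15-byte patt[14] at offset 5 into the frag, then the leading 5 bytes
   are filled with the 5-byte patt[4], so the padding is made up of
   whole instructions rather than one overlong sequence.  */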
1124 int padding = count;
1125 while (padding > 15)
1126 {
1127 padding -= 15;
1128 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1129 patt [14], 15);
1130 }
1131
1132 if (padding)
1133 memcpy (fragP->fr_literal + fragP->fr_fix,
1134 patt [padding - 1], padding);
1135 }
1136 }
1137 fragP->fr_var = count;
1138 }
1139
1140 static INLINE int
1141 operand_type_all_zero (const union i386_operand_type *x)
1142 {
1143 switch (ARRAY_SIZE(x->array))
1144 {
1145 case 3:
1146 if (x->array[2])
1147 return 0;
1148 case 2:
1149 if (x->array[1])
1150 return 0;
1151 case 1:
1152 return !x->array[0];
1153 default:
1154 abort ();
1155 }
1156 }
1157
1158 static INLINE void
1159 operand_type_set (union i386_operand_type *x, unsigned int v)
1160 {
1161 switch (ARRAY_SIZE(x->array))
1162 {
1163 case 3:
1164 x->array[2] = v;
1165 case 2:
1166 x->array[1] = v;
1167 case 1:
1168 x->array[0] = v;
1169 break;
1170 default:
1171 abort ();
1172 }
1173 }
1174
1175 static INLINE int
1176 operand_type_equal (const union i386_operand_type *x,
1177 const union i386_operand_type *y)
1178 {
1179 switch (ARRAY_SIZE(x->array))
1180 {
1181 case 3:
1182 if (x->array[2] != y->array[2])
1183 return 0;
1184 case 2:
1185 if (x->array[1] != y->array[1])
1186 return 0;
1187 case 1:
1188 return x->array[0] == y->array[0];
1189 break;
1190 default:
1191 abort ();
1192 }
1193 }
1194
1195 static INLINE int
1196 cpu_flags_all_zero (const union i386_cpu_flags *x)
1197 {
1198 switch (ARRAY_SIZE(x->array))
1199 {
1200 case 3:
1201 if (x->array[2])
1202 return 0;
1203 case 2:
1204 if (x->array[1])
1205 return 0;
1206 case 1:
1207 return !x->array[0];
1208 default:
1209 abort ();
1210 }
1211 }
1212
1213 static INLINE void
1214 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1215 {
1216 switch (ARRAY_SIZE(x->array))
1217 {
1218 case 3:
1219 x->array[2] = v;
1220 case 2:
1221 x->array[1] = v;
1222 case 1:
1223 x->array[0] = v;
1224 break;
1225 default:
1226 abort ();
1227 }
1228 }
1229
1230 static INLINE int
1231 cpu_flags_equal (const union i386_cpu_flags *x,
1232 const union i386_cpu_flags *y)
1233 {
1234 switch (ARRAY_SIZE(x->array))
1235 {
1236 case 3:
1237 if (x->array[2] != y->array[2])
1238 return 0;
1239 case 2:
1240 if (x->array[1] != y->array[1])
1241 return 0;
1242 case 1:
1243 return x->array[0] == y->array[0];
1244 break;
1245 default:
1246 abort ();
1247 }
1248 }
1249
1250 static INLINE int
1251 cpu_flags_check_cpu64 (i386_cpu_flags f)
1252 {
1253 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1254 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1255 }
1256
1257 static INLINE i386_cpu_flags
1258 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1259 {
1260 switch (ARRAY_SIZE (x.array))
1261 {
1262 case 3:
1263 x.array [2] &= y.array [2];
1264 case 2:
1265 x.array [1] &= y.array [1];
1266 case 1:
1267 x.array [0] &= y.array [0];
1268 break;
1269 default:
1270 abort ();
1271 }
1272 return x;
1273 }
1274
1275 static INLINE i386_cpu_flags
1276 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1277 {
1278 switch (ARRAY_SIZE (x.array))
1279 {
1280 case 3:
1281 x.array [2] |= y.array [2];
1282 case 2:
1283 x.array [1] |= y.array [1];
1284 case 1:
1285 x.array [0] |= y.array [0];
1286 break;
1287 default:
1288 abort ();
1289 }
1290 return x;
1291 }
1292
1293 static INLINE i386_cpu_flags
1294 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1295 {
1296 switch (ARRAY_SIZE (x.array))
1297 {
1298 case 3:
1299 x.array [2] &= ~y.array [2];
1300 case 2:
1301 x.array [1] &= ~y.array [1];
1302 case 1:
1303 x.array [0] &= ~y.array [0];
1304 break;
1305 default:
1306 abort ();
1307 }
1308 return x;
1309 }
1310
1311 #define CPU_FLAGS_ARCH_MATCH 0x1
1312 #define CPU_FLAGS_64BIT_MATCH 0x2
1313 #define CPU_FLAGS_AES_MATCH 0x4
1314 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1315 #define CPU_FLAGS_AVX_MATCH 0x10
1316
1317 #define CPU_FLAGS_32BIT_MATCH \
1318 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1319 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1320 #define CPU_FLAGS_PERFECT_MATCH \
1321 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1322
1323 /* Return CPU flags match bits. */
1324
1325 static int
1326 cpu_flags_match (const insn_template *t)
1327 {
1328 i386_cpu_flags x = t->cpu_flags;
1329 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1330
1331 x.bitfield.cpu64 = 0;
1332 x.bitfield.cpuno64 = 0;
1333
1334 if (cpu_flags_all_zero (&x))
1335 {
1336 /* This instruction is available on all archs. */
1337 match |= CPU_FLAGS_32BIT_MATCH;
1338 }
1339 else
1340 {
1341 /* This instruction is available only on some archs. */
1342 i386_cpu_flags cpu = cpu_arch_flags;
1343
1344 cpu.bitfield.cpu64 = 0;
1345 cpu.bitfield.cpuno64 = 0;
1346 cpu = cpu_flags_and (x, cpu);
1347 if (!cpu_flags_all_zero (&cpu))
1348 {
1349 if (x.bitfield.cpuavx)
1350 {
1351 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1352 if (cpu.bitfield.cpuavx)
1353 {
1354 /* Check SSE2AVX. */
1355 if (!t->opcode_modifier.sse2avx || sse2avx)
1356 {
1357 match |= (CPU_FLAGS_ARCH_MATCH
1358 | CPU_FLAGS_AVX_MATCH);
1359 /* Check AES. */
1360 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1361 match |= CPU_FLAGS_AES_MATCH;
1362 /* Check PCLMUL. */
1363 if (!x.bitfield.cpupclmul
1364 || cpu.bitfield.cpupclmul)
1365 match |= CPU_FLAGS_PCLMUL_MATCH;
1366 }
1367 }
1368 else
1369 match |= CPU_FLAGS_ARCH_MATCH;
1370 }
1371 else
1372 match |= CPU_FLAGS_32BIT_MATCH;
1373 }
1374 }
1375 return match;
1376 }
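/* Illustrative note (not part of the original source): a template whose
   cpu_flags are empty once Cpu64/CpuNo64 are masked off always gets
   CPU_FLAGS_32BIT_MATCH here, and if cpu_flags_check_cpu64 also accepts
   it for the current mode the result is CPU_FLAGS_PERFECT_MATCH.  */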
1377
1378 static INLINE i386_operand_type
1379 operand_type_and (i386_operand_type x, i386_operand_type y)
1380 {
1381 switch (ARRAY_SIZE (x.array))
1382 {
1383 case 3:
1384 x.array [2] &= y.array [2];
1385 case 2:
1386 x.array [1] &= y.array [1];
1387 case 1:
1388 x.array [0] &= y.array [0];
1389 break;
1390 default:
1391 abort ();
1392 }
1393 return x;
1394 }
1395
1396 static INLINE i386_operand_type
1397 operand_type_or (i386_operand_type x, i386_operand_type y)
1398 {
1399 switch (ARRAY_SIZE (x.array))
1400 {
1401 case 3:
1402 x.array [2] |= y.array [2];
1403 case 2:
1404 x.array [1] |= y.array [1];
1405 case 1:
1406 x.array [0] |= y.array [0];
1407 break;
1408 default:
1409 abort ();
1410 }
1411 return x;
1412 }
1413
1414 static INLINE i386_operand_type
1415 operand_type_xor (i386_operand_type x, i386_operand_type y)
1416 {
1417 switch (ARRAY_SIZE (x.array))
1418 {
1419 case 3:
1420 x.array [2] ^= y.array [2];
1421 case 2:
1422 x.array [1] ^= y.array [1];
1423 case 1:
1424 x.array [0] ^= y.array [0];
1425 break;
1426 default:
1427 abort ();
1428 }
1429 return x;
1430 }
1431
1432 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1433 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1434 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1435 static const i386_operand_type inoutportreg
1436 = OPERAND_TYPE_INOUTPORTREG;
1437 static const i386_operand_type reg16_inoutportreg
1438 = OPERAND_TYPE_REG16_INOUTPORTREG;
1439 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1440 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1441 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1442 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1443 static const i386_operand_type anydisp
1444 = OPERAND_TYPE_ANYDISP;
1445 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1446 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1447 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1448 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1449 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1450 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1451 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1452 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1453 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1454 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1455 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1456 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1457
1458 enum operand_type
1459 {
1460 reg,
1461 imm,
1462 disp,
1463 anymem
1464 };
1465
1466 static INLINE int
1467 operand_type_check (i386_operand_type t, enum operand_type c)
1468 {
1469 switch (c)
1470 {
1471 case reg:
1472 return (t.bitfield.reg8
1473 || t.bitfield.reg16
1474 || t.bitfield.reg32
1475 || t.bitfield.reg64);
1476
1477 case imm:
1478 return (t.bitfield.imm8
1479 || t.bitfield.imm8s
1480 || t.bitfield.imm16
1481 || t.bitfield.imm32
1482 || t.bitfield.imm32s
1483 || t.bitfield.imm64);
1484
1485 case disp:
1486 return (t.bitfield.disp8
1487 || t.bitfield.disp16
1488 || t.bitfield.disp32
1489 || t.bitfield.disp32s
1490 || t.bitfield.disp64);
1491
1492 case anymem:
1493 return (t.bitfield.disp8
1494 || t.bitfield.disp16
1495 || t.bitfield.disp32
1496 || t.bitfield.disp32s
1497 || t.bitfield.disp64
1498 || t.bitfield.baseindex);
1499
1500 default:
1501 abort ();
1502 }
1503
1504 return 0;
1505 }
1506
1507 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1508 operand J for instruction template T. */
1509
1510 static INLINE int
1511 match_reg_size (const insn_template *t, unsigned int j)
1512 {
1513 return !((i.types[j].bitfield.byte
1514 && !t->operand_types[j].bitfield.byte)
1515 || (i.types[j].bitfield.word
1516 && !t->operand_types[j].bitfield.word)
1517 || (i.types[j].bitfield.dword
1518 && !t->operand_types[j].bitfield.dword)
1519 || (i.types[j].bitfield.qword
1520 && !t->operand_types[j].bitfield.qword));
1521 }
1522
1523 /* Return 1 if there is no conflict in any size on operand J for
1524 instruction template T. */
1525
1526 static INLINE int
1527 match_mem_size (const insn_template *t, unsigned int j)
1528 {
1529 return (match_reg_size (t, j)
1530 && !((i.types[j].bitfield.unspecified
1531 && !t->operand_types[j].bitfield.unspecified)
1532 || (i.types[j].bitfield.fword
1533 && !t->operand_types[j].bitfield.fword)
1534 || (i.types[j].bitfield.tbyte
1535 && !t->operand_types[j].bitfield.tbyte)
1536 || (i.types[j].bitfield.xmmword
1537 && !t->operand_types[j].bitfield.xmmword)
1538 || (i.types[j].bitfield.ymmword
1539 && !t->operand_types[j].bitfield.ymmword)));
1540 }
1541
1542 /* Return 1 if there is no size conflict on any operands for
1543 instruction template T. */
1544
1545 static INLINE int
1546 operand_size_match (const insn_template *t)
1547 {
1548 unsigned int j;
1549 int match = 1;
1550
1551 /* Don't check jump instructions. */
1552 if (t->opcode_modifier.jump
1553 || t->opcode_modifier.jumpbyte
1554 || t->opcode_modifier.jumpdword
1555 || t->opcode_modifier.jumpintersegment)
1556 return match;
1557
1558 /* Check memory and accumulator operand size. */
1559 for (j = 0; j < i.operands; j++)
1560 {
1561 if (t->operand_types[j].bitfield.anysize)
1562 continue;
1563
1564 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1565 {
1566 match = 0;
1567 break;
1568 }
1569
1570 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1571 {
1572 match = 0;
1573 break;
1574 }
1575 }
1576
1577 if (match)
1578 return match;
1579 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1580 {
1581 mismatch:
1582 i.error = operand_size_mismatch;
1583 return 0;
1584 }
1585
1586 /* Check reverse. */
1587 gas_assert (i.operands == 2);
1588
1589 match = 1;
1590 for (j = 0; j < 2; j++)
1591 {
1592 if (t->operand_types[j].bitfield.acc
1593 && !match_reg_size (t, j ? 0 : 1))
1594 goto mismatch;
1595
1596 if (i.types[j].bitfield.mem
1597 && !match_mem_size (t, j ? 0 : 1))
1598 goto mismatch;
1599 }
1600
1601 return match;
1602 }
1603
1604 static INLINE int
1605 operand_type_match (i386_operand_type overlap,
1606 i386_operand_type given)
1607 {
1608 i386_operand_type temp = overlap;
1609
1610 temp.bitfield.jumpabsolute = 0;
1611 temp.bitfield.unspecified = 0;
1612 temp.bitfield.byte = 0;
1613 temp.bitfield.word = 0;
1614 temp.bitfield.dword = 0;
1615 temp.bitfield.fword = 0;
1616 temp.bitfield.qword = 0;
1617 temp.bitfield.tbyte = 0;
1618 temp.bitfield.xmmword = 0;
1619 temp.bitfield.ymmword = 0;
1620 if (operand_type_all_zero (&temp))
1621 goto mismatch;
1622
1623 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1624 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1625 return 1;
1626
1627 mismatch:
1628 i.error = operand_type_mismatch;
1629 return 0;
1630 }
1631
1632 /* If given types g0 and g1 are registers they must be of the same type
1633 unless the expected operand type register overlap is null.
1634 Note that Acc in a template matches every size of reg. */
1635
1636 static INLINE int
1637 operand_type_register_match (i386_operand_type m0,
1638 i386_operand_type g0,
1639 i386_operand_type t0,
1640 i386_operand_type m1,
1641 i386_operand_type g1,
1642 i386_operand_type t1)
1643 {
1644 if (!operand_type_check (g0, reg))
1645 return 1;
1646
1647 if (!operand_type_check (g1, reg))
1648 return 1;
1649
1650 if (g0.bitfield.reg8 == g1.bitfield.reg8
1651 && g0.bitfield.reg16 == g1.bitfield.reg16
1652 && g0.bitfield.reg32 == g1.bitfield.reg32
1653 && g0.bitfield.reg64 == g1.bitfield.reg64)
1654 return 1;
1655
1656 if (m0.bitfield.acc)
1657 {
1658 t0.bitfield.reg8 = 1;
1659 t0.bitfield.reg16 = 1;
1660 t0.bitfield.reg32 = 1;
1661 t0.bitfield.reg64 = 1;
1662 }
1663
1664 if (m1.bitfield.acc)
1665 {
1666 t1.bitfield.reg8 = 1;
1667 t1.bitfield.reg16 = 1;
1668 t1.bitfield.reg32 = 1;
1669 t1.bitfield.reg64 = 1;
1670 }
1671
1672 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1673 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1674 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1675 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1676 return 1;
1677
1678 i.error = register_type_mismatch;
1679
1680 return 0;
1681 }
1682
1683 static INLINE unsigned int
1684 mode_from_disp_size (i386_operand_type t)
1685 {
1686 if (t.bitfield.disp8)
1687 return 1;
1688 else if (t.bitfield.disp16
1689 || t.bitfield.disp32
1690 || t.bitfield.disp32s)
1691 return 2;
1692 else
1693 return 0;
1694 }
1695
1696 static INLINE int
1697 fits_in_signed_byte (offsetT num)
1698 {
1699 return (num >= -128) && (num <= 127);
1700 }
1701
1702 static INLINE int
1703 fits_in_unsigned_byte (offsetT num)
1704 {
1705 return (num & 0xff) == num;
1706 }
1707
1708 static INLINE int
1709 fits_in_unsigned_word (offsetT num)
1710 {
1711 return (num & 0xffff) == num;
1712 }
1713
1714 static INLINE int
1715 fits_in_signed_word (offsetT num)
1716 {
1717 return (-32768 <= num) && (num <= 32767);
1718 }
1719
1720 static INLINE int
1721 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1722 {
1723 #ifndef BFD64
1724 return 1;
1725 #else
1726 return (!(((offsetT) -1 << 31) & num)
1727 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1728 #endif
1729 } /* fits_in_signed_long() */
1730
1731 static INLINE int
1732 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1733 {
1734 #ifndef BFD64
1735 return 1;
1736 #else
1737 return (num & (((offsetT) 2 << 31) - 1)) == num;
1738 #endif
1739 } /* fits_in_unsigned_long() */
1740
1741 static INLINE int
1742 fits_in_imm4 (offsetT num)
1743 {
1744 return (num & 0xf) == num;
1745 }
1746
1747 static i386_operand_type
1748 smallest_imm_type (offsetT num)
1749 {
1750 i386_operand_type t;
1751
1752 operand_type_set (&t, 0);
1753 t.bitfield.imm64 = 1;
1754
1755 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1756 {
1757 /* This code is disabled on the 486 because all the Imm1 forms
1758 in the opcode table are slower on the i486. They're the
1759 versions with the implicitly specified single-position
1760 displacement, which has another syntax if you really want to
1761 use that form. */
1762 t.bitfield.imm1 = 1;
1763 t.bitfield.imm8 = 1;
1764 t.bitfield.imm8s = 1;
1765 t.bitfield.imm16 = 1;
1766 t.bitfield.imm32 = 1;
1767 t.bitfield.imm32s = 1;
1768 }
1769 else if (fits_in_signed_byte (num))
1770 {
1771 t.bitfield.imm8 = 1;
1772 t.bitfield.imm8s = 1;
1773 t.bitfield.imm16 = 1;
1774 t.bitfield.imm32 = 1;
1775 t.bitfield.imm32s = 1;
1776 }
1777 else if (fits_in_unsigned_byte (num))
1778 {
1779 t.bitfield.imm8 = 1;
1780 t.bitfield.imm16 = 1;
1781 t.bitfield.imm32 = 1;
1782 t.bitfield.imm32s = 1;
1783 }
1784 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1785 {
1786 t.bitfield.imm16 = 1;
1787 t.bitfield.imm32 = 1;
1788 t.bitfield.imm32s = 1;
1789 }
1790 else if (fits_in_signed_long (num))
1791 {
1792 t.bitfield.imm32 = 1;
1793 t.bitfield.imm32s = 1;
1794 }
1795 else if (fits_in_unsigned_long (num))
1796 t.bitfield.imm32 = 1;
1797
1798 return t;
1799 }
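/* Illustrative examples (not part of the original source):
   smallest_imm_type (200) sets imm8/imm16/imm32/imm32s but not imm8s,
   since 200 fits in an unsigned byte but not a signed one, while
   smallest_imm_type (-5) sets imm8s as well.  Imm64 is always set at
   the top of the function as the fallback.  */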
1800
1801 static offsetT
1802 offset_in_range (offsetT val, int size)
1803 {
1804 addressT mask;
1805
1806 switch (size)
1807 {
1808 case 1: mask = ((addressT) 1 << 8) - 1; break;
1809 case 2: mask = ((addressT) 1 << 16) - 1; break;
1810 case 4: mask = ((addressT) 2 << 31) - 1; break;
1811 #ifdef BFD64
1812 case 8: mask = ((addressT) 2 << 63) - 1; break;
1813 #endif
1814 default: abort ();
1815 }
1816
1817 #ifdef BFD64
1818 /* If BFD64, sign extend val for 32bit address mode. */
1819 if (flag_code != CODE_64BIT
1820 || i.prefix[ADDR_PREFIX])
1821 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1822 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1823 #endif
1824
1825 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1826 {
1827 char buf1[40], buf2[40];
1828
1829 sprint_value (buf1, val);
1830 sprint_value (buf2, val & mask);
1831 as_warn (_("%s shortened to %s"), buf1, buf2);
1832 }
1833 return val & mask;
1834 }
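/* Illustrative example (not part of the original source): for a 2-byte
   field, offset_in_range (0x12345, 2) masks the value down to 0x2345
   and, because the discarded high bits are neither all zero nor all
   one, emits the "shortened to" warning above before returning.  */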
1835
1836 enum PREFIX_GROUP
1837 {
1838 PREFIX_EXIST = 0,
1839 PREFIX_LOCK,
1840 PREFIX_REP,
1841 PREFIX_OTHER
1842 };
1843
1844 /* Returns
1845 a. PREFIX_EXIST if attempting to add a prefix where one from the
1846 same class already exists.
1847 b. PREFIX_LOCK if lock prefix is added.
1848 c. PREFIX_REP if rep/repne prefix is added.
1849 d. PREFIX_OTHER if other prefix is added.
1850 */
1851
1852 static enum PREFIX_GROUP
1853 add_prefix (unsigned int prefix)
1854 {
1855 enum PREFIX_GROUP ret = PREFIX_OTHER;
1856 unsigned int q;
1857
1858 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1859 && flag_code == CODE_64BIT)
1860 {
1861 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1862 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1863 && (prefix & (REX_R | REX_X | REX_B))))
1864 ret = PREFIX_EXIST;
1865 q = REX_PREFIX;
1866 }
1867 else
1868 {
1869 switch (prefix)
1870 {
1871 default:
1872 abort ();
1873
1874 case CS_PREFIX_OPCODE:
1875 case DS_PREFIX_OPCODE:
1876 case ES_PREFIX_OPCODE:
1877 case FS_PREFIX_OPCODE:
1878 case GS_PREFIX_OPCODE:
1879 case SS_PREFIX_OPCODE:
1880 q = SEG_PREFIX;
1881 break;
1882
1883 case REPNE_PREFIX_OPCODE:
1884 case REPE_PREFIX_OPCODE:
1885 q = REP_PREFIX;
1886 ret = PREFIX_REP;
1887 break;
1888
1889 case LOCK_PREFIX_OPCODE:
1890 q = LOCK_PREFIX;
1891 ret = PREFIX_LOCK;
1892 break;
1893
1894 case FWAIT_OPCODE:
1895 q = WAIT_PREFIX;
1896 break;
1897
1898 case ADDR_PREFIX_OPCODE:
1899 q = ADDR_PREFIX;
1900 break;
1901
1902 case DATA_PREFIX_OPCODE:
1903 q = DATA_PREFIX;
1904 break;
1905 }
1906 if (i.prefix[q] != 0)
1907 ret = PREFIX_EXIST;
1908 }
1909
1910 if (ret)
1911 {
1912 if (!i.prefix[q])
1913 ++i.prefixes;
1914 i.prefix[q] |= prefix;
1915 }
1916 else
1917 as_bad (_("same type of prefix used twice"));
1918
1919 return ret;
1920 }
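/* Illustrative example (not part of the original source): the first
   add_prefix (LOCK_PREFIX_OPCODE) call stores the byte in
   i.prefix[LOCK_PREFIX], bumps i.prefixes and returns PREFIX_LOCK; a
   second identical call finds the slot occupied, reports "same type of
   prefix used twice" and returns PREFIX_EXIST.  */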
1921
1922 static void
1923 set_code_flag (int value)
1924 {
1925 flag_code = (enum flag_code) value;
1926 if (flag_code == CODE_64BIT)
1927 {
1928 cpu_arch_flags.bitfield.cpu64 = 1;
1929 cpu_arch_flags.bitfield.cpuno64 = 0;
1930 }
1931 else
1932 {
1933 cpu_arch_flags.bitfield.cpu64 = 0;
1934 cpu_arch_flags.bitfield.cpuno64 = 1;
1935 }
1936 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1937 {
1938 as_bad (_("64bit mode not supported on this CPU."));
1939 }
1940 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1941 {
1942 as_bad (_("32bit mode not supported on this CPU."));
1943 }
1944 stackop_size = '\0';
1945 }
1946
1947 static void
1948 set_16bit_gcc_code_flag (int new_code_flag)
1949 {
1950 flag_code = (enum flag_code) new_code_flag;
1951 if (flag_code != CODE_16BIT)
1952 abort ();
1953 cpu_arch_flags.bitfield.cpu64 = 0;
1954 cpu_arch_flags.bitfield.cpuno64 = 1;
1955 stackop_size = LONG_MNEM_SUFFIX;
1956 }
1957
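/* Handle the .intel_syntax and .att_syntax directives.  The optional
   "prefix"/"noprefix" argument controls whether registers must be
   written with the '%' prefix.  */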
1958 static void
1959 set_intel_syntax (int syntax_flag)
1960 {
1961 /* Find out if register prefixing is specified. */
1962 int ask_naked_reg = 0;
1963
1964 SKIP_WHITESPACE ();
1965 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1966 {
1967 char *string = input_line_pointer;
1968 int e = get_symbol_end ();
1969
1970 if (strcmp (string, "prefix") == 0)
1971 ask_naked_reg = 1;
1972 else if (strcmp (string, "noprefix") == 0)
1973 ask_naked_reg = -1;
1974 else
1975 as_bad (_("bad argument to syntax directive."));
1976 *input_line_pointer = e;
1977 }
1978 demand_empty_rest_of_line ();
1979
1980 intel_syntax = syntax_flag;
1981
1982 if (ask_naked_reg == 0)
1983 allow_naked_reg = (intel_syntax
1984 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
1985 else
1986 allow_naked_reg = (ask_naked_reg < 0);
1987
1988 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
1989
1990 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
1991 identifier_chars['$'] = intel_syntax ? '$' : 0;
1992 register_prefix = allow_naked_reg ? "" : "%";
1993 }
1994
1995 static void
1996 set_intel_mnemonic (int mnemonic_flag)
1997 {
1998 intel_mnemonic = mnemonic_flag;
1999 }
2000
2001 static void
2002 set_allow_index_reg (int flag)
2003 {
2004 allow_index_reg = flag;
2005 }
2006
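/* Handle the .sse_check directive; the argument selects whether use of
   legacy SSE instructions is ignored ("none"), warned about ("warning"),
   or rejected ("error").  */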
2007 static void
2008 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2009 {
2010 SKIP_WHITESPACE ();
2011
2012 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2013 {
2014 char *string = input_line_pointer;
2015 int e = get_symbol_end ();
2016
2017 if (strcmp (string, "none") == 0)
2018 sse_check = sse_check_none;
2019 else if (strcmp (string, "warning") == 0)
2020 sse_check = sse_check_warning;
2021 else if (strcmp (string, "error") == 0)
2022 sse_check = sse_check_error;
2023 else
2024 as_bad (_("bad argument to sse_check directive."));
2025 *input_line_pointer = e;
2026 }
2027 else
2028 as_bad (_("missing argument for sse_check directive"));
2029
2030 demand_empty_rest_of_line ();
2031 }
2032
2033 static void
2034 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2035 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2036 {
2037 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2038 static const char *arch;
2039
2040   /* Intel L1OM is only supported on ELF.  */
2041 if (!IS_ELF)
2042 return;
2043
2044 if (!arch)
2045 {
2046 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2047 use default_arch. */
2048 arch = cpu_arch_name;
2049 if (!arch)
2050 arch = default_arch;
2051 }
2052
2053 /* If we are targeting Intel L1OM, we must enable it. */
2054 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2055 || new_flag.bitfield.cpul1om)
2056 return;
2057
2058 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2059 #endif
2060 }
2061
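/* Handle the .arch directive.  An argument starting with '.' names a
   feature set (e.g. ".sse4.1") that is added to, or with a "no" spelling
   removed from, the current flags; any other argument selects a new base
   architecture.  An optional ",nojumps" modifier disables promotion of
   conditional jumps.  */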
2062 static void
2063 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2064 {
2065 SKIP_WHITESPACE ();
2066
2067 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2068 {
2069 char *string = input_line_pointer;
2070 int e = get_symbol_end ();
2071 unsigned int j;
2072 i386_cpu_flags flags;
2073
2074 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2075 {
2076 if (strcmp (string, cpu_arch[j].name) == 0)
2077 {
2078 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2079
2080 if (*string != '.')
2081 {
2082 cpu_arch_name = cpu_arch[j].name;
2083 cpu_sub_arch_name = NULL;
2084 cpu_arch_flags = cpu_arch[j].flags;
2085 if (flag_code == CODE_64BIT)
2086 {
2087 cpu_arch_flags.bitfield.cpu64 = 1;
2088 cpu_arch_flags.bitfield.cpuno64 = 0;
2089 }
2090 else
2091 {
2092 cpu_arch_flags.bitfield.cpu64 = 0;
2093 cpu_arch_flags.bitfield.cpuno64 = 1;
2094 }
2095 cpu_arch_isa = cpu_arch[j].type;
2096 cpu_arch_isa_flags = cpu_arch[j].flags;
2097 if (!cpu_arch_tune_set)
2098 {
2099 cpu_arch_tune = cpu_arch_isa;
2100 cpu_arch_tune_flags = cpu_arch_isa_flags;
2101 }
2102 break;
2103 }
2104
2105 if (strncmp (string + 1, "no", 2))
2106 flags = cpu_flags_or (cpu_arch_flags,
2107 cpu_arch[j].flags);
2108 else
2109 flags = cpu_flags_and_not (cpu_arch_flags,
2110 cpu_arch[j].flags);
2111 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2112 {
2113 if (cpu_sub_arch_name)
2114 {
2115 char *name = cpu_sub_arch_name;
2116 cpu_sub_arch_name = concat (name,
2117 cpu_arch[j].name,
2118 (const char *) NULL);
2119 free (name);
2120 }
2121 else
2122 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2123 cpu_arch_flags = flags;
2124 }
2125 *input_line_pointer = e;
2126 demand_empty_rest_of_line ();
2127 return;
2128 }
2129 }
2130 if (j >= ARRAY_SIZE (cpu_arch))
2131 as_bad (_("no such architecture: `%s'"), string);
2132
2133 *input_line_pointer = e;
2134 }
2135 else
2136 as_bad (_("missing cpu architecture"));
2137
2138 no_cond_jump_promotion = 0;
2139 if (*input_line_pointer == ','
2140 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2141 {
2142 char *string = ++input_line_pointer;
2143 int e = get_symbol_end ();
2144
2145 if (strcmp (string, "nojumps") == 0)
2146 no_cond_jump_promotion = 1;
2147 else if (strcmp (string, "jumps") == 0)
2148 ;
2149 else
2150 as_bad (_("no such architecture modifier: `%s'"), string);
2151
2152 *input_line_pointer = e;
2153 }
2154
2155 demand_empty_rest_of_line ();
2156 }
2157
2158 enum bfd_architecture
2159 i386_arch (void)
2160 {
2161 if (cpu_arch_isa == PROCESSOR_L1OM)
2162 {
2163 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2164 || flag_code != CODE_64BIT)
2165 as_fatal (_("Intel L1OM is 64bit ELF only"));
2166 return bfd_arch_l1om;
2167 }
2168 else
2169 return bfd_arch_i386;
2170 }
2171
2172 unsigned long
2173 i386_mach ()
2174 {
2175 if (!strcmp (default_arch, "x86_64"))
2176 {
2177 if (cpu_arch_isa == PROCESSOR_L1OM)
2178 {
2179 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2180 as_fatal (_("Intel L1OM is 64bit ELF only"));
2181 return bfd_mach_l1om;
2182 }
2183 else
2184 return bfd_mach_x86_64;
2185 }
2186 else if (!strcmp (default_arch, "i386"))
2187 return bfd_mach_i386_i386;
2188 else
2189 as_fatal (_("Unknown architecture"));
2190 }
2191 \f
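/* md_begin is called once, before any input is assembled: it builds the
   opcode and register hash tables, fills in the lexical classification
   tables used by the parser, and sets the DWARF CFI parameters for the
   selected code size.  */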
2192 void
2193 md_begin ()
2194 {
2195 const char *hash_err;
2196
2197 /* Initialize op_hash hash table. */
2198 op_hash = hash_new ();
2199
2200 {
2201 const insn_template *optab;
2202 templates *core_optab;
2203
2204 /* Setup for loop. */
2205 optab = i386_optab;
2206 core_optab = (templates *) xmalloc (sizeof (templates));
2207 core_optab->start = optab;
2208
2209 while (1)
2210 {
2211 ++optab;
2212 if (optab->name == NULL
2213 || strcmp (optab->name, (optab - 1)->name) != 0)
2214 {
2215 	    /* Different name: finish the current template list, add it to
2216 	       the hash table, and start a new one.  */
2217 core_optab->end = optab;
2218 hash_err = hash_insert (op_hash,
2219 (optab - 1)->name,
2220 (void *) core_optab);
2221 if (hash_err)
2222 {
2223 as_fatal (_("Internal Error: Can't hash %s: %s"),
2224 (optab - 1)->name,
2225 hash_err);
2226 }
2227 if (optab->name == NULL)
2228 break;
2229 core_optab = (templates *) xmalloc (sizeof (templates));
2230 core_optab->start = optab;
2231 }
2232 }
2233 }
2234
2235 /* Initialize reg_hash hash table. */
2236 reg_hash = hash_new ();
2237 {
2238 const reg_entry *regtab;
2239 unsigned int regtab_size = i386_regtab_size;
2240
2241 for (regtab = i386_regtab; regtab_size--; regtab++)
2242 {
2243 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2244 if (hash_err)
2245 as_fatal (_("Internal Error: Can't hash %s: %s"),
2246 regtab->reg_name,
2247 hash_err);
2248 }
2249 }
2250
2251 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2252 {
2253 int c;
2254 char *p;
2255
2256 for (c = 0; c < 256; c++)
2257 {
2258 if (ISDIGIT (c))
2259 {
2260 digit_chars[c] = c;
2261 mnemonic_chars[c] = c;
2262 register_chars[c] = c;
2263 operand_chars[c] = c;
2264 }
2265 else if (ISLOWER (c))
2266 {
2267 mnemonic_chars[c] = c;
2268 register_chars[c] = c;
2269 operand_chars[c] = c;
2270 }
2271 else if (ISUPPER (c))
2272 {
2273 mnemonic_chars[c] = TOLOWER (c);
2274 register_chars[c] = mnemonic_chars[c];
2275 operand_chars[c] = c;
2276 }
2277
2278 if (ISALPHA (c) || ISDIGIT (c))
2279 identifier_chars[c] = c;
2280 else if (c >= 128)
2281 {
2282 identifier_chars[c] = c;
2283 operand_chars[c] = c;
2284 }
2285 }
2286
2287 #ifdef LEX_AT
2288 identifier_chars['@'] = '@';
2289 #endif
2290 #ifdef LEX_QM
2291 identifier_chars['?'] = '?';
2292 operand_chars['?'] = '?';
2293 #endif
2294 digit_chars['-'] = '-';
2295 mnemonic_chars['_'] = '_';
2296 mnemonic_chars['-'] = '-';
2297 mnemonic_chars['.'] = '.';
2298 identifier_chars['_'] = '_';
2299 identifier_chars['.'] = '.';
2300
2301 for (p = operand_special_chars; *p != '\0'; p++)
2302 operand_chars[(unsigned char) *p] = *p;
2303 }
2304
2305 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2306 if (IS_ELF)
2307 {
2308 record_alignment (text_section, 2);
2309 record_alignment (data_section, 2);
2310 record_alignment (bss_section, 2);
2311 }
2312 #endif
2313
2314 if (flag_code == CODE_64BIT)
2315 {
2316 x86_dwarf2_return_column = 16;
2317 x86_cie_data_alignment = -8;
2318 }
2319 else
2320 {
2321 x86_dwarf2_return_column = 8;
2322 x86_cie_data_alignment = -4;
2323 }
2324 }
2325
2326 void
2327 i386_print_statistics (FILE *file)
2328 {
2329 hash_print_statistics (file, "i386 opcode", op_hash);
2330 hash_print_statistics (file, "i386 register", reg_hash);
2331 }
2332 \f
2333 #ifdef DEBUG386
2334
2335 /* Debugging routines for md_assemble. */
2336 static void pte (insn_template *);
2337 static void pt (i386_operand_type);
2338 static void pe (expressionS *);
2339 static void ps (symbolS *);
2340
2341 static void
2342 pi (char *line, i386_insn *x)
2343 {
2344 unsigned int i;
2345
2346 fprintf (stdout, "%s: template ", line);
2347 pte (&x->tm);
2348 fprintf (stdout, " address: base %s index %s scale %x\n",
2349 x->base_reg ? x->base_reg->reg_name : "none",
2350 x->index_reg ? x->index_reg->reg_name : "none",
2351 x->log2_scale_factor);
2352 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2353 x->rm.mode, x->rm.reg, x->rm.regmem);
2354 fprintf (stdout, " sib: base %x index %x scale %x\n",
2355 x->sib.base, x->sib.index, x->sib.scale);
2356 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2357 (x->rex & REX_W) != 0,
2358 (x->rex & REX_R) != 0,
2359 (x->rex & REX_X) != 0,
2360 (x->rex & REX_B) != 0);
2361 for (i = 0; i < x->operands; i++)
2362 {
2363 fprintf (stdout, " #%d: ", i + 1);
2364 pt (x->types[i]);
2365 fprintf (stdout, "\n");
2366 if (x->types[i].bitfield.reg8
2367 || x->types[i].bitfield.reg16
2368 || x->types[i].bitfield.reg32
2369 || x->types[i].bitfield.reg64
2370 || x->types[i].bitfield.regmmx
2371 || x->types[i].bitfield.regxmm
2372 || x->types[i].bitfield.regymm
2373 || x->types[i].bitfield.sreg2
2374 || x->types[i].bitfield.sreg3
2375 || x->types[i].bitfield.control
2376 || x->types[i].bitfield.debug
2377 || x->types[i].bitfield.test)
2378 fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
2379 if (operand_type_check (x->types[i], imm))
2380 pe (x->op[i].imms);
2381 if (operand_type_check (x->types[i], disp))
2382 pe (x->op[i].disps);
2383 }
2384 }
2385
2386 static void
2387 pte (insn_template *t)
2388 {
2389 unsigned int i;
2390 fprintf (stdout, " %d operands ", t->operands);
2391 fprintf (stdout, "opcode %x ", t->base_opcode);
2392 if (t->extension_opcode != None)
2393 fprintf (stdout, "ext %x ", t->extension_opcode);
2394 if (t->opcode_modifier.d)
2395 fprintf (stdout, "D");
2396 if (t->opcode_modifier.w)
2397 fprintf (stdout, "W");
2398 fprintf (stdout, "\n");
2399 for (i = 0; i < t->operands; i++)
2400 {
2401 fprintf (stdout, " #%d type ", i + 1);
2402 pt (t->operand_types[i]);
2403 fprintf (stdout, "\n");
2404 }
2405 }
2406
2407 static void
2408 pe (expressionS *e)
2409 {
2410 fprintf (stdout, " operation %d\n", e->X_op);
2411 fprintf (stdout, " add_number %ld (%lx)\n",
2412 (long) e->X_add_number, (long) e->X_add_number);
2413 if (e->X_add_symbol)
2414 {
2415 fprintf (stdout, " add_symbol ");
2416 ps (e->X_add_symbol);
2417 fprintf (stdout, "\n");
2418 }
2419 if (e->X_op_symbol)
2420 {
2421 fprintf (stdout, " op_symbol ");
2422 ps (e->X_op_symbol);
2423 fprintf (stdout, "\n");
2424 }
2425 }
2426
2427 static void
2428 ps (symbolS *s)
2429 {
2430 fprintf (stdout, "%s type %s%s",
2431 S_GET_NAME (s),
2432 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2433 segment_name (S_GET_SEGMENT (s)));
2434 }
2435
2436 static struct type_name
2437 {
2438 i386_operand_type mask;
2439 const char *name;
2440 }
2441 const type_names[] =
2442 {
2443 { OPERAND_TYPE_REG8, "r8" },
2444 { OPERAND_TYPE_REG16, "r16" },
2445 { OPERAND_TYPE_REG32, "r32" },
2446 { OPERAND_TYPE_REG64, "r64" },
2447 { OPERAND_TYPE_IMM8, "i8" },
2448   { OPERAND_TYPE_IMM8S, "i8s" },
2449 { OPERAND_TYPE_IMM16, "i16" },
2450 { OPERAND_TYPE_IMM32, "i32" },
2451 { OPERAND_TYPE_IMM32S, "i32s" },
2452 { OPERAND_TYPE_IMM64, "i64" },
2453 { OPERAND_TYPE_IMM1, "i1" },
2454 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2455 { OPERAND_TYPE_DISP8, "d8" },
2456 { OPERAND_TYPE_DISP16, "d16" },
2457 { OPERAND_TYPE_DISP32, "d32" },
2458 { OPERAND_TYPE_DISP32S, "d32s" },
2459 { OPERAND_TYPE_DISP64, "d64" },
2460 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2461 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2462 { OPERAND_TYPE_CONTROL, "control reg" },
2463 { OPERAND_TYPE_TEST, "test reg" },
2464 { OPERAND_TYPE_DEBUG, "debug reg" },
2465 { OPERAND_TYPE_FLOATREG, "FReg" },
2466 { OPERAND_TYPE_FLOATACC, "FAcc" },
2467 { OPERAND_TYPE_SREG2, "SReg2" },
2468 { OPERAND_TYPE_SREG3, "SReg3" },
2469 { OPERAND_TYPE_ACC, "Acc" },
2470 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2471 { OPERAND_TYPE_REGMMX, "rMMX" },
2472 { OPERAND_TYPE_REGXMM, "rXMM" },
2473 { OPERAND_TYPE_REGYMM, "rYMM" },
2474 { OPERAND_TYPE_ESSEG, "es" },
2475 };
2476
2477 static void
2478 pt (i386_operand_type t)
2479 {
2480 unsigned int j;
2481 i386_operand_type a;
2482
2483 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2484 {
2485 a = operand_type_and (t, type_names[j].mask);
2486 if (!operand_type_all_zero (&a))
2487 fprintf (stdout, "%s, ", type_names[j].name);
2488 }
2489 fflush (stdout);
2490 }
2491
2492 #endif /* DEBUG386 */
2493 \f
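/* Translate a fixup described by its SIZE in bytes, whether it is
   PC-relative (PCREL), its signedness (SIGN), and any explicitly
   requested relocation (OTHER) into the BFD relocation code to emit,
   diagnosing combinations that cannot be represented.  */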
2494 static bfd_reloc_code_real_type
2495 reloc (unsigned int size,
2496 int pcrel,
2497 int sign,
2498 bfd_reloc_code_real_type other)
2499 {
2500 if (other != NO_RELOC)
2501 {
2502 reloc_howto_type *rel;
2503
2504 if (size == 8)
2505 switch (other)
2506 {
2507 case BFD_RELOC_X86_64_GOT32:
2508 return BFD_RELOC_X86_64_GOT64;
2509 break;
2510 case BFD_RELOC_X86_64_PLTOFF64:
2511 return BFD_RELOC_X86_64_PLTOFF64;
2512 break;
2513 case BFD_RELOC_X86_64_GOTPC32:
2514 other = BFD_RELOC_X86_64_GOTPC64;
2515 break;
2516 case BFD_RELOC_X86_64_GOTPCREL:
2517 other = BFD_RELOC_X86_64_GOTPCREL64;
2518 break;
2519 case BFD_RELOC_X86_64_TPOFF32:
2520 other = BFD_RELOC_X86_64_TPOFF64;
2521 break;
2522 case BFD_RELOC_X86_64_DTPOFF32:
2523 other = BFD_RELOC_X86_64_DTPOFF64;
2524 break;
2525 default:
2526 break;
2527 }
2528
2529 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2530 if (size == 4 && flag_code != CODE_64BIT)
2531 sign = -1;
2532
2533 rel = bfd_reloc_type_lookup (stdoutput, other);
2534 if (!rel)
2535 as_bad (_("unknown relocation (%u)"), other);
2536 else if (size != bfd_get_reloc_size (rel))
2537 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2538 bfd_get_reloc_size (rel),
2539 size);
2540 else if (pcrel && !rel->pc_relative)
2541 as_bad (_("non-pc-relative relocation for pc-relative field"));
2542 else if ((rel->complain_on_overflow == complain_overflow_signed
2543 && !sign)
2544 || (rel->complain_on_overflow == complain_overflow_unsigned
2545 && sign > 0))
2546 as_bad (_("relocated field and relocation type differ in signedness"));
2547 else
2548 return other;
2549 return NO_RELOC;
2550 }
2551
2552 if (pcrel)
2553 {
2554 if (!sign)
2555 as_bad (_("there are no unsigned pc-relative relocations"));
2556 switch (size)
2557 {
2558 case 1: return BFD_RELOC_8_PCREL;
2559 case 2: return BFD_RELOC_16_PCREL;
2560 case 4: return BFD_RELOC_32_PCREL;
2561 case 8: return BFD_RELOC_64_PCREL;
2562 }
2563 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2564 }
2565 else
2566 {
2567 if (sign > 0)
2568 switch (size)
2569 {
2570 case 4: return BFD_RELOC_X86_64_32S;
2571 }
2572 else
2573 switch (size)
2574 {
2575 case 1: return BFD_RELOC_8;
2576 case 2: return BFD_RELOC_16;
2577 case 4: return BFD_RELOC_32;
2578 case 8: return BFD_RELOC_64;
2579 }
2580 as_bad (_("cannot do %s %u byte relocation"),
2581 sign > 0 ? "signed" : "unsigned", size);
2582 }
2583
2584 return NO_RELOC;
2585 }
2586
2587 /* Here we decide which fixups can be adjusted to make them relative to
2588 the beginning of the section instead of the symbol. Basically we need
2589 to make sure that the dynamic relocations are done correctly, so in
2590 some cases we force the original symbol to be used. */
2591
2592 int
2593 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2594 {
2595 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2596 if (!IS_ELF)
2597 return 1;
2598
2599 /* Don't adjust pc-relative references to merge sections in 64-bit
2600 mode. */
2601 if (use_rela_relocations
2602 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2603 && fixP->fx_pcrel)
2604 return 0;
2605
2606 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2607 and changed later by validate_fix. */
2608 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2609 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2610 return 0;
2611
2612 /* adjust_reloc_syms doesn't know about the GOT. */
2613 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2614 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2615 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2616 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2617 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2618 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2619 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2620 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2621 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2622 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2623 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2624 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2625 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2626 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2627 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2628 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2629 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2630 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2631 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2632 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2633 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2634 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2635 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2636 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2637 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2638 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2639 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2640 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2641 return 0;
2642 #endif
2643 return 1;
2644 }
2645
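/* Classify MNEMONIC for Intel-syntax memory operand sizing.  Returns 0
   for non-FP mnemonics (and fxsave/fxrstor), 2 for FP insns operating on
   integer operands (fi...), 3 for FP control/state insns (fldcw, fldenv,
   fnsave, frstor, fsave, fstcw, fstenv, ...), and 1 for ordinary FP
   arithmetic.  */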
2646 static int
2647 intel_float_operand (const char *mnemonic)
2648 {
2649 /* Note that the value returned is meaningful only for opcodes with (memory)
2650 operands, hence the code here is free to improperly handle opcodes that
2651 have no operands (for better performance and smaller code). */
2652
2653 if (mnemonic[0] != 'f')
2654 return 0; /* non-math */
2655
2656 switch (mnemonic[1])
2657 {
2658     /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2659        the fs segment override prefix are not handled here because no
2660        call path can reach this function for opcodes without operands.  */
2661 case 'i':
2662 return 2 /* integer op */;
2663 case 'l':
2664 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2665 return 3; /* fldcw/fldenv */
2666 break;
2667 case 'n':
2668 if (mnemonic[2] != 'o' /* fnop */)
2669 return 3; /* non-waiting control op */
2670 break;
2671 case 'r':
2672 if (mnemonic[2] == 's')
2673 return 3; /* frstor/frstpm */
2674 break;
2675 case 's':
2676 if (mnemonic[2] == 'a')
2677 return 3; /* fsave */
2678 if (mnemonic[2] == 't')
2679 {
2680 switch (mnemonic[3])
2681 {
2682 case 'c': /* fstcw */
2683 case 'd': /* fstdw */
2684 case 'e': /* fstenv */
2685 case 's': /* fsts[gw] */
2686 return 3;
2687 }
2688 }
2689 break;
2690 case 'x':
2691 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2692 return 0; /* fxsave/fxrstor are not really math ops */
2693 break;
2694 }
2695
2696 return 1;
2697 }
2698
2699 /* Build the VEX prefix. */
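/* The 2-byte form is 0xc5 followed by R:vvvv:L:pp; the 3-byte form is
   0xc4 (0x8f for XOP) followed by the inverted R/X/B bits plus the opcode
   map in mmmmm, then W:vvvv:L:pp.  The 2-byte form is only usable for the
   0f map when REX.W, REX.X and REX.B are all clear.  */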
2700
2701 static void
2702 build_vex_prefix (const insn_template *t)
2703 {
2704 unsigned int register_specifier;
2705 unsigned int implied_prefix;
2706 unsigned int vector_length;
2707
2708 /* Check register specifier. */
2709 if (i.vex.register_specifier)
2710 {
2711 register_specifier = i.vex.register_specifier->reg_num;
2712 if ((i.vex.register_specifier->reg_flags & RegRex))
2713 register_specifier += 8;
2714 register_specifier = ~register_specifier & 0xf;
2715 }
2716 else
2717 register_specifier = 0xf;
2718
2719   /* Use the 2-byte VEX prefix by swapping the destination and source
2720      operands.  */
2721 if (!i.swap_operand
2722 && i.operands == i.reg_operands
2723 && i.tm.opcode_modifier.vexopcode == VEX0F
2724 && i.tm.opcode_modifier.s
2725 && i.rex == REX_B)
2726 {
2727 unsigned int xchg = i.operands - 1;
2728 union i386_op temp_op;
2729 i386_operand_type temp_type;
2730
2731 temp_type = i.types[xchg];
2732 i.types[xchg] = i.types[0];
2733 i.types[0] = temp_type;
2734 temp_op = i.op[xchg];
2735 i.op[xchg] = i.op[0];
2736 i.op[0] = temp_op;
2737
2738 gas_assert (i.rm.mode == 3);
2739
2740 i.rex = REX_R;
2741 xchg = i.rm.regmem;
2742 i.rm.regmem = i.rm.reg;
2743 i.rm.reg = xchg;
2744
2745 /* Use the next insn. */
2746 i.tm = t[1];
2747 }
2748
2749 if (i.tm.opcode_modifier.vex == VEXScalar)
2750 vector_length = avxscalar;
2751 else
2752 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2753
2754 switch ((i.tm.base_opcode >> 8) & 0xff)
2755 {
2756 case 0:
2757 implied_prefix = 0;
2758 break;
2759 case DATA_PREFIX_OPCODE:
2760 implied_prefix = 1;
2761 break;
2762 case REPE_PREFIX_OPCODE:
2763 implied_prefix = 2;
2764 break;
2765 case REPNE_PREFIX_OPCODE:
2766 implied_prefix = 3;
2767 break;
2768 default:
2769 abort ();
2770 }
2771
2772 /* Use 2-byte VEX prefix if possible. */
2773 if (i.tm.opcode_modifier.vexopcode == VEX0F
2774 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2775 {
2776 /* 2-byte VEX prefix. */
2777 unsigned int r;
2778
2779 i.vex.length = 2;
2780 i.vex.bytes[0] = 0xc5;
2781
2782 /* Check the REX.R bit. */
2783 r = (i.rex & REX_R) ? 0 : 1;
2784 i.vex.bytes[1] = (r << 7
2785 | register_specifier << 3
2786 | vector_length << 2
2787 | implied_prefix);
2788 }
2789 else
2790 {
2791 /* 3-byte VEX prefix. */
2792 unsigned int m, w;
2793
2794 i.vex.length = 3;
2795
2796 switch (i.tm.opcode_modifier.vexopcode)
2797 {
2798 case VEX0F:
2799 m = 0x1;
2800 i.vex.bytes[0] = 0xc4;
2801 break;
2802 case VEX0F38:
2803 m = 0x2;
2804 i.vex.bytes[0] = 0xc4;
2805 break;
2806 case VEX0F3A:
2807 m = 0x3;
2808 i.vex.bytes[0] = 0xc4;
2809 break;
2810 case XOP08:
2811 m = 0x8;
2812 i.vex.bytes[0] = 0x8f;
2813 break;
2814 case XOP09:
2815 m = 0x9;
2816 i.vex.bytes[0] = 0x8f;
2817 break;
2818 case XOP0A:
2819 m = 0xa;
2820 i.vex.bytes[0] = 0x8f;
2821 break;
2822 default:
2823 abort ();
2824 }
2825
2826       /* The high 3 bits of the second VEX byte are the one's complement
2827 	 of the RXB bits from REX.  */
2828 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2829
2830 /* Check the REX.W bit. */
2831 w = (i.rex & REX_W) ? 1 : 0;
2832 if (i.tm.opcode_modifier.vexw)
2833 {
2834 if (w)
2835 abort ();
2836
2837 if (i.tm.opcode_modifier.vexw == VEXW1)
2838 w = 1;
2839 }
2840
2841 i.vex.bytes[2] = (w << 7
2842 | register_specifier << 3
2843 | vector_length << 2
2844 | implied_prefix);
2845 }
2846 }
2847
2848 static void
2849 process_immext (void)
2850 {
2851 expressionS *exp;
2852
2853 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2854 {
2855       /* These SSE3 instructions have fixed register operands and an
2856 	 opcode suffix which is coded in the same place an 8-bit immediate
2857 	 field would be.  Check those operands here and then remove
2858 	 them.  */
2859 unsigned int x;
2860
2861 for (x = 0; x < i.operands; x++)
2862 if (i.op[x].regs->reg_num != x)
2863 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2864 register_prefix, i.op[x].regs->reg_name, x + 1,
2865 i.tm.name);
2866
2867 i.operands = 0;
2868 }
2869
2870 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2871 which is coded in the same place as an 8-bit immediate field
2872 would be. Here we fake an 8-bit immediate operand from the
2873 opcode suffix stored in tm.extension_opcode.
2874
2875 AVX instructions also use this encoding, for some of
2876 3 argument instructions. */
2877
2878 gas_assert (i.imm_operands == 0
2879 && (i.operands <= 2
2880 || (i.tm.opcode_modifier.vex
2881 && i.operands <= 4)));
2882
2883 exp = &im_expressions[i.imm_operands++];
2884 i.op[i.operands].imms = exp;
2885 i.types[i.operands] = imm8;
2886 i.operands++;
2887 exp->X_op = O_constant;
2888 exp->X_add_number = i.tm.extension_opcode;
2889 i.tm.extension_opcode = None;
2890 }
2891
2892 /* This is the guts of the machine-dependent assembler. LINE points to a
2893 machine dependent instruction. This function is supposed to emit
2894 the frags/bytes it assembles to. */
2895
2896 void
2897 md_assemble (char *line)
2898 {
2899 unsigned int j;
2900 char mnemonic[MAX_MNEM_SIZE];
2901 const insn_template *t;
2902
2903 /* Initialize globals. */
2904 memset (&i, '\0', sizeof (i));
2905 for (j = 0; j < MAX_OPERANDS; j++)
2906 i.reloc[j] = NO_RELOC;
2907 memset (disp_expressions, '\0', sizeof (disp_expressions));
2908 memset (im_expressions, '\0', sizeof (im_expressions));
2909 save_stack_p = save_stack;
2910
2911 /* First parse an instruction mnemonic & call i386_operand for the operands.
2912 We assume that the scrubber has arranged it so that line[0] is the valid
2913 start of a (possibly prefixed) mnemonic. */
2914
2915 line = parse_insn (line, mnemonic);
2916 if (line == NULL)
2917 return;
2918
2919 line = parse_operands (line, mnemonic);
2920 this_operand = -1;
2921 if (line == NULL)
2922 return;
2923
2924 /* Now we've parsed the mnemonic into a set of templates, and have the
2925 operands at hand. */
2926
2927 /* All intel opcodes have reversed operands except for "bound" and
2928 "enter". We also don't reverse intersegment "jmp" and "call"
2929 instructions with 2 immediate operands so that the immediate segment
2930 precedes the offset, as it does when in AT&T mode. */
2931 if (intel_syntax
2932 && i.operands > 1
2933 && (strcmp (mnemonic, "bound") != 0)
2934 && (strcmp (mnemonic, "invlpga") != 0)
2935 && !(operand_type_check (i.types[0], imm)
2936 && operand_type_check (i.types[1], imm)))
2937 swap_operands ();
2938
2939   /* The order of the immediates should be reversed for the
2940      two-immediate extrq and insertq instructions.  */
2941 if (i.imm_operands == 2
2942 && (strcmp (mnemonic, "extrq") == 0
2943 || strcmp (mnemonic, "insertq") == 0))
2944 swap_2_operands (0, 1);
2945
2946 if (i.imm_operands)
2947 optimize_imm ();
2948
2949 /* Don't optimize displacement for movabs since it only takes 64bit
2950 displacement. */
2951 if (i.disp_operands
2952 && (flag_code != CODE_64BIT
2953 || strcmp (mnemonic, "movabs") != 0))
2954 optimize_disp ();
2955
2956 /* Next, we find a template that matches the given insn,
2957 making sure the overlap of the given operands types is consistent
2958 with the template operand types. */
2959
2960 if (!(t = match_template ()))
2961 return;
2962
2963 if (sse_check != sse_check_none
2964 && !i.tm.opcode_modifier.noavx
2965 && (i.tm.cpu_flags.bitfield.cpusse
2966 || i.tm.cpu_flags.bitfield.cpusse2
2967 || i.tm.cpu_flags.bitfield.cpusse3
2968 || i.tm.cpu_flags.bitfield.cpussse3
2969 || i.tm.cpu_flags.bitfield.cpusse4_1
2970 || i.tm.cpu_flags.bitfield.cpusse4_2))
2971 {
2972 (sse_check == sse_check_warning
2973 ? as_warn
2974 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
2975 }
2976
2977 /* Zap movzx and movsx suffix. The suffix has been set from
2978 "word ptr" or "byte ptr" on the source operand in Intel syntax
2979 or extracted from mnemonic in AT&T syntax. But we'll use
2980 the destination register to choose the suffix for encoding. */
2981 if ((i.tm.base_opcode & ~9) == 0x0fb6)
2982 {
2983 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
2984 there is no suffix, the default will be byte extension. */
2985 if (i.reg_operands != 2
2986 && !i.suffix
2987 && intel_syntax)
2988 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
2989
2990 i.suffix = 0;
2991 }
2992
2993 if (i.tm.opcode_modifier.fwait)
2994 if (!add_prefix (FWAIT_OPCODE))
2995 return;
2996
2997 /* Check for lock without a lockable instruction. Destination operand
2998 must be memory unless it is xchg (0x86). */
2999 if (i.prefix[LOCK_PREFIX]
3000 && (!i.tm.opcode_modifier.islockable
3001 || i.mem_operands == 0
3002 || (i.tm.base_opcode != 0x86
3003 && !operand_type_check (i.types[i.operands - 1], anymem))))
3004 {
3005 as_bad (_("expecting lockable instruction after `lock'"));
3006 return;
3007 }
3008
3009 /* Check string instruction segment overrides. */
3010 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3011 {
3012 if (!check_string ())
3013 return;
3014 i.disp_operands = 0;
3015 }
3016
3017 if (!process_suffix ())
3018 return;
3019
3020 /* Update operand types. */
3021 for (j = 0; j < i.operands; j++)
3022 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3023
3024 /* Make still unresolved immediate matches conform to size of immediate
3025 given in i.suffix. */
3026 if (!finalize_imm ())
3027 return;
3028
3029 if (i.types[0].bitfield.imm1)
3030 i.imm_operands = 0; /* kludge for shift insns. */
3031
3032 /* We only need to check those implicit registers for instructions
3033 with 3 operands or less. */
3034 if (i.operands <= 3)
3035 for (j = 0; j < i.operands; j++)
3036 if (i.types[j].bitfield.inoutportreg
3037 || i.types[j].bitfield.shiftcount
3038 || i.types[j].bitfield.acc
3039 || i.types[j].bitfield.floatacc)
3040 i.reg_operands--;
3041
3042 /* ImmExt should be processed after SSE2AVX. */
3043 if (!i.tm.opcode_modifier.sse2avx
3044 && i.tm.opcode_modifier.immext)
3045 process_immext ();
3046
3047 /* For insns with operands there are more diddles to do to the opcode. */
3048 if (i.operands)
3049 {
3050 if (!process_operands ())
3051 return;
3052 }
3053 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3054 {
3055 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3056 as_warn (_("translating to `%sp'"), i.tm.name);
3057 }
3058
3059 if (i.tm.opcode_modifier.vex)
3060 build_vex_prefix (t);
3061
3062 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3063 instructions may define INT_OPCODE as well, so avoid this corner
3064 case for those instructions that use MODRM. */
3065 if (i.tm.base_opcode == INT_OPCODE
3066 && !i.tm.opcode_modifier.modrm
3067 && i.op[0].imms->X_add_number == 3)
3068 {
3069 i.tm.base_opcode = INT3_OPCODE;
3070 i.imm_operands = 0;
3071 }
3072
3073 if ((i.tm.opcode_modifier.jump
3074 || i.tm.opcode_modifier.jumpbyte
3075 || i.tm.opcode_modifier.jumpdword)
3076 && i.op[0].disps->X_op == O_constant)
3077 {
3078 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3079 the absolute address given by the constant. Since ix86 jumps and
3080 calls are pc relative, we need to generate a reloc. */
3081 i.op[0].disps->X_add_symbol = &abs_symbol;
3082 i.op[0].disps->X_op = O_symbol;
3083 }
3084
3085 if (i.tm.opcode_modifier.rex64)
3086 i.rex |= REX_W;
3087
3088 /* For 8 bit registers we need an empty rex prefix. Also if the
3089 instruction already has a prefix, we need to convert old
3090 registers to new ones. */
3091
3092 if ((i.types[0].bitfield.reg8
3093 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3094 || (i.types[1].bitfield.reg8
3095 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3096 || ((i.types[0].bitfield.reg8
3097 || i.types[1].bitfield.reg8)
3098 && i.rex != 0))
3099 {
3100 int x;
3101
3102 i.rex |= REX_OPCODE;
3103 for (x = 0; x < 2; x++)
3104 {
3105 /* Look for 8 bit operand that uses old registers. */
3106 if (i.types[x].bitfield.reg8
3107 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3108 {
3109 /* In case it is "hi" register, give up. */
3110 if (i.op[x].regs->reg_num > 3)
3111 as_bad (_("can't encode register '%s%s' in an "
3112 "instruction requiring REX prefix."),
3113 register_prefix, i.op[x].regs->reg_name);
3114
3115 /* Otherwise it is equivalent to the extended register.
3116 Since the encoding doesn't change this is merely
3117 cosmetic cleanup for debug output. */
3118
3119 i.op[x].regs = i.op[x].regs + 8;
3120 }
3121 }
3122 }
3123
3124 if (i.rex != 0)
3125 add_prefix (REX_OPCODE | i.rex);
3126
3127 /* We are ready to output the insn. */
3128 output_insn ();
3129 }
3130
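/* Parse the instruction mnemonic at LINE into MNEMONIC, consuming any
   leading prefixes (and branch hints), trimming a size suffix if that is
   what it takes to find a template, and checking that the instruction is
   supported on the selected architecture.  Returns a pointer past the
   mnemonic, or NULL after reporting an error.  */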
3131 static char *
3132 parse_insn (char *line, char *mnemonic)
3133 {
3134 char *l = line;
3135 char *token_start = l;
3136 char *mnem_p;
3137 int supported;
3138 const insn_template *t;
3139 char *dot_p = NULL;
3140
3141 /* Non-zero if we found a prefix only acceptable with string insns. */
3142 const char *expecting_string_instruction = NULL;
3143
3144 while (1)
3145 {
3146 mnem_p = mnemonic;
3147 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3148 {
3149 if (*mnem_p == '.')
3150 dot_p = mnem_p;
3151 mnem_p++;
3152 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3153 {
3154 as_bad (_("no such instruction: `%s'"), token_start);
3155 return NULL;
3156 }
3157 l++;
3158 }
3159 if (!is_space_char (*l)
3160 && *l != END_OF_INSN
3161 && (intel_syntax
3162 || (*l != PREFIX_SEPARATOR
3163 && *l != ',')))
3164 {
3165 as_bad (_("invalid character %s in mnemonic"),
3166 output_invalid (*l));
3167 return NULL;
3168 }
3169 if (token_start == l)
3170 {
3171 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3172 as_bad (_("expecting prefix; got nothing"));
3173 else
3174 as_bad (_("expecting mnemonic; got nothing"));
3175 return NULL;
3176 }
3177
3178 /* Look up instruction (or prefix) via hash table. */
3179 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3180
3181 if (*l != END_OF_INSN
3182 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3183 && current_templates
3184 && current_templates->start->opcode_modifier.isprefix)
3185 {
3186 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3187 {
3188 as_bad ((flag_code != CODE_64BIT
3189 ? _("`%s' is only supported in 64-bit mode")
3190 : _("`%s' is not supported in 64-bit mode")),
3191 current_templates->start->name);
3192 return NULL;
3193 }
3194 /* If we are in 16-bit mode, do not allow addr16 or data16.
3195 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3196 if ((current_templates->start->opcode_modifier.size16
3197 || current_templates->start->opcode_modifier.size32)
3198 && flag_code != CODE_64BIT
3199 && (current_templates->start->opcode_modifier.size32
3200 ^ (flag_code == CODE_16BIT)))
3201 {
3202 as_bad (_("redundant %s prefix"),
3203 current_templates->start->name);
3204 return NULL;
3205 }
3206 /* Add prefix, checking for repeated prefixes. */
3207 switch (add_prefix (current_templates->start->base_opcode))
3208 {
3209 case PREFIX_EXIST:
3210 return NULL;
3211 case PREFIX_REP:
3212 expecting_string_instruction = current_templates->start->name;
3213 break;
3214 default:
3215 break;
3216 }
3217 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3218 token_start = ++l;
3219 }
3220 else
3221 break;
3222 }
3223
3224 if (!current_templates)
3225 {
3226 /* Check if we should swap operand in encoding. */
3227 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3228 i.swap_operand = 1;
3229 else
3230 goto check_suffix;
3231 mnem_p = dot_p;
3232 *dot_p = '\0';
3233 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3234 }
3235
3236 if (!current_templates)
3237 {
3238 check_suffix:
3239 /* See if we can get a match by trimming off a suffix. */
3240 switch (mnem_p[-1])
3241 {
3242 case WORD_MNEM_SUFFIX:
3243 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3244 i.suffix = SHORT_MNEM_SUFFIX;
3245 else
3246 case BYTE_MNEM_SUFFIX:
3247 case QWORD_MNEM_SUFFIX:
3248 i.suffix = mnem_p[-1];
3249 mnem_p[-1] = '\0';
3250 current_templates = (const templates *) hash_find (op_hash,
3251 mnemonic);
3252 break;
3253 case SHORT_MNEM_SUFFIX:
3254 case LONG_MNEM_SUFFIX:
3255 if (!intel_syntax)
3256 {
3257 i.suffix = mnem_p[-1];
3258 mnem_p[-1] = '\0';
3259 current_templates = (const templates *) hash_find (op_hash,
3260 mnemonic);
3261 }
3262 break;
3263
3264 /* Intel Syntax. */
3265 case 'd':
3266 if (intel_syntax)
3267 {
3268 if (intel_float_operand (mnemonic) == 1)
3269 i.suffix = SHORT_MNEM_SUFFIX;
3270 else
3271 i.suffix = LONG_MNEM_SUFFIX;
3272 mnem_p[-1] = '\0';
3273 current_templates = (const templates *) hash_find (op_hash,
3274 mnemonic);
3275 }
3276 break;
3277 }
3278 if (!current_templates)
3279 {
3280 as_bad (_("no such instruction: `%s'"), token_start);
3281 return NULL;
3282 }
3283 }
3284
3285 if (current_templates->start->opcode_modifier.jump
3286 || current_templates->start->opcode_modifier.jumpbyte)
3287 {
3288 /* Check for a branch hint. We allow ",pt" and ",pn" for
3289 predict taken and predict not taken respectively.
3290 I'm not sure that branch hints actually do anything on loop
3291 and jcxz insns (JumpByte) for current Pentium4 chips. They
3292 may work in the future and it doesn't hurt to accept them
3293 now. */
3294 if (l[0] == ',' && l[1] == 'p')
3295 {
3296 if (l[2] == 't')
3297 {
3298 if (!add_prefix (DS_PREFIX_OPCODE))
3299 return NULL;
3300 l += 3;
3301 }
3302 else if (l[2] == 'n')
3303 {
3304 if (!add_prefix (CS_PREFIX_OPCODE))
3305 return NULL;
3306 l += 3;
3307 }
3308 }
3309 }
3310 /* Any other comma loses. */
3311 if (*l == ',')
3312 {
3313 as_bad (_("invalid character %s in mnemonic"),
3314 output_invalid (*l));
3315 return NULL;
3316 }
3317
3318 /* Check if instruction is supported on specified architecture. */
3319 supported = 0;
3320 for (t = current_templates->start; t < current_templates->end; ++t)
3321 {
3322 supported |= cpu_flags_match (t);
3323 if (supported == CPU_FLAGS_PERFECT_MATCH)
3324 goto skip;
3325 }
3326
3327 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3328 {
3329 as_bad (flag_code == CODE_64BIT
3330 ? _("`%s' is not supported in 64-bit mode")
3331 : _("`%s' is only supported in 64-bit mode"),
3332 current_templates->start->name);
3333 return NULL;
3334 }
3335 if (supported != CPU_FLAGS_PERFECT_MATCH)
3336 {
3337 as_bad (_("`%s' is not supported on `%s%s'"),
3338 current_templates->start->name,
3339 cpu_arch_name ? cpu_arch_name : default_arch,
3340 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3341 return NULL;
3342 }
3343
3344 skip:
3345 if (!cpu_arch_flags.bitfield.cpui386
3346 && (flag_code != CODE_16BIT))
3347 {
3348 as_warn (_("use .code16 to ensure correct addressing mode"));
3349 }
3350
3351 /* Check for rep/repne without a string instruction. */
3352 if (expecting_string_instruction)
3353 {
3354 static templates override;
3355
3356 for (t = current_templates->start; t < current_templates->end; ++t)
3357 if (t->opcode_modifier.isstring)
3358 break;
3359 if (t >= current_templates->end)
3360 {
3361 as_bad (_("expecting string instruction after `%s'"),
3362 expecting_string_instruction);
3363 return NULL;
3364 }
3365 for (override.start = t; t < current_templates->end; ++t)
3366 if (!t->opcode_modifier.isstring)
3367 break;
3368 override.end = t;
3369 current_templates = &override;
3370 }
3371
3372 return l;
3373 }
3374
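/* Parse the operands following the mnemonic: split L on commas while
   respecting () or [] nesting, and hand each operand to i386_att_operand
   or i386_intel_operand.  Returns the updated pointer, or NULL on error.  */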
3375 static char *
3376 parse_operands (char *l, const char *mnemonic)
3377 {
3378 char *token_start;
3379
3380 /* 1 if operand is pending after ','. */
3381 unsigned int expecting_operand = 0;
3382
3383 /* Non-zero if operand parens not balanced. */
3384 unsigned int paren_not_balanced;
3385
3386 while (*l != END_OF_INSN)
3387 {
3388 /* Skip optional white space before operand. */
3389 if (is_space_char (*l))
3390 ++l;
3391 if (!is_operand_char (*l) && *l != END_OF_INSN)
3392 {
3393 as_bad (_("invalid character %s before operand %d"),
3394 output_invalid (*l),
3395 i.operands + 1);
3396 return NULL;
3397 }
3398 token_start = l; /* after white space */
3399 paren_not_balanced = 0;
3400 while (paren_not_balanced || *l != ',')
3401 {
3402 if (*l == END_OF_INSN)
3403 {
3404 if (paren_not_balanced)
3405 {
3406 if (!intel_syntax)
3407 as_bad (_("unbalanced parenthesis in operand %d."),
3408 i.operands + 1);
3409 else
3410 as_bad (_("unbalanced brackets in operand %d."),
3411 i.operands + 1);
3412 return NULL;
3413 }
3414 else
3415 break; /* we are done */
3416 }
3417 else if (!is_operand_char (*l) && !is_space_char (*l))
3418 {
3419 as_bad (_("invalid character %s in operand %d"),
3420 output_invalid (*l),
3421 i.operands + 1);
3422 return NULL;
3423 }
3424 if (!intel_syntax)
3425 {
3426 if (*l == '(')
3427 ++paren_not_balanced;
3428 if (*l == ')')
3429 --paren_not_balanced;
3430 }
3431 else
3432 {
3433 if (*l == '[')
3434 ++paren_not_balanced;
3435 if (*l == ']')
3436 --paren_not_balanced;
3437 }
3438 l++;
3439 }
3440 if (l != token_start)
3441 { /* Yes, we've read in another operand. */
3442 unsigned int operand_ok;
3443 this_operand = i.operands++;
3444 i.types[this_operand].bitfield.unspecified = 1;
3445 if (i.operands > MAX_OPERANDS)
3446 {
3447 as_bad (_("spurious operands; (%d operands/instruction max)"),
3448 MAX_OPERANDS);
3449 return NULL;
3450 }
3451 /* Now parse operand adding info to 'i' as we go along. */
3452 END_STRING_AND_SAVE (l);
3453
3454 if (intel_syntax)
3455 operand_ok =
3456 i386_intel_operand (token_start,
3457 intel_float_operand (mnemonic));
3458 else
3459 operand_ok = i386_att_operand (token_start);
3460
3461 RESTORE_END_STRING (l);
3462 if (!operand_ok)
3463 return NULL;
3464 }
3465 else
3466 {
3467 if (expecting_operand)
3468 {
3469 expecting_operand_after_comma:
3470 as_bad (_("expecting operand after ','; got nothing"));
3471 return NULL;
3472 }
3473 if (*l == ',')
3474 {
3475 as_bad (_("expecting operand before ','; got nothing"));
3476 return NULL;
3477 }
3478 }
3479
3480 /* Now *l must be either ',' or END_OF_INSN. */
3481 if (*l == ',')
3482 {
3483 if (*++l == END_OF_INSN)
3484 {
3485 /* Just skip it, if it's \n complain. */
3486 goto expecting_operand_after_comma;
3487 }
3488 expecting_operand = 1;
3489 }
3490 }
3491 return l;
3492 }
3493
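/* Exchange the types, values and relocations of operands XCHG1 and XCHG2.  */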
3494 static void
3495 swap_2_operands (int xchg1, int xchg2)
3496 {
3497 union i386_op temp_op;
3498 i386_operand_type temp_type;
3499 enum bfd_reloc_code_real temp_reloc;
3500
3501 temp_type = i.types[xchg2];
3502 i.types[xchg2] = i.types[xchg1];
3503 i.types[xchg1] = temp_type;
3504 temp_op = i.op[xchg2];
3505 i.op[xchg2] = i.op[xchg1];
3506 i.op[xchg1] = temp_op;
3507 temp_reloc = i.reloc[xchg2];
3508 i.reloc[xchg2] = i.reloc[xchg1];
3509 i.reloc[xchg1] = temp_reloc;
3510 }
3511
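/* Reverse the order of the parsed operands, as needed for Intel syntax;
   with two memory operands the segment overrides are swapped as well.  */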
3512 static void
3513 swap_operands (void)
3514 {
3515 switch (i.operands)
3516 {
3517 case 5:
3518 case 4:
3519 swap_2_operands (1, i.operands - 2);
3520 case 3:
3521 case 2:
3522 swap_2_operands (0, i.operands - 1);
3523 break;
3524 default:
3525 abort ();
3526 }
3527
3528 if (i.mem_operands == 2)
3529 {
3530 const seg_entry *temp_seg;
3531 temp_seg = i.seg[0];
3532 i.seg[0] = i.seg[1];
3533 i.seg[1] = temp_seg;
3534 }
3535 }
3536
3537 static int
3538 i386_is_register (const expressionS *e, int is_intel_syntax)
3539 {
3540 return (e->X_op == O_register
3541 || (is_intel_syntax
3542 && e->X_op == O_constant
3543 && e->X_md));
3544 }
3545
3546 /* Try to ensure constant immediates are represented in the smallest
3547 opcode possible. */
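/* For example, "addw $0xffe0, %ax" first narrows the constant to the
   signed 16-bit value -32, which then also qualifies for the
   sign-extended 8-bit immediate encoding.  */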
3548 static void
3549 optimize_imm (void)
3550 {
3551 char guess_suffix = 0;
3552 int op;
3553
3554 if (i.suffix)
3555 guess_suffix = i.suffix;
3556 else if (i.reg_operands)
3557 {
3558 /* Figure out a suffix from the last register operand specified.
3559 	 We can't do this properly yet, i.e. excluding InOutPortReg,
3560 but the following works for instructions with immediates.
3561 In any case, we can't set i.suffix yet. */
3562 for (op = i.operands; --op >= 0;)
3563 if (i.types[op].bitfield.reg8)
3564 {
3565 guess_suffix = BYTE_MNEM_SUFFIX;
3566 break;
3567 }
3568 else if (i.types[op].bitfield.reg16)
3569 {
3570 guess_suffix = WORD_MNEM_SUFFIX;
3571 break;
3572 }
3573 else if (i.types[op].bitfield.reg32)
3574 {
3575 guess_suffix = LONG_MNEM_SUFFIX;
3576 break;
3577 }
3578 else if (i.types[op].bitfield.reg64)
3579 {
3580 guess_suffix = QWORD_MNEM_SUFFIX;
3581 break;
3582 }
3583 }
3584 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3585 guess_suffix = WORD_MNEM_SUFFIX;
3586
3587 for (op = i.operands; --op >= 0;)
3588 if (operand_type_check (i.types[op], imm))
3589 {
3590 switch (i.op[op].imms->X_op)
3591 {
3592 case O_constant:
3593 /* If a suffix is given, this operand may be shortened. */
3594 switch (guess_suffix)
3595 {
3596 case LONG_MNEM_SUFFIX:
3597 i.types[op].bitfield.imm32 = 1;
3598 i.types[op].bitfield.imm64 = 1;
3599 break;
3600 case WORD_MNEM_SUFFIX:
3601 i.types[op].bitfield.imm16 = 1;
3602 i.types[op].bitfield.imm32 = 1;
3603 i.types[op].bitfield.imm32s = 1;
3604 i.types[op].bitfield.imm64 = 1;
3605 break;
3606 case BYTE_MNEM_SUFFIX:
3607 i.types[op].bitfield.imm8 = 1;
3608 i.types[op].bitfield.imm8s = 1;
3609 i.types[op].bitfield.imm16 = 1;
3610 i.types[op].bitfield.imm32 = 1;
3611 i.types[op].bitfield.imm32s = 1;
3612 i.types[op].bitfield.imm64 = 1;
3613 break;
3614 }
3615
3616 /* If this operand is at most 16 bits, convert it
3617 to a signed 16 bit number before trying to see
3618 whether it will fit in an even smaller size.
3619 This allows a 16-bit operand such as $0xffe0 to
3620 be recognised as within Imm8S range. */
3621 if ((i.types[op].bitfield.imm16)
3622 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3623 {
3624 i.op[op].imms->X_add_number =
3625 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3626 }
3627 if ((i.types[op].bitfield.imm32)
3628 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3629 == 0))
3630 {
3631 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3632 ^ ((offsetT) 1 << 31))
3633 - ((offsetT) 1 << 31));
3634 }
3635 i.types[op]
3636 = operand_type_or (i.types[op],
3637 smallest_imm_type (i.op[op].imms->X_add_number));
3638
3639 /* We must avoid matching of Imm32 templates when 64bit
3640 only immediate is available. */
3641 if (guess_suffix == QWORD_MNEM_SUFFIX)
3642 i.types[op].bitfield.imm32 = 0;
3643 break;
3644
3645 case O_absent:
3646 case O_register:
3647 abort ();
3648
3649 /* Symbols and expressions. */
3650 default:
3651 /* Convert symbolic operand to proper sizes for matching, but don't
3652 prevent matching a set of insns that only supports sizes other
3653 than those matching the insn suffix. */
3654 {
3655 i386_operand_type mask, allowed;
3656 const insn_template *t;
3657
3658 operand_type_set (&mask, 0);
3659 operand_type_set (&allowed, 0);
3660
3661 for (t = current_templates->start;
3662 t < current_templates->end;
3663 ++t)
3664 allowed = operand_type_or (allowed,
3665 t->operand_types[op]);
3666 switch (guess_suffix)
3667 {
3668 case QWORD_MNEM_SUFFIX:
3669 mask.bitfield.imm64 = 1;
3670 mask.bitfield.imm32s = 1;
3671 break;
3672 case LONG_MNEM_SUFFIX:
3673 mask.bitfield.imm32 = 1;
3674 break;
3675 case WORD_MNEM_SUFFIX:
3676 mask.bitfield.imm16 = 1;
3677 break;
3678 case BYTE_MNEM_SUFFIX:
3679 mask.bitfield.imm8 = 1;
3680 break;
3681 default:
3682 break;
3683 }
3684 allowed = operand_type_and (mask, allowed);
3685 if (!operand_type_all_zero (&allowed))
3686 i.types[op] = operand_type_and (i.types[op], mask);
3687 }
3688 break;
3689 }
3690 }
3691 }
3692
3693 /* Try to use the smallest displacement type too. */
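/* A zero displacement with a base or index register is dropped entirely,
   and a constant displacement that fits in a signed byte has Disp8 set so
   the short ModRM form can be selected later.  */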
3694 static void
3695 optimize_disp (void)
3696 {
3697 int op;
3698
3699 for (op = i.operands; --op >= 0;)
3700 if (operand_type_check (i.types[op], disp))
3701 {
3702 if (i.op[op].disps->X_op == O_constant)
3703 {
3704 offsetT op_disp = i.op[op].disps->X_add_number;
3705
3706 if (i.types[op].bitfield.disp16
3707 && (op_disp & ~(offsetT) 0xffff) == 0)
3708 {
3709 /* If this operand is at most 16 bits, convert
3710 to a signed 16 bit number and don't use 64bit
3711 displacement. */
3712 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3713 i.types[op].bitfield.disp64 = 0;
3714 }
3715 if (i.types[op].bitfield.disp32
3716 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3717 {
3718 /* If this operand is at most 32 bits, convert
3719 to a signed 32 bit number and don't use 64bit
3720 displacement. */
3721 op_disp &= (((offsetT) 2 << 31) - 1);
3722 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3723 i.types[op].bitfield.disp64 = 0;
3724 }
3725 if (!op_disp && i.types[op].bitfield.baseindex)
3726 {
3727 i.types[op].bitfield.disp8 = 0;
3728 i.types[op].bitfield.disp16 = 0;
3729 i.types[op].bitfield.disp32 = 0;
3730 i.types[op].bitfield.disp32s = 0;
3731 i.types[op].bitfield.disp64 = 0;
3732 i.op[op].disps = 0;
3733 i.disp_operands--;
3734 }
3735 else if (flag_code == CODE_64BIT)
3736 {
3737 if (fits_in_signed_long (op_disp))
3738 {
3739 i.types[op].bitfield.disp64 = 0;
3740 i.types[op].bitfield.disp32s = 1;
3741 }
3742 if (i.prefix[ADDR_PREFIX]
3743 && fits_in_unsigned_long (op_disp))
3744 i.types[op].bitfield.disp32 = 1;
3745 }
3746 if ((i.types[op].bitfield.disp32
3747 || i.types[op].bitfield.disp32s
3748 || i.types[op].bitfield.disp16)
3749 && fits_in_signed_byte (op_disp))
3750 i.types[op].bitfield.disp8 = 1;
3751 }
3752 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3753 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3754 {
3755 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3756 i.op[op].disps, 0, i.reloc[op]);
3757 i.types[op].bitfield.disp8 = 0;
3758 i.types[op].bitfield.disp16 = 0;
3759 i.types[op].bitfield.disp32 = 0;
3760 i.types[op].bitfield.disp32s = 0;
3761 i.types[op].bitfield.disp64 = 0;
3762 }
3763 else
3764 /* We only support 64bit displacement on constants. */
3765 i.types[op].bitfield.disp64 = 0;
3766 }
3767 }
3768
3769 /* Check if operands are valid for the instruction.  Update VEX
3770 operand types. */
3771
3772 static int
3773 VEX_check_operands (const insn_template *t)
3774 {
3775 if (!t->opcode_modifier.vex)
3776 return 0;
3777
3778 /* Only check VEX_Imm4, which must be the first operand. */
3779 if (t->operand_types[0].bitfield.vec_imm4)
3780 {
3781 if (i.op[0].imms->X_op != O_constant
3782 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3783 {
3784 i.error = bad_imm4;
3785 return 1;
3786 }
3787
3788 /* Turn off Imm8 so that update_imm won't complain. */
3789 i.types[0] = vec_imm4;
3790 }
3791
3792 return 0;
3793 }
3794
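/* Search current_templates for a template whose operand count, CPU
   requirements, suffix and operand types are compatible with the parsed
   instruction, trying the reversed operand order for templates marked D
   or FloatD.  On success the matching template is returned; on failure
   NULL is returned after i.error has been set to describe the mismatch.  */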
3795 static const insn_template *
3796 match_template (void)
3797 {
3798 /* Points to template once we've found it. */
3799 const insn_template *t;
3800 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3801 i386_operand_type overlap4;
3802 unsigned int found_reverse_match;
3803 i386_opcode_modifier suffix_check;
3804 i386_operand_type operand_types [MAX_OPERANDS];
3805 int addr_prefix_disp;
3806 unsigned int j;
3807 unsigned int found_cpu_match;
3808 unsigned int check_register;
3809
3810 #if MAX_OPERANDS != 5
3811 # error "MAX_OPERANDS must be 5."
3812 #endif
3813
3814 found_reverse_match = 0;
3815 addr_prefix_disp = -1;
3816
3817 memset (&suffix_check, 0, sizeof (suffix_check));
3818 if (i.suffix == BYTE_MNEM_SUFFIX)
3819 suffix_check.no_bsuf = 1;
3820 else if (i.suffix == WORD_MNEM_SUFFIX)
3821 suffix_check.no_wsuf = 1;
3822 else if (i.suffix == SHORT_MNEM_SUFFIX)
3823 suffix_check.no_ssuf = 1;
3824 else if (i.suffix == LONG_MNEM_SUFFIX)
3825 suffix_check.no_lsuf = 1;
3826 else if (i.suffix == QWORD_MNEM_SUFFIX)
3827 suffix_check.no_qsuf = 1;
3828 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3829 suffix_check.no_ldsuf = 1;
3830
3831 for (t = current_templates->start; t < current_templates->end; t++)
3832 {
3833 addr_prefix_disp = -1;
3834
3835 /* Must have right number of operands. */
3836 i.error = number_of_operands_mismatch;
3837 if (i.operands != t->operands)
3838 continue;
3839
3840 /* Check processor support. */
3841 i.error = unsupported;
3842 found_cpu_match = (cpu_flags_match (t)
3843 == CPU_FLAGS_PERFECT_MATCH);
3844 if (!found_cpu_match)
3845 continue;
3846
3847 /* Check old gcc support. */
3848 i.error = old_gcc_only;
3849 if (!old_gcc && t->opcode_modifier.oldgcc)
3850 continue;
3851
3852 /* Check AT&T mnemonic. */
3853 i.error = unsupported_with_intel_mnemonic;
3854 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3855 continue;
3856
3857 /* Check AT&T/Intel syntax. */
3858 i.error = unsupported_syntax;
3859 if ((intel_syntax && t->opcode_modifier.attsyntax)
3860 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3861 continue;
3862
3863 /* Check the suffix, except for some instructions in intel mode. */
3864 i.error = invalid_instruction_suffix;
3865 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3866 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3867 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3868 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3869 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3870 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3871 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3872 continue;
3873
3874 if (!operand_size_match (t))
3875 continue;
3876
3877 for (j = 0; j < MAX_OPERANDS; j++)
3878 operand_types[j] = t->operand_types[j];
3879
3880 /* In general, don't allow 64-bit operands in 32-bit mode. */
3881 if (i.suffix == QWORD_MNEM_SUFFIX
3882 && flag_code != CODE_64BIT
3883 && (intel_syntax
3884 ? (!t->opcode_modifier.ignoresize
3885 && !intel_float_operand (t->name))
3886 : intel_float_operand (t->name) != 2)
3887 && ((!operand_types[0].bitfield.regmmx
3888 && !operand_types[0].bitfield.regxmm
3889 && !operand_types[0].bitfield.regymm)
3890 || (!operand_types[t->operands > 1].bitfield.regmmx
3891 && !!operand_types[t->operands > 1].bitfield.regxmm
3892 && !!operand_types[t->operands > 1].bitfield.regymm))
3893 && (t->base_opcode != 0x0fc7
3894 || t->extension_opcode != 1 /* cmpxchg8b */))
3895 continue;
3896
3897 /* In general, don't allow 32-bit operands on pre-386. */
3898 else if (i.suffix == LONG_MNEM_SUFFIX
3899 && !cpu_arch_flags.bitfield.cpui386
3900 && (intel_syntax
3901 ? (!t->opcode_modifier.ignoresize
3902 && !intel_float_operand (t->name))
3903 : intel_float_operand (t->name) != 2)
3904 && ((!operand_types[0].bitfield.regmmx
3905 && !operand_types[0].bitfield.regxmm)
3906 || (!operand_types[t->operands > 1].bitfield.regmmx
3907 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3908 continue;
3909
3910 /* Do not verify operands when there are none. */
3911 else
3912 {
3913 if (!t->operands)
3914 /* We've found a match; break out of loop. */
3915 break;
3916 }
3917
3918 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3919 into Disp32/Disp16/Disp32 operand. */
3920 if (i.prefix[ADDR_PREFIX] != 0)
3921 {
3922 /* There should be only one Disp operand. */
3923 switch (flag_code)
3924 {
3925 case CODE_16BIT:
3926 for (j = 0; j < MAX_OPERANDS; j++)
3927 {
3928 if (operand_types[j].bitfield.disp16)
3929 {
3930 addr_prefix_disp = j;
3931 operand_types[j].bitfield.disp32 = 1;
3932 operand_types[j].bitfield.disp16 = 0;
3933 break;
3934 }
3935 }
3936 break;
3937 case CODE_32BIT:
3938 for (j = 0; j < MAX_OPERANDS; j++)
3939 {
3940 if (operand_types[j].bitfield.disp32)
3941 {
3942 addr_prefix_disp = j;
3943 operand_types[j].bitfield.disp32 = 0;
3944 operand_types[j].bitfield.disp16 = 1;
3945 break;
3946 }
3947 }
3948 break;
3949 case CODE_64BIT:
3950 for (j = 0; j < MAX_OPERANDS; j++)
3951 {
3952 if (operand_types[j].bitfield.disp64)
3953 {
3954 addr_prefix_disp = j;
3955 operand_types[j].bitfield.disp64 = 0;
3956 operand_types[j].bitfield.disp32 = 1;
3957 break;
3958 }
3959 }
3960 break;
3961 }
3962 }
3963
3964 /* We check register size only if size of operands can be
3965 encoded the canonical way. */
3966 check_register = t->opcode_modifier.w;
3967 overlap0 = operand_type_and (i.types[0], operand_types[0]);
3968 switch (t->operands)
3969 {
3970 case 1:
3971 if (!operand_type_match (overlap0, i.types[0]))
3972 continue;
3973 break;
3974 case 2:
3975 	  /* xchg %eax, %eax is a special case.  It is an alias for nop
3976 only in 32bit mode and we can use opcode 0x90. In 64bit
3977 mode, we can't use 0x90 for xchg %eax, %eax since it should
3978 zero-extend %eax to %rax. */
3979 if (flag_code == CODE_64BIT
3980 && t->base_opcode == 0x90
3981 && operand_type_equal (&i.types [0], &acc32)
3982 && operand_type_equal (&i.types [1], &acc32))
3983 continue;
3984 if (i.swap_operand)
3985 {
3986 	          /* If we swap operands in the encoding, we either match
3987 	             the next template or reverse the direction of operands. */
3988 if (t->opcode_modifier.s)
3989 continue;
3990 else if (t->opcode_modifier.d)
3991 goto check_reverse;
3992 }
3993
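	      /* Fall through.  */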
3994 case 3:
3995 	      /* If we swap operands in the encoding, we match the next template. */
3996 if (i.swap_operand && t->opcode_modifier.s)
3997 continue;
3998 case 4:
3999 case 5:
4000 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4001 if (!operand_type_match (overlap0, i.types[0])
4002 || !operand_type_match (overlap1, i.types[1])
4003 || (check_register
4004 && !operand_type_register_match (overlap0, i.types[0],
4005 operand_types[0],
4006 overlap1, i.types[1],
4007 operand_types[1])))
4008 {
4009 /* Check if other direction is valid ... */
4010 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4011 continue;
4012
4013 check_reverse:
4014 /* Try reversing direction of operands. */
4015 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4016 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4017 if (!operand_type_match (overlap0, i.types[0])
4018 || !operand_type_match (overlap1, i.types[1])
4019 || (check_register
4020 && !operand_type_register_match (overlap0,
4021 i.types[0],
4022 operand_types[1],
4023 overlap1,
4024 i.types[1],
4025 operand_types[0])))
4026 {
4027 /* Does not match either direction. */
4028 continue;
4029 }
4030 /* found_reverse_match holds which of D or FloatDR
4031 we've found. */
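		  /* For integer insns the direction bit is bit 1 of the
		     opcode, e.g. 0x89 (mov reg to r/m) becomes 0x8b
		     (mov r/m to reg) once the operands are reversed.  */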
4032 if (t->opcode_modifier.d)
4033 found_reverse_match = Opcode_D;
4034 else if (t->opcode_modifier.floatd)
4035 found_reverse_match = Opcode_FloatD;
4036 else
4037 found_reverse_match = 0;
4038 if (t->opcode_modifier.floatr)
4039 found_reverse_match |= Opcode_FloatR;
4040 }
4041 else
4042 {
4043 /* Found a forward 2 operand match here. */
4044 switch (t->operands)
4045 {
4046 case 5:
4047 overlap4 = operand_type_and (i.types[4],
4048 operand_types[4]);
4049 case 4:
4050 overlap3 = operand_type_and (i.types[3],
4051 operand_types[3]);
4052 case 3:
4053 overlap2 = operand_type_and (i.types[2],
4054 operand_types[2]);
4055 break;
4056 }
4057
4058 switch (t->operands)
4059 {
4060 case 5:
4061 if (!operand_type_match (overlap4, i.types[4])
4062 || !operand_type_register_match (overlap3,
4063 i.types[3],
4064 operand_types[3],
4065 overlap4,
4066 i.types[4],
4067 operand_types[4]))
4068 continue;
4069 case 4:
4070 if (!operand_type_match (overlap3, i.types[3])
4071 || (check_register
4072 && !operand_type_register_match (overlap2,
4073 i.types[2],
4074 operand_types[2],
4075 overlap3,
4076 i.types[3],
4077 operand_types[3])))
4078 continue;
4079 case 3:
4080 /* Here we make use of the fact that there are no
4081 reverse match 3 operand instructions, and all 3
4082 operand instructions only need to be checked for
4083 register consistency between operands 2 and 3. */
4084 if (!operand_type_match (overlap2, i.types[2])
4085 || (check_register
4086 && !operand_type_register_match (overlap1,
4087 i.types[1],
4088 operand_types[1],
4089 overlap2,
4090 i.types[2],
4091 operand_types[2])))
4092 continue;
4093 break;
4094 }
4095 }
4096 /* Found either forward/reverse 2, 3 or 4 operand match here:
4097 slip through to break. */
4098 }
4099 if (!found_cpu_match)
4100 {
4101 found_reverse_match = 0;
4102 continue;
4103 }
4104
4105 /* Check if VEX operands are valid. */
4106 if (VEX_check_operands (t))
4107 continue;
4108
4109 /* We've found a match; break out of loop. */
4110 break;
4111 }
4112
4113 if (t == current_templates->end)
4114 {
4115 /* We found no match. */
4116 const char *err_msg;
4117 switch (i.error)
4118 {
4119 default:
4120 abort ();
4121 case operand_size_mismatch:
4122 err_msg = _("operand size mismatch");
4123 break;
4124 case operand_type_mismatch:
4125 err_msg = _("operand type mismatch");
4126 break;
4127 case register_type_mismatch:
4128 err_msg = _("register type mismatch");
4129 break;
4130 case number_of_operands_mismatch:
4131 err_msg = _("number of operands mismatch");
4132 break;
4133 case invalid_instruction_suffix:
4134 err_msg = _("invalid instruction suffix");
4135 break;
4136 case bad_imm4:
4137 err_msg = _("Imm4 isn't the first operand");
4138 break;
4139 case old_gcc_only:
4140 err_msg = _("only supported with old gcc");
4141 break;
4142 case unsupported_with_intel_mnemonic:
4143 err_msg = _("unsupported with Intel mnemonic");
4144 break;
4145 case unsupported_syntax:
4146 err_msg = _("unsupported syntax");
4147 break;
4148 case unsupported:
4149 err_msg = _("unsupported");
4150 break;
4151 }
4152 as_bad (_("%s for `%s'"), err_msg,
4153 current_templates->start->name);
4154 return NULL;
4155 }
4156
4157 if (!quiet_warnings)
4158 {
4159 if (!intel_syntax
4160 && (i.types[0].bitfield.jumpabsolute
4161 != operand_types[0].bitfield.jumpabsolute))
4162 {
4163 as_warn (_("indirect %s without `*'"), t->name);
4164 }
4165
4166 if (t->opcode_modifier.isprefix
4167 && t->opcode_modifier.ignoresize)
4168 {
4169 /* Warn them that a data or address size prefix doesn't
4170 affect assembly of the next line of code. */
4171 as_warn (_("stand-alone `%s' prefix"), t->name);
4172 }
4173 }
4174
4175 /* Copy the template we found. */
4176 i.tm = *t;
4177
4178 if (addr_prefix_disp != -1)
4179 i.tm.operand_types[addr_prefix_disp]
4180 = operand_types[addr_prefix_disp];
4181
4182 if (found_reverse_match)
4183 {
4184 /* If we found a reverse match we must alter the opcode
4185 direction bit. found_reverse_match holds bits to change
4186 (different for int & float insns). */
4187
4188 i.tm.base_opcode ^= found_reverse_match;
4189
4190 i.tm.operand_types[0] = operand_types[1];
4191 i.tm.operand_types[1] = operand_types[0];
4192 }
4193
4194 return t;
4195 }
4196
4197 static int
4198 check_string (void)
4199 {
4200 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4201 if (i.tm.operand_types[mem_op].bitfield.esseg)
4202 {
4203 if (i.seg[0] != NULL && i.seg[0] != &es)
4204 {
4205 as_bad (_("`%s' operand %d must use `%ses' segment"),
4206 i.tm.name,
4207 mem_op + 1,
4208 register_prefix);
4209 return 0;
4210 }
4211 /* There's only ever one segment override allowed per instruction.
4212 This instruction possibly has a legal segment override on the
4213 second operand, so copy the segment to where non-string
4214 instructions store it, allowing common code. */
4215 i.seg[0] = i.seg[1];
4216 }
4217 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4218 {
4219 if (i.seg[1] != NULL && i.seg[1] != &es)
4220 {
4221 as_bad (_("`%s' operand %d must use `%ses' segment"),
4222 i.tm.name,
4223 mem_op + 2,
4224 register_prefix);
4225 return 0;
4226 }
4227 }
4228 return 1;
4229 }
4230
4231 static int
4232 process_suffix (void)
4233 {
4234 /* If matched instruction specifies an explicit instruction mnemonic
4235 suffix, use it. */
4236 if (i.tm.opcode_modifier.size16)
4237 i.suffix = WORD_MNEM_SUFFIX;
4238 else if (i.tm.opcode_modifier.size32)
4239 i.suffix = LONG_MNEM_SUFFIX;
4240 else if (i.tm.opcode_modifier.size64)
4241 i.suffix = QWORD_MNEM_SUFFIX;
4242 else if (i.reg_operands)
4243 {
4244 /* If there's no instruction mnemonic suffix we try to invent one
4245 based on register operands. */
4246 if (!i.suffix)
4247 {
4248 	      /* We take i.suffix from the last register operand specified.
4249 Destination register type is more significant than source
4250 register type. crc32 in SSE4.2 prefers source register
4251 type. */
4252 if (i.tm.base_opcode == 0xf20f38f1)
4253 {
4254 if (i.types[0].bitfield.reg16)
4255 i.suffix = WORD_MNEM_SUFFIX;
4256 else if (i.types[0].bitfield.reg32)
4257 i.suffix = LONG_MNEM_SUFFIX;
4258 else if (i.types[0].bitfield.reg64)
4259 i.suffix = QWORD_MNEM_SUFFIX;
4260 }
4261 else if (i.tm.base_opcode == 0xf20f38f0)
4262 {
4263 if (i.types[0].bitfield.reg8)
4264 i.suffix = BYTE_MNEM_SUFFIX;
4265 }
4266
4267 if (!i.suffix)
4268 {
4269 int op;
4270
4271 if (i.tm.base_opcode == 0xf20f38f1
4272 || i.tm.base_opcode == 0xf20f38f0)
4273 {
4274 /* We have to know the operand size for crc32. */
4275 	              as_bad (_("ambiguous memory operand size for `%s'"),
4276 i.tm.name);
4277 return 0;
4278 }
4279
4280 for (op = i.operands; --op >= 0;)
4281 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4282 {
4283 if (i.types[op].bitfield.reg8)
4284 {
4285 i.suffix = BYTE_MNEM_SUFFIX;
4286 break;
4287 }
4288 else if (i.types[op].bitfield.reg16)
4289 {
4290 i.suffix = WORD_MNEM_SUFFIX;
4291 break;
4292 }
4293 else if (i.types[op].bitfield.reg32)
4294 {
4295 i.suffix = LONG_MNEM_SUFFIX;
4296 break;
4297 }
4298 else if (i.types[op].bitfield.reg64)
4299 {
4300 i.suffix = QWORD_MNEM_SUFFIX;
4301 break;
4302 }
4303 }
4304 }
4305 }
4306 else if (i.suffix == BYTE_MNEM_SUFFIX)
4307 {
4308 if (intel_syntax
4309 && i.tm.opcode_modifier.ignoresize
4310 && i.tm.opcode_modifier.no_bsuf)
4311 i.suffix = 0;
4312 else if (!check_byte_reg ())
4313 return 0;
4314 }
4315 else if (i.suffix == LONG_MNEM_SUFFIX)
4316 {
4317 if (intel_syntax
4318 && i.tm.opcode_modifier.ignoresize
4319 && i.tm.opcode_modifier.no_lsuf)
4320 i.suffix = 0;
4321 else if (!check_long_reg ())
4322 return 0;
4323 }
4324 else if (i.suffix == QWORD_MNEM_SUFFIX)
4325 {
4326 if (intel_syntax
4327 && i.tm.opcode_modifier.ignoresize
4328 && i.tm.opcode_modifier.no_qsuf)
4329 i.suffix = 0;
4330 else if (!check_qword_reg ())
4331 return 0;
4332 }
4333 else if (i.suffix == WORD_MNEM_SUFFIX)
4334 {
4335 if (intel_syntax
4336 && i.tm.opcode_modifier.ignoresize
4337 && i.tm.opcode_modifier.no_wsuf)
4338 i.suffix = 0;
4339 else if (!check_word_reg ())
4340 return 0;
4341 }
4342 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4343 || i.suffix == YMMWORD_MNEM_SUFFIX)
4344 {
4345 /* Skip if the instruction has x/y suffix. match_template
4346 should check if it is a valid suffix. */
4347 }
4348 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4349 /* Do nothing if the instruction is going to ignore the prefix. */
4350 ;
4351 else
4352 abort ();
4353 }
4354 else if (i.tm.opcode_modifier.defaultsize
4355 && !i.suffix
4356 /* exclude fldenv/frstor/fsave/fstenv */
4357 && i.tm.opcode_modifier.no_ssuf)
4358 {
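      /* DefaultSize insns (push, pop, etc.) take the stack operand size
	 here; stackop_size only differs from its default for ".code16gcc",
	 which forces 32-bit stack operations in 16-bit code.  */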
4359 i.suffix = stackop_size;
4360 }
4361 else if (intel_syntax
4362 && !i.suffix
4363 && (i.tm.operand_types[0].bitfield.jumpabsolute
4364 || i.tm.opcode_modifier.jumpbyte
4365 || i.tm.opcode_modifier.jumpintersegment
4366 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4367 && i.tm.extension_opcode <= 3)))
4368 {
4369 switch (flag_code)
4370 {
4371 case CODE_64BIT:
4372 if (!i.tm.opcode_modifier.no_qsuf)
4373 {
4374 i.suffix = QWORD_MNEM_SUFFIX;
4375 break;
4376 }
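	  /* Fall through.  */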
4377 case CODE_32BIT:
4378 if (!i.tm.opcode_modifier.no_lsuf)
4379 i.suffix = LONG_MNEM_SUFFIX;
4380 break;
4381 case CODE_16BIT:
4382 if (!i.tm.opcode_modifier.no_wsuf)
4383 i.suffix = WORD_MNEM_SUFFIX;
4384 break;
4385 }
4386 }
4387
4388 if (!i.suffix)
4389 {
4390 if (!intel_syntax)
4391 {
4392 if (i.tm.opcode_modifier.w)
4393 {
4394 as_bad (_("no instruction mnemonic suffix given and "
4395 "no register operands; can't size instruction"));
4396 return 0;
4397 }
4398 }
4399 else
4400 {
4401 unsigned int suffixes;
4402
4403 suffixes = !i.tm.opcode_modifier.no_bsuf;
4404 if (!i.tm.opcode_modifier.no_wsuf)
4405 suffixes |= 1 << 1;
4406 if (!i.tm.opcode_modifier.no_lsuf)
4407 suffixes |= 1 << 2;
4408 if (!i.tm.opcode_modifier.no_ldsuf)
4409 suffixes |= 1 << 3;
4410 if (!i.tm.opcode_modifier.no_ssuf)
4411 suffixes |= 1 << 4;
4412 if (!i.tm.opcode_modifier.no_qsuf)
4413 suffixes |= 1 << 5;
4414
4415 	  /* There is more than one suffix match. */
4416 if (i.tm.opcode_modifier.w
4417 || ((suffixes & (suffixes - 1))
4418 && !i.tm.opcode_modifier.defaultsize
4419 && !i.tm.opcode_modifier.ignoresize))
4420 {
4421 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4422 return 0;
4423 }
4424 }
4425 }
4426
4427 /* Change the opcode based on the operand size given by i.suffix;
4428 We don't need to change things for byte insns. */
4429
4430 if (i.suffix
4431 && i.suffix != BYTE_MNEM_SUFFIX
4432 && i.suffix != XMMWORD_MNEM_SUFFIX
4433 && i.suffix != YMMWORD_MNEM_SUFFIX)
4434 {
4435 /* It's not a byte, select word/dword operation. */
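	  /* e.g. the byte form 0x88 of "mov" becomes the word/dword form
	     0x89; short form insns such as "mov $imm, %reg" (0xb0 + reg)
	     flip bit 3 instead, giving 0xb8 + reg.  */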
4436 if (i.tm.opcode_modifier.w)
4437 {
4438 if (i.tm.opcode_modifier.shortform)
4439 i.tm.base_opcode |= 8;
4440 else
4441 i.tm.base_opcode |= 1;
4442 }
4443
4444 /* Now select between word & dword operations via the operand
4445 size prefix, except for instructions that will ignore this
4446 prefix anyway. */
4447 if (i.tm.opcode_modifier.addrprefixop0)
4448 {
4449 /* The address size override prefix changes the size of the
4450 first operand. */
4451 if ((flag_code == CODE_32BIT
4452 && i.op->regs[0].reg_type.bitfield.reg16)
4453 || (flag_code != CODE_32BIT
4454 && i.op->regs[0].reg_type.bitfield.reg32))
4455 if (!add_prefix (ADDR_PREFIX_OPCODE))
4456 return 0;
4457 }
4458 else if (i.suffix != QWORD_MNEM_SUFFIX
4459 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4460 && !i.tm.opcode_modifier.ignoresize
4461 && !i.tm.opcode_modifier.floatmf
4462 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4463 || (flag_code == CODE_64BIT
4464 && i.tm.opcode_modifier.jumpbyte)))
4465 {
4466 unsigned int prefix = DATA_PREFIX_OPCODE;
4467
4468 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4469 prefix = ADDR_PREFIX_OPCODE;
4470
4471 if (!add_prefix (prefix))
4472 return 0;
4473 }
4474
4475 /* Set mode64 for an operand. */
4476 if (i.suffix == QWORD_MNEM_SUFFIX
4477 && flag_code == CODE_64BIT
4478 && !i.tm.opcode_modifier.norex64)
4479 {
4480 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4481 need rex64. cmpxchg8b is also a special case. */
4482 if (! (i.operands == 2
4483 && i.tm.base_opcode == 0x90
4484 && i.tm.extension_opcode == None
4485 && operand_type_equal (&i.types [0], &acc64)
4486 && operand_type_equal (&i.types [1], &acc64))
4487 && ! (i.operands == 1
4488 && i.tm.base_opcode == 0xfc7
4489 && i.tm.extension_opcode == 1
4490 && !operand_type_check (i.types [0], reg)
4491 && operand_type_check (i.types [0], anymem)))
4492 i.rex |= REX_W;
4493 }
4494
4495 /* Size floating point instruction. */
4496 if (i.suffix == LONG_MNEM_SUFFIX)
4497 if (i.tm.opcode_modifier.floatmf)
4498 i.tm.base_opcode ^= 4;
4499 }
4500
4501 return 1;
4502 }
4503
4504 static int
4505 check_byte_reg (void)
4506 {
4507 int op;
4508
4509 for (op = i.operands; --op >= 0;)
4510 {
4511 /* If this is an eight bit register, it's OK. If it's the 16 or
4512 32 bit version of an eight bit register, we will just use the
4513 low portion, and that's OK too. */
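      /* e.g. "movb $1, %eax" is assembled as if "movb $1, %al" had been
	 written, with a warning; in 64-bit mode it is rejected instead.  */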
4514 if (i.types[op].bitfield.reg8)
4515 continue;
4516
4517 /* crc32 doesn't generate this warning. */
4518 if (i.tm.base_opcode == 0xf20f38f0)
4519 continue;
4520
4521 if ((i.types[op].bitfield.reg16
4522 || i.types[op].bitfield.reg32
4523 || i.types[op].bitfield.reg64)
4524 && i.op[op].regs->reg_num < 4)
4525 {
4526 /* Prohibit these changes in the 64bit mode, since the
4527 lowering is more complicated. */
4528 if (flag_code == CODE_64BIT
4529 && !i.tm.operand_types[op].bitfield.inoutportreg)
4530 {
4531 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4532 register_prefix, i.op[op].regs->reg_name,
4533 i.suffix);
4534 return 0;
4535 }
4536 #if REGISTER_WARNINGS
4537 if (!quiet_warnings
4538 && !i.tm.operand_types[op].bitfield.inoutportreg)
4539 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4540 register_prefix,
4541 (i.op[op].regs + (i.types[op].bitfield.reg16
4542 ? REGNAM_AL - REGNAM_AX
4543 : REGNAM_AL - REGNAM_EAX))->reg_name,
4544 register_prefix,
4545 i.op[op].regs->reg_name,
4546 i.suffix);
4547 #endif
4548 continue;
4549 }
4550 /* Any other register is bad. */
4551 if (i.types[op].bitfield.reg16
4552 || i.types[op].bitfield.reg32
4553 || i.types[op].bitfield.reg64
4554 || i.types[op].bitfield.regmmx
4555 || i.types[op].bitfield.regxmm
4556 || i.types[op].bitfield.regymm
4557 || i.types[op].bitfield.sreg2
4558 || i.types[op].bitfield.sreg3
4559 || i.types[op].bitfield.control
4560 || i.types[op].bitfield.debug
4561 || i.types[op].bitfield.test
4562 || i.types[op].bitfield.floatreg
4563 || i.types[op].bitfield.floatacc)
4564 {
4565 as_bad (_("`%s%s' not allowed with `%s%c'"),
4566 register_prefix,
4567 i.op[op].regs->reg_name,
4568 i.tm.name,
4569 i.suffix);
4570 return 0;
4571 }
4572 }
4573 return 1;
4574 }
4575
4576 static int
4577 check_long_reg (void)
4578 {
4579 int op;
4580
4581 for (op = i.operands; --op >= 0;)
4582 /* Reject eight bit registers, except where the template requires
4583 them. (eg. movzb) */
4584 if (i.types[op].bitfield.reg8
4585 && (i.tm.operand_types[op].bitfield.reg16
4586 || i.tm.operand_types[op].bitfield.reg32
4587 || i.tm.operand_types[op].bitfield.acc))
4588 {
4589 as_bad (_("`%s%s' not allowed with `%s%c'"),
4590 register_prefix,
4591 i.op[op].regs->reg_name,
4592 i.tm.name,
4593 i.suffix);
4594 return 0;
4595 }
4596 /* Warn if the e prefix on a general reg is missing. */
4597 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4598 && i.types[op].bitfield.reg16
4599 && (i.tm.operand_types[op].bitfield.reg32
4600 || i.tm.operand_types[op].bitfield.acc))
4601 {
4602 /* Prohibit these changes in the 64bit mode, since the
4603 lowering is more complicated. */
4604 if (flag_code == CODE_64BIT)
4605 {
4606 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4607 register_prefix, i.op[op].regs->reg_name,
4608 i.suffix);
4609 return 0;
4610 }
4611 #if REGISTER_WARNINGS
4612 else
4613 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4614 register_prefix,
4615 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4616 register_prefix,
4617 i.op[op].regs->reg_name,
4618 i.suffix);
4619 #endif
4620 }
4621 /* Warn if the r prefix on a general reg is missing. */
4622 else if (i.types[op].bitfield.reg64
4623 && (i.tm.operand_types[op].bitfield.reg32
4624 || i.tm.operand_types[op].bitfield.acc))
4625 {
4626 if (intel_syntax
4627 && i.tm.opcode_modifier.toqword
4628 && !i.types[0].bitfield.regxmm)
4629 {
4630 /* Convert to QWORD. We want REX byte. */
4631 i.suffix = QWORD_MNEM_SUFFIX;
4632 }
4633 else
4634 {
4635 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4636 register_prefix, i.op[op].regs->reg_name,
4637 i.suffix);
4638 return 0;
4639 }
4640 }
4641 return 1;
4642 }
4643
4644 static int
4645 check_qword_reg (void)
4646 {
4647 int op;
4648
4649 for (op = i.operands; --op >= 0; )
4650 /* Reject eight bit registers, except where the template requires
4651 them. (eg. movzb) */
4652 if (i.types[op].bitfield.reg8
4653 && (i.tm.operand_types[op].bitfield.reg16
4654 || i.tm.operand_types[op].bitfield.reg32
4655 || i.tm.operand_types[op].bitfield.acc))
4656 {
4657 as_bad (_("`%s%s' not allowed with `%s%c'"),
4658 register_prefix,
4659 i.op[op].regs->reg_name,
4660 i.tm.name,
4661 i.suffix);
4662 return 0;
4663 }
4664     /* Warn if the r prefix on a general reg is missing. */
4665 else if ((i.types[op].bitfield.reg16
4666 || i.types[op].bitfield.reg32)
4667 && (i.tm.operand_types[op].bitfield.reg32
4668 || i.tm.operand_types[op].bitfield.acc))
4669 {
4670 /* Prohibit these changes in the 64bit mode, since the
4671 lowering is more complicated. */
4672 if (intel_syntax
4673 && i.tm.opcode_modifier.todword
4674 && !i.types[0].bitfield.regxmm)
4675 {
4676 /* Convert to DWORD. We don't want REX byte. */
4677 i.suffix = LONG_MNEM_SUFFIX;
4678 }
4679 else
4680 {
4681 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4682 register_prefix, i.op[op].regs->reg_name,
4683 i.suffix);
4684 return 0;
4685 }
4686 }
4687 return 1;
4688 }
4689
4690 static int
4691 check_word_reg (void)
4692 {
4693 int op;
4694 for (op = i.operands; --op >= 0;)
4695 /* Reject eight bit registers, except where the template requires
4696 them. (eg. movzb) */
4697 if (i.types[op].bitfield.reg8
4698 && (i.tm.operand_types[op].bitfield.reg16
4699 || i.tm.operand_types[op].bitfield.reg32
4700 || i.tm.operand_types[op].bitfield.acc))
4701 {
4702 as_bad (_("`%s%s' not allowed with `%s%c'"),
4703 register_prefix,
4704 i.op[op].regs->reg_name,
4705 i.tm.name,
4706 i.suffix);
4707 return 0;
4708 }
4709 /* Warn if the e prefix on a general reg is present. */
4710 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4711 && i.types[op].bitfield.reg32
4712 && (i.tm.operand_types[op].bitfield.reg16
4713 || i.tm.operand_types[op].bitfield.acc))
4714 {
4715 /* Prohibit these changes in the 64bit mode, since the
4716 lowering is more complicated. */
4717 if (flag_code == CODE_64BIT)
4718 {
4719 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4720 register_prefix, i.op[op].regs->reg_name,
4721 i.suffix);
4722 return 0;
4723 }
4724 #if REGISTER_WARNINGS
4725 	  else
4726 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4727 register_prefix,
4728 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4729 register_prefix,
4730 i.op[op].regs->reg_name,
4731 i.suffix);
4732 #endif
4733 }
4734 return 1;
4735 }
4736
4737 static int
4738 update_imm (unsigned int j)
4739 {
4740 i386_operand_type overlap = i.types[j];
4741 if ((overlap.bitfield.imm8
4742 || overlap.bitfield.imm8s
4743 || overlap.bitfield.imm16
4744 || overlap.bitfield.imm32
4745 || overlap.bitfield.imm32s
4746 || overlap.bitfield.imm64)
4747 && !operand_type_equal (&overlap, &imm8)
4748 && !operand_type_equal (&overlap, &imm8s)
4749 && !operand_type_equal (&overlap, &imm16)
4750 && !operand_type_equal (&overlap, &imm32)
4751 && !operand_type_equal (&overlap, &imm32s)
4752 && !operand_type_equal (&overlap, &imm64))
4753 {
4754 if (i.suffix)
4755 {
4756 i386_operand_type temp;
4757
4758 operand_type_set (&temp, 0);
4759 if (i.suffix == BYTE_MNEM_SUFFIX)
4760 {
4761 temp.bitfield.imm8 = overlap.bitfield.imm8;
4762 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4763 }
4764 else if (i.suffix == WORD_MNEM_SUFFIX)
4765 temp.bitfield.imm16 = overlap.bitfield.imm16;
4766 else if (i.suffix == QWORD_MNEM_SUFFIX)
4767 {
4768 temp.bitfield.imm64 = overlap.bitfield.imm64;
4769 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4770 }
4771 else
4772 temp.bitfield.imm32 = overlap.bitfield.imm32;
4773 overlap = temp;
4774 }
4775 else if (operand_type_equal (&overlap, &imm16_32_32s)
4776 || operand_type_equal (&overlap, &imm16_32)
4777 || operand_type_equal (&overlap, &imm16_32s))
4778 {
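	  /* Pick the size the operand size prefix actually yields:
	     16-bit immediates in 16-bit code without 0x66, or in
	     32/64-bit code with 0x66; otherwise 32-bit.  */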
4779 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4780 overlap = imm16;
4781 else
4782 overlap = imm32s;
4783 }
4784 if (!operand_type_equal (&overlap, &imm8)
4785 && !operand_type_equal (&overlap, &imm8s)
4786 && !operand_type_equal (&overlap, &imm16)
4787 && !operand_type_equal (&overlap, &imm32)
4788 && !operand_type_equal (&overlap, &imm32s)
4789 && !operand_type_equal (&overlap, &imm64))
4790 {
4791 as_bad (_("no instruction mnemonic suffix given; "
4792 "can't determine immediate size"));
4793 return 0;
4794 }
4795 }
4796 i.types[j] = overlap;
4797
4798 return 1;
4799 }
4800
4801 static int
4802 finalize_imm (void)
4803 {
4804 unsigned int j, n;
4805
4806 /* Update the first 2 immediate operands. */
4807 n = i.operands > 2 ? 2 : i.operands;
4808 if (n)
4809 {
4810 for (j = 0; j < n; j++)
4811 if (update_imm (j) == 0)
4812 return 0;
4813
4814       /* The 3rd operand can't be an immediate operand. */
4815 gas_assert (operand_type_check (i.types[2], imm) == 0);
4816 }
4817
4818 return 1;
4819 }
4820
4821 static int
4822 bad_implicit_operand (int xmm)
4823 {
4824 const char *ireg = xmm ? "xmm0" : "ymm0";
4825
4826 if (intel_syntax)
4827 as_bad (_("the last operand of `%s' must be `%s%s'"),
4828 i.tm.name, register_prefix, ireg);
4829 else
4830 as_bad (_("the first operand of `%s' must be `%s%s'"),
4831 i.tm.name, register_prefix, ireg);
4832 return 0;
4833 }
4834
4835 static int
4836 process_operands (void)
4837 {
4838 /* Default segment register this instruction will use for memory
4839 accesses. 0 means unknown. This is only for optimizing out
4840 unnecessary segment overrides. */
4841 const seg_entry *default_seg = 0;
4842
4843 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4844 {
4845 unsigned int dupl = i.operands;
4846 unsigned int dest = dupl - 1;
4847 unsigned int j;
4848
4849 /* The destination must be an xmm register. */
4850 gas_assert (i.reg_operands
4851 && MAX_OPERANDS > dupl
4852 && operand_type_equal (&i.types[dest], &regxmm));
4853
4854 if (i.tm.opcode_modifier.firstxmm0)
4855 {
4856 /* The first operand is implicit and must be xmm0. */
4857 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4858 if (i.op[0].regs->reg_num != 0)
4859 return bad_implicit_operand (1);
4860
4861 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4862 {
4863 /* Keep xmm0 for instructions with VEX prefix and 3
4864 sources. */
4865 goto duplicate;
4866 }
4867 else
4868 {
4869 /* We remove the first xmm0 and keep the number of
4870 operands unchanged, which in fact duplicates the
4871 destination. */
4872 for (j = 1; j < i.operands; j++)
4873 {
4874 i.op[j - 1] = i.op[j];
4875 i.types[j - 1] = i.types[j];
4876 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4877 }
4878 }
4879 }
4880 else if (i.tm.opcode_modifier.implicit1stxmm0)
4881 {
4882 gas_assert ((MAX_OPERANDS - 1) > dupl
4883 && (i.tm.opcode_modifier.vexsources
4884 == VEX3SOURCES));
4885
4886 /* Add the implicit xmm0 for instructions with VEX prefix
4887 and 3 sources. */
4888 for (j = i.operands; j > 0; j--)
4889 {
4890 i.op[j] = i.op[j - 1];
4891 i.types[j] = i.types[j - 1];
4892 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4893 }
4894 i.op[0].regs
4895 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4896 i.types[0] = regxmm;
4897 i.tm.operand_types[0] = regxmm;
4898
4899 i.operands += 2;
4900 i.reg_operands += 2;
4901 i.tm.operands += 2;
4902
4903 dupl++;
4904 dest++;
4905 i.op[dupl] = i.op[dest];
4906 i.types[dupl] = i.types[dest];
4907 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4908 }
4909 else
4910 {
4911 duplicate:
4912 i.operands++;
4913 i.reg_operands++;
4914 i.tm.operands++;
4915
4916 i.op[dupl] = i.op[dest];
4917 i.types[dupl] = i.types[dest];
4918 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4919 }
4920
4921 if (i.tm.opcode_modifier.immext)
4922 process_immext ();
4923 }
4924 else if (i.tm.opcode_modifier.firstxmm0)
4925 {
4926 unsigned int j;
4927
4928 /* The first operand is implicit and must be xmm0/ymm0. */
4929 gas_assert (i.reg_operands
4930 && (operand_type_equal (&i.types[0], &regxmm)
4931 || operand_type_equal (&i.types[0], &regymm)));
4932 if (i.op[0].regs->reg_num != 0)
4933 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4934
4935 for (j = 1; j < i.operands; j++)
4936 {
4937 i.op[j - 1] = i.op[j];
4938 i.types[j - 1] = i.types[j];
4939
4940 /* We need to adjust fields in i.tm since they are used by
4941 build_modrm_byte. */
4942 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4943 }
4944
4945 i.operands--;
4946 i.reg_operands--;
4947 i.tm.operands--;
4948 }
4949 else if (i.tm.opcode_modifier.regkludge)
4950 {
4951 /* The imul $imm, %reg instruction is converted into
4952 imul $imm, %reg, %reg, and the clr %reg instruction
4953 is converted into xor %reg, %reg. */
4954
4955 unsigned int first_reg_op;
4956
4957 if (operand_type_check (i.types[0], reg))
4958 first_reg_op = 0;
4959 else
4960 first_reg_op = 1;
4961 /* Pretend we saw the extra register operand. */
4962 gas_assert (i.reg_operands == 1
4963 && i.op[first_reg_op + 1].regs == 0);
4964 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
4965 i.types[first_reg_op + 1] = i.types[first_reg_op];
4966 i.operands++;
4967 i.reg_operands++;
4968 }
4969
4970 if (i.tm.opcode_modifier.shortform)
4971 {
4972 if (i.types[0].bitfield.sreg2
4973 || i.types[0].bitfield.sreg3)
4974 {
4975 if (i.tm.base_opcode == POP_SEG_SHORT
4976 && i.op[0].regs->reg_num == 1)
4977 {
4978 as_bad (_("you can't `pop %scs'"), register_prefix);
4979 return 0;
4980 }
4981 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
4982 if ((i.op[0].regs->reg_flags & RegRex) != 0)
4983 i.rex |= REX_B;
4984 }
4985 else
4986 {
4987 /* The register or float register operand is in operand
4988 0 or 1. */
4989 unsigned int op;
4990
4991 if (i.types[0].bitfield.floatreg
4992 || operand_type_check (i.types[0], reg))
4993 op = 0;
4994 else
4995 op = 1;
4996 /* Register goes in low 3 bits of opcode. */
4997 i.tm.base_opcode |= i.op[op].regs->reg_num;
4998 if ((i.op[op].regs->reg_flags & RegRex) != 0)
4999 i.rex |= REX_B;
5000 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5001 {
5002 /* Warn about some common errors, but press on regardless.
5003 The first case can be generated by gcc (<= 2.8.1). */
5004 if (i.operands == 2)
5005 {
5006 /* Reversed arguments on faddp, fsubp, etc. */
5007 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5008 register_prefix, i.op[!intel_syntax].regs->reg_name,
5009 register_prefix, i.op[intel_syntax].regs->reg_name);
5010 }
5011 else
5012 {
5013 /* Extraneous `l' suffix on fp insn. */
5014 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5015 register_prefix, i.op[0].regs->reg_name);
5016 }
5017 }
5018 }
5019 }
5020 else if (i.tm.opcode_modifier.modrm)
5021 {
5022 /* The opcode is completed (modulo i.tm.extension_opcode which
5023 must be put into the modrm byte). Now, we make the modrm and
5024 index base bytes based on all the info we've collected. */
5025
5026 default_seg = build_modrm_byte ();
5027 }
5028 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5029 {
5030 default_seg = &ds;
5031 }
5032 else if (i.tm.opcode_modifier.isstring)
5033 {
5034 /* For the string instructions that allow a segment override
5035 on one of their operands, the default segment is ds. */
5036 default_seg = &ds;
5037 }
5038
5039 if (i.tm.base_opcode == 0x8d /* lea */
5040 && i.seg[0]
5041 && !quiet_warnings)
5042 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5043
5044 /* If a segment was explicitly specified, and the specified segment
5045 is not the default, use an opcode prefix to select it. If we
5046 never figured out what the default segment is, then default_seg
5047 will be zero at this point, and the specified segment prefix will
5048 always be used. */
5049 if ((i.seg[0]) && (i.seg[0] != default_seg))
5050 {
5051 if (!add_prefix (i.seg[0]->seg_prefix))
5052 return 0;
5053 }
5054 return 1;
5055 }
5056
5057 static const seg_entry *
5058 build_modrm_byte (void)
5059 {
5060 const seg_entry *default_seg = 0;
5061 unsigned int source, dest;
5062 int vex_3_sources;
5063
5064 /* The first operand of instructions with VEX prefix and 3 sources
5065 must be VEX_Imm4. */
5066 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5067 if (vex_3_sources)
5068 {
5069 unsigned int nds, reg_slot;
5070 expressionS *exp;
5071
5072 if (i.tm.opcode_modifier.veximmext
5073 && i.tm.opcode_modifier.immext)
5074 {
5075 dest = i.operands - 2;
5076 gas_assert (dest == 3);
5077 }
5078 else
5079 dest = i.operands - 1;
5080 nds = dest - 1;
5081
5082 /* There are 2 kinds of instructions:
5083 1. 5 operands: 4 register operands or 3 register operands
5084 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5085 VexW0 or VexW1. The destination must be either XMM or YMM
5086 register.
5087 2. 4 operands: 4 register operands or 3 register operands
5088 plus 1 memory operand, VexXDS, and VexImmExt */
5089 gas_assert ((i.reg_operands == 4
5090 || (i.reg_operands == 3 && i.mem_operands == 1))
5091 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5092 && (i.tm.opcode_modifier.veximmext
5093 || (i.imm_operands == 1
5094 && i.types[0].bitfield.vec_imm4
5095 && (i.tm.opcode_modifier.vexw == VEXW0
5096 || i.tm.opcode_modifier.vexw == VEXW1)
5097 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5098 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5099
5100 if (i.imm_operands == 0)
5101 {
5102 /* When there is no immediate operand, generate an 8bit
5103 immediate operand to encode the first operand. */
5104 exp = &im_expressions[i.imm_operands++];
5105 i.op[i.operands].imms = exp;
5106 i.types[i.operands] = imm8;
5107 i.operands++;
5108 /* If VexW1 is set, the first operand is the source and
5109 the second operand is encoded in the immediate operand. */
5110 if (i.tm.opcode_modifier.vexw == VEXW1)
5111 {
5112 source = 0;
5113 reg_slot = 1;
5114 }
5115 else
5116 {
5117 source = 1;
5118 reg_slot = 0;
5119 }
5120
5121 /* FMA swaps REG and NDS. */
5122 if (i.tm.cpu_flags.bitfield.cpufma)
5123 {
5124 unsigned int tmp;
5125 tmp = reg_slot;
5126 reg_slot = nds;
5127 nds = tmp;
5128 }
5129
5130 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5131 &regxmm)
5132 || operand_type_equal (&i.tm.operand_types[reg_slot],
5133 &regymm));
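	  /* The register goes in the high 4 bits of the 8-bit immediate
	     (imm8[7:4]); RegRex supplies bit 3 of the register number.  */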
5134 exp->X_op = O_constant;
5135 exp->X_add_number
5136 = ((i.op[reg_slot].regs->reg_num
5137 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5138 << 4);
5139 }
5140 else
5141 {
5142 unsigned int imm_slot;
5143
5144 if (i.tm.opcode_modifier.vexw == VEXW0)
5145 {
5146 /* If VexW0 is set, the third operand is the source and
5147 the second operand is encoded in the immediate
5148 operand. */
5149 source = 2;
5150 reg_slot = 1;
5151 }
5152 else
5153 {
5154 /* VexW1 is set, the second operand is the source and
5155 the third operand is encoded in the immediate
5156 operand. */
5157 source = 1;
5158 reg_slot = 2;
5159 }
5160
5161 if (i.tm.opcode_modifier.immext)
5162 {
5163 	      /* When ImmExt is set, the immediate byte is the last
5164 operand. */
5165 imm_slot = i.operands - 1;
5166 source--;
5167 reg_slot--;
5168 }
5169 else
5170 {
5171 imm_slot = 0;
5172
5173 /* Turn on Imm8 so that output_imm will generate it. */
5174 i.types[imm_slot].bitfield.imm8 = 1;
5175 }
5176
5177 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5178 &regxmm)
5179 || operand_type_equal (&i.tm.operand_types[reg_slot],
5180 &regymm));
5181 i.op[imm_slot].imms->X_add_number
5182 |= ((i.op[reg_slot].regs->reg_num
5183 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5184 << 4);
5185 }
5186
5187 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5188 || operand_type_equal (&i.tm.operand_types[nds],
5189 &regymm));
5190 i.vex.register_specifier = i.op[nds].regs;
5191 }
5192 else
5193 source = dest = 0;
5194
5195 /* i.reg_operands MUST be the number of real register operands;
5196 implicit registers do not count. If there are 3 register
5197 	     operands, it must be an instruction with VexNDS.  For an
5198 instruction with VexNDD, the destination register is encoded
5199 in VEX prefix. If there are 4 register operands, it must be
5200 	     an instruction with VEX prefix and 3 sources. */
5201 if (i.mem_operands == 0
5202 && ((i.reg_operands == 2
5203 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5204 || (i.reg_operands == 3
5205 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5206 || (i.reg_operands == 4 && vex_3_sources)))
5207 {
5208 switch (i.operands)
5209 {
5210 case 2:
5211 source = 0;
5212 break;
5213 case 3:
5214 /* When there are 3 operands, one of them may be immediate,
5215 which may be the first or the last operand. Otherwise,
5216 	     the first operand must be the shift count register (cl) or it
5217 is an instruction with VexNDS. */
5218 gas_assert (i.imm_operands == 1
5219 || (i.imm_operands == 0
5220 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5221 || i.types[0].bitfield.shiftcount)));
5222 if (operand_type_check (i.types[0], imm)
5223 || i.types[0].bitfield.shiftcount)
5224 source = 1;
5225 else
5226 source = 0;
5227 break;
5228 case 4:
5229 /* When there are 4 operands, the first two must be 8bit
5230 immediate operands. The source operand will be the 3rd
5231 one.
5232
5233 For instructions with VexNDS, if the first operand
5234 	     is an imm8, the source operand is the 2nd one.  If the last
5235 operand is imm8, the source operand is the first one. */
5236 gas_assert ((i.imm_operands == 2
5237 && i.types[0].bitfield.imm8
5238 && i.types[1].bitfield.imm8)
5239 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5240 && i.imm_operands == 1
5241 && (i.types[0].bitfield.imm8
5242 || i.types[i.operands - 1].bitfield.imm8)));
5243 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5244 {
5245 if (i.types[0].bitfield.imm8)
5246 source = 1;
5247 else
5248 source = 0;
5249 }
5250 else
5251 source = 2;
5252 break;
5253 case 5:
5254 break;
5255 default:
5256 abort ();
5257 }
5258
5259 if (!vex_3_sources)
5260 {
5261 dest = source + 1;
5262
5263 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5264 {
5265 /* For instructions with VexNDS, the register-only
5266 source operand must be XMM or YMM register. It is
5267 encoded in VEX prefix. We need to clear RegMem bit
5268 before calling operand_type_equal. */
5269 i386_operand_type op = i.tm.operand_types[dest];
5270 op.bitfield.regmem = 0;
5271 if ((dest + 1) >= i.operands
5272 || (!operand_type_equal (&op, &regxmm)
5273 && !operand_type_equal (&op, &regymm)))
5274 abort ();
5275 i.vex.register_specifier = i.op[dest].regs;
5276 dest++;
5277 }
5278 }
5279
5280 i.rm.mode = 3;
5281 	      /* One of the register operands will be encoded in the i.rm.reg
5282 	         field, the other in the combined i.rm.mode and i.rm.regmem
5283 fields. If no form of this instruction supports a memory
5284 destination operand, then we assume the source operand may
5285 sometimes be a memory operand and so we need to store the
5286 destination in the i.rm.reg field. */
5287 if (!i.tm.operand_types[dest].bitfield.regmem
5288 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5289 {
5290 i.rm.reg = i.op[dest].regs->reg_num;
5291 i.rm.regmem = i.op[source].regs->reg_num;
5292 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5293 i.rex |= REX_R;
5294 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5295 i.rex |= REX_B;
5296 }
5297 else
5298 {
5299 i.rm.reg = i.op[source].regs->reg_num;
5300 i.rm.regmem = i.op[dest].regs->reg_num;
5301 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5302 i.rex |= REX_B;
5303 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5304 i.rex |= REX_R;
5305 }
5306 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5307 {
5308 if (!i.types[0].bitfield.control
5309 && !i.types[1].bitfield.control)
5310 abort ();
5311 i.rex &= ~(REX_R | REX_B);
5312 add_prefix (LOCK_PREFIX_OPCODE);
5313 }
5314 }
5315 else
5316 { /* If it's not 2 reg operands... */
5317 unsigned int mem;
5318
5319 if (i.mem_operands)
5320 {
5321 unsigned int fake_zero_displacement = 0;
5322 unsigned int op;
5323
5324 for (op = 0; op < i.operands; op++)
5325 if (operand_type_check (i.types[op], anymem))
5326 break;
5327 gas_assert (op < i.operands);
5328
5329 default_seg = &ds;
5330
5331 if (i.base_reg == 0)
5332 {
5333 i.rm.mode = 0;
5334 if (!i.disp_operands)
5335 fake_zero_displacement = 1;
5336 if (i.index_reg == 0)
5337 {
5338 /* Operand is just <disp> */
5339 if (flag_code == CODE_64BIT)
5340 {
5341 /* 64bit mode overwrites the 32bit absolute
5342 addressing by RIP relative addressing and
5343 absolute addressing is encoded by one of the
5344 redundant SIB forms. */
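			 /* e.g. "movl foo, %eax" assembles to 8b 04 25 disp32:
			    modrm mod=00 rm=100 selects a SIB byte, and a SIB
			    with base=101 and no index means plain disp32.  */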
5345 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5346 i.sib.base = NO_BASE_REGISTER;
5347 i.sib.index = NO_INDEX_REGISTER;
5348 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5349 ? disp32s : disp32);
5350 }
5351 else if ((flag_code == CODE_16BIT)
5352 ^ (i.prefix[ADDR_PREFIX] != 0))
5353 {
5354 i.rm.regmem = NO_BASE_REGISTER_16;
5355 i.types[op] = disp16;
5356 }
5357 else
5358 {
5359 i.rm.regmem = NO_BASE_REGISTER;
5360 i.types[op] = disp32;
5361 }
5362 }
5363 else /* !i.base_reg && i.index_reg */
5364 {
5365 if (i.index_reg->reg_num == RegEiz
5366 || i.index_reg->reg_num == RegRiz)
5367 i.sib.index = NO_INDEX_REGISTER;
5368 else
5369 i.sib.index = i.index_reg->reg_num;
5370 i.sib.base = NO_BASE_REGISTER;
5371 i.sib.scale = i.log2_scale_factor;
5372 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5373 i.types[op].bitfield.disp8 = 0;
5374 i.types[op].bitfield.disp16 = 0;
5375 i.types[op].bitfield.disp64 = 0;
5376 if (flag_code != CODE_64BIT)
5377 {
5378 /* Must be 32 bit */
5379 i.types[op].bitfield.disp32 = 1;
5380 i.types[op].bitfield.disp32s = 0;
5381 }
5382 else
5383 {
5384 i.types[op].bitfield.disp32 = 0;
5385 i.types[op].bitfield.disp32s = 1;
5386 }
5387 if ((i.index_reg->reg_flags & RegRex) != 0)
5388 i.rex |= REX_X;
5389 }
5390 }
5391 /* RIP addressing for 64bit mode. */
5392 else if (i.base_reg->reg_num == RegRip ||
5393 i.base_reg->reg_num == RegEip)
5394 {
5395 i.rm.regmem = NO_BASE_REGISTER;
5396 i.types[op].bitfield.disp8 = 0;
5397 i.types[op].bitfield.disp16 = 0;
5398 i.types[op].bitfield.disp32 = 0;
5399 i.types[op].bitfield.disp32s = 1;
5400 i.types[op].bitfield.disp64 = 0;
5401 i.flags[op] |= Operand_PCrel;
5402 if (! i.disp_operands)
5403 fake_zero_displacement = 1;
5404 }
5405 else if (i.base_reg->reg_type.bitfield.reg16)
5406 {
5407 switch (i.base_reg->reg_num)
5408 {
5409 case 3: /* (%bx) */
5410 if (i.index_reg == 0)
5411 i.rm.regmem = 7;
5412 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5413 i.rm.regmem = i.index_reg->reg_num - 6;
5414 break;
5415 case 5: /* (%bp) */
5416 default_seg = &ss;
5417 if (i.index_reg == 0)
5418 {
5419 i.rm.regmem = 6;
5420 if (operand_type_check (i.types[op], disp) == 0)
5421 {
5422 /* fake (%bp) into 0(%bp) */
5423 i.types[op].bitfield.disp8 = 1;
5424 fake_zero_displacement = 1;
5425 }
5426 }
5427 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5428 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5429 break;
5430 default: /* (%si) -> 4 or (%di) -> 5 */
5431 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5432 }
5433 i.rm.mode = mode_from_disp_size (i.types[op]);
5434 }
5435 else /* i.base_reg and 32/64 bit mode */
5436 {
5437 if (flag_code == CODE_64BIT
5438 && operand_type_check (i.types[op], disp))
5439 {
5440 i386_operand_type temp;
5441 operand_type_set (&temp, 0);
5442 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5443 i.types[op] = temp;
5444 if (i.prefix[ADDR_PREFIX] == 0)
5445 i.types[op].bitfield.disp32s = 1;
5446 else
5447 i.types[op].bitfield.disp32 = 1;
5448 }
5449
5450 i.rm.regmem = i.base_reg->reg_num;
5451 if ((i.base_reg->reg_flags & RegRex) != 0)
5452 i.rex |= REX_B;
5453 i.sib.base = i.base_reg->reg_num;
5454 /* x86-64 ignores REX prefix bit here to avoid decoder
5455 complications. */
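	      /* This also catches (%r13): its low 3 bits are 5 as well,
		 so it needs the same fake zero displacement as (%rbp).  */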
5456 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5457 {
5458 default_seg = &ss;
5459 if (i.disp_operands == 0)
5460 {
5461 fake_zero_displacement = 1;
5462 i.types[op].bitfield.disp8 = 1;
5463 }
5464 }
5465 else if (i.base_reg->reg_num == ESP_REG_NUM)
5466 {
5467 default_seg = &ss;
5468 }
5469 i.sib.scale = i.log2_scale_factor;
5470 if (i.index_reg == 0)
5471 {
5472 /* <disp>(%esp) becomes two byte modrm with no index
5473 register. We've already stored the code for esp
5474 	            in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5475 Any base register besides %esp will not use the
5476 extra modrm byte. */
5477 i.sib.index = NO_INDEX_REGISTER;
5478 }
5479 else
5480 {
5481 if (i.index_reg->reg_num == RegEiz
5482 || i.index_reg->reg_num == RegRiz)
5483 i.sib.index = NO_INDEX_REGISTER;
5484 else
5485 i.sib.index = i.index_reg->reg_num;
5486 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5487 if ((i.index_reg->reg_flags & RegRex) != 0)
5488 i.rex |= REX_X;
5489 }
5490
5491 if (i.disp_operands
5492 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5493 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5494 i.rm.mode = 0;
5495 else
5496 i.rm.mode = mode_from_disp_size (i.types[op]);
5497 }
5498
5499 if (fake_zero_displacement)
5500 {
5501 /* Fakes a zero displacement assuming that i.types[op]
5502 holds the correct displacement size. */
5503 expressionS *exp;
5504
5505 gas_assert (i.op[op].disps == 0);
5506 exp = &disp_expressions[i.disp_operands++];
5507 i.op[op].disps = exp;
5508 exp->X_op = O_constant;
5509 exp->X_add_number = 0;
5510 exp->X_add_symbol = (symbolS *) 0;
5511 exp->X_op_symbol = (symbolS *) 0;
5512 }
5513
5514 mem = op;
5515 }
5516 else
5517 mem = ~0;
5518
5519 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5520 {
5521 if (operand_type_check (i.types[0], imm))
5522 i.vex.register_specifier = NULL;
5523 else
5524 {
5525 /* VEX.vvvv encodes one of the sources when the first
5526 operand is not an immediate. */
5527 if (i.tm.opcode_modifier.vexw == VEXW0)
5528 i.vex.register_specifier = i.op[0].regs;
5529 else
5530 i.vex.register_specifier = i.op[1].regs;
5531 }
5532
5533 /* Destination is a XMM register encoded in the ModRM.reg
5534 and VEX.R bit. */
5535 i.rm.reg = i.op[2].regs->reg_num;
5536 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5537 i.rex |= REX_R;
5538
5539 /* ModRM.rm and VEX.B encodes the other source. */
5540 if (!i.mem_operands)
5541 {
5542 i.rm.mode = 3;
5543
5544 if (i.tm.opcode_modifier.vexw == VEXW0)
5545 i.rm.regmem = i.op[1].regs->reg_num;
5546 else
5547 i.rm.regmem = i.op[0].regs->reg_num;
5548
5549 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5550 i.rex |= REX_B;
5551 }
5552 }
5553 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5554 {
5555 i.vex.register_specifier = i.op[2].regs;
5556 if (!i.mem_operands)
5557 {
5558 i.rm.mode = 3;
5559 i.rm.regmem = i.op[1].regs->reg_num;
5560 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5561 i.rex |= REX_B;
5562 }
5563 }
5564 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5565 (if any) based on i.tm.extension_opcode. Again, we must be
5566 careful to make sure that segment/control/debug/test/MMX
5567 registers are coded into the i.rm.reg field. */
5568 else if (i.reg_operands)
5569 {
5570 unsigned int op;
5571 unsigned int vex_reg = ~0;
5572
5573 for (op = 0; op < i.operands; op++)
5574 if (i.types[op].bitfield.reg8
5575 || i.types[op].bitfield.reg16
5576 || i.types[op].bitfield.reg32
5577 || i.types[op].bitfield.reg64
5578 || i.types[op].bitfield.regmmx
5579 || i.types[op].bitfield.regxmm
5580 || i.types[op].bitfield.regymm
5581 || i.types[op].bitfield.sreg2
5582 || i.types[op].bitfield.sreg3
5583 || i.types[op].bitfield.control
5584 || i.types[op].bitfield.debug
5585 || i.types[op].bitfield.test)
5586 break;
5587
5588 if (vex_3_sources)
5589 op = dest;
5590 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5591 {
5592 /* For instructions with VexNDS, the register-only
5593 source operand is encoded in VEX prefix. */
5594 gas_assert (mem != (unsigned int) ~0);
5595
5596 if (op > mem)
5597 {
5598 vex_reg = op++;
5599 gas_assert (op < i.operands);
5600 }
5601 else
5602 {
5603 vex_reg = op + 1;
5604 gas_assert (vex_reg < i.operands);
5605 }
5606 }
5607 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5608 {
5609 /* For instructions with VexNDD, there should be
5610 no memory operand and the register destination
5611 is encoded in VEX prefix. */
5612 gas_assert (i.mem_operands == 0
5613 && (op + 2) == i.operands);
5614 vex_reg = op + 1;
5615 }
5616 else
5617 gas_assert (op < i.operands);
5618
5619 if (vex_reg != (unsigned int) ~0)
5620 {
5621 gas_assert (i.reg_operands == 2);
5622
5623 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5624 &regxmm)
5625 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5626 &regymm))
5627 abort ();
5628
5629 i.vex.register_specifier = i.op[vex_reg].regs;
5630 }
5631
5632 /* Don't set OP operand twice. */
5633 if (vex_reg != op)
5634 {
5635 /* If there is an extension opcode to put here, the
5636 register number must be put into the regmem field. */
5637 if (i.tm.extension_opcode != None)
5638 {
5639 i.rm.regmem = i.op[op].regs->reg_num;
5640 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5641 i.rex |= REX_B;
5642 }
5643 else
5644 {
5645 i.rm.reg = i.op[op].regs->reg_num;
5646 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5647 i.rex |= REX_R;
5648 }
5649 }
5650
5651 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5652 must set it to 3 to indicate this is a register operand
5653 in the regmem field. */
5654 if (!i.mem_operands)
5655 i.rm.mode = 3;
5656 }
5657
5658 /* Fill in i.rm.reg field with extension opcode (if any). */
5659 if (i.tm.extension_opcode != None)
5660 i.rm.reg = i.tm.extension_opcode;
5661 }
5662 return default_seg;
5663 }
5664
5665 static void
5666 output_branch (void)
5667 {
5668 char *p;
5669 int code16;
5670 int prefix;
5671 relax_substateT subtype;
5672 symbolS *sym;
5673 offsetT off;
5674
5675 code16 = 0;
5676 if (flag_code == CODE_16BIT)
5677 code16 = CODE16;
5678
5679 prefix = 0;
5680 if (i.prefix[DATA_PREFIX] != 0)
5681 {
5682 prefix = 1;
5683 i.prefixes -= 1;
5684 code16 ^= CODE16;
5685 }
5686 /* Pentium4 branch hints. */
5687 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5688 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5689 {
5690 prefix++;
5691 i.prefixes--;
5692 }
5693 if (i.prefix[REX_PREFIX] != 0)
5694 {
5695 prefix++;
5696 i.prefixes--;
5697 }
5698
5699 if (i.prefixes != 0 && !intel_syntax)
5700 as_warn (_("skipping prefixes on this instruction"));
5701
5702 /* It's always a symbol; End frag & setup for relax.
5703 Make sure there is enough room in this frag for the largest
5704 instruction we may generate in md_convert_frag. This is 2
5705 bytes for the opcode and room for the prefix and largest
5706 displacement. */
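  /* That is: any prefix bytes, a two byte 0x0f 0x8N conditional jump
     opcode, and a 4 byte displacement.  */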
5707 frag_grow (prefix + 2 + 4);
5708 /* Prefix and 1 opcode byte go in fr_fix. */
5709 p = frag_more (prefix + 1);
5710 if (i.prefix[DATA_PREFIX] != 0)
5711 *p++ = DATA_PREFIX_OPCODE;
5712 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5713 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5714 *p++ = i.prefix[SEG_PREFIX];
5715 if (i.prefix[REX_PREFIX] != 0)
5716 *p++ = i.prefix[REX_PREFIX];
5717 *p = i.tm.base_opcode;
5718
5719 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5720 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
5721 else if (cpu_arch_flags.bitfield.cpui386)
5722 subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
5723 else
5724 subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
5725 subtype |= code16;
5726
5727 sym = i.op[0].disps->X_add_symbol;
5728 off = i.op[0].disps->X_add_number;
5729
5730 if (i.op[0].disps->X_op != O_constant
5731 && i.op[0].disps->X_op != O_symbol)
5732 {
5733 /* Handle complex expressions. */
5734 sym = make_expr_symbol (i.op[0].disps);
5735 off = 0;
5736 }
5737
5738 /* 1 possible extra opcode + 4 byte displacement go in var part.
5739 Pass reloc in fr_var. */
5740 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5741 }
5742
5743 static void
5744 output_jump (void)
5745 {
5746 char *p;
5747 int size;
5748 fixS *fixP;
5749
5750 if (i.tm.opcode_modifier.jumpbyte)
5751 {
5752 /* This is a loop or jecxz type instruction. */
5753 size = 1;
5754 if (i.prefix[ADDR_PREFIX] != 0)
5755 {
5756 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5757 i.prefixes -= 1;
5758 }
5759 /* Pentium4 branch hints. */
5760 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5761 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5762 {
5763 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5764 i.prefixes--;
5765 }
5766 }
5767 else
5768 {
5769 int code16;
5770
5771 code16 = 0;
5772 if (flag_code == CODE_16BIT)
5773 code16 = CODE16;
5774
5775 if (i.prefix[DATA_PREFIX] != 0)
5776 {
5777 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5778 i.prefixes -= 1;
5779 code16 ^= CODE16;
5780 }
5781
5782 size = 4;
5783 if (code16)
5784 size = 2;
5785 }
5786
5787 if (i.prefix[REX_PREFIX] != 0)
5788 {
5789 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5790 i.prefixes -= 1;
5791 }
5792
5793 if (i.prefixes != 0 && !intel_syntax)
5794 as_warn (_("skipping prefixes on this instruction"));
5795
5796 p = frag_more (1 + size);
5797 *p++ = i.tm.base_opcode;
5798
5799 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5800 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5801
5802 /* All jumps handled here are signed, but don't use a signed limit
5803 check for 32 and 16 bit jumps as we want to allow wrap around at
5804 4G and 64k respectively. */
5805 if (size == 1)
5806 fixP->fx_signed = 1;
5807 }
5808
5809 static void
5810 output_interseg_jump (void)
5811 {
5812 char *p;
5813 int size;
5814 int prefix;
5815 int code16;
5816
5817 code16 = 0;
5818 if (flag_code == CODE_16BIT)
5819 code16 = CODE16;
5820
5821 prefix = 0;
5822 if (i.prefix[DATA_PREFIX] != 0)
5823 {
5824 prefix = 1;
5825 i.prefixes -= 1;
5826 code16 ^= CODE16;
5827 }
5828 if (i.prefix[REX_PREFIX] != 0)
5829 {
5830 prefix++;
5831 i.prefixes -= 1;
5832 }
5833
5834 size = 4;
5835 if (code16)
5836 size = 2;
5837
5838 if (i.prefixes != 0 && !intel_syntax)
5839 as_warn (_("skipping prefixes on this instruction"));
5840
5841 /* 1 opcode; 2 segment; offset */
5842 p = frag_more (prefix + 1 + 2 + size);
5843
5844 if (i.prefix[DATA_PREFIX] != 0)
5845 *p++ = DATA_PREFIX_OPCODE;
5846
5847 if (i.prefix[REX_PREFIX] != 0)
5848 *p++ = i.prefix[REX_PREFIX];
5849
5850 *p++ = i.tm.base_opcode;
5851 if (i.op[1].imms->X_op == O_constant)
5852 {
5853 offsetT n = i.op[1].imms->X_add_number;
5854
5855 if (size == 2
5856 && !fits_in_unsigned_word (n)
5857 && !fits_in_signed_word (n))
5858 {
5859 as_bad (_("16-bit jump out of range"));
5860 return;
5861 }
5862 md_number_to_chars (p, n, size);
5863 }
5864 else
5865 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5866 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5867 if (i.op[0].imms->X_op != O_constant)
5868 as_bad (_("can't handle non absolute segment in `%s'"),
5869 i.tm.name);
5870 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5871 }
5872
5873 static void
5874 output_insn (void)
5875 {
5876 fragS *insn_start_frag;
5877 offsetT insn_start_off;
5878
5879 /* Tie dwarf2 debug info to the address at the start of the insn.
5880 We can't do this after the insn has been output as the current
5881 frag may have been closed off. eg. by frag_var. */
5882 dwarf2_emit_insn (0);
5883
5884 insn_start_frag = frag_now;
5885 insn_start_off = frag_now_fix ();
5886
5887 /* Output jumps. */
5888 if (i.tm.opcode_modifier.jump)
5889 output_branch ();
5890 else if (i.tm.opcode_modifier.jumpbyte
5891 || i.tm.opcode_modifier.jumpdword)
5892 output_jump ();
5893 else if (i.tm.opcode_modifier.jumpintersegment)
5894 output_interseg_jump ();
5895 else
5896 {
5897 /* Output normal instructions here. */
5898 char *p;
5899 unsigned char *q;
5900 unsigned int j;
5901 unsigned int prefix;
5902
5903 /* Since the VEX prefix contains the implicit prefix, we don't
5904 need the explicit prefix. */
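	  /* (The 0x66/0xf2/0xf3 opcode prefixes of SSE insns are folded
	     into the VEX.pp field, so they must not be emitted again.)  */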
5905 if (!i.tm.opcode_modifier.vex)
5906 {
5907 switch (i.tm.opcode_length)
5908 {
5909 case 3:
5910 if (i.tm.base_opcode & 0xff000000)
5911 {
5912 prefix = (i.tm.base_opcode >> 24) & 0xff;
5913 goto check_prefix;
5914 }
5915 break;
5916 case 2:
5917 if ((i.tm.base_opcode & 0xff0000) != 0)
5918 {
5919 prefix = (i.tm.base_opcode >> 16) & 0xff;
5920 if (i.tm.cpu_flags.bitfield.cpupadlock)
5921 {
5922 check_prefix:
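		      /* The opcode table carries a mandatory 0xf3 byte for
			 e.g. the PadLock insns; if the user also wrote an
			 explicit "rep"/"repe" prefix, don't add it again.  */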
5923 if (prefix != REPE_PREFIX_OPCODE
5924 || (i.prefix[REP_PREFIX]
5925 != REPE_PREFIX_OPCODE))
5926 add_prefix (prefix);
5927 }
5928 else
5929 add_prefix (prefix);
5930 }
5931 break;
5932 case 1:
5933 break;
5934 default:
5935 abort ();
5936 }
5937
5938 /* The prefix bytes. */
5939 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5940 if (*q)
5941 FRAG_APPEND_1_CHAR (*q);
5942 }
5943
5944 if (i.tm.opcode_modifier.vex)
5945 {
5946 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5947 if (*q)
5948 switch (j)
5949 {
5950 case REX_PREFIX:
5951 /* REX byte is encoded in VEX prefix. */
5952 break;
5953 case SEG_PREFIX:
5954 case ADDR_PREFIX:
5955 FRAG_APPEND_1_CHAR (*q);
5956 break;
5957 default:
5958 /* There should be no other prefixes for instructions
5959 with VEX prefix. */
5960 abort ();
5961 }
5962
5963 /* Now the VEX prefix. */
5964 p = frag_more (i.vex.length);
5965 for (j = 0; j < i.vex.length; j++)
5966 p[j] = i.vex.bytes[j];
5967 }
5968
5969 /* Now the opcode; be careful about word order here! */
5970 if (i.tm.opcode_length == 1)
5971 {
5972 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
5973 }
5974 else
5975 {
5976 switch (i.tm.opcode_length)
5977 {
5978 case 3:
5979 p = frag_more (3);
5980 *p++ = (i.tm.base_opcode >> 16) & 0xff;
5981 break;
5982 case 2:
5983 p = frag_more (2);
5984 break;
5985 default:
5986 abort ();
5987 break;
5988 }
5989
5990 /* Put out high byte first: can't use md_number_to_chars! */
5991 *p++ = (i.tm.base_opcode >> 8) & 0xff;
5992 *p = i.tm.base_opcode & 0xff;
5993 }
5994
5995 /* Now the modrm byte and sib byte (if present). */
5996 if (i.tm.opcode_modifier.modrm)
5997 {
5998 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
5999 | i.rm.reg << 3
6000 | i.rm.mode << 6));
6001 /* If i.rm.regmem == ESP (4)
6002 && i.rm.mode != (Register mode)
6003 && not 16 bit
6004 	         ==> need a SIB byte. */
6005 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6006 && i.rm.mode != 3
6007 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6008 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6009 | i.sib.index << 3
6010 | i.sib.scale << 6));
6011 }
6012
6013 if (i.disp_operands)
6014 output_disp (insn_start_frag, insn_start_off);
6015
6016 if (i.imm_operands)
6017 output_imm (insn_start_frag, insn_start_off);
6018 }
6019
6020 #ifdef DEBUG386
6021 if (flag_debug)
6022 {
6023 pi ("" /*line*/, &i);
6024 }
6025 #endif /* DEBUG386 */
6026 }
6027
6028 /* Return the size of the displacement operand N. */
6029
6030 static int
6031 disp_size (unsigned int n)
6032 {
6033 int size = 4;
6034 if (i.types[n].bitfield.disp64)
6035 size = 8;
6036 else if (i.types[n].bitfield.disp8)
6037 size = 1;
6038 else if (i.types[n].bitfield.disp16)
6039 size = 2;
6040 return size;
6041 }
6042
6043 /* Return the size of the immediate operand N. */
6044
6045 static int
6046 imm_size (unsigned int n)
6047 {
6048 int size = 4;
6049 if (i.types[n].bitfield.imm64)
6050 size = 8;
6051 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6052 size = 1;
6053 else if (i.types[n].bitfield.imm16)
6054 size = 2;
6055 return size;
6056 }
6057
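/* Output the displacement operands of the current instruction, either
   as constant bytes or as fixups/relocs.  */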
6058 static void
6059 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6060 {
6061 char *p;
6062 unsigned int n;
6063
6064 for (n = 0; n < i.operands; n++)
6065 {
6066 if (operand_type_check (i.types[n], disp))
6067 {
6068 if (i.op[n].disps->X_op == O_constant)
6069 {
6070 int size = disp_size (n);
6071 offsetT val;
6072
6073 val = offset_in_range (i.op[n].disps->X_add_number,
6074 size);
6075 p = frag_more (size);
6076 md_number_to_chars (p, val, size);
6077 }
6078 else
6079 {
6080 enum bfd_reloc_code_real reloc_type;
6081 int size = disp_size (n);
6082 int sign = i.types[n].bitfield.disp32s;
6083 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6084
6085 /* We can't have 8 bit displacement here. */
6086 gas_assert (!i.types[n].bitfield.disp8);
6087
6088 /* The PC relative address is computed relative
6089 to the instruction boundary, so if an immediate
6090 field follows, we need to adjust the value.  */
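/* E.g. for `testl $imm32, sym(%rip)' the 4-byte immediate follows
   the rel32 displacement field, so the displacement gets biased
   by -4 here.  */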
6091 if (pcrel && i.imm_operands)
6092 {
6093 unsigned int n1;
6094 int sz = 0;
6095
6096 for (n1 = 0; n1 < i.operands; n1++)
6097 if (operand_type_check (i.types[n1], imm))
6098 {
6099 /* Only one immediate is allowed for PC
6100 relative address. */
6101 gas_assert (sz == 0);
6102 sz = imm_size (n1);
6103 i.op[n].disps->X_add_number -= sz;
6104 }
6105 /* We should find the immediate. */
6106 gas_assert (sz != 0);
6107 }
6108
6109 p = frag_more (size);
6110 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6111 if (GOT_symbol
6112 && GOT_symbol == i.op[n].disps->X_add_symbol
6113 && (((reloc_type == BFD_RELOC_32
6114 || reloc_type == BFD_RELOC_X86_64_32S
6115 || (reloc_type == BFD_RELOC_64
6116 && object_64bit))
6117 && (i.op[n].disps->X_op == O_symbol
6118 || (i.op[n].disps->X_op == O_add
6119 && ((symbol_get_value_expression
6120 (i.op[n].disps->X_op_symbol)->X_op)
6121 == O_subtract))))
6122 || reloc_type == BFD_RELOC_32_PCREL))
6123 {
6124 offsetT add;
6125
6126 if (insn_start_frag == frag_now)
6127 add = (p - frag_now->fr_literal) - insn_start_off;
6128 else
6129 {
6130 fragS *fr;
6131
6132 add = insn_start_frag->fr_fix - insn_start_off;
6133 for (fr = insn_start_frag->fr_next;
6134 fr && fr != frag_now; fr = fr->fr_next)
6135 add += fr->fr_fix;
6136 add += p - frag_now->fr_literal;
6137 }
6138
6139 if (!object_64bit)
6140 {
6141 reloc_type = BFD_RELOC_386_GOTPC;
6142 i.op[n].imms->X_add_number += add;
6143 }
6144 else if (reloc_type == BFD_RELOC_64)
6145 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6146 else
6147 /* Don't do the adjustment for x86-64, as there
6148 the pcrel addressing is relative to the _next_
6149 insn, and that is taken care of in other code. */
6150 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6151 }
6152 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6153 i.op[n].disps, pcrel, reloc_type);
6154 }
6155 }
6156 }
6157 }
6158
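/* Output the immediate operands of the current instruction, either as
   constant bytes or as fixups/relocs.  */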
6159 static void
6160 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6161 {
6162 char *p;
6163 unsigned int n;
6164
6165 for (n = 0; n < i.operands; n++)
6166 {
6167 if (operand_type_check (i.types[n], imm))
6168 {
6169 if (i.op[n].imms->X_op == O_constant)
6170 {
6171 int size = imm_size (n);
6172 offsetT val;
6173
6174 val = offset_in_range (i.op[n].imms->X_add_number,
6175 size);
6176 p = frag_more (size);
6177 md_number_to_chars (p, val, size);
6178 }
6179 else
6180 {
6181 /* Not absolute_section.
6182 Need a 32-bit fixup (don't support 8bit
6183 non-absolute imms). Try to support other
6184 sizes ... */
6185 enum bfd_reloc_code_real reloc_type;
6186 int size = imm_size (n);
6187 int sign;
6188
6189 if (i.types[n].bitfield.imm32s
6190 && (i.suffix == QWORD_MNEM_SUFFIX
6191 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6192 sign = 1;
6193 else
6194 sign = 0;
6195
6196 p = frag_more (size);
6197 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6198
6199 /* This is tough to explain. We end up with this one if we
6200 * have operands that look like
6201 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6202 * obtain the absolute address of the GOT, and it is strongly
6203 * preferable from a performance point of view to avoid using
6204 * a runtime relocation for this. The actual sequence of
6205 * instructions often looks something like:
6206 *
6207 * call .L66
6208 * .L66:
6209 * popl %ebx
6210 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6211 *
6212 * The call and pop essentially return the absolute address
6213 * of the label .L66 and store it in %ebx. The linker itself
6214 * will ultimately change the first operand of the addl so
6215 * that %ebx points to the GOT, but to keep things simple, the
6216 * .o file must have this operand set so that it generates not
6217 * the absolute address of .L66, but the absolute address of
6218 * itself. This allows the linker to simply treat a GOTPC
6219 * relocation as asking for a pcrel offset to the GOT to be
6220 * added in, and the addend of the relocation is stored in the
6221 * operand field for the instruction itself.
6222 *
6223 * Our job here is to fix the operand so that it would add
6224 * the correct offset so that %ebx would point to itself. The
6225 * thing that is tricky is that .-.L66 will point to the
6226 * beginning of the instruction, so we need to further modify
6227 * the operand so that it will point to itself. There are
6228 * other cases where you have something like:
6229 *
6230 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6231 *
6232 * and here no correction would be required. Internally in
6233 * the assembler we treat operands of this form as not being
6234 * pcrel since the '.' is explicitly mentioned, and I wonder
6235 * whether it would simplify matters to do it this way. Who
6236 * knows. In earlier versions of the PIC patches, the
6237 * pcrel_adjust field was used to store the correction, but
6238 * since the expression is not pcrel, I felt it would be
6239 * confusing to do it this way. */
6240
6241 if ((reloc_type == BFD_RELOC_32
6242 || reloc_type == BFD_RELOC_X86_64_32S
6243 || reloc_type == BFD_RELOC_64)
6244 && GOT_symbol
6245 && GOT_symbol == i.op[n].imms->X_add_symbol
6246 && (i.op[n].imms->X_op == O_symbol
6247 || (i.op[n].imms->X_op == O_add
6248 && ((symbol_get_value_expression
6249 (i.op[n].imms->X_op_symbol)->X_op)
6250 == O_subtract))))
6251 {
6252 offsetT add;
6253
6254 if (insn_start_frag == frag_now)
6255 add = (p - frag_now->fr_literal) - insn_start_off;
6256 else
6257 {
6258 fragS *fr;
6259
6260 add = insn_start_frag->fr_fix - insn_start_off;
6261 for (fr = insn_start_frag->fr_next;
6262 fr && fr != frag_now; fr = fr->fr_next)
6263 add += fr->fr_fix;
6264 add += p - frag_now->fr_literal;
6265 }
6266
6267 if (!object_64bit)
6268 reloc_type = BFD_RELOC_386_GOTPC;
6269 else if (size == 4)
6270 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6271 else if (size == 8)
6272 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6273 i.op[n].imms->X_add_number += add;
6274 }
6275 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6276 i.op[n].imms, 0, reloc_type);
6277 }
6278 }
6279 }
6280 }
6281 \f
6282 /* x86_cons_fix_new is called via the expression parsing code when a
6283 reloc is needed. We use this hook to get the correct .got reloc. */
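/* E.g. for `.long foo@GOTOFF' on ELF targets, x86_cons() below stashes
   the reloc chosen by lex_got() in got_reloc, and it is consumed (and
   reset) here when the fixup for the constant is created.  */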
6284 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6285 static int cons_sign = -1;
6286
6287 void
6288 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6289 expressionS *exp)
6290 {
6291 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6292
6293 got_reloc = NO_RELOC;
6294
6295 #ifdef TE_PE
6296 if (exp->X_op == O_secrel)
6297 {
6298 exp->X_op = O_symbol;
6299 r = BFD_RELOC_32_SECREL;
6300 }
6301 #endif
6302
6303 fix_new_exp (frag, off, len, exp, 0, r);
6304 }
6305
6306 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6307 # define lex_got(reloc, adjust, types) NULL
6308 #else
6309 /* Parse operands of the form
6310 <symbol>@GOTOFF+<nnn>
6311 and similar .plt or .got references.
6312
6313 If we find one, set up the correct relocation in RELOC and copy the
6314 input string, minus the `@GOTOFF' into a malloc'd buffer for
6315 parsing by the calling routine. Return this buffer, and if ADJUST
6316 is non-null set it to the length of the string we removed from the
6317 input line. Otherwise return NULL. */
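/* For example, given `foo@GOTOFF+4' in 32-bit mode this sets *REL to
   BFD_RELOC_386_GOTOFF and returns the buffer "foo +4", with the
   reloc token replaced by a blank.  */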
6318 static char *
6319 lex_got (enum bfd_reloc_code_real *rel,
6320 int *adjust,
6321 i386_operand_type *types)
6322 {
6323 /* Some of the relocations depend on the size of the field being
6324 relocated.  But in our callers i386_immediate and i386_displacement
6325 we don't yet know the operand size (this will be set by insn
6326 matching). Hence we record the word32 relocation here,
6327 and adjust the reloc according to the real size in reloc(). */
6328 static const struct {
6329 const char *str;
6330 int len;
6331 const enum bfd_reloc_code_real rel[2];
6332 const i386_operand_type types64;
6333 } gotrel[] = {
6334 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6335 BFD_RELOC_X86_64_PLTOFF64 },
6336 OPERAND_TYPE_IMM64 },
6337 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6338 BFD_RELOC_X86_64_PLT32 },
6339 OPERAND_TYPE_IMM32_32S_DISP32 },
6340 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6341 BFD_RELOC_X86_64_GOTPLT64 },
6342 OPERAND_TYPE_IMM64_DISP64 },
6343 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6344 BFD_RELOC_X86_64_GOTOFF64 },
6345 OPERAND_TYPE_IMM64_DISP64 },
6346 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6347 BFD_RELOC_X86_64_GOTPCREL },
6348 OPERAND_TYPE_IMM32_32S_DISP32 },
6349 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6350 BFD_RELOC_X86_64_TLSGD },
6351 OPERAND_TYPE_IMM32_32S_DISP32 },
6352 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6353 _dummy_first_bfd_reloc_code_real },
6354 OPERAND_TYPE_NONE },
6355 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6356 BFD_RELOC_X86_64_TLSLD },
6357 OPERAND_TYPE_IMM32_32S_DISP32 },
6358 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6359 BFD_RELOC_X86_64_GOTTPOFF },
6360 OPERAND_TYPE_IMM32_32S_DISP32 },
6361 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6362 BFD_RELOC_X86_64_TPOFF32 },
6363 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6364 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6365 _dummy_first_bfd_reloc_code_real },
6366 OPERAND_TYPE_NONE },
6367 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6368 BFD_RELOC_X86_64_DTPOFF32 },
6369 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6370 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6371 _dummy_first_bfd_reloc_code_real },
6372 OPERAND_TYPE_NONE },
6373 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6374 _dummy_first_bfd_reloc_code_real },
6375 OPERAND_TYPE_NONE },
6376 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6377 BFD_RELOC_X86_64_GOT32 },
6378 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6379 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6380 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6381 OPERAND_TYPE_IMM32_32S_DISP32 },
6382 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6383 BFD_RELOC_X86_64_TLSDESC_CALL },
6384 OPERAND_TYPE_IMM32_32S_DISP32 },
6385 };
6386 char *cp;
6387 unsigned int j;
6388
6389 if (!IS_ELF)
6390 return NULL;
6391
6392 for (cp = input_line_pointer; *cp != '@'; cp++)
6393 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6394 return NULL;
6395
6396 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6397 {
6398 int len = gotrel[j].len;
6399 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6400 {
6401 if (gotrel[j].rel[object_64bit] != 0)
6402 {
6403 int first, second;
6404 char *tmpbuf, *past_reloc;
6405
6406 *rel = gotrel[j].rel[object_64bit];
6407 if (adjust)
6408 *adjust = len;
6409
6410 if (types)
6411 {
6412 if (flag_code != CODE_64BIT)
6413 {
6414 types->bitfield.imm32 = 1;
6415 types->bitfield.disp32 = 1;
6416 }
6417 else
6418 *types = gotrel[j].types64;
6419 }
6420
6421 if (GOT_symbol == NULL)
6422 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6423
6424 /* The length of the first part of our input line. */
6425 first = cp - input_line_pointer;
6426
6427 /* The second part goes from after the reloc token until
6428 (and including) an end_of_line char or comma. */
6429 past_reloc = cp + 1 + len;
6430 cp = past_reloc;
6431 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6432 ++cp;
6433 second = cp + 1 - past_reloc;
6434
6435 /* Allocate and copy string. The trailing NUL shouldn't
6436 be necessary, but be safe. */
6437 tmpbuf = (char *) xmalloc (first + second + 2);
6438 memcpy (tmpbuf, input_line_pointer, first);
6439 if (second != 0 && *past_reloc != ' ')
6440 /* Replace the relocation token with ' ', so that
6441 errors like foo@GOTOFF1 will be detected. */
6442 tmpbuf[first++] = ' ';
6443 memcpy (tmpbuf + first, past_reloc, second);
6444 tmpbuf[first + second] = '\0';
6445 return tmpbuf;
6446 }
6447
6448 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6449 gotrel[j].str, 1 << (5 + object_64bit));
6450 return NULL;
6451 }
6452 }
6453
6454 /* Might be a symbol version string. Don't as_bad here. */
6455 return NULL;
6456 }
6457
6458 void
6459 x86_cons (expressionS *exp, int size)
6460 {
6461 intel_syntax = -intel_syntax;
6462
6463 exp->X_md = 0;
6464 if (size == 4 || (object_64bit && size == 8))
6465 {
6466 /* Handle @GOTOFF and the like in an expression. */
6467 char *save;
6468 char *gotfree_input_line;
6469 int adjust;
6470
6471 save = input_line_pointer;
6472 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6473 if (gotfree_input_line)
6474 input_line_pointer = gotfree_input_line;
6475
6476 expression (exp);
6477
6478 if (gotfree_input_line)
6479 {
6480 /* expression () has merrily parsed up to the end of line,
6481 or a comma - in the wrong buffer. Transfer how far
6482 input_line_pointer has moved to the right buffer. */
6483 input_line_pointer = (save
6484 + (input_line_pointer - gotfree_input_line)
6485 + adjust);
6486 free (gotfree_input_line);
6487 if (exp->X_op == O_constant
6488 || exp->X_op == O_absent
6489 || exp->X_op == O_illegal
6490 || i386_is_register (exp, intel_syntax)
6491 || exp->X_op == O_big)
6492 {
6493 char c = *input_line_pointer;
6494 *input_line_pointer = 0;
6495 as_bad (_("missing or invalid expression `%s'"), save);
6496 *input_line_pointer = c;
6497 }
6498 }
6499 }
6500 else
6501 expression (exp);
6502
6503 intel_syntax = -intel_syntax;
6504
6505 if (intel_syntax)
6506 i386_intel_simplify (exp);
6507 }
6508 #endif
6509
6510 static void
6511 signed_cons (int size)
6512 {
6513 if (flag_code == CODE_64BIT)
6514 cons_sign = 1;
6515 cons (size);
6516 cons_sign = -1;
6517 }
6518
6519 #ifdef TE_PE
6520 static void
6521 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6522 {
6524 expressionS exp;
6525
6526 do
6527 {
6528 expression (&exp);
6529 if (exp.X_op == O_symbol)
6530 exp.X_op = O_secrel;
6531
6532 emit_expr (&exp, 4);
6533 }
6534 while (*input_line_pointer++ == ',');
6535
6536 input_line_pointer--;
6537 demand_empty_rest_of_line ();
6538 }
6539 #endif
6540
6541 static int
6542 i386_immediate (char *imm_start)
6543 {
6544 char *save_input_line_pointer;
6545 char *gotfree_input_line;
6546 segT exp_seg = 0;
6547 expressionS *exp;
6548 i386_operand_type types;
6549
6550 operand_type_set (&types, ~0);
6551
6552 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6553 {
6554 as_bad (_("at most %d immediate operands are allowed"),
6555 MAX_IMMEDIATE_OPERANDS);
6556 return 0;
6557 }
6558
6559 exp = &im_expressions[i.imm_operands++];
6560 i.op[this_operand].imms = exp;
6561
6562 if (is_space_char (*imm_start))
6563 ++imm_start;
6564
6565 save_input_line_pointer = input_line_pointer;
6566 input_line_pointer = imm_start;
6567
6568 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6569 if (gotfree_input_line)
6570 input_line_pointer = gotfree_input_line;
6571
6572 exp_seg = expression (exp);
6573
6574 SKIP_WHITESPACE ();
6575 if (*input_line_pointer)
6576 as_bad (_("junk `%s' after expression"), input_line_pointer);
6577
6578 input_line_pointer = save_input_line_pointer;
6579 if (gotfree_input_line)
6580 {
6581 free (gotfree_input_line);
6582
6583 if (exp->X_op == O_constant || exp->X_op == O_register)
6584 exp->X_op = O_illegal;
6585 }
6586
6587 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6588 }
6589
6590 static int
6591 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6592 i386_operand_type types, const char *imm_start)
6593 {
6594 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6595 {
6596 if (imm_start)
6597 as_bad (_("missing or invalid immediate expression `%s'"),
6598 imm_start);
6599 return 0;
6600 }
6601 else if (exp->X_op == O_constant)
6602 {
6603 /* Size it properly later. */
6604 i.types[this_operand].bitfield.imm64 = 1;
6605 /* If BFD64, sign extend val. */
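/* (v ^ 0x80000000) - 0x80000000 sign extends bit 31 of a value known
   to fit in 32 bits, e.g. 0xffffffff becomes -1.  */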
6606 if (!use_rela_relocations
6607 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6608 exp->X_add_number
6609 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6610 }
6611 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6612 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6613 && exp_seg != absolute_section
6614 && exp_seg != text_section
6615 && exp_seg != data_section
6616 && exp_seg != bss_section
6617 && exp_seg != undefined_section
6618 && !bfd_is_com_section (exp_seg))
6619 {
6620 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6621 return 0;
6622 }
6623 #endif
6624 else if (!intel_syntax && exp->X_op == O_register)
6625 {
6626 if (imm_start)
6627 as_bad (_("illegal immediate register operand %s"), imm_start);
6628 return 0;
6629 }
6630 else
6631 {
6632 /* This is an address. The size of the address will be
6633 determined later, depending on destination register,
6634 suffix, or the default for the section. */
6635 i.types[this_operand].bitfield.imm8 = 1;
6636 i.types[this_operand].bitfield.imm16 = 1;
6637 i.types[this_operand].bitfield.imm32 = 1;
6638 i.types[this_operand].bitfield.imm32s = 1;
6639 i.types[this_operand].bitfield.imm64 = 1;
6640 i.types[this_operand] = operand_type_and (i.types[this_operand],
6641 types);
6642 }
6643
6644 return 1;
6645 }
6646
6647 static char *
6648 i386_scale (char *scale)
6649 {
6650 offsetT val;
6651 char *save = input_line_pointer;
6652
6653 input_line_pointer = scale;
6654 val = get_absolute_expression ();
6655
6656 switch (val)
6657 {
6658 case 1:
6659 i.log2_scale_factor = 0;
6660 break;
6661 case 2:
6662 i.log2_scale_factor = 1;
6663 break;
6664 case 4:
6665 i.log2_scale_factor = 2;
6666 break;
6667 case 8:
6668 i.log2_scale_factor = 3;
6669 break;
6670 default:
6671 {
6672 char sep = *input_line_pointer;
6673
6674 *input_line_pointer = '\0';
6675 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6676 scale);
6677 *input_line_pointer = sep;
6678 input_line_pointer = save;
6679 return NULL;
6680 }
6681 }
6682 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6683 {
6684 as_warn (_("scale factor of %d without an index register"),
6685 1 << i.log2_scale_factor);
6686 i.log2_scale_factor = 0;
6687 }
6688 scale = input_line_pointer;
6689 input_line_pointer = save;
6690 return scale;
6691 }
6692
6693 static int
6694 i386_displacement (char *disp_start, char *disp_end)
6695 {
6696 expressionS *exp;
6697 segT exp_seg = 0;
6698 char *save_input_line_pointer;
6699 char *gotfree_input_line;
6700 int override;
6701 i386_operand_type bigdisp, types = anydisp;
6702 int ret;
6703
6704 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6705 {
6706 as_bad (_("at most %d displacement operands are allowed"),
6707 MAX_MEMORY_OPERANDS);
6708 return 0;
6709 }
6710
6711 operand_type_set (&bigdisp, 0);
6712 if ((i.types[this_operand].bitfield.jumpabsolute)
6713 || (!current_templates->start->opcode_modifier.jump
6714 && !current_templates->start->opcode_modifier.jumpdword))
6715 {
6716 bigdisp.bitfield.disp32 = 1;
6717 override = (i.prefix[ADDR_PREFIX] != 0);
6718 if (flag_code == CODE_64BIT)
6719 {
6720 if (!override)
6721 {
6722 bigdisp.bitfield.disp32s = 1;
6723 bigdisp.bitfield.disp64 = 1;
6724 }
6725 }
6726 else if ((flag_code == CODE_16BIT) ^ override)
6727 {
6728 bigdisp.bitfield.disp32 = 0;
6729 bigdisp.bitfield.disp16 = 1;
6730 }
6731 }
6732 else
6733 {
6734 /* For PC-relative branches, the width of the displacement
6735 is dependent upon data size, not address size. */
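/* E.g. in 32-bit mode a `jmp' carrying a 0x66 data size prefix
   takes a 16-bit displacement.  */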
6736 override = (i.prefix[DATA_PREFIX] != 0);
6737 if (flag_code == CODE_64BIT)
6738 {
6739 if (override || i.suffix == WORD_MNEM_SUFFIX)
6740 bigdisp.bitfield.disp16 = 1;
6741 else
6742 {
6743 bigdisp.bitfield.disp32 = 1;
6744 bigdisp.bitfield.disp32s = 1;
6745 }
6746 }
6747 else
6748 {
6749 if (!override)
6750 override = (i.suffix == (flag_code != CODE_16BIT
6751 ? WORD_MNEM_SUFFIX
6752 : LONG_MNEM_SUFFIX));
6753 bigdisp.bitfield.disp32 = 1;
6754 if ((flag_code == CODE_16BIT) ^ override)
6755 {
6756 bigdisp.bitfield.disp32 = 0;
6757 bigdisp.bitfield.disp16 = 1;
6758 }
6759 }
6760 }
6761 i.types[this_operand] = operand_type_or (i.types[this_operand],
6762 bigdisp);
6763
6764 exp = &disp_expressions[i.disp_operands];
6765 i.op[this_operand].disps = exp;
6766 i.disp_operands++;
6767 save_input_line_pointer = input_line_pointer;
6768 input_line_pointer = disp_start;
6769 END_STRING_AND_SAVE (disp_end);
6770
6771 #ifndef GCC_ASM_O_HACK
6772 #define GCC_ASM_O_HACK 0
6773 #endif
6774 #if GCC_ASM_O_HACK
6775 END_STRING_AND_SAVE (disp_end + 1);
6776 if (i.types[this_operand].bitfield.baseIndex
6777 && displacement_string_end[-1] == '+')
6778 {
6779 /* This hack is to avoid a warning when using the "o"
6780 constraint within gcc asm statements.
6781 For instance:
6782
6783 #define _set_tssldt_desc(n,addr,limit,type) \
6784 __asm__ __volatile__ ( \
6785 "movw %w2,%0\n\t" \
6786 "movw %w1,2+%0\n\t" \
6787 "rorl $16,%1\n\t" \
6788 "movb %b1,4+%0\n\t" \
6789 "movb %4,5+%0\n\t" \
6790 "movb $0,6+%0\n\t" \
6791 "movb %h1,7+%0\n\t" \
6792 "rorl $16,%1" \
6793 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6794
6795 This works great except that the output assembler ends
6796 up looking a bit weird if it turns out that there is
6797 no offset. You end up producing code that looks like:
6798
6799 #APP
6800 movw $235,(%eax)
6801 movw %dx,2+(%eax)
6802 rorl $16,%edx
6803 movb %dl,4+(%eax)
6804 movb $137,5+(%eax)
6805 movb $0,6+(%eax)
6806 movb %dh,7+(%eax)
6807 rorl $16,%edx
6808 #NO_APP
6809
6810 So here we provide the missing zero. */
6811
6812 *displacement_string_end = '0';
6813 }
6814 #endif
6815 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6816 if (gotfree_input_line)
6817 input_line_pointer = gotfree_input_line;
6818
6819 exp_seg = expression (exp);
6820
6821 SKIP_WHITESPACE ();
6822 if (*input_line_pointer)
6823 as_bad (_("junk `%s' after expression"), input_line_pointer);
6824 #if GCC_ASM_O_HACK
6825 RESTORE_END_STRING (disp_end + 1);
6826 #endif
6827 input_line_pointer = save_input_line_pointer;
6828 if (gotfree_input_line)
6829 {
6830 free (gotfree_input_line);
6831
6832 if (exp->X_op == O_constant || exp->X_op == O_register)
6833 exp->X_op = O_illegal;
6834 }
6835
6836 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6837
6838 RESTORE_END_STRING (disp_end);
6839
6840 return ret;
6841 }
6842
6843 static int
6844 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6845 i386_operand_type types, const char *disp_start)
6846 {
6847 i386_operand_type bigdisp;
6848 int ret = 1;
6849
6850 /* We do this to make sure that the section symbol is in
6851 the symbol table. We will ultimately change the relocation
6852 to be relative to the beginning of the section. */
6853 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6854 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6855 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6856 {
6857 if (exp->X_op != O_symbol)
6858 goto inv_disp;
6859
6860 if (S_IS_LOCAL (exp->X_add_symbol)
6861 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
6862 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6863 exp->X_op = O_subtract;
6864 exp->X_op_symbol = GOT_symbol;
6865 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6866 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6867 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6868 i.reloc[this_operand] = BFD_RELOC_64;
6869 else
6870 i.reloc[this_operand] = BFD_RELOC_32;
6871 }
6872
6873 else if (exp->X_op == O_absent
6874 || exp->X_op == O_illegal
6875 || exp->X_op == O_big)
6876 {
6877 inv_disp:
6878 as_bad (_("missing or invalid displacement expression `%s'"),
6879 disp_start);
6880 ret = 0;
6881 }
6882
6883 else if (flag_code == CODE_64BIT
6884 && !i.prefix[ADDR_PREFIX]
6885 && exp->X_op == O_constant)
6886 {
6887 /* Since the displacement is sign extended to 64 bits, don't allow
6888 disp32, and turn off disp32s if the value is out of range.  */
6889 i.types[this_operand].bitfield.disp32 = 0;
6890 if (!fits_in_signed_long (exp->X_add_number))
6891 {
6892 i.types[this_operand].bitfield.disp32s = 0;
6893 if (i.types[this_operand].bitfield.baseindex)
6894 {
6895 as_bad (_("0x%lx out of range of signed 32bit displacement"),
6896 (long) exp->X_add_number);
6897 ret = 0;
6898 }
6899 }
6900 }
6901
6902 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6903 else if (exp->X_op != O_constant
6904 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6905 && exp_seg != absolute_section
6906 && exp_seg != text_section
6907 && exp_seg != data_section
6908 && exp_seg != bss_section
6909 && exp_seg != undefined_section
6910 && !bfd_is_com_section (exp_seg))
6911 {
6912 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6913 ret = 0;
6914 }
6915 #endif
6916
6917 /* Check if this is a displacement only operand. */
6918 bigdisp = i.types[this_operand];
6919 bigdisp.bitfield.disp8 = 0;
6920 bigdisp.bitfield.disp16 = 0;
6921 bigdisp.bitfield.disp32 = 0;
6922 bigdisp.bitfield.disp32s = 0;
6923 bigdisp.bitfield.disp64 = 0;
6924 if (operand_type_all_zero (&bigdisp))
6925 i.types[this_operand] = operand_type_and (i.types[this_operand],
6926 types);
6927
6928 return ret;
6929 }
6930
6931 /* Make sure the memory operand we've been dealt is valid.
6932 Return 1 on success, 0 on a failure. */
6933
6934 static int
6935 i386_index_check (const char *operand_string)
6936 {
6937 int ok;
6938 const char *kind = "base/index";
6939 #if INFER_ADDR_PREFIX
6940 int fudged = 0;
6941
6942 tryprefix:
6943 #endif
6944 ok = 1;
6945 if (current_templates->start->opcode_modifier.isstring
6946 && !current_templates->start->opcode_modifier.immext
6947 && (current_templates->end[-1].opcode_modifier.isstring
6948 || i.mem_operands))
6949 {
6950 /* Memory operands of string insns are special in that they only allow
6951 a single register (rDI, rSI, or rBX) as their memory address. */
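/* E.g. `scas' implicitly addresses %es:(%edi), `lods' uses (%esi),
   and `xlat' uses (%ebx); anything else is diagnosed below.  */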
6952 unsigned int expected;
6953
6954 kind = "string address";
6955
6956 if (current_templates->start->opcode_modifier.w)
6957 {
6958 i386_operand_type type = current_templates->end[-1].operand_types[0];
6959
6960 if (!type.bitfield.baseindex
6961 || ((!i.mem_operands != !intel_syntax)
6962 && current_templates->end[-1].operand_types[1]
6963 .bitfield.baseindex))
6964 type = current_templates->end[-1].operand_types[1];
6965 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
6966 }
6967 else
6968 expected = 3 /* rBX */;
6969
6970 if (!i.base_reg || i.index_reg
6971 || operand_type_check (i.types[this_operand], disp))
6972 ok = -1;
6973 else if (!(flag_code == CODE_64BIT
6974 ? i.prefix[ADDR_PREFIX]
6975 ? i.base_reg->reg_type.bitfield.reg32
6976 : i.base_reg->reg_type.bitfield.reg64
6977 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6978 ? i.base_reg->reg_type.bitfield.reg32
6979 : i.base_reg->reg_type.bitfield.reg16))
6980 ok = 0;
6981 else if (i.base_reg->reg_num != expected)
6982 ok = -1;
6983
6984 if (ok < 0)
6985 {
6986 unsigned int j;
6987
6988 for (j = 0; j < i386_regtab_size; ++j)
6989 if ((flag_code == CODE_64BIT
6990 ? i.prefix[ADDR_PREFIX]
6991 ? i386_regtab[j].reg_type.bitfield.reg32
6992 : i386_regtab[j].reg_type.bitfield.reg64
6993 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6994 ? i386_regtab[j].reg_type.bitfield.reg32
6995 : i386_regtab[j].reg_type.bitfield.reg16)
6996 && i386_regtab[j].reg_num == expected)
6997 break;
6998 gas_assert (j < i386_regtab_size);
6999 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7000 operand_string,
7001 intel_syntax ? '[' : '(',
7002 register_prefix,
7003 i386_regtab[j].reg_name,
7004 intel_syntax ? ']' : ')');
7005 ok = 1;
7006 }
7007 }
7008 else if (flag_code == CODE_64BIT)
7009 {
7010 if ((i.base_reg
7011 && ((i.prefix[ADDR_PREFIX] == 0
7012 && !i.base_reg->reg_type.bitfield.reg64)
7013 || (i.prefix[ADDR_PREFIX]
7014 && !i.base_reg->reg_type.bitfield.reg32))
7015 && (i.index_reg
7016 || i.base_reg->reg_num !=
7017 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7018 || (i.index_reg
7019 && (!i.index_reg->reg_type.bitfield.baseindex
7020 || (i.prefix[ADDR_PREFIX] == 0
7021 && i.index_reg->reg_num != RegRiz
7022 && !i.index_reg->reg_type.bitfield.reg64
7023 )
7024 || (i.prefix[ADDR_PREFIX]
7025 && i.index_reg->reg_num != RegEiz
7026 && !i.index_reg->reg_type.bitfield.reg32))))
7027 ok = 0;
7028 }
7029 else
7030 {
7031 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7032 {
7033 /* 16bit checks. */
7034 if ((i.base_reg
7035 && (!i.base_reg->reg_type.bitfield.reg16
7036 || !i.base_reg->reg_type.bitfield.baseindex))
7037 || (i.index_reg
7038 && (!i.index_reg->reg_type.bitfield.reg16
7039 || !i.index_reg->reg_type.bitfield.baseindex
7040 || !(i.base_reg
7041 && i.base_reg->reg_num < 6
7042 && i.index_reg->reg_num >= 6
7043 && i.log2_scale_factor == 0))))
7044 ok = 0;
7045 }
7046 else
7047 {
7048 /* 32bit checks. */
7049 if ((i.base_reg
7050 && !i.base_reg->reg_type.bitfield.reg32)
7051 || (i.index_reg
7052 && ((!i.index_reg->reg_type.bitfield.reg32
7053 && i.index_reg->reg_num != RegEiz)
7054 || !i.index_reg->reg_type.bitfield.baseindex)))
7055 ok = 0;
7056 }
7057 }
7058 if (!ok)
7059 {
7060 #if INFER_ADDR_PREFIX
7061 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7062 {
7063 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7064 i.prefixes += 1;
7065 /* Change the size of any displacement too. At most one of
7066 Disp16 or Disp32 is set.
7067 FIXME. There doesn't seem to be any real need for separate
7068 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7069 Removing them would probably clean up the code quite a lot. */
7070 if (flag_code != CODE_64BIT
7071 && (i.types[this_operand].bitfield.disp16
7072 || i.types[this_operand].bitfield.disp32))
7073 i.types[this_operand]
7074 = operand_type_xor (i.types[this_operand], disp16_32);
7075 fudged = 1;
7076 goto tryprefix;
7077 }
7078 if (fudged)
7079 as_bad (_("`%s' is not a valid %s expression"),
7080 operand_string,
7081 kind);
7082 else
7083 #endif
7084 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7085 operand_string,
7086 flag_code_names[i.prefix[ADDR_PREFIX]
7087 ? flag_code == CODE_32BIT
7088 ? CODE_16BIT
7089 : CODE_32BIT
7090 : flag_code],
7091 kind);
7092 }
7093 return ok;
7094 }
7095
7096 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7097 on error. */
7098
7099 static int
7100 i386_att_operand (char *operand_string)
7101 {
7102 const reg_entry *r;
7103 char *end_op;
7104 char *op_string = operand_string;
7105
7106 if (is_space_char (*op_string))
7107 ++op_string;
7108
7109 /* We check for an absolute prefix (differentiating,
7110 for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7111 if (*op_string == ABSOLUTE_PREFIX)
7112 {
7113 ++op_string;
7114 if (is_space_char (*op_string))
7115 ++op_string;
7116 i.types[this_operand].bitfield.jumpabsolute = 1;
7117 }
7118
7119 /* Check if operand is a register. */
7120 if ((r = parse_register (op_string, &end_op)) != NULL)
7121 {
7122 i386_operand_type temp;
7123
7124 /* Check for a segment override by searching for ':' after a
7125 segment register. */
7126 op_string = end_op;
7127 if (is_space_char (*op_string))
7128 ++op_string;
7129 if (*op_string == ':'
7130 && (r->reg_type.bitfield.sreg2
7131 || r->reg_type.bitfield.sreg3))
7132 {
7133 switch (r->reg_num)
7134 {
7135 case 0:
7136 i.seg[i.mem_operands] = &es;
7137 break;
7138 case 1:
7139 i.seg[i.mem_operands] = &cs;
7140 break;
7141 case 2:
7142 i.seg[i.mem_operands] = &ss;
7143 break;
7144 case 3:
7145 i.seg[i.mem_operands] = &ds;
7146 break;
7147 case 4:
7148 i.seg[i.mem_operands] = &fs;
7149 break;
7150 case 5:
7151 i.seg[i.mem_operands] = &gs;
7152 break;
7153 }
7154
7155 /* Skip the ':' and whitespace. */
7156 ++op_string;
7157 if (is_space_char (*op_string))
7158 ++op_string;
7159
7160 if (!is_digit_char (*op_string)
7161 && !is_identifier_char (*op_string)
7162 && *op_string != '('
7163 && *op_string != ABSOLUTE_PREFIX)
7164 {
7165 as_bad (_("bad memory operand `%s'"), op_string);
7166 return 0;
7167 }
7168 /* Handle case of %es:*foo. */
7169 if (*op_string == ABSOLUTE_PREFIX)
7170 {
7171 ++op_string;
7172 if (is_space_char (*op_string))
7173 ++op_string;
7174 i.types[this_operand].bitfield.jumpabsolute = 1;
7175 }
7176 goto do_memory_reference;
7177 }
7178 if (*op_string)
7179 {
7180 as_bad (_("junk `%s' after register"), op_string);
7181 return 0;
7182 }
7183 temp = r->reg_type;
7184 temp.bitfield.baseindex = 0;
7185 i.types[this_operand] = operand_type_or (i.types[this_operand],
7186 temp);
7187 i.types[this_operand].bitfield.unspecified = 0;
7188 i.op[this_operand].regs = r;
7189 i.reg_operands++;
7190 }
7191 else if (*op_string == REGISTER_PREFIX)
7192 {
7193 as_bad (_("bad register name `%s'"), op_string);
7194 return 0;
7195 }
7196 else if (*op_string == IMMEDIATE_PREFIX)
7197 {
7198 ++op_string;
7199 if (i.types[this_operand].bitfield.jumpabsolute)
7200 {
7201 as_bad (_("immediate operand illegal with absolute jump"));
7202 return 0;
7203 }
7204 if (!i386_immediate (op_string))
7205 return 0;
7206 }
7207 else if (is_digit_char (*op_string)
7208 || is_identifier_char (*op_string)
7209 || *op_string == '(')
7210 {
7211 /* This is a memory reference of some sort. */
7212 char *base_string;
7213
7214 /* Start and end of displacement string expression (if found). */
7215 char *displacement_string_start;
7216 char *displacement_string_end;
7217
7218 do_memory_reference:
7219 if ((i.mem_operands == 1
7220 && !current_templates->start->opcode_modifier.isstring)
7221 || i.mem_operands == 2)
7222 {
7223 as_bad (_("too many memory references for `%s'"),
7224 current_templates->start->name);
7225 return 0;
7226 }
7227
7228 /* Check for base index form. We detect the base index form by
7229 looking for an ')' at the end of the operand, searching
7230 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7231 after the '('. */
7232 base_string = op_string + strlen (op_string);
7233
7234 --base_string;
7235 if (is_space_char (*base_string))
7236 --base_string;
7237
7238 /* If we only have a displacement, set up for it to be parsed later.  */
7239 displacement_string_start = op_string;
7240 displacement_string_end = base_string + 1;
7241
7242 if (*base_string == ')')
7243 {
7244 char *temp_string;
7245 unsigned int parens_balanced = 1;
7246 /* We've already checked that the numbers of left & right ()'s are
7247 equal, so this loop will not be infinite.  */
7248 do
7249 {
7250 base_string--;
7251 if (*base_string == ')')
7252 parens_balanced++;
7253 if (*base_string == '(')
7254 parens_balanced--;
7255 }
7256 while (parens_balanced);
7257
7258 temp_string = base_string;
7259
7260 /* Skip past '(' and whitespace. */
7261 ++base_string;
7262 if (is_space_char (*base_string))
7263 ++base_string;
7264
7265 if (*base_string == ','
7266 || ((i.base_reg = parse_register (base_string, &end_op))
7267 != NULL))
7268 {
7269 displacement_string_end = temp_string;
7270
7271 i.types[this_operand].bitfield.baseindex = 1;
7272
7273 if (i.base_reg)
7274 {
7275 base_string = end_op;
7276 if (is_space_char (*base_string))
7277 ++base_string;
7278 }
7279
7280 /* There may be an index reg or scale factor here. */
7281 if (*base_string == ',')
7282 {
7283 ++base_string;
7284 if (is_space_char (*base_string))
7285 ++base_string;
7286
7287 if ((i.index_reg = parse_register (base_string, &end_op))
7288 != NULL)
7289 {
7290 base_string = end_op;
7291 if (is_space_char (*base_string))
7292 ++base_string;
7293 if (*base_string == ',')
7294 {
7295 ++base_string;
7296 if (is_space_char (*base_string))
7297 ++base_string;
7298 }
7299 else if (*base_string != ')')
7300 {
7301 as_bad (_("expecting `,' or `)' "
7302 "after index register in `%s'"),
7303 operand_string);
7304 return 0;
7305 }
7306 }
7307 else if (*base_string == REGISTER_PREFIX)
7308 {
7309 as_bad (_("bad register name `%s'"), base_string);
7310 return 0;
7311 }
7312
7313 /* Check for scale factor. */
7314 if (*base_string != ')')
7315 {
7316 char *end_scale = i386_scale (base_string);
7317
7318 if (!end_scale)
7319 return 0;
7320
7321 base_string = end_scale;
7322 if (is_space_char (*base_string))
7323 ++base_string;
7324 if (*base_string != ')')
7325 {
7326 as_bad (_("expecting `)' "
7327 "after scale factor in `%s'"),
7328 operand_string);
7329 return 0;
7330 }
7331 }
7332 else if (!i.index_reg)
7333 {
7334 as_bad (_("expecting index register or scale factor "
7335 "after `,'; got '%c'"),
7336 *base_string);
7337 return 0;
7338 }
7339 }
7340 else if (*base_string != ')')
7341 {
7342 as_bad (_("expecting `,' or `)' "
7343 "after base register in `%s'"),
7344 operand_string);
7345 return 0;
7346 }
7347 }
7348 else if (*base_string == REGISTER_PREFIX)
7349 {
7350 as_bad (_("bad register name `%s'"), base_string);
7351 return 0;
7352 }
7353 }
7354
7355 /* If there's an expression beginning the operand, parse it,
7356 assuming displacement_string_start and
7357 displacement_string_end are meaningful. */
7358 if (displacement_string_start != displacement_string_end)
7359 {
7360 if (!i386_displacement (displacement_string_start,
7361 displacement_string_end))
7362 return 0;
7363 }
7364
7365 /* Special case for (%dx) while doing input/output op. */
7366 if (i.base_reg
7367 && operand_type_equal (&i.base_reg->reg_type,
7368 &reg16_inoutportreg)
7369 && i.index_reg == 0
7370 && i.log2_scale_factor == 0
7371 && i.seg[i.mem_operands] == 0
7372 && !operand_type_check (i.types[this_operand], disp))
7373 {
7374 i.types[this_operand] = inoutportreg;
7375 return 1;
7376 }
7377
7378 if (i386_index_check (operand_string) == 0)
7379 return 0;
7380 i.types[this_operand].bitfield.mem = 1;
7381 i.mem_operands++;
7382 }
7383 else
7384 {
7385 /* It's not a memory operand; argh! */
7386 as_bad (_("invalid char %s beginning operand %d `%s'"),
7387 output_invalid (*op_string),
7388 this_operand + 1,
7389 op_string);
7390 return 0;
7391 }
7392 return 1; /* Normal return. */
7393 }
7394 \f
7395 /* md_estimate_size_before_relax()
7396
7397 Called just before relax() for rs_machine_dependent frags. The x86
7398 assembler uses these frags to handle variable size jump
7399 instructions.
7400
7401 Any symbol that is now undefined will not become defined.
7402 Return the correct fr_subtype in the frag.
7403 Return the initial "guess for variable size of frag" to caller.
7404 The guess is actually the growth beyond the fixed part. Whatever
7405 we do to grow the fixed or variable part contributes to our
7406 returned value. */
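/* For example, a conditional jump to an external ELF symbol cannot be
   relaxed, since the symbol may be overridden at link time; below it is
   either rewritten as 0x0f 0x8N with a (d)word displacement, or, when
   no_cond_jump_promotion is set, given an 8-bit PC-relative reloc.  */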
7407
7408 int
7409 md_estimate_size_before_relax (fragS *fragP, segT segment)
7410 {
7413 /* We've already got fragP->fr_subtype right; all we have to do is
7414 check for un-relaxable symbols. On an ELF system, we can't relax
7415 an externally visible symbol, because it may be overridden by a
7416 shared library. */
7417 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7418 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7419 || (IS_ELF
7420 && (S_IS_EXTERNAL (fragP->fr_symbol)
7421 || S_IS_WEAK (fragP->fr_symbol)
7422 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7423 & BSF_GNU_INDIRECT_FUNCTION))))
7424 #endif
7425 #if defined (OBJ_COFF) && defined (TE_PE)
7426 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7427 && S_IS_WEAK (fragP->fr_symbol))
7428 #endif
7429 )
7430 {
7431 /* Symbol is undefined in this segment, or we need to keep a
7432 reloc so that weak symbols can be overridden. */
7433 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7434 enum bfd_reloc_code_real reloc_type;
7435 unsigned char *opcode;
7436 int old_fr_fix;
7437
7438 if (fragP->fr_var != NO_RELOC)
7439 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7440 else if (size == 2)
7441 reloc_type = BFD_RELOC_16_PCREL;
7442 else
7443 reloc_type = BFD_RELOC_32_PCREL;
7444
7445 old_fr_fix = fragP->fr_fix;
7446 opcode = (unsigned char *) fragP->fr_opcode;
7447
7448 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7449 {
7450 case UNCOND_JUMP:
7451 /* Make jmp (0xeb) a (d)word displacement jump. */
7452 opcode[0] = 0xe9;
7453 fragP->fr_fix += size;
7454 fix_new (fragP, old_fr_fix, size,
7455 fragP->fr_symbol,
7456 fragP->fr_offset, 1,
7457 reloc_type);
7458 break;
7459
7460 case COND_JUMP86:
7461 if (size == 2
7462 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7463 {
7464 /* Negate the condition, and branch past an
7465 unconditional jump. */
7466 opcode[0] ^= 1;
7467 opcode[1] = 3;
7468 /* Insert an unconditional jump. */
7469 opcode[2] = 0xe9;
7470 /* We added two extra opcode bytes, and have a two byte
7471 offset. */
7472 fragP->fr_fix += 2 + 2;
7473 fix_new (fragP, old_fr_fix + 2, 2,
7474 fragP->fr_symbol,
7475 fragP->fr_offset, 1,
7476 reloc_type);
7477 break;
7478 }
7479 /* Fall through. */
7480
7481 case COND_JUMP:
7482 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7483 {
7484 fixS *fixP;
7485
7486 fragP->fr_fix += 1;
7487 fixP = fix_new (fragP, old_fr_fix, 1,
7488 fragP->fr_symbol,
7489 fragP->fr_offset, 1,
7490 BFD_RELOC_8_PCREL);
7491 fixP->fx_signed = 1;
7492 break;
7493 }
7494
7495 /* This changes the byte-displacement jump 0x7N
7496 to the (d)word-displacement jump 0x0f,0x8N. */
7497 opcode[1] = opcode[0] + 0x10;
7498 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7499 /* We've added an opcode byte. */
7500 fragP->fr_fix += 1 + size;
7501 fix_new (fragP, old_fr_fix + 1, size,
7502 fragP->fr_symbol,
7503 fragP->fr_offset, 1,
7504 reloc_type);
7505 break;
7506
7507 default:
7508 BAD_CASE (fragP->fr_subtype);
7509 break;
7510 }
7511 frag_wane (fragP);
7512 return fragP->fr_fix - old_fr_fix;
7513 }
7514
7515 /* Guess size depending on current relax state. Initially the relax
7516 state will correspond to a short jump and we return 1, because
7517 the variable part of the frag (the branch offset) is one byte
7518 long. However, we can relax a section more than once and in that
7519 case we must either set fr_subtype back to the unrelaxed state,
7520 or return the value for the appropriate branch. */
7521 return md_relax_table[fragP->fr_subtype].rlx_length;
7522 }
7523
7524 /* Called after relax() is finished.
7525
7526 In: Address of frag.
7527 fr_type == rs_machine_dependent.
7528 fr_subtype is what the address relaxed to.
7529
7530 Out: Any fixSs and constants are set up.
7531 Caller will turn frag into a ".space 0". */
7532
7533 void
7534 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7535 fragS *fragP)
7536 {
7539 unsigned char *opcode;
7540 unsigned char *where_to_put_displacement = NULL;
7541 offsetT target_address;
7542 offsetT opcode_address;
7543 unsigned int extension = 0;
7544 offsetT displacement_from_opcode_start;
7545
7546 opcode = (unsigned char *) fragP->fr_opcode;
7547
7548 /* Address we want to reach in file space. */
7549 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7550
7551 /* Address opcode resides at in file space. */
7552 opcode_address = fragP->fr_address + fragP->fr_fix;
7553
7554 /* Displacement from opcode start to fill into instruction. */
7555 displacement_from_opcode_start = target_address - opcode_address;
7556
7557 if ((fragP->fr_subtype & BIG) == 0)
7558 {
7559 /* Don't have to change opcode. */
7560 extension = 1; /* 1 opcode + 1 displacement */
7561 where_to_put_displacement = &opcode[1];
7562 }
7563 else
7564 {
7565 if (no_cond_jump_promotion
7566 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7567 as_warn_where (fragP->fr_file, fragP->fr_line,
7568 _("long jump required"));
7569
7570 switch (fragP->fr_subtype)
7571 {
7572 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7573 extension = 4; /* 1 opcode + 4 displacement */
7574 opcode[0] = 0xe9;
7575 where_to_put_displacement = &opcode[1];
7576 break;
7577
7578 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7579 extension = 2; /* 1 opcode + 2 displacement */
7580 opcode[0] = 0xe9;
7581 where_to_put_displacement = &opcode[1];
7582 break;
7583
7584 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7585 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7586 extension = 5; /* 2 opcode + 4 displacement */
7587 opcode[1] = opcode[0] + 0x10;
7588 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7589 where_to_put_displacement = &opcode[2];
7590 break;
7591
7592 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7593 extension = 3; /* 2 opcode + 2 displacement */
7594 opcode[1] = opcode[0] + 0x10;
7595 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7596 where_to_put_displacement = &opcode[2];
7597 break;
7598
7599 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7600 extension = 4;
7601 opcode[0] ^= 1;
7602 opcode[1] = 3;
7603 opcode[2] = 0xe9;
7604 where_to_put_displacement = &opcode[3];
7605 break;
7606
7607 default:
7608 BAD_CASE (fragP->fr_subtype);
7609 break;
7610 }
7611 }
7612
7613 /* If the size is less than four we are sure that the operand fits,
7614 but if it's 4, then it could be that the displacement is larger
7615 than -/+ 2GB.  */
7616 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7617 && object_64bit
7618 && ((addressT) (displacement_from_opcode_start - extension
7619 + ((addressT) 1 << 31))
7620 > (((addressT) 2 << 31) - 1)))
7621 {
7622 as_bad_where (fragP->fr_file, fragP->fr_line,
7623 _("jump target out of range"));
7624 /* Make us emit 0. */
7625 displacement_from_opcode_start = extension;
7626 }
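/* The encoded displacement is relative to the end of the jump
   instruction, which lies `extension' bytes beyond opcode_address,
   hence the subtraction below.  */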
7627 /* Now put displacement after opcode. */
7628 md_number_to_chars ((char *) where_to_put_displacement,
7629 (valueT) (displacement_from_opcode_start - extension),
7630 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7631 fragP->fr_fix += extension;
7632 }
7633 \f
7634 /* Apply a fixup (fixS) to segment data, once it has been determined
7635 by our caller that we have all the info we need to fix it up.
7636
7637 On the 386, immediates, displacements, and data pointers are all in
7638 the same (little-endian) format, so we don't need to care about which
7639 we are handling. */
7640
7641 void
7642 md_apply_fix (fixS *fixP,  /* The fix we're to put in.  */
7643 valueT *valP,  /* Pointer to the value of the bits.  */
7644 segT seg ATTRIBUTE_UNUSED)  /* Segment fix is from.  */
7645 {
7650 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7651 valueT value = *valP;
7652
7653 #if !defined (TE_Mach)
7654 if (fixP->fx_pcrel)
7655 {
7656 switch (fixP->fx_r_type)
7657 {
7658 default:
7659 break;
7660
7661 case BFD_RELOC_64:
7662 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7663 break;
7664 case BFD_RELOC_32:
7665 case BFD_RELOC_X86_64_32S:
7666 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7667 break;
7668 case BFD_RELOC_16:
7669 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7670 break;
7671 case BFD_RELOC_8:
7672 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7673 break;
7674 }
7675 }
7676
7677 if (fixP->fx_addsy != NULL
7678 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7679 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7680 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7681 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7682 && !use_rela_relocations)
7683 {
7684 /* This is a hack. There should be a better way to handle this.
7685 This covers for the fact that bfd_install_relocation will
7686 subtract the current location (for partial_inplace, PC relative
7687 relocations); see more below. */
7688 #ifndef OBJ_AOUT
7689 if (IS_ELF
7690 #ifdef TE_PE
7691 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7692 #endif
7693 )
7694 value += fixP->fx_where + fixP->fx_frag->fr_address;
7695 #endif
7696 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7697 if (IS_ELF)
7698 {
7699 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7700
7701 if ((sym_seg == seg
7702 || (symbol_section_p (fixP->fx_addsy)
7703 && sym_seg != absolute_section))
7704 && !generic_force_reloc (fixP))
7705 {
7706 /* Yes, we add the values in twice. This is because
7707 bfd_install_relocation subtracts them out again. I think
7708 bfd_install_relocation is broken, but I don't dare change
7709 it. FIXME. */
7710 value += fixP->fx_where + fixP->fx_frag->fr_address;
7711 }
7712 }
7713 #endif
7714 #if defined (OBJ_COFF) && defined (TE_PE)
7715 /* For some reason, the PE format does not store a
7716 section address offset for a PC relative symbol. */
7717 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7718 || S_IS_WEAK (fixP->fx_addsy))
7719 value += md_pcrel_from (fixP);
7720 #endif
7721 }
7722 #if defined (OBJ_COFF) && defined (TE_PE)
7723 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7724 {
7725 value -= S_GET_VALUE (fixP->fx_addsy);
7726 }
7727 #endif
7728
7729 /* Fix a few things - the dynamic linker expects certain values here,
7730 and we must not disappoint it. */
7731 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7732 if (IS_ELF && fixP->fx_addsy)
7733 switch (fixP->fx_r_type)
7734 {
7735 case BFD_RELOC_386_PLT32:
7736 case BFD_RELOC_X86_64_PLT32:
7737 /* Make the jump instruction point to the address of the operand. At
7738 runtime we merely add the offset to the actual PLT entry. */
7739 value = -4;
7740 break;
7741
7742 case BFD_RELOC_386_TLS_GD:
7743 case BFD_RELOC_386_TLS_LDM:
7744 case BFD_RELOC_386_TLS_IE_32:
7745 case BFD_RELOC_386_TLS_IE:
7746 case BFD_RELOC_386_TLS_GOTIE:
7747 case BFD_RELOC_386_TLS_GOTDESC:
7748 case BFD_RELOC_X86_64_TLSGD:
7749 case BFD_RELOC_X86_64_TLSLD:
7750 case BFD_RELOC_X86_64_GOTTPOFF:
7751 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7752 value = 0; /* Fully resolved at runtime. No addend. */
7753 /* Fallthrough */
7754 case BFD_RELOC_386_TLS_LE:
7755 case BFD_RELOC_386_TLS_LDO_32:
7756 case BFD_RELOC_386_TLS_LE_32:
7757 case BFD_RELOC_X86_64_DTPOFF32:
7758 case BFD_RELOC_X86_64_DTPOFF64:
7759 case BFD_RELOC_X86_64_TPOFF32:
7760 case BFD_RELOC_X86_64_TPOFF64:
7761 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7762 break;
7763
7764 case BFD_RELOC_386_TLS_DESC_CALL:
7765 case BFD_RELOC_X86_64_TLSDESC_CALL:
7766 value = 0; /* Fully resolved at runtime. No addend. */
7767 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7768 fixP->fx_done = 0;
7769 return;
7770
7771 case BFD_RELOC_386_GOT32:
7772 case BFD_RELOC_X86_64_GOT32:
7773 value = 0; /* Fully resolved at runtime. No addend. */
7774 break;
7775
7776 case BFD_RELOC_VTABLE_INHERIT:
7777 case BFD_RELOC_VTABLE_ENTRY:
7778 fixP->fx_done = 0;
7779 return;
7780
7781 default:
7782 break;
7783 }
7784 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7785 *valP = value;
7786 #endif /* !defined (TE_Mach) */
7787
7788 /* Are we finished with this relocation now? */
7789 if (fixP->fx_addsy == NULL)
7790 fixP->fx_done = 1;
7791 #if defined (OBJ_COFF) && defined (TE_PE)
7792 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7793 {
7794 fixP->fx_done = 0;
7795 /* Remember value for tc_gen_reloc. */
7796 fixP->fx_addnumber = value;
7797 /* Clear out the frag for now. */
7798 value = 0;
7799 }
7800 #endif
7801 else if (use_rela_relocations)
7802 {
7803 fixP->fx_no_overflow = 1;
7804 /* Remember value for tc_gen_reloc. */
7805 fixP->fx_addnumber = value;
7806 value = 0;
7807 }
7808
7809 md_number_to_chars (p, value, fixP->fx_size);
7810 }
7811 \f
7812 char *
7813 md_atof (int type, char *litP, int *sizeP)
7814 {
7815 /* This outputs the LITTLENUMs in REVERSE order;
7816 in accord with the littleendian 386.  */
7817 return ieee_md_atof (type, litP, sizeP, FALSE);
7818 }
7819 \f
7820 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7821
7822 static char *
7823 output_invalid (int c)
7824 {
7825 if (ISPRINT (c))
7826 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7827 "'%c'", c);
7828 else
7829 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7830 "(0x%x)", (unsigned char) c);
7831 return output_invalid_buf;
7832 }
7833
7834 /* REG_STRING starts *before* REGISTER_PREFIX. */
7835
7836 static const reg_entry *
7837 parse_real_register (char *reg_string, char **end_op)
7838 {
7839 char *s = reg_string;
7840 char *p;
7841 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7842 const reg_entry *r;
7843
7844 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7845 if (*s == REGISTER_PREFIX)
7846 ++s;
7847
7848 if (is_space_char (*s))
7849 ++s;
7850
7851 p = reg_name_given;
7852 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7853 {
7854 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7855 return (const reg_entry *) NULL;
7856 s++;
7857 }
7858
7859 /* For naked regs, make sure that we are not dealing with an identifier.
7860 This prevents confusing an identifier like `eax_var' with register
7861 `eax'. */
7862 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7863 return (const reg_entry *) NULL;
7864
7865 *end_op = s;
7866
7867 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7868
7869 /* Handle floating point regs, allowing spaces in the (i) part. */
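/* E.g. `%st ( 2 )' is accepted and resolves to st(2).  */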
7870 if (r == i386_regtab /* %st is first entry of table */)
7871 {
7872 if (is_space_char (*s))
7873 ++s;
7874 if (*s == '(')
7875 {
7876 ++s;
7877 if (is_space_char (*s))
7878 ++s;
7879 if (*s >= '0' && *s <= '7')
7880 {
7881 int fpr = *s - '0';
7882 ++s;
7883 if (is_space_char (*s))
7884 ++s;
7885 if (*s == ')')
7886 {
7887 *end_op = s + 1;
7888 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7889 know (r);
7890 return r + fpr;
7891 }
7892 }
7893 /* We have "%st(" then garbage. */
7894 return (const reg_entry *) NULL;
7895 }
7896 }
7897
7898 if (r == NULL || allow_pseudo_reg)
7899 return r;
7900
7901 if (operand_type_all_zero (&r->reg_type))
7902 return (const reg_entry *) NULL;
7903
7904 if ((r->reg_type.bitfield.reg32
7905 || r->reg_type.bitfield.sreg3
7906 || r->reg_type.bitfield.control
7907 || r->reg_type.bitfield.debug
7908 || r->reg_type.bitfield.test)
7909 && !cpu_arch_flags.bitfield.cpui386)
7910 return (const reg_entry *) NULL;
7911
7912 if (r->reg_type.bitfield.floatreg
7913 && !cpu_arch_flags.bitfield.cpu8087
7914 && !cpu_arch_flags.bitfield.cpu287
7915 && !cpu_arch_flags.bitfield.cpu387)
7916 return (const reg_entry *) NULL;
7917
7918 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7919 return (const reg_entry *) NULL;
7920
7921 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7922 return (const reg_entry *) NULL;
7923
7924 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7925 return (const reg_entry *) NULL;
7926
7927 /* Don't allow the fake index registers (%eiz, %riz) unless allow_index_reg is non-zero.  */
7928 if (!allow_index_reg
7929 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7930 return (const reg_entry *) NULL;
7931
7932 if (((r->reg_flags & (RegRex64 | RegRex))
7933 || r->reg_type.bitfield.reg64)
7934 && (!cpu_arch_flags.bitfield.cpulm
7935 || !operand_type_equal (&r->reg_type, &control))
7936 && flag_code != CODE_64BIT)
7937 return (const reg_entry *) NULL;
7938
7939 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7940 return (const reg_entry *) NULL;
7941
7942 return r;
7943 }
7944
7945 /* REG_STRING starts *before* REGISTER_PREFIX. */
7946
7947 static const reg_entry *
7948 parse_register (char *reg_string, char **end_op)
7949 {
7950 const reg_entry *r;
7951
7952 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7953 r = parse_real_register (reg_string, end_op);
7954 else
7955 r = NULL;
7956 if (!r)
7957 {
7958 char *save = input_line_pointer;
7959 char c;
7960 symbolS *symbolP;
7961
7962 input_line_pointer = reg_string;
7963 c = get_symbol_end ();
7964 symbolP = symbol_find (reg_string);
7965 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
7966 {
7967 const expressionS *e = symbol_get_value_expression (symbolP);
7968
7969 know (i386_is_register (e, intel_syntax));
7970 know (e->X_add_number >= 0
7971 && (valueT) e->X_add_number < i386_regtab_size);
7972 r = i386_regtab + e->X_add_number;
7973 *end_op = input_line_pointer;
7974 }
7975 *input_line_pointer = c;
7976 input_line_pointer = save;
7977 }
7978 return r;
7979 }
7980
7981 int
7982 i386_parse_name (char *name, expressionS *e, char *nextcharP)
7983 {
7984 const reg_entry *r;
7985 char *end = input_line_pointer;
7986
7987 *end = *nextcharP;
7988 r = parse_register (name, &input_line_pointer);
7989 if (r && end <= input_line_pointer)
7990 {
7991 *nextcharP = *input_line_pointer;
7992 *input_line_pointer = 0;
7993 e->X_op = O_register;
7994 e->X_add_number = r - i386_regtab;
7995 return 1;
7996 }
7997 input_line_pointer = end;
7998 *end = 0;
7999 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8000 }
8001
8002 void
8003 md_operand (expressionS *e)
8004 {
8005 char *end;
8006 const reg_entry *r;
8007
8008 switch (*input_line_pointer)
8009 {
8010 case REGISTER_PREFIX:
8011 r = parse_real_register (input_line_pointer, &end);
8012 if (r)
8013 {
8014 e->X_op = O_register;
8015 e->X_add_number = r - i386_regtab;
8016 input_line_pointer = end;
8017 }
8018 break;
8019
8020 case '[':
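/* An Intel-syntax bracketed sub-expression (e.g. "[bar]") reaching the
   generic expression parser; wrap it in an O_index expression symbol.  */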
8021 gas_assert (intel_syntax);
8022 end = input_line_pointer++;
8023 expression (e);
8024 if (*input_line_pointer == ']')
8025 {
8026 ++input_line_pointer;
8027 e->X_op_symbol = make_expr_symbol (e);
8028 e->X_add_symbol = NULL;
8029 e->X_add_number = 0;
8030 e->X_op = O_index;
8031 }
8032 else
8033 {
8034 e->X_op = O_absent;
8035 input_line_pointer = end;
8036 }
8037 break;
8038 }
8039 }
8040
8041 \f
8042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8043 const char *md_shortopts = "kVQ:sqn";
8044 #else
8045 const char *md_shortopts = "qn";
8046 #endif
8047
8048 #define OPTION_32 (OPTION_MD_BASE + 0)
8049 #define OPTION_64 (OPTION_MD_BASE + 1)
8050 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8051 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8052 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8053 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8054 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8055 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8056 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8057 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8058 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8059 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8060 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8061
8062 struct option md_longopts[] =
8063 {
8064 {"32", no_argument, NULL, OPTION_32},
8065 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8066 || defined (TE_PE) || defined (TE_PEP))
8067 {"64", no_argument, NULL, OPTION_64},
8068 #endif
8069 {"divide", no_argument, NULL, OPTION_DIVIDE},
8070 {"march", required_argument, NULL, OPTION_MARCH},
8071 {"mtune", required_argument, NULL, OPTION_MTUNE},
8072 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8073 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8074 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8075 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8076 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8077 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8078 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8079 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8080 {NULL, no_argument, NULL, 0}
8081 };
8082 size_t md_longopts_size = sizeof (md_longopts);
8083
8084 int
8085 md_parse_option (int c, char *arg)
8086 {
8087 unsigned int j;
8088 char *arch, *next;
8089
8090 switch (c)
8091 {
8092 case 'n':
8093 optimize_align_code = 0;
8094 break;
8095
8096 case 'q':
8097 quiet_warnings = 1;
8098 break;
8099
8100 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8101 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8102 should be emitted or not. FIXME: Not implemented. */
8103 case 'Q':
8104 break;
8105
8106 /* -V: SVR4 argument to print version ID. */
8107 case 'V':
8108 print_version_id ();
8109 break;
8110
8111 /* -k: Ignore for FreeBSD compatibility. */
8112 case 'k':
8113 break;
8114
8115 case 's':
8116 /* -s: On i386 Solaris, this tells the native assembler to use
8117 .stab instead of .stab.excl. We always use .stab anyhow. */
8118 break;
8119 #endif
8120 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8121 || defined (TE_PE) || defined (TE_PEP))
8122 case OPTION_64:
8123 {
8124 const char **list, **l;
8125
8126 list = bfd_target_list ();
8127 for (l = list; *l != NULL; l++)
8128 if (CONST_STRNEQ (*l, "elf64-x86-64")
8129 || strcmp (*l, "coff-x86-64") == 0
8130 || strcmp (*l, "pe-x86-64") == 0
8131 || strcmp (*l, "pei-x86-64") == 0)
8132 {
8133 default_arch = "x86_64";
8134 break;
8135 }
8136 if (*l == NULL)
8137 as_fatal (_("No compiled in support for x86_64"));
8138 free (list);
8139 }
8140 break;
8141 #endif
8142
8143 case OPTION_32:
8144 default_arch = "i386";
8145 break;
8146
8147 case OPTION_DIVIDE:
8148 #ifdef SVR4_COMMENT_CHARS
8149 {
8150 char *n, *t;
8151 const char *s;
8152
8153 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8154 t = n;
8155 for (s = i386_comment_chars; *s != '\0'; s++)
8156 if (*s != '/')
8157 *t++ = *s;
8158 *t = '\0';
8159 i386_comment_chars = n;
8160 }
8161 #endif
8162 break;
8163
8164 case OPTION_MARCH:
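/* A minimal example, assuming the cpu_arch table carries a ".sse2" extension
   entry: "-march=i686+sse2" first matches the "i686" processor entry and then
   ORs the ".sse2" flags into cpu_arch_flags; an extension name starting with
   "no" clears the corresponding flags instead.  */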
8165 arch = xstrdup (arg);
8166 do
8167 {
8168 if (*arch == '.')
8169 as_fatal (_("Invalid -march= option: `%s'"), arg);
8170 next = strchr (arch, '+');
8171 if (next)
8172 *next++ = '\0';
8173 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8174 {
8175 if (strcmp (arch, cpu_arch [j].name) == 0)
8176 {
8177 /* Processor. */
8178 cpu_arch_name = cpu_arch[j].name;
8179 cpu_sub_arch_name = NULL;
8180 cpu_arch_flags = cpu_arch[j].flags;
8181 cpu_arch_isa = cpu_arch[j].type;
8182 cpu_arch_isa_flags = cpu_arch[j].flags;
8183 if (!cpu_arch_tune_set)
8184 {
8185 cpu_arch_tune = cpu_arch_isa;
8186 cpu_arch_tune_flags = cpu_arch_isa_flags;
8187 }
8188 break;
8189 }
8190 else if (*cpu_arch [j].name == '.'
8191 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8192 {
8193 /* ISA extension. */
8194 i386_cpu_flags flags;
8195
8196 if (strncmp (arch, "no", 2))
8197 flags = cpu_flags_or (cpu_arch_flags,
8198 cpu_arch[j].flags);
8199 else
8200 flags = cpu_flags_and_not (cpu_arch_flags,
8201 cpu_arch[j].flags);
8202 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8203 {
8204 if (cpu_sub_arch_name)
8205 {
8206 char *name = cpu_sub_arch_name;
8207 cpu_sub_arch_name = concat (name,
8208 cpu_arch[j].name,
8209 (const char *) NULL);
8210 free (name);
8211 }
8212 else
8213 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8214 cpu_arch_flags = flags;
8215 }
8216 break;
8217 }
8218 }
8219
8220 if (j >= ARRAY_SIZE (cpu_arch))
8221 as_fatal (_("Invalid -march= option: `%s'"), arg);
8222
8223 arch = next;
8224 }
8225 while (next != NULL);
8226 break;
8227
8228 case OPTION_MTUNE:
8229 if (*arg == '.')
8230 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8231 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8232 {
8233 if (strcmp (arg, cpu_arch [j].name) == 0)
8234 {
8235 cpu_arch_tune_set = 1;
8236 cpu_arch_tune = cpu_arch [j].type;
8237 cpu_arch_tune_flags = cpu_arch[j].flags;
8238 break;
8239 }
8240 }
8241 if (j >= ARRAY_SIZE (cpu_arch))
8242 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8243 break;
8244
8245 case OPTION_MMNEMONIC:
8246 if (strcasecmp (arg, "att") == 0)
8247 intel_mnemonic = 0;
8248 else if (strcasecmp (arg, "intel") == 0)
8249 intel_mnemonic = 1;
8250 else
8251 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8252 break;
8253
8254 case OPTION_MSYNTAX:
8255 if (strcasecmp (arg, "att") == 0)
8256 intel_syntax = 0;
8257 else if (strcasecmp (arg, "intel") == 0)
8258 intel_syntax = 1;
8259 else
8260 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8261 break;
8262
8263 case OPTION_MINDEX_REG:
8264 allow_index_reg = 1;
8265 break;
8266
8267 case OPTION_MNAKED_REG:
8268 allow_naked_reg = 1;
8269 break;
8270
8271 case OPTION_MOLD_GCC:
8272 old_gcc = 1;
8273 break;
8274
8275 case OPTION_MSSE2AVX:
8276 sse2avx = 1;
8277 break;
8278
8279 case OPTION_MSSE_CHECK:
8280 if (strcasecmp (arg, "error") == 0)
8281 sse_check = sse_check_error;
8282 else if (strcasecmp (arg, "warning") == 0)
8283 sse_check = sse_check_warning;
8284 else if (strcasecmp (arg, "none") == 0)
8285 sse_check = sse_check_none;
8286 else
8287 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8288 break;
8289
8290 case OPTION_MAVXSCALAR:
8291 if (strcasecmp (arg, "128") == 0)
8292 avxscalar = vex128;
8293 else if (strcasecmp (arg, "256") == 0)
8294 avxscalar = vex256;
8295 else
8296 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8297 break;
8298
8299 default:
8300 return 0;
8301 }
8302 return 1;
8303 }
8304
8305 #define MESSAGE_TEMPLATE \
8306 " "
8307
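/* MESSAGE_TEMPLATE is a blank, line-sized scratch buffer.  show_arch ()
   writes architecture names starting 27 columns in (start = message + 27),
   presumably so they line up with the option descriptions printed by
   md_show_usage, and flushes the line whenever it fills up.  */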
8308 static void
8309 show_arch (FILE *stream, int ext)
8310 {
8311 static char message[] = MESSAGE_TEMPLATE;
8312 char *start = message + 27;
8313 char *p;
8314 int size = sizeof (MESSAGE_TEMPLATE);
8315 int left;
8316 const char *name;
8317 int len;
8318 unsigned int j;
8319
8320 p = start;
8321 left = size - (start - message);
8322 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8323 {
8324 /* Should it be skipped? */
8325 if (cpu_arch [j].skip)
8326 continue;
8327
8328 name = cpu_arch [j].name;
8329 len = cpu_arch [j].len;
8330 if (*name == '.')
8331 {
8332 /* It is an extension. Skip if we aren't asked to show it. */
8333 if (ext)
8334 {
8335 name++;
8336 len--;
8337 }
8338 else
8339 continue;
8340 }
8341 else if (ext)
8342 {
8343 /* It is a processor. Skip if we only show extensions. */
8344 continue;
8345 }
8346
8347 /* Reserve 2 spaces for ", " or ",\0" */
8348 left -= len + 2;
8349
8350 /* Check if there is any room. */
8351 if (left >= 0)
8352 {
8353 if (p != start)
8354 {
8355 *p++ = ',';
8356 *p++ = ' ';
8357 }
8358 p = mempcpy (p, name, len);
8359 }
8360 else
8361 {
8362 /* Output the current message now and start a new one. */
8363 *p++ = ',';
8364 *p = '\0';
8365 fprintf (stream, "%s\n", message);
8366 p = start;
8367 left = size - (start - message) - len - 2;
8368
8369 gas_assert (left >= 0);
8370
8371 p = mempcpy (p, name, len);
8372 }
8373 }
8374
8375 *p = '\0';
8376 fprintf (stream, "%s\n", message);
8377 }
8378
8379 void
8380 md_show_usage (FILE *stream)
8381 {
8382 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8383 fprintf (stream, _("\
8384 -Q ignored\n\
8385 -V print assembler version number\n\
8386 -k ignored\n"));
8387 #endif
8388 fprintf (stream, _("\
8389 -n Do not optimize code alignment\n\
8390 -q quieten some warnings\n"));
8391 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8392 fprintf (stream, _("\
8393 -s ignored\n"));
8394 #endif
8395 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8396 || defined (TE_PE) || defined (TE_PEP))
8397 fprintf (stream, _("\
8398 --32/--64 generate 32bit/64bit code\n"));
8399 #endif
8400 #ifdef SVR4_COMMENT_CHARS
8401 fprintf (stream, _("\
8402 --divide do not treat `/' as a comment character\n"));
8403 #else
8404 fprintf (stream, _("\
8405 --divide ignored\n"));
8406 #endif
8407 fprintf (stream, _("\
8408 -march=CPU[,+EXTENSION...]\n\
8409 generate code for CPU and EXTENSION, CPU is one of:\n"));
8410 show_arch (stream, 0);
8411 fprintf (stream, _("\
8412 EXTENSION is a combination of:\n"));
8413 show_arch (stream, 1);
8414 fprintf (stream, _("\
8415 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8416 show_arch (stream, 0);
8417 fprintf (stream, _("\
8418 -msse2avx encode SSE instructions with VEX prefix\n"));
8419 fprintf (stream, _("\
8420 -msse-check=[none|error|warning]\n\
8421 check SSE instructions\n"));
8422 fprintf (stream, _("\
8423 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8424 length\n"));
8425 fprintf (stream, _("\
8426 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8427 fprintf (stream, _("\
8428 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8429 fprintf (stream, _("\
8430 -mindex-reg support pseudo index registers\n"));
8431 fprintf (stream, _("\
8432 -mnaked-reg don't require `%%' prefix for registers\n"));
8433 fprintf (stream, _("\
8434 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8435 }
8436
8437 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8438 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8439 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8440
8441 /* Pick the target format to use. */
8442
8443 const char *
8444 i386_target_format (void)
8445 {
8446 if (!strcmp (default_arch, "x86_64"))
8447 {
8448 set_code_flag (CODE_64BIT);
8449 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8450 {
8451 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8452 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8453 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8454 cpu_arch_isa_flags.bitfield.cpui486 = 1;
8455 cpu_arch_isa_flags.bitfield.cpui586 = 1;
8456 cpu_arch_isa_flags.bitfield.cpui686 = 1;
8457 cpu_arch_isa_flags.bitfield.cpuclflush = 1;
8458 cpu_arch_isa_flags.bitfield.cpummx = 1;
8459 cpu_arch_isa_flags.bitfield.cpusse = 1;
8460 cpu_arch_isa_flags.bitfield.cpusse2 = 1;
8461 cpu_arch_isa_flags.bitfield.cpulm = 1;
8462 }
8463 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8464 {
8465 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8466 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8467 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8468 cpu_arch_tune_flags.bitfield.cpui486 = 1;
8469 cpu_arch_tune_flags.bitfield.cpui586 = 1;
8470 cpu_arch_tune_flags.bitfield.cpui686 = 1;
8471 cpu_arch_tune_flags.bitfield.cpuclflush = 1;
8472 cpu_arch_tune_flags.bitfield.cpummx = 1;
8473 cpu_arch_tune_flags.bitfield.cpusse = 1;
8474 cpu_arch_tune_flags.bitfield.cpusse2 = 1;
8475 }
8476 }
8477 else if (!strcmp (default_arch, "i386"))
8478 {
8479 set_code_flag (CODE_32BIT);
8480 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8481 {
8482 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8483 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8484 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8485 }
8486 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8487 {
8488 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8489 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8490 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8491 }
8492 }
8493 else
8494 as_fatal (_("Unknown architecture"));
8495 switch (OUTPUT_FLAVOR)
8496 {
8497 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8498 case bfd_target_aout_flavour:
8499 return AOUT_TARGET_FORMAT;
8500 #endif
8501 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8502 # if defined (TE_PE) || defined (TE_PEP)
8503 case bfd_target_coff_flavour:
8504 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8505 # elif defined (TE_GO32)
8506 case bfd_target_coff_flavour:
8507 return "coff-go32";
8508 # else
8509 case bfd_target_coff_flavour:
8510 return "coff-i386";
8511 # endif
8512 #endif
8513 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8514 case bfd_target_elf_flavour:
8515 {
8516 if (flag_code == CODE_64BIT)
8517 {
8518 object_64bit = 1;
8519 use_rela_relocations = 1;
8520 }
8521 if (cpu_arch_isa == PROCESSOR_L1OM)
8522 {
8523 if (flag_code != CODE_64BIT)
8524 as_fatal (_("Intel L1OM is 64bit only"));
8525 return ELF_TARGET_L1OM_FORMAT;
8526 }
8527 else
8528 return (flag_code == CODE_64BIT
8529 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8530 }
8531 #endif
8532 #if defined (OBJ_MACH_O)
8533 case bfd_target_mach_o_flavour:
8534 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8535 #endif
8536 default:
8537 abort ();
8538 return NULL;
8539 }
8540 }
8541
8542 #endif /* OBJ_MAYBE_ more than one */
8543
8544 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8545 void
8546 i386_elf_emit_arch_note (void)
8547 {
8548 if (IS_ELF && cpu_arch_name != NULL)
8549 {
8550 char *p;
8551 asection *seg = now_seg;
8552 subsegT subseg = now_subseg;
8553 Elf_Internal_Note i_note;
8554 Elf_External_Note e_note;
8555 asection *note_secp;
8556 int len;
8557
8558 /* Create the .note section. */
8559 note_secp = subseg_new (".note", 0);
8560 bfd_set_section_flags (stdoutput,
8561 note_secp,
8562 SEC_HAS_CONTENTS | SEC_READONLY);
8563
8564 /* Process the arch string. */
8565 len = strlen (cpu_arch_name);
8566
8567 i_note.namesz = len + 1;
8568 i_note.descsz = 0;
8569 i_note.type = NT_ARCH;
8570 p = frag_more (sizeof (e_note.namesz));
8571 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8572 p = frag_more (sizeof (e_note.descsz));
8573 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8574 p = frag_more (sizeof (e_note.type));
8575 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8576 p = frag_more (len + 1);
8577 strcpy (p, cpu_arch_name);
8578
8579 frag_align (2, 0, 0);
8580
8581 subseg_set (seg, subseg);
8582 }
8583 }
8584 #endif
8585 \f
8586 symbolS *
8587 md_undefined_symbol (char *name)
8589 {
8590 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8591 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8592 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8593 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8594 {
8595 if (!GOT_symbol)
8596 {
8597 if (symbol_find (name))
8598 as_bad (_("GOT already in symbol table"));
8599 GOT_symbol = symbol_new (name, undefined_section,
8600 (valueT) 0, &zero_address_frag);
8601 }
8602 return GOT_symbol;
8603 }
8604 return 0;
8605 }
8606
8607 /* Round up a section size to the appropriate boundary. */
8608
8609 valueT
8610 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8613 {
8614 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8615 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8616 {
8617 /* For a.out, force the section size to be aligned. If we don't do
8618 this, BFD will align it for us, but it will not write out the
8619 final bytes of the section. This may be a bug in BFD, but it is
8620 easier to fix it here since that is how the other a.out targets
8621 work. */
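/* E.g. with a 2**4 (16 byte) section alignment, a size of 0x1003
   rounds up to 0x1010.  */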
8622 int align;
8623
8624 align = bfd_get_section_alignment (stdoutput, segment);
8625 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8626 }
8627 #endif
8628
8629 return size;
8630 }
8631
8632 /* On the i386, PC-relative offsets are relative to the start of the
8633 next instruction. That is, the address of the offset, plus its
8634 size, since the offset is always the last part of the insn. */
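/* For example, a fixup of size 4 placed at address 0x100 makes
   md_pcrel_from () return 0x104, the address just past the offset.  */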
8635
8636 long
8637 md_pcrel_from (fixS *fixP)
8638 {
8639 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8640 }
8641
8642 #ifndef I386COFF
8643
8644 static void
8645 s_bss (int ignore ATTRIBUTE_UNUSED)
8646 {
8647 int temp;
8648
8649 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8650 if (IS_ELF)
8651 obj_elf_section_change_hook ();
8652 #endif
8653 temp = get_absolute_expression ();
8654 subseg_set (bss_section, (subsegT) temp);
8655 demand_empty_rest_of_line ();
8656 }
8657
8658 #endif
8659
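/* Rewrite fixups whose subtrahend is the GOT symbol: a 32-bit pc-relative
   difference becomes GOTPCREL (64-bit objects only), anything else becomes
   GOTOFF (or GOTOFF64 for 64-bit objects).  */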
8660 void
8661 i386_validate_fix (fixS *fixp)
8662 {
8663 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8664 {
8665 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8666 {
8667 if (!object_64bit)
8668 abort ();
8669 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8670 }
8671 else
8672 {
8673 if (!object_64bit)
8674 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8675 else
8676 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8677 }
8678 fixp->fx_subsy = 0;
8679 }
8680 }
8681
8682 arelent *
8683 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8686 {
8687 arelent *rel;
8688 bfd_reloc_code_real_type code;
8689
8690 switch (fixp->fx_r_type)
8691 {
8692 case BFD_RELOC_X86_64_PLT32:
8693 case BFD_RELOC_X86_64_GOT32:
8694 case BFD_RELOC_X86_64_GOTPCREL:
8695 case BFD_RELOC_386_PLT32:
8696 case BFD_RELOC_386_GOT32:
8697 case BFD_RELOC_386_GOTOFF:
8698 case BFD_RELOC_386_GOTPC:
8699 case BFD_RELOC_386_TLS_GD:
8700 case BFD_RELOC_386_TLS_LDM:
8701 case BFD_RELOC_386_TLS_LDO_32:
8702 case BFD_RELOC_386_TLS_IE_32:
8703 case BFD_RELOC_386_TLS_IE:
8704 case BFD_RELOC_386_TLS_GOTIE:
8705 case BFD_RELOC_386_TLS_LE_32:
8706 case BFD_RELOC_386_TLS_LE:
8707 case BFD_RELOC_386_TLS_GOTDESC:
8708 case BFD_RELOC_386_TLS_DESC_CALL:
8709 case BFD_RELOC_X86_64_TLSGD:
8710 case BFD_RELOC_X86_64_TLSLD:
8711 case BFD_RELOC_X86_64_DTPOFF32:
8712 case BFD_RELOC_X86_64_DTPOFF64:
8713 case BFD_RELOC_X86_64_GOTTPOFF:
8714 case BFD_RELOC_X86_64_TPOFF32:
8715 case BFD_RELOC_X86_64_TPOFF64:
8716 case BFD_RELOC_X86_64_GOTOFF64:
8717 case BFD_RELOC_X86_64_GOTPC32:
8718 case BFD_RELOC_X86_64_GOT64:
8719 case BFD_RELOC_X86_64_GOTPCREL64:
8720 case BFD_RELOC_X86_64_GOTPC64:
8721 case BFD_RELOC_X86_64_GOTPLT64:
8722 case BFD_RELOC_X86_64_PLTOFF64:
8723 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8724 case BFD_RELOC_X86_64_TLSDESC_CALL:
8725 case BFD_RELOC_RVA:
8726 case BFD_RELOC_VTABLE_ENTRY:
8727 case BFD_RELOC_VTABLE_INHERIT:
8728 #ifdef TE_PE
8729 case BFD_RELOC_32_SECREL:
8730 #endif
8731 code = fixp->fx_r_type;
8732 break;
8733 case BFD_RELOC_X86_64_32S:
8734 if (!fixp->fx_pcrel)
8735 {
8736 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8737 code = fixp->fx_r_type;
8738 break;
8739 }
8740 default:
8741 if (fixp->fx_pcrel)
8742 {
8743 switch (fixp->fx_size)
8744 {
8745 default:
8746 as_bad_where (fixp->fx_file, fixp->fx_line,
8747 _("can not do %d byte pc-relative relocation"),
8748 fixp->fx_size);
8749 code = BFD_RELOC_32_PCREL;
8750 break;
8751 case 1: code = BFD_RELOC_8_PCREL; break;
8752 case 2: code = BFD_RELOC_16_PCREL; break;
8753 case 4: code = BFD_RELOC_32_PCREL; break;
8754 #ifdef BFD64
8755 case 8: code = BFD_RELOC_64_PCREL; break;
8756 #endif
8757 }
8758 }
8759 else
8760 {
8761 switch (fixp->fx_size)
8762 {
8763 default:
8764 as_bad_where (fixp->fx_file, fixp->fx_line,
8765 _("can not do %d byte relocation"),
8766 fixp->fx_size);
8767 code = BFD_RELOC_32;
8768 break;
8769 case 1: code = BFD_RELOC_8; break;
8770 case 2: code = BFD_RELOC_16; break;
8771 case 4: code = BFD_RELOC_32; break;
8772 #ifdef BFD64
8773 case 8: code = BFD_RELOC_64; break;
8774 #endif
8775 }
8776 }
8777 break;
8778 }
8779
8780 if ((code == BFD_RELOC_32
8781 || code == BFD_RELOC_32_PCREL
8782 || code == BFD_RELOC_X86_64_32S)
8783 && GOT_symbol
8784 && fixp->fx_addsy == GOT_symbol)
8785 {
8786 if (!object_64bit)
8787 code = BFD_RELOC_386_GOTPC;
8788 else
8789 code = BFD_RELOC_X86_64_GOTPC32;
8790 }
8791 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8792 && GOT_symbol
8793 && fixp->fx_addsy == GOT_symbol)
8794 {
8795 code = BFD_RELOC_X86_64_GOTPC64;
8796 }
8797
8798 rel = (arelent *) xmalloc (sizeof (arelent));
8799 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8800 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8801
8802 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8803
8804 if (!use_rela_relocations)
8805 {
8806 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8807 vtable entry to be used in the relocation's section offset. */
8808 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8809 rel->address = fixp->fx_offset;
8810 #if defined (OBJ_COFF) && defined (TE_PE)
8811 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8812 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8813 else
8814 #endif
8815 rel->addend = 0;
8816 }
8817 /* Use the rela in 64bit mode. */
8818 else
8819 {
8820 if (!fixp->fx_pcrel)
8821 rel->addend = fixp->fx_offset;
8822 else
8823 switch (code)
8824 {
8825 case BFD_RELOC_X86_64_PLT32:
8826 case BFD_RELOC_X86_64_GOT32:
8827 case BFD_RELOC_X86_64_GOTPCREL:
8828 case BFD_RELOC_X86_64_TLSGD:
8829 case BFD_RELOC_X86_64_TLSLD:
8830 case BFD_RELOC_X86_64_GOTTPOFF:
8831 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8832 case BFD_RELOC_X86_64_TLSDESC_CALL:
8833 rel->addend = fixp->fx_offset - fixp->fx_size;
8834 break;
8835 default:
8836 rel->addend = (section->vma
8837 - fixp->fx_size
8838 + fixp->fx_addnumber
8839 + md_pcrel_from (fixp));
8840 break;
8841 }
8842 }
8843
8844 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8845 if (rel->howto == NULL)
8846 {
8847 as_bad_where (fixp->fx_file, fixp->fx_line,
8848 _("cannot represent relocation type %s"),
8849 bfd_get_reloc_code_name (code));
8850 /* Set howto to a garbage value so that we can keep going. */
8851 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8852 gas_assert (rel->howto != NULL);
8853 }
8854
8855 return rel;
8856 }
8857
8858 #include "tc-i386-intel.c"
8859
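/* Parse a register name (the '%' prefix is optional here) and replace the
   expression with its DWARF2 register number.  dw2_regnum is indexed by
   flag_code >> 1, which selects the 32-bit or 64-bit numbering -- the same
   index that picks "esp" vs "rsp" below.  */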
8860 void
8861 tc_x86_parse_to_dw2regnum (expressionS *exp)
8862 {
8863 int saved_naked_reg;
8864 char saved_register_dot;
8865
8866 saved_naked_reg = allow_naked_reg;
8867 allow_naked_reg = 1;
8868 saved_register_dot = register_chars['.'];
8869 register_chars['.'] = '.';
8870 allow_pseudo_reg = 1;
8871 expression_and_evaluate (exp);
8872 allow_pseudo_reg = 0;
8873 register_chars['.'] = saved_register_dot;
8874 allow_naked_reg = saved_naked_reg;
8875
8876 if (i386_is_register (exp, intel_syntax) && exp->X_add_number >= 0)
8877 {
8878 if ((addressT) exp->X_add_number < i386_regtab_size)
8879 {
8880 exp->X_op = O_constant;
8881 exp->X_add_number = i386_regtab[exp->X_add_number]
8882 .dw2_regnum[flag_code >> 1];
8883 }
8884 else
8885 exp->X_op = O_illegal;
8886 }
8887 }
8888
8889 void
8890 tc_x86_frame_initial_instructions (void)
8891 {
8892 static unsigned int sp_regno[2];
8893
8894 if (!sp_regno[flag_code >> 1])
8895 {
8896 char *saved_input = input_line_pointer;
8897 char sp[][4] = {"esp", "rsp"};
8898 expressionS exp;
8899
8900 input_line_pointer = sp[flag_code >> 1];
8901 tc_x86_parse_to_dw2regnum (&exp);
8902 gas_assert (exp.X_op == O_constant);
8903 sp_regno[flag_code >> 1] = exp.X_add_number;
8904 input_line_pointer = saved_input;
8905 }
8906
8907 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8908 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8909 }
8910
8911 int
8912 i386_elf_section_type (const char *str, size_t len)
8913 {
8914 if (flag_code == CODE_64BIT
8915 && len == sizeof ("unwind") - 1
8916 && strncmp (str, "unwind", 6) == 0)
8917 return SHT_X86_64_UNWIND;
8918
8919 return -1;
8920 }
8921
8922 #ifdef TE_SOLARIS
8923 void
8924 i386_solaris_fix_up_eh_frame (segT sec)
8925 {
8926 if (flag_code == CODE_64BIT)
8927 elf_section_type (sec) = SHT_X86_64_UNWIND;
8928 }
8929 #endif
8930
8931 #ifdef TE_PE
8932 void
8933 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8934 {
8935 expressionS exp;
8936
8937 exp.X_op = O_secrel;
8938 exp.X_add_symbol = symbol;
8939 exp.X_add_number = 0;
8940 emit_expr (&exp, size);
8941 }
8942 #endif
8943
8944 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8945 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
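/* In 64-bit mode the section flag letter 'l' (e.g. in a flag string such as
   "awl") and, via x86_64_section_word, the word "large" both map to
   SHF_X86_64_LARGE.  */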
8946
8947 bfd_vma
8948 x86_64_section_letter (int letter, char **ptr_msg)
8949 {
8950 if (flag_code == CODE_64BIT)
8951 {
8952 if (letter == 'l')
8953 return SHF_X86_64_LARGE;
8954
8955 *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
8956 }
8957 else
8958 *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
8959 return -1;
8960 }
8961
8962 bfd_vma
8963 x86_64_section_word (char *str, size_t len)
8964 {
8965 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8966 return SHF_X86_64_LARGE;
8967
8968 return -1;
8969 }
8970
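/* Implements .largecomm (see the warning below): like .comm, but in 64-bit
   mode the symbol is allocated in the large common section, with local
   symbols going into .lbss.  */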
8971 static void
8972 handle_large_common (int small ATTRIBUTE_UNUSED)
8973 {
8974 if (flag_code != CODE_64BIT)
8975 {
8976 s_comm_internal (0, elf_common_parse);
8977 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8978 }
8979 else
8980 {
8981 static segT lbss_section;
8982 asection *saved_com_section_ptr = elf_com_section_ptr;
8983 asection *saved_bss_section = bss_section;
8984
8985 if (lbss_section == NULL)
8986 {
8987 flagword applicable;
8988 segT seg = now_seg;
8989 subsegT subseg = now_subseg;
8990
8991 /* The .lbss section is for local .largecomm symbols. */
8992 lbss_section = subseg_new (".lbss", 0);
8993 applicable = bfd_applicable_section_flags (stdoutput);
8994 bfd_set_section_flags (stdoutput, lbss_section,
8995 applicable & SEC_ALLOC);
8996 seg_info (lbss_section)->bss = 1;
8997
8998 subseg_set (seg, subseg);
8999 }
9000
9001 elf_com_section_ptr = &_bfd_elf_large_com_section;
9002 bss_section = lbss_section;
9003
9004 s_comm_internal (0, elf_common_parse);
9005
9006 elf_com_section_ptr = saved_com_section_ptr;
9007 bss_section = saved_bss_section;
9008 }
9009 }
9010 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */