Add .d32 encoding suffix.
binutils-gdb.git: gas/config/tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
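/* For an AT&T operand such as (%ebx,%esi,4), the fields above end up
   as base = 3 (%ebx), index = 6 (%esi) and scale = 2, i.e. the log2
   of the scale factor 4, matching the hardware SIB byte layout.  */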
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136 }
137 arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 #endif
186
187 static const char *default_arch = DEFAULT_ARCH;
188
189 /* VEX prefix. */
190 typedef struct
191 {
192 /* VEX prefix is either 2 byte or 3 byte. */
193 unsigned char bytes[3];
194 unsigned int length;
195 /* Destination or source register specifier. */
196 const reg_entry *register_specifier;
197 } vex_prefix;
198
199 /* 'md_assemble ()' gathers together information and puts it into a
200 i386_insn. */
201
202 union i386_op
203 {
204 expressionS *disps;
205 expressionS *imms;
206 const reg_entry *regs;
207 };
208
209 enum i386_error
210 {
211 operand_size_mismatch,
212 operand_type_mismatch,
213 register_type_mismatch,
214 number_of_operands_mismatch,
215 invalid_instruction_suffix,
216 bad_imm4,
217 old_gcc_only,
218 unsupported_with_intel_mnemonic,
219 unsupported_syntax,
220 unsupported
221 };
222
223 struct _i386_insn
224 {
225 /* TM holds the template for the insn we're currently assembling. */
226 insn_template tm;
227
228 /* SUFFIX holds the instruction size suffix for byte, word, dword
229 or qword, if given. */
230 char suffix;
231
232 /* OPERANDS gives the number of given operands. */
233 unsigned int operands;
234
235 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
236 of given register, displacement, memory operands and immediate
237 operands. */
238 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
239
240 /* TYPES [i] is the type (see above #defines) which tells us how to
241 use OP[i] for the corresponding operand. */
242 i386_operand_type types[MAX_OPERANDS];
243
244 /* Displacement expression, immediate expression, or register for each
245 operand. */
246 union i386_op op[MAX_OPERANDS];
247
248 /* Flags for operands. */
249 unsigned int flags[MAX_OPERANDS];
250 #define Operand_PCrel 1
251
252 /* Relocation type for operand */
253 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
254
255 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
256 the base index byte below. */
257 const reg_entry *base_reg;
258 const reg_entry *index_reg;
259 unsigned int log2_scale_factor;
260
261 /* SEG gives the seg_entries of this insn. They are zero unless
262 explicit segment overrides are given. */
263 const seg_entry *seg[2];
264
265 /* PREFIX holds all the given prefix opcodes (usually null).
266 PREFIXES is the number of prefix opcodes. */
267 unsigned int prefixes;
268 unsigned char prefix[MAX_PREFIXES];
269
270 /* RM and SIB are the modrm byte and the sib byte where the
271 addressing modes of this insn are encoded. */
272 modrm_byte rm;
273 rex_byte rex;
274 sib_byte sib;
275 vex_prefix vex;
276
277 /* Swap operand in encoding. */
278 unsigned int swap_operand;
279
280 /* Force 32bit displacement in encoding. */
281 unsigned int disp32_encoding;
282
283 /* Error message. */
284 enum i386_error error;
285 };
286
287 typedef struct _i386_insn i386_insn;
288
289 /* List of chars besides those in app.c:symbol_chars that can start an
290 operand. Used to prevent the scrubber eating vital white-space. */
291 const char extra_symbol_chars[] = "*%-(["
292 #ifdef LEX_AT
293 "@"
294 #endif
295 #ifdef LEX_QM
296 "?"
297 #endif
298 ;
299
300 #if (defined (TE_I386AIX) \
301 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
302 && !defined (TE_GNU) \
303 && !defined (TE_LINUX) \
304 && !defined (TE_NETWARE) \
305 && !defined (TE_FreeBSD) \
306 && !defined (TE_NetBSD)))
307 /* This array holds the chars that always start a comment. If the
308 pre-processor is disabled, these aren't very useful. The option
309 --divide will remove '/' from this list. */
310 const char *i386_comment_chars = "#/";
311 #define SVR4_COMMENT_CHARS 1
312 #define PREFIX_SEPARATOR '\\'
313
314 #else
315 const char *i386_comment_chars = "#";
316 #define PREFIX_SEPARATOR '/'
317 #endif
318
319 /* This array holds the chars that only start a comment at the beginning of
320 a line. If the line seems to have the form '# 123 filename'
321 .line and .file directives will appear in the pre-processed output.
322 Note that input_file.c hand checks for '#' at the beginning of the
323 first line of the input file. This is because the compiler outputs
324 #NO_APP at the beginning of its output.
325 Also note that comments started like this one will always work if
326 '/' isn't otherwise defined. */
327 const char line_comment_chars[] = "#/";
328
329 const char line_separator_chars[] = ";";
330
331 /* Chars that can be used to separate mant from exp in floating point
332 nums. */
333 const char EXP_CHARS[] = "eE";
334
335 /* Chars that mean this number is a floating point constant,
336 as in 0f12.456
337 or 0d1.2345e12. */
338 const char FLT_CHARS[] = "fFdDxX";
339
340 /* Tables for lexical analysis. */
341 static char mnemonic_chars[256];
342 static char register_chars[256];
343 static char operand_chars[256];
344 static char identifier_chars[256];
345 static char digit_chars[256];
346
347 /* Lexical macros. */
348 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
349 #define is_operand_char(x) (operand_chars[(unsigned char) x])
350 #define is_register_char(x) (register_chars[(unsigned char) x])
351 #define is_space_char(x) ((x) == ' ')
352 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
353 #define is_digit_char(x) (digit_chars[(unsigned char) x])
354
355 /* All non-digit non-letter characters that may occur in an operand. */
356 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
357
358 /* md_assemble() always leaves the strings it's passed unaltered. To
359 effect this we maintain a stack of saved characters that we've smashed
360 with '\0's (indicating end of strings for various sub-fields of the
361 assembler instruction). */
362 static char save_stack[32];
363 static char *save_stack_p;
364 #define END_STRING_AND_SAVE(s) \
365 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
366 #define RESTORE_END_STRING(s) \
367 do { *(s) = *--save_stack_p; } while (0)
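/* Typical pattern: END_STRING_AND_SAVE (l) temporarily NUL-terminates
   the token ending at L so it can be handled as a C string, and the
   matching RESTORE_END_STRING (l) puts the saved character back.
   Saves must be undone in reverse order, and at most 32 may be
   outstanding at once (the size of save_stack above).  */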
368
369 /* The instruction we're assembling. */
370 static i386_insn i;
371
372 /* Possible templates for current insn. */
373 static const templates *current_templates;
374
375 /* Per instruction expressionS buffers: max displacements & immediates. */
376 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
377 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
378
379 /* Current operand we are working on. */
380 static int this_operand = -1;
381
382 /* We support four different modes (code16gcc being a variant of 16 bit
383 mode); FLAG_CODE distinguishes the three basic ones below. */
384
385 enum flag_code {
386 CODE_32BIT,
387 CODE_16BIT,
388 CODE_64BIT };
389
390 static enum flag_code flag_code;
391 static unsigned int object_64bit;
392 static int use_rela_relocations = 0;
393
394 /* The names used to print error messages. */
395 static const char *flag_code_names[] =
396 {
397 "32",
398 "16",
399 "64"
400 };
401
402 /* 1 for intel syntax,
403 0 if att syntax. */
404 static int intel_syntax = 0;
405
406 /* 1 for intel mnemonic,
407 0 if att mnemonic. */
408 static int intel_mnemonic = !SYSV386_COMPAT;
409
410 /* 1 if support old (<= 2.8.1) versions of gcc. */
411 static int old_gcc = OLDGCC_COMPAT;
412
413 /* 1 if pseudo registers are permitted. */
414 static int allow_pseudo_reg = 0;
415
416 /* 1 if register prefix % not required. */
417 static int allow_naked_reg = 0;
418
419 /* 1 if pseudo index register, eiz/riz, is allowed. */
420 static int allow_index_reg = 0;
421
422 static enum
423 {
424 sse_check_none = 0,
425 sse_check_warning,
426 sse_check_error
427 }
428 sse_check;
429
430 /* Register prefix used for error message. */
431 static const char *register_prefix = "%";
432
433 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
434 leave, push, and pop instructions so that gcc has the same stack
435 frame as in 32 bit mode. */
436 static char stackop_size = '\0';
437
438 /* Non-zero to optimize code alignment. */
439 int optimize_align_code = 1;
440
441 /* Non-zero to quieten some warnings. */
442 static int quiet_warnings = 0;
443
444 /* CPU name. */
445 static const char *cpu_arch_name = NULL;
446 static char *cpu_sub_arch_name = NULL;
447
448 /* CPU feature flags. */
449 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
450
451 /* Non-zero if we have selected a cpu we are generating instructions for. */
452 static int cpu_arch_tune_set = 0;
453
454 /* Cpu we are generating instructions for. */
455 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
456
457 /* CPU feature flags of cpu we are generating instructions for. */
458 static i386_cpu_flags cpu_arch_tune_flags;
459
460 /* CPU instruction set architecture used. */
461 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
462
463 /* CPU feature flags of instruction set architecture used. */
464 i386_cpu_flags cpu_arch_isa_flags;
465
466 /* If set, conditional jumps are not automatically promoted to handle
467 larger than a byte offset. */
468 static unsigned int no_cond_jump_promotion = 0;
469
470 /* Encode SSE instructions with VEX prefix. */
471 static unsigned int sse2avx;
472
473 /* Encode scalar AVX instructions with specific vector length. */
474 static enum
475 {
476 vex128 = 0,
477 vex256
478 } avxscalar;
479
480 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
481 static symbolS *GOT_symbol;
482
483 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
484 unsigned int x86_dwarf2_return_column;
485
486 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
487 int x86_cie_data_alignment;
488
489 /* Interface to relax_segment.
490 There are 3 major relax states for 386 jump insns because the
491 different types of jumps add different sizes to frags when we're
492 figuring out what sort of jump to choose to reach a given label. */
493
494 /* Types. */
495 #define UNCOND_JUMP 0
496 #define COND_JUMP 1
497 #define COND_JUMP86 2
498
499 /* Sizes. */
500 #define CODE16 1
501 #define SMALL 0
502 #define SMALL16 (SMALL | CODE16)
503 #define BIG 2
504 #define BIG16 (BIG | CODE16)
505
506 #ifndef INLINE
507 #ifdef __GNUC__
508 #define INLINE __inline__
509 #else
510 #define INLINE
511 #endif
512 #endif
513
514 #define ENCODE_RELAX_STATE(type, size) \
515 ((relax_substateT) (((type) << 2) | (size)))
516 #define TYPE_FROM_RELAX_STATE(s) \
517 ((s) >> 2)
518 #define DISP_SIZE_FROM_RELAX_STATE(s) \
519 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
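/* For example, ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2,
   i.e. 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) yields a 4 byte displacement.  */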
520
521 /* This table is used by relax_frag to promote short jumps to long
522 ones where necessary. SMALL (short) jumps may be promoted to BIG
523 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
524 don't allow a short jump in a 32 bit code segment to be promoted to
525 a 16 bit offset jump because it's slower (requires data size
526 prefix), and doesn't work, unless the destination is in the bottom
527 64k of the code segment (The top 16 bits of eip are zeroed). */
528
529 const relax_typeS md_relax_table[] =
530 {
531 /* The fields are:
532 1) most positive reach of this state,
533 2) most negative reach of this state,
534 3) how many bytes this mode will have in the variable part of the frag
535 4) which index into the table to try if we can't fit into this one. */
536
537 /* UNCOND_JUMP states. */
538 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
539 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
540 /* dword jmp adds 4 bytes to frag:
541 0 extra opcode bytes, 4 displacement bytes. */
542 {0, 0, 4, 0},
543 /* word jmp adds 2 bytes to frag:
544 0 extra opcode bytes, 2 displacement bytes. */
545 {0, 0, 2, 0},
546
547 /* COND_JUMP states. */
548 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
549 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
550 /* dword conditionals add 5 bytes to frag:
551 1 extra opcode byte, 4 displacement bytes. */
552 {0, 0, 5, 0},
553 /* word conditionals add 3 bytes to frag:
554 1 extra opcode byte, 2 displacement bytes. */
555 {0, 0, 3, 0},
556
557 /* COND_JUMP86 states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
560 /* dword conditionals add 5 bytes to frag:
561 1 extra opcode byte, 4 displacement bytes. */
562 {0, 0, 5, 0},
563 /* word conditionals add 4 bytes to frag:
564 1 displacement byte and a 3 byte long branch insn. */
565 {0, 0, 4, 0}
566 };
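/* The table is indexed by ENCODE_RELAX_STATE (type, size): entry 0 is
   (UNCOND_JUMP, SMALL), which grows into entry 2, (UNCOND_JUMP, BIG);
   entry 5 is (COND_JUMP, SMALL16), which grows into entry 7,
   (COND_JUMP, BIG16); and so on.  */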
567
568 static const arch_entry cpu_arch[] =
569 {
570 /* Do not replace the first two entries - i386_target_format()
571 relies on them being there in this order. */
572 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
573 CPU_GENERIC32_FLAGS, 0, 0 },
574 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
575 CPU_GENERIC64_FLAGS, 0, 0 },
576 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
577 CPU_NONE_FLAGS, 0, 0 },
578 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
579 CPU_I186_FLAGS, 0, 0 },
580 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
581 CPU_I286_FLAGS, 0, 0 },
582 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
583 CPU_I386_FLAGS, 0, 0 },
584 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
585 CPU_I486_FLAGS, 0, 0 },
586 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
587 CPU_I586_FLAGS, 0, 0 },
588 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
589 CPU_I686_FLAGS, 0, 0 },
590 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
591 CPU_I586_FLAGS, 0, 0 },
592 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
593 CPU_PENTIUMPRO_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
595 CPU_P2_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
597 CPU_P3_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
599 CPU_P4_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
601 CPU_CORE_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
603 CPU_NOCONA_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
605 CPU_CORE_FLAGS, 1, 0 },
606 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
607 CPU_CORE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
609 CPU_CORE2_FLAGS, 1, 0 },
610 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
611 CPU_CORE2_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
613 CPU_COREI7_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
615 CPU_L1OM_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
617 CPU_K6_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
619 CPU_K6_2_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
621 CPU_ATHLON_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
623 CPU_K8_FLAGS, 1, 0 },
624 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
625 CPU_K8_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
627 CPU_K8_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
629 CPU_AMDFAM10_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
631 CPU_BDVER1_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
633 CPU_8087_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
635 CPU_287_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
637 CPU_387_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
639 CPU_ANY87_FLAGS, 0, 1 },
640 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
641 CPU_MMX_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
643 CPU_3DNOWA_FLAGS, 0, 1 },
644 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
645 CPU_SSE_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
647 CPU_SSE2_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
649 CPU_SSE3_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
651 CPU_SSSE3_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
653 CPU_SSE4_1_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
655 CPU_SSE4_2_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
657 CPU_SSE4_2_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
659 CPU_ANY_SSE_FLAGS, 0, 1 },
660 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
661 CPU_AVX_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
663 CPU_ANY_AVX_FLAGS, 0, 1 },
664 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
665 CPU_VMX_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
667 CPU_SMX_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
669 CPU_XSAVE_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
671 CPU_XSAVEOPT_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
673 CPU_AES_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
675 CPU_PCLMUL_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
677 CPU_PCLMUL_FLAGS, 1, 0 },
678 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
679 CPU_FSGSBASE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
681 CPU_RDRND_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
683 CPU_F16C_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
685 CPU_FMA_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
687 CPU_FMA4_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
689 CPU_XOP_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
691 CPU_LWP_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
693 CPU_MOVBE_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
695 CPU_EPT_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
697 CPU_CLFLUSH_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
699 CPU_NOP_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
701 CPU_SYSCALL_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
703 CPU_RDTSCP_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
705 CPU_3DNOW_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
707 CPU_3DNOWA_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
709 CPU_PADLOCK_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
711 CPU_SVME_FLAGS, 1, 0 },
712 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
713 CPU_SVME_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
715 CPU_SSE4A_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
717 CPU_ABM_FLAGS, 0, 0 },
718 };
719
720 #ifdef I386COFF
721 /* Like s_lcomm_internal in gas/read.c but the alignment string
722 is allowed to be optional. */
723
724 static symbolS *
725 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
726 {
727 addressT align = 0;
728
729 SKIP_WHITESPACE ();
730
731 if (needs_align
732 && *input_line_pointer == ',')
733 {
734 align = parse_align (needs_align - 1);
735
736 if (align == (addressT) -1)
737 return NULL;
738 }
739 else
740 {
741 if (size >= 8)
742 align = 3;
743 else if (size >= 4)
744 align = 2;
745 else if (size >= 2)
746 align = 1;
747 else
748 align = 0;
749 }
750
751 bss_alloc (symbolP, size, align);
752 return symbolP;
753 }
754
755 static void
756 pe_lcomm (int needs_align)
757 {
758 s_comm_internal (needs_align * 2, pe_lcomm_internal);
759 }
760 #endif
761
762 const pseudo_typeS md_pseudo_table[] =
763 {
764 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
765 {"align", s_align_bytes, 0},
766 #else
767 {"align", s_align_ptwo, 0},
768 #endif
769 {"arch", set_cpu_arch, 0},
770 #ifndef I386COFF
771 {"bss", s_bss, 0},
772 #else
773 {"lcomm", pe_lcomm, 1},
774 #endif
775 {"ffloat", float_cons, 'f'},
776 {"dfloat", float_cons, 'd'},
777 {"tfloat", float_cons, 'x'},
778 {"value", cons, 2},
779 {"slong", signed_cons, 4},
780 {"noopt", s_ignore, 0},
781 {"optim", s_ignore, 0},
782 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
783 {"code16", set_code_flag, CODE_16BIT},
784 {"code32", set_code_flag, CODE_32BIT},
785 {"code64", set_code_flag, CODE_64BIT},
786 {"intel_syntax", set_intel_syntax, 1},
787 {"att_syntax", set_intel_syntax, 0},
788 {"intel_mnemonic", set_intel_mnemonic, 1},
789 {"att_mnemonic", set_intel_mnemonic, 0},
790 {"allow_index_reg", set_allow_index_reg, 1},
791 {"disallow_index_reg", set_allow_index_reg, 0},
792 {"sse_check", set_sse_check, 0},
793 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
794 {"largecomm", handle_large_common, 0},
795 #else
796 {"file", (void (*) (int)) dwarf2_directive_file, 0},
797 {"loc", dwarf2_directive_loc, 0},
798 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
799 #endif
800 #ifdef TE_PE
801 {"secrel32", pe_directive_secrel, 0},
802 #endif
803 {0, 0, 0}
804 };
805
806 /* For interface with expression (). */
807 extern char *input_line_pointer;
808
809 /* Hash table for instruction mnemonic lookup. */
810 static struct hash_control *op_hash;
811
812 /* Hash table for register lookup. */
813 static struct hash_control *reg_hash;
814 \f
815 void
816 i386_align_code (fragS *fragP, int count)
817 {
818 /* Various efficient no-op patterns for aligning code labels.
819 Note: Don't try to assemble the instructions in the comments.
820 0L and 0w are not legal. */
821 static const char f32_1[] =
822 {0x90}; /* nop */
823 static const char f32_2[] =
824 {0x66,0x90}; /* xchg %ax,%ax */
825 static const char f32_3[] =
826 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
827 static const char f32_4[] =
828 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
829 static const char f32_5[] =
830 {0x90, /* nop */
831 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
832 static const char f32_6[] =
833 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
834 static const char f32_7[] =
835 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
836 static const char f32_8[] =
837 {0x90, /* nop */
838 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
839 static const char f32_9[] =
840 {0x89,0xf6, /* movl %esi,%esi */
841 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
842 static const char f32_10[] =
843 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
844 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
845 static const char f32_11[] =
846 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
847 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
848 static const char f32_12[] =
849 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
850 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
851 static const char f32_13[] =
852 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
853 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
854 static const char f32_14[] =
855 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
856 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
857 static const char f16_3[] =
858 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
859 static const char f16_4[] =
860 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
861 static const char f16_5[] =
862 {0x90, /* nop */
863 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
864 static const char f16_6[] =
865 {0x89,0xf6, /* mov %si,%si */
866 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
867 static const char f16_7[] =
868 {0x8d,0x74,0x00, /* lea 0(%si),%si */
869 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
870 static const char f16_8[] =
871 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
872 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
873 static const char jump_31[] =
874 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
875 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
876 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
877 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
878 static const char *const f32_patt[] = {
879 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
880 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
881 };
882 static const char *const f16_patt[] = {
883 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
884 };
885 /* nopl (%[re]ax) */
886 static const char alt_3[] =
887 {0x0f,0x1f,0x00};
888 /* nopl 0(%[re]ax) */
889 static const char alt_4[] =
890 {0x0f,0x1f,0x40,0x00};
891 /* nopl 0(%[re]ax,%[re]ax,1) */
892 static const char alt_5[] =
893 {0x0f,0x1f,0x44,0x00,0x00};
894 /* nopw 0(%[re]ax,%[re]ax,1) */
895 static const char alt_6[] =
896 {0x66,0x0f,0x1f,0x44,0x00,0x00};
897 /* nopl 0L(%[re]ax) */
898 static const char alt_7[] =
899 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
900 /* nopl 0L(%[re]ax,%[re]ax,1) */
901 static const char alt_8[] =
902 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
903 /* nopw 0L(%[re]ax,%[re]ax,1) */
904 static const char alt_9[] =
905 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
906 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
907 static const char alt_10[] =
908 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
909 /* data16
910 nopw %cs:0L(%[re]ax,%[re]ax,1) */
911 static const char alt_long_11[] =
912 {0x66,
913 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
914 /* data16
915 data16
916 nopw %cs:0L(%[re]ax,%[re]ax,1) */
917 static const char alt_long_12[] =
918 {0x66,
919 0x66,
920 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
921 /* data16
922 data16
923 data16
924 nopw %cs:0L(%[re]ax,%[re]ax,1) */
925 static const char alt_long_13[] =
926 {0x66,
927 0x66,
928 0x66,
929 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
930 /* data16
931 data16
932 data16
933 data16
934 nopw %cs:0L(%[re]ax,%[re]ax,1) */
935 static const char alt_long_14[] =
936 {0x66,
937 0x66,
938 0x66,
939 0x66,
940 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* data16
942 data16
943 data16
944 data16
945 data16
946 nopw %cs:0L(%[re]ax,%[re]ax,1) */
947 static const char alt_long_15[] =
948 {0x66,
949 0x66,
950 0x66,
951 0x66,
952 0x66,
953 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
954 /* nopl 0(%[re]ax,%[re]ax,1)
955 nopw 0(%[re]ax,%[re]ax,1) */
956 static const char alt_short_11[] =
957 {0x0f,0x1f,0x44,0x00,0x00,
958 0x66,0x0f,0x1f,0x44,0x00,0x00};
959 /* nopw 0(%[re]ax,%[re]ax,1)
960 nopw 0(%[re]ax,%[re]ax,1) */
961 static const char alt_short_12[] =
962 {0x66,0x0f,0x1f,0x44,0x00,0x00,
963 0x66,0x0f,0x1f,0x44,0x00,0x00};
964 /* nopw 0(%[re]ax,%[re]ax,1)
965 nopl 0L(%[re]ax) */
966 static const char alt_short_13[] =
967 {0x66,0x0f,0x1f,0x44,0x00,0x00,
968 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
969 /* nopl 0L(%[re]ax)
970 nopl 0L(%[re]ax) */
971 static const char alt_short_14[] =
972 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
973 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
974 /* nopl 0L(%[re]ax)
975 nopl 0L(%[re]ax,%[re]ax,1) */
976 static const char alt_short_15[] =
977 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
978 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
979 static const char *const alt_short_patt[] = {
980 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
981 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
982 alt_short_14, alt_short_15
983 };
984 static const char *const alt_long_patt[] = {
985 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
986 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
987 alt_long_14, alt_long_15
988 };
989
990 /* Only align if the requested count is positive and does not
991 exceed MAX_MEM_FOR_RS_ALIGN_CODE. */
991 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
992 return;
993
994 /* We need to decide which NOP sequence to use for 32bit and
995 64bit. When -mtune= is used:
996
997 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
998 PROCESSOR_GENERIC32, f32_patt will be used.
999 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1000 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1001 PROCESSOR_GENERIC64, alt_long_patt will be used.
1002 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1003 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
1004 will be used.
1005
1006 When -mtune= isn't used, alt_long_patt will be used if
1007 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1008 be used.
1009
1010 When -march= or .arch is used, we can't use anything beyond
1011 cpu_arch_isa_flags. */
1012
1013 if (flag_code == CODE_16BIT)
1014 {
1015 if (count > 8)
1016 {
1017 memcpy (fragP->fr_literal + fragP->fr_fix,
1018 jump_31, count);
1019 /* Adjust jump offset. */
1020 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1021 }
1022 else
1023 memcpy (fragP->fr_literal + fragP->fr_fix,
1024 f16_patt[count - 1], count);
1025 }
1026 else
1027 {
1028 const char *const *patt = NULL;
1029
1030 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1031 {
1032 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1033 switch (cpu_arch_tune)
1034 {
1035 case PROCESSOR_UNKNOWN:
1036 /* We use cpu_arch_isa_flags to check if we SHOULD
1037 optimize with nops. */
1038 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1039 patt = alt_long_patt;
1040 else
1041 patt = f32_patt;
1042 break;
1043 case PROCESSOR_PENTIUMPRO:
1044 case PROCESSOR_PENTIUM4:
1045 case PROCESSOR_NOCONA:
1046 case PROCESSOR_CORE:
1047 case PROCESSOR_CORE2:
1048 case PROCESSOR_COREI7:
1049 case PROCESSOR_L1OM:
1050 case PROCESSOR_GENERIC64:
1051 patt = alt_long_patt;
1052 break;
1053 case PROCESSOR_K6:
1054 case PROCESSOR_ATHLON:
1055 case PROCESSOR_K8:
1056 case PROCESSOR_AMDFAM10:
1057 case PROCESSOR_BDVER1:
1058 patt = alt_short_patt;
1059 break;
1060 case PROCESSOR_I386:
1061 case PROCESSOR_I486:
1062 case PROCESSOR_PENTIUM:
1063 case PROCESSOR_GENERIC32:
1064 patt = f32_patt;
1065 break;
1066 }
1067 }
1068 else
1069 {
1070 switch (fragP->tc_frag_data.tune)
1071 {
1072 case PROCESSOR_UNKNOWN:
1073 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1074 PROCESSOR_UNKNOWN. */
1075 abort ();
1076 break;
1077
1078 case PROCESSOR_I386:
1079 case PROCESSOR_I486:
1080 case PROCESSOR_PENTIUM:
1081 case PROCESSOR_K6:
1082 case PROCESSOR_ATHLON:
1083 case PROCESSOR_K8:
1084 case PROCESSOR_AMDFAM10:
1085 case PROCESSOR_BDVER1:
1086 case PROCESSOR_GENERIC32:
1087 /* We use cpu_arch_isa_flags to check if we CAN optimize
1088 with nops. */
1089 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1090 patt = alt_short_patt;
1091 else
1092 patt = f32_patt;
1093 break;
1094 case PROCESSOR_PENTIUMPRO:
1095 case PROCESSOR_PENTIUM4:
1096 case PROCESSOR_NOCONA:
1097 case PROCESSOR_CORE:
1098 case PROCESSOR_CORE2:
1099 case PROCESSOR_COREI7:
1100 case PROCESSOR_L1OM:
1101 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1102 patt = alt_long_patt;
1103 else
1104 patt = f32_patt;
1105 break;
1106 case PROCESSOR_GENERIC64:
1107 patt = alt_long_patt;
1108 break;
1109 }
1110 }
1111
1112 if (patt == f32_patt)
1113 {
1114 /* If the padding is smaller than the limit computed below, use
1115 the normal NOP patterns. Otherwise, use a jump instruction and
1116 adjust its offset. */
1117 int limit;
1118
1119 /* For 64bit, the limit is 3 bytes. */
1120 if (flag_code == CODE_64BIT
1121 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1122 limit = 3;
1123 else
1124 limit = 15;
1125 if (count < limit)
1126 memcpy (fragP->fr_literal + fragP->fr_fix,
1127 patt[count - 1], count);
1128 else
1129 {
1130 memcpy (fragP->fr_literal + fragP->fr_fix,
1131 jump_31, count);
1132 /* Adjust jump offset. */
1133 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1134 }
1135 }
1136 else
1137 {
1138 /* Maximum length of an instruction is 15 bytes. If the
1139 padding is greater than 15 bytes and we don't use jump,
1140 we have to break it into smaller pieces. */
1141 int padding = count;
1142 while (padding > 15)
1143 {
1144 padding -= 15;
1145 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1146 patt [14], 15);
1147 }
1148
1149 if (padding)
1150 memcpy (fragP->fr_literal + fragP->fr_fix,
1151 patt [padding - 1], padding);
1152 }
1153 }
1154 fragP->fr_var = count;
1155 }
1156
1157 static INLINE int
1158 operand_type_all_zero (const union i386_operand_type *x)
1159 {
1160 switch (ARRAY_SIZE(x->array))
1161 {
1162 case 3:
1163 if (x->array[2])
1164 return 0;
1165 case 2:
1166 if (x->array[1])
1167 return 0;
1168 case 1:
1169 return !x->array[0];
1170 default:
1171 abort ();
1172 }
1173 }
1174
1175 static INLINE void
1176 operand_type_set (union i386_operand_type *x, unsigned int v)
1177 {
1178 switch (ARRAY_SIZE(x->array))
1179 {
1180 case 3:
1181 x->array[2] = v;
1182 case 2:
1183 x->array[1] = v;
1184 case 1:
1185 x->array[0] = v;
1186 break;
1187 default:
1188 abort ();
1189 }
1190 }
1191
1192 static INLINE int
1193 operand_type_equal (const union i386_operand_type *x,
1194 const union i386_operand_type *y)
1195 {
1196 switch (ARRAY_SIZE(x->array))
1197 {
1198 case 3:
1199 if (x->array[2] != y->array[2])
1200 return 0;
1201 case 2:
1202 if (x->array[1] != y->array[1])
1203 return 0;
1204 case 1:
1205 return x->array[0] == y->array[0];
1206 break;
1207 default:
1208 abort ();
1209 }
1210 }
1211
1212 static INLINE int
1213 cpu_flags_all_zero (const union i386_cpu_flags *x)
1214 {
1215 switch (ARRAY_SIZE(x->array))
1216 {
1217 case 3:
1218 if (x->array[2])
1219 return 0;
1220 case 2:
1221 if (x->array[1])
1222 return 0;
1223 case 1:
1224 return !x->array[0];
1225 default:
1226 abort ();
1227 }
1228 }
1229
1230 static INLINE void
1231 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1232 {
1233 switch (ARRAY_SIZE(x->array))
1234 {
1235 case 3:
1236 x->array[2] = v;
1237 case 2:
1238 x->array[1] = v;
1239 case 1:
1240 x->array[0] = v;
1241 break;
1242 default:
1243 abort ();
1244 }
1245 }
1246
1247 static INLINE int
1248 cpu_flags_equal (const union i386_cpu_flags *x,
1249 const union i386_cpu_flags *y)
1250 {
1251 switch (ARRAY_SIZE(x->array))
1252 {
1253 case 3:
1254 if (x->array[2] != y->array[2])
1255 return 0;
1256 case 2:
1257 if (x->array[1] != y->array[1])
1258 return 0;
1259 case 1:
1260 return x->array[0] == y->array[0];
1261 break;
1262 default:
1263 abort ();
1264 }
1265 }
1266
1267 static INLINE int
1268 cpu_flags_check_cpu64 (i386_cpu_flags f)
1269 {
1270 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1271 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1272 }
1273
1274 static INLINE i386_cpu_flags
1275 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1276 {
1277 switch (ARRAY_SIZE (x.array))
1278 {
1279 case 3:
1280 x.array [2] &= y.array [2];
1281 case 2:
1282 x.array [1] &= y.array [1];
1283 case 1:
1284 x.array [0] &= y.array [0];
1285 break;
1286 default:
1287 abort ();
1288 }
1289 return x;
1290 }
1291
1292 static INLINE i386_cpu_flags
1293 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1294 {
1295 switch (ARRAY_SIZE (x.array))
1296 {
1297 case 3:
1298 x.array [2] |= y.array [2];
1299 case 2:
1300 x.array [1] |= y.array [1];
1301 case 1:
1302 x.array [0] |= y.array [0];
1303 break;
1304 default:
1305 abort ();
1306 }
1307 return x;
1308 }
1309
1310 static INLINE i386_cpu_flags
1311 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1312 {
1313 switch (ARRAY_SIZE (x.array))
1314 {
1315 case 3:
1316 x.array [2] &= ~y.array [2];
1317 case 2:
1318 x.array [1] &= ~y.array [1];
1319 case 1:
1320 x.array [0] &= ~y.array [0];
1321 break;
1322 default:
1323 abort ();
1324 }
1325 return x;
1326 }
1327
1328 #define CPU_FLAGS_ARCH_MATCH 0x1
1329 #define CPU_FLAGS_64BIT_MATCH 0x2
1330 #define CPU_FLAGS_AES_MATCH 0x4
1331 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1332 #define CPU_FLAGS_AVX_MATCH 0x10
1333
1334 #define CPU_FLAGS_32BIT_MATCH \
1335 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1336 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1337 #define CPU_FLAGS_PERFECT_MATCH \
1338 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
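/* Numerically, CPU_FLAGS_32BIT_MATCH is 0x1d and
   CPU_FLAGS_PERFECT_MATCH is 0x1f, i.e. every match bit set.  */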
1339
1340 /* Return CPU flags match bits. */
1341
1342 static int
1343 cpu_flags_match (const insn_template *t)
1344 {
1345 i386_cpu_flags x = t->cpu_flags;
1346 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1347
1348 x.bitfield.cpu64 = 0;
1349 x.bitfield.cpuno64 = 0;
1350
1351 if (cpu_flags_all_zero (&x))
1352 {
1353 /* This instruction is available on all archs. */
1354 match |= CPU_FLAGS_32BIT_MATCH;
1355 }
1356 else
1357 {
1358 /* This instruction is available only on some archs. */
1359 i386_cpu_flags cpu = cpu_arch_flags;
1360
1361 cpu.bitfield.cpu64 = 0;
1362 cpu.bitfield.cpuno64 = 0;
1363 cpu = cpu_flags_and (x, cpu);
1364 if (!cpu_flags_all_zero (&cpu))
1365 {
1366 if (x.bitfield.cpuavx)
1367 {
1368 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1369 if (cpu.bitfield.cpuavx)
1370 {
1371 /* Check SSE2AVX. */
1372 if (!t->opcode_modifier.sse2avx || sse2avx)
1373 {
1374 match |= (CPU_FLAGS_ARCH_MATCH
1375 | CPU_FLAGS_AVX_MATCH);
1376 /* Check AES. */
1377 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1378 match |= CPU_FLAGS_AES_MATCH;
1379 /* Check PCLMUL. */
1380 if (!x.bitfield.cpupclmul
1381 || cpu.bitfield.cpupclmul)
1382 match |= CPU_FLAGS_PCLMUL_MATCH;
1383 }
1384 }
1385 else
1386 match |= CPU_FLAGS_ARCH_MATCH;
1387 }
1388 else
1389 match |= CPU_FLAGS_32BIT_MATCH;
1390 }
1391 }
1392 return match;
1393 }
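/* In particular, a template with no CPU feature bits set (other than
   Cpu64/CpuNo64) always gets CPU_FLAGS_32BIT_MATCH, and the full
   CPU_FLAGS_PERFECT_MATCH when the 64-bit check passes as well.  */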
1394
1395 static INLINE i386_operand_type
1396 operand_type_and (i386_operand_type x, i386_operand_type y)
1397 {
1398 switch (ARRAY_SIZE (x.array))
1399 {
1400 case 3:
1401 x.array [2] &= y.array [2];
1402 case 2:
1403 x.array [1] &= y.array [1];
1404 case 1:
1405 x.array [0] &= y.array [0];
1406 break;
1407 default:
1408 abort ();
1409 }
1410 return x;
1411 }
1412
1413 static INLINE i386_operand_type
1414 operand_type_or (i386_operand_type x, i386_operand_type y)
1415 {
1416 switch (ARRAY_SIZE (x.array))
1417 {
1418 case 3:
1419 x.array [2] |= y.array [2];
1420 case 2:
1421 x.array [1] |= y.array [1];
1422 case 1:
1423 x.array [0] |= y.array [0];
1424 break;
1425 default:
1426 abort ();
1427 }
1428 return x;
1429 }
1430
1431 static INLINE i386_operand_type
1432 operand_type_xor (i386_operand_type x, i386_operand_type y)
1433 {
1434 switch (ARRAY_SIZE (x.array))
1435 {
1436 case 3:
1437 x.array [2] ^= y.array [2];
1438 case 2:
1439 x.array [1] ^= y.array [1];
1440 case 1:
1441 x.array [0] ^= y.array [0];
1442 break;
1443 default:
1444 abort ();
1445 }
1446 return x;
1447 }
1448
1449 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1450 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1451 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1452 static const i386_operand_type inoutportreg
1453 = OPERAND_TYPE_INOUTPORTREG;
1454 static const i386_operand_type reg16_inoutportreg
1455 = OPERAND_TYPE_REG16_INOUTPORTREG;
1456 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1457 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1458 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1459 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1460 static const i386_operand_type anydisp
1461 = OPERAND_TYPE_ANYDISP;
1462 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1463 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1464 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1465 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1466 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1467 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1468 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1469 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1470 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1471 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1472 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1473 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1474
1475 enum operand_type
1476 {
1477 reg,
1478 imm,
1479 disp,
1480 anymem
1481 };
1482
1483 static INLINE int
1484 operand_type_check (i386_operand_type t, enum operand_type c)
1485 {
1486 switch (c)
1487 {
1488 case reg:
1489 return (t.bitfield.reg8
1490 || t.bitfield.reg16
1491 || t.bitfield.reg32
1492 || t.bitfield.reg64);
1493
1494 case imm:
1495 return (t.bitfield.imm8
1496 || t.bitfield.imm8s
1497 || t.bitfield.imm16
1498 || t.bitfield.imm32
1499 || t.bitfield.imm32s
1500 || t.bitfield.imm64);
1501
1502 case disp:
1503 return (t.bitfield.disp8
1504 || t.bitfield.disp16
1505 || t.bitfield.disp32
1506 || t.bitfield.disp32s
1507 || t.bitfield.disp64);
1508
1509 case anymem:
1510 return (t.bitfield.disp8
1511 || t.bitfield.disp16
1512 || t.bitfield.disp32
1513 || t.bitfield.disp32s
1514 || t.bitfield.disp64
1515 || t.bitfield.baseindex);
1516
1517 default:
1518 abort ();
1519 }
1520
1521 return 0;
1522 }
1523
1524 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1525 operand J for instruction template T. */
1526
1527 static INLINE int
1528 match_reg_size (const insn_template *t, unsigned int j)
1529 {
1530 return !((i.types[j].bitfield.byte
1531 && !t->operand_types[j].bitfield.byte)
1532 || (i.types[j].bitfield.word
1533 && !t->operand_types[j].bitfield.word)
1534 || (i.types[j].bitfield.dword
1535 && !t->operand_types[j].bitfield.dword)
1536 || (i.types[j].bitfield.qword
1537 && !t->operand_types[j].bitfield.qword));
1538 }
1539
1540 /* Return 1 if there is no conflict in any size on operand J for
1541 instruction template T. */
1542
1543 static INLINE int
1544 match_mem_size (const insn_template *t, unsigned int j)
1545 {
1546 return (match_reg_size (t, j)
1547 && !((i.types[j].bitfield.unspecified
1548 && !t->operand_types[j].bitfield.unspecified)
1549 || (i.types[j].bitfield.fword
1550 && !t->operand_types[j].bitfield.fword)
1551 || (i.types[j].bitfield.tbyte
1552 && !t->operand_types[j].bitfield.tbyte)
1553 || (i.types[j].bitfield.xmmword
1554 && !t->operand_types[j].bitfield.xmmword)
1555 || (i.types[j].bitfield.ymmword
1556 && !t->operand_types[j].bitfield.ymmword)));
1557 }
1558
1559 /* Return 1 if there is no size conflict on any operands for
1560 instruction template T. */
1561
1562 static INLINE int
1563 operand_size_match (const insn_template *t)
1564 {
1565 unsigned int j;
1566 int match = 1;
1567
1568 /* Don't check jump instructions. */
1569 if (t->opcode_modifier.jump
1570 || t->opcode_modifier.jumpbyte
1571 || t->opcode_modifier.jumpdword
1572 || t->opcode_modifier.jumpintersegment)
1573 return match;
1574
1575 /* Check memory and accumulator operand size. */
1576 for (j = 0; j < i.operands; j++)
1577 {
1578 if (t->operand_types[j].bitfield.anysize)
1579 continue;
1580
1581 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1582 {
1583 match = 0;
1584 break;
1585 }
1586
1587 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1588 {
1589 match = 0;
1590 break;
1591 }
1592 }
1593
1594 if (match)
1595 return match;
1596 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1597 {
1598 mismatch:
1599 i.error = operand_size_mismatch;
1600 return 0;
1601 }
1602
1603 /* Check reverse. */
1604 gas_assert (i.operands == 2);
1605
1606 match = 1;
1607 for (j = 0; j < 2; j++)
1608 {
1609 if (t->operand_types[j].bitfield.acc
1610 && !match_reg_size (t, j ? 0 : 1))
1611 goto mismatch;
1612
1613 if (i.types[j].bitfield.mem
1614 && !match_mem_size (t, j ? 0 : 1))
1615 goto mismatch;
1616 }
1617
1618 return match;
1619 }
1620
1621 static INLINE int
1622 operand_type_match (i386_operand_type overlap,
1623 i386_operand_type given)
1624 {
1625 i386_operand_type temp = overlap;
1626
1627 temp.bitfield.jumpabsolute = 0;
1628 temp.bitfield.unspecified = 0;
1629 temp.bitfield.byte = 0;
1630 temp.bitfield.word = 0;
1631 temp.bitfield.dword = 0;
1632 temp.bitfield.fword = 0;
1633 temp.bitfield.qword = 0;
1634 temp.bitfield.tbyte = 0;
1635 temp.bitfield.xmmword = 0;
1636 temp.bitfield.ymmword = 0;
1637 if (operand_type_all_zero (&temp))
1638 goto mismatch;
1639
1640 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1641 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1642 return 1;
1643
1644 mismatch:
1645 i.error = operand_type_mismatch;
1646 return 0;
1647 }
1648
1649 /* If given types g0 and g1 are registers they must be of the same type
1650 unless the expected operand type register overlap is null.
1651 Note that Acc in a template matches every size of reg. */
1652
1653 static INLINE int
1654 operand_type_register_match (i386_operand_type m0,
1655 i386_operand_type g0,
1656 i386_operand_type t0,
1657 i386_operand_type m1,
1658 i386_operand_type g1,
1659 i386_operand_type t1)
1660 {
1661 if (!operand_type_check (g0, reg))
1662 return 1;
1663
1664 if (!operand_type_check (g1, reg))
1665 return 1;
1666
1667 if (g0.bitfield.reg8 == g1.bitfield.reg8
1668 && g0.bitfield.reg16 == g1.bitfield.reg16
1669 && g0.bitfield.reg32 == g1.bitfield.reg32
1670 && g0.bitfield.reg64 == g1.bitfield.reg64)
1671 return 1;
1672
1673 if (m0.bitfield.acc)
1674 {
1675 t0.bitfield.reg8 = 1;
1676 t0.bitfield.reg16 = 1;
1677 t0.bitfield.reg32 = 1;
1678 t0.bitfield.reg64 = 1;
1679 }
1680
1681 if (m1.bitfield.acc)
1682 {
1683 t1.bitfield.reg8 = 1;
1684 t1.bitfield.reg16 = 1;
1685 t1.bitfield.reg32 = 1;
1686 t1.bitfield.reg64 = 1;
1687 }
1688
1689 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1690 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1691 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1692 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1693 return 1;
1694
1695 i.error = register_type_mismatch;
1696
1697 return 0;
1698 }
1699
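/* Map a displacement type to the ModRM "mod" value that goes with it:
   1 when an 8-bit displacement follows the ModRM/SIB bytes, 2 when a
   16- or 32-bit displacement follows, and 0 otherwise.  */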
1700 static INLINE unsigned int
1701 mode_from_disp_size (i386_operand_type t)
1702 {
1703 if (t.bitfield.disp8)
1704 return 1;
1705 else if (t.bitfield.disp16
1706 || t.bitfield.disp32
1707 || t.bitfield.disp32s)
1708 return 2;
1709 else
1710 return 0;
1711 }
1712
1713 static INLINE int
1714 fits_in_signed_byte (offsetT num)
1715 {
1716 return (num >= -128) && (num <= 127);
1717 }
1718
1719 static INLINE int
1720 fits_in_unsigned_byte (offsetT num)
1721 {
1722 return (num & 0xff) == num;
1723 }
1724
1725 static INLINE int
1726 fits_in_unsigned_word (offsetT num)
1727 {
1728 return (num & 0xffff) == num;
1729 }
1730
1731 static INLINE int
1732 fits_in_signed_word (offsetT num)
1733 {
1734 return (-32768 <= num) && (num <= 32767);
1735 }
1736
1737 static INLINE int
1738 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1739 {
1740 #ifndef BFD64
1741 return 1;
1742 #else
1743 return (!(((offsetT) -1 << 31) & num)
1744 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1745 #endif
1746 } /* fits_in_signed_long() */
1747
1748 static INLINE int
1749 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1750 {
1751 #ifndef BFD64
1752 return 1;
1753 #else
1754 return (num & (((offsetT) 2 << 31) - 1)) == num;
1755 #endif
1756 } /* fits_in_unsigned_long() */
1757
1758 static INLINE int
1759 fits_in_imm4 (offsetT num)
1760 {
1761 return (num & 0xf) == num;
1762 }
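/* A few concrete data points for the fits_in_* helpers above:
   fits_in_signed_byte (-128) and fits_in_unsigned_byte (255) are
   non-zero, fits_in_signed_byte (128) is zero, and fits_in_imm4 (15)
   holds while fits_in_imm4 (16) does not.  */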
1763
1764 static i386_operand_type
1765 smallest_imm_type (offsetT num)
1766 {
1767 i386_operand_type t;
1768
1769 operand_type_set (&t, 0);
1770 t.bitfield.imm64 = 1;
1771
1772 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1773 {
1774 /* This code is disabled on the 486 because all the Imm1 forms
1775 in the opcode table are slower on the i486. They're the
1776 versions with the implicitly specified single-position
1777 displacement, which has another syntax if you really want to
1778 use that form. */
1779 t.bitfield.imm1 = 1;
1780 t.bitfield.imm8 = 1;
1781 t.bitfield.imm8s = 1;
1782 t.bitfield.imm16 = 1;
1783 t.bitfield.imm32 = 1;
1784 t.bitfield.imm32s = 1;
1785 }
1786 else if (fits_in_signed_byte (num))
1787 {
1788 t.bitfield.imm8 = 1;
1789 t.bitfield.imm8s = 1;
1790 t.bitfield.imm16 = 1;
1791 t.bitfield.imm32 = 1;
1792 t.bitfield.imm32s = 1;
1793 }
1794 else if (fits_in_unsigned_byte (num))
1795 {
1796 t.bitfield.imm8 = 1;
1797 t.bitfield.imm16 = 1;
1798 t.bitfield.imm32 = 1;
1799 t.bitfield.imm32s = 1;
1800 }
1801 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1802 {
1803 t.bitfield.imm16 = 1;
1804 t.bitfield.imm32 = 1;
1805 t.bitfield.imm32s = 1;
1806 }
1807 else if (fits_in_signed_long (num))
1808 {
1809 t.bitfield.imm32 = 1;
1810 t.bitfield.imm32s = 1;
1811 }
1812 else if (fits_in_unsigned_long (num))
1813 t.bitfield.imm32 = 1;
1814
1815 return t;
1816 }
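/* For instance, smallest_imm_type (200) leaves imm64 set (it always
   is) and also sets imm8, imm16, imm32 and imm32s, but not imm8s,
   because 200 fits in an unsigned but not in a signed byte.  */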
1817
1818 static offsetT
1819 offset_in_range (offsetT val, int size)
1820 {
1821 addressT mask;
1822
1823 switch (size)
1824 {
1825 case 1: mask = ((addressT) 1 << 8) - 1; break;
1826 case 2: mask = ((addressT) 1 << 16) - 1; break;
1827 case 4: mask = ((addressT) 2 << 31) - 1; break;
1828 #ifdef BFD64
1829 case 8: mask = ((addressT) 2 << 63) - 1; break;
1830 #endif
1831 default: abort ();
1832 }
1833
1834 #ifdef BFD64
1835 /* If BFD64, sign extend val for 32bit address mode. */
1836 if (flag_code != CODE_64BIT
1837 || i.prefix[ADDR_PREFIX])
1838 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1839 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1840 #endif
1841
1842 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1843 {
1844 char buf1[40], buf2[40];
1845
1846 sprint_value (buf1, val);
1847 sprint_value (buf2, val & mask);
1848 as_warn (_("%s shortened to %s"), buf1, buf2);
1849 }
1850 return val & mask;
1851 }
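/* Example: offset_in_range (0x123, 1) warns that the value is
   shortened and returns 0x23, while offset_in_range (-1, 2) returns
   0xffff silently because the truncated bits are all ones.  */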
1852
1853 enum PREFIX_GROUP
1854 {
1855 PREFIX_EXIST = 0,
1856 PREFIX_LOCK,
1857 PREFIX_REP,
1858 PREFIX_OTHER
1859 };
1860
1861 /* Returns
1862 a. PREFIX_EXIST if attempting to add a prefix where one from the
1863 same class already exists.
1864 b. PREFIX_LOCK if lock prefix is added.
1865 c. PREFIX_REP if rep/repne prefix is added.
1866 d. PREFIX_OTHER if other prefix is added.
1867 */
1868
1869 static enum PREFIX_GROUP
1870 add_prefix (unsigned int prefix)
1871 {
1872 enum PREFIX_GROUP ret = PREFIX_OTHER;
1873 unsigned int q;
1874
1875 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1876 && flag_code == CODE_64BIT)
1877 {
1878 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1879 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1880 && (prefix & (REX_R | REX_X | REX_B))))
1881 ret = PREFIX_EXIST;
1882 q = REX_PREFIX;
1883 }
1884 else
1885 {
1886 switch (prefix)
1887 {
1888 default:
1889 abort ();
1890
1891 case CS_PREFIX_OPCODE:
1892 case DS_PREFIX_OPCODE:
1893 case ES_PREFIX_OPCODE:
1894 case FS_PREFIX_OPCODE:
1895 case GS_PREFIX_OPCODE:
1896 case SS_PREFIX_OPCODE:
1897 q = SEG_PREFIX;
1898 break;
1899
1900 case REPNE_PREFIX_OPCODE:
1901 case REPE_PREFIX_OPCODE:
1902 q = REP_PREFIX;
1903 ret = PREFIX_REP;
1904 break;
1905
1906 case LOCK_PREFIX_OPCODE:
1907 q = LOCK_PREFIX;
1908 ret = PREFIX_LOCK;
1909 break;
1910
1911 case FWAIT_OPCODE:
1912 q = WAIT_PREFIX;
1913 break;
1914
1915 case ADDR_PREFIX_OPCODE:
1916 q = ADDR_PREFIX;
1917 break;
1918
1919 case DATA_PREFIX_OPCODE:
1920 q = DATA_PREFIX;
1921 break;
1922 }
1923 if (i.prefix[q] != 0)
1924 ret = PREFIX_EXIST;
1925 }
1926
1927 if (ret)
1928 {
1929 if (!i.prefix[q])
1930 ++i.prefixes;
1931 i.prefix[q] |= prefix;
1932 }
1933 else
1934 as_bad (_("same type of prefix used twice"));
1935
1936 return ret;
1937 }
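/* Example: the first add_prefix (LOCK_PREFIX_OPCODE) call returns
   PREFIX_LOCK and records the 0xf0 byte in i.prefix[LOCK_PREFIX]; a
   second call with the same opcode finds that slot occupied and
   returns PREFIX_EXIST after reporting "same type of prefix used
   twice". */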
1938
1939 static void
1940 update_code_flag (int value, int check)
1941 {
1942 PRINTF_LIKE ((*as_error));
1943
1944 flag_code = (enum flag_code) value;
1945 if (flag_code == CODE_64BIT)
1946 {
1947 cpu_arch_flags.bitfield.cpu64 = 1;
1948 cpu_arch_flags.bitfield.cpuno64 = 0;
1949 }
1950 else
1951 {
1952 cpu_arch_flags.bitfield.cpu64 = 0;
1953 cpu_arch_flags.bitfield.cpuno64 = 1;
1954 }
1955 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1956 {
1957 if (check)
1958 as_error = as_fatal;
1959 else
1960 as_error = as_bad;
1961 (*as_error) (_("64bit mode not supported on `%s'."),
1962 cpu_arch_name ? cpu_arch_name : default_arch);
1963 }
1964 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1965 {
1966 if (check)
1967 as_error = as_fatal;
1968 else
1969 as_error = as_bad;
1970 (*as_error) (_("32bit mode not supported on `%s'."),
1971 cpu_arch_name ? cpu_arch_name : default_arch);
1972 }
1973 stackop_size = '\0';
1974 }
1975
1976 static void
1977 set_code_flag (int value)
1978 {
1979 update_code_flag (value, 0);
1980 }
1981
1982 static void
1983 set_16bit_gcc_code_flag (int new_code_flag)
1984 {
1985 flag_code = (enum flag_code) new_code_flag;
1986 if (flag_code != CODE_16BIT)
1987 abort ();
1988 cpu_arch_flags.bitfield.cpu64 = 0;
1989 cpu_arch_flags.bitfield.cpuno64 = 1;
1990 stackop_size = LONG_MNEM_SUFFIX;
1991 }
1992
1993 static void
1994 set_intel_syntax (int syntax_flag)
1995 {
1996 /* Find out if register prefixing is specified. */
1997 int ask_naked_reg = 0;
1998
1999 SKIP_WHITESPACE ();
2000 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2001 {
2002 char *string = input_line_pointer;
2003 int e = get_symbol_end ();
2004
2005 if (strcmp (string, "prefix") == 0)
2006 ask_naked_reg = 1;
2007 else if (strcmp (string, "noprefix") == 0)
2008 ask_naked_reg = -1;
2009 else
2010 as_bad (_("bad argument to syntax directive."));
2011 *input_line_pointer = e;
2012 }
2013 demand_empty_rest_of_line ();
2014
2015 intel_syntax = syntax_flag;
2016
2017 if (ask_naked_reg == 0)
2018 allow_naked_reg = (intel_syntax
2019 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2020 else
2021 allow_naked_reg = (ask_naked_reg < 0);
2022
2023 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2024
2025 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2026 identifier_chars['$'] = intel_syntax ? '$' : 0;
2027 register_prefix = allow_naked_reg ? "" : "%";
2028 }
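/* Example: ".intel_syntax noprefix" selects Intel syntax and sets
   allow_naked_reg, so register operands may be written without the
   '%' prefix; ".att_syntax" switches back to AT&T syntax. */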
2029
2030 static void
2031 set_intel_mnemonic (int mnemonic_flag)
2032 {
2033 intel_mnemonic = mnemonic_flag;
2034 }
2035
2036 static void
2037 set_allow_index_reg (int flag)
2038 {
2039 allow_index_reg = flag;
2040 }
2041
2042 static void
2043 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2044 {
2045 SKIP_WHITESPACE ();
2046
2047 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2048 {
2049 char *string = input_line_pointer;
2050 int e = get_symbol_end ();
2051
2052 if (strcmp (string, "none") == 0)
2053 sse_check = sse_check_none;
2054 else if (strcmp (string, "warning") == 0)
2055 sse_check = sse_check_warning;
2056 else if (strcmp (string, "error") == 0)
2057 sse_check = sse_check_error;
2058 else
2059 as_bad (_("bad argument to sse_check directive."));
2060 *input_line_pointer = e;
2061 }
2062 else
2063 as_bad (_("missing argument for sse_check directive"));
2064
2065 demand_empty_rest_of_line ();
2066 }
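/* Example: ".sse_check warning" makes md_assemble warn "SSE
   instruction `...' is used" for subsequent SSE insns (see the
   sse_check test in md_assemble below); "error" turns that into an
   error and "none" restores the quiet default. */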
2067
2068 static void
2069 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2070 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2071 {
2072 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2073 static const char *arch;
2074
2075 /* Intel L1OM is only supported on ELF. */
2076 if (!IS_ELF)
2077 return;
2078
2079 if (!arch)
2080 {
2081 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2082 use default_arch. */
2083 arch = cpu_arch_name;
2084 if (!arch)
2085 arch = default_arch;
2086 }
2087
2088 /* When targeting Intel L1OM, the selected arch must include L1OM support. */
2089 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2090 || new_flag.bitfield.cpul1om)
2091 return;
2092
2093 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2094 #endif
2095 }
2096
2097 static void
2098 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2099 {
2100 SKIP_WHITESPACE ();
2101
2102 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2103 {
2104 char *string = input_line_pointer;
2105 int e = get_symbol_end ();
2106 unsigned int j;
2107 i386_cpu_flags flags;
2108
2109 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2110 {
2111 if (strcmp (string, cpu_arch[j].name) == 0)
2112 {
2113 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2114
2115 if (*string != '.')
2116 {
2117 cpu_arch_name = cpu_arch[j].name;
2118 cpu_sub_arch_name = NULL;
2119 cpu_arch_flags = cpu_arch[j].flags;
2120 if (flag_code == CODE_64BIT)
2121 {
2122 cpu_arch_flags.bitfield.cpu64 = 1;
2123 cpu_arch_flags.bitfield.cpuno64 = 0;
2124 }
2125 else
2126 {
2127 cpu_arch_flags.bitfield.cpu64 = 0;
2128 cpu_arch_flags.bitfield.cpuno64 = 1;
2129 }
2130 cpu_arch_isa = cpu_arch[j].type;
2131 cpu_arch_isa_flags = cpu_arch[j].flags;
2132 if (!cpu_arch_tune_set)
2133 {
2134 cpu_arch_tune = cpu_arch_isa;
2135 cpu_arch_tune_flags = cpu_arch_isa_flags;
2136 }
2137 break;
2138 }
2139
2140 if (!cpu_arch[j].negated)
2141 flags = cpu_flags_or (cpu_arch_flags,
2142 cpu_arch[j].flags);
2143 else
2144 flags = cpu_flags_and_not (cpu_arch_flags,
2145 cpu_arch[j].flags);
2146 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2147 {
2148 if (cpu_sub_arch_name)
2149 {
2150 char *name = cpu_sub_arch_name;
2151 cpu_sub_arch_name = concat (name,
2152 cpu_arch[j].name,
2153 (const char *) NULL);
2154 free (name);
2155 }
2156 else
2157 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2158 cpu_arch_flags = flags;
2159 }
2160 *input_line_pointer = e;
2161 demand_empty_rest_of_line ();
2162 return;
2163 }
2164 }
2165 if (j >= ARRAY_SIZE (cpu_arch))
2166 as_bad (_("no such architecture: `%s'"), string);
2167
2168 *input_line_pointer = e;
2169 }
2170 else
2171 as_bad (_("missing cpu architecture"));
2172
2173 no_cond_jump_promotion = 0;
2174 if (*input_line_pointer == ','
2175 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2176 {
2177 char *string = ++input_line_pointer;
2178 int e = get_symbol_end ();
2179
2180 if (strcmp (string, "nojumps") == 0)
2181 no_cond_jump_promotion = 1;
2182 else if (strcmp (string, "jumps") == 0)
2183 ;
2184 else
2185 as_bad (_("no such architecture modifier: `%s'"), string);
2186
2187 *input_line_pointer = e;
2188 }
2189
2190 demand_empty_rest_of_line ();
2191 }
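/* Example: ".arch i286" restricts the accepted instruction set to that
   processor, while names starting with a dot such as ".arch .sse4.1"
   only OR the matching feature bits into (or, for negated entries,
   remove them from) the current flags. An optional ", nojumps"
   modifier suppresses promotion of out-of-range conditional jumps. */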
2192
2193 enum bfd_architecture
2194 i386_arch (void)
2195 {
2196 if (cpu_arch_isa == PROCESSOR_L1OM)
2197 {
2198 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2199 || flag_code != CODE_64BIT)
2200 as_fatal (_("Intel L1OM is 64bit ELF only"));
2201 return bfd_arch_l1om;
2202 }
2203 else
2204 return bfd_arch_i386;
2205 }
2206
2207 unsigned long
2208 i386_mach ()
2209 {
2210 if (!strcmp (default_arch, "x86_64"))
2211 {
2212 if (cpu_arch_isa == PROCESSOR_L1OM)
2213 {
2214 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2215 as_fatal (_("Intel L1OM is 64bit ELF only"));
2216 return bfd_mach_l1om;
2217 }
2218 else
2219 return bfd_mach_x86_64;
2220 }
2221 else if (!strcmp (default_arch, "i386"))
2222 return bfd_mach_i386_i386;
2223 else
2224 as_fatal (_("Unknown architecture"));
2225 }
2226 \f
2227 void
2228 md_begin ()
2229 {
2230 const char *hash_err;
2231
2232 /* Initialize op_hash hash table. */
2233 op_hash = hash_new ();
2234
2235 {
2236 const insn_template *optab;
2237 templates *core_optab;
2238
2239 /* Setup for loop. */
2240 optab = i386_optab;
2241 core_optab = (templates *) xmalloc (sizeof (templates));
2242 core_optab->start = optab;
2243
2244 while (1)
2245 {
2246 ++optab;
2247 if (optab->name == NULL
2248 || strcmp (optab->name, (optab - 1)->name) != 0)
2249 {
2250 /* different name --> ship out current template list;
2251 add to hash table; & begin anew. */
2252 core_optab->end = optab;
2253 hash_err = hash_insert (op_hash,
2254 (optab - 1)->name,
2255 (void *) core_optab);
2256 if (hash_err)
2257 {
2258 as_fatal (_("Internal Error: Can't hash %s: %s"),
2259 (optab - 1)->name,
2260 hash_err);
2261 }
2262 if (optab->name == NULL)
2263 break;
2264 core_optab = (templates *) xmalloc (sizeof (templates));
2265 core_optab->start = optab;
2266 }
2267 }
2268 }
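/* Templates sharing a mnemonic are stored consecutively in i386_optab,
   so the loop above turns each run of identical names (e.g. all the
   "mov" forms) into a single templates entry keyed by that name. */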
2269
2270 /* Initialize reg_hash hash table. */
2271 reg_hash = hash_new ();
2272 {
2273 const reg_entry *regtab;
2274 unsigned int regtab_size = i386_regtab_size;
2275
2276 for (regtab = i386_regtab; regtab_size--; regtab++)
2277 {
2278 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2279 if (hash_err)
2280 as_fatal (_("Internal Error: Can't hash %s: %s"),
2281 regtab->reg_name,
2282 hash_err);
2283 }
2284 }
2285
2286 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2287 {
2288 int c;
2289 char *p;
2290
2291 for (c = 0; c < 256; c++)
2292 {
2293 if (ISDIGIT (c))
2294 {
2295 digit_chars[c] = c;
2296 mnemonic_chars[c] = c;
2297 register_chars[c] = c;
2298 operand_chars[c] = c;
2299 }
2300 else if (ISLOWER (c))
2301 {
2302 mnemonic_chars[c] = c;
2303 register_chars[c] = c;
2304 operand_chars[c] = c;
2305 }
2306 else if (ISUPPER (c))
2307 {
2308 mnemonic_chars[c] = TOLOWER (c);
2309 register_chars[c] = mnemonic_chars[c];
2310 operand_chars[c] = c;
2311 }
2312
2313 if (ISALPHA (c) || ISDIGIT (c))
2314 identifier_chars[c] = c;
2315 else if (c >= 128)
2316 {
2317 identifier_chars[c] = c;
2318 operand_chars[c] = c;
2319 }
2320 }
2321
2322 #ifdef LEX_AT
2323 identifier_chars['@'] = '@';
2324 #endif
2325 #ifdef LEX_QM
2326 identifier_chars['?'] = '?';
2327 operand_chars['?'] = '?';
2328 #endif
2329 digit_chars['-'] = '-';
2330 mnemonic_chars['_'] = '_';
2331 mnemonic_chars['-'] = '-';
2332 mnemonic_chars['.'] = '.';
2333 identifier_chars['_'] = '_';
2334 identifier_chars['.'] = '.';
2335
2336 for (p = operand_special_chars; *p != '\0'; p++)
2337 operand_chars[(unsigned char) *p] = *p;
2338 }
2339
2340 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2341 if (IS_ELF)
2342 {
2343 record_alignment (text_section, 2);
2344 record_alignment (data_section, 2);
2345 record_alignment (bss_section, 2);
2346 }
2347 #endif
2348
2349 if (flag_code == CODE_64BIT)
2350 {
2351 x86_dwarf2_return_column = 16;
2352 x86_cie_data_alignment = -8;
2353 }
2354 else
2355 {
2356 x86_dwarf2_return_column = 8;
2357 x86_cie_data_alignment = -4;
2358 }
2359 }
2360
2361 void
2362 i386_print_statistics (FILE *file)
2363 {
2364 hash_print_statistics (file, "i386 opcode", op_hash);
2365 hash_print_statistics (file, "i386 register", reg_hash);
2366 }
2367 \f
2368 #ifdef DEBUG386
2369
2370 /* Debugging routines for md_assemble. */
2371 static void pte (insn_template *);
2372 static void pt (i386_operand_type);
2373 static void pe (expressionS *);
2374 static void ps (symbolS *);
2375
2376 static void
2377 pi (char *line, i386_insn *x)
2378 {
2379 unsigned int j;
2380
2381 fprintf (stdout, "%s: template ", line);
2382 pte (&x->tm);
2383 fprintf (stdout, " address: base %s index %s scale %x\n",
2384 x->base_reg ? x->base_reg->reg_name : "none",
2385 x->index_reg ? x->index_reg->reg_name : "none",
2386 x->log2_scale_factor);
2387 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2388 x->rm.mode, x->rm.reg, x->rm.regmem);
2389 fprintf (stdout, " sib: base %x index %x scale %x\n",
2390 x->sib.base, x->sib.index, x->sib.scale);
2391 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2392 (x->rex & REX_W) != 0,
2393 (x->rex & REX_R) != 0,
2394 (x->rex & REX_X) != 0,
2395 (x->rex & REX_B) != 0);
2396 for (j = 0; j < x->operands; j++)
2397 {
2398 fprintf (stdout, " #%d: ", j + 1);
2399 pt (x->types[j]);
2400 fprintf (stdout, "\n");
2401 if (x->types[j].bitfield.reg8
2402 || x->types[j].bitfield.reg16
2403 || x->types[j].bitfield.reg32
2404 || x->types[j].bitfield.reg64
2405 || x->types[j].bitfield.regmmx
2406 || x->types[j].bitfield.regxmm
2407 || x->types[j].bitfield.regymm
2408 || x->types[j].bitfield.sreg2
2409 || x->types[j].bitfield.sreg3
2410 || x->types[j].bitfield.control
2411 || x->types[j].bitfield.debug
2412 || x->types[j].bitfield.test)
2413 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2414 if (operand_type_check (x->types[j], imm))
2415 pe (x->op[j].imms);
2416 if (operand_type_check (x->types[j], disp))
2417 pe (x->op[j].disps);
2418 }
2419 }
2420
2421 static void
2422 pte (insn_template *t)
2423 {
2424 unsigned int j;
2425 fprintf (stdout, " %d operands ", t->operands);
2426 fprintf (stdout, "opcode %x ", t->base_opcode);
2427 if (t->extension_opcode != None)
2428 fprintf (stdout, "ext %x ", t->extension_opcode);
2429 if (t->opcode_modifier.d)
2430 fprintf (stdout, "D");
2431 if (t->opcode_modifier.w)
2432 fprintf (stdout, "W");
2433 fprintf (stdout, "\n");
2434 for (j = 0; j < t->operands; j++)
2435 {
2436 fprintf (stdout, " #%d type ", j + 1);
2437 pt (t->operand_types[j]);
2438 fprintf (stdout, "\n");
2439 }
2440 }
2441
2442 static void
2443 pe (expressionS *e)
2444 {
2445 fprintf (stdout, " operation %d\n", e->X_op);
2446 fprintf (stdout, " add_number %ld (%lx)\n",
2447 (long) e->X_add_number, (long) e->X_add_number);
2448 if (e->X_add_symbol)
2449 {
2450 fprintf (stdout, " add_symbol ");
2451 ps (e->X_add_symbol);
2452 fprintf (stdout, "\n");
2453 }
2454 if (e->X_op_symbol)
2455 {
2456 fprintf (stdout, " op_symbol ");
2457 ps (e->X_op_symbol);
2458 fprintf (stdout, "\n");
2459 }
2460 }
2461
2462 static void
2463 ps (symbolS *s)
2464 {
2465 fprintf (stdout, "%s type %s%s",
2466 S_GET_NAME (s),
2467 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2468 segment_name (S_GET_SEGMENT (s)));
2469 }
2470
2471 static struct type_name
2472 {
2473 i386_operand_type mask;
2474 const char *name;
2475 }
2476 const type_names[] =
2477 {
2478 { OPERAND_TYPE_REG8, "r8" },
2479 { OPERAND_TYPE_REG16, "r16" },
2480 { OPERAND_TYPE_REG32, "r32" },
2481 { OPERAND_TYPE_REG64, "r64" },
2482 { OPERAND_TYPE_IMM8, "i8" },
2483 { OPERAND_TYPE_IMM8S, "i8s" },
2484 { OPERAND_TYPE_IMM16, "i16" },
2485 { OPERAND_TYPE_IMM32, "i32" },
2486 { OPERAND_TYPE_IMM32S, "i32s" },
2487 { OPERAND_TYPE_IMM64, "i64" },
2488 { OPERAND_TYPE_IMM1, "i1" },
2489 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2490 { OPERAND_TYPE_DISP8, "d8" },
2491 { OPERAND_TYPE_DISP16, "d16" },
2492 { OPERAND_TYPE_DISP32, "d32" },
2493 { OPERAND_TYPE_DISP32S, "d32s" },
2494 { OPERAND_TYPE_DISP64, "d64" },
2495 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2496 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2497 { OPERAND_TYPE_CONTROL, "control reg" },
2498 { OPERAND_TYPE_TEST, "test reg" },
2499 { OPERAND_TYPE_DEBUG, "debug reg" },
2500 { OPERAND_TYPE_FLOATREG, "FReg" },
2501 { OPERAND_TYPE_FLOATACC, "FAcc" },
2502 { OPERAND_TYPE_SREG2, "SReg2" },
2503 { OPERAND_TYPE_SREG3, "SReg3" },
2504 { OPERAND_TYPE_ACC, "Acc" },
2505 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2506 { OPERAND_TYPE_REGMMX, "rMMX" },
2507 { OPERAND_TYPE_REGXMM, "rXMM" },
2508 { OPERAND_TYPE_REGYMM, "rYMM" },
2509 { OPERAND_TYPE_ESSEG, "es" },
2510 };
2511
2512 static void
2513 pt (i386_operand_type t)
2514 {
2515 unsigned int j;
2516 i386_operand_type a;
2517
2518 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2519 {
2520 a = operand_type_and (t, type_names[j].mask);
2521 if (!operand_type_all_zero (&a))
2522 fprintf (stdout, "%s, ", type_names[j].name);
2523 }
2524 fflush (stdout);
2525 }
2526
2527 #endif /* DEBUG386 */
2528 \f
2529 static bfd_reloc_code_real_type
2530 reloc (unsigned int size,
2531 int pcrel,
2532 int sign,
2533 bfd_reloc_code_real_type other)
2534 {
2535 if (other != NO_RELOC)
2536 {
2537 reloc_howto_type *rel;
2538
2539 if (size == 8)
2540 switch (other)
2541 {
2542 case BFD_RELOC_X86_64_GOT32:
2543 return BFD_RELOC_X86_64_GOT64;
2544 break;
2545 case BFD_RELOC_X86_64_PLTOFF64:
2546 return BFD_RELOC_X86_64_PLTOFF64;
2547 break;
2548 case BFD_RELOC_X86_64_GOTPC32:
2549 other = BFD_RELOC_X86_64_GOTPC64;
2550 break;
2551 case BFD_RELOC_X86_64_GOTPCREL:
2552 other = BFD_RELOC_X86_64_GOTPCREL64;
2553 break;
2554 case BFD_RELOC_X86_64_TPOFF32:
2555 other = BFD_RELOC_X86_64_TPOFF64;
2556 break;
2557 case BFD_RELOC_X86_64_DTPOFF32:
2558 other = BFD_RELOC_X86_64_DTPOFF64;
2559 break;
2560 default:
2561 break;
2562 }
2563
2564 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2565 if (size == 4 && flag_code != CODE_64BIT)
2566 sign = -1;
2567
2568 rel = bfd_reloc_type_lookup (stdoutput, other);
2569 if (!rel)
2570 as_bad (_("unknown relocation (%u)"), other);
2571 else if (size != bfd_get_reloc_size (rel))
2572 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2573 bfd_get_reloc_size (rel),
2574 size);
2575 else if (pcrel && !rel->pc_relative)
2576 as_bad (_("non-pc-relative relocation for pc-relative field"));
2577 else if ((rel->complain_on_overflow == complain_overflow_signed
2578 && !sign)
2579 || (rel->complain_on_overflow == complain_overflow_unsigned
2580 && sign > 0))
2581 as_bad (_("relocated field and relocation type differ in signedness"));
2582 else
2583 return other;
2584 return NO_RELOC;
2585 }
2586
2587 if (pcrel)
2588 {
2589 if (!sign)
2590 as_bad (_("there are no unsigned pc-relative relocations"));
2591 switch (size)
2592 {
2593 case 1: return BFD_RELOC_8_PCREL;
2594 case 2: return BFD_RELOC_16_PCREL;
2595 case 4: return BFD_RELOC_32_PCREL;
2596 case 8: return BFD_RELOC_64_PCREL;
2597 }
2598 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2599 }
2600 else
2601 {
2602 if (sign > 0)
2603 switch (size)
2604 {
2605 case 4: return BFD_RELOC_X86_64_32S;
2606 }
2607 else
2608 switch (size)
2609 {
2610 case 1: return BFD_RELOC_8;
2611 case 2: return BFD_RELOC_16;
2612 case 4: return BFD_RELOC_32;
2613 case 8: return BFD_RELOC_64;
2614 }
2615 as_bad (_("cannot do %s %u byte relocation"),
2616 sign > 0 ? "signed" : "unsigned", size);
2617 }
2618
2619 return NO_RELOC;
2620 }
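/* Example: reloc (4, 1, 1, NO_RELOC) yields BFD_RELOC_32_PCREL for a
   4-byte pc-relative field, while reloc (4, 0, 1, NO_RELOC) yields
   BFD_RELOC_X86_64_32S, the sign-extended 32-bit relocation used for
   absolute operands in 64-bit code. */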
2621
2622 /* Here we decide which fixups can be adjusted to make them relative to
2623 the beginning of the section instead of the symbol. Basically we need
2624 to make sure that the dynamic relocations are done correctly, so in
2625 some cases we force the original symbol to be used. */
2626
2627 int
2628 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2629 {
2630 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2631 if (!IS_ELF)
2632 return 1;
2633
2634 /* Don't adjust pc-relative references to merge sections in 64-bit
2635 mode. */
2636 if (use_rela_relocations
2637 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2638 && fixP->fx_pcrel)
2639 return 0;
2640
2641 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2642 and changed later by validate_fix. */
2643 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2644 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2645 return 0;
2646
2647 /* adjust_reloc_syms doesn't know about the GOT. */
2648 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2649 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2650 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2651 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2652 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2653 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2654 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2655 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2656 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2657 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2658 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2659 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2660 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2661 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2662 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2663 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2664 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2665 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2666 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2667 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2668 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2669 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2670 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2671 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2672 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2673 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2674 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2675 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2676 return 0;
2677 #endif
2678 return 1;
2679 }
2680
2681 static int
2682 intel_float_operand (const char *mnemonic)
2683 {
2684 /* Note that the value returned is meaningful only for opcodes with (memory)
2685 operands, hence the code here is free to improperly handle opcodes that
2686 have no operands (for better performance and smaller code). */
2687
2688 if (mnemonic[0] != 'f')
2689 return 0; /* non-math */
2690
2691 switch (mnemonic[1])
2692 {
2693 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2694 the fs segment override prefix not currently handled because no
2695 call path can make opcodes without operands get here */
2696 case 'i':
2697 return 2 /* integer op */;
2698 case 'l':
2699 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2700 return 3; /* fldcw/fldenv */
2701 break;
2702 case 'n':
2703 if (mnemonic[2] != 'o' /* fnop */)
2704 return 3; /* non-waiting control op */
2705 break;
2706 case 'r':
2707 if (mnemonic[2] == 's')
2708 return 3; /* frstor/frstpm */
2709 break;
2710 case 's':
2711 if (mnemonic[2] == 'a')
2712 return 3; /* fsave */
2713 if (mnemonic[2] == 't')
2714 {
2715 switch (mnemonic[3])
2716 {
2717 case 'c': /* fstcw */
2718 case 'd': /* fstdw */
2719 case 'e': /* fstenv */
2720 case 's': /* fsts[gw] */
2721 return 3;
2722 }
2723 }
2724 break;
2725 case 'x':
2726 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2727 return 0; /* fxsave/fxrstor are not really math ops */
2728 break;
2729 }
2730
2731 return 1;
2732 }
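/* Examples of the classification above: "fiadd" returns 2 (integer
   operand), "fldcw" and "fnsave" return 3 (control/state operand),
   "fadd" returns 1 (plain floating point op) and "mov" returns 0. */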
2733
2734 /* Build the VEX prefix. */
2735
2736 static void
2737 build_vex_prefix (const insn_template *t)
2738 {
2739 unsigned int register_specifier;
2740 unsigned int implied_prefix;
2741 unsigned int vector_length;
2742
2743 /* Check register specifier. */
2744 if (i.vex.register_specifier)
2745 {
2746 register_specifier = i.vex.register_specifier->reg_num;
2747 if ((i.vex.register_specifier->reg_flags & RegRex))
2748 register_specifier += 8;
2749 register_specifier = ~register_specifier & 0xf;
2750 }
2751 else
2752 register_specifier = 0xf;
2753
2754 /* Use 2-byte VEX prefix by swapping destination and source
2755 operands. */
2756 if (!i.swap_operand
2757 && i.operands == i.reg_operands
2758 && i.tm.opcode_modifier.vexopcode == VEX0F
2759 && i.tm.opcode_modifier.s
2760 && i.rex == REX_B)
2761 {
2762 unsigned int xchg = i.operands - 1;
2763 union i386_op temp_op;
2764 i386_operand_type temp_type;
2765
2766 temp_type = i.types[xchg];
2767 i.types[xchg] = i.types[0];
2768 i.types[0] = temp_type;
2769 temp_op = i.op[xchg];
2770 i.op[xchg] = i.op[0];
2771 i.op[0] = temp_op;
2772
2773 gas_assert (i.rm.mode == 3);
2774
2775 i.rex = REX_R;
2776 xchg = i.rm.regmem;
2777 i.rm.regmem = i.rm.reg;
2778 i.rm.reg = xchg;
2779
2780 /* Use the next insn. */
2781 i.tm = t[1];
2782 }
2783
2784 if (i.tm.opcode_modifier.vex == VEXScalar)
2785 vector_length = avxscalar;
2786 else
2787 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2788
2789 switch ((i.tm.base_opcode >> 8) & 0xff)
2790 {
2791 case 0:
2792 implied_prefix = 0;
2793 break;
2794 case DATA_PREFIX_OPCODE:
2795 implied_prefix = 1;
2796 break;
2797 case REPE_PREFIX_OPCODE:
2798 implied_prefix = 2;
2799 break;
2800 case REPNE_PREFIX_OPCODE:
2801 implied_prefix = 3;
2802 break;
2803 default:
2804 abort ();
2805 }
2806
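/* The bytes assembled below follow the VEX layout: the 2-byte form is
   0xc5 then (~R << 7 | ~vvvv << 3 | L << 2 | pp); the 3-byte form is
   0xc4 (0x8f for XOP) then (~RXB << 5 | m-mmmm) and
   (W << 7 | ~vvvv << 3 | L << 2 | pp). For example, AT&T
   "vaddps %ymm2, %ymm1, %ymm0" comes out as c5 f4 58 c2. */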
2807 /* Use 2-byte VEX prefix if possible. */
2808 if (i.tm.opcode_modifier.vexopcode == VEX0F
2809 && i.tm.opcode_modifier.vexw != VEXW1
2810 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2811 {
2812 /* 2-byte VEX prefix. */
2813 unsigned int r;
2814
2815 i.vex.length = 2;
2816 i.vex.bytes[0] = 0xc5;
2817
2818 /* Check the REX.R bit. */
2819 r = (i.rex & REX_R) ? 0 : 1;
2820 i.vex.bytes[1] = (r << 7
2821 | register_specifier << 3
2822 | vector_length << 2
2823 | implied_prefix);
2824 }
2825 else
2826 {
2827 /* 3-byte VEX prefix. */
2828 unsigned int m, w;
2829
2830 i.vex.length = 3;
2831
2832 switch (i.tm.opcode_modifier.vexopcode)
2833 {
2834 case VEX0F:
2835 m = 0x1;
2836 i.vex.bytes[0] = 0xc4;
2837 break;
2838 case VEX0F38:
2839 m = 0x2;
2840 i.vex.bytes[0] = 0xc4;
2841 break;
2842 case VEX0F3A:
2843 m = 0x3;
2844 i.vex.bytes[0] = 0xc4;
2845 break;
2846 case XOP08:
2847 m = 0x8;
2848 i.vex.bytes[0] = 0x8f;
2849 break;
2850 case XOP09:
2851 m = 0x9;
2852 i.vex.bytes[0] = 0x8f;
2853 break;
2854 case XOP0A:
2855 m = 0xa;
2856 i.vex.bytes[0] = 0x8f;
2857 break;
2858 default:
2859 abort ();
2860 }
2861
2862 /* The high 3 bits of the second VEX byte are the one's complement
2863 of the RXB bits from REX. */
2864 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2865
2866 /* Check the REX.W bit. */
2867 w = (i.rex & REX_W) ? 1 : 0;
2868 if (i.tm.opcode_modifier.vexw)
2869 {
2870 if (w)
2871 abort ();
2872
2873 if (i.tm.opcode_modifier.vexw == VEXW1)
2874 w = 1;
2875 }
2876
2877 i.vex.bytes[2] = (w << 7
2878 | register_specifier << 3
2879 | vector_length << 2
2880 | implied_prefix);
2881 }
2882 }
2883
2884 static void
2885 process_immext (void)
2886 {
2887 expressionS *exp;
2888
2889 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2890 {
2891 /* SSE3 instructions have fixed operands with an opcode
2892 suffix which is coded in the same place as an 8-bit immediate
2893 field would be. Here we check those operands and remove them
2894 afterwards. */
2895 unsigned int x;
2896
2897 for (x = 0; x < i.operands; x++)
2898 if (i.op[x].regs->reg_num != x)
2899 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2900 register_prefix, i.op[x].regs->reg_name, x + 1,
2901 i.tm.name);
2902
2903 i.operands = 0;
2904 }
2905
2906 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2907 which is coded in the same place as an 8-bit immediate field
2908 would be. Here we fake an 8-bit immediate operand from the
2909 opcode suffix stored in tm.extension_opcode.
2910
2911 AVX instructions also use this encoding for some
2912 3-argument instructions. */
2913
2914 gas_assert (i.imm_operands == 0
2915 && (i.operands <= 2
2916 || (i.tm.opcode_modifier.vex
2917 && i.operands <= 4)));
2918
2919 exp = &im_expressions[i.imm_operands++];
2920 i.op[i.operands].imms = exp;
2921 i.types[i.operands] = imm8;
2922 i.operands++;
2923 exp->X_op = O_constant;
2924 exp->X_add_number = i.tm.extension_opcode;
2925 i.tm.extension_opcode = None;
2926 }
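/* For example, the 3DNow! "pfadd" template keeps the real operation
   byte in extension_opcode; the code above re-materializes it as a
   trailing 8-bit immediate so that the 0f 0f /r opcode is followed by
   that suffix byte when the insn is output. */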
2927
2928 /* This is the guts of the machine-dependent assembler. LINE points to a
2929 machine dependent instruction. This function is supposed to emit
2930 the frags/bytes it assembles to. */
2931
2932 void
2933 md_assemble (char *line)
2934 {
2935 unsigned int j;
2936 char mnemonic[MAX_MNEM_SIZE];
2937 const insn_template *t;
2938
2939 /* Initialize globals. */
2940 memset (&i, '\0', sizeof (i));
2941 for (j = 0; j < MAX_OPERANDS; j++)
2942 i.reloc[j] = NO_RELOC;
2943 memset (disp_expressions, '\0', sizeof (disp_expressions));
2944 memset (im_expressions, '\0', sizeof (im_expressions));
2945 save_stack_p = save_stack;
2946
2947 /* First parse an instruction mnemonic & call i386_operand for the operands.
2948 We assume that the scrubber has arranged it so that line[0] is the valid
2949 start of a (possibly prefixed) mnemonic. */
2950
2951 line = parse_insn (line, mnemonic);
2952 if (line == NULL)
2953 return;
2954
2955 line = parse_operands (line, mnemonic);
2956 this_operand = -1;
2957 if (line == NULL)
2958 return;
2959
2960 /* Now we've parsed the mnemonic into a set of templates, and have the
2961 operands at hand. */
2962
2963 /* All intel opcodes have reversed operands except for "bound" and
2964 "enter". We also don't reverse intersegment "jmp" and "call"
2965 instructions with 2 immediate operands so that the immediate segment
2966 precedes the offset, as it does when in AT&T mode. */
2967 if (intel_syntax
2968 && i.operands > 1
2969 && (strcmp (mnemonic, "bound") != 0)
2970 && (strcmp (mnemonic, "invlpga") != 0)
2971 && !(operand_type_check (i.types[0], imm)
2972 && operand_type_check (i.types[1], imm)))
2973 swap_operands ();
2974
2975 /* The order of the immediates should be reversed for the
2976 two-immediate extrq and insertq instructions. */
2977 if (i.imm_operands == 2
2978 && (strcmp (mnemonic, "extrq") == 0
2979 || strcmp (mnemonic, "insertq") == 0))
2980 swap_2_operands (0, 1);
2981
2982 if (i.imm_operands)
2983 optimize_imm ();
2984
2985 /* Don't optimize displacement for movabs since it only takes 64bit
2986 displacement. */
2987 if (i.disp_operands
2988 && !i.disp32_encoding
2989 && (flag_code != CODE_64BIT
2990 || strcmp (mnemonic, "movabs") != 0))
2991 optimize_disp ();
2992
2993 /* Next, we find a template that matches the given insn,
2994 making sure the overlap of the given operands types is consistent
2995 with the template operand types. */
2996
2997 if (!(t = match_template ()))
2998 return;
2999
3000 if (sse_check != sse_check_none
3001 && !i.tm.opcode_modifier.noavx
3002 && (i.tm.cpu_flags.bitfield.cpusse
3003 || i.tm.cpu_flags.bitfield.cpusse2
3004 || i.tm.cpu_flags.bitfield.cpusse3
3005 || i.tm.cpu_flags.bitfield.cpussse3
3006 || i.tm.cpu_flags.bitfield.cpusse4_1
3007 || i.tm.cpu_flags.bitfield.cpusse4_2))
3008 {
3009 (sse_check == sse_check_warning
3010 ? as_warn
3011 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3012 }
3013
3014 /* Zap movzx and movsx suffix. The suffix has been set from
3015 "word ptr" or "byte ptr" on the source operand in Intel syntax
3016 or extracted from mnemonic in AT&T syntax. But we'll use
3017 the destination register to choose the suffix for encoding. */
3018 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3019 {
3020 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3021 there is no suffix, the default will be byte extension. */
3022 if (i.reg_operands != 2
3023 && !i.suffix
3024 && intel_syntax)
3025 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3026
3027 i.suffix = 0;
3028 }
3029
3030 if (i.tm.opcode_modifier.fwait)
3031 if (!add_prefix (FWAIT_OPCODE))
3032 return;
3033
3034 /* Check for lock without a lockable instruction. Destination operand
3035 must be memory unless it is xchg (0x86). */
3036 if (i.prefix[LOCK_PREFIX]
3037 && (!i.tm.opcode_modifier.islockable
3038 || i.mem_operands == 0
3039 || (i.tm.base_opcode != 0x86
3040 && !operand_type_check (i.types[i.operands - 1], anymem))))
3041 {
3042 as_bad (_("expecting lockable instruction after `lock'"));
3043 return;
3044 }
3045
3046 /* Check string instruction segment overrides. */
3047 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3048 {
3049 if (!check_string ())
3050 return;
3051 i.disp_operands = 0;
3052 }
3053
3054 if (!process_suffix ())
3055 return;
3056
3057 /* Update operand types. */
3058 for (j = 0; j < i.operands; j++)
3059 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3060
3061 /* Make still unresolved immediate matches conform to size of immediate
3062 given in i.suffix. */
3063 if (!finalize_imm ())
3064 return;
3065
3066 if (i.types[0].bitfield.imm1)
3067 i.imm_operands = 0; /* kludge for shift insns. */
3068
3069 /* We only need to check those implicit registers for instructions
3070 with 3 operands or less. */
3071 if (i.operands <= 3)
3072 for (j = 0; j < i.operands; j++)
3073 if (i.types[j].bitfield.inoutportreg
3074 || i.types[j].bitfield.shiftcount
3075 || i.types[j].bitfield.acc
3076 || i.types[j].bitfield.floatacc)
3077 i.reg_operands--;
3078
3079 /* ImmExt should be processed after SSE2AVX. */
3080 if (!i.tm.opcode_modifier.sse2avx
3081 && i.tm.opcode_modifier.immext)
3082 process_immext ();
3083
3084 /* For insns with operands there are more diddles to do to the opcode. */
3085 if (i.operands)
3086 {
3087 if (!process_operands ())
3088 return;
3089 }
3090 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3091 {
3092 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3093 as_warn (_("translating to `%sp'"), i.tm.name);
3094 }
3095
3096 if (i.tm.opcode_modifier.vex)
3097 build_vex_prefix (t);
3098
3099 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3100 instructions may define INT_OPCODE as well, so avoid this corner
3101 case for those instructions that use MODRM. */
3102 if (i.tm.base_opcode == INT_OPCODE
3103 && !i.tm.opcode_modifier.modrm
3104 && i.op[0].imms->X_add_number == 3)
3105 {
3106 i.tm.base_opcode = INT3_OPCODE;
3107 i.imm_operands = 0;
3108 }
3109
3110 if ((i.tm.opcode_modifier.jump
3111 || i.tm.opcode_modifier.jumpbyte
3112 || i.tm.opcode_modifier.jumpdword)
3113 && i.op[0].disps->X_op == O_constant)
3114 {
3115 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3116 the absolute address given by the constant. Since ix86 jumps and
3117 calls are pc relative, we need to generate a reloc. */
3118 i.op[0].disps->X_add_symbol = &abs_symbol;
3119 i.op[0].disps->X_op = O_symbol;
3120 }
3121
3122 if (i.tm.opcode_modifier.rex64)
3123 i.rex |= REX_W;
3124
3125 /* For 8 bit registers we need an empty rex prefix. Also if the
3126 instruction already has a REX prefix, we need to convert old
3127 registers to their REX forms. */
3128
3129 if ((i.types[0].bitfield.reg8
3130 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3131 || (i.types[1].bitfield.reg8
3132 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3133 || ((i.types[0].bitfield.reg8
3134 || i.types[1].bitfield.reg8)
3135 && i.rex != 0))
3136 {
3137 int x;
3138
3139 i.rex |= REX_OPCODE;
3140 for (x = 0; x < 2; x++)
3141 {
3142 /* Look for 8 bit operand that uses old registers. */
3143 if (i.types[x].bitfield.reg8
3144 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3145 {
3146 /* In case it is "hi" register, give up. */
3147 if (i.op[x].regs->reg_num > 3)
3148 as_bad (_("can't encode register '%s%s' in an "
3149 "instruction requiring REX prefix."),
3150 register_prefix, i.op[x].regs->reg_name);
3151
3152 /* Otherwise it is equivalent to the extended register.
3153 Since the encoding doesn't change this is merely
3154 cosmetic cleanup for debug output. */
3155
3156 i.op[x].regs = i.op[x].regs + 8;
3157 }
3158 }
3159 }
3160
3161 if (i.rex != 0)
3162 add_prefix (REX_OPCODE | i.rex);
3163
3164 /* We are ready to output the insn. */
3165 output_insn ();
3166 }
3167
3168 static char *
3169 parse_insn (char *line, char *mnemonic)
3170 {
3171 char *l = line;
3172 char *token_start = l;
3173 char *mnem_p;
3174 int supported;
3175 const insn_template *t;
3176 char *dot_p = NULL;
3177
3178 /* Non-zero if we found a prefix only acceptable with string insns. */
3179 const char *expecting_string_instruction = NULL;
3180
3181 while (1)
3182 {
3183 mnem_p = mnemonic;
3184 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3185 {
3186 if (*mnem_p == '.')
3187 dot_p = mnem_p;
3188 mnem_p++;
3189 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3190 {
3191 as_bad (_("no such instruction: `%s'"), token_start);
3192 return NULL;
3193 }
3194 l++;
3195 }
3196 if (!is_space_char (*l)
3197 && *l != END_OF_INSN
3198 && (intel_syntax
3199 || (*l != PREFIX_SEPARATOR
3200 && *l != ',')))
3201 {
3202 as_bad (_("invalid character %s in mnemonic"),
3203 output_invalid (*l));
3204 return NULL;
3205 }
3206 if (token_start == l)
3207 {
3208 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3209 as_bad (_("expecting prefix; got nothing"));
3210 else
3211 as_bad (_("expecting mnemonic; got nothing"));
3212 return NULL;
3213 }
3214
3215 /* Look up instruction (or prefix) via hash table. */
3216 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3217
3218 if (*l != END_OF_INSN
3219 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3220 && current_templates
3221 && current_templates->start->opcode_modifier.isprefix)
3222 {
3223 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3224 {
3225 as_bad ((flag_code != CODE_64BIT
3226 ? _("`%s' is only supported in 64-bit mode")
3227 : _("`%s' is not supported in 64-bit mode")),
3228 current_templates->start->name);
3229 return NULL;
3230 }
3231 /* If we are in 16-bit mode, do not allow addr16 or data16.
3232 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3233 if ((current_templates->start->opcode_modifier.size16
3234 || current_templates->start->opcode_modifier.size32)
3235 && flag_code != CODE_64BIT
3236 && (current_templates->start->opcode_modifier.size32
3237 ^ (flag_code == CODE_16BIT)))
3238 {
3239 as_bad (_("redundant %s prefix"),
3240 current_templates->start->name);
3241 return NULL;
3242 }
3243 /* Add prefix, checking for repeated prefixes. */
3244 switch (add_prefix (current_templates->start->base_opcode))
3245 {
3246 case PREFIX_EXIST:
3247 return NULL;
3248 case PREFIX_REP:
3249 expecting_string_instruction = current_templates->start->name;
3250 break;
3251 default:
3252 break;
3253 }
3254 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3255 token_start = ++l;
3256 }
3257 else
3258 break;
3259 }
3260
3261 if (!current_templates)
3262 {
3263 /* Check if we should swap operands or force a 32bit displacement
3264 in the encoding. */
3265 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3266 i.swap_operand = 1;
3267 else if (mnem_p - 4 == dot_p
3268 && dot_p[1] == 'd'
3269 && dot_p[2] == '3'
3270 && dot_p[3] == '2')
3271 i.disp32_encoding = 1;
3272 else
3273 goto check_suffix;
3274 mnem_p = dot_p;
3275 *dot_p = '\0';
3276 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3277 }
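/* Example: a ".s" suffix ("mov.s %eax, %ebx") sets i.swap_operand so
   match_template can pick the alternative encoding direction, and a
   ".d32" suffix sets i.disp32_encoding, which keeps md_assemble from
   calling optimize_disp and so preserves a full 32-bit displacement. */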
3278
3279 if (!current_templates)
3280 {
3281 check_suffix:
3282 /* See if we can get a match by trimming off a suffix. */
3283 switch (mnem_p[-1])
3284 {
3285 case WORD_MNEM_SUFFIX:
3286 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3287 i.suffix = SHORT_MNEM_SUFFIX;
3288 else
3289 case BYTE_MNEM_SUFFIX:
3290 case QWORD_MNEM_SUFFIX:
3291 i.suffix = mnem_p[-1];
3292 mnem_p[-1] = '\0';
3293 current_templates = (const templates *) hash_find (op_hash,
3294 mnemonic);
3295 break;
3296 case SHORT_MNEM_SUFFIX:
3297 case LONG_MNEM_SUFFIX:
3298 if (!intel_syntax)
3299 {
3300 i.suffix = mnem_p[-1];
3301 mnem_p[-1] = '\0';
3302 current_templates = (const templates *) hash_find (op_hash,
3303 mnemonic);
3304 }
3305 break;
3306
3307 /* Intel Syntax. */
3308 case 'd':
3309 if (intel_syntax)
3310 {
3311 if (intel_float_operand (mnemonic) == 1)
3312 i.suffix = SHORT_MNEM_SUFFIX;
3313 else
3314 i.suffix = LONG_MNEM_SUFFIX;
3315 mnem_p[-1] = '\0';
3316 current_templates = (const templates *) hash_find (op_hash,
3317 mnemonic);
3318 }
3319 break;
3320 }
3321 if (!current_templates)
3322 {
3323 as_bad (_("no such instruction: `%s'"), token_start);
3324 return NULL;
3325 }
3326 }
3327
3328 if (current_templates->start->opcode_modifier.jump
3329 || current_templates->start->opcode_modifier.jumpbyte)
3330 {
3331 /* Check for a branch hint. We allow ",pt" and ",pn" for
3332 predict taken and predict not taken respectively.
3333 I'm not sure that branch hints actually do anything on loop
3334 and jcxz insns (JumpByte) for current Pentium4 chips. They
3335 may work in the future and it doesn't hurt to accept them
3336 now. */
3337 if (l[0] == ',' && l[1] == 'p')
3338 {
3339 if (l[2] == 't')
3340 {
3341 if (!add_prefix (DS_PREFIX_OPCODE))
3342 return NULL;
3343 l += 3;
3344 }
3345 else if (l[2] == 'n')
3346 {
3347 if (!add_prefix (CS_PREFIX_OPCODE))
3348 return NULL;
3349 l += 3;
3350 }
3351 }
3352 }
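/* Example: "jne,pt target" emits a 0x3e (ds) prefix as a "predict
   taken" hint and "jne,pn target" a 0x2e (cs) prefix for "predict not
   taken", matching the add_prefix calls above. */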
3353 /* Any other comma loses. */
3354 if (*l == ',')
3355 {
3356 as_bad (_("invalid character %s in mnemonic"),
3357 output_invalid (*l));
3358 return NULL;
3359 }
3360
3361 /* Check if instruction is supported on specified architecture. */
3362 supported = 0;
3363 for (t = current_templates->start; t < current_templates->end; ++t)
3364 {
3365 supported |= cpu_flags_match (t);
3366 if (supported == CPU_FLAGS_PERFECT_MATCH)
3367 goto skip;
3368 }
3369
3370 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3371 {
3372 as_bad (flag_code == CODE_64BIT
3373 ? _("`%s' is not supported in 64-bit mode")
3374 : _("`%s' is only supported in 64-bit mode"),
3375 current_templates->start->name);
3376 return NULL;
3377 }
3378 if (supported != CPU_FLAGS_PERFECT_MATCH)
3379 {
3380 as_bad (_("`%s' is not supported on `%s%s'"),
3381 current_templates->start->name,
3382 cpu_arch_name ? cpu_arch_name : default_arch,
3383 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3384 return NULL;
3385 }
3386
3387 skip:
3388 if (!cpu_arch_flags.bitfield.cpui386
3389 && (flag_code != CODE_16BIT))
3390 {
3391 as_warn (_("use .code16 to ensure correct addressing mode"));
3392 }
3393
3394 /* Check for rep/repne without a string instruction. */
3395 if (expecting_string_instruction)
3396 {
3397 static templates override;
3398
3399 for (t = current_templates->start; t < current_templates->end; ++t)
3400 if (t->opcode_modifier.isstring)
3401 break;
3402 if (t >= current_templates->end)
3403 {
3404 as_bad (_("expecting string instruction after `%s'"),
3405 expecting_string_instruction);
3406 return NULL;
3407 }
3408 for (override.start = t; t < current_templates->end; ++t)
3409 if (!t->opcode_modifier.isstring)
3410 break;
3411 override.end = t;
3412 current_templates = &override;
3413 }
3414
3415 return l;
3416 }
3417
3418 static char *
3419 parse_operands (char *l, const char *mnemonic)
3420 {
3421 char *token_start;
3422
3423 /* 1 if operand is pending after ','. */
3424 unsigned int expecting_operand = 0;
3425
3426 /* Non-zero if operand parens not balanced. */
3427 unsigned int paren_not_balanced;
3428
3429 while (*l != END_OF_INSN)
3430 {
3431 /* Skip optional white space before operand. */
3432 if (is_space_char (*l))
3433 ++l;
3434 if (!is_operand_char (*l) && *l != END_OF_INSN)
3435 {
3436 as_bad (_("invalid character %s before operand %d"),
3437 output_invalid (*l),
3438 i.operands + 1);
3439 return NULL;
3440 }
3441 token_start = l; /* after white space */
3442 paren_not_balanced = 0;
3443 while (paren_not_balanced || *l != ',')
3444 {
3445 if (*l == END_OF_INSN)
3446 {
3447 if (paren_not_balanced)
3448 {
3449 if (!intel_syntax)
3450 as_bad (_("unbalanced parenthesis in operand %d."),
3451 i.operands + 1);
3452 else
3453 as_bad (_("unbalanced brackets in operand %d."),
3454 i.operands + 1);
3455 return NULL;
3456 }
3457 else
3458 break; /* we are done */
3459 }
3460 else if (!is_operand_char (*l) && !is_space_char (*l))
3461 {
3462 as_bad (_("invalid character %s in operand %d"),
3463 output_invalid (*l),
3464 i.operands + 1);
3465 return NULL;
3466 }
3467 if (!intel_syntax)
3468 {
3469 if (*l == '(')
3470 ++paren_not_balanced;
3471 if (*l == ')')
3472 --paren_not_balanced;
3473 }
3474 else
3475 {
3476 if (*l == '[')
3477 ++paren_not_balanced;
3478 if (*l == ']')
3479 --paren_not_balanced;
3480 }
3481 l++;
3482 }
3483 if (l != token_start)
3484 { /* Yes, we've read in another operand. */
3485 unsigned int operand_ok;
3486 this_operand = i.operands++;
3487 i.types[this_operand].bitfield.unspecified = 1;
3488 if (i.operands > MAX_OPERANDS)
3489 {
3490 as_bad (_("spurious operands; (%d operands/instruction max)"),
3491 MAX_OPERANDS);
3492 return NULL;
3493 }
3494 /* Now parse operand adding info to 'i' as we go along. */
3495 END_STRING_AND_SAVE (l);
3496
3497 if (intel_syntax)
3498 operand_ok =
3499 i386_intel_operand (token_start,
3500 intel_float_operand (mnemonic));
3501 else
3502 operand_ok = i386_att_operand (token_start);
3503
3504 RESTORE_END_STRING (l);
3505 if (!operand_ok)
3506 return NULL;
3507 }
3508 else
3509 {
3510 if (expecting_operand)
3511 {
3512 expecting_operand_after_comma:
3513 as_bad (_("expecting operand after ','; got nothing"));
3514 return NULL;
3515 }
3516 if (*l == ',')
3517 {
3518 as_bad (_("expecting operand before ','; got nothing"));
3519 return NULL;
3520 }
3521 }
3522
3523 /* Now *l must be either ',' or END_OF_INSN. */
3524 if (*l == ',')
3525 {
3526 if (*++l == END_OF_INSN)
3527 {
3528 /* Skip the comma; if end of line follows, complain. */
3529 goto expecting_operand_after_comma;
3530 }
3531 expecting_operand = 1;
3532 }
3533 }
3534 return l;
3535 }
3536
3537 static void
3538 swap_2_operands (int xchg1, int xchg2)
3539 {
3540 union i386_op temp_op;
3541 i386_operand_type temp_type;
3542 enum bfd_reloc_code_real temp_reloc;
3543
3544 temp_type = i.types[xchg2];
3545 i.types[xchg2] = i.types[xchg1];
3546 i.types[xchg1] = temp_type;
3547 temp_op = i.op[xchg2];
3548 i.op[xchg2] = i.op[xchg1];
3549 i.op[xchg1] = temp_op;
3550 temp_reloc = i.reloc[xchg2];
3551 i.reloc[xchg2] = i.reloc[xchg1];
3552 i.reloc[xchg1] = temp_reloc;
3553 }
3554
3555 static void
3556 swap_operands (void)
3557 {
3558 switch (i.operands)
3559 {
3560 case 5:
3561 case 4:
3562 swap_2_operands (1, i.operands - 2);
3563 case 3:
3564 case 2:
3565 swap_2_operands (0, i.operands - 1);
3566 break;
3567 default:
3568 abort ();
3569 }
3570
3571 if (i.mem_operands == 2)
3572 {
3573 const seg_entry *temp_seg;
3574 temp_seg = i.seg[0];
3575 i.seg[0] = i.seg[1];
3576 i.seg[1] = temp_seg;
3577 }
3578 }
3579
3580 /* Try to ensure constant immediates are represented in the smallest
3581 opcode possible. */
3582 static void
3583 optimize_imm (void)
3584 {
3585 char guess_suffix = 0;
3586 int op;
3587
3588 if (i.suffix)
3589 guess_suffix = i.suffix;
3590 else if (i.reg_operands)
3591 {
3592 /* Figure out a suffix from the last register operand specified.
3593 We can't do this properly yet, i.e. excluding InOutPortReg,
3594 but the following works for instructions with immediates.
3595 In any case, we can't set i.suffix yet. */
3596 for (op = i.operands; --op >= 0;)
3597 if (i.types[op].bitfield.reg8)
3598 {
3599 guess_suffix = BYTE_MNEM_SUFFIX;
3600 break;
3601 }
3602 else if (i.types[op].bitfield.reg16)
3603 {
3604 guess_suffix = WORD_MNEM_SUFFIX;
3605 break;
3606 }
3607 else if (i.types[op].bitfield.reg32)
3608 {
3609 guess_suffix = LONG_MNEM_SUFFIX;
3610 break;
3611 }
3612 else if (i.types[op].bitfield.reg64)
3613 {
3614 guess_suffix = QWORD_MNEM_SUFFIX;
3615 break;
3616 }
3617 }
3618 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3619 guess_suffix = WORD_MNEM_SUFFIX;
3620
3621 for (op = i.operands; --op >= 0;)
3622 if (operand_type_check (i.types[op], imm))
3623 {
3624 switch (i.op[op].imms->X_op)
3625 {
3626 case O_constant:
3627 /* If a suffix is given, this operand may be shortened. */
3628 switch (guess_suffix)
3629 {
3630 case LONG_MNEM_SUFFIX:
3631 i.types[op].bitfield.imm32 = 1;
3632 i.types[op].bitfield.imm64 = 1;
3633 break;
3634 case WORD_MNEM_SUFFIX:
3635 i.types[op].bitfield.imm16 = 1;
3636 i.types[op].bitfield.imm32 = 1;
3637 i.types[op].bitfield.imm32s = 1;
3638 i.types[op].bitfield.imm64 = 1;
3639 break;
3640 case BYTE_MNEM_SUFFIX:
3641 i.types[op].bitfield.imm8 = 1;
3642 i.types[op].bitfield.imm8s = 1;
3643 i.types[op].bitfield.imm16 = 1;
3644 i.types[op].bitfield.imm32 = 1;
3645 i.types[op].bitfield.imm32s = 1;
3646 i.types[op].bitfield.imm64 = 1;
3647 break;
3648 }
3649
3650 /* If this operand is at most 16 bits, convert it
3651 to a signed 16 bit number before trying to see
3652 whether it will fit in an even smaller size.
3653 This allows a 16-bit operand such as $0xffe0 to
3654 be recognised as within Imm8S range. */
3655 if ((i.types[op].bitfield.imm16)
3656 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3657 {
3658 i.op[op].imms->X_add_number =
3659 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3660 }
3661 if ((i.types[op].bitfield.imm32)
3662 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3663 == 0))
3664 {
3665 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3666 ^ ((offsetT) 1 << 31))
3667 - ((offsetT) 1 << 31));
3668 }
3669 i.types[op]
3670 = operand_type_or (i.types[op],
3671 smallest_imm_type (i.op[op].imms->X_add_number));
3672
3673 /* We must avoid matching of Imm32 templates when 64bit
3674 only immediate is available. */
3675 if (guess_suffix == QWORD_MNEM_SUFFIX)
3676 i.types[op].bitfield.imm32 = 0;
3677 break;
3678
3679 case O_absent:
3680 case O_register:
3681 abort ();
3682
3683 /* Symbols and expressions. */
3684 default:
3685 /* Convert symbolic operand to proper sizes for matching, but don't
3686 prevent matching a set of insns that only supports sizes other
3687 than those matching the insn suffix. */
3688 {
3689 i386_operand_type mask, allowed;
3690 const insn_template *t;
3691
3692 operand_type_set (&mask, 0);
3693 operand_type_set (&allowed, 0);
3694
3695 for (t = current_templates->start;
3696 t < current_templates->end;
3697 ++t)
3698 allowed = operand_type_or (allowed,
3699 t->operand_types[op]);
3700 switch (guess_suffix)
3701 {
3702 case QWORD_MNEM_SUFFIX:
3703 mask.bitfield.imm64 = 1;
3704 mask.bitfield.imm32s = 1;
3705 break;
3706 case LONG_MNEM_SUFFIX:
3707 mask.bitfield.imm32 = 1;
3708 break;
3709 case WORD_MNEM_SUFFIX:
3710 mask.bitfield.imm16 = 1;
3711 break;
3712 case BYTE_MNEM_SUFFIX:
3713 mask.bitfield.imm8 = 1;
3714 break;
3715 default:
3716 break;
3717 }
3718 allowed = operand_type_and (mask, allowed);
3719 if (!operand_type_all_zero (&allowed))
3720 i.types[op] = operand_type_and (i.types[op], mask);
3721 }
3722 break;
3723 }
3724 }
3725 }
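/* Example: for "add $1, %ebx" the register operand suggests a LONG
   suffix, smallest_imm_type classifies the constant as fitting Imm8S,
   and match_template can then pick the sign-extended imm8 form
   (opcode 0x83) instead of a full 32-bit immediate. */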
3726
3727 /* Try to use the smallest displacement type too. */
3728 static void
3729 optimize_disp (void)
3730 {
3731 int op;
3732
3733 for (op = i.operands; --op >= 0;)
3734 if (operand_type_check (i.types[op], disp))
3735 {
3736 if (i.op[op].disps->X_op == O_constant)
3737 {
3738 offsetT op_disp = i.op[op].disps->X_add_number;
3739
3740 if (i.types[op].bitfield.disp16
3741 && (op_disp & ~(offsetT) 0xffff) == 0)
3742 {
3743 /* If this operand is at most 16 bits, convert
3744 to a signed 16 bit number and don't use 64bit
3745 displacement. */
3746 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3747 i.types[op].bitfield.disp64 = 0;
3748 }
3749 if (i.types[op].bitfield.disp32
3750 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3751 {
3752 /* If this operand is at most 32 bits, convert
3753 to a signed 32 bit number and don't use 64bit
3754 displacement. */
3755 op_disp &= (((offsetT) 2 << 31) - 1);
3756 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3757 i.types[op].bitfield.disp64 = 0;
3758 }
3759 if (!op_disp && i.types[op].bitfield.baseindex)
3760 {
3761 i.types[op].bitfield.disp8 = 0;
3762 i.types[op].bitfield.disp16 = 0;
3763 i.types[op].bitfield.disp32 = 0;
3764 i.types[op].bitfield.disp32s = 0;
3765 i.types[op].bitfield.disp64 = 0;
3766 i.op[op].disps = 0;
3767 i.disp_operands--;
3768 }
3769 else if (flag_code == CODE_64BIT)
3770 {
3771 if (fits_in_signed_long (op_disp))
3772 {
3773 i.types[op].bitfield.disp64 = 0;
3774 i.types[op].bitfield.disp32s = 1;
3775 }
3776 if (i.prefix[ADDR_PREFIX]
3777 && fits_in_unsigned_long (op_disp))
3778 i.types[op].bitfield.disp32 = 1;
3779 }
3780 if ((i.types[op].bitfield.disp32
3781 || i.types[op].bitfield.disp32s
3782 || i.types[op].bitfield.disp16)
3783 && fits_in_signed_byte (op_disp))
3784 i.types[op].bitfield.disp8 = 1;
3785 }
3786 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3787 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3788 {
3789 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3790 i.op[op].disps, 0, i.reloc[op]);
3791 i.types[op].bitfield.disp8 = 0;
3792 i.types[op].bitfield.disp16 = 0;
3793 i.types[op].bitfield.disp32 = 0;
3794 i.types[op].bitfield.disp32s = 0;
3795 i.types[op].bitfield.disp64 = 0;
3796 }
3797 else
3798 /* We only support 64bit displacement on constants. */
3799 i.types[op].bitfield.disp64 = 0;
3800 }
3801 }
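/* Example: for "movl %eax, 4(%ebp)" the constant displacement fits in
   a signed byte, so disp8 is set and the short mod=01 form can be
   used; an explicit zero displacement against a base register, as in
   "movl %eax, 0(%ebx)", is dropped altogether. */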
3802
3803 /* Check if operands are valid for the instruction. Update VEX
3804 operand types. */
3805
3806 static int
3807 VEX_check_operands (const insn_template *t)
3808 {
3809 if (!t->opcode_modifier.vex)
3810 return 0;
3811
3812 /* Only check VEX_Imm4, which must be the first operand. */
3813 if (t->operand_types[0].bitfield.vec_imm4)
3814 {
3815 if (i.op[0].imms->X_op != O_constant
3816 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3817 {
3818 i.error = bad_imm4;
3819 return 1;
3820 }
3821
3822 /* Turn off Imm8 so that update_imm won't complain. */
3823 i.types[0] = vec_imm4;
3824 }
3825
3826 return 0;
3827 }
3828
3829 static const insn_template *
3830 match_template (void)
3831 {
3832 /* Points to template once we've found it. */
3833 const insn_template *t;
3834 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3835 i386_operand_type overlap4;
3836 unsigned int found_reverse_match;
3837 i386_opcode_modifier suffix_check;
3838 i386_operand_type operand_types [MAX_OPERANDS];
3839 int addr_prefix_disp;
3840 unsigned int j;
3841 unsigned int found_cpu_match;
3842 unsigned int check_register;
3843
3844 #if MAX_OPERANDS != 5
3845 # error "MAX_OPERANDS must be 5."
3846 #endif
3847
3848 found_reverse_match = 0;
3849 addr_prefix_disp = -1;
3850
3851 memset (&suffix_check, 0, sizeof (suffix_check));
3852 if (i.suffix == BYTE_MNEM_SUFFIX)
3853 suffix_check.no_bsuf = 1;
3854 else if (i.suffix == WORD_MNEM_SUFFIX)
3855 suffix_check.no_wsuf = 1;
3856 else if (i.suffix == SHORT_MNEM_SUFFIX)
3857 suffix_check.no_ssuf = 1;
3858 else if (i.suffix == LONG_MNEM_SUFFIX)
3859 suffix_check.no_lsuf = 1;
3860 else if (i.suffix == QWORD_MNEM_SUFFIX)
3861 suffix_check.no_qsuf = 1;
3862 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3863 suffix_check.no_ldsuf = 1;
3864
3865 /* Must have right number of operands. */
3866 i.error = number_of_operands_mismatch;
3867
3868 for (t = current_templates->start; t < current_templates->end; t++)
3869 {
3870 addr_prefix_disp = -1;
3871
3872 if (i.operands != t->operands)
3873 continue;
3874
3875 /* Check processor support. */
3876 i.error = unsupported;
3877 found_cpu_match = (cpu_flags_match (t)
3878 == CPU_FLAGS_PERFECT_MATCH);
3879 if (!found_cpu_match)
3880 continue;
3881
3882 /* Check old gcc support. */
3883 i.error = old_gcc_only;
3884 if (!old_gcc && t->opcode_modifier.oldgcc)
3885 continue;
3886
3887 /* Check AT&T mnemonic. */
3888 i.error = unsupported_with_intel_mnemonic;
3889 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3890 continue;
3891
3892 /* Check AT&T/Intel syntax. */
3893 i.error = unsupported_syntax;
3894 if ((intel_syntax && t->opcode_modifier.attsyntax)
3895 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3896 continue;
3897
3898 /* Check the suffix, except for some instructions in intel mode. */
3899 i.error = invalid_instruction_suffix;
3900 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3901 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3902 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3903 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3904 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3905 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3906 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3907 continue;
3908
3909 if (!operand_size_match (t))
3910 continue;
3911
3912 for (j = 0; j < MAX_OPERANDS; j++)
3913 operand_types[j] = t->operand_types[j];
3914
3915 /* In general, don't allow 64-bit operands in 32-bit mode. */
3916 if (i.suffix == QWORD_MNEM_SUFFIX
3917 && flag_code != CODE_64BIT
3918 && (intel_syntax
3919 ? (!t->opcode_modifier.ignoresize
3920 && !intel_float_operand (t->name))
3921 : intel_float_operand (t->name) != 2)
3922 && ((!operand_types[0].bitfield.regmmx
3923 && !operand_types[0].bitfield.regxmm
3924 && !operand_types[0].bitfield.regymm)
3925 || (!operand_types[t->operands > 1].bitfield.regmmx
3926 && !!operand_types[t->operands > 1].bitfield.regxmm
3927 && !!operand_types[t->operands > 1].bitfield.regymm))
3928 && (t->base_opcode != 0x0fc7
3929 || t->extension_opcode != 1 /* cmpxchg8b */))
3930 continue;
3931
3932 /* In general, don't allow 32-bit operands on pre-386. */
3933 else if (i.suffix == LONG_MNEM_SUFFIX
3934 && !cpu_arch_flags.bitfield.cpui386
3935 && (intel_syntax
3936 ? (!t->opcode_modifier.ignoresize
3937 && !intel_float_operand (t->name))
3938 : intel_float_operand (t->name) != 2)
3939 && ((!operand_types[0].bitfield.regmmx
3940 && !operand_types[0].bitfield.regxmm)
3941 || (!operand_types[t->operands > 1].bitfield.regmmx
3942 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3943 continue;
3944
3945 /* Do not verify operands when there are none. */
3946 else
3947 {
3948 if (!t->operands)
3949 /* We've found a match; break out of loop. */
3950 break;
3951 }
3952
3953 /* An address size prefix turns Disp16 into Disp32 in 16-bit mode,
3954 Disp32 into Disp16 in 32-bit mode and Disp64 into Disp32 in 64-bit mode. */
3955 if (i.prefix[ADDR_PREFIX] != 0)
3956 {
3957 /* There should be only one Disp operand. */
3958 switch (flag_code)
3959 {
3960 case CODE_16BIT:
3961 for (j = 0; j < MAX_OPERANDS; j++)
3962 {
3963 if (operand_types[j].bitfield.disp16)
3964 {
3965 addr_prefix_disp = j;
3966 operand_types[j].bitfield.disp32 = 1;
3967 operand_types[j].bitfield.disp16 = 0;
3968 break;
3969 }
3970 }
3971 break;
3972 case CODE_32BIT:
3973 for (j = 0; j < MAX_OPERANDS; j++)
3974 {
3975 if (operand_types[j].bitfield.disp32)
3976 {
3977 addr_prefix_disp = j;
3978 operand_types[j].bitfield.disp32 = 0;
3979 operand_types[j].bitfield.disp16 = 1;
3980 break;
3981 }
3982 }
3983 break;
3984 case CODE_64BIT:
3985 for (j = 0; j < MAX_OPERANDS; j++)
3986 {
3987 if (operand_types[j].bitfield.disp64)
3988 {
3989 addr_prefix_disp = j;
3990 operand_types[j].bitfield.disp64 = 0;
3991 operand_types[j].bitfield.disp32 = 1;
3992 break;
3993 }
3994 }
3995 break;
3996 }
3997 }
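/* For instance, in 32-bit code

       movw %ax, foo(%bp,%si)

   selects 16-bit addressing (an 0x67 address-size prefix), so the
   template's Disp32 operand has just been re-typed as Disp16 above
   before operand matching continues.  */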
3998
3999 /* We check register size only if size of operands can be
4000 encoded the canonical way. */
4001 check_register = t->opcode_modifier.w;
4002 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4003 switch (t->operands)
4004 {
4005 case 1:
4006 if (!operand_type_match (overlap0, i.types[0]))
4007 continue;
4008 break;
4009 case 2:
4010 /* xchg %eax, %eax is a special case. It is an alias for nop
4011 only in 32bit mode and we can use opcode 0x90. In 64bit
4012 mode, we can't use 0x90 for xchg %eax, %eax since it should
4013 zero-extend %eax to %rax. */
4014 if (flag_code == CODE_64BIT
4015 && t->base_opcode == 0x90
4016 && operand_type_equal (&i.types [0], &acc32)
4017 && operand_type_equal (&i.types [1], &acc32))
4018 continue;
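/* Concretely: 64-bit xchg %eax, %eax must be encoded as 87 c0 rather
   than the one-byte 90, because 90 is NOP there and would not
   zero-extend %eax into %rax.  */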
4019 if (i.swap_operand)
4020 {
4021 /* If we swap operands in the encoding, we either match
4022 the next template or reverse the direction of operands. */
4023 if (t->opcode_modifier.s)
4024 continue;
4025 else if (t->opcode_modifier.d)
4026 goto check_reverse;
4027 }
4028
4029 case 3:
4030 /* If we swap operands in the encoding, we match the next template. */
4031 if (i.swap_operand && t->opcode_modifier.s)
4032 continue;
4033 case 4:
4034 case 5:
4035 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4036 if (!operand_type_match (overlap0, i.types[0])
4037 || !operand_type_match (overlap1, i.types[1])
4038 || (check_register
4039 && !operand_type_register_match (overlap0, i.types[0],
4040 operand_types[0],
4041 overlap1, i.types[1],
4042 operand_types[1])))
4043 {
4044 /* Check if other direction is valid ... */
4045 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4046 continue;
4047
4048 check_reverse:
4049 /* Try reversing direction of operands. */
4050 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4051 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4052 if (!operand_type_match (overlap0, i.types[0])
4053 || !operand_type_match (overlap1, i.types[1])
4054 || (check_register
4055 && !operand_type_register_match (overlap0,
4056 i.types[0],
4057 operand_types[1],
4058 overlap1,
4059 i.types[1],
4060 operand_types[0])))
4061 {
4062 /* Does not match either direction. */
4063 continue;
4064 }
4065 /* found_reverse_match holds which of D or FloatDR
4066 we've found. */
4067 if (t->opcode_modifier.d)
4068 found_reverse_match = Opcode_D;
4069 else if (t->opcode_modifier.floatd)
4070 found_reverse_match = Opcode_FloatD;
4071 else
4072 found_reverse_match = 0;
4073 if (t->opcode_modifier.floatr)
4074 found_reverse_match |= Opcode_FloatR;
4075 }
4076 else
4077 {
4078 /* Found a forward 2 operand match here. */
4079 switch (t->operands)
4080 {
4081 case 5:
4082 overlap4 = operand_type_and (i.types[4],
4083 operand_types[4]);
4084 case 4:
4085 overlap3 = operand_type_and (i.types[3],
4086 operand_types[3]);
4087 case 3:
4088 overlap2 = operand_type_and (i.types[2],
4089 operand_types[2]);
4090 break;
4091 }
4092
4093 switch (t->operands)
4094 {
4095 case 5:
4096 if (!operand_type_match (overlap4, i.types[4])
4097 || !operand_type_register_match (overlap3,
4098 i.types[3],
4099 operand_types[3],
4100 overlap4,
4101 i.types[4],
4102 operand_types[4]))
4103 continue;
4104 case 4:
4105 if (!operand_type_match (overlap3, i.types[3])
4106 || (check_register
4107 && !operand_type_register_match (overlap2,
4108 i.types[2],
4109 operand_types[2],
4110 overlap3,
4111 i.types[3],
4112 operand_types[3])))
4113 continue;
4114 case 3:
4115 /* Here we make use of the fact that there are no
4116 reverse match 3 operand instructions, and all 3
4117 operand instructions only need to be checked for
4118 register consistency between operands 2 and 3. */
4119 if (!operand_type_match (overlap2, i.types[2])
4120 || (check_register
4121 && !operand_type_register_match (overlap1,
4122 i.types[1],
4123 operand_types[1],
4124 overlap2,
4125 i.types[2],
4126 operand_types[2])))
4127 continue;
4128 break;
4129 }
4130 }
4131 /* Found either forward/reverse 2, 3 or 4 operand match here:
4132 slip through to break. */
4133 }
4134 if (!found_cpu_match)
4135 {
4136 found_reverse_match = 0;
4137 continue;
4138 }
4139
4140 /* Check if VEX operands are valid. */
4141 if (VEX_check_operands (t))
4142 continue;
4143
4144 /* We've found a match; break out of loop. */
4145 break;
4146 }
4147
4148 if (t == current_templates->end)
4149 {
4150 /* We found no match. */
4151 const char *err_msg;
4152 switch (i.error)
4153 {
4154 default:
4155 abort ();
4156 case operand_size_mismatch:
4157 err_msg = _("operand size mismatch");
4158 break;
4159 case operand_type_mismatch:
4160 err_msg = _("operand type mismatch");
4161 break;
4162 case register_type_mismatch:
4163 err_msg = _("register type mismatch");
4164 break;
4165 case number_of_operands_mismatch:
4166 err_msg = _("number of operands mismatch");
4167 break;
4168 case invalid_instruction_suffix:
4169 err_msg = _("invalid instruction suffix");
4170 break;
4171 case bad_imm4:
4172 err_msg = _("Imm4 isn't the first operand");
4173 break;
4174 case old_gcc_only:
4175 err_msg = _("only supported with old gcc");
4176 break;
4177 case unsupported_with_intel_mnemonic:
4178 err_msg = _("unsupported with Intel mnemonic");
4179 break;
4180 case unsupported_syntax:
4181 err_msg = _("unsupported syntax");
4182 break;
4183 case unsupported:
4184 err_msg = _("unsupported");
4185 break;
4186 }
4187 as_bad (_("%s for `%s'"), err_msg,
4188 current_templates->start->name);
4189 return NULL;
4190 }
4191
4192 if (!quiet_warnings)
4193 {
4194 if (!intel_syntax
4195 && (i.types[0].bitfield.jumpabsolute
4196 != operand_types[0].bitfield.jumpabsolute))
4197 {
4198 as_warn (_("indirect %s without `*'"), t->name);
4199 }
4200
4201 if (t->opcode_modifier.isprefix
4202 && t->opcode_modifier.ignoresize)
4203 {
4204 /* Warn them that a data or address size prefix doesn't
4205 affect assembly of the next line of code. */
4206 as_warn (_("stand-alone `%s' prefix"), t->name);
4207 }
4208 }
4209
4210 /* Copy the template we found. */
4211 i.tm = *t;
4212
4213 if (addr_prefix_disp != -1)
4214 i.tm.operand_types[addr_prefix_disp]
4215 = operand_types[addr_prefix_disp];
4216
4217 if (found_reverse_match)
4218 {
4219 /* If we found a reverse match we must alter the opcode
4220 direction bit. found_reverse_match holds bits to change
4221 (different for int & float insns). */
4222
4223 i.tm.base_opcode ^= found_reverse_match;
4224
4225 i.tm.operand_types[0] = operand_types[1];
4226 i.tm.operand_types[1] = operand_types[0];
4227 }
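/* Concretely, Opcode_D is the 0x2 "direction" bit of the integer
   opcodes, so a reverse match turns e.g. 0x89 (mov r32 -> r/m32)
   into 0x8b (mov r/m32 -> r32), making the swapped operand order
   encodable.  */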
4228
4229 return t;
4230 }
4231
4232 static int
4233 check_string (void)
4234 {
4235 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4236 if (i.tm.operand_types[mem_op].bitfield.esseg)
4237 {
4238 if (i.seg[0] != NULL && i.seg[0] != &es)
4239 {
4240 as_bad (_("`%s' operand %d must use `%ses' segment"),
4241 i.tm.name,
4242 mem_op + 1,
4243 register_prefix);
4244 return 0;
4245 }
4246 /* There's only ever one segment override allowed per instruction.
4247 This instruction possibly has a legal segment override on the
4248 second operand, so copy the segment to where non-string
4249 instructions store it, allowing common code. */
4250 i.seg[0] = i.seg[1];
4251 }
4252 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4253 {
4254 if (i.seg[1] != NULL && i.seg[1] != &es)
4255 {
4256 as_bad (_("`%s' operand %d must use `%ses' segment"),
4257 i.tm.name,
4258 mem_op + 2,
4259 register_prefix);
4260 return 0;
4261 }
4262 }
4263 return 1;
4264 }
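/* For example, "movsw %fs:(%si), %es:(%di)" is accepted (the source
   override is legal), while an attempt to override the %es-only
   operand, e.g. "movsw (%si), %fs:(%di)", is rejected by the check
   above.  */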
4265
4266 static int
4267 process_suffix (void)
4268 {
4269 /* If matched instruction specifies an explicit instruction mnemonic
4270 suffix, use it. */
4271 if (i.tm.opcode_modifier.size16)
4272 i.suffix = WORD_MNEM_SUFFIX;
4273 else if (i.tm.opcode_modifier.size32)
4274 i.suffix = LONG_MNEM_SUFFIX;
4275 else if (i.tm.opcode_modifier.size64)
4276 i.suffix = QWORD_MNEM_SUFFIX;
4277 else if (i.reg_operands)
4278 {
4279 /* If there's no instruction mnemonic suffix we try to invent one
4280 based on register operands. */
4281 if (!i.suffix)
4282 {
4283 /* We take i.suffix from the last register operand specified.
4284 The destination register type is more significant than the
4285 source register type. crc32 in SSE4.2 prefers the source
4286 register type. */
4287 if (i.tm.base_opcode == 0xf20f38f1)
4288 {
4289 if (i.types[0].bitfield.reg16)
4290 i.suffix = WORD_MNEM_SUFFIX;
4291 else if (i.types[0].bitfield.reg32)
4292 i.suffix = LONG_MNEM_SUFFIX;
4293 else if (i.types[0].bitfield.reg64)
4294 i.suffix = QWORD_MNEM_SUFFIX;
4295 }
4296 else if (i.tm.base_opcode == 0xf20f38f0)
4297 {
4298 if (i.types[0].bitfield.reg8)
4299 i.suffix = BYTE_MNEM_SUFFIX;
4300 }
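/* e.g. "crc32b %dl, %eax" takes its 'b' suffix from the source
   register %dl even though the destination is 32-bit, which is why
   crc32 is special-cased here.  */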
4301
4302 if (!i.suffix)
4303 {
4304 int op;
4305
4306 if (i.tm.base_opcode == 0xf20f38f1
4307 || i.tm.base_opcode == 0xf20f38f0)
4308 {
4309 /* We have to know the operand size for crc32. */
4310 as_bad (_("ambiguous memory operand size for `%s'"),
4311 i.tm.name);
4312 return 0;
4313 }
4314
4315 for (op = i.operands; --op >= 0;)
4316 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4317 {
4318 if (i.types[op].bitfield.reg8)
4319 {
4320 i.suffix = BYTE_MNEM_SUFFIX;
4321 break;
4322 }
4323 else if (i.types[op].bitfield.reg16)
4324 {
4325 i.suffix = WORD_MNEM_SUFFIX;
4326 break;
4327 }
4328 else if (i.types[op].bitfield.reg32)
4329 {
4330 i.suffix = LONG_MNEM_SUFFIX;
4331 break;
4332 }
4333 else if (i.types[op].bitfield.reg64)
4334 {
4335 i.suffix = QWORD_MNEM_SUFFIX;
4336 break;
4337 }
4338 }
4339 }
4340 }
4341 else if (i.suffix == BYTE_MNEM_SUFFIX)
4342 {
4343 if (intel_syntax
4344 && i.tm.opcode_modifier.ignoresize
4345 && i.tm.opcode_modifier.no_bsuf)
4346 i.suffix = 0;
4347 else if (!check_byte_reg ())
4348 return 0;
4349 }
4350 else if (i.suffix == LONG_MNEM_SUFFIX)
4351 {
4352 if (intel_syntax
4353 && i.tm.opcode_modifier.ignoresize
4354 && i.tm.opcode_modifier.no_lsuf)
4355 i.suffix = 0;
4356 else if (!check_long_reg ())
4357 return 0;
4358 }
4359 else if (i.suffix == QWORD_MNEM_SUFFIX)
4360 {
4361 if (intel_syntax
4362 && i.tm.opcode_modifier.ignoresize
4363 && i.tm.opcode_modifier.no_qsuf)
4364 i.suffix = 0;
4365 else if (!check_qword_reg ())
4366 return 0;
4367 }
4368 else if (i.suffix == WORD_MNEM_SUFFIX)
4369 {
4370 if (intel_syntax
4371 && i.tm.opcode_modifier.ignoresize
4372 && i.tm.opcode_modifier.no_wsuf)
4373 i.suffix = 0;
4374 else if (!check_word_reg ())
4375 return 0;
4376 }
4377 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4378 || i.suffix == YMMWORD_MNEM_SUFFIX)
4379 {
4380 /* Skip if the instruction has x/y suffix. match_template
4381 should check if it is a valid suffix. */
4382 }
4383 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4384 /* Do nothing if the instruction is going to ignore the prefix. */
4385 ;
4386 else
4387 abort ();
4388 }
4389 else if (i.tm.opcode_modifier.defaultsize
4390 && !i.suffix
4391 /* exclude fldenv/frstor/fsave/fstenv */
4392 && i.tm.opcode_modifier.no_ssuf)
4393 {
4394 i.suffix = stackop_size;
4395 }
4396 else if (intel_syntax
4397 && !i.suffix
4398 && (i.tm.operand_types[0].bitfield.jumpabsolute
4399 || i.tm.opcode_modifier.jumpbyte
4400 || i.tm.opcode_modifier.jumpintersegment
4401 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4402 && i.tm.extension_opcode <= 3)))
4403 {
4404 switch (flag_code)
4405 {
4406 case CODE_64BIT:
4407 if (!i.tm.opcode_modifier.no_qsuf)
4408 {
4409 i.suffix = QWORD_MNEM_SUFFIX;
4410 break;
4411 }
4412 case CODE_32BIT:
4413 if (!i.tm.opcode_modifier.no_lsuf)
4414 i.suffix = LONG_MNEM_SUFFIX;
4415 break;
4416 case CODE_16BIT:
4417 if (!i.tm.opcode_modifier.no_wsuf)
4418 i.suffix = WORD_MNEM_SUFFIX;
4419 break;
4420 }
4421 }
4422
4423 if (!i.suffix)
4424 {
4425 if (!intel_syntax)
4426 {
4427 if (i.tm.opcode_modifier.w)
4428 {
4429 as_bad (_("no instruction mnemonic suffix given and "
4430 "no register operands; can't size instruction"));
4431 return 0;
4432 }
4433 }
4434 else
4435 {
4436 unsigned int suffixes;
4437
4438 suffixes = !i.tm.opcode_modifier.no_bsuf;
4439 if (!i.tm.opcode_modifier.no_wsuf)
4440 suffixes |= 1 << 1;
4441 if (!i.tm.opcode_modifier.no_lsuf)
4442 suffixes |= 1 << 2;
4443 if (!i.tm.opcode_modifier.no_ldsuf)
4444 suffixes |= 1 << 3;
4445 if (!i.tm.opcode_modifier.no_ssuf)
4446 suffixes |= 1 << 4;
4447 if (!i.tm.opcode_modifier.no_qsuf)
4448 suffixes |= 1 << 5;
4449
4450 /* There is more than one possible suffix match. */
4451 if (i.tm.opcode_modifier.w
4452 || ((suffixes & (suffixes - 1))
4453 && !i.tm.opcode_modifier.defaultsize
4454 && !i.tm.opcode_modifier.ignoresize))
4455 {
4456 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4457 return 0;
4458 }
4459 }
4460 }
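/* "suffixes & (suffixes - 1)" is the usual more-than-one-bit-set
   test: e.g. if only the 'w' and 'l' suffixes are allowed, suffixes
   is 0x6 and 0x6 & 0x5 is non-zero, so an Intel-mode operand without
   an explicit size keyword is reported as ambiguous.  */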
4461
4462 /* Change the opcode based on the operand size given by i.suffix.
4463 We don't need to change things for byte insns. */
4464
4465 if (i.suffix
4466 && i.suffix != BYTE_MNEM_SUFFIX
4467 && i.suffix != XMMWORD_MNEM_SUFFIX
4468 && i.suffix != YMMWORD_MNEM_SUFFIX)
4469 {
4470 /* It's not a byte; select word/dword operation. */
4471 if (i.tm.opcode_modifier.w)
4472 {
4473 if (i.tm.opcode_modifier.shortform)
4474 i.tm.base_opcode |= 8;
4475 else
4476 i.tm.base_opcode |= 1;
4477 }
4478
4479 /* Now select between word & dword operations via the operand
4480 size prefix, except for instructions that will ignore this
4481 prefix anyway. */
4482 if (i.tm.opcode_modifier.addrprefixop0)
4483 {
4484 /* The address size override prefix changes the size of the
4485 first operand. */
4486 if ((flag_code == CODE_32BIT
4487 && i.op->regs[0].reg_type.bitfield.reg16)
4488 || (flag_code != CODE_32BIT
4489 && i.op->regs[0].reg_type.bitfield.reg32))
4490 if (!add_prefix (ADDR_PREFIX_OPCODE))
4491 return 0;
4492 }
4493 else if (i.suffix != QWORD_MNEM_SUFFIX
4494 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4495 && !i.tm.opcode_modifier.ignoresize
4496 && !i.tm.opcode_modifier.floatmf
4497 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4498 || (flag_code == CODE_64BIT
4499 && i.tm.opcode_modifier.jumpbyte)))
4500 {
4501 unsigned int prefix = DATA_PREFIX_OPCODE;
4502
4503 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4504 prefix = ADDR_PREFIX_OPCODE;
4505
4506 if (!add_prefix (prefix))
4507 return 0;
4508 }
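/* For example, "movw %ax, %bx" assembled in 32-bit code receives the
   0x66 operand-size prefix here, while a jumpbyte insn such as
   "loopw" gets the 0x67 address-size prefix instead, since its
   "size" is really the width of the %cx/%ecx counter.  */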
4509
4510 /* Set mode64 for an operand. */
4511 if (i.suffix == QWORD_MNEM_SUFFIX
4512 && flag_code == CODE_64BIT
4513 && !i.tm.opcode_modifier.norex64)
4514 {
4515 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4516 need rex64. cmpxchg8b is also a special case. */
4517 if (! (i.operands == 2
4518 && i.tm.base_opcode == 0x90
4519 && i.tm.extension_opcode == None
4520 && operand_type_equal (&i.types [0], &acc64)
4521 && operand_type_equal (&i.types [1], &acc64))
4522 && ! (i.operands == 1
4523 && i.tm.base_opcode == 0xfc7
4524 && i.tm.extension_opcode == 1
4525 && !operand_type_check (i.types [0], reg)
4526 && operand_type_check (i.types [0], anymem)))
4527 i.rex |= REX_W;
4528 }
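/* e.g. "addq %rax, (%rbx)" arrives here with the 'q' suffix and gets
   REX_W merged into i.rex, so it is emitted with a 0x48 REX prefix
   (48 01 03); the exclusions above keep plain NOP (xchg %rax,%rax)
   and cmpxchg8b from growing an unwanted REX.W.  */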
4529
4530 /* Size floating point instruction. */
4531 if (i.suffix == LONG_MNEM_SUFFIX)
4532 if (i.tm.opcode_modifier.floatmf)
4533 i.tm.base_opcode ^= 4;
4534 }
4535
4536 return 1;
4537 }
4538
4539 static int
4540 check_byte_reg (void)
4541 {
4542 int op;
4543
4544 for (op = i.operands; --op >= 0;)
4545 {
4546 /* If this is an eight bit register, it's OK. If it's the 16 or
4547 32 bit version of an eight bit register, we will just use the
4548 low portion, and that's OK too. */
4549 if (i.types[op].bitfield.reg8)
4550 continue;
4551
4552 /* crc32 doesn't generate this warning. */
4553 if (i.tm.base_opcode == 0xf20f38f0)
4554 continue;
4555
4556 if ((i.types[op].bitfield.reg16
4557 || i.types[op].bitfield.reg32
4558 || i.types[op].bitfield.reg64)
4559 && i.op[op].regs->reg_num < 4)
4560 {
4561 /* Prohibit these changes in the 64bit mode, since the
4562 lowering is more complicated. */
4563 if (flag_code == CODE_64BIT
4564 && !i.tm.operand_types[op].bitfield.inoutportreg)
4565 {
4566 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4567 register_prefix, i.op[op].regs->reg_name,
4568 i.suffix);
4569 return 0;
4570 }
4571 #if REGISTER_WARNINGS
4572 if (!quiet_warnings
4573 && !i.tm.operand_types[op].bitfield.inoutportreg)
4574 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4575 register_prefix,
4576 (i.op[op].regs + (i.types[op].bitfield.reg16
4577 ? REGNAM_AL - REGNAM_AX
4578 : REGNAM_AL - REGNAM_EAX))->reg_name,
4579 register_prefix,
4580 i.op[op].regs->reg_name,
4581 i.suffix);
4582 #endif
4583 continue;
4584 }
4585 /* Any other register is bad. */
4586 if (i.types[op].bitfield.reg16
4587 || i.types[op].bitfield.reg32
4588 || i.types[op].bitfield.reg64
4589 || i.types[op].bitfield.regmmx
4590 || i.types[op].bitfield.regxmm
4591 || i.types[op].bitfield.regymm
4592 || i.types[op].bitfield.sreg2
4593 || i.types[op].bitfield.sreg3
4594 || i.types[op].bitfield.control
4595 || i.types[op].bitfield.debug
4596 || i.types[op].bitfield.test
4597 || i.types[op].bitfield.floatreg
4598 || i.types[op].bitfield.floatacc)
4599 {
4600 as_bad (_("`%s%s' not allowed with `%s%c'"),
4601 register_prefix,
4602 i.op[op].regs->reg_name,
4603 i.tm.name,
4604 i.suffix);
4605 return 0;
4606 }
4607 }
4608 return 1;
4609 }
4610
4611 static int
4612 check_long_reg (void)
4613 {
4614 int op;
4615
4616 for (op = i.operands; --op >= 0;)
4617 /* Reject eight bit registers, except where the template requires
4618 them (e.g. movzb). */
4619 if (i.types[op].bitfield.reg8
4620 && (i.tm.operand_types[op].bitfield.reg16
4621 || i.tm.operand_types[op].bitfield.reg32
4622 || i.tm.operand_types[op].bitfield.acc))
4623 {
4624 as_bad (_("`%s%s' not allowed with `%s%c'"),
4625 register_prefix,
4626 i.op[op].regs->reg_name,
4627 i.tm.name,
4628 i.suffix);
4629 return 0;
4630 }
4631 /* Warn if the e prefix on a general reg is missing. */
4632 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4633 && i.types[op].bitfield.reg16
4634 && (i.tm.operand_types[op].bitfield.reg32
4635 || i.tm.operand_types[op].bitfield.acc))
4636 {
4637 /* Prohibit these changes in the 64bit mode, since the
4638 lowering is more complicated. */
4639 if (flag_code == CODE_64BIT)
4640 {
4641 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4642 register_prefix, i.op[op].regs->reg_name,
4643 i.suffix);
4644 return 0;
4645 }
4646 #if REGISTER_WARNINGS
4647 else
4648 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4649 register_prefix,
4650 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4651 register_prefix,
4652 i.op[op].regs->reg_name,
4653 i.suffix);
4654 #endif
4655 }
4656 /* Warn if the r prefix on a general reg is missing. */
4657 else if (i.types[op].bitfield.reg64
4658 && (i.tm.operand_types[op].bitfield.reg32
4659 || i.tm.operand_types[op].bitfield.acc))
4660 {
4661 if (intel_syntax
4662 && i.tm.opcode_modifier.toqword
4663 && !i.types[0].bitfield.regxmm)
4664 {
4665 /* Convert to QWORD. We want REX byte. */
4666 i.suffix = QWORD_MNEM_SUFFIX;
4667 }
4668 else
4669 {
4670 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4671 register_prefix, i.op[op].regs->reg_name,
4672 i.suffix);
4673 return 0;
4674 }
4675 }
4676 return 1;
4677 }
4678
4679 static int
4680 check_qword_reg (void)
4681 {
4682 int op;
4683
4684 for (op = i.operands; --op >= 0; )
4685 /* Reject eight bit registers, except where the template requires
4686 them (e.g. movzb). */
4687 if (i.types[op].bitfield.reg8
4688 && (i.tm.operand_types[op].bitfield.reg16
4689 || i.tm.operand_types[op].bitfield.reg32
4690 || i.tm.operand_types[op].bitfield.acc))
4691 {
4692 as_bad (_("`%s%s' not allowed with `%s%c'"),
4693 register_prefix,
4694 i.op[op].regs->reg_name,
4695 i.tm.name,
4696 i.suffix);
4697 return 0;
4698 }
4699 /* Warn if the r prefix on a general reg is missing. */
4700 else if ((i.types[op].bitfield.reg16
4701 || i.types[op].bitfield.reg32)
4702 && (i.tm.operand_types[op].bitfield.reg32
4703 || i.tm.operand_types[op].bitfield.acc))
4704 {
4705 /* Prohibit these changes in the 64bit mode, since the
4706 lowering is more complicated. */
4707 if (intel_syntax
4708 && i.tm.opcode_modifier.todword
4709 && !i.types[0].bitfield.regxmm)
4710 {
4711 /* Convert to DWORD. We don't want REX byte. */
4712 i.suffix = LONG_MNEM_SUFFIX;
4713 }
4714 else
4715 {
4716 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4717 register_prefix, i.op[op].regs->reg_name,
4718 i.suffix);
4719 return 0;
4720 }
4721 }
4722 return 1;
4723 }
4724
4725 static int
4726 check_word_reg (void)
4727 {
4728 int op;
4729 for (op = i.operands; --op >= 0;)
4730 /* Reject eight bit registers, except where the template requires
4731 them (e.g. movzb). */
4732 if (i.types[op].bitfield.reg8
4733 && (i.tm.operand_types[op].bitfield.reg16
4734 || i.tm.operand_types[op].bitfield.reg32
4735 || i.tm.operand_types[op].bitfield.acc))
4736 {
4737 as_bad (_("`%s%s' not allowed with `%s%c'"),
4738 register_prefix,
4739 i.op[op].regs->reg_name,
4740 i.tm.name,
4741 i.suffix);
4742 return 0;
4743 }
4744 /* Warn if the e prefix on a general reg is present. */
4745 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4746 && i.types[op].bitfield.reg32
4747 && (i.tm.operand_types[op].bitfield.reg16
4748 || i.tm.operand_types[op].bitfield.acc))
4749 {
4750 /* Prohibit these changes in the 64bit mode, since the
4751 lowering is more complicated. */
4752 if (flag_code == CODE_64BIT)
4753 {
4754 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4755 register_prefix, i.op[op].regs->reg_name,
4756 i.suffix);
4757 return 0;
4758 }
4759 else
4760 #if REGISTER_WARNINGS
4761 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4762 register_prefix,
4763 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4764 register_prefix,
4765 i.op[op].regs->reg_name,
4766 i.suffix);
4767 #endif
4768 }
4769 return 1;
4770 }
4771
4772 static int
4773 update_imm (unsigned int j)
4774 {
4775 i386_operand_type overlap = i.types[j];
4776 if ((overlap.bitfield.imm8
4777 || overlap.bitfield.imm8s
4778 || overlap.bitfield.imm16
4779 || overlap.bitfield.imm32
4780 || overlap.bitfield.imm32s
4781 || overlap.bitfield.imm64)
4782 && !operand_type_equal (&overlap, &imm8)
4783 && !operand_type_equal (&overlap, &imm8s)
4784 && !operand_type_equal (&overlap, &imm16)
4785 && !operand_type_equal (&overlap, &imm32)
4786 && !operand_type_equal (&overlap, &imm32s)
4787 && !operand_type_equal (&overlap, &imm64))
4788 {
4789 if (i.suffix)
4790 {
4791 i386_operand_type temp;
4792
4793 operand_type_set (&temp, 0);
4794 if (i.suffix == BYTE_MNEM_SUFFIX)
4795 {
4796 temp.bitfield.imm8 = overlap.bitfield.imm8;
4797 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4798 }
4799 else if (i.suffix == WORD_MNEM_SUFFIX)
4800 temp.bitfield.imm16 = overlap.bitfield.imm16;
4801 else if (i.suffix == QWORD_MNEM_SUFFIX)
4802 {
4803 temp.bitfield.imm64 = overlap.bitfield.imm64;
4804 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4805 }
4806 else
4807 temp.bitfield.imm32 = overlap.bitfield.imm32;
4808 overlap = temp;
4809 }
4810 else if (operand_type_equal (&overlap, &imm16_32_32s)
4811 || operand_type_equal (&overlap, &imm16_32)
4812 || operand_type_equal (&overlap, &imm16_32s))
4813 {
4814 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4815 overlap = imm16;
4816 else
4817 overlap = imm32s;
4818 }
4819 if (!operand_type_equal (&overlap, &imm8)
4820 && !operand_type_equal (&overlap, &imm8s)
4821 && !operand_type_equal (&overlap, &imm16)
4822 && !operand_type_equal (&overlap, &imm32)
4823 && !operand_type_equal (&overlap, &imm32s)
4824 && !operand_type_equal (&overlap, &imm64))
4825 {
4826 as_bad (_("no instruction mnemonic suffix given; "
4827 "can't determine immediate size"));
4828 return 0;
4829 }
4830 }
4831 i.types[j] = overlap;
4832
4833 return 1;
4834 }
4835
4836 static int
4837 finalize_imm (void)
4838 {
4839 unsigned int j, n;
4840
4841 /* Update the first 2 immediate operands. */
4842 n = i.operands > 2 ? 2 : i.operands;
4843 if (n)
4844 {
4845 for (j = 0; j < n; j++)
4846 if (update_imm (j) == 0)
4847 return 0;
4848
4849 /* The 3rd operand can't be an immediate operand. */
4850 gas_assert (operand_type_check (i.types[2], imm) == 0);
4851 }
4852
4853 return 1;
4854 }
4855
4856 static int
4857 bad_implicit_operand (int xmm)
4858 {
4859 const char *ireg = xmm ? "xmm0" : "ymm0";
4860
4861 if (intel_syntax)
4862 as_bad (_("the last operand of `%s' must be `%s%s'"),
4863 i.tm.name, register_prefix, ireg);
4864 else
4865 as_bad (_("the first operand of `%s' must be `%s%s'"),
4866 i.tm.name, register_prefix, ireg);
4867 return 0;
4868 }
4869
4870 static int
4871 process_operands (void)
4872 {
4873 /* Default segment register this instruction will use for memory
4874 accesses. 0 means unknown. This is only for optimizing out
4875 unnecessary segment overrides. */
4876 const seg_entry *default_seg = 0;
4877
4878 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4879 {
4880 unsigned int dupl = i.operands;
4881 unsigned int dest = dupl - 1;
4882 unsigned int j;
4883
4884 /* The destination must be an xmm register. */
4885 gas_assert (i.reg_operands
4886 && MAX_OPERANDS > dupl
4887 && operand_type_equal (&i.types[dest], &regxmm));
4888
4889 if (i.tm.opcode_modifier.firstxmm0)
4890 {
4891 /* The first operand is implicit and must be xmm0. */
4892 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4893 if (i.op[0].regs->reg_num != 0)
4894 return bad_implicit_operand (1);
4895
4896 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4897 {
4898 /* Keep xmm0 for instructions with VEX prefix and 3
4899 sources. */
4900 goto duplicate;
4901 }
4902 else
4903 {
4904 /* We remove the first xmm0 and keep the number of
4905 operands unchanged, which in fact duplicates the
4906 destination. */
4907 for (j = 1; j < i.operands; j++)
4908 {
4909 i.op[j - 1] = i.op[j];
4910 i.types[j - 1] = i.types[j];
4911 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4912 }
4913 }
4914 }
4915 else if (i.tm.opcode_modifier.implicit1stxmm0)
4916 {
4917 gas_assert ((MAX_OPERANDS - 1) > dupl
4918 && (i.tm.opcode_modifier.vexsources
4919 == VEX3SOURCES));
4920
4921 /* Add the implicit xmm0 for instructions with VEX prefix
4922 and 3 sources. */
4923 for (j = i.operands; j > 0; j--)
4924 {
4925 i.op[j] = i.op[j - 1];
4926 i.types[j] = i.types[j - 1];
4927 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4928 }
4929 i.op[0].regs
4930 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4931 i.types[0] = regxmm;
4932 i.tm.operand_types[0] = regxmm;
4933
4934 i.operands += 2;
4935 i.reg_operands += 2;
4936 i.tm.operands += 2;
4937
4938 dupl++;
4939 dest++;
4940 i.op[dupl] = i.op[dest];
4941 i.types[dupl] = i.types[dest];
4942 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4943 }
4944 else
4945 {
4946 duplicate:
4947 i.operands++;
4948 i.reg_operands++;
4949 i.tm.operands++;
4950
4951 i.op[dupl] = i.op[dest];
4952 i.types[dupl] = i.types[dest];
4953 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4954 }
4955
4956 if (i.tm.opcode_modifier.immext)
4957 process_immext ();
4958 }
4959 else if (i.tm.opcode_modifier.firstxmm0)
4960 {
4961 unsigned int j;
4962
4963 /* The first operand is implicit and must be xmm0/ymm0. */
4964 gas_assert (i.reg_operands
4965 && (operand_type_equal (&i.types[0], &regxmm)
4966 || operand_type_equal (&i.types[0], &regymm)));
4967 if (i.op[0].regs->reg_num != 0)
4968 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4969
4970 for (j = 1; j < i.operands; j++)
4971 {
4972 i.op[j - 1] = i.op[j];
4973 i.types[j - 1] = i.types[j];
4974
4975 /* We need to adjust fields in i.tm since they are used by
4976 build_modrm_byte. */
4977 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4978 }
4979
4980 i.operands--;
4981 i.reg_operands--;
4982 i.tm.operands--;
4983 }
4984 else if (i.tm.opcode_modifier.regkludge)
4985 {
4986 /* The imul $imm, %reg instruction is converted into
4987 imul $imm, %reg, %reg, and the clr %reg instruction
4988 is converted into xor %reg, %reg. */
4989
4990 unsigned int first_reg_op;
4991
4992 if (operand_type_check (i.types[0], reg))
4993 first_reg_op = 0;
4994 else
4995 first_reg_op = 1;
4996 /* Pretend we saw the extra register operand. */
4997 gas_assert (i.reg_operands == 1
4998 && i.op[first_reg_op + 1].regs == 0);
4999 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5000 i.types[first_reg_op + 1] = i.types[first_reg_op];
5001 i.operands++;
5002 i.reg_operands++;
5003 }
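/* e.g. "imul $3, %ebx" is handled here as "imul $3, %ebx, %ebx", and
   the old "clr %eax" alias as "xor %eax, %eax", by pretending the
   extra register operand was written.  */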
5004
5005 if (i.tm.opcode_modifier.shortform)
5006 {
5007 if (i.types[0].bitfield.sreg2
5008 || i.types[0].bitfield.sreg3)
5009 {
5010 if (i.tm.base_opcode == POP_SEG_SHORT
5011 && i.op[0].regs->reg_num == 1)
5012 {
5013 as_bad (_("you can't `pop %scs'"), register_prefix);
5014 return 0;
5015 }
5016 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5017 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5018 i.rex |= REX_B;
5019 }
5020 else
5021 {
5022 /* The register or float register operand is in operand
5023 0 or 1. */
5024 unsigned int op;
5025
5026 if (i.types[0].bitfield.floatreg
5027 || operand_type_check (i.types[0], reg))
5028 op = 0;
5029 else
5030 op = 1;
5031 /* Register goes in low 3 bits of opcode. */
5032 i.tm.base_opcode |= i.op[op].regs->reg_num;
5033 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5034 i.rex |= REX_B;
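/* e.g. "push %rbp" ORs register number 5 into base opcode 0x50,
   giving 0x55; for an extended register such as %r13 the REX_B bit
   just set supplies the fourth register bit (41 55).  */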
5035 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5036 {
5037 /* Warn about some common errors, but press on regardless.
5038 The first case can be generated by gcc (<= 2.8.1). */
5039 if (i.operands == 2)
5040 {
5041 /* Reversed arguments on faddp, fsubp, etc. */
5042 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5043 register_prefix, i.op[!intel_syntax].regs->reg_name,
5044 register_prefix, i.op[intel_syntax].regs->reg_name);
5045 }
5046 else
5047 {
5048 /* Extraneous `l' suffix on fp insn. */
5049 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5050 register_prefix, i.op[0].regs->reg_name);
5051 }
5052 }
5053 }
5054 }
5055 else if (i.tm.opcode_modifier.modrm)
5056 {
5057 /* The opcode is completed (modulo i.tm.extension_opcode which
5058 must be put into the modrm byte). Now, we make the modrm and
5059 index base bytes based on all the info we've collected. */
5060
5061 default_seg = build_modrm_byte ();
5062 }
5063 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5064 {
5065 default_seg = &ds;
5066 }
5067 else if (i.tm.opcode_modifier.isstring)
5068 {
5069 /* For the string instructions that allow a segment override
5070 on one of their operands, the default segment is ds. */
5071 default_seg = &ds;
5072 }
5073
5074 if (i.tm.base_opcode == 0x8d /* lea */
5075 && i.seg[0]
5076 && !quiet_warnings)
5077 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5078
5079 /* If a segment was explicitly specified, and the specified segment
5080 is not the default, use an opcode prefix to select it. If we
5081 never figured out what the default segment is, then default_seg
5082 will be zero at this point, and the specified segment prefix will
5083 always be used. */
5084 if ((i.seg[0]) && (i.seg[0] != default_seg))
5085 {
5086 if (!add_prefix (i.seg[0]->seg_prefix))
5087 return 0;
5088 }
5089 return 1;
5090 }
5091
5092 static const seg_entry *
5093 build_modrm_byte (void)
5094 {
5095 const seg_entry *default_seg = 0;
5096 unsigned int source, dest;
5097 int vex_3_sources;
5098
5099 /* The first operand of instructions with VEX prefix and 3 sources
5100 must be VEX_Imm4. */
5101 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5102 if (vex_3_sources)
5103 {
5104 unsigned int nds, reg_slot;
5105 expressionS *exp;
5106
5107 if (i.tm.opcode_modifier.veximmext
5108 && i.tm.opcode_modifier.immext)
5109 {
5110 dest = i.operands - 2;
5111 gas_assert (dest == 3);
5112 }
5113 else
5114 dest = i.operands - 1;
5115 nds = dest - 1;
5116
5117 /* There are 2 kinds of instructions:
5118 1. 5 operands: 4 register operands or 3 register operands
5119 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5120 VexW0 or VexW1. The destination must be either XMM or YMM
5121 register.
5122 2. 4 operands: 4 register operands or 3 register operands
5123 plus 1 memory operand, VexXDS, and VexImmExt */
5124 gas_assert ((i.reg_operands == 4
5125 || (i.reg_operands == 3 && i.mem_operands == 1))
5126 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5127 && (i.tm.opcode_modifier.veximmext
5128 || (i.imm_operands == 1
5129 && i.types[0].bitfield.vec_imm4
5130 && (i.tm.opcode_modifier.vexw == VEXW0
5131 || i.tm.opcode_modifier.vexw == VEXW1)
5132 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5133 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5134
5135 if (i.imm_operands == 0)
5136 {
5137 /* When there is no immediate operand, generate an 8bit
5138 immediate operand to encode the first operand. */
5139 exp = &im_expressions[i.imm_operands++];
5140 i.op[i.operands].imms = exp;
5141 i.types[i.operands] = imm8;
5142 i.operands++;
5143 /* If VexW1 is set, the first operand is the source and
5144 the second operand is encoded in the immediate operand. */
5145 if (i.tm.opcode_modifier.vexw == VEXW1)
5146 {
5147 source = 0;
5148 reg_slot = 1;
5149 }
5150 else
5151 {
5152 source = 1;
5153 reg_slot = 0;
5154 }
5155
5156 /* FMA swaps REG and NDS. */
5157 if (i.tm.cpu_flags.bitfield.cpufma)
5158 {
5159 unsigned int tmp;
5160 tmp = reg_slot;
5161 reg_slot = nds;
5162 nds = tmp;
5163 }
5164
5165 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5166 &regxmm)
5167 || operand_type_equal (&i.tm.operand_types[reg_slot],
5168 &regymm));
5169 exp->X_op = O_constant;
5170 exp->X_add_number
5171 = ((i.op[reg_slot].regs->reg_num
5172 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5173 << 4);
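/* The register is thus carried in the high nibble of the /is4
   immediate byte: e.g. %xmm10 (register number 10 once RegRex is
   folded in) contributes an immediate byte of 0xa0.  */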
5174 }
5175 else
5176 {
5177 unsigned int imm_slot;
5178
5179 if (i.tm.opcode_modifier.vexw == VEXW0)
5180 {
5181 /* If VexW0 is set, the third operand is the source and
5182 the second operand is encoded in the immediate
5183 operand. */
5184 source = 2;
5185 reg_slot = 1;
5186 }
5187 else
5188 {
5189 /* VexW1 is set, the second operand is the source and
5190 the third operand is encoded in the immediate
5191 operand. */
5192 source = 1;
5193 reg_slot = 2;
5194 }
5195
5196 if (i.tm.opcode_modifier.immext)
5197 {
5198 /* When ImmExt is set, the immediate byte is the last
5199 operand. */
5200 imm_slot = i.operands - 1;
5201 source--;
5202 reg_slot--;
5203 }
5204 else
5205 {
5206 imm_slot = 0;
5207
5208 /* Turn on Imm8 so that output_imm will generate it. */
5209 i.types[imm_slot].bitfield.imm8 = 1;
5210 }
5211
5212 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5213 &regxmm)
5214 || operand_type_equal (&i.tm.operand_types[reg_slot],
5215 &regymm));
5216 i.op[imm_slot].imms->X_add_number
5217 |= ((i.op[reg_slot].regs->reg_num
5218 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5219 << 4);
5220 }
5221
5222 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5223 || operand_type_equal (&i.tm.operand_types[nds],
5224 &regymm));
5225 i.vex.register_specifier = i.op[nds].regs;
5226 }
5227 else
5228 source = dest = 0;
5229
5230 /* i.reg_operands MUST be the number of real register operands;
5231 implicit registers do not count. If there are 3 register
5232 operands, it must be an instruction with VexNDS. For an
5233 instruction with VexNDD, the destination register is encoded
5234 in the VEX prefix. If there are 4 register operands, it must be
5235 an instruction with a VEX prefix and 3 sources. */
5236 if (i.mem_operands == 0
5237 && ((i.reg_operands == 2
5238 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5239 || (i.reg_operands == 3
5240 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5241 || (i.reg_operands == 4 && vex_3_sources)))
5242 {
5243 switch (i.operands)
5244 {
5245 case 2:
5246 source = 0;
5247 break;
5248 case 3:
5249 /* When there are 3 operands, one of them may be immediate,
5250 which may be the first or the last operand. Otherwise,
5251 the first operand must be shift count register (cl) or it
5252 is an instruction with VexNDS. */
5253 gas_assert (i.imm_operands == 1
5254 || (i.imm_operands == 0
5255 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5256 || i.types[0].bitfield.shiftcount)));
5257 if (operand_type_check (i.types[0], imm)
5258 || i.types[0].bitfield.shiftcount)
5259 source = 1;
5260 else
5261 source = 0;
5262 break;
5263 case 4:
5264 /* When there are 4 operands, the first two must be 8bit
5265 immediate operands. The source operand will be the 3rd
5266 one.
5267
5268 For instructions with VexNDS, if the first operand is
5269 an imm8, the source operand is the 2nd one. If the last
5270 operand is imm8, the source operand is the first one. */
5271 gas_assert ((i.imm_operands == 2
5272 && i.types[0].bitfield.imm8
5273 && i.types[1].bitfield.imm8)
5274 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5275 && i.imm_operands == 1
5276 && (i.types[0].bitfield.imm8
5277 || i.types[i.operands - 1].bitfield.imm8)));
5278 if (i.imm_operands == 2)
5279 source = 2;
5280 else
5281 {
5282 if (i.types[0].bitfield.imm8)
5283 source = 1;
5284 else
5285 source = 0;
5286 }
5287 break;
5288 case 5:
5289 break;
5290 default:
5291 abort ();
5292 }
5293
5294 if (!vex_3_sources)
5295 {
5296 dest = source + 1;
5297
5298 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5299 {
5300 /* For instructions with VexNDS, the register-only
5301 source operand must be XMM or YMM register. It is
5302 encoded in VEX prefix. We need to clear RegMem bit
5303 before calling operand_type_equal. */
5304 i386_operand_type op = i.tm.operand_types[dest];
5305 op.bitfield.regmem = 0;
5306 if ((dest + 1) >= i.operands
5307 || (!operand_type_equal (&op, &regxmm)
5308 && !operand_type_equal (&op, &regymm)))
5309 abort ();
5310 i.vex.register_specifier = i.op[dest].regs;
5311 dest++;
5312 }
5313 }
5314
5315 i.rm.mode = 3;
5316 /* One of the register operands will be encoded in the i.rm.reg
5317 field, the other in the combined i.rm.mode and i.rm.regmem
5318 fields. If no form of this instruction supports a memory
5319 destination operand, then we assume the source operand may
5320 sometimes be a memory operand and so we need to store the
5321 destination in the i.rm.reg field. */
5322 if (!i.tm.operand_types[dest].bitfield.regmem
5323 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5324 {
5325 i.rm.reg = i.op[dest].regs->reg_num;
5326 i.rm.regmem = i.op[source].regs->reg_num;
5327 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5328 i.rex |= REX_R;
5329 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5330 i.rex |= REX_B;
5331 }
5332 else
5333 {
5334 i.rm.reg = i.op[source].regs->reg_num;
5335 i.rm.regmem = i.op[dest].regs->reg_num;
5336 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5337 i.rex |= REX_B;
5338 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5339 i.rex |= REX_R;
5340 }
5341 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5342 {
5343 if (!i.types[0].bitfield.control
5344 && !i.types[1].bitfield.control)
5345 abort ();
5346 i.rex &= ~(REX_R | REX_B);
5347 add_prefix (LOCK_PREFIX_OPCODE);
5348 }
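/* This matches the special encoding of %cr8 outside 64-bit mode:
   e.g. "mov %cr8, %eax" is emitted as f0 0f 20 c0, with the LOCK
   prefix standing in for the unavailable REX.R bit.  */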
5349 }
5350 else
5351 { /* If it's not 2 reg operands... */
5352 unsigned int mem;
5353
5354 if (i.mem_operands)
5355 {
5356 unsigned int fake_zero_displacement = 0;
5357 unsigned int op;
5358
5359 for (op = 0; op < i.operands; op++)
5360 if (operand_type_check (i.types[op], anymem))
5361 break;
5362 gas_assert (op < i.operands);
5363
5364 default_seg = &ds;
5365
5366 if (i.base_reg == 0)
5367 {
5368 i.rm.mode = 0;
5369 if (!i.disp_operands)
5370 fake_zero_displacement = 1;
5371 if (i.index_reg == 0)
5372 {
5373 /* Operand is just <disp> */
5374 if (flag_code == CODE_64BIT)
5375 {
5376 /* In 64bit mode, RIP relative addressing replaces
5377 the 32bit absolute addressing encoding; absolute
5378 addressing must instead be encoded via one of the
5379 redundant SIB forms. */
5380 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5381 i.sib.base = NO_BASE_REGISTER;
5382 i.sib.index = NO_INDEX_REGISTER;
5383 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5384 ? disp32s : disp32);
5385 }
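/* e.g. 64-bit "movl foo, %eax" is encoded as 8b 04 25 disp32 (SIB
   with no base and no index), whereas "movl foo(%rip), %eax" uses
   the shorter 8b 05 disp32 RIP-relative form.  */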
5386 else if ((flag_code == CODE_16BIT)
5387 ^ (i.prefix[ADDR_PREFIX] != 0))
5388 {
5389 i.rm.regmem = NO_BASE_REGISTER_16;
5390 i.types[op] = disp16;
5391 }
5392 else
5393 {
5394 i.rm.regmem = NO_BASE_REGISTER;
5395 i.types[op] = disp32;
5396 }
5397 }
5398 else /* !i.base_reg && i.index_reg */
5399 {
5400 if (i.index_reg->reg_num == RegEiz
5401 || i.index_reg->reg_num == RegRiz)
5402 i.sib.index = NO_INDEX_REGISTER;
5403 else
5404 i.sib.index = i.index_reg->reg_num;
5405 i.sib.base = NO_BASE_REGISTER;
5406 i.sib.scale = i.log2_scale_factor;
5407 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5408 i.types[op].bitfield.disp8 = 0;
5409 i.types[op].bitfield.disp16 = 0;
5410 i.types[op].bitfield.disp64 = 0;
5411 if (flag_code != CODE_64BIT)
5412 {
5413 /* Must be 32 bit */
5414 i.types[op].bitfield.disp32 = 1;
5415 i.types[op].bitfield.disp32s = 0;
5416 }
5417 else
5418 {
5419 i.types[op].bitfield.disp32 = 0;
5420 i.types[op].bitfield.disp32s = 1;
5421 }
5422 if ((i.index_reg->reg_flags & RegRex) != 0)
5423 i.rex |= REX_X;
5424 }
5425 }
5426 /* RIP addressing for 64bit mode. */
5427 else if (i.base_reg->reg_num == RegRip ||
5428 i.base_reg->reg_num == RegEip)
5429 {
5430 i.rm.regmem = NO_BASE_REGISTER;
5431 i.types[op].bitfield.disp8 = 0;
5432 i.types[op].bitfield.disp16 = 0;
5433 i.types[op].bitfield.disp32 = 0;
5434 i.types[op].bitfield.disp32s = 1;
5435 i.types[op].bitfield.disp64 = 0;
5436 i.flags[op] |= Operand_PCrel;
5437 if (! i.disp_operands)
5438 fake_zero_displacement = 1;
5439 }
5440 else if (i.base_reg->reg_type.bitfield.reg16)
5441 {
5442 switch (i.base_reg->reg_num)
5443 {
5444 case 3: /* (%bx) */
5445 if (i.index_reg == 0)
5446 i.rm.regmem = 7;
5447 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5448 i.rm.regmem = i.index_reg->reg_num - 6;
5449 break;
5450 case 5: /* (%bp) */
5451 default_seg = &ss;
5452 if (i.index_reg == 0)
5453 {
5454 i.rm.regmem = 6;
5455 if (operand_type_check (i.types[op], disp) == 0)
5456 {
5457 /* fake (%bp) into 0(%bp) */
5458 i.types[op].bitfield.disp8 = 1;
5459 fake_zero_displacement = 1;
5460 }
5461 }
5462 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5463 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5464 break;
5465 default: /* (%si) -> 4 or (%di) -> 5 */
5466 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5467 }
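/* The resulting 16-bit r/m values are the classic table:
   (%bx,%si)=0 (%bx,%di)=1 (%bp,%si)=2 (%bp,%di)=3
   (%si)=4 (%di)=5 disp16-or-(%bp)=6 (%bx)=7.  */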
5468 i.rm.mode = mode_from_disp_size (i.types[op]);
5469 }
5470 else /* i.base_reg and 32/64 bit mode */
5471 {
5472 if (flag_code == CODE_64BIT
5473 && operand_type_check (i.types[op], disp))
5474 {
5475 i386_operand_type temp;
5476 operand_type_set (&temp, 0);
5477 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5478 i.types[op] = temp;
5479 if (i.prefix[ADDR_PREFIX] == 0)
5480 i.types[op].bitfield.disp32s = 1;
5481 else
5482 i.types[op].bitfield.disp32 = 1;
5483 }
5484
5485 i.rm.regmem = i.base_reg->reg_num;
5486 if ((i.base_reg->reg_flags & RegRex) != 0)
5487 i.rex |= REX_B;
5488 i.sib.base = i.base_reg->reg_num;
5489 /* x86-64 ignores REX prefix bit here to avoid decoder
5490 complications. */
5491 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5492 {
5493 default_seg = &ss;
5494 if (i.disp_operands == 0)
5495 {
5496 fake_zero_displacement = 1;
5497 i.types[op].bitfield.disp8 = 1;
5498 }
5499 }
5500 else if (i.base_reg->reg_num == ESP_REG_NUM)
5501 {
5502 default_seg = &ss;
5503 }
5504 i.sib.scale = i.log2_scale_factor;
5505 if (i.index_reg == 0)
5506 {
5507 /* <disp>(%esp) becomes two byte modrm with no index
5508 register. We've already stored the code for esp
5509 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5510 Any base register besides %esp will not use the
5511 extra modrm byte. */
5512 i.sib.index = NO_INDEX_REGISTER;
5513 }
5514 else
5515 {
5516 if (i.index_reg->reg_num == RegEiz
5517 || i.index_reg->reg_num == RegRiz)
5518 i.sib.index = NO_INDEX_REGISTER;
5519 else
5520 i.sib.index = i.index_reg->reg_num;
5521 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5522 if ((i.index_reg->reg_flags & RegRex) != 0)
5523 i.rex |= REX_X;
5524 }
5525
5526 if (i.disp_operands
5527 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5528 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5529 i.rm.mode = 0;
5530 else
5531 i.rm.mode = mode_from_disp_size (i.types[op]);
5532 }
5533
5534 if (fake_zero_displacement)
5535 {
5536 /* Fakes a zero displacement assuming that i.types[op]
5537 holds the correct displacement size. */
5538 expressionS *exp;
5539
5540 gas_assert (i.op[op].disps == 0);
5541 exp = &disp_expressions[i.disp_operands++];
5542 i.op[op].disps = exp;
5543 exp->X_op = O_constant;
5544 exp->X_add_number = 0;
5545 exp->X_add_symbol = (symbolS *) 0;
5546 exp->X_op_symbol = (symbolS *) 0;
5547 }
5548
5549 mem = op;
5550 }
5551 else
5552 mem = ~0;
5553
5554 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5555 {
5556 if (operand_type_check (i.types[0], imm))
5557 i.vex.register_specifier = NULL;
5558 else
5559 {
5560 /* VEX.vvvv encodes one of the sources when the first
5561 operand is not an immediate. */
5562 if (i.tm.opcode_modifier.vexw == VEXW0)
5563 i.vex.register_specifier = i.op[0].regs;
5564 else
5565 i.vex.register_specifier = i.op[1].regs;
5566 }
5567
5568 /* Destination is a XMM register encoded in the ModRM.reg
5569 and VEX.R bit. */
5570 i.rm.reg = i.op[2].regs->reg_num;
5571 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5572 i.rex |= REX_R;
5573
5574 /* ModRM.rm and VEX.B encodes the other source. */
5575 if (!i.mem_operands)
5576 {
5577 i.rm.mode = 3;
5578
5579 if (i.tm.opcode_modifier.vexw == VEXW0)
5580 i.rm.regmem = i.op[1].regs->reg_num;
5581 else
5582 i.rm.regmem = i.op[0].regs->reg_num;
5583
5584 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5585 i.rex |= REX_B;
5586 }
5587 }
5588 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5589 {
5590 i.vex.register_specifier = i.op[2].regs;
5591 if (!i.mem_operands)
5592 {
5593 i.rm.mode = 3;
5594 i.rm.regmem = i.op[1].regs->reg_num;
5595 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5596 i.rex |= REX_B;
5597 }
5598 }
5599 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5600 (if any) based on i.tm.extension_opcode. Again, we must be
5601 careful to make sure that segment/control/debug/test/MMX
5602 registers are coded into the i.rm.reg field. */
5603 else if (i.reg_operands)
5604 {
5605 unsigned int op;
5606 unsigned int vex_reg = ~0;
5607
5608 for (op = 0; op < i.operands; op++)
5609 if (i.types[op].bitfield.reg8
5610 || i.types[op].bitfield.reg16
5611 || i.types[op].bitfield.reg32
5612 || i.types[op].bitfield.reg64
5613 || i.types[op].bitfield.regmmx
5614 || i.types[op].bitfield.regxmm
5615 || i.types[op].bitfield.regymm
5616 || i.types[op].bitfield.sreg2
5617 || i.types[op].bitfield.sreg3
5618 || i.types[op].bitfield.control
5619 || i.types[op].bitfield.debug
5620 || i.types[op].bitfield.test)
5621 break;
5622
5623 if (vex_3_sources)
5624 op = dest;
5625 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5626 {
5627 /* For instructions with VexNDS, the register-only
5628 source operand is encoded in VEX prefix. */
5629 gas_assert (mem != (unsigned int) ~0);
5630
5631 if (op > mem)
5632 {
5633 vex_reg = op++;
5634 gas_assert (op < i.operands);
5635 }
5636 else
5637 {
5638 vex_reg = op + 1;
5639 gas_assert (vex_reg < i.operands);
5640 }
5641 }
5642 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5643 {
5644 /* For instructions with VexNDD, there should be
5645 no memory operand and the register destination
5646 is encoded in VEX prefix. */
5647 gas_assert (i.mem_operands == 0
5648 && (op + 2) == i.operands);
5649 vex_reg = op + 1;
5650 }
5651 else
5652 gas_assert (op < i.operands);
5653
5654 if (vex_reg != (unsigned int) ~0)
5655 {
5656 gas_assert (i.reg_operands == 2);
5657
5658 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5659 &regxmm)
5660 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5661 &regymm))
5662 abort ();
5663
5664 i.vex.register_specifier = i.op[vex_reg].regs;
5665 }
5666
5667 /* Don't set OP operand twice. */
5668 if (vex_reg != op)
5669 {
5670 /* If there is an extension opcode to put here, the
5671 register number must be put into the regmem field. */
5672 if (i.tm.extension_opcode != None)
5673 {
5674 i.rm.regmem = i.op[op].regs->reg_num;
5675 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5676 i.rex |= REX_B;
5677 }
5678 else
5679 {
5680 i.rm.reg = i.op[op].regs->reg_num;
5681 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5682 i.rex |= REX_R;
5683 }
5684 }
5685
5686 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5687 must set it to 3 to indicate this is a register operand
5688 in the regmem field. */
5689 if (!i.mem_operands)
5690 i.rm.mode = 3;
5691 }
5692
5693 /* Fill in i.rm.reg field with extension opcode (if any). */
5694 if (i.tm.extension_opcode != None)
5695 i.rm.reg = i.tm.extension_opcode;
5696 }
5697 return default_seg;
5698 }
5699
5700 static void
5701 output_branch (void)
5702 {
5703 char *p;
5704 int size;
5705 int code16;
5706 int prefix;
5707 relax_substateT subtype;
5708 symbolS *sym;
5709 offsetT off;
5710
5711 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5712 size = i.disp32_encoding ? BIG : SMALL;
5713
5714 prefix = 0;
5715 if (i.prefix[DATA_PREFIX] != 0)
5716 {
5717 prefix = 1;
5718 i.prefixes -= 1;
5719 code16 ^= CODE16;
5720 }
5721 /* Pentium4 branch hints. */
5722 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5723 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5724 {
5725 prefix++;
5726 i.prefixes--;
5727 }
5728 if (i.prefix[REX_PREFIX] != 0)
5729 {
5730 prefix++;
5731 i.prefixes--;
5732 }
5733
5734 if (i.prefixes != 0 && !intel_syntax)
5735 as_warn (_("skipping prefixes on this instruction"));
5736
5737 /* It's always a symbol; end the frag & set up for relax.
5738 Make sure there is enough room in this frag for the largest
5739 instruction we may generate in md_convert_frag. This is 2
5740 bytes for the opcode and room for the prefix and largest
5741 displacement. */
5742 frag_grow (prefix + 2 + 4);
5743 /* Prefix and 1 opcode byte go in fr_fix. */
5744 p = frag_more (prefix + 1);
5745 if (i.prefix[DATA_PREFIX] != 0)
5746 *p++ = DATA_PREFIX_OPCODE;
5747 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5748 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5749 *p++ = i.prefix[SEG_PREFIX];
5750 if (i.prefix[REX_PREFIX] != 0)
5751 *p++ = i.prefix[REX_PREFIX];
5752 *p = i.tm.base_opcode;
5753
5754 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5755 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5756 else if (cpu_arch_flags.bitfield.cpui386)
5757 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5758 else
5759 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5760 subtype |= code16;
5761
5762 sym = i.op[0].disps->X_add_symbol;
5763 off = i.op[0].disps->X_add_number;
5764
5765 if (i.op[0].disps->X_op != O_constant
5766 && i.op[0].disps->X_op != O_symbol)
5767 {
5768 /* Handle complex expressions. */
5769 sym = make_expr_symbol (i.op[0].disps);
5770 off = 0;
5771 }
5772
5773 /* 1 possible extra opcode + 4 byte displacement go in var part.
5774 Pass reloc in fr_var. */
5775 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5776 }
5777
5778 static void
5779 output_jump (void)
5780 {
5781 char *p;
5782 int size;
5783 fixS *fixP;
5784
5785 if (i.tm.opcode_modifier.jumpbyte)
5786 {
5787 /* This is a loop or jecxz type instruction. */
5788 size = 1;
5789 if (i.prefix[ADDR_PREFIX] != 0)
5790 {
5791 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5792 i.prefixes -= 1;
5793 }
5794 /* Pentium4 branch hints. */
5795 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5796 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5797 {
5798 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5799 i.prefixes--;
5800 }
5801 }
5802 else
5803 {
5804 int code16;
5805
5806 code16 = 0;
5807 if (flag_code == CODE_16BIT)
5808 code16 = CODE16;
5809
5810 if (i.prefix[DATA_PREFIX] != 0)
5811 {
5812 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5813 i.prefixes -= 1;
5814 code16 ^= CODE16;
5815 }
5816
5817 size = 4;
5818 if (code16)
5819 size = 2;
5820 }
5821
5822 if (i.prefix[REX_PREFIX] != 0)
5823 {
5824 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5825 i.prefixes -= 1;
5826 }
5827
5828 if (i.prefixes != 0 && !intel_syntax)
5829 as_warn (_("skipping prefixes on this instruction"));
5830
5831 p = frag_more (1 + size);
5832 *p++ = i.tm.base_opcode;
5833
5834 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5835 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5836
5837 /* All jumps handled here are signed, but don't use a signed limit
5838 check for 32 and 16 bit jumps as we want to allow wrap around at
5839 4G and 64k respectively. */
5840 if (size == 1)
5841 fixP->fx_signed = 1;
5842 }
5843
5844 static void
5845 output_interseg_jump (void)
5846 {
5847 char *p;
5848 int size;
5849 int prefix;
5850 int code16;
5851
5852 code16 = 0;
5853 if (flag_code == CODE_16BIT)
5854 code16 = CODE16;
5855
5856 prefix = 0;
5857 if (i.prefix[DATA_PREFIX] != 0)
5858 {
5859 prefix = 1;
5860 i.prefixes -= 1;
5861 code16 ^= CODE16;
5862 }
5863 if (i.prefix[REX_PREFIX] != 0)
5864 {
5865 prefix++;
5866 i.prefixes -= 1;
5867 }
5868
5869 size = 4;
5870 if (code16)
5871 size = 2;
5872
5873 if (i.prefixes != 0 && !intel_syntax)
5874 as_warn (_("skipping prefixes on this instruction"));
5875
5876 /* 1 opcode; 2 segment; offset */
5877 p = frag_more (prefix + 1 + 2 + size);
5878
5879 if (i.prefix[DATA_PREFIX] != 0)
5880 *p++ = DATA_PREFIX_OPCODE;
5881
5882 if (i.prefix[REX_PREFIX] != 0)
5883 *p++ = i.prefix[REX_PREFIX];
5884
5885 *p++ = i.tm.base_opcode;
5886 if (i.op[1].imms->X_op == O_constant)
5887 {
5888 offsetT n = i.op[1].imms->X_add_number;
5889
5890 if (size == 2
5891 && !fits_in_unsigned_word (n)
5892 && !fits_in_signed_word (n))
5893 {
5894 as_bad (_("16-bit jump out of range"));
5895 return;
5896 }
5897 md_number_to_chars (p, n, size);
5898 }
5899 else
5900 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5901 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5902 if (i.op[0].imms->X_op != O_constant)
5903 as_bad (_("can't handle non-absolute segment in `%s'"),
5904 i.tm.name);
5905 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5906 }
5907
5908 static void
5909 output_insn (void)
5910 {
5911 fragS *insn_start_frag;
5912 offsetT insn_start_off;
5913
5914 /* Tie dwarf2 debug info to the address at the start of the insn.
5915 We can't do this after the insn has been output as the current
5916 frag may have been closed off. eg. by frag_var. */
5917 dwarf2_emit_insn (0);
5918
5919 insn_start_frag = frag_now;
5920 insn_start_off = frag_now_fix ();
5921
5922 /* Output jumps. */
5923 if (i.tm.opcode_modifier.jump)
5924 output_branch ();
5925 else if (i.tm.opcode_modifier.jumpbyte
5926 || i.tm.opcode_modifier.jumpdword)
5927 output_jump ();
5928 else if (i.tm.opcode_modifier.jumpintersegment)
5929 output_interseg_jump ();
5930 else
5931 {
5932 /* Output normal instructions here. */
5933 char *p;
5934 unsigned char *q;
5935 unsigned int j;
5936 unsigned int prefix;
5937
5938 /* Since the VEX prefix encodes the implicit (mandatory) prefix,
5939 we don't need to emit it explicitly. */
5940 if (!i.tm.opcode_modifier.vex)
5941 {
5942 switch (i.tm.opcode_length)
5943 {
5944 case 3:
5945 if (i.tm.base_opcode & 0xff000000)
5946 {
5947 prefix = (i.tm.base_opcode >> 24) & 0xff;
5948 goto check_prefix;
5949 }
5950 break;
5951 case 2:
5952 if ((i.tm.base_opcode & 0xff0000) != 0)
5953 {
5954 prefix = (i.tm.base_opcode >> 16) & 0xff;
5955 if (i.tm.cpu_flags.bitfield.cpupadlock)
5956 {
5957 check_prefix:
5958 if (prefix != REPE_PREFIX_OPCODE
5959 || (i.prefix[REP_PREFIX]
5960 != REPE_PREFIX_OPCODE))
5961 add_prefix (prefix);
5962 }
5963 else
5964 add_prefix (prefix);
5965 }
5966 break;
5967 case 1:
5968 break;
5969 default:
5970 abort ();
5971 }
5972
5973 /* The prefix bytes. */
5974 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5975 if (*q)
5976 FRAG_APPEND_1_CHAR (*q);
5977 }
5978
5979 if (i.tm.opcode_modifier.vex)
5980 {
5981 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5982 if (*q)
5983 switch (j)
5984 {
5985 case REX_PREFIX:
5986 /* REX byte is encoded in VEX prefix. */
5987 break;
5988 case SEG_PREFIX:
5989 case ADDR_PREFIX:
5990 FRAG_APPEND_1_CHAR (*q);
5991 break;
5992 default:
5993 /* There should be no other prefixes for instructions
5994 with VEX prefix. */
5995 abort ();
5996 }
5997
5998 /* Now the VEX prefix. */
5999 p = frag_more (i.vex.length);
6000 for (j = 0; j < i.vex.length; j++)
6001 p[j] = i.vex.bytes[j];
6002 }
6003
6004 /* Now the opcode; be careful about word order here! */
6005 if (i.tm.opcode_length == 1)
6006 {
6007 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6008 }
6009 else
6010 {
6011 switch (i.tm.opcode_length)
6012 {
6013 case 3:
6014 p = frag_more (3);
6015 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6016 break;
6017 case 2:
6018 p = frag_more (2);
6019 break;
6020 default:
6021 abort ();
6022 break;
6023 }
6024
6025 /* Put out high byte first: can't use md_number_to_chars! */
6026 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6027 *p = i.tm.base_opcode & 0xff;
6028 }
6029
6030 /* Now the modrm byte and sib byte (if present). */
6031 if (i.tm.opcode_modifier.modrm)
6032 {
6033 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6034 | i.rm.reg << 3
6035 | i.rm.mode << 6));
6036 /* If i.rm.regmem == ESP (4)
6037 && i.rm.mode != (Register mode)
6038 && not 16 bit
6039 	     ==> need a SIB byte.  */
6040 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6041 && i.rm.mode != 3
6042 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6043 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6044 | i.sib.index << 3
6045 | i.sib.scale << 6));
6046 }
6047
6048 if (i.disp_operands)
6049 output_disp (insn_start_frag, insn_start_off);
6050
6051 if (i.imm_operands)
6052 output_imm (insn_start_frag, insn_start_off);
6053 }
6054
6055 #ifdef DEBUG386
6056 if (flag_debug)
6057 {
6058 pi ("" /*line*/, &i);
6059 }
6060 #endif /* DEBUG386 */
6061 }
6062
6063 /* Return the size of the displacement operand N. */
6064
6065 static int
6066 disp_size (unsigned int n)
6067 {
6068 int size = 4;
6069 if (i.types[n].bitfield.disp64)
6070 size = 8;
6071 else if (i.types[n].bitfield.disp8)
6072 size = 1;
6073 else if (i.types[n].bitfield.disp16)
6074 size = 2;
6075 return size;
6076 }
6077
6078 /* Return the size of the immediate operand N. */
6079
6080 static int
6081 imm_size (unsigned int n)
6082 {
6083 int size = 4;
6084 if (i.types[n].bitfield.imm64)
6085 size = 8;
6086 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6087 size = 1;
6088 else if (i.types[n].bitfield.imm16)
6089 size = 2;
6090 return size;
6091 }
6092
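/* Emit the displacement bytes, or queue fixups, for every operand of
   the current instruction that carries a displacement.  INSN_START_FRAG
   and INSN_START_OFF locate the start of the insn so that GOTPC style
   relocations can be adjusted to the address of the displacement.  */
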
6093 static void
6094 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6095 {
6096 char *p;
6097 unsigned int n;
6098
6099 for (n = 0; n < i.operands; n++)
6100 {
6101 if (operand_type_check (i.types[n], disp))
6102 {
6103 if (i.op[n].disps->X_op == O_constant)
6104 {
6105 int size = disp_size (n);
6106 offsetT val;
6107
6108 val = offset_in_range (i.op[n].disps->X_add_number,
6109 size);
6110 p = frag_more (size);
6111 md_number_to_chars (p, val, size);
6112 }
6113 else
6114 {
6115 enum bfd_reloc_code_real reloc_type;
6116 int size = disp_size (n);
6117 int sign = i.types[n].bitfield.disp32s;
6118 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6119
6120 /* We can't have 8 bit displacement here. */
6121 gas_assert (!i.types[n].bitfield.disp8);
6122
6123 /* The PC relative address is computed relative
6124 to the instruction boundary, so in case immediate
6125 		 fields follow, we need to adjust the value.  */
6126 if (pcrel && i.imm_operands)
6127 {
6128 unsigned int n1;
6129 int sz = 0;
6130
6131 for (n1 = 0; n1 < i.operands; n1++)
6132 if (operand_type_check (i.types[n1], imm))
6133 {
6134 /* Only one immediate is allowed for PC
6135 relative address. */
6136 gas_assert (sz == 0);
6137 sz = imm_size (n1);
6138 i.op[n].disps->X_add_number -= sz;
6139 }
6140 /* We should find the immediate. */
6141 gas_assert (sz != 0);
6142 }
6143
6144 p = frag_more (size);
6145 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6146 if (GOT_symbol
6147 && GOT_symbol == i.op[n].disps->X_add_symbol
6148 && (((reloc_type == BFD_RELOC_32
6149 || reloc_type == BFD_RELOC_X86_64_32S
6150 || (reloc_type == BFD_RELOC_64
6151 && object_64bit))
6152 && (i.op[n].disps->X_op == O_symbol
6153 || (i.op[n].disps->X_op == O_add
6154 && ((symbol_get_value_expression
6155 (i.op[n].disps->X_op_symbol)->X_op)
6156 == O_subtract))))
6157 || reloc_type == BFD_RELOC_32_PCREL))
6158 {
6159 offsetT add;
6160
6161 if (insn_start_frag == frag_now)
6162 add = (p - frag_now->fr_literal) - insn_start_off;
6163 else
6164 {
6165 fragS *fr;
6166
6167 add = insn_start_frag->fr_fix - insn_start_off;
6168 for (fr = insn_start_frag->fr_next;
6169 fr && fr != frag_now; fr = fr->fr_next)
6170 add += fr->fr_fix;
6171 add += p - frag_now->fr_literal;
6172 }
6173
6174 if (!object_64bit)
6175 {
6176 reloc_type = BFD_RELOC_386_GOTPC;
6177 i.op[n].imms->X_add_number += add;
6178 }
6179 else if (reloc_type == BFD_RELOC_64)
6180 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6181 else
6182 /* Don't do the adjustment for x86-64, as there
6183 the pcrel addressing is relative to the _next_
6184 insn, and that is taken care of in other code. */
6185 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6186 }
6187 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6188 i.op[n].disps, pcrel, reloc_type);
6189 }
6190 }
6191 }
6192 }
6193
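/* Emit the immediate bytes, or queue fixups, for every immediate
   operand of the current instruction, applying the same GOTPC
   adjustment as output_disp when _GLOBAL_OFFSET_TABLE_ is involved
   (see the long comment below).  */
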
6194 static void
6195 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6196 {
6197 char *p;
6198 unsigned int n;
6199
6200 for (n = 0; n < i.operands; n++)
6201 {
6202 if (operand_type_check (i.types[n], imm))
6203 {
6204 if (i.op[n].imms->X_op == O_constant)
6205 {
6206 int size = imm_size (n);
6207 offsetT val;
6208
6209 val = offset_in_range (i.op[n].imms->X_add_number,
6210 size);
6211 p = frag_more (size);
6212 md_number_to_chars (p, val, size);
6213 }
6214 else
6215 {
6216 /* Not absolute_section.
6217 Need a 32-bit fixup (don't support 8bit
6218 non-absolute imms). Try to support other
6219 sizes ... */
6220 enum bfd_reloc_code_real reloc_type;
6221 int size = imm_size (n);
6222 int sign;
6223
6224 if (i.types[n].bitfield.imm32s
6225 && (i.suffix == QWORD_MNEM_SUFFIX
6226 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6227 sign = 1;
6228 else
6229 sign = 0;
6230
6231 p = frag_more (size);
6232 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6233
6234 /* This is tough to explain. We end up with this one if we
6235 * have operands that look like
6236 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6237 * obtain the absolute address of the GOT, and it is strongly
6238 * preferable from a performance point of view to avoid using
6239 * a runtime relocation for this. The actual sequence of
6240 		 * instructions often looks something like:
6241 *
6242 * call .L66
6243 * .L66:
6244 * popl %ebx
6245 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6246 *
6247 * The call and pop essentially return the absolute address
6248 * of the label .L66 and store it in %ebx. The linker itself
6249 * will ultimately change the first operand of the addl so
6250 * that %ebx points to the GOT, but to keep things simple, the
6251 * .o file must have this operand set so that it generates not
6252 * the absolute address of .L66, but the absolute address of
6253 		 * itself.  This allows the linker to simply treat a GOTPC
6254 * relocation as asking for a pcrel offset to the GOT to be
6255 * added in, and the addend of the relocation is stored in the
6256 * operand field for the instruction itself.
6257 *
6258 * Our job here is to fix the operand so that it would add
6259 * the correct offset so that %ebx would point to itself. The
6260 * thing that is tricky is that .-.L66 will point to the
6261 * beginning of the instruction, so we need to further modify
6262 * the operand so that it will point to itself. There are
6263 * other cases where you have something like:
6264 *
6265 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6266 *
6267 * and here no correction would be required. Internally in
6268 * the assembler we treat operands of this form as not being
6269 * pcrel since the '.' is explicitly mentioned, and I wonder
6270 * whether it would simplify matters to do it this way. Who
6271 * knows. In earlier versions of the PIC patches, the
6272 * pcrel_adjust field was used to store the correction, but
6273 * since the expression is not pcrel, I felt it would be
6274 * confusing to do it this way. */
6275
6276 if ((reloc_type == BFD_RELOC_32
6277 || reloc_type == BFD_RELOC_X86_64_32S
6278 || reloc_type == BFD_RELOC_64)
6279 && GOT_symbol
6280 && GOT_symbol == i.op[n].imms->X_add_symbol
6281 && (i.op[n].imms->X_op == O_symbol
6282 || (i.op[n].imms->X_op == O_add
6283 && ((symbol_get_value_expression
6284 (i.op[n].imms->X_op_symbol)->X_op)
6285 == O_subtract))))
6286 {
6287 offsetT add;
6288
6289 if (insn_start_frag == frag_now)
6290 add = (p - frag_now->fr_literal) - insn_start_off;
6291 else
6292 {
6293 fragS *fr;
6294
6295 add = insn_start_frag->fr_fix - insn_start_off;
6296 for (fr = insn_start_frag->fr_next;
6297 fr && fr != frag_now; fr = fr->fr_next)
6298 add += fr->fr_fix;
6299 add += p - frag_now->fr_literal;
6300 }
6301
6302 if (!object_64bit)
6303 reloc_type = BFD_RELOC_386_GOTPC;
6304 else if (size == 4)
6305 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6306 else if (size == 8)
6307 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6308 i.op[n].imms->X_add_number += add;
6309 }
6310 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6311 i.op[n].imms, 0, reloc_type);
6312 }
6313 }
6314 }
6315 }
6316 \f
6317 /* x86_cons_fix_new is called via the expression parsing code when a
6318 reloc is needed. We use this hook to get the correct .got reloc. */
6319 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6320 static int cons_sign = -1;
6321
6322 void
6323 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6324 expressionS *exp)
6325 {
6326 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6327
6328 got_reloc = NO_RELOC;
6329
6330 #ifdef TE_PE
6331 if (exp->X_op == O_secrel)
6332 {
6333 exp->X_op = O_symbol;
6334 r = BFD_RELOC_32_SECREL;
6335 }
6336 #endif
6337
6338 fix_new_exp (frag, off, len, exp, 0, r);
6339 }
6340
6341 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6342 # define lex_got(reloc, adjust, types) NULL
6343 #else
6344 /* Parse operands of the form
6345 <symbol>@GOTOFF+<nnn>
6346 and similar .plt or .got references.
6347
6348 If we find one, set up the correct relocation in RELOC and copy the
6349 input string, minus the `@GOTOFF' into a malloc'd buffer for
6350 parsing by the calling routine. Return this buffer, and if ADJUST
6351 is non-null set it to the length of the string we removed from the
6352 input line. Otherwise return NULL. */
6353 static char *
6354 lex_got (enum bfd_reloc_code_real *rel,
6355 int *adjust,
6356 i386_operand_type *types)
6357 {
6358   /* Some of the relocations depend on the size of the field to
6359      be relocated.  But in our callers i386_immediate and i386_displacement
6360 we don't yet know the operand size (this will be set by insn
6361 matching). Hence we record the word32 relocation here,
6362 and adjust the reloc according to the real size in reloc(). */
6363 static const struct {
6364 const char *str;
6365 int len;
6366 const enum bfd_reloc_code_real rel[2];
6367 const i386_operand_type types64;
6368 } gotrel[] = {
6369 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6370 BFD_RELOC_X86_64_PLTOFF64 },
6371 OPERAND_TYPE_IMM64 },
6372 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6373 BFD_RELOC_X86_64_PLT32 },
6374 OPERAND_TYPE_IMM32_32S_DISP32 },
6375 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6376 BFD_RELOC_X86_64_GOTPLT64 },
6377 OPERAND_TYPE_IMM64_DISP64 },
6378 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6379 BFD_RELOC_X86_64_GOTOFF64 },
6380 OPERAND_TYPE_IMM64_DISP64 },
6381 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6382 BFD_RELOC_X86_64_GOTPCREL },
6383 OPERAND_TYPE_IMM32_32S_DISP32 },
6384 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6385 BFD_RELOC_X86_64_TLSGD },
6386 OPERAND_TYPE_IMM32_32S_DISP32 },
6387 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6388 _dummy_first_bfd_reloc_code_real },
6389 OPERAND_TYPE_NONE },
6390 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6391 BFD_RELOC_X86_64_TLSLD },
6392 OPERAND_TYPE_IMM32_32S_DISP32 },
6393 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6394 BFD_RELOC_X86_64_GOTTPOFF },
6395 OPERAND_TYPE_IMM32_32S_DISP32 },
6396 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6397 BFD_RELOC_X86_64_TPOFF32 },
6398 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6399 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6400 _dummy_first_bfd_reloc_code_real },
6401 OPERAND_TYPE_NONE },
6402 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6403 BFD_RELOC_X86_64_DTPOFF32 },
6404 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6405 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6406 _dummy_first_bfd_reloc_code_real },
6407 OPERAND_TYPE_NONE },
6408 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6409 _dummy_first_bfd_reloc_code_real },
6410 OPERAND_TYPE_NONE },
6411 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6412 BFD_RELOC_X86_64_GOT32 },
6413 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6414 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6415 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6416 OPERAND_TYPE_IMM32_32S_DISP32 },
6417 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6418 BFD_RELOC_X86_64_TLSDESC_CALL },
6419 OPERAND_TYPE_IMM32_32S_DISP32 },
6420 };
6421 char *cp;
6422 unsigned int j;
6423
6424 if (!IS_ELF)
6425 return NULL;
6426
6427 for (cp = input_line_pointer; *cp != '@'; cp++)
6428 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6429 return NULL;
6430
6431 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6432 {
6433 int len = gotrel[j].len;
6434 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6435 {
6436 if (gotrel[j].rel[object_64bit] != 0)
6437 {
6438 int first, second;
6439 char *tmpbuf, *past_reloc;
6440
6441 *rel = gotrel[j].rel[object_64bit];
6442 if (adjust)
6443 *adjust = len;
6444
6445 if (types)
6446 {
6447 if (flag_code != CODE_64BIT)
6448 {
6449 types->bitfield.imm32 = 1;
6450 types->bitfield.disp32 = 1;
6451 }
6452 else
6453 *types = gotrel[j].types64;
6454 }
6455
6456 if (GOT_symbol == NULL)
6457 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6458
6459 /* The length of the first part of our input line. */
6460 first = cp - input_line_pointer;
6461
6462 /* The second part goes from after the reloc token until
6463 (and including) an end_of_line char or comma. */
6464 past_reloc = cp + 1 + len;
6465 cp = past_reloc;
6466 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6467 ++cp;
6468 second = cp + 1 - past_reloc;
6469
6470 /* Allocate and copy string. The trailing NUL shouldn't
6471 be necessary, but be safe. */
6472 tmpbuf = (char *) xmalloc (first + second + 2);
6473 memcpy (tmpbuf, input_line_pointer, first);
6474 if (second != 0 && *past_reloc != ' ')
6475 /* Replace the relocation token with ' ', so that
6476 errors like foo@GOTOFF1 will be detected. */
6477 tmpbuf[first++] = ' ';
6478 memcpy (tmpbuf + first, past_reloc, second);
6479 tmpbuf[first + second] = '\0';
6480 return tmpbuf;
6481 }
6482
6483 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6484 gotrel[j].str, 1 << (5 + object_64bit));
6485 return NULL;
6486 }
6487 }
6488
6489 /* Might be a symbol version string. Don't as_bad here. */
6490 return NULL;
6491 }
6492
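/* Handle the expression of a data directive (.long and friends) so
   that @GOTOFF and similar relocation operators are recognized.  */
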
6493 void
6494 x86_cons (expressionS *exp, int size)
6495 {
6496 intel_syntax = -intel_syntax;
6497
6498 exp->X_md = 0;
6499 if (size == 4 || (object_64bit && size == 8))
6500 {
6501 /* Handle @GOTOFF and the like in an expression. */
6502 char *save;
6503 char *gotfree_input_line;
6504 int adjust;
6505
6506 save = input_line_pointer;
6507 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6508 if (gotfree_input_line)
6509 input_line_pointer = gotfree_input_line;
6510
6511 expression (exp);
6512
6513 if (gotfree_input_line)
6514 {
6515 /* expression () has merrily parsed up to the end of line,
6516 or a comma - in the wrong buffer. Transfer how far
6517 input_line_pointer has moved to the right buffer. */
6518 input_line_pointer = (save
6519 + (input_line_pointer - gotfree_input_line)
6520 + adjust);
6521 free (gotfree_input_line);
6522 if (exp->X_op == O_constant
6523 || exp->X_op == O_absent
6524 || exp->X_op == O_illegal
6525 || exp->X_op == O_register
6526 || exp->X_op == O_big)
6527 {
6528 char c = *input_line_pointer;
6529 *input_line_pointer = 0;
6530 as_bad (_("missing or invalid expression `%s'"), save);
6531 *input_line_pointer = c;
6532 }
6533 }
6534 }
6535 else
6536 expression (exp);
6537
6538 intel_syntax = -intel_syntax;
6539
6540 if (intel_syntax)
6541 i386_intel_simplify (exp);
6542 }
6543 #endif
6544
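/* Like cons (SIZE), but in 64-bit mode set cons_sign first so that
   x86_cons_fix_new requests a signed relocation for the value.  */
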
6545 static void
6546 signed_cons (int size)
6547 {
6548 if (flag_code == CODE_64BIT)
6549 cons_sign = 1;
6550 cons (size);
6551 cons_sign = -1;
6552 }
6553
6554 #ifdef TE_PE
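/* Handle PE section-relative data (the .secrel32 directive): emit each
   expression in the comma-separated list as a 32-bit section-relative
   value.  */
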
6555 static void
6556 pe_directive_secrel (dummy)
6557 int dummy ATTRIBUTE_UNUSED;
6558 {
6559 expressionS exp;
6560
6561 do
6562 {
6563 expression (&exp);
6564 if (exp.X_op == O_symbol)
6565 exp.X_op = O_secrel;
6566
6567 emit_expr (&exp, 4);
6568 }
6569 while (*input_line_pointer++ == ',');
6570
6571 input_line_pointer--;
6572 demand_empty_rest_of_line ();
6573 }
6574 #endif
6575
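/* Parse the immediate operand starting at IMM_START (the text after
   the '$' in AT&T syntax) into the current operand, recognizing
   @GOT-style relocation operators.  Return 1 on success, 0 on
   failure.  */
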
6576 static int
6577 i386_immediate (char *imm_start)
6578 {
6579 char *save_input_line_pointer;
6580 char *gotfree_input_line;
6581 segT exp_seg = 0;
6582 expressionS *exp;
6583 i386_operand_type types;
6584
6585 operand_type_set (&types, ~0);
6586
6587 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6588 {
6589 as_bad (_("at most %d immediate operands are allowed"),
6590 MAX_IMMEDIATE_OPERANDS);
6591 return 0;
6592 }
6593
6594 exp = &im_expressions[i.imm_operands++];
6595 i.op[this_operand].imms = exp;
6596
6597 if (is_space_char (*imm_start))
6598 ++imm_start;
6599
6600 save_input_line_pointer = input_line_pointer;
6601 input_line_pointer = imm_start;
6602
6603 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6604 if (gotfree_input_line)
6605 input_line_pointer = gotfree_input_line;
6606
6607 exp_seg = expression (exp);
6608
6609 SKIP_WHITESPACE ();
6610 if (*input_line_pointer)
6611 as_bad (_("junk `%s' after expression"), input_line_pointer);
6612
6613 input_line_pointer = save_input_line_pointer;
6614 if (gotfree_input_line)
6615 {
6616 free (gotfree_input_line);
6617
6618 if (exp->X_op == O_constant || exp->X_op == O_register)
6619 exp->X_op = O_illegal;
6620 }
6621
6622 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6623 }
6624
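/* Validate the parsed immediate expression EXP and set the operand's
   immediate type bits accordingly.  Return 1 on success, 0 on
   failure.  */
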
6625 static int
6626 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6627 i386_operand_type types, const char *imm_start)
6628 {
6629 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6630 {
6631 if (imm_start)
6632 as_bad (_("missing or invalid immediate expression `%s'"),
6633 imm_start);
6634 return 0;
6635 }
6636 else if (exp->X_op == O_constant)
6637 {
6638 /* Size it properly later. */
6639 i.types[this_operand].bitfield.imm64 = 1;
6640 /* If not 64bit, sign extend val. */
6641 if (flag_code != CODE_64BIT
6642 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6643 exp->X_add_number
6644 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6645 }
6646 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6647 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6648 && exp_seg != absolute_section
6649 && exp_seg != text_section
6650 && exp_seg != data_section
6651 && exp_seg != bss_section
6652 && exp_seg != undefined_section
6653 && !bfd_is_com_section (exp_seg))
6654 {
6655 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6656 return 0;
6657 }
6658 #endif
6659 else if (!intel_syntax && exp->X_op == O_register)
6660 {
6661 if (imm_start)
6662 as_bad (_("illegal immediate register operand %s"), imm_start);
6663 return 0;
6664 }
6665 else
6666 {
6667 /* This is an address. The size of the address will be
6668 determined later, depending on destination register,
6669 suffix, or the default for the section. */
6670 i.types[this_operand].bitfield.imm8 = 1;
6671 i.types[this_operand].bitfield.imm16 = 1;
6672 i.types[this_operand].bitfield.imm32 = 1;
6673 i.types[this_operand].bitfield.imm32s = 1;
6674 i.types[this_operand].bitfield.imm64 = 1;
6675 i.types[this_operand] = operand_type_and (i.types[this_operand],
6676 types);
6677 }
6678
6679 return 1;
6680 }
6681
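/* Parse the scale factor expression at SCALE (must evaluate to 1, 2,
   4 or 8) and record it in i.log2_scale_factor.  Return a pointer
   past the parsed text, or NULL on error.  */
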
6682 static char *
6683 i386_scale (char *scale)
6684 {
6685 offsetT val;
6686 char *save = input_line_pointer;
6687
6688 input_line_pointer = scale;
6689 val = get_absolute_expression ();
6690
6691 switch (val)
6692 {
6693 case 1:
6694 i.log2_scale_factor = 0;
6695 break;
6696 case 2:
6697 i.log2_scale_factor = 1;
6698 break;
6699 case 4:
6700 i.log2_scale_factor = 2;
6701 break;
6702 case 8:
6703 i.log2_scale_factor = 3;
6704 break;
6705 default:
6706 {
6707 char sep = *input_line_pointer;
6708
6709 *input_line_pointer = '\0';
6710 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6711 scale);
6712 *input_line_pointer = sep;
6713 input_line_pointer = save;
6714 return NULL;
6715 }
6716 }
6717 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6718 {
6719 as_warn (_("scale factor of %d without an index register"),
6720 1 << i.log2_scale_factor);
6721 i.log2_scale_factor = 0;
6722 }
6723 scale = input_line_pointer;
6724 input_line_pointer = save;
6725 return scale;
6726 }
6727
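/* Parse the displacement expression between DISP_START and DISP_END
   into the current operand, recognizing @GOT-style relocation
   operators and setting up the expected displacement width.  Return
   1 on success, 0 on failure.  */
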
6728 static int
6729 i386_displacement (char *disp_start, char *disp_end)
6730 {
6731 expressionS *exp;
6732 segT exp_seg = 0;
6733 char *save_input_line_pointer;
6734 char *gotfree_input_line;
6735 int override;
6736 i386_operand_type bigdisp, types = anydisp;
6737 int ret;
6738
6739 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6740 {
6741 as_bad (_("at most %d displacement operands are allowed"),
6742 MAX_MEMORY_OPERANDS);
6743 return 0;
6744 }
6745
6746 operand_type_set (&bigdisp, 0);
6747 if ((i.types[this_operand].bitfield.jumpabsolute)
6748 || (!current_templates->start->opcode_modifier.jump
6749 && !current_templates->start->opcode_modifier.jumpdword))
6750 {
6751 bigdisp.bitfield.disp32 = 1;
6752 override = (i.prefix[ADDR_PREFIX] != 0);
6753 if (flag_code == CODE_64BIT)
6754 {
6755 if (!override)
6756 {
6757 bigdisp.bitfield.disp32s = 1;
6758 bigdisp.bitfield.disp64 = 1;
6759 }
6760 }
6761 else if ((flag_code == CODE_16BIT) ^ override)
6762 {
6763 bigdisp.bitfield.disp32 = 0;
6764 bigdisp.bitfield.disp16 = 1;
6765 }
6766 }
6767 else
6768 {
6769 /* For PC-relative branches, the width of the displacement
6770 is dependent upon data size, not address size. */
6771 override = (i.prefix[DATA_PREFIX] != 0);
6772 if (flag_code == CODE_64BIT)
6773 {
6774 if (override || i.suffix == WORD_MNEM_SUFFIX)
6775 bigdisp.bitfield.disp16 = 1;
6776 else
6777 {
6778 bigdisp.bitfield.disp32 = 1;
6779 bigdisp.bitfield.disp32s = 1;
6780 }
6781 }
6782 else
6783 {
6784 if (!override)
6785 override = (i.suffix == (flag_code != CODE_16BIT
6786 ? WORD_MNEM_SUFFIX
6787 : LONG_MNEM_SUFFIX));
6788 bigdisp.bitfield.disp32 = 1;
6789 if ((flag_code == CODE_16BIT) ^ override)
6790 {
6791 bigdisp.bitfield.disp32 = 0;
6792 bigdisp.bitfield.disp16 = 1;
6793 }
6794 }
6795 }
6796 i.types[this_operand] = operand_type_or (i.types[this_operand],
6797 bigdisp);
6798
6799 exp = &disp_expressions[i.disp_operands];
6800 i.op[this_operand].disps = exp;
6801 i.disp_operands++;
6802 save_input_line_pointer = input_line_pointer;
6803 input_line_pointer = disp_start;
6804 END_STRING_AND_SAVE (disp_end);
6805
6806 #ifndef GCC_ASM_O_HACK
6807 #define GCC_ASM_O_HACK 0
6808 #endif
6809 #if GCC_ASM_O_HACK
6810 END_STRING_AND_SAVE (disp_end + 1);
6811 if (i.types[this_operand].bitfield.baseIndex
6812 && displacement_string_end[-1] == '+')
6813 {
6814 /* This hack is to avoid a warning when using the "o"
6815 constraint within gcc asm statements.
6816 For instance:
6817
6818 #define _set_tssldt_desc(n,addr,limit,type) \
6819 __asm__ __volatile__ ( \
6820 "movw %w2,%0\n\t" \
6821 "movw %w1,2+%0\n\t" \
6822 "rorl $16,%1\n\t" \
6823 "movb %b1,4+%0\n\t" \
6824 "movb %4,5+%0\n\t" \
6825 "movb $0,6+%0\n\t" \
6826 "movb %h1,7+%0\n\t" \
6827 "rorl $16,%1" \
6828 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6829
6830 This works great except that the output assembler ends
6831 up looking a bit weird if it turns out that there is
6832 no offset. You end up producing code that looks like:
6833
6834 #APP
6835 movw $235,(%eax)
6836 movw %dx,2+(%eax)
6837 rorl $16,%edx
6838 movb %dl,4+(%eax)
6839 movb $137,5+(%eax)
6840 movb $0,6+(%eax)
6841 movb %dh,7+(%eax)
6842 rorl $16,%edx
6843 #NO_APP
6844
6845 So here we provide the missing zero. */
6846
6847 *displacement_string_end = '0';
6848 }
6849 #endif
6850 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6851 if (gotfree_input_line)
6852 input_line_pointer = gotfree_input_line;
6853
6854 exp_seg = expression (exp);
6855
6856 SKIP_WHITESPACE ();
6857 if (*input_line_pointer)
6858 as_bad (_("junk `%s' after expression"), input_line_pointer);
6859 #if GCC_ASM_O_HACK
6860 RESTORE_END_STRING (disp_end + 1);
6861 #endif
6862 input_line_pointer = save_input_line_pointer;
6863 if (gotfree_input_line)
6864 {
6865 free (gotfree_input_line);
6866
6867 if (exp->X_op == O_constant || exp->X_op == O_register)
6868 exp->X_op = O_illegal;
6869 }
6870
6871 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6872
6873 RESTORE_END_STRING (disp_end);
6874
6875 return ret;
6876 }
6877
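/* Validate the parsed displacement expression EXP, rewrite
   GOTOFF/GOTPCREL style references as a subtraction from the GOT
   symbol, and narrow the operand's displacement type bits.  Return 1
   on success, 0 on failure.  */
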
6878 static int
6879 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6880 i386_operand_type types, const char *disp_start)
6881 {
6882 i386_operand_type bigdisp;
6883 int ret = 1;
6884
6885 /* We do this to make sure that the section symbol is in
6886 the symbol table. We will ultimately change the relocation
6887 to be relative to the beginning of the section. */
6888 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6889 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6890 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6891 {
6892 if (exp->X_op != O_symbol)
6893 goto inv_disp;
6894
6895 if (S_IS_LOCAL (exp->X_add_symbol)
6896 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6897 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6898 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6899 exp->X_op = O_subtract;
6900 exp->X_op_symbol = GOT_symbol;
6901 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6902 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6903 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6904 i.reloc[this_operand] = BFD_RELOC_64;
6905 else
6906 i.reloc[this_operand] = BFD_RELOC_32;
6907 }
6908
6909 else if (exp->X_op == O_absent
6910 || exp->X_op == O_illegal
6911 || exp->X_op == O_big)
6912 {
6913 inv_disp:
6914 as_bad (_("missing or invalid displacement expression `%s'"),
6915 disp_start);
6916 ret = 0;
6917 }
6918
6919 else if (flag_code == CODE_64BIT
6920 && !i.prefix[ADDR_PREFIX]
6921 && exp->X_op == O_constant)
6922 {
6923       /* Since displacements are sign-extended to 64 bits, don't allow
6924 	 disp32 here and turn off disp32s if the value is out of range.  */
6925 i.types[this_operand].bitfield.disp32 = 0;
6926 if (!fits_in_signed_long (exp->X_add_number))
6927 {
6928 i.types[this_operand].bitfield.disp32s = 0;
6929 if (i.types[this_operand].bitfield.baseindex)
6930 {
6931 	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
6932 (long) exp->X_add_number);
6933 ret = 0;
6934 }
6935 }
6936 }
6937
6938 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6939 else if (exp->X_op != O_constant
6940 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6941 && exp_seg != absolute_section
6942 && exp_seg != text_section
6943 && exp_seg != data_section
6944 && exp_seg != bss_section
6945 && exp_seg != undefined_section
6946 && !bfd_is_com_section (exp_seg))
6947 {
6948 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6949 ret = 0;
6950 }
6951 #endif
6952
6953 /* Check if this is a displacement only operand. */
6954 bigdisp = i.types[this_operand];
6955 bigdisp.bitfield.disp8 = 0;
6956 bigdisp.bitfield.disp16 = 0;
6957 bigdisp.bitfield.disp32 = 0;
6958 bigdisp.bitfield.disp32s = 0;
6959 bigdisp.bitfield.disp64 = 0;
6960 if (operand_type_all_zero (&bigdisp))
6961 i.types[this_operand] = operand_type_and (i.types[this_operand],
6962 types);
6963
6964 return ret;
6965 }
6966
6967 /* Make sure the memory operand we've been dealt is valid.
6968 Return 1 on success, 0 on a failure. */
6969
6970 static int
6971 i386_index_check (const char *operand_string)
6972 {
6973 int ok;
6974 const char *kind = "base/index";
6975 #if INFER_ADDR_PREFIX
6976 int fudged = 0;
6977
6978 tryprefix:
6979 #endif
6980 ok = 1;
6981 if (current_templates->start->opcode_modifier.isstring
6982 && !current_templates->start->opcode_modifier.immext
6983 && (current_templates->end[-1].opcode_modifier.isstring
6984 || i.mem_operands))
6985 {
6986 /* Memory operands of string insns are special in that they only allow
6987 a single register (rDI, rSI, or rBX) as their memory address. */
6988 unsigned int expected;
6989
6990 kind = "string address";
6991
6992 if (current_templates->start->opcode_modifier.w)
6993 {
6994 i386_operand_type type = current_templates->end[-1].operand_types[0];
6995
6996 if (!type.bitfield.baseindex
6997 || ((!i.mem_operands != !intel_syntax)
6998 && current_templates->end[-1].operand_types[1]
6999 .bitfield.baseindex))
7000 type = current_templates->end[-1].operand_types[1];
7001 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7002 }
7003 else
7004 expected = 3 /* rBX */;
7005
7006 if (!i.base_reg || i.index_reg
7007 || operand_type_check (i.types[this_operand], disp))
7008 ok = -1;
7009 else if (!(flag_code == CODE_64BIT
7010 ? i.prefix[ADDR_PREFIX]
7011 ? i.base_reg->reg_type.bitfield.reg32
7012 : i.base_reg->reg_type.bitfield.reg64
7013 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7014 ? i.base_reg->reg_type.bitfield.reg32
7015 : i.base_reg->reg_type.bitfield.reg16))
7016 ok = 0;
7017 else if (i.base_reg->reg_num != expected)
7018 ok = -1;
7019
7020 if (ok < 0)
7021 {
7022 unsigned int j;
7023
7024 for (j = 0; j < i386_regtab_size; ++j)
7025 if ((flag_code == CODE_64BIT
7026 ? i.prefix[ADDR_PREFIX]
7027 ? i386_regtab[j].reg_type.bitfield.reg32
7028 : i386_regtab[j].reg_type.bitfield.reg64
7029 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7030 ? i386_regtab[j].reg_type.bitfield.reg32
7031 : i386_regtab[j].reg_type.bitfield.reg16)
7032 && i386_regtab[j].reg_num == expected)
7033 break;
7034 gas_assert (j < i386_regtab_size);
7035 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7036 operand_string,
7037 intel_syntax ? '[' : '(',
7038 register_prefix,
7039 i386_regtab[j].reg_name,
7040 intel_syntax ? ']' : ')');
7041 ok = 1;
7042 }
7043 }
7044 else if (flag_code == CODE_64BIT)
7045 {
7046 if ((i.base_reg
7047 && ((i.prefix[ADDR_PREFIX] == 0
7048 && !i.base_reg->reg_type.bitfield.reg64)
7049 || (i.prefix[ADDR_PREFIX]
7050 && !i.base_reg->reg_type.bitfield.reg32))
7051 && (i.index_reg
7052 || i.base_reg->reg_num !=
7053 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7054 || (i.index_reg
7055 && (!i.index_reg->reg_type.bitfield.baseindex
7056 || (i.prefix[ADDR_PREFIX] == 0
7057 && i.index_reg->reg_num != RegRiz
7058 && !i.index_reg->reg_type.bitfield.reg64
7059 )
7060 || (i.prefix[ADDR_PREFIX]
7061 && i.index_reg->reg_num != RegEiz
7062 && !i.index_reg->reg_type.bitfield.reg32))))
7063 ok = 0;
7064 }
7065 else
7066 {
7067 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7068 {
7069 /* 16bit checks. */
7070 if ((i.base_reg
7071 && (!i.base_reg->reg_type.bitfield.reg16
7072 || !i.base_reg->reg_type.bitfield.baseindex))
7073 || (i.index_reg
7074 && (!i.index_reg->reg_type.bitfield.reg16
7075 || !i.index_reg->reg_type.bitfield.baseindex
7076 || !(i.base_reg
7077 && i.base_reg->reg_num < 6
7078 && i.index_reg->reg_num >= 6
7079 && i.log2_scale_factor == 0))))
7080 ok = 0;
7081 }
7082 else
7083 {
7084 /* 32bit checks. */
7085 if ((i.base_reg
7086 && !i.base_reg->reg_type.bitfield.reg32)
7087 || (i.index_reg
7088 && ((!i.index_reg->reg_type.bitfield.reg32
7089 && i.index_reg->reg_num != RegEiz)
7090 || !i.index_reg->reg_type.bitfield.baseindex)))
7091 ok = 0;
7092 }
7093 }
7094 if (!ok)
7095 {
7096 #if INFER_ADDR_PREFIX
7097 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7098 {
7099 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7100 i.prefixes += 1;
7101 /* Change the size of any displacement too. At most one of
7102 Disp16 or Disp32 is set.
7103 FIXME. There doesn't seem to be any real need for separate
7104 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7105 Removing them would probably clean up the code quite a lot. */
7106 if (flag_code != CODE_64BIT
7107 && (i.types[this_operand].bitfield.disp16
7108 || i.types[this_operand].bitfield.disp32))
7109 i.types[this_operand]
7110 = operand_type_xor (i.types[this_operand], disp16_32);
7111 fudged = 1;
7112 goto tryprefix;
7113 }
7114 if (fudged)
7115 as_bad (_("`%s' is not a valid %s expression"),
7116 operand_string,
7117 kind);
7118 else
7119 #endif
7120 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7121 operand_string,
7122 flag_code_names[i.prefix[ADDR_PREFIX]
7123 ? flag_code == CODE_32BIT
7124 ? CODE_16BIT
7125 : CODE_32BIT
7126 : flag_code],
7127 kind);
7128 }
7129 return ok;
7130 }
7131
7132 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7133 on error. */
7134
7135 static int
7136 i386_att_operand (char *operand_string)
7137 {
7138 const reg_entry *r;
7139 char *end_op;
7140 char *op_string = operand_string;
7141
7142 if (is_space_char (*op_string))
7143 ++op_string;
7144
7145 /* We check for an absolute prefix (differentiating,
7146      for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7147 if (*op_string == ABSOLUTE_PREFIX)
7148 {
7149 ++op_string;
7150 if (is_space_char (*op_string))
7151 ++op_string;
7152 i.types[this_operand].bitfield.jumpabsolute = 1;
7153 }
7154
7155 /* Check if operand is a register. */
7156 if ((r = parse_register (op_string, &end_op)) != NULL)
7157 {
7158 i386_operand_type temp;
7159
7160 /* Check for a segment override by searching for ':' after a
7161 segment register. */
7162 op_string = end_op;
7163 if (is_space_char (*op_string))
7164 ++op_string;
7165 if (*op_string == ':'
7166 && (r->reg_type.bitfield.sreg2
7167 || r->reg_type.bitfield.sreg3))
7168 {
7169 switch (r->reg_num)
7170 {
7171 case 0:
7172 i.seg[i.mem_operands] = &es;
7173 break;
7174 case 1:
7175 i.seg[i.mem_operands] = &cs;
7176 break;
7177 case 2:
7178 i.seg[i.mem_operands] = &ss;
7179 break;
7180 case 3:
7181 i.seg[i.mem_operands] = &ds;
7182 break;
7183 case 4:
7184 i.seg[i.mem_operands] = &fs;
7185 break;
7186 case 5:
7187 i.seg[i.mem_operands] = &gs;
7188 break;
7189 }
7190
7191 /* Skip the ':' and whitespace. */
7192 ++op_string;
7193 if (is_space_char (*op_string))
7194 ++op_string;
7195
7196 if (!is_digit_char (*op_string)
7197 && !is_identifier_char (*op_string)
7198 && *op_string != '('
7199 && *op_string != ABSOLUTE_PREFIX)
7200 {
7201 as_bad (_("bad memory operand `%s'"), op_string);
7202 return 0;
7203 }
7204 /* Handle case of %es:*foo. */
7205 if (*op_string == ABSOLUTE_PREFIX)
7206 {
7207 ++op_string;
7208 if (is_space_char (*op_string))
7209 ++op_string;
7210 i.types[this_operand].bitfield.jumpabsolute = 1;
7211 }
7212 goto do_memory_reference;
7213 }
7214 if (*op_string)
7215 {
7216 as_bad (_("junk `%s' after register"), op_string);
7217 return 0;
7218 }
7219 temp = r->reg_type;
7220 temp.bitfield.baseindex = 0;
7221 i.types[this_operand] = operand_type_or (i.types[this_operand],
7222 temp);
7223 i.types[this_operand].bitfield.unspecified = 0;
7224 i.op[this_operand].regs = r;
7225 i.reg_operands++;
7226 }
7227 else if (*op_string == REGISTER_PREFIX)
7228 {
7229 as_bad (_("bad register name `%s'"), op_string);
7230 return 0;
7231 }
7232 else if (*op_string == IMMEDIATE_PREFIX)
7233 {
7234 ++op_string;
7235 if (i.types[this_operand].bitfield.jumpabsolute)
7236 {
7237 as_bad (_("immediate operand illegal with absolute jump"));
7238 return 0;
7239 }
7240 if (!i386_immediate (op_string))
7241 return 0;
7242 }
7243 else if (is_digit_char (*op_string)
7244 || is_identifier_char (*op_string)
7245 || *op_string == '(')
7246 {
7247 /* This is a memory reference of some sort. */
7248 char *base_string;
7249
7250 /* Start and end of displacement string expression (if found). */
7251 char *displacement_string_start;
7252 char *displacement_string_end;
7253
7254 do_memory_reference:
7255 if ((i.mem_operands == 1
7256 && !current_templates->start->opcode_modifier.isstring)
7257 || i.mem_operands == 2)
7258 {
7259 as_bad (_("too many memory references for `%s'"),
7260 current_templates->start->name);
7261 return 0;
7262 }
7263
7264 /* Check for base index form. We detect the base index form by
7265 looking for an ')' at the end of the operand, searching
7266 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7267 after the '('. */
7268 base_string = op_string + strlen (op_string);
7269
7270 --base_string;
7271 if (is_space_char (*base_string))
7272 --base_string;
7273
7274       /* If we only have a displacement, set up for it to be parsed later.  */
7275 displacement_string_start = op_string;
7276 displacement_string_end = base_string + 1;
7277
7278 if (*base_string == ')')
7279 {
7280 char *temp_string;
7281 unsigned int parens_balanced = 1;
7282 	  /* We've already checked that the numbers of left & right ()'s are
7283 equal, so this loop will not be infinite. */
7284 do
7285 {
7286 base_string--;
7287 if (*base_string == ')')
7288 parens_balanced++;
7289 if (*base_string == '(')
7290 parens_balanced--;
7291 }
7292 while (parens_balanced);
7293
7294 temp_string = base_string;
7295
7296 /* Skip past '(' and whitespace. */
7297 ++base_string;
7298 if (is_space_char (*base_string))
7299 ++base_string;
7300
7301 if (*base_string == ','
7302 || ((i.base_reg = parse_register (base_string, &end_op))
7303 != NULL))
7304 {
7305 displacement_string_end = temp_string;
7306
7307 i.types[this_operand].bitfield.baseindex = 1;
7308
7309 if (i.base_reg)
7310 {
7311 base_string = end_op;
7312 if (is_space_char (*base_string))
7313 ++base_string;
7314 }
7315
7316 /* There may be an index reg or scale factor here. */
7317 if (*base_string == ',')
7318 {
7319 ++base_string;
7320 if (is_space_char (*base_string))
7321 ++base_string;
7322
7323 if ((i.index_reg = parse_register (base_string, &end_op))
7324 != NULL)
7325 {
7326 base_string = end_op;
7327 if (is_space_char (*base_string))
7328 ++base_string;
7329 if (*base_string == ',')
7330 {
7331 ++base_string;
7332 if (is_space_char (*base_string))
7333 ++base_string;
7334 }
7335 else if (*base_string != ')')
7336 {
7337 as_bad (_("expecting `,' or `)' "
7338 "after index register in `%s'"),
7339 operand_string);
7340 return 0;
7341 }
7342 }
7343 else if (*base_string == REGISTER_PREFIX)
7344 {
7345 as_bad (_("bad register name `%s'"), base_string);
7346 return 0;
7347 }
7348
7349 /* Check for scale factor. */
7350 if (*base_string != ')')
7351 {
7352 char *end_scale = i386_scale (base_string);
7353
7354 if (!end_scale)
7355 return 0;
7356
7357 base_string = end_scale;
7358 if (is_space_char (*base_string))
7359 ++base_string;
7360 if (*base_string != ')')
7361 {
7362 as_bad (_("expecting `)' "
7363 "after scale factor in `%s'"),
7364 operand_string);
7365 return 0;
7366 }
7367 }
7368 else if (!i.index_reg)
7369 {
7370 as_bad (_("expecting index register or scale factor "
7371 "after `,'; got '%c'"),
7372 *base_string);
7373 return 0;
7374 }
7375 }
7376 else if (*base_string != ')')
7377 {
7378 as_bad (_("expecting `,' or `)' "
7379 "after base register in `%s'"),
7380 operand_string);
7381 return 0;
7382 }
7383 }
7384 else if (*base_string == REGISTER_PREFIX)
7385 {
7386 as_bad (_("bad register name `%s'"), base_string);
7387 return 0;
7388 }
7389 }
7390
7391 /* If there's an expression beginning the operand, parse it,
7392 assuming displacement_string_start and
7393 displacement_string_end are meaningful. */
7394 if (displacement_string_start != displacement_string_end)
7395 {
7396 if (!i386_displacement (displacement_string_start,
7397 displacement_string_end))
7398 return 0;
7399 }
7400
7401 /* Special case for (%dx) while doing input/output op. */
7402 if (i.base_reg
7403 && operand_type_equal (&i.base_reg->reg_type,
7404 &reg16_inoutportreg)
7405 && i.index_reg == 0
7406 && i.log2_scale_factor == 0
7407 && i.seg[i.mem_operands] == 0
7408 && !operand_type_check (i.types[this_operand], disp))
7409 {
7410 i.types[this_operand] = inoutportreg;
7411 return 1;
7412 }
7413
7414 if (i386_index_check (operand_string) == 0)
7415 return 0;
7416 i.types[this_operand].bitfield.mem = 1;
7417 i.mem_operands++;
7418 }
7419 else
7420 {
7421 /* It's not a memory operand; argh! */
7422 as_bad (_("invalid char %s beginning operand %d `%s'"),
7423 output_invalid (*op_string),
7424 this_operand + 1,
7425 op_string);
7426 return 0;
7427 }
7428 return 1; /* Normal return. */
7429 }
7430 \f
7431 /* md_estimate_size_before_relax()
7432
7433 Called just before relax() for rs_machine_dependent frags. The x86
7434 assembler uses these frags to handle variable size jump
7435 instructions.
7436
7437 Any symbol that is now undefined will not become defined.
7438 Return the correct fr_subtype in the frag.
7439 Return the initial "guess for variable size of frag" to caller.
7440 The guess is actually the growth beyond the fixed part. Whatever
7441 we do to grow the fixed or variable part contributes to our
7442 returned value. */
7443
7444 int
7445 md_estimate_size_before_relax (fragP, segment)
7446 fragS *fragP;
7447 segT segment;
7448 {
7449 /* We've already got fragP->fr_subtype right; all we have to do is
7450 check for un-relaxable symbols. On an ELF system, we can't relax
7451 an externally visible symbol, because it may be overridden by a
7452 shared library. */
7453 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7454 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7455 || (IS_ELF
7456 && (S_IS_EXTERNAL (fragP->fr_symbol)
7457 || S_IS_WEAK (fragP->fr_symbol)
7458 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7459 & BSF_GNU_INDIRECT_FUNCTION))))
7460 #endif
7461 #if defined (OBJ_COFF) && defined (TE_PE)
7462 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7463 && S_IS_WEAK (fragP->fr_symbol))
7464 #endif
7465 )
7466 {
7467 /* Symbol is undefined in this segment, or we need to keep a
7468 reloc so that weak symbols can be overridden. */
7469 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7470 enum bfd_reloc_code_real reloc_type;
7471 unsigned char *opcode;
7472 int old_fr_fix;
7473
7474 if (fragP->fr_var != NO_RELOC)
7475 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7476 else if (size == 2)
7477 reloc_type = BFD_RELOC_16_PCREL;
7478 else
7479 reloc_type = BFD_RELOC_32_PCREL;
7480
7481 old_fr_fix = fragP->fr_fix;
7482 opcode = (unsigned char *) fragP->fr_opcode;
7483
7484 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7485 {
7486 case UNCOND_JUMP:
7487 /* Make jmp (0xeb) a (d)word displacement jump. */
7488 opcode[0] = 0xe9;
7489 fragP->fr_fix += size;
7490 fix_new (fragP, old_fr_fix, size,
7491 fragP->fr_symbol,
7492 fragP->fr_offset, 1,
7493 reloc_type);
7494 break;
7495
7496 case COND_JUMP86:
7497 if (size == 2
7498 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7499 {
7500 /* Negate the condition, and branch past an
7501 unconditional jump. */
7502 opcode[0] ^= 1;
7503 opcode[1] = 3;
7504 /* Insert an unconditional jump. */
7505 opcode[2] = 0xe9;
7506 /* We added two extra opcode bytes, and have a two byte
7507 offset. */
7508 fragP->fr_fix += 2 + 2;
7509 fix_new (fragP, old_fr_fix + 2, 2,
7510 fragP->fr_symbol,
7511 fragP->fr_offset, 1,
7512 reloc_type);
7513 break;
7514 }
7515 /* Fall through. */
7516
7517 case COND_JUMP:
7518 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7519 {
7520 fixS *fixP;
7521
7522 fragP->fr_fix += 1;
7523 fixP = fix_new (fragP, old_fr_fix, 1,
7524 fragP->fr_symbol,
7525 fragP->fr_offset, 1,
7526 BFD_RELOC_8_PCREL);
7527 fixP->fx_signed = 1;
7528 break;
7529 }
7530
7531 /* This changes the byte-displacement jump 0x7N
7532 to the (d)word-displacement jump 0x0f,0x8N. */
7533 opcode[1] = opcode[0] + 0x10;
7534 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7535 /* We've added an opcode byte. */
7536 fragP->fr_fix += 1 + size;
7537 fix_new (fragP, old_fr_fix + 1, size,
7538 fragP->fr_symbol,
7539 fragP->fr_offset, 1,
7540 reloc_type);
7541 break;
7542
7543 default:
7544 BAD_CASE (fragP->fr_subtype);
7545 break;
7546 }
7547 frag_wane (fragP);
7548 return fragP->fr_fix - old_fr_fix;
7549 }
7550
7551 /* Guess size depending on current relax state. Initially the relax
7552 state will correspond to a short jump and we return 1, because
7553 the variable part of the frag (the branch offset) is one byte
7554 long. However, we can relax a section more than once and in that
7555 case we must either set fr_subtype back to the unrelaxed state,
7556 or return the value for the appropriate branch. */
7557 return md_relax_table[fragP->fr_subtype].rlx_length;
7558 }
7559
7560 /* Called after relax() is finished.
7561
7562 In: Address of frag.
7563 fr_type == rs_machine_dependent.
7564 fr_subtype is what the address relaxed to.
7565
7566 Out: Any fixSs and constants are set up.
7567 Caller will turn frag into a ".space 0". */
7568
7569 void
7570 md_convert_frag (abfd, sec, fragP)
7571 bfd *abfd ATTRIBUTE_UNUSED;
7572 segT sec ATTRIBUTE_UNUSED;
7573 fragS *fragP;
7574 {
7575 unsigned char *opcode;
7576 unsigned char *where_to_put_displacement = NULL;
7577 offsetT target_address;
7578 offsetT opcode_address;
7579 unsigned int extension = 0;
7580 offsetT displacement_from_opcode_start;
7581
7582 opcode = (unsigned char *) fragP->fr_opcode;
7583
7584 /* Address we want to reach in file space. */
7585 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7586
7587 /* Address opcode resides at in file space. */
7588 opcode_address = fragP->fr_address + fragP->fr_fix;
7589
7590 /* Displacement from opcode start to fill into instruction. */
7591 displacement_from_opcode_start = target_address - opcode_address;
7592
7593 if ((fragP->fr_subtype & BIG) == 0)
7594 {
7595 /* Don't have to change opcode. */
7596 extension = 1; /* 1 opcode + 1 displacement */
7597 where_to_put_displacement = &opcode[1];
7598 }
7599 else
7600 {
7601 if (no_cond_jump_promotion
7602 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7603 as_warn_where (fragP->fr_file, fragP->fr_line,
7604 _("long jump required"));
7605
7606 switch (fragP->fr_subtype)
7607 {
7608 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7609 extension = 4; /* 1 opcode + 4 displacement */
7610 opcode[0] = 0xe9;
7611 where_to_put_displacement = &opcode[1];
7612 break;
7613
7614 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7615 extension = 2; /* 1 opcode + 2 displacement */
7616 opcode[0] = 0xe9;
7617 where_to_put_displacement = &opcode[1];
7618 break;
7619
7620 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7621 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7622 extension = 5; /* 2 opcode + 4 displacement */
7623 opcode[1] = opcode[0] + 0x10;
7624 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7625 where_to_put_displacement = &opcode[2];
7626 break;
7627
7628 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7629 extension = 3; /* 2 opcode + 2 displacement */
7630 opcode[1] = opcode[0] + 0x10;
7631 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7632 where_to_put_displacement = &opcode[2];
7633 break;
7634
7635 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7636 extension = 4;
7637 opcode[0] ^= 1;
7638 opcode[1] = 3;
7639 opcode[2] = 0xe9;
7640 where_to_put_displacement = &opcode[3];
7641 break;
7642
7643 default:
7644 BAD_CASE (fragP->fr_subtype);
7645 break;
7646 }
7647 }
7648
7649   /* If size is less than four we are sure that the operand fits,
7650      but if it's 4, then it could be that the displacement is larger
7651      than +/- 2GB.  */
7652 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7653 && object_64bit
7654 && ((addressT) (displacement_from_opcode_start - extension
7655 + ((addressT) 1 << 31))
7656 > (((addressT) 2 << 31) - 1)))
7657 {
7658 as_bad_where (fragP->fr_file, fragP->fr_line,
7659 _("jump target out of range"));
7660 /* Make us emit 0. */
7661 displacement_from_opcode_start = extension;
7662 }
7663 /* Now put displacement after opcode. */
7664 md_number_to_chars ((char *) where_to_put_displacement,
7665 (valueT) (displacement_from_opcode_start - extension),
7666 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7667 fragP->fr_fix += extension;
7668 }
7669 \f
7670 /* Apply a fixup (fixS) to segment data, once it has been determined
7671 by our caller that we have all the info we need to fix it up.
7672
7673 On the 386, immediates, displacements, and data pointers are all in
7674 the same (little-endian) format, so we don't need to care about which
7675 we are handling. */
7676
7677 void
7678 md_apply_fix (fixP, valP, seg)
7679 /* The fix we're to put in. */
7680 fixS *fixP;
7681 /* Pointer to the value of the bits. */
7682 valueT *valP;
7683 /* Segment fix is from. */
7684 segT seg ATTRIBUTE_UNUSED;
7685 {
7686 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7687 valueT value = *valP;
7688
7689 #if !defined (TE_Mach)
7690 if (fixP->fx_pcrel)
7691 {
7692 switch (fixP->fx_r_type)
7693 {
7694 default:
7695 break;
7696
7697 case BFD_RELOC_64:
7698 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7699 break;
7700 case BFD_RELOC_32:
7701 case BFD_RELOC_X86_64_32S:
7702 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7703 break;
7704 case BFD_RELOC_16:
7705 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7706 break;
7707 case BFD_RELOC_8:
7708 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7709 break;
7710 }
7711 }
7712
7713 if (fixP->fx_addsy != NULL
7714 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7715 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7716 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7717 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7718 && !use_rela_relocations)
7719 {
7720 /* This is a hack. There should be a better way to handle this.
7721 This covers for the fact that bfd_install_relocation will
7722 subtract the current location (for partial_inplace, PC relative
7723 relocations); see more below. */
7724 #ifndef OBJ_AOUT
7725 if (IS_ELF
7726 #ifdef TE_PE
7727 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7728 #endif
7729 )
7730 value += fixP->fx_where + fixP->fx_frag->fr_address;
7731 #endif
7732 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7733 if (IS_ELF)
7734 {
7735 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7736
7737 if ((sym_seg == seg
7738 || (symbol_section_p (fixP->fx_addsy)
7739 && sym_seg != absolute_section))
7740 && !generic_force_reloc (fixP))
7741 {
7742 /* Yes, we add the values in twice. This is because
7743 bfd_install_relocation subtracts them out again. I think
7744 bfd_install_relocation is broken, but I don't dare change
7745 it. FIXME. */
7746 value += fixP->fx_where + fixP->fx_frag->fr_address;
7747 }
7748 }
7749 #endif
7750 #if defined (OBJ_COFF) && defined (TE_PE)
7751 /* For some reason, the PE format does not store a
7752 section address offset for a PC relative symbol. */
7753 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7754 || S_IS_WEAK (fixP->fx_addsy))
7755 value += md_pcrel_from (fixP);
7756 #endif
7757 }
7758 #if defined (OBJ_COFF) && defined (TE_PE)
7759 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7760 {
7761 value -= S_GET_VALUE (fixP->fx_addsy);
7762 }
7763 #endif
7764
7765 /* Fix a few things - the dynamic linker expects certain values here,
7766 and we must not disappoint it. */
7767 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7768 if (IS_ELF && fixP->fx_addsy)
7769 switch (fixP->fx_r_type)
7770 {
7771 case BFD_RELOC_386_PLT32:
7772 case BFD_RELOC_X86_64_PLT32:
7773 /* Make the jump instruction point to the address of the operand. At
7774 runtime we merely add the offset to the actual PLT entry. */
7775 value = -4;
7776 break;
7777
7778 case BFD_RELOC_386_TLS_GD:
7779 case BFD_RELOC_386_TLS_LDM:
7780 case BFD_RELOC_386_TLS_IE_32:
7781 case BFD_RELOC_386_TLS_IE:
7782 case BFD_RELOC_386_TLS_GOTIE:
7783 case BFD_RELOC_386_TLS_GOTDESC:
7784 case BFD_RELOC_X86_64_TLSGD:
7785 case BFD_RELOC_X86_64_TLSLD:
7786 case BFD_RELOC_X86_64_GOTTPOFF:
7787 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7788 value = 0; /* Fully resolved at runtime. No addend. */
7789 /* Fallthrough */
7790 case BFD_RELOC_386_TLS_LE:
7791 case BFD_RELOC_386_TLS_LDO_32:
7792 case BFD_RELOC_386_TLS_LE_32:
7793 case BFD_RELOC_X86_64_DTPOFF32:
7794 case BFD_RELOC_X86_64_DTPOFF64:
7795 case BFD_RELOC_X86_64_TPOFF32:
7796 case BFD_RELOC_X86_64_TPOFF64:
7797 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7798 break;
7799
7800 case BFD_RELOC_386_TLS_DESC_CALL:
7801 case BFD_RELOC_X86_64_TLSDESC_CALL:
7802 value = 0; /* Fully resolved at runtime. No addend. */
7803 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7804 fixP->fx_done = 0;
7805 return;
7806
7807 case BFD_RELOC_386_GOT32:
7808 case BFD_RELOC_X86_64_GOT32:
7809 value = 0; /* Fully resolved at runtime. No addend. */
7810 break;
7811
7812 case BFD_RELOC_VTABLE_INHERIT:
7813 case BFD_RELOC_VTABLE_ENTRY:
7814 fixP->fx_done = 0;
7815 return;
7816
7817 default:
7818 break;
7819 }
7820 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7821 *valP = value;
7822 #endif /* !defined (TE_Mach) */
7823
7824 /* Are we finished with this relocation now? */
7825 if (fixP->fx_addsy == NULL)
7826 fixP->fx_done = 1;
7827 #if defined (OBJ_COFF) && defined (TE_PE)
7828 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7829 {
7830 fixP->fx_done = 0;
7831 /* Remember value for tc_gen_reloc. */
7832 fixP->fx_addnumber = value;
7833 /* Clear out the frag for now. */
7834 value = 0;
7835 }
7836 #endif
7837 else if (use_rela_relocations)
7838 {
7839 fixP->fx_no_overflow = 1;
7840 /* Remember value for tc_gen_reloc. */
7841 fixP->fx_addnumber = value;
7842 value = 0;
7843 }
7844
7845 md_number_to_chars (p, value, fixP->fx_size);
7846 }
7847 \f
7848 char *
7849 md_atof (int type, char *litP, int *sizeP)
7850 {
7851 /* This outputs the LITTLENUMs in REVERSE order;
7852      in accord with the little-endian 386.  */
7853 return ieee_md_atof (type, litP, sizeP, FALSE);
7854 }
7855 \f
7856 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7857
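/* Format the invalid character C for diagnostics: quote it if it is
   printable, otherwise show its hexadecimal value.  Returns a pointer
   to a static buffer.  */
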
7858 static char *
7859 output_invalid (int c)
7860 {
7861 if (ISPRINT (c))
7862 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7863 "'%c'", c);
7864 else
7865 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7866 "(0x%x)", (unsigned char) c);
7867 return output_invalid_buf;
7868 }
7869
7870 /* REG_STRING starts *before* REGISTER_PREFIX. */
7871
7872 static const reg_entry *
7873 parse_real_register (char *reg_string, char **end_op)
7874 {
7875 char *s = reg_string;
7876 char *p;
7877 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7878 const reg_entry *r;
7879
7880 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7881 if (*s == REGISTER_PREFIX)
7882 ++s;
7883
7884 if (is_space_char (*s))
7885 ++s;
7886
7887 p = reg_name_given;
7888 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7889 {
7890 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7891 return (const reg_entry *) NULL;
7892 s++;
7893 }
7894
7895 /* For naked regs, make sure that we are not dealing with an identifier.
7896 This prevents confusing an identifier like `eax_var' with register
7897 `eax'. */
7898 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7899 return (const reg_entry *) NULL;
7900
7901 *end_op = s;
7902
7903 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7904
7905 /* Handle floating point regs, allowing spaces in the (i) part. */
7906 if (r == i386_regtab /* %st is first entry of table */)
7907 {
7908 if (is_space_char (*s))
7909 ++s;
7910 if (*s == '(')
7911 {
7912 ++s;
7913 if (is_space_char (*s))
7914 ++s;
7915 if (*s >= '0' && *s <= '7')
7916 {
7917 int fpr = *s - '0';
7918 ++s;
7919 if (is_space_char (*s))
7920 ++s;
7921 if (*s == ')')
7922 {
7923 *end_op = s + 1;
7924 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7925 know (r);
7926 return r + fpr;
7927 }
7928 }
7929 /* We have "%st(" then garbage. */
7930 return (const reg_entry *) NULL;
7931 }
7932 }
7933
7934 if (r == NULL || allow_pseudo_reg)
7935 return r;
7936
7937 if (operand_type_all_zero (&r->reg_type))
7938 return (const reg_entry *) NULL;
7939
7940 if ((r->reg_type.bitfield.reg32
7941 || r->reg_type.bitfield.sreg3
7942 || r->reg_type.bitfield.control
7943 || r->reg_type.bitfield.debug
7944 || r->reg_type.bitfield.test)
7945 && !cpu_arch_flags.bitfield.cpui386)
7946 return (const reg_entry *) NULL;
7947
7948 if (r->reg_type.bitfield.floatreg
7949 && !cpu_arch_flags.bitfield.cpu8087
7950 && !cpu_arch_flags.bitfield.cpu287
7951 && !cpu_arch_flags.bitfield.cpu387)
7952 return (const reg_entry *) NULL;
7953
7954 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7955 return (const reg_entry *) NULL;
7956
7957 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7958 return (const reg_entry *) NULL;
7959
7960 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7961 return (const reg_entry *) NULL;
7962
7963 /* Don't allow the fake index registers %eiz/%riz unless allow_index_reg is non-zero. */
7964 if (!allow_index_reg
7965 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7966 return (const reg_entry *) NULL;
7967
7968 if (((r->reg_flags & (RegRex64 | RegRex))
7969 || r->reg_type.bitfield.reg64)
7970 && (!cpu_arch_flags.bitfield.cpulm
7971 || !operand_type_equal (&r->reg_type, &control))
7972 && flag_code != CODE_64BIT)
7973 return (const reg_entry *) NULL;
7974
7975 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7976 return (const reg_entry *) NULL;
7977
7978 return r;
7979 }
7980
7981 /* REG_STRING starts *before* REGISTER_PREFIX. */
7982
7983 static const reg_entry *
7984 parse_register (char *reg_string, char **end_op)
7985 {
7986 const reg_entry *r;
7987
7988 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7989 r = parse_real_register (reg_string, end_op);
7990 else
7991 r = NULL;
7992 if (!r)
7993 {
7994 char *save = input_line_pointer;
7995 char c;
7996 symbolS *symbolP;
7997
7998 input_line_pointer = reg_string;
7999 c = get_symbol_end ();
8000 symbolP = symbol_find (reg_string);
8001 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8002 {
8003 const expressionS *e = symbol_get_value_expression (symbolP);
8004
8005 know (e->X_op == O_register);
8006 know (e->X_add_number >= 0
8007 && (valueT) e->X_add_number < i386_regtab_size);
8008 r = i386_regtab + e->X_add_number;
8009 *end_op = input_line_pointer;
8010 }
8011 *input_line_pointer = c;
8012 input_line_pointer = save;
8013 }
8014 return r;
8015 }
8016
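/* Try to parse NAME as a register for the expression parser. On success,
   fill in E as an O_register expression, update *nextcharP, and return 1;
   otherwise restore the input pointer and, in Intel syntax, defer to
   i386_intel_parse_name (return 0 in AT&T syntax). */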
8017 int
8018 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8019 {
8020 const reg_entry *r;
8021 char *end = input_line_pointer;
8022
8023 *end = *nextcharP;
8024 r = parse_register (name, &input_line_pointer);
8025 if (r && end <= input_line_pointer)
8026 {
8027 *nextcharP = *input_line_pointer;
8028 *input_line_pointer = 0;
8029 e->X_op = O_register;
8030 e->X_add_number = r - i386_regtab;
8031 return 1;
8032 }
8033 input_line_pointer = end;
8034 *end = 0;
8035 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8036 }
8037
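/* Parse operand syntax the generic expression code does not handle itself:
   a '%'-prefixed register reference, or an Intel-syntax '[...]' expression,
   which is wrapped into an O_index expression. */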
8038 void
8039 md_operand (expressionS *e)
8040 {
8041 char *end;
8042 const reg_entry *r;
8043
8044 switch (*input_line_pointer)
8045 {
8046 case REGISTER_PREFIX:
8047 r = parse_real_register (input_line_pointer, &end);
8048 if (r)
8049 {
8050 e->X_op = O_register;
8051 e->X_add_number = r - i386_regtab;
8052 input_line_pointer = end;
8053 }
8054 break;
8055
8056 case '[':
8057 gas_assert (intel_syntax);
8058 end = input_line_pointer++;
8059 expression (e);
8060 if (*input_line_pointer == ']')
8061 {
8062 ++input_line_pointer;
8063 e->X_op_symbol = make_expr_symbol (e);
8064 e->X_add_symbol = NULL;
8065 e->X_add_number = 0;
8066 e->X_op = O_index;
8067 }
8068 else
8069 {
8070 e->X_op = O_absent;
8071 input_line_pointer = end;
8072 }
8073 break;
8074 }
8075 }
8076
8077 \f
8078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8079 const char *md_shortopts = "kVQ:sqn";
8080 #else
8081 const char *md_shortopts = "qn";
8082 #endif
8083
8084 #define OPTION_32 (OPTION_MD_BASE + 0)
8085 #define OPTION_64 (OPTION_MD_BASE + 1)
8086 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8087 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8088 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8089 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8090 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8091 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8092 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8093 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8094 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8095 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8096 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8097
8098 struct option md_longopts[] =
8099 {
8100 {"32", no_argument, NULL, OPTION_32},
8101 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8102 || defined (TE_PE) || defined (TE_PEP))
8103 {"64", no_argument, NULL, OPTION_64},
8104 #endif
8105 {"divide", no_argument, NULL, OPTION_DIVIDE},
8106 {"march", required_argument, NULL, OPTION_MARCH},
8107 {"mtune", required_argument, NULL, OPTION_MTUNE},
8108 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8109 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8110 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8111 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8112 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8113 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8114 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8115 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8116 {NULL, no_argument, NULL, 0}
8117 };
8118 size_t md_longopts_size = sizeof (md_longopts);
8119
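/* Handle a machine-specific command line option. Return non-zero if the
   option was recognized, zero otherwise. */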
8120 int
8121 md_parse_option (int c, char *arg)
8122 {
8123 unsigned int j;
8124 char *arch, *next;
8125
8126 switch (c)
8127 {
8128 case 'n':
8129 optimize_align_code = 0;
8130 break;
8131
8132 case 'q':
8133 quiet_warnings = 1;
8134 break;
8135
8136 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8137 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8138 should be emitted or not. FIXME: Not implemented. */
8139 case 'Q':
8140 break;
8141
8142 /* -V: SVR4 argument to print version ID. */
8143 case 'V':
8144 print_version_id ();
8145 break;
8146
8147 /* -k: Ignore for FreeBSD compatibility. */
8148 case 'k':
8149 break;
8150
8151 case 's':
8152 /* -s: On i386 Solaris, this tells the native assembler to use
8153 .stab instead of .stab.excl. We always use .stab anyhow. */
8154 break;
8155 #endif
8156 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8157 || defined (TE_PE) || defined (TE_PEP))
8158 case OPTION_64:
8159 {
8160 const char **list, **l;
8161
8162 list = bfd_target_list ();
8163 for (l = list; *l != NULL; l++)
8164 if (CONST_STRNEQ (*l, "elf64-x86-64")
8165 || strcmp (*l, "coff-x86-64") == 0
8166 || strcmp (*l, "pe-x86-64") == 0
8167 || strcmp (*l, "pei-x86-64") == 0)
8168 {
8169 default_arch = "x86_64";
8170 break;
8171 }
8172 if (*l == NULL)
8173 as_fatal (_("No compiled in support for x86_64"));
8174 free (list);
8175 }
8176 break;
8177 #endif
8178
8179 case OPTION_32:
8180 default_arch = "i386";
8181 break;
8182
8183 case OPTION_DIVIDE:
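/* --divide: rebuild i386_comment_chars without '/', so '/' is no longer
   treated as a comment character. When SVR4_COMMENT_CHARS is not defined,
   '/' is not a comment character to begin with and the option is simply
   accepted and ignored. */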
8184 #ifdef SVR4_COMMENT_CHARS
8185 {
8186 char *n, *t;
8187 const char *s;
8188
8189 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8190 t = n;
8191 for (s = i386_comment_chars; *s != '\0'; s++)
8192 if (*s != '/')
8193 *t++ = *s;
8194 *t = '\0';
8195 i386_comment_chars = n;
8196 }
8197 #endif
8198 break;
8199
8200 case OPTION_MARCH:
8201 arch = xstrdup (arg);
8202 do
8203 {
8204 if (*arch == '.')
8205 as_fatal (_("Invalid -march= option: `%s'"), arg);
8206 next = strchr (arch, '+');
8207 if (next)
8208 *next++ = '\0';
8209 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8210 {
8211 if (strcmp (arch, cpu_arch [j].name) == 0)
8212 {
8213 /* Processor. */
8214 if (! cpu_arch[j].flags.bitfield.cpui386)
8215 continue;
8216
8217 cpu_arch_name = cpu_arch[j].name;
8218 cpu_sub_arch_name = NULL;
8219 cpu_arch_flags = cpu_arch[j].flags;
8220 cpu_arch_isa = cpu_arch[j].type;
8221 cpu_arch_isa_flags = cpu_arch[j].flags;
8222 if (!cpu_arch_tune_set)
8223 {
8224 cpu_arch_tune = cpu_arch_isa;
8225 cpu_arch_tune_flags = cpu_arch_isa_flags;
8226 }
8227 break;
8228 }
8229 else if (*cpu_arch [j].name == '.'
8230 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8231 {
8232 /* ISA extension. */
8233 i386_cpu_flags flags;
8234
8235 if (!cpu_arch[j].negated)
8236 flags = cpu_flags_or (cpu_arch_flags,
8237 cpu_arch[j].flags);
8238 else
8239 flags = cpu_flags_and_not (cpu_arch_flags,
8240 cpu_arch[j].flags);
8241 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8242 {
8243 if (cpu_sub_arch_name)
8244 {
8245 char *name = cpu_sub_arch_name;
8246 cpu_sub_arch_name = concat (name,
8247 cpu_arch[j].name,
8248 (const char *) NULL);
8249 free (name);
8250 }
8251 else
8252 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8253 cpu_arch_flags = flags;
8254 }
8255 break;
8256 }
8257 }
8258
8259 if (j >= ARRAY_SIZE (cpu_arch))
8260 as_fatal (_("Invalid -march= option: `%s'"), arg);
8261
8262 arch = next;
8263 }
8264 while (next != NULL);
8265 break;
8266
8267 case OPTION_MTUNE:
8268 if (*arg == '.')
8269 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8270 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8271 {
8272 if (strcmp (arg, cpu_arch [j].name) == 0)
8273 {
8274 cpu_arch_tune_set = 1;
8275 cpu_arch_tune = cpu_arch [j].type;
8276 cpu_arch_tune_flags = cpu_arch[j].flags;
8277 break;
8278 }
8279 }
8280 if (j >= ARRAY_SIZE (cpu_arch))
8281 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8282 break;
8283
8284 case OPTION_MMNEMONIC:
8285 if (strcasecmp (arg, "att") == 0)
8286 intel_mnemonic = 0;
8287 else if (strcasecmp (arg, "intel") == 0)
8288 intel_mnemonic = 1;
8289 else
8290 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8291 break;
8292
8293 case OPTION_MSYNTAX:
8294 if (strcasecmp (arg, "att") == 0)
8295 intel_syntax = 0;
8296 else if (strcasecmp (arg, "intel") == 0)
8297 intel_syntax = 1;
8298 else
8299 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8300 break;
8301
8302 case OPTION_MINDEX_REG:
8303 allow_index_reg = 1;
8304 break;
8305
8306 case OPTION_MNAKED_REG:
8307 allow_naked_reg = 1;
8308 break;
8309
8310 case OPTION_MOLD_GCC:
8311 old_gcc = 1;
8312 break;
8313
8314 case OPTION_MSSE2AVX:
8315 sse2avx = 1;
8316 break;
8317
8318 case OPTION_MSSE_CHECK:
8319 if (strcasecmp (arg, "error") == 0)
8320 sse_check = sse_check_error;
8321 else if (strcasecmp (arg, "warning") == 0)
8322 sse_check = sse_check_warning;
8323 else if (strcasecmp (arg, "none") == 0)
8324 sse_check = sse_check_none;
8325 else
8326 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8327 break;
8328
8329 case OPTION_MAVXSCALAR:
8330 if (strcasecmp (arg, "128") == 0)
8331 avxscalar = vex128;
8332 else if (strcasecmp (arg, "256") == 0)
8333 avxscalar = vex256;
8334 else
8335 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8336 break;
8337
8338 default:
8339 return 0;
8340 }
8341 return 1;
8342 }
8343
8344 #define MESSAGE_TEMPLATE \
8345 "                                                                                "
8346
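/* Print the processor/extension names accepted by -march= and -mtune= to
   STREAM, wrapping lines at the width of MESSAGE_TEMPLATE. With EXT non-zero
   only ISA extensions (names starting with '.') are listed; with CHECK
   non-zero processors lacking the i386 flag are omitted. */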
8347 static void
8348 show_arch (FILE *stream, int ext, int check)
8349 {
8350 static char message[] = MESSAGE_TEMPLATE;
8351 char *start = message + 27;
8352 char *p;
8353 int size = sizeof (MESSAGE_TEMPLATE);
8354 int left;
8355 const char *name;
8356 int len;
8357 unsigned int j;
8358
8359 p = start;
8360 left = size - (start - message);
8361 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8362 {
8363 /* Should it be skipped? */
8364 if (cpu_arch [j].skip)
8365 continue;
8366
8367 name = cpu_arch [j].name;
8368 len = cpu_arch [j].len;
8369 if (*name == '.')
8370 {
8371 /* It is an extension. Skip if we aren't asked to show it. */
8372 if (ext)
8373 {
8374 name++;
8375 len--;
8376 }
8377 else
8378 continue;
8379 }
8380 else if (ext)
8381 {
8382 /* It is a processor. Skip if we only show extensions. */
8383 continue;
8384 }
8385 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8386 {
8387 /* It is a processor that cannot be selected - skip. */
8388 continue;
8389 }
8390
8391 /* Reserve 2 spaces for ", " or ",\0" */
8392 left -= len + 2;
8393
8394 /* Check if there is any room. */
8395 if (left >= 0)
8396 {
8397 if (p != start)
8398 {
8399 *p++ = ',';
8400 *p++ = ' ';
8401 }
8402 p = mempcpy (p, name, len);
8403 }
8404 else
8405 {
8406 /* Output the current message now and start a new one. */
8407 *p++ = ',';
8408 *p = '\0';
8409 fprintf (stream, "%s\n", message);
8410 p = start;
8411 left = size - (start - message) - len - 2;
8412
8413 gas_assert (left >= 0);
8414
8415 p = mempcpy (p, name, len);
8416 }
8417 }
8418
8419 *p = '\0';
8420 fprintf (stream, "%s\n", message);
8421 }
8422
8423 void
8424 md_show_usage (FILE *stream)
8425 {
8426 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8427 fprintf (stream, _("\
8428 -Q ignored\n\
8429 -V print assembler version number\n\
8430 -k ignored\n"));
8431 #endif
8432 fprintf (stream, _("\
8433 -n Do not optimize code alignment\n\
8434 -q quieten some warnings\n"));
8435 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8436 fprintf (stream, _("\
8437 -s ignored\n"));
8438 #endif
8439 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8440 || defined (TE_PE) || defined (TE_PEP))
8441 fprintf (stream, _("\
8442 --32/--64 generate 32bit/64bit code\n"));
8443 #endif
8444 #ifdef SVR4_COMMENT_CHARS
8445 fprintf (stream, _("\
8446 --divide do not treat `/' as a comment character\n"));
8447 #else
8448 fprintf (stream, _("\
8449 --divide ignored\n"));
8450 #endif
8451 fprintf (stream, _("\
8452 -march=CPU[,+EXTENSION...]\n\
8453 generate code for CPU and EXTENSION, CPU is one of:\n"));
8454 show_arch (stream, 0, 1);
8455 fprintf (stream, _("\
8456 EXTENSION is combination of:\n"));
8457 show_arch (stream, 1, 0);
8458 fprintf (stream, _("\
8459 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8460 show_arch (stream, 0, 0);
8461 fprintf (stream, _("\
8462 -msse2avx encode SSE instructions with VEX prefix\n"));
8463 fprintf (stream, _("\
8464 -msse-check=[none|error|warning]\n\
8465 check SSE instructions\n"));
8466 fprintf (stream, _("\
8467 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8468 length\n"));
8469 fprintf (stream, _("\
8470 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8471 fprintf (stream, _("\
8472 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8473 fprintf (stream, _("\
8474 -mindex-reg support pseudo index registers\n"));
8475 fprintf (stream, _("\
8476 -mnaked-reg don't require `%%' prefix for registers\n"));
8477 fprintf (stream, _("\
8478 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8479 }
8480
8481 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8482 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8483 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8484
8485 /* Pick the target format to use. */
8486
8487 const char *
8488 i386_target_format (void)
8489 {
8490 if (!strcmp (default_arch, "x86_64"))
8491 update_code_flag (CODE_64BIT, 1);
8492 else if (!strcmp (default_arch, "i386"))
8493 update_code_flag (CODE_32BIT, 1);
8494 else
8495 as_fatal (_("Unknown architecture"));
8496
8497 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8498 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8499 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8500 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8501
8502 switch (OUTPUT_FLAVOR)
8503 {
8504 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8505 case bfd_target_aout_flavour:
8506 return AOUT_TARGET_FORMAT;
8507 #endif
8508 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8509 # if defined (TE_PE) || defined (TE_PEP)
8510 case bfd_target_coff_flavour:
8511 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8512 # elif defined (TE_GO32)
8513 case bfd_target_coff_flavour:
8514 return "coff-go32";
8515 # else
8516 case bfd_target_coff_flavour:
8517 return "coff-i386";
8518 # endif
8519 #endif
8520 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8521 case bfd_target_elf_flavour:
8522 {
8523 if (flag_code == CODE_64BIT)
8524 {
8525 object_64bit = 1;
8526 use_rela_relocations = 1;
8527 }
8528 if (cpu_arch_isa == PROCESSOR_L1OM)
8529 {
8530 if (flag_code != CODE_64BIT)
8531 as_fatal (_("Intel L1OM is 64bit only"));
8532 return ELF_TARGET_L1OM_FORMAT;
8533 }
8534 else
8535 return (flag_code == CODE_64BIT
8536 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8537 }
8538 #endif
8539 #if defined (OBJ_MACH_O)
8540 case bfd_target_mach_o_flavour:
8541 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8542 #endif
8543 default:
8544 abort ();
8545 return NULL;
8546 }
8547 }
8548
8549 #endif /* OBJ_MAYBE_ more than one */
8550
8551 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
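/* Emit a .note section containing the architecture name as an NT_ARCH note,
   then restore the previously current section and subsection. */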
8552 void
8553 i386_elf_emit_arch_note (void)
8554 {
8555 if (IS_ELF && cpu_arch_name != NULL)
8556 {
8557 char *p;
8558 asection *seg = now_seg;
8559 subsegT subseg = now_subseg;
8560 Elf_Internal_Note i_note;
8561 Elf_External_Note e_note;
8562 asection *note_secp;
8563 int len;
8564
8565 /* Create the .note section. */
8566 note_secp = subseg_new (".note", 0);
8567 bfd_set_section_flags (stdoutput,
8568 note_secp,
8569 SEC_HAS_CONTENTS | SEC_READONLY);
8570
8571 /* Process the arch string. */
8572 len = strlen (cpu_arch_name);
8573
8574 i_note.namesz = len + 1;
8575 i_note.descsz = 0;
8576 i_note.type = NT_ARCH;
8577 p = frag_more (sizeof (e_note.namesz));
8578 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8579 p = frag_more (sizeof (e_note.descsz));
8580 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8581 p = frag_more (sizeof (e_note.type));
8582 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8583 p = frag_more (len + 1);
8584 strcpy (p, cpu_arch_name);
8585
8586 frag_align (2, 0, 0);
8587
8588 subseg_set (seg, subseg);
8589 }
8590 }
8591 #endif
8592 \f
8593 symbolS *
8594 md_undefined_symbol (char *name)
8596 {
8597 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8598 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8599 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8600 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8601 {
8602 if (!GOT_symbol)
8603 {
8604 if (symbol_find (name))
8605 as_bad (_("GOT already in symbol table"));
8606 GOT_symbol = symbol_new (name, undefined_section,
8607 (valueT) 0, &zero_address_frag);
8608 }
8609 return GOT_symbol;
8610 }
8611 return 0;
8612 }
8613
8614 /* Round up a section size to the appropriate boundary. */
8615
8616 valueT
8617 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8620 {
8621 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8622 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8623 {
8624 /* For a.out, force the section size to be aligned. If we don't do
8625 this, BFD will align it for us, but it will not write out the
8626 final bytes of the section. This may be a bug in BFD, but it is
8627 easier to fix it here since that is how the other a.out targets
8628 work. */
8629 int align;
8630
8631 align = bfd_get_section_alignment (stdoutput, segment);
8632 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8633 }
8634 #endif
8635
8636 return size;
8637 }
8638
8639 /* On the i386, PC-relative offsets are relative to the start of the
8640 next instruction. That is, the address of the offset, plus its
8641 size, since the offset is always the last part of the insn. */
8642
8643 long
8644 md_pcrel_from (fixS *fixP)
8645 {
8646 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8647 }
8648
8649 #ifndef I386COFF
8650
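/* Handle the .bss directive: switch to the subsegment of the .bss section
   selected by the directive's absolute-expression argument. */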
8651 static void
8652 s_bss (int ignore ATTRIBUTE_UNUSED)
8653 {
8654 int temp;
8655
8656 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8657 if (IS_ELF)
8658 obj_elf_section_change_hook ();
8659 #endif
8660 temp = get_absolute_expression ();
8661 subseg_set (bss_section, (subsegT) temp);
8662 demand_empty_rest_of_line ();
8663 }
8664
8665 #endif
8666
8667 void
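/* If a fixup subtracts the GOT symbol, convert it to the corresponding
   GOT-relative relocation type and drop the subtracted symbol. */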
8668 i386_validate_fix (fixS *fixp)
8669 {
8670 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8671 {
8672 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8673 {
8674 if (!object_64bit)
8675 abort ();
8676 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8677 }
8678 else
8679 {
8680 if (!object_64bit)
8681 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8682 else
8683 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8684 }
8685 fixp->fx_subsy = 0;
8686 }
8687 }
8688
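/* Translate an internal fixup into a BFD relocation entry for the object
   file writer, selecting the relocation code and computing the addend
   according to whether REL or RELA relocations are in use. */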
8689 arelent *
8690 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8693 {
8694 arelent *rel;
8695 bfd_reloc_code_real_type code;
8696
8697 switch (fixp->fx_r_type)
8698 {
8699 case BFD_RELOC_X86_64_PLT32:
8700 case BFD_RELOC_X86_64_GOT32:
8701 case BFD_RELOC_X86_64_GOTPCREL:
8702 case BFD_RELOC_386_PLT32:
8703 case BFD_RELOC_386_GOT32:
8704 case BFD_RELOC_386_GOTOFF:
8705 case BFD_RELOC_386_GOTPC:
8706 case BFD_RELOC_386_TLS_GD:
8707 case BFD_RELOC_386_TLS_LDM:
8708 case BFD_RELOC_386_TLS_LDO_32:
8709 case BFD_RELOC_386_TLS_IE_32:
8710 case BFD_RELOC_386_TLS_IE:
8711 case BFD_RELOC_386_TLS_GOTIE:
8712 case BFD_RELOC_386_TLS_LE_32:
8713 case BFD_RELOC_386_TLS_LE:
8714 case BFD_RELOC_386_TLS_GOTDESC:
8715 case BFD_RELOC_386_TLS_DESC_CALL:
8716 case BFD_RELOC_X86_64_TLSGD:
8717 case BFD_RELOC_X86_64_TLSLD:
8718 case BFD_RELOC_X86_64_DTPOFF32:
8719 case BFD_RELOC_X86_64_DTPOFF64:
8720 case BFD_RELOC_X86_64_GOTTPOFF:
8721 case BFD_RELOC_X86_64_TPOFF32:
8722 case BFD_RELOC_X86_64_TPOFF64:
8723 case BFD_RELOC_X86_64_GOTOFF64:
8724 case BFD_RELOC_X86_64_GOTPC32:
8725 case BFD_RELOC_X86_64_GOT64:
8726 case BFD_RELOC_X86_64_GOTPCREL64:
8727 case BFD_RELOC_X86_64_GOTPC64:
8728 case BFD_RELOC_X86_64_GOTPLT64:
8729 case BFD_RELOC_X86_64_PLTOFF64:
8730 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8731 case BFD_RELOC_X86_64_TLSDESC_CALL:
8732 case BFD_RELOC_RVA:
8733 case BFD_RELOC_VTABLE_ENTRY:
8734 case BFD_RELOC_VTABLE_INHERIT:
8735 #ifdef TE_PE
8736 case BFD_RELOC_32_SECREL:
8737 #endif
8738 code = fixp->fx_r_type;
8739 break;
8740 case BFD_RELOC_X86_64_32S:
8741 if (!fixp->fx_pcrel)
8742 {
8743 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8744 code = fixp->fx_r_type;
8745 break;
8746 }
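/* A PC-relative BFD_RELOC_X86_64_32S falls through to the default handling below. */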
8747 default:
8748 if (fixp->fx_pcrel)
8749 {
8750 switch (fixp->fx_size)
8751 {
8752 default:
8753 as_bad_where (fixp->fx_file, fixp->fx_line,
8754 _("can not do %d byte pc-relative relocation"),
8755 fixp->fx_size);
8756 code = BFD_RELOC_32_PCREL;
8757 break;
8758 case 1: code = BFD_RELOC_8_PCREL; break;
8759 case 2: code = BFD_RELOC_16_PCREL; break;
8760 case 4: code = BFD_RELOC_32_PCREL; break;
8761 #ifdef BFD64
8762 case 8: code = BFD_RELOC_64_PCREL; break;
8763 #endif
8764 }
8765 }
8766 else
8767 {
8768 switch (fixp->fx_size)
8769 {
8770 default:
8771 as_bad_where (fixp->fx_file, fixp->fx_line,
8772 _("can not do %d byte relocation"),
8773 fixp->fx_size);
8774 code = BFD_RELOC_32;
8775 break;
8776 case 1: code = BFD_RELOC_8; break;
8777 case 2: code = BFD_RELOC_16; break;
8778 case 4: code = BFD_RELOC_32; break;
8779 #ifdef BFD64
8780 case 8: code = BFD_RELOC_64; break;
8781 #endif
8782 }
8783 }
8784 break;
8785 }
8786
8787 if ((code == BFD_RELOC_32
8788 || code == BFD_RELOC_32_PCREL
8789 || code == BFD_RELOC_X86_64_32S)
8790 && GOT_symbol
8791 && fixp->fx_addsy == GOT_symbol)
8792 {
8793 if (!object_64bit)
8794 code = BFD_RELOC_386_GOTPC;
8795 else
8796 code = BFD_RELOC_X86_64_GOTPC32;
8797 }
8798 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8799 && GOT_symbol
8800 && fixp->fx_addsy == GOT_symbol)
8801 {
8802 code = BFD_RELOC_X86_64_GOTPC64;
8803 }
8804
8805 rel = (arelent *) xmalloc (sizeof (arelent));
8806 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8807 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8808
8809 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8810
8811 if (!use_rela_relocations)
8812 {
8813 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8814 vtable entry to be used in the relocation's section offset. */
8815 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8816 rel->address = fixp->fx_offset;
8817 #if defined (OBJ_COFF) && defined (TE_PE)
8818 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8819 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8820 else
8821 #endif
8822 rel->addend = 0;
8823 }
8824 /* Use the RELA format in 64-bit mode. */
8825 else
8826 {
8827 if (!fixp->fx_pcrel)
8828 rel->addend = fixp->fx_offset;
8829 else
8830 switch (code)
8831 {
8832 case BFD_RELOC_X86_64_PLT32:
8833 case BFD_RELOC_X86_64_GOT32:
8834 case BFD_RELOC_X86_64_GOTPCREL:
8835 case BFD_RELOC_X86_64_TLSGD:
8836 case BFD_RELOC_X86_64_TLSLD:
8837 case BFD_RELOC_X86_64_GOTTPOFF:
8838 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8839 case BFD_RELOC_X86_64_TLSDESC_CALL:
8840 rel->addend = fixp->fx_offset - fixp->fx_size;
8841 break;
8842 default:
8843 rel->addend = (section->vma
8844 - fixp->fx_size
8845 + fixp->fx_addnumber
8846 + md_pcrel_from (fixp));
8847 break;
8848 }
8849 }
8850
8851 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8852 if (rel->howto == NULL)
8853 {
8854 as_bad_where (fixp->fx_file, fixp->fx_line,
8855 _("cannot represent relocation type %s"),
8856 bfd_get_reloc_code_name (code));
8857 /* Set howto to a garbage value so that we can keep going. */
8858 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8859 gas_assert (rel->howto != NULL);
8860 }
8861
8862 return rel;
8863 }
8864
8865 #include "tc-i386-intel.c"
8866
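/* Parse a register name from a CFI directive; on success EXP is replaced by
   the register's DWARF2 number as an O_constant. Naked and pseudo registers
   are temporarily allowed while the expression is evaluated. */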
8867 void
8868 tc_x86_parse_to_dw2regnum (expressionS *exp)
8869 {
8870 int saved_naked_reg;
8871 char saved_register_dot;
8872
8873 saved_naked_reg = allow_naked_reg;
8874 allow_naked_reg = 1;
8875 saved_register_dot = register_chars['.'];
8876 register_chars['.'] = '.';
8877 allow_pseudo_reg = 1;
8878 expression_and_evaluate (exp);
8879 allow_pseudo_reg = 0;
8880 register_chars['.'] = saved_register_dot;
8881 allow_naked_reg = saved_naked_reg;
8882
8883 if (exp->X_op == O_register && exp->X_add_number >= 0)
8884 {
8885 if ((addressT) exp->X_add_number < i386_regtab_size)
8886 {
8887 exp->X_op = O_constant;
8888 exp->X_add_number = i386_regtab[exp->X_add_number]
8889 .dw2_regnum[flag_code >> 1];
8890 }
8891 else
8892 exp->X_op = O_illegal;
8893 }
8894 }
8895
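/* Emit the initial CFI instructions for a frame: the CFA is the stack
   pointer plus the address size, and the return address is saved at CFA
   minus the address size. The stack pointer's DWARF register number is
   looked up once and cached. */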
8896 void
8897 tc_x86_frame_initial_instructions (void)
8898 {
8899 static unsigned int sp_regno[2];
8900
8901 if (!sp_regno[flag_code >> 1])
8902 {
8903 char *saved_input = input_line_pointer;
8904 char sp[][4] = {"esp", "rsp"};
8905 expressionS exp;
8906
8907 input_line_pointer = sp[flag_code >> 1];
8908 tc_x86_parse_to_dw2regnum (&exp);
8909 gas_assert (exp.X_op == O_constant);
8910 sp_regno[flag_code >> 1] = exp.X_add_number;
8911 input_line_pointer = saved_input;
8912 }
8913
8914 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8915 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8916 }
8917
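/* Recognize "unwind" as a .section type in 64-bit mode and map it to
   SHT_X86_64_UNWIND; return -1 otherwise. */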
8918 int
8919 i386_elf_section_type (const char *str, size_t len)
8920 {
8921 if (flag_code == CODE_64BIT
8922 && len == sizeof ("unwind") - 1
8923 && strncmp (str, "unwind", 6) == 0)
8924 return SHT_X86_64_UNWIND;
8925
8926 return -1;
8927 }
8928
8929 #ifdef TE_SOLARIS
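/* On Solaris, give the .eh_frame section the SHT_X86_64_UNWIND type in
   64-bit mode. */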
8930 void
8931 i386_solaris_fix_up_eh_frame (segT sec)
8932 {
8933 if (flag_code == CODE_64BIT)
8934 elf_section_type (sec) = SHT_X86_64_UNWIND;
8935 }
8936 #endif
8937
8938 #ifdef TE_PE
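/* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL, as used
   for DWARF2 debug information on PE targets. */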
8939 void
8940 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8941 {
8942 expressionS exp;
8943
8944 exp.X_op = O_secrel;
8945 exp.X_add_symbol = symbol;
8946 exp.X_add_number = 0;
8947 emit_expr (&exp, size);
8948 }
8949 #endif
8950
8951 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8952 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
8953
8954 bfd_vma
8955 x86_64_section_letter (int letter, char **ptr_msg)
8956 {
8957 if (flag_code == CODE_64BIT)
8958 {
8959 if (letter == 'l')
8960 return SHF_X86_64_LARGE;
8961
8962 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
8963 }
8964 else
8965 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
8966 return -1;
8967 }
8968
8969 bfd_vma
8970 x86_64_section_word (char *str, size_t len)
8971 {
8972 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8973 return SHF_X86_64_LARGE;
8974
8975 return -1;
8976 }
8977
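/* Handle the .largecomm directive. Outside 64-bit mode it is treated as
   .comm with a warning. In 64-bit mode the common and bss sections are
   temporarily redirected to the large common section and .lbss so that
   s_comm_internal places the symbol there. */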
8978 static void
8979 handle_large_common (int small ATTRIBUTE_UNUSED)
8980 {
8981 if (flag_code != CODE_64BIT)
8982 {
8983 s_comm_internal (0, elf_common_parse);
8984 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8985 }
8986 else
8987 {
8988 static segT lbss_section;
8989 asection *saved_com_section_ptr = elf_com_section_ptr;
8990 asection *saved_bss_section = bss_section;
8991
8992 if (lbss_section == NULL)
8993 {
8994 flagword applicable;
8995 segT seg = now_seg;
8996 subsegT subseg = now_subseg;
8997
8998 /* The .lbss section is for local .largecomm symbols. */
8999 lbss_section = subseg_new (".lbss", 0);
9000 applicable = bfd_applicable_section_flags (stdoutput);
9001 bfd_set_section_flags (stdoutput, lbss_section,
9002 applicable & SEC_ALLOC);
9003 seg_info (lbss_section)->bss = 1;
9004
9005 subseg_set (seg, subseg);
9006 }
9007
9008 elf_com_section_ptr = &_bfd_elf_large_com_section;
9009 bss_section = lbss_section;
9010
9011 s_comm_internal (0, elf_common_parse);
9012
9013 elf_com_section_ptr = saved_com_section_ptr;
9014 bss_section = saved_bss_section;
9015 }
9016 }
9017 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */