bb2f228553a2eba24bd6fb0dba38036bc24bb719
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

/* String equality helper.  */
#define streq(a, b) (strcmp (a, b) == 0)

/* Marks the end of the instruction string being parsed.  */
#define END_OF_INSN '\0'

/* Feature bits of the architecture/CPU actually being targeted.  */
static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
#endif
78
/* Element type of a SIMD/FP register qualifier, e.g. the "s" in
   "v0.4s".  NT_invtype marks an absent or unknown type.  */
enum neon_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q
};

/* Bits for DEFINED field in neon_type_el.  */
#define NTA_HASTYPE 1	/* A shape suffix (e.g. ".4s") was parsed.  */
#define NTA_HASINDEX 2	/* An element index (e.g. "[2]") was parsed.  */

/* Parsed qualifier of a SIMD vector register or vector element.  */
struct neon_type_el
{
  /* Element type; NT_invtype when none was given.  */
  enum neon_el_type type;
  /* Mask of NTA_* bits saying which pieces of the qualifier were seen.  */
  unsigned char defined;
  /* Number of elements in the shape, e.g. 4 for ".4s"; 0 when only an
     element size was given (e.g. ".s").  */
  unsigned width;
  /* Element index parsed from "[index]".  */
  int64_t index;
};
100
/* Bit for the FLAGS field of struct reloc below.  */
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information collected while parsing one instruction.  */
struct reloc
{
  /* BFD relocation code to generate.  */
  bfd_reloc_code_real_type type;
  /* Expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Operand the relocation is attached to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags.  */
  uint32_t flags;
  /* Non-zero if libopcodes is needed when fixing up this reloc.  */
  unsigned need_libopcodes_p : 1;
};
112
/* Representation of the instruction currently being parsed/encoded.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The single instruction being assembled.  */
static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.  */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  (0x10 lies outside the 4-bit condition field, hence
   "invalid".)  */
#define COND_ALWAYS 0x10
231
/* Maps a barrier option name (template) to its encoding (value).  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Maps an NZCV flag-set name (template) to its encoding (value).  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Maps a relocation modifier name to a BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;
  unsigned char number;		/* Register number.  */
  unsigned char type;		/* An aarch64_reg_type value.  */
  unsigned char builtin;	/* Non-zero for a built-in register,
				   zero for a user-defined alias.  */
} reg_entry;
258
/* Macros to define the register types and masks for the purpose
   of parsing.  The X-macro list below is expanded twice: once to
   generate the aarch64_reg_type enumerators and once to generate the
   matching reg_type_masks[] bit sets.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(CN)	/* c[0-7]  */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  /* Typecheck: any 64-bit int reg         (inc SP exc XZR) */	\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))	\
  /* Typecheck: any int                    (inc {W}SP inc [WX]ZR) */	\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)	\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))	\
  /* Typecheck: any [BHSDQ]P FP.  */	\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)	\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */	\
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)	\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Any integer register; used for error messages only.  */	\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)	\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))	\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

/* First expansion: each entry becomes a REG_TYPE_* enumerator.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

/* Second expansion: each basic entry becomes its own bit, each multi
   entry becomes the OR of its members' bits.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)	(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to synchronized with aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP 31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Define some common error messages.  */
#define BAD_SP _("SP not allowed here")

/* Hash tables built at startup, one per operand/mnemonic namespace
   (opcodes, condition codes, shift operators, system registers,
   PSTATE fields, IC/DC/AT/TLBI operation names, registers and
   aliases, barrier options, NZCV flag sets, prefetch operations).  */
static struct hash_control *aarch64_ops_hsh;
static struct hash_control *aarch64_cond_hsh;
static struct hash_control *aarch64_shift_hsh;
static struct hash_control *aarch64_sys_regs_hsh;
static struct hash_control *aarch64_pstatefield_hsh;
static struct hash_control *aarch64_sys_regs_ic_hsh;
static struct hash_control *aarch64_sys_regs_dc_hsh;
static struct hash_control *aarch64_sys_regs_at_hsh;
static struct hash_control *aarch64_sys_regs_tlbi_hsh;
static struct hash_control *aarch64_reg_hsh;
static struct hash_control *aarch64_barrier_opt_hsh;
static struct hash_control *aarch64_nzcv_hsh;
static struct hash_control *aarch64_pldop_hsh;
409
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	     <insn> */

/* The label most recently seen on the current line, if any.  */
static symbolS *last_label_seen;
420
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
/* One constant awaiting emission into a literal pool.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  /* The pending literal constants.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Identifier of this pool.  */
  unsigned int id;
  /* Symbol placed at the pool.  */
  symbolS *symbol;
  /* Section and sub-section the pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* Size of the literals in this pool.  */
  int size;
  /* Next pool in the singly-linked list of all pools.  */
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
446 \f
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Skip one optional space.  A single test suffices because the input
   scrubber collapses runs of whitespace to single spaces.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Set while expression () is being called from my_get_expression, so
   that md_operand (below) can mark illegal operands as O_illegal.  */
static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0	/* No '#' immediate prefix accepted.  */
#define GE_OPT_PREFIX 1	/* An optional '#' prefix is accepted.  */
502
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX (optional '#' before the
   expression).  When REJECT_ABSENT is non-zero, an O_absent result is
   treated as a failure.  On either success or failure, *STR is advanced
   to where parsing stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Consume an optional '#' immediate prefix when the mode allows it.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser on *STR by temporarily
     redirecting input_line_pointer; in_my_get_expression_p lets
     md_operand flag bad operands as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: report how far we got and restore the caller's input.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *msg)
{
  if (error_p ())
    return;
  set_syntax_error (msg);
}
603
/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The assertion also catches truncation: vsnprintf returns the
	 would-be length, which must fit within the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
669 return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
/* Parse the qualifier of a SIMD vector register or a SIMD vector element.
   Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
   otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 4h 8h 2s 4s 1d 2d
   b h s d q */
static bfd_boolean
parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum neon_el_type type;

  /* skip '.' */
  ptr++;

  /* No leading digit: a bare element size such as ".s"; record a width
     of 0 so the caller can tell the shape was omitted.  */
  if (!ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only the element counts of the supported arrangements are valid.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only accepted with a width of 1 ("1q").  */
      if (width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A full shape must describe a 64-bit or a 128-bit vector.  */
  if (width != 0 && width * element_size != 64 && width * element_size != 128)
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct neon_type_el atype;
  struct neon_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" qualifier.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* From here on, work with the register's concrete type.  */
  type = reg->type;

  if (type == REG_TYPE_VN
      && parse_neon_operand_type (&parsetype, &str))
    {
      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Parse the constant element index between the brackets.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register list. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
/* This function parses the NEON register list.  On success, it returns
   the parsed register list information in the following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct neon_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Parse one register (or range continuation) per iteration; a ','
     separates entries and a '-' continues a range from the previous
     register.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All entries must share the first entry's type/shape.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_neon_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Pack each register of the (possibly one-element) range into
	   the 5-bit-per-register result.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* A ',' continues the list; otherwise a '-' continues a range (note
     the comma operator: in_range is set before testing for '-').  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching this handler means ".req" appeared in the directive
     position (start of a statement), which is always wrong: the
     correct form is "alias .req register".  */
  as_bad (_("invalid syntax for .req directive"));
}
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1367 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the symbol name for the new state: "$d" marks data, "$x"
     marks A64 instructions.  Both are untyped (see the note above).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Drop the superseded zero-offset symbol from the chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  Note that the segment's mapstate
       stays MAP_UNDEFINED in this case.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit its mapping symbol at the current
     output position.  */
  mapping_state_2 (state, 0);
}
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
/* Handle the .bss directive: switch output to the BSS section and
   mark the transition as data for mapping-symbol purposes.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1532
/* Handle the .even directive: align the current section to a 2-byte
   (1 << 1) boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match when operator, value and signedness agree.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match when every constituent agrees.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP so that it refers to the pool entry: a byte offset
     from the pool's (not-yet-located) symbol.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. Thats what these functions do. */
1668
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Let the object-format backend take note of the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1716
1717
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool for the current (sub)section into the output, aligned to its
   entry size, and mark the pools as emptied.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Covers 4-byte (align == 2), 8-byte and 16-byte entry pools.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      mapping_state (MAP_DATA);

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      record_alignment (now_seg, align);

      /* The \002 byte in the generated name cannot appear in a
	 user-written symbol, so collisions are impossible.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's (previously undefined) symbol its real
	 location, now that we know where the pool is emitted.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
/* Implement the ELF .word/.long (NBYTES == 4) and .xword/.dword
   (NBYTES == 8) directives: emit a comma-separated list of
   NBYTES-sized data items, switching the mapping state to data.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Recognise an optional ":reloc_suffix:" after a '#'-prefixed
	     symbol; such suffixes are detected but not yet supported
	     here (see the note above this function).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  /* Accept a comma-separated list of 32-bit instruction words.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* NOTE(review): emit_expr writes in target byte order; the value
	 is pre-swapped here for big-endian output, presumably so the
	 instruction encoding stays little-endian — confirm against
	 md_number_to_chars handling.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1896
1897 static void
1898 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 expression (&exp);
1903 frag_grow (4);
1904 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1905 BFD_RELOC_AARCH64_TLSDESC_ADD);
1906
1907 demand_empty_rest_of_line ();
1908 }
1909
1910 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1911
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  /* Attach the relocation at the current offset, i.e. on the BLR that
     the assembler will emit next.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
1929
1930 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1931
1932 static void
1933 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1934 {
1935 expressionS exp;
1936
1937 expression (&exp);
1938 frag_grow (4);
1939 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1940 BFD_RELOC_AARCH64_TLSDESC_LDR);
1941
1942 demand_empty_rest_of_line ();
1943 }
1944 #endif /* OBJ_ELF */
1945
1946 static void s_aarch64_arch (int);
1947 static void s_aarch64_cpu (int);
1948 static void s_aarch64_arch_extension (int);
1949
1950 /* This table describes all the machine specific pseudo-ops the assembler
1951 has to support. The fields are:
1952 pseudo-op name without dot
1953 function to call to execute this pseudo-op
1954 Integer arg to pass to the function. */
1955
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the item size in bytes: 4 for
     .word/.long, 8 for .xword/.dword.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
1979 \f
1980
1981 /* Check whether STR points to a register name followed by a comma or the
1982 end of line; REG_TYPE indicates which register types are checked
1983 against. Return TRUE if STR is such a register name; otherwise return
1984 FALSE. The function does not intend to produce any diagnostics, but since
1985 the register parser aarch64_reg_parse, which is called by this function,
1986 does produce diagnostics, we call clear_error to clear any diagnostics
1987 that may be generated by aarch64_reg_parse.
1988 Also, the function returns FALSE directly if there is any user error
1989 present at the function entry. This prevents the existing diagnostics
1990 state from being spoiled.
1991 The function currently serves parse_constant_immediate and
1992 parse_big_immediate only. */
1993 static bfd_boolean
1994 reg_name_p (char *str, aarch64_reg_type reg_type)
1995 {
1996 int reg;
1997
1998 /* Prevent the diagnostics state from being spoiled. */
1999 if (error_p ())
2000 return FALSE;
2001
2002 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2003
2004 /* Clear the parsing error that may be set by the reg parser. */
2005 clear_error ();
2006
2007 if (reg == PARSE_FAIL)
2008 return FALSE;
2009
2010 skip_whitespace (str);
2011 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2012 return TRUE;
2013
2014 return FALSE;
2015 }
2016
2017 /* Parser functions used exclusively in instruction operands. */
2018
2019 /* Parse an immediate expression which may not be constant.
2020
2021 To prevent the expression parser from pushing a register name
2022 into the symbol table as an undefined symbol, firstly a check is
2023 done to find out whether STR is a valid register name followed
2024 by a comma or the end of line. Return FALSE if STR is such a
2025 string. */
2026
2027 static bfd_boolean
2028 parse_immediate_expression (char **str, expressionS *exp)
2029 {
2030 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
2031 {
2032 set_recoverable_error (_("immediate operand required"));
2033 return FALSE;
2034 }
2035
2036 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2037
2038 if (exp->X_op == O_absent)
2039 {
2040 set_fatal_syntax_error (_("missing immediate expression"));
2041 return FALSE;
2042 }
2043
2044 return TRUE;
2045 }
2046
2047 /* Constant immediate-value read function for use in insn parsing.
2048 STR points to the beginning of the immediate (with the optional
2049 leading #); *VAL receives the value.
2050
2051 Return TRUE on success; otherwise return FALSE. */
2052
2053 static bfd_boolean
2054 parse_constant_immediate (char **str, int64_t * val)
2055 {
2056 expressionS exp;
2057
2058 if (! parse_immediate_expression (str, &exp))
2059 return FALSE;
2060
2061 if (exp.X_op != O_constant)
2062 {
2063 set_syntax_error (_("constant expression required"));
2064 return FALSE;
2065 }
2066
2067 *val = exp.X_add_number;
2068 return TRUE;
2069 }
2070
/* Fold the 32-bit IEEE754 single-precision encoding IMM down to the
   8-bit FMOV-style immediate: bit 31 maps to bit 7, and bits 25:19
   map to bits 6:0.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */

  return low_bits | sign_bit;
}
2077
2078 /* Return TRUE if the single-precision floating-point value encoded in IMM
2079 can be expressed in the AArch64 8-bit signed floating-point format with
2080 3-bit exponent and normalized 4 bits of precision; in other words, the
2081 floating-point value must be expressable as
2082 (+/-) n / 16 * power (2, r)
2083 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2084
2085 static bfd_boolean
2086 aarch64_imm_float_p (uint32_t imm)
2087 {
2088 /* If a single-precision floating-point value has the following bit
2089 pattern, it can be expressed in the AArch64 8-bit floating-point
2090 format:
2091
2092 3 32222222 2221111111111
2093 1 09876543 21098765432109876543210
2094 n Eeeeeexx xxxx0000000000000000000
2095
2096 where n, e and each x are either 0 or 1 independently, with
2097 E == ~ e. */
2098
2099 uint32_t pattern;
2100
2101 /* Prepare the pattern for 'Eeeeee'. */
2102 if (((imm >> 30) & 0x1) == 0)
2103 pattern = 0x3e000000;
2104 else
2105 pattern = 0x40000000;
2106
2107 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2108 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2109 }
2110
2111 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2112
2113 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2114 8-bit signed floating-point format with 3-bit exponent and normalized 4
2115 bits of precision (i.e. can be used in an FMOV instruction); return the
2116 equivalent single-precision encoding in *FPWORD.
2117
2118 Otherwise return FALSE. */
2119
2120 static bfd_boolean
2121 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2122 {
2123 /* If a double-precision floating-point value has the following bit
2124 pattern, it can be expressed in the AArch64 8-bit floating-point
2125 format:
2126
2127 6 66655555555 554444444...21111111111
2128 3 21098765432 109876543...098765432109876543210
2129 n Eeeeeeeeexx xxxx00000...000000000000000000000
2130
2131 where n, e and each x are either 0 or 1 independently, with
2132 E == ~ e. */
2133
2134 uint32_t pattern;
2135 uint32_t high32 = imm >> 32;
2136
2137 /* Lower 32 bits need to be 0s. */
2138 if ((imm & 0xffffffff) != 0)
2139 return FALSE;
2140
2141 /* Prepare the pattern for 'Eeeeeeeee'. */
2142 if (((high32 >> 30) & 0x1) == 0)
2143 pattern = 0x3fc00000;
2144 else
2145 pattern = 0x40000000;
2146
2147 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2148 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2149 {
2150 /* Convert to the single-precision encoding.
2151 i.e. convert
2152 n Eeeeeeeeexx xxxx00000...000000000000000000000
2153 to
2154 n Eeeeeexx xxxx0000000000000000000. */
2155 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2156 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2157 return TRUE;
2158 }
2159 else
2160 return FALSE;
2161 }
2162
2163 /* Parse a floating-point immediate. Return TRUE on success and return the
2164 value in *IMMED in the format of IEEE754 single-precision encoding.
2165 *CCP points to the start of the string; DP_P is TRUE when the immediate
2166 is expected to be in double-precision (N.B. this only matters when
2167 hexadecimal representation is involved).
2168
2169 N.B. 0.0 is accepted by this function. */
2170
static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to its single-precision
	     equivalent; fails if not FMOV-expressible.  */
	  if (! aarch64_double_precision_fmovable (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      /* No error is flagged here: the caller may try other parses.  */
      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Parse the decimal text into an IEEE754 single-precision
	 encoding.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  /* Accept FMOV-expressible values, plus positive and negative zero.  */
  if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
    {
      *immed = fpword;
      *ccp = str;
      return TRUE;
    }

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2249
2250 /* Less-generic immediate-value read function with the possibility of loading
2251 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2252 instructions.
2253
2254 To prevent the expression parser from pushing a register name into the
2255 symbol table as an undefined symbol, a check is firstly done to find
2256 out whether STR is a valid register name followed by a comma or the end
2257 of line. Return FALSE if STR is such a register. */
2258
static bfd_boolean
parse_big_immediate (char **str, int64_t *imm)
{
  char *ptr = *str;

  /* Reject plain register names up front so the expression parser does
     not enter them into the symbol table as undefined symbols.  */
  if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
    {
      set_syntax_error (_("immediate operand required"));
      return FALSE;
    }

  my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);

  /* N.B. *IMM is written only when the expression folds to a constant;
     otherwise the expression stays in inst.reloc.exp and *IMM is left
     untouched.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return TRUE;
}
2279
2280 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2281 if NEED_LIBOPCODES is non-zero, the fixup will need
2282 assistance from the libopcodes. */
2283
2284 static inline void
2285 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2286 const aarch64_opnd_info *operand,
2287 int need_libopcodes_p)
2288 {
2289 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2290 reloc->opnd = operand->type;
2291 if (need_libopcodes_p)
2292 reloc->need_libopcodes_p = 1;
2293 };
2294
2295 /* Return TRUE if the instruction needs to be fixed up later internally by
2296 the GAS; otherwise return FALSE. */
2297
2298 static inline bfd_boolean
2299 aarch64_gas_internal_fixup_p (void)
2300 {
2301 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2302 }
2303
/* Assign the immediate value to the relevant field in *OPERAND if
   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
   needs an internal fixup in a later stage.
   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
   IMM.VALUE that may get assigned with the constant.
   NEED_LIBOPCODES_P is forwarded to aarch64_set_gas_internal_fixup;
   SKIP_P is stored in OPERAND->SKIP for libopcodes (see below).  */
static inline void
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int addr_off_p,
				    int need_libopcodes_p,
				    int skip_p)
{
  if (reloc->exp.X_op == O_constant)
    {
      if (addr_off_p)
	operand->addr.offset.imm = reloc->exp.X_add_number;
      else
	operand->imm.value = reloc->exp.X_add_number;
      /* The constant has been consumed; no relocation remains.  */
      reloc->type = BFD_RELOC_UNUSED;
    }
  else
    {
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
    }
}
2333
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero for pc-relative relocations.  */
  bfd_reloc_code_real_type adr_type;		/* Used with ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* Used with ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* Used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* Used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Used with LDR/STR offsets.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Used with LDR literal.  */
};
2353
/* Table of relocation modifiers, searched by find_reloc_table_entry.
   Field order follows struct reloc_table_entry above; a zero reloc
   type means the modifier is not allowed in that instruction class.  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2818
2819 /* Given the address of a pointer pointing to the textual name of a
2820 relocation as may appear in assembler source, attempt to find its
2821 details in reloc_table. The pointer will be updated to the character
2822 after the trailing colon. On failure, NULL will be returned;
2823 otherwise return the reloc_table_entry. */
2824
2825 static struct reloc_table_entry *
2826 find_reloc_table_entry (char **str)
2827 {
2828 unsigned int i;
2829 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2830 {
2831 int length = strlen (reloc_table[i].name);
2832
2833 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2834 && (*str)[length] == ':')
2835 {
2836 *str += (length + 1);
2837 return &reloc_table[i];
2838 }
2839 }
2840
2841 return NULL;
2842 }
2843
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators and operand forms are acceptable in
   the current parsing context.  */
enum parse_shift_mode
{
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
2855
2856 /* Parse a <shift> operator on an AArch64 data processing instruction.
2857 Return TRUE on success; otherwise return FALSE. */
2858 static bfd_boolean
2859 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2860 {
2861 const struct aarch64_name_value_pair *shift_op;
2862 enum aarch64_modifier_kind kind;
2863 expressionS exp;
2864 int exp_has_prefix;
2865 char *s = *str;
2866 char *p = s;
2867
2868 for (p = *str; ISALPHA (*p); p++)
2869 ;
2870
2871 if (p == *str)
2872 {
2873 set_syntax_error (_("shift expression expected"));
2874 return FALSE;
2875 }
2876
2877 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2878
2879 if (shift_op == NULL)
2880 {
2881 set_syntax_error (_("shift operator expected"));
2882 return FALSE;
2883 }
2884
2885 kind = aarch64_get_operand_modifier (shift_op);
2886
2887 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2888 {
2889 set_syntax_error (_("invalid use of 'MSL'"));
2890 return FALSE;
2891 }
2892
2893 switch (mode)
2894 {
2895 case SHIFTED_LOGIC_IMM:
2896 if (aarch64_extend_operator_p (kind) == TRUE)
2897 {
2898 set_syntax_error (_("extending shift is not permitted"));
2899 return FALSE;
2900 }
2901 break;
2902
2903 case SHIFTED_ARITH_IMM:
2904 if (kind == AARCH64_MOD_ROR)
2905 {
2906 set_syntax_error (_("'ROR' shift is not permitted"));
2907 return FALSE;
2908 }
2909 break;
2910
2911 case SHIFTED_LSL:
2912 if (kind != AARCH64_MOD_LSL)
2913 {
2914 set_syntax_error (_("only 'LSL' shift is permitted"));
2915 return FALSE;
2916 }
2917 break;
2918
2919 case SHIFTED_REG_OFFSET:
2920 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2921 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2922 {
2923 set_fatal_syntax_error
2924 (_("invalid shift for the register offset addressing mode"));
2925 return FALSE;
2926 }
2927 break;
2928
2929 case SHIFTED_LSL_MSL:
2930 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2931 {
2932 set_syntax_error (_("invalid shift operator"));
2933 return FALSE;
2934 }
2935 break;
2936
2937 default:
2938 abort ();
2939 }
2940
2941 /* Whitespace can appear here if the next thing is a bare digit. */
2942 skip_whitespace (p);
2943
2944 /* Parse shift amount. */
2945 exp_has_prefix = 0;
2946 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2947 exp.X_op = O_absent;
2948 else
2949 {
2950 if (is_immediate_prefix (*p))
2951 {
2952 p++;
2953 exp_has_prefix = 1;
2954 }
2955 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2956 }
2957 if (exp.X_op == O_absent)
2958 {
2959 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2960 {
2961 set_syntax_error (_("missing shift amount"));
2962 return FALSE;
2963 }
2964 operand->shifter.amount = 0;
2965 }
2966 else if (exp.X_op != O_constant)
2967 {
2968 set_syntax_error (_("constant shift amount required"));
2969 return FALSE;
2970 }
2971 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2972 {
2973 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2974 return FALSE;
2975 }
2976 else
2977 {
2978 operand->shifter.amount = exp.X_add_number;
2979 operand->shifter.amount_present = 1;
2980 }
2981
2982 operand->shifter.operator_present = 1;
2983 operand->shifter.kind = kind;
2984
2985 *str = p;
2986 return TRUE;
2987 }
2988
2989 /* Parse a <shifter_operand> for a data processing instruction:
2990
2991 #<immediate>
2992 #<immediate>, LSL #imm
2993
2994 Validation of immediate operands is deferred to md_apply_fix.
2995
2996 Return TRUE on success; otherwise return FALSE. */
2997
2998 static bfd_boolean
2999 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3000 enum parse_shift_mode mode)
3001 {
3002 char *p;
3003
3004 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3005 return FALSE;
3006
3007 p = *str;
3008
3009 /* Accept an immediate expression. */
3010 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3011 return FALSE;
3012
3013 /* Accept optional LSL for arithmetic immediate values. */
3014 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3015 if (! parse_shift (&p, operand, SHIFTED_LSL))
3016 return FALSE;
3017
3018 /* Not accept any shifter for logical immediate values. */
3019 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3020 && parse_shift (&p, operand, mode))
3021 {
3022 set_syntax_error (_("unexpected shift operator"));
3023 return FALSE;
3024 }
3025
3026 *str = p;
3027 return TRUE;
3028 }
3029
3030 /* Parse a <shifter_operand> for a data processing instruction:
3031
3032 <Rm>
3033 <Rm>, <shift>
3034 #<immediate>
3035 #<immediate>, LSL #imm
3036
3037 where <shift> is handled by parse_shift above, and the last two
3038 cases are handled by the function above.
3039
3040 Validation of immediate operands is deferred to md_apply_fix.
3041
3042 Return TRUE on success; otherwise return FALSE. */
3043
3044 static bfd_boolean
3045 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3046 enum parse_shift_mode mode)
3047 {
3048 int reg;
3049 int isreg32, isregzero;
3050 enum aarch64_operand_class opd_class
3051 = aarch64_get_operand_class (operand->type);
3052
3053 if ((reg =
3054 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
3055 {
3056 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3057 {
3058 set_syntax_error (_("unexpected register in the immediate operand"));
3059 return FALSE;
3060 }
3061
3062 if (!isregzero && reg == REG_SP)
3063 {
3064 set_syntax_error (BAD_SP);
3065 return FALSE;
3066 }
3067
3068 operand->reg.regno = reg;
3069 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3070
3071 /* Accept optional shift operation on register. */
3072 if (! skip_past_comma (str))
3073 return TRUE;
3074
3075 if (! parse_shift (str, operand, mode))
3076 return FALSE;
3077
3078 return TRUE;
3079 }
3080 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3081 {
3082 set_syntax_error
3083 (_("integer register expected in the extended/shifted operand "
3084 "register"));
3085 return FALSE;
3086 }
3087
3088 /* We have a shifted immediate variable. */
3089 return parse_shifter_operand_imm (str, operand, mode);
3090 }
3091
3092 /* Return TRUE on success; return FALSE otherwise. */
3093
3094 static bfd_boolean
3095 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3096 enum parse_shift_mode mode)
3097 {
3098 char *p = *str;
3099
3100 /* Determine if we have the sequence of characters #: or just :
3101 coming next. If we do, then we check for a :rello: relocation
3102 modifier. If we don't, punt the whole lot to
3103 parse_shifter_operand. */
3104
3105 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3106 {
3107 struct reloc_table_entry *entry;
3108
3109 if (p[0] == '#')
3110 p += 2;
3111 else
3112 p++;
3113 *str = p;
3114
3115 /* Try to parse a relocation. Anything else is an error. */
3116 if (!(entry = find_reloc_table_entry (str)))
3117 {
3118 set_syntax_error (_("unknown relocation modifier"));
3119 return FALSE;
3120 }
3121
3122 if (entry->add_type == 0)
3123 {
3124 set_syntax_error
3125 (_("this relocation modifier is not allowed on this instruction"));
3126 return FALSE;
3127 }
3128
3129 /* Save str before we decompose it. */
3130 p = *str;
3131
3132 /* Next, we parse the expression. */
3133 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3134 return FALSE;
3135
3136 /* Record the relocation type (use the ADD variant here). */
3137 inst.reloc.type = entry->add_type;
3138 inst.reloc.pc_rel = entry->pc_rel;
3139
3140 /* If str is empty, we've reached the end, stop here. */
3141 if (**str == '\0')
3142 return TRUE;
3143
3144 /* Otherwise, we have a shifted reloc modifier, so rewind to
3145 recover the variable name and continue parsing for the shifter. */
3146 *str = p;
3147 return parse_shifter_operand_imm (str, operand, mode);
3148 }
3149
3150 return parse_shifter_operand (str, operand, mode);
3151 }
3152
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
     =immediate

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.

   RELOC non-zero permits #:reloc_op: modifiers; ACCEPT_REG_POST_INDEX
   non-zero permits the [base],Xm post-indexed register form.

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
		    int accept_reg_post_index)
{
  char *p = *str;
  int reg;
  int isreg32, isregzero;
  expressionS *exp = &inst.reloc.exp;

  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (reloc && skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr variant; everything else here is a
	     literal load and takes the ld_literal variant.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Accept SP and reject ZR */
  reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
  if (reg == PARSE_FAIL || isreg32)
    {
      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
      return FALSE;
    }
  operand->addr.base_regno = reg;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* Reject SP and accept ZR */
      reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
      if (reg != PARSE_FAIL)
	{
	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These extend kinds require a 64-bit index register.  */
	      if (isreg32)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	    }
	  else if (!isreg32)
	    {
	      /* (S|U)XTW requires a 32-bit index register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (reloc && skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid expression in the address"));
	      return FALSE;
	    }
	  /* [Xn,<expr> */
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      if (accept_reg_post_index
	  && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
					     &isregzero)) != PARSE_FAIL)
	{
	  /* [Xn],Xm */
	  if (isreg32)
	    {
	      set_syntax_error (_("invalid 32-bit register offset"));
	      return FALSE;
	    }
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}
      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3453
/* Wrapper around parse_address_main for plain addresses: relocation
   modifiers are not accepted.  Return TRUE on success; otherwise
   return FALSE.  */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand,
	       int accept_reg_post_index)
{
  return parse_address_main (str, operand, 0, accept_reg_post_index);
}
3461
/* Wrapper around parse_address_main that accepts #:reloc_op: modifiers
   but no register post-index.  Return TRUE on success; otherwise
   return FALSE.  */
static bfd_boolean
parse_address_reloc (char **str, aarch64_opnd_info *operand)
{
  return parse_address_main (str, operand, 1, 0);
}
3468
3469 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3470 Return TRUE on success; otherwise return FALSE. */
3471 static bfd_boolean
3472 parse_half (char **str, int *internal_fixup_p)
3473 {
3474 char *p, *saved;
3475 int dummy;
3476
3477 p = *str;
3478 skip_past_char (&p, '#');
3479
3480 gas_assert (internal_fixup_p);
3481 *internal_fixup_p = 0;
3482
3483 if (*p == ':')
3484 {
3485 struct reloc_table_entry *entry;
3486
3487 /* Try to parse a relocation. Anything else is an error. */
3488 ++p;
3489 if (!(entry = find_reloc_table_entry (&p)))
3490 {
3491 set_syntax_error (_("unknown relocation modifier"));
3492 return FALSE;
3493 }
3494
3495 if (entry->movw_type == 0)
3496 {
3497 set_syntax_error
3498 (_("this relocation modifier is not allowed on this instruction"));
3499 return FALSE;
3500 }
3501
3502 inst.reloc.type = entry->movw_type;
3503 }
3504 else
3505 *internal_fixup_p = 1;
3506
3507 /* Avoid parsing a register as a general symbol. */
3508 saved = p;
3509 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3510 return FALSE;
3511 p = saved;
3512
3513 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3514 return FALSE;
3515
3516 *str = p;
3517 return TRUE;
3518 }
3519
/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>

   The label may be preceded by an adrp-class relocation modifier
   (e.g. :got:); otherwise the default page-relative relocation
   BFD_RELOC_AARCH64_ADR_HI21_PCREL is used.  The parsed expression and
   relocation type are stored in the global inst.reloc.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* Only modifiers usable with ADRP have an adrp_type.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}
3562
3563 /* Miscellaneous. */
3564
3565 /* Parse an option for a preload instruction. Returns the encoding for the
3566 option, or PARSE_FAIL. */
3567
3568 static int
3569 parse_pldop (char **str)
3570 {
3571 char *p, *q;
3572 const struct aarch64_name_value_pair *o;
3573
3574 p = q = *str;
3575 while (ISALNUM (*q))
3576 q++;
3577
3578 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3579 if (!o)
3580 return PARSE_FAIL;
3581
3582 *str = q;
3583 return o->value;
3584 }
3585
3586 /* Parse an option for a barrier instruction. Returns the encoding for the
3587 option, or PARSE_FAIL. */
3588
3589 static int
3590 parse_barrier (char **str)
3591 {
3592 char *p, *q;
3593 const asm_barrier_opt *o;
3594
3595 p = q = *str;
3596 while (ISALPHA (*q))
3597 q++;
3598
3599 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3600 if (!o)
3601 return PARSE_FAIL;
3602
3603 *str = q;
3604 return o->value;
3605 }
3606
3607 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3608 Returns the encoding for the option, or PARSE_FAIL.
3609
3610 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3611 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3612
3613 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3614 field, otherwise as a system register.
3615 */
3616
3617 static int
3618 parse_sys_reg (char **str, struct hash_control *sys_regs,
3619 int imple_defined_p, int pstatefield_p)
3620 {
3621 char *p, *q;
3622 char buf[32];
3623 const aarch64_sys_reg *o;
3624 int value;
3625
3626 p = buf;
3627 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3628 if (p < buf + 31)
3629 *p++ = TOLOWER (*q);
3630 *p = '\0';
3631 /* Assert that BUF be large enough. */
3632 gas_assert (p - buf == q - *str);
3633
3634 o = hash_find (sys_regs, buf);
3635 if (!o)
3636 {
3637 if (!imple_defined_p)
3638 return PARSE_FAIL;
3639 else
3640 {
3641 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3642 unsigned int op0, op1, cn, cm, op2;
3643
3644 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3645 != 5)
3646 return PARSE_FAIL;
3647 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3648 return PARSE_FAIL;
3649 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3650 }
3651 }
3652 else
3653 {
3654 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3655 as_bad (_("selected processor does not support PSTATE field "
3656 "name '%s'"), buf);
3657 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3658 as_bad (_("selected processor does not support system register "
3659 "name '%s'"), buf);
3660 if (aarch64_sys_reg_deprecated_p (o))
3661 as_warn (_("system register name '%s' is deprecated and may be "
3662 "removed in a future release"), buf);
3663 value = o->value;
3664 }
3665
3666 *str = q;
3667 return value;
3668 }
3669
3670 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3671 for the option, or NULL. */
3672
3673 static const aarch64_sys_ins_reg *
3674 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3675 {
3676 char *p, *q;
3677 char buf[32];
3678 const aarch64_sys_ins_reg *o;
3679
3680 p = buf;
3681 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3682 if (p < buf + 31)
3683 *p++ = TOLOWER (*q);
3684 *p = '\0';
3685
3686 o = hash_find (sys_ins_regs, buf);
3687 if (!o)
3688 return NULL;
3689
3690 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3691 as_bad (_("selected processor does not support system register "
3692 "name '%s'"), buf);
3693
3694 *str = q;
3695 return o;
3696 }
3697 \f
/* Parsing helper macros used by parse_operands.  They all assume local
   variables STR (current input pointer) and, where noted, VAL, RTYPE,
   ISREG32 and ISREGZERO, plus a FAILURE label to jump to on error.  */

/* Consume the single character CHR from the input, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of kind REGTYPE into VAL (and its type into RTYPE),
   or fail with the default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register into INFO, setting the W/X
   qualifier from the parsed width, or fail.  */
#define po_int_reg_or_fail(reject_sp, reject_rz) do {		\
    val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz,	\
                                   &isreg32, &isregzero);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = val;					\
    if (isreg32)						\
      info->qualifier = AARCH64_OPND_QLF_W;			\
    else							\
      info->qualifier = AARCH64_OPND_QLF_X;			\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  MIN and MAX are stringized into the error message.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val))		\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Evaluate EXPR; fail if it yields zero/false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
3747 \f
/* Encode the 12-bit immediate field of an add/sub immediate instruction;
   the field occupies bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm12_lsb = 10;
  return imm << imm12_lsb;
}
3754
/* Encode the shift-amount field of an add/sub immediate instruction;
   the field occupies bits [23:22] of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_lsb = 22;
  return cnt << shift_lsb;
}
3761
3762
/* Encode the immediate field of an Adr instruction: the low two bits go
   into immlo (bits [30:29]) and bits [20:2] go into immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		   /* imm[1:0].  */
  uint32_t immhi = imm & (0x7ffff << 2);   /* imm[20:2].  */
  return (immlo << 29) | (immhi << 3);
}
3770
/* Encode the 16-bit immediate field of a Move wide immediate instruction;
   the field occupies bits [20:5] of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
3777
/* Encode the 26-bit offset of an unconditional branch; only the low
   26 bits of OFS are kept, occupying bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t imm26_mask = (1u << 26) - 1;
  return ofs & imm26_mask;
}
3784
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the low 19 bits of OFS are placed in bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
3791
/* Encode the 19-bit offset of a load-literal instruction; the low 19
   bits of OFS are placed in bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
3798
/* Encode the 14-bit offset of a test & branch instruction; the low 14
   bits of OFS are placed in bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t imm14_mask = (1u << 14) - 1;
  return (ofs & imm14_mask) << 5;
}
3805
/* Encode the 16-bit immediate field of svc/hvc/smc; the field occupies
   bits [20:5] of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
3812
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode, which distinguishes the two operations.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
3819
/* Reencode a MOVN/MOVZ-class opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
3825
/* Reencode a MOVN/MOVZ-class opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
3831
3832 /* Overall per-instruction processing. */
3833
3834 /* We need to be able to fix up arbitrary expressions in some statements.
3835 This is so that we can handle symbols that are an arbitrary distance from
3836 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3837 which returns part of an address in a form which will be valid for
3838 a data instruction. We do this by pushing the expression into a symbol
3839 in the expr_section, and creating a fix for that. */
3840
3841 static fixS *
3842 fix_new_aarch64 (fragS * frag,
3843 int where,
3844 short int size, expressionS * exp, int pc_rel, int reloc)
3845 {
3846 fixS *new_fix;
3847
3848 switch (exp->X_op)
3849 {
3850 case O_constant:
3851 case O_symbol:
3852 case O_add:
3853 case O_subtract:
3854 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3855 break;
3856
3857 default:
3858 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3859 pc_rel, reloc);
3860 break;
3861 }
3862 return new_fix;
3863 }
3864 \f
3865 /* Diagnostics on operands errors. */
3866
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries must be
   kept in the same order as enum aarch64_operand_error_kind.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
3886
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The asserts verify (in debug builds) that the enumerators are
     ordered by strictly increasing severity, which is what makes the
     plain ">" comparison below valid.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
3907
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   Returns a pointer to a static buffer; not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
3936
3937 static void
3938 reset_aarch64_instruction (aarch64_instruction *instruction)
3939 {
3940 memset (instruction, '\0', sizeof (aarch64_instruction));
3941 instruction->reloc.type = BFD_RELOC_UNUSED;
3942 }
3943
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tagged with the opcode (instruction
   template) it was found against.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Template the error belongs to.  */
  aarch64_operand_error detail;	/* Kind, operand index, message, data.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head/tail of a list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
3975
3976 /* Initialize the data structure that stores the operand mismatch
3977 information on assembling one line of the assembly code. */
3978 static void
3979 init_operand_error_report (void)
3980 {
3981 if (operand_error_report.head != NULL)
3982 {
3983 gas_assert (operand_error_report.tail != NULL);
3984 operand_error_report.tail->next = free_opnd_error_record_nodes;
3985 free_opnd_error_record_nodes = operand_error_report.head;
3986 operand_error_report.head = NULL;
3987 operand_error_report.tail = NULL;
3988 return;
3989 }
3990 gas_assert (operand_error_report.tail == NULL);
3991 }
3992
3993 /* Return TRUE if some operand error has been recorded during the
3994 parsing of the current assembly line using the opcode *OPCODE;
3995 otherwise return FALSE. */
3996 static inline bfd_boolean
3997 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3998 {
3999 operand_error_record *record = operand_error_report.head;
4000 return record && record->opcode == opcode;
4001 }
4002
4003 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4004 OPCODE field is initialized with OPCODE.
4005 N.B. only one record for each opcode, i.e. the maximum of one error is
4006 recorded for each instruction template. */
4007
4008 static void
4009 add_operand_error_record (const operand_error_record* new_record)
4010 {
4011 const aarch64_opcode *opcode = new_record->opcode;
4012 operand_error_record* record = operand_error_report.head;
4013
4014 /* The record may have been created for this opcode. If not, we need
4015 to prepare one. */
4016 if (! opcode_has_operand_error_p (opcode))
4017 {
4018 /* Get one empty record. */
4019 if (free_opnd_error_record_nodes == NULL)
4020 {
4021 record = xmalloc (sizeof (operand_error_record));
4022 if (record == NULL)
4023 abort ();
4024 }
4025 else
4026 {
4027 record = free_opnd_error_record_nodes;
4028 free_opnd_error_record_nodes = record->next;
4029 }
4030 record->opcode = opcode;
4031 /* Insert at the head. */
4032 record->next = operand_error_report.head;
4033 operand_error_report.head = record;
4034 if (operand_error_report.tail == NULL)
4035 operand_error_report.tail = record;
4036 }
4037 else if (record->detail.kind != AARCH64_OPDE_NIL
4038 && record->detail.index <= new_record->detail.index
4039 && operand_error_higher_severity_p (record->detail.kind,
4040 new_record->detail.kind))
4041 {
4042 /* In the case of multiple errors found on operands related with a
4043 single opcode, only record the error of the leftmost operand and
4044 only if the error is of higher severity. */
4045 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4046 " the existing error %s on operand %d",
4047 operand_mismatch_kind_names[new_record->detail.kind],
4048 new_record->detail.index,
4049 operand_mismatch_kind_names[record->detail.kind],
4050 record->detail.index);
4051 return;
4052 }
4053
4054 record->detail = new_record->detail;
4055 }
4056
4057 static inline void
4058 record_operand_error_info (const aarch64_opcode *opcode,
4059 aarch64_operand_error *error_info)
4060 {
4061 operand_error_record record;
4062 record.opcode = opcode;
4063 record.detail = *error_info;
4064 add_operand_error_record (&record);
4065 }
4066
4067 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4068 error message *ERROR, for operand IDX (count from 0). */
4069
4070 static void
4071 record_operand_error (const aarch64_opcode *opcode, int idx,
4072 enum aarch64_operand_error_kind kind,
4073 const char* error)
4074 {
4075 aarch64_operand_error info;
4076 memset(&info, 0, sizeof (info));
4077 info.index = idx;
4078 info.kind = kind;
4079 info.error = error;
4080 record_operand_error_info (opcode, &info);
4081 }
4082
4083 static void
4084 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4085 enum aarch64_operand_error_kind kind,
4086 const char* error, const int *extra_data)
4087 {
4088 aarch64_operand_error info;
4089 info.index = idx;
4090 info.kind = kind;
4091 info.error = error;
4092 info.data[0] = extra_data[0];
4093 info.data[1] = extra_data[1];
4094 info.data[2] = extra_data[2];
4095 record_operand_error_info (opcode, &info);
4096 }
4097
4098 static void
4099 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4100 const char* error, int lower_bound,
4101 int upper_bound)
4102 {
4103 int data[3] = {lower_bound, upper_bound, 0};
4104 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4105 error, data);
4106 }
4107
4108 /* Remove the operand error record for *OPCODE. */
4109 static void ATTRIBUTE_UNUSED
4110 remove_operand_error_record (const aarch64_opcode *opcode)
4111 {
4112 if (opcode_has_operand_error_p (opcode))
4113 {
4114 operand_error_record* record = operand_error_report.head;
4115 gas_assert (record != NULL && operand_error_report.tail != NULL);
4116 operand_error_report.head = record->next;
4117 record->next = free_opnd_error_record_nodes;
4118 free_opnd_error_record_nodes = record;
4119 if (operand_error_report.head == NULL)
4120 {
4121 gas_assert (operand_error_report.tail == record);
4122 operand_error_report.tail = NULL;
4123 }
4124 }
4125 }
4126
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = -1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL entry marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  if (i != 0 && idx == -1)
	    /* If nothing has been matched, return the 1st sequence.  */
	    idx = 0;
	  break;
	}

      /* Count how many operand qualifiers of this pattern agree with
	 the instruction's.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly greater, so ties keep the earliest pattern.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4179
4180 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4181 corresponding operands in *INSTR. */
4182
4183 static inline void
4184 assign_qualifier_sequence (aarch64_inst *instr,
4185 const aarch64_opnd_qualifier_t *qualifiers)
4186 {
4187 int i = 0;
4188 int num_opnds = aarch64_num_of_operands (instr->opcode);
4189 gas_assert (num_opnds);
4190 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4191 instr->operands[i].qualifier = *qualifiers;
4192 }
4193
/* Print operands for the diagnosis purpose.  Appends the textual form of
   each operand in OPNDS (as described by OPCODE) to the caller-supplied
   buffer BUF, separated by commas.  BUF must be large enough; no bounds
   checking is performed here.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const size_t size = 128;
      char str[size];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);

      /* Delimiter.  Omitted when the operand printed as empty, so that
	 an optional operand does not leave a dangling comma.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ",");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4227
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) in the same style as as_bad,
   but marked "Info:" rather than as an error.  */

static void
output_info (const char *format, ...)
{
  char *file;
  unsigned int line;
  va_list args;

  as_where (&file, &line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4251
/* Output one operand error record via as_bad (and output_info for
   supplementary hints).  STR is the original assembly line, quoted in
   the diagnostics.  For AARCH64_OPDE_INVALID_VARIANT in verbose mode
   this re-parses STR into the global `inst' to synthesize corrected
   variants, so it must not be called while `inst' is still needed.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* IDX is -1 when the error is not tied to a particular operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  const size_t size = 2048;
	  char buf[size];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Parsing is expected to succeed here: the line
	     already parsed once against this template, only the
	     qualifier match failed (hence encoding must fail below).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, size, "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, size, "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4422
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of mulitple instruction templates; output the
   error message that most closely describes the problem.

   Selection policy: first find the highest-severity error kind across
   all records, then among records of that kind pick the one whose
   operand index is largest (i.e. the template that got furthest
   through the operand list), preferring one with a non-NULL message on
   ties.  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entres with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4494 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    out[i] = (insn >> (8 * i)) & 0xff;
}
4505
/* Read a 32-bit AARCH64 instruction from buf - always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *in = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Accumulate from the most significant byte downwards.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | in[i];
  return insn;
}
4514
/* Emit the assembled instruction held in the global `inst' into the
   current frag, creating a fix-up for its relocation if one is pending.
   NEW_INST, when non-NULL, is attached to the fix-up so md_apply_fix can
   re-encode the instruction later.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand and flags so the
	     back-end can resolve them without a BFD reloc.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4548
/* Link together opcodes of the same name.  Each node points at one
   opcode table entry; all entries sharing a mnemonic form one list,
   keyed by that mnemonic in aarch64_ops_hsh.  */

struct templates
{
  aarch64_opcode *opcode;	/* One candidate template.  */
  struct templates *next;	/* Next template with the same mnemonic.  */
};

typedef struct templates templates;
4558
4559 static templates *
4560 lookup_mnemonic (const char *start, int len)
4561 {
4562 templates *templ = NULL;
4563
4564 templ = hash_find_n (aarch64_ops_hsh, start, len);
4565 return templ;
4566 }
4567
4568 /* Subroutine of md_assemble, responsible for looking up the primary
4569 opcode from the mnemonic the user wrote. STR points to the
4570 beginning of the mnemonic. */
4571
4572 static templates *
4573 opcode_lookup (char **str)
4574 {
4575 char *end, *base;
4576 const aarch64_cond *cond;
4577 char condname[16];
4578 int len;
4579
4580 /* Scan up to the end of the mnemonic, which must end in white space,
4581 '.', or end of string. */
4582 for (base = end = *str; is_part_of_name(*end); end++)
4583 if (*end == '.')
4584 break;
4585
4586 if (end == base)
4587 return 0;
4588
4589 inst.cond = COND_ALWAYS;
4590
4591 /* Handle a possible condition. */
4592 if (end[0] == '.')
4593 {
4594 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4595 if (cond)
4596 {
4597 inst.cond = cond->value;
4598 *str = end + 3;
4599 }
4600 else
4601 {
4602 *str = end;
4603 return 0;
4604 }
4605 }
4606 else
4607 *str = end;
4608
4609 len = end - base;
4610
4611 if (inst.cond == COND_ALWAYS)
4612 {
4613 /* Look for unaffixed mnemonic. */
4614 return lookup_mnemonic (base, len);
4615 }
4616 else if (len <= 13)
4617 {
4618 /* append ".c" to mnemonic if conditional */
4619 memcpy (condname, base, len);
4620 memcpy (condname + len, ".c", 2);
4621 base = condname;
4622 len += 2;
4623 return lookup_mnemonic (base, len);
4624 }
4625
4626 return NULL;
4627 }
4628
4629 /* Internal helper routine converting a vector neon_type_el structure
4630 *VECTYPE to a corresponding operand qualifier. */
4631
4632 static inline aarch64_opnd_qualifier_t
4633 vectype_to_qualifier (const struct neon_type_el *vectype)
4634 {
4635 /* Element size in bytes indexed by neon_el_type. */
4636 const unsigned char ele_size[5]
4637 = {1, 2, 4, 8, 16};
4638
4639 if (!vectype->defined || vectype->type == NT_invtype)
4640 goto vectype_conversion_fail;
4641
4642 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4643
4644 if (vectype->defined & NTA_HASINDEX)
4645 /* Vector element register. */
4646 return AARCH64_OPND_QLF_S_B + vectype->type;
4647 else
4648 {
4649 /* Vector register. */
4650 int reg_size = ele_size[vectype->type] * vectype->width;
4651 unsigned offset;
4652 if (reg_size != 16 && reg_size != 8)
4653 goto vectype_conversion_fail;
4654 /* The conversion is calculated based on the relation of the order of
4655 qualifiers to the vector element size and vector register size. */
4656 offset = (vectype->type == NT_q)
4657 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4658 gas_assert (offset <= 8);
4659 return AARCH64_OPND_QLF_V_8B + offset;
4660 }
4661
4662 vectype_conversion_fail:
4663 first_error (_("bad vector arrangement type"));
4664 return AARCH64_OPND_QLF_NIL;
4665 }
4666
4667 /* Process an optional operand that is found omitted from the assembly line.
4668 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4669 instruction's opcode entry while IDX is the index of this omitted operand.
4670 */
4671
4672 static void
4673 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4674 int idx, aarch64_opnd_info *operand)
4675 {
4676 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4677 gas_assert (optional_operand_p (opcode, idx));
4678 gas_assert (!operand->present);
4679
4680 switch (type)
4681 {
4682 case AARCH64_OPND_Rd:
4683 case AARCH64_OPND_Rn:
4684 case AARCH64_OPND_Rm:
4685 case AARCH64_OPND_Rt:
4686 case AARCH64_OPND_Rt2:
4687 case AARCH64_OPND_Rs:
4688 case AARCH64_OPND_Ra:
4689 case AARCH64_OPND_Rt_SYS:
4690 case AARCH64_OPND_Rd_SP:
4691 case AARCH64_OPND_Rn_SP:
4692 case AARCH64_OPND_Fd:
4693 case AARCH64_OPND_Fn:
4694 case AARCH64_OPND_Fm:
4695 case AARCH64_OPND_Fa:
4696 case AARCH64_OPND_Ft:
4697 case AARCH64_OPND_Ft2:
4698 case AARCH64_OPND_Sd:
4699 case AARCH64_OPND_Sn:
4700 case AARCH64_OPND_Sm:
4701 case AARCH64_OPND_Vd:
4702 case AARCH64_OPND_Vn:
4703 case AARCH64_OPND_Vm:
4704 case AARCH64_OPND_VdD1:
4705 case AARCH64_OPND_VnD1:
4706 operand->reg.regno = default_value;
4707 break;
4708
4709 case AARCH64_OPND_Ed:
4710 case AARCH64_OPND_En:
4711 case AARCH64_OPND_Em:
4712 operand->reglane.regno = default_value;
4713 break;
4714
4715 case AARCH64_OPND_IDX:
4716 case AARCH64_OPND_BIT_NUM:
4717 case AARCH64_OPND_IMMR:
4718 case AARCH64_OPND_IMMS:
4719 case AARCH64_OPND_SHLL_IMM:
4720 case AARCH64_OPND_IMM_VLSL:
4721 case AARCH64_OPND_IMM_VLSR:
4722 case AARCH64_OPND_CCMP_IMM:
4723 case AARCH64_OPND_FBITS:
4724 case AARCH64_OPND_UIMM4:
4725 case AARCH64_OPND_UIMM3_OP1:
4726 case AARCH64_OPND_UIMM3_OP2:
4727 case AARCH64_OPND_IMM:
4728 case AARCH64_OPND_WIDTH:
4729 case AARCH64_OPND_UIMM7:
4730 case AARCH64_OPND_NZCV:
4731 operand->imm.value = default_value;
4732 break;
4733
4734 case AARCH64_OPND_EXCEPTION:
4735 inst.reloc.type = BFD_RELOC_UNUSED;
4736 break;
4737
4738 case AARCH64_OPND_BARRIER_ISB:
4739 operand->barrier = aarch64_barrier_options + default_value;
4740
4741 default:
4742 break;
4743 }
4744 }
4745
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE. */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit (Wd) or 64-bit (Xd) destination register?  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK only inserts one 16-bit half; relocation types that
     sign-extend into (or depend on) the other halves are rejected.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the implicit LSL shift amount from the "G" group number of
     the relocation: G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits beyond a 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS. */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser. */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
4836
/* A primitive log calculator.  Return log2 of SIZE, which must be a
   power of two in [1, 16]; assert (and return -1) otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 for sizes 1..16; (unsigned char) -1 marks non-powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also guard SIZE == 0: the original check (size > 16 only) let
     ls[size - 1] read out of bounds at index -1.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
4852
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real relocation depends on the access size of the load/store,
   which is derived from the qualifier of the transferred register
   (operand 0), and on which of the three pseudo relocation types the
   parser recorded.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: plain LO12, TLSLD DTPREL_LO12, TLSLD DTPREL_LO12_NC.
     Columns: access size 1, 2, 4, 8, 16 bytes (log2 = 0..4).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand's qualifier is still undetermined, deduce
     it from operand 0's qualifier and the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows have no 128-bit entry (see BFD_RELOC_AARCH64_NONE
     above), so their access size must be at most 8 bytes.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calcuation
     below relies on this. */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
4911
4912 /* Check whether a register list REGINFO is valid. The registers must be
4913 numbered in increasing order (modulo 32), in increments of one or two.
4914
4915 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4916 increments of two.
4917
4918 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4919
4920 static bfd_boolean
4921 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4922 {
4923 uint32_t i, nb_regs, prev_regno, incr;
4924
4925 nb_regs = 1 + (reginfo & 0x3);
4926 reginfo >>= 2;
4927 prev_regno = reginfo & 0x1f;
4928 incr = accept_alternate ? 2 : 1;
4929
4930 for (i = 1; i < nb_regs; ++i)
4931 {
4932 uint32_t curr_regno;
4933 reginfo >>= 5;
4934 curr_regno = reginfo & 0x1f;
4935 if (curr_regno != ((prev_regno + incr) & 0x1f))
4936 return FALSE;
4937 prev_regno = curr_regno;
4938 }
4939
4940 return TRUE;
4941 }
4942
4943 /* Generic instruction operand parser. This does no encoding and no
4944 semantic validation; it merely squirrels values away in the inst
4945 structure. Returns TRUE or FALSE depending on whether the
4946 specified grammar matched. */
4947
4948 static bfd_boolean
4949 parse_operands (char *str, const aarch64_opcode *opcode)
4950 {
4951 int i;
4952 char *backtrack_pos = 0;
4953 const enum aarch64_opnd *operands = opcode->operands;
4954
4955 clear_error ();
4956 skip_whitespace (str);
4957
4958 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4959 {
4960 int64_t val;
4961 int isreg32, isregzero;
4962 int comma_skipped_p = 0;
4963 aarch64_reg_type rtype;
4964 struct neon_type_el vectype;
4965 aarch64_opnd_info *info = &inst.base.operands[i];
4966
4967 DEBUG_TRACE ("parse operand %d", i);
4968
4969 /* Assign the operand code. */
4970 info->type = operands[i];
4971
4972 if (optional_operand_p (opcode, i))
4973 {
4974 /* Remember where we are in case we need to backtrack. */
4975 gas_assert (!backtrack_pos);
4976 backtrack_pos = str;
4977 }
4978
4979 /* Expect comma between operands; the backtrack mechanizm will take
4980 care of cases of omitted optional operand. */
4981 if (i > 0 && ! skip_past_char (&str, ','))
4982 {
4983 set_syntax_error (_("comma expected between operands"));
4984 goto failure;
4985 }
4986 else
4987 comma_skipped_p = 1;
4988
4989 switch (operands[i])
4990 {
4991 case AARCH64_OPND_Rd:
4992 case AARCH64_OPND_Rn:
4993 case AARCH64_OPND_Rm:
4994 case AARCH64_OPND_Rt:
4995 case AARCH64_OPND_Rt2:
4996 case AARCH64_OPND_Rs:
4997 case AARCH64_OPND_Ra:
4998 case AARCH64_OPND_Rt_SYS:
4999 case AARCH64_OPND_PAIRREG:
5000 po_int_reg_or_fail (1, 0);
5001 break;
5002
5003 case AARCH64_OPND_Rd_SP:
5004 case AARCH64_OPND_Rn_SP:
5005 po_int_reg_or_fail (0, 1);
5006 break;
5007
5008 case AARCH64_OPND_Rm_EXT:
5009 case AARCH64_OPND_Rm_SFT:
5010 po_misc_or_fail (parse_shifter_operand
5011 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5012 ? SHIFTED_ARITH_IMM
5013 : SHIFTED_LOGIC_IMM)));
5014 if (!info->shifter.operator_present)
5015 {
5016 /* Default to LSL if not present. Libopcodes prefers shifter
5017 kind to be explicit. */
5018 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5019 info->shifter.kind = AARCH64_MOD_LSL;
5020 /* For Rm_EXT, libopcodes will carry out further check on whether
5021 or not stack pointer is used in the instruction (Recall that
5022 "the extend operator is not optional unless at least one of
5023 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5024 }
5025 break;
5026
5027 case AARCH64_OPND_Fd:
5028 case AARCH64_OPND_Fn:
5029 case AARCH64_OPND_Fm:
5030 case AARCH64_OPND_Fa:
5031 case AARCH64_OPND_Ft:
5032 case AARCH64_OPND_Ft2:
5033 case AARCH64_OPND_Sd:
5034 case AARCH64_OPND_Sn:
5035 case AARCH64_OPND_Sm:
5036 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5037 if (val == PARSE_FAIL)
5038 {
5039 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5040 goto failure;
5041 }
5042 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5043
5044 info->reg.regno = val;
5045 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5046 break;
5047
5048 case AARCH64_OPND_Vd:
5049 case AARCH64_OPND_Vn:
5050 case AARCH64_OPND_Vm:
5051 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5052 if (val == PARSE_FAIL)
5053 {
5054 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5055 goto failure;
5056 }
5057 if (vectype.defined & NTA_HASINDEX)
5058 goto failure;
5059
5060 info->reg.regno = val;
5061 info->qualifier = vectype_to_qualifier (&vectype);
5062 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5063 goto failure;
5064 break;
5065
5066 case AARCH64_OPND_VdD1:
5067 case AARCH64_OPND_VnD1:
5068 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5069 if (val == PARSE_FAIL)
5070 {
5071 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5072 goto failure;
5073 }
5074 if (vectype.type != NT_d || vectype.index != 1)
5075 {
5076 set_fatal_syntax_error
5077 (_("the top half of a 128-bit FP/SIMD register is expected"));
5078 goto failure;
5079 }
5080 info->reg.regno = val;
5081 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5082 here; it is correct for the purpose of encoding/decoding since
5083 only the register number is explicitly encoded in the related
5084 instructions, although this appears a bit hacky. */
5085 info->qualifier = AARCH64_OPND_QLF_S_D;
5086 break;
5087
5088 case AARCH64_OPND_Ed:
5089 case AARCH64_OPND_En:
5090 case AARCH64_OPND_Em:
5091 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5092 if (val == PARSE_FAIL)
5093 {
5094 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5095 goto failure;
5096 }
5097 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5098 goto failure;
5099
5100 info->reglane.regno = val;
5101 info->reglane.index = vectype.index;
5102 info->qualifier = vectype_to_qualifier (&vectype);
5103 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5104 goto failure;
5105 break;
5106
5107 case AARCH64_OPND_LVn:
5108 case AARCH64_OPND_LVt:
5109 case AARCH64_OPND_LVt_AL:
5110 case AARCH64_OPND_LEt:
5111 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5112 goto failure;
5113 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5114 {
5115 set_fatal_syntax_error (_("invalid register list"));
5116 goto failure;
5117 }
5118 info->reglist.first_regno = (val >> 2) & 0x1f;
5119 info->reglist.num_regs = (val & 0x3) + 1;
5120 if (operands[i] == AARCH64_OPND_LEt)
5121 {
5122 if (!(vectype.defined & NTA_HASINDEX))
5123 goto failure;
5124 info->reglist.has_index = 1;
5125 info->reglist.index = vectype.index;
5126 }
5127 else if (!(vectype.defined & NTA_HASTYPE))
5128 goto failure;
5129 info->qualifier = vectype_to_qualifier (&vectype);
5130 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5131 goto failure;
5132 break;
5133
5134 case AARCH64_OPND_Cn:
5135 case AARCH64_OPND_Cm:
5136 po_reg_or_fail (REG_TYPE_CN);
5137 if (val > 15)
5138 {
5139 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5140 goto failure;
5141 }
5142 inst.base.operands[i].reg.regno = val;
5143 break;
5144
5145 case AARCH64_OPND_SHLL_IMM:
5146 case AARCH64_OPND_IMM_VLSR:
5147 po_imm_or_fail (1, 64);
5148 info->imm.value = val;
5149 break;
5150
5151 case AARCH64_OPND_CCMP_IMM:
5152 case AARCH64_OPND_FBITS:
5153 case AARCH64_OPND_UIMM4:
5154 case AARCH64_OPND_UIMM3_OP1:
5155 case AARCH64_OPND_UIMM3_OP2:
5156 case AARCH64_OPND_IMM_VLSL:
5157 case AARCH64_OPND_IMM:
5158 case AARCH64_OPND_WIDTH:
5159 po_imm_nc_or_fail ();
5160 info->imm.value = val;
5161 break;
5162
5163 case AARCH64_OPND_UIMM7:
5164 po_imm_or_fail (0, 127);
5165 info->imm.value = val;
5166 break;
5167
5168 case AARCH64_OPND_IDX:
5169 case AARCH64_OPND_BIT_NUM:
5170 case AARCH64_OPND_IMMR:
5171 case AARCH64_OPND_IMMS:
5172 po_imm_or_fail (0, 63);
5173 info->imm.value = val;
5174 break;
5175
5176 case AARCH64_OPND_IMM0:
5177 po_imm_nc_or_fail ();
5178 if (val != 0)
5179 {
5180 set_fatal_syntax_error (_("immediate zero expected"));
5181 goto failure;
5182 }
5183 info->imm.value = 0;
5184 break;
5185
5186 case AARCH64_OPND_FPIMM0:
5187 {
5188 int qfloat;
5189 bfd_boolean res1 = FALSE, res2 = FALSE;
5190 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5191 it is probably not worth the effort to support it. */
5192 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5193 && !(res2 = parse_constant_immediate (&str, &val)))
5194 goto failure;
5195 if ((res1 && qfloat == 0) || (res2 && val == 0))
5196 {
5197 info->imm.value = 0;
5198 info->imm.is_fp = 1;
5199 break;
5200 }
5201 set_fatal_syntax_error (_("immediate zero expected"));
5202 goto failure;
5203 }
5204
5205 case AARCH64_OPND_IMM_MOV:
5206 {
5207 char *saved = str;
5208 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5209 reg_name_p (str, REG_TYPE_VN))
5210 goto failure;
5211 str = saved;
5212 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5213 GE_OPT_PREFIX, 1));
5214 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5215 later. fix_mov_imm_insn will try to determine a machine
5216 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5217 message if the immediate cannot be moved by a single
5218 instruction. */
5219 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5220 inst.base.operands[i].skip = 1;
5221 }
5222 break;
5223
5224 case AARCH64_OPND_SIMD_IMM:
5225 case AARCH64_OPND_SIMD_IMM_SFT:
5226 if (! parse_big_immediate (&str, &val))
5227 goto failure;
5228 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5229 /* addr_off_p */ 0,
5230 /* need_libopcodes_p */ 1,
5231 /* skip_p */ 1);
5232 /* Parse shift.
5233 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5234 shift, we don't check it here; we leave the checking to
5235 the libopcodes (operand_general_constraint_met_p). By
5236 doing this, we achieve better diagnostics. */
5237 if (skip_past_comma (&str)
5238 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5239 goto failure;
5240 if (!info->shifter.operator_present
5241 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5242 {
5243 /* Default to LSL if not present. Libopcodes prefers shifter
5244 kind to be explicit. */
5245 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5246 info->shifter.kind = AARCH64_MOD_LSL;
5247 }
5248 break;
5249
5250 case AARCH64_OPND_FPIMM:
5251 case AARCH64_OPND_SIMD_FPIMM:
5252 {
5253 int qfloat;
5254 bfd_boolean dp_p
5255 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5256 == 8);
5257 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5258 goto failure;
5259 if (qfloat == 0)
5260 {
5261 set_fatal_syntax_error (_("invalid floating-point constant"));
5262 goto failure;
5263 }
5264 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5265 inst.base.operands[i].imm.is_fp = 1;
5266 }
5267 break;
5268
5269 case AARCH64_OPND_LIMM:
5270 po_misc_or_fail (parse_shifter_operand (&str, info,
5271 SHIFTED_LOGIC_IMM));
5272 if (info->shifter.operator_present)
5273 {
5274 set_fatal_syntax_error
5275 (_("shift not allowed for bitmask immediate"));
5276 goto failure;
5277 }
5278 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5279 /* addr_off_p */ 0,
5280 /* need_libopcodes_p */ 1,
5281 /* skip_p */ 1);
5282 break;
5283
5284 case AARCH64_OPND_AIMM:
5285 if (opcode->op == OP_ADD)
5286 /* ADD may have relocation types. */
5287 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5288 SHIFTED_ARITH_IMM));
5289 else
5290 po_misc_or_fail (parse_shifter_operand (&str, info,
5291 SHIFTED_ARITH_IMM));
5292 switch (inst.reloc.type)
5293 {
5294 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5295 info->shifter.amount = 12;
5296 break;
5297 case BFD_RELOC_UNUSED:
5298 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5299 if (info->shifter.kind != AARCH64_MOD_NONE)
5300 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5301 inst.reloc.pc_rel = 0;
5302 break;
5303 default:
5304 break;
5305 }
5306 info->imm.value = 0;
5307 if (!info->shifter.operator_present)
5308 {
5309 /* Default to LSL if not present. Libopcodes prefers shifter
5310 kind to be explicit. */
5311 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5312 info->shifter.kind = AARCH64_MOD_LSL;
5313 }
5314 break;
5315
5316 case AARCH64_OPND_HALF:
5317 {
5318 /* #<imm16> or relocation. */
5319 int internal_fixup_p;
5320 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5321 if (internal_fixup_p)
5322 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5323 skip_whitespace (str);
5324 if (skip_past_comma (&str))
5325 {
5326 /* {, LSL #<shift>} */
5327 if (! aarch64_gas_internal_fixup_p ())
5328 {
5329 set_fatal_syntax_error (_("can't mix relocation modifier "
5330 "with explicit shift"));
5331 goto failure;
5332 }
5333 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5334 }
5335 else
5336 inst.base.operands[i].shifter.amount = 0;
5337 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5338 inst.base.operands[i].imm.value = 0;
5339 if (! process_movw_reloc_info ())
5340 goto failure;
5341 }
5342 break;
5343
5344 case AARCH64_OPND_EXCEPTION:
5345 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5346 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5347 /* addr_off_p */ 0,
5348 /* need_libopcodes_p */ 0,
5349 /* skip_p */ 1);
5350 break;
5351
5352 case AARCH64_OPND_NZCV:
5353 {
5354 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5355 if (nzcv != NULL)
5356 {
5357 str += 4;
5358 info->imm.value = nzcv->value;
5359 break;
5360 }
5361 po_imm_or_fail (0, 15);
5362 info->imm.value = val;
5363 }
5364 break;
5365
5366 case AARCH64_OPND_COND:
5367 case AARCH64_OPND_COND1:
5368 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5369 str += 2;
5370 if (info->cond == NULL)
5371 {
5372 set_syntax_error (_("invalid condition"));
5373 goto failure;
5374 }
5375 else if (operands[i] == AARCH64_OPND_COND1
5376 && (info->cond->value & 0xe) == 0xe)
5377 {
5378 /* Not allow AL or NV. */
5379 set_default_error ();
5380 goto failure;
5381 }
5382 break;
5383
5384 case AARCH64_OPND_ADDR_ADRP:
5385 po_misc_or_fail (parse_adrp (&str));
5386 /* Clear the value as operand needs to be relocated. */
5387 info->imm.value = 0;
5388 break;
5389
5390 case AARCH64_OPND_ADDR_PCREL14:
5391 case AARCH64_OPND_ADDR_PCREL19:
5392 case AARCH64_OPND_ADDR_PCREL21:
5393 case AARCH64_OPND_ADDR_PCREL26:
5394 po_misc_or_fail (parse_address_reloc (&str, info));
5395 if (!info->addr.pcrel)
5396 {
5397 set_syntax_error (_("invalid pc-relative address"));
5398 goto failure;
5399 }
5400 if (inst.gen_lit_pool
5401 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5402 {
5403 /* Only permit "=value" in the literal load instructions.
5404 The literal will be generated by programmer_friendly_fixup. */
5405 set_syntax_error (_("invalid use of \"=immediate\""));
5406 goto failure;
5407 }
5408 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5409 {
5410 set_syntax_error (_("unrecognized relocation suffix"));
5411 goto failure;
5412 }
5413 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5414 {
5415 info->imm.value = inst.reloc.exp.X_add_number;
5416 inst.reloc.type = BFD_RELOC_UNUSED;
5417 }
5418 else
5419 {
5420 info->imm.value = 0;
5421 if (inst.reloc.type == BFD_RELOC_UNUSED)
5422 switch (opcode->iclass)
5423 {
5424 case compbranch:
5425 case condbranch:
5426 /* e.g. CBZ or B.COND */
5427 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5428 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5429 break;
5430 case testbranch:
5431 /* e.g. TBZ */
5432 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5433 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5434 break;
5435 case branch_imm:
5436 /* e.g. B or BL */
5437 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5438 inst.reloc.type =
5439 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5440 : BFD_RELOC_AARCH64_JUMP26;
5441 break;
5442 case loadlit:
5443 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5444 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5445 break;
5446 case pcreladdr:
5447 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5448 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5449 break;
5450 default:
5451 gas_assert (0);
5452 abort ();
5453 }
5454 inst.reloc.pc_rel = 1;
5455 }
5456 break;
5457
5458 case AARCH64_OPND_ADDR_SIMPLE:
5459 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5460 /* [<Xn|SP>{, #<simm>}] */
5461 po_char_or_fail ('[');
5462 po_reg_or_fail (REG_TYPE_R64_SP);
5463 /* Accept optional ", #0". */
5464 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5465 && skip_past_char (&str, ','))
5466 {
5467 skip_past_char (&str, '#');
5468 if (! skip_past_char (&str, '0'))
5469 {
5470 set_fatal_syntax_error
5471 (_("the optional immediate offset can only be 0"));
5472 goto failure;
5473 }
5474 }
5475 po_char_or_fail (']');
5476 info->addr.base_regno = val;
5477 break;
5478
5479 case AARCH64_OPND_ADDR_REGOFF:
5480 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5481 po_misc_or_fail (parse_address (&str, info, 0));
5482 if (info->addr.pcrel || !info->addr.offset.is_reg
5483 || !info->addr.preind || info->addr.postind
5484 || info->addr.writeback)
5485 {
5486 set_syntax_error (_("invalid addressing mode"));
5487 goto failure;
5488 }
5489 if (!info->shifter.operator_present)
5490 {
5491 /* Default to LSL if not present. Libopcodes prefers shifter
5492 kind to be explicit. */
5493 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5494 info->shifter.kind = AARCH64_MOD_LSL;
5495 }
5496 /* Qualifier to be deduced by libopcodes. */
5497 break;
5498
5499 case AARCH64_OPND_ADDR_SIMM7:
5500 po_misc_or_fail (parse_address (&str, info, 0));
5501 if (info->addr.pcrel || info->addr.offset.is_reg
5502 || (!info->addr.preind && !info->addr.postind))
5503 {
5504 set_syntax_error (_("invalid addressing mode"));
5505 goto failure;
5506 }
5507 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5508 /* addr_off_p */ 1,
5509 /* need_libopcodes_p */ 1,
5510 /* skip_p */ 0);
5511 break;
5512
5513 case AARCH64_OPND_ADDR_SIMM9:
5514 case AARCH64_OPND_ADDR_SIMM9_2:
5515 po_misc_or_fail (parse_address_reloc (&str, info));
5516 if (info->addr.pcrel || info->addr.offset.is_reg
5517 || (!info->addr.preind && !info->addr.postind)
5518 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5519 && info->addr.writeback))
5520 {
5521 set_syntax_error (_("invalid addressing mode"));
5522 goto failure;
5523 }
5524 if (inst.reloc.type != BFD_RELOC_UNUSED)
5525 {
5526 set_syntax_error (_("relocation not allowed"));
5527 goto failure;
5528 }
5529 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5530 /* addr_off_p */ 1,
5531 /* need_libopcodes_p */ 1,
5532 /* skip_p */ 0);
5533 break;
5534
5535 case AARCH64_OPND_ADDR_UIMM12:
5536 po_misc_or_fail (parse_address_reloc (&str, info));
5537 if (info->addr.pcrel || info->addr.offset.is_reg
5538 || !info->addr.preind || info->addr.writeback)
5539 {
5540 set_syntax_error (_("invalid addressing mode"));
5541 goto failure;
5542 }
5543 if (inst.reloc.type == BFD_RELOC_UNUSED)
5544 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5545 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5546 || (inst.reloc.type
5547 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5548 || (inst.reloc.type
5549 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5550 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5551 /* Leave qualifier to be determined by libopcodes. */
5552 break;
5553
5554 case AARCH64_OPND_SIMD_ADDR_POST:
5555 /* [<Xn|SP>], <Xm|#<amount>> */
5556 po_misc_or_fail (parse_address (&str, info, 1));
5557 if (!info->addr.postind || !info->addr.writeback)
5558 {
5559 set_syntax_error (_("invalid addressing mode"));
5560 goto failure;
5561 }
5562 if (!info->addr.offset.is_reg)
5563 {
5564 if (inst.reloc.exp.X_op == O_constant)
5565 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5566 else
5567 {
5568 set_fatal_syntax_error
5569 (_("writeback value should be an immediate constant"));
5570 goto failure;
5571 }
5572 }
5573 /* No qualifier. */
5574 break;
5575
5576 case AARCH64_OPND_SYSREG:
5577 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5578 == PARSE_FAIL)
5579 {
5580 set_syntax_error (_("unknown or missing system register name"));
5581 goto failure;
5582 }
5583 inst.base.operands[i].sysreg = val;
5584 break;
5585
5586 case AARCH64_OPND_PSTATEFIELD:
5587 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5588 == PARSE_FAIL)
5589 {
5590 set_syntax_error (_("unknown or missing PSTATE field name"));
5591 goto failure;
5592 }
5593 inst.base.operands[i].pstatefield = val;
5594 break;
5595
5596 case AARCH64_OPND_SYSREG_IC:
5597 inst.base.operands[i].sysins_op =
5598 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5599 goto sys_reg_ins;
5600 case AARCH64_OPND_SYSREG_DC:
5601 inst.base.operands[i].sysins_op =
5602 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5603 goto sys_reg_ins;
5604 case AARCH64_OPND_SYSREG_AT:
5605 inst.base.operands[i].sysins_op =
5606 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5607 goto sys_reg_ins;
5608 case AARCH64_OPND_SYSREG_TLBI:
5609 inst.base.operands[i].sysins_op =
5610 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5611 sys_reg_ins:
5612 if (inst.base.operands[i].sysins_op == NULL)
5613 {
5614 set_fatal_syntax_error ( _("unknown or missing operation name"));
5615 goto failure;
5616 }
5617 break;
5618
5619 case AARCH64_OPND_BARRIER:
5620 case AARCH64_OPND_BARRIER_ISB:
5621 val = parse_barrier (&str);
5622 if (val != PARSE_FAIL
5623 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5624 {
5625 /* ISB only accepts options name 'sy'. */
5626 set_syntax_error
5627 (_("the specified option is not accepted in ISB"));
5628 /* Turn off backtrack as this optional operand is present. */
5629 backtrack_pos = 0;
5630 goto failure;
5631 }
5632 /* This is an extension to accept a 0..15 immediate. */
5633 if (val == PARSE_FAIL)
5634 po_imm_or_fail (0, 15);
5635 info->barrier = aarch64_barrier_options + val;
5636 break;
5637
5638 case AARCH64_OPND_PRFOP:
5639 val = parse_pldop (&str);
5640 /* This is an extension to accept a 0..31 immediate. */
5641 if (val == PARSE_FAIL)
5642 po_imm_or_fail (0, 31);
5643 inst.base.operands[i].prfop = aarch64_prfops + val;
5644 break;
5645
5646 default:
5647 as_fatal (_("unhandled operand code %d"), operands[i]);
5648 }
5649
5650 /* If we get here, this operand was successfully parsed. */
5651 inst.base.operands[i].present = 1;
5652 continue;
5653
5654 failure:
5655 /* The parse routine should already have set the error, but in case
5656 not, set a default one here. */
5657 if (! error_p ())
5658 set_default_error ();
5659
5660 if (! backtrack_pos)
5661 goto parse_operands_return;
5662
5663 {
5664 /* We reach here because this operand is marked as optional, and
5665 either no operand was supplied or the operand was supplied but it
5666 was syntactically incorrect. In the latter case we report an
5667 error. In the former case we perform a few more checks before
5668 dropping through to the code to insert the default operand. */
5669
5670 char *tmp = backtrack_pos;
5671 char endchar = END_OF_INSN;
5672
5673 if (i != (aarch64_num_of_operands (opcode) - 1))
5674 endchar = ',';
5675 skip_past_char (&tmp, ',');
5676
5677 if (*tmp != endchar)
5678 /* The user has supplied an operand in the wrong format. */
5679 goto parse_operands_return;
5680
5681 /* Make sure there is not a comma before the optional operand.
5682 For example the fifth operand of 'sys' is optional:
5683
5684 sys #0,c0,c0,#0, <--- wrong
5685 sys #0,c0,c0,#0 <--- correct. */
5686 if (comma_skipped_p && i && endchar == END_OF_INSN)
5687 {
5688 set_fatal_syntax_error
5689 (_("unexpected comma before the omitted optional operand"));
5690 goto parse_operands_return;
5691 }
5692 }
5693
5694 /* Reaching here means we are dealing with an optional operand that is
5695 omitted from the assembly line. */
5696 gas_assert (optional_operand_p (opcode, i));
5697 info->present = 0;
5698 process_omitted_operand (operands[i], opcode, i, info);
5699
5700 /* Try again, skipping the optional operand at backtrack_pos. */
5701 str = backtrack_pos;
5702 backtrack_pos = 0;
5703
5704 /* Clear any error record after the omitted optional operand has been
5705 successfully handled. */
5706 clear_error ();
5707 }
5708
5709 /* Check if we have parsed all the operands. */
5710 if (*str != '\0' && ! error_p ())
5711 {
5712 /* Set I to the index of the last present operand; this is
5713 for the purpose of diagnostics. */
5714 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5715 ;
5716 set_fatal_syntax_error
5717 (_("unexpected characters following instruction"));
5718 }
5719
5720 parse_operands_return:
5721
5722 if (error_p ())
5723 {
5724 DEBUG_TRACE ("parsing FAIL: %s - %s",
5725 operand_mismatch_kind_names[get_error_kind ()],
5726 get_error_message ());
5727 /* Record the operand error properly; this is useful when there
5728 are multiple instruction templates for a mnemonic name, so that
5729 later on, we can select the error that most closely describes
5730 the problem. */
5731 record_operand_error (opcode, i, get_error_kind (),
5732 get_error_message ());
5733 return FALSE;
5734 }
5735 else
5736 {
5737 DEBUG_TRACE ("parsing SUCCESS");
5738 return TRUE;
5739 }
5740 }
5741
/* Apply programmer-friendly fix-ups to INSTR while keeping libopcodes
   happy, i.e. libopcodes only accepts the preferred architectural syntax.
   For example, accept an X register where the preferred form writes a W
   register, or expand the "=value" literal-load convenience notation.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
         Test and Branch Not Zero: conditionally jumps to label if bit number
         uimm6 in register Xn is not zero.  The bit number implies the width of
         the register, which may be written and should be disassembled as Wn if
         uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register the bit number must fit in [0, 31].  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Promote to the X qualifier for encoding; the bit number alone
	     carries the width information.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
         As a convenience assemblers will typically permit the notation
         "=value" in conjunction with the pc-relative literal load instructions
         to automatically place an immediate value or symbolic address in a
         nearby literal pool and generate a hidden label which references it.
         ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Pool entry size follows the transfer register size, except
	     LDRSW always loads a 32-bit word from the pool.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
         Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
         for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
         encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
         A programmer-friendly assembler should accept a destination Xd in
         place of Wd, however that is not the preferred form for disassembly.
         */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Silently demote the destination to the preferred W form.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
5849
/* Check for loads and stores that will cause unpredictable behavior
   and issue a warning (not an error) for each one found in INSTR.
   STR is the original assembly text, used verbatim in the warning.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 The base register SP (operand 1's address base) is exempted.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;
    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions the address is operand 2, so both transfer
	 registers (operands 0 and 1) are checked against its base.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  Bit 22 of the
	 encoding distinguishes loads from stores here -- presumably the
	 L bit of the load/store pair encodings; confirm against the ARM
	 ARM if extending this.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;
    default:
      break;
    }
}
5892
5893 /* A wrapper function to interface with libopcodes on encoding and
5894 record the error message if there is any.
5895
5896 Return TRUE on success; otherwise return FALSE. */
5897
5898 static bfd_boolean
5899 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5900 aarch64_insn *code)
5901 {
5902 aarch64_operand_error error_info;
5903 error_info.kind = AARCH64_OPDE_NIL;
5904 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5905 return TRUE;
5906 else
5907 {
5908 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5909 record_operand_error_info (opcode, &error_info);
5910 return FALSE;
5911 }
5912 }
5913
5914 #ifdef DEBUG_AARCH64
5915 static inline void
5916 dump_opcode_operands (const aarch64_opcode *opcode)
5917 {
5918 int i = 0;
5919 while (opcode->operands[i] != AARCH64_OPND_NIL)
5920 {
5921 aarch64_verbose ("\t\t opnd%d: %s", i,
5922 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5923 ? aarch64_get_operand_name (opcode->operands[i])
5924 : aarch64_get_operand_desc (opcode->operands[i]));
5925 ++i;
5926 }
5927 }
5928 #endif /* DEBUG_AARCH64 */
5929
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Errors are reported via as_bad or
   accumulated with record_operand_error and emitted at the end.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic by opcode_lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
         the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset of
     the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; on full success emit the instruction
	 and return.  Any failure falls through to try the next template
	 sharing this mnemonic.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.
	         NOTE(review): gas's xmalloc normally aborts on OOM itself,
	         so the NULL check below looks like belt-and-braces.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
		abort ();
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* Reset the parse state before trying the next candidate opcode.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6064
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each input line; forget the label seen on the
   previous line so md_assemble only re-anchors labels from this line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
6072
/* Remember SYM as the most recent label (md_assemble re-anchors it to
   the next instruction) and emit its DWARF-2 line information.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6080
6081 int
6082 aarch64_data_in_code (void)
6083 {
6084 if (!strncmp (input_line_pointer + 1, "data:", 5))
6085 {
6086 *input_line_pointer = '/';
6087 input_line_pointer += 5;
6088 *input_line_pointer = 0;
6089 return 1;
6090 }
6091
6092 return 0;
6093 }
6094
/* Strip a trailing "/data" marker from NAME in place, if present, and
   return NAME.  Names of five characters or fewer are never altered.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6105 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF makes one table entry; REGNUM pastes the register number onto
   the prefix; REGSET31 expands to entries 0..30 and REGSET to 0..31.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET31(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note x31/w31 are deliberately excluded: register
     31 is only reachable through the named SP/ZR aliases below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Register number 31: stack pointer aliases...  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* ...and zero-register aliases.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Coprocessor register numbers.  */
  REGSET (c, CN), REGSET (C, CN),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),
};

#undef REGDEF
#undef REGNUM
#undef REGSET
6162
/* Build the table of all 16 NZCV condition-flag mask spellings.  An
   uppercase letter selects the corresponding flag; B packs the four
   flags into a nibble as N:Z:C:V from bit 3 down to bit 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6200 \f
6201 /* MD interface: bits in the object file. */
6202
6203 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6204 for use in the a.out file, and stores them in the array pointed to by buf.
6205 This knows about the endian-ness of the target machine and does
6206 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6207 2 (short) and 4 (long) Floating numbers are put out as a series of
6208 LITTLENUMS (shorts, here at least). */
6209
6210 void
6211 md_number_to_chars (char *buf, valueT val, int n)
6212 {
6213 if (target_big_endian)
6214 number_to_chars_bigendian (buf, val, n);
6215 else
6216 number_to_chars_littleendian (buf, val, n);
6217 }
6218
6219 /* MD interface: Sections. */
6220
6221 /* Estimate the size of a frag before relaxing. Assume everything fits in
6222 4 bytes. */
6223
6224 int
6225 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6226 {
6227 fragp->fr_var = 4;
6228 return 4;
6229 }
6230
/* Round up a section size to the appropriate boundary.  AArch64 needs
   no extra padding, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
6238
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding required, starting at the end of the fixed part.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Number of zero bytes needed to reach a 4-byte boundary.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      /* Mark the zero padding as data for the mapping symbols.  */
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The NOP is the repeating (fr_var) part of the frag.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6296
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; mapping symbols are ELF-only.  */
}
6309
6310 #else /* OBJ_ELF is defined. */
6311 void
6312 aarch64_init_frag (fragS * fragP, int max_chars)
6313 {
6314 /* Record a mapping symbol for alignment frags. We will delete this
6315 later if the alignment ends up empty. */
6316 if (!fragP->tc_frag_data.recorded)
6317 fragP->tc_frag_data.recorded = 1;
6318
6319 switch (fragP->fr_type)
6320 {
6321 case rs_align:
6322 case rs_align_test:
6323 case rs_fill:
6324 mapping_state_2 (MAP_DATA, max_chars);
6325 break;
6326 case rs_align_code:
6327 mapping_state_2 (MAP_INSN, max_chars);
6328 break;
6329 default:
6330 break;
6331 }
6332 }
6333 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the initial CFA is the stack pointer with a zero offset.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6341 #endif /* OBJ_ELF */
6342
6343 /* Convert REGNAME to a DWARF-2 register number. */
6344
6345 int
6346 tc_aarch64_regname_to_dw2regnum (char *regname)
6347 {
6348 const reg_entry *reg = parse_reg (&regname);
6349 if (reg == NULL)
6350 return -1;
6351
6352 switch (reg->type)
6353 {
6354 case REG_TYPE_SP_32:
6355 case REG_TYPE_SP_64:
6356 case REG_TYPE_R_32:
6357 case REG_TYPE_R_64:
6358 return reg->number;
6359
6360 case REG_TYPE_FP_B:
6361 case REG_TYPE_FP_H:
6362 case REG_TYPE_FP_S:
6363 case REG_TYPE_FP_D:
6364 case REG_TYPE_FP_Q:
6365 return reg->number + 64;
6366
6367 default:
6368 break;
6369 }
6370 return -1;
6371 }
6372
/* Implement DWARF2_ADDR_SIZE: the byte width of an address in DWARF
   debug information.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
6384
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
6407
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of this name would conflict with the
	     one we are about to create.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
6433
6434 /* Return non-zero if the indicated VALUE has overflowed the maximum
6435 range expressible by a unsigned number with the indicated number of
6436 BITS. */
6437
6438 static bfd_boolean
6439 unsigned_overflow (valueT value, unsigned bits)
6440 {
6441 valueT lim;
6442 if (bits >= sizeof (valueT) * 8)
6443 return FALSE;
6444 lim = (valueT) 1 << bits;
6445 return (value >= lim);
6446 }
6447
6448
6449 /* Return non-zero if the indicated VALUE has overflowed the maximum
6450 range expressible by an signed number with the indicated number of
6451 BITS. */
6452
6453 static bfd_boolean
6454 signed_overflow (offsetT value, unsigned bits)
6455 {
6456 offsetT lim;
6457 if (bits >= sizeof (offsetT) * 8)
6458 return FALSE;
6459 lim = (offsetT) 1 << (bits - 1);
6460 return (value < -lim || value >= lim);
6461 }
6462
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     means there is no unscaled form to fall back to.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  /* NOTE(review): this trace text reads backwards -- the conversion
     performed here is STRB/LDRB -> STURB/LDURB.  */
  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
6524
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias with a bitmask immediate.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
6585
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate field; see encode_svc_imm.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12         Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12         Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12         Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12         Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is absorbed by flipping
	     between the ADD and SUB encodings.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are all zero and the shifted value still
	     fits in 12 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction through the
	 opcode library so the logical-immediate validity check runs.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled-offset form could not encode this offset; the
	     unscaled (e.g. LDUR/STUR) alternative succeeded instead.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
6760
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being fixed up inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* FIXUP_F_* flags stashed by the parser (e.g. explicit shift).  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit word-scaled PC-relative offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* ADR: 21-bit byte-granular PC-relative offset.  */
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B.cond / CBZ-class: 19-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ/TBNZ: 14-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B / BL: 26-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family all share the code at movw_common below; SCALE
       selects which 16-bit group of the value goes in the imm16 field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolve to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolve to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor call sequences; no bits
	 in the instruction need patching here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}
7130
7131 /* Translate internal representation of relocation info to BFD target
7132 format. */
7133
7134 arelent *
7135 tc_gen_reloc (asection * section, fixS * fixp)
7136 {
7137 arelent *reloc;
7138 bfd_reloc_code_real_type code;
7139
7140 reloc = xmalloc (sizeof (arelent));
7141
7142 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
7143 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7144 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7145
7146 if (fixp->fx_pcrel)
7147 {
7148 if (section->use_rela_p)
7149 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7150 else
7151 fixp->fx_offset = reloc->address;
7152 }
7153 reloc->addend = fixp->fx_offset;
7154
7155 code = fixp->fx_r_type;
7156 switch (code)
7157 {
7158 case BFD_RELOC_16:
7159 if (fixp->fx_pcrel)
7160 code = BFD_RELOC_16_PCREL;
7161 break;
7162
7163 case BFD_RELOC_32:
7164 if (fixp->fx_pcrel)
7165 code = BFD_RELOC_32_PCREL;
7166 break;
7167
7168 case BFD_RELOC_64:
7169 if (fixp->fx_pcrel)
7170 code = BFD_RELOC_64_PCREL;
7171 break;
7172
7173 default:
7174 break;
7175 }
7176
7177 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7178 if (reloc->howto == NULL)
7179 {
7180 as_bad_where (fixp->fx_file, fixp->fx_line,
7181 _
7182 ("cannot represent %s relocation in this object file format"),
7183 bfd_get_reloc_code_name (code));
7184 return NULL;
7185 }
7186
7187 return reloc;
7188 }
7189
7190 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7191
7192 void
7193 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7194 {
7195 bfd_reloc_code_real_type type;
7196 int pcrel = 0;
7197
7198 /* Pick a reloc.
7199 FIXME: @@ Should look at CPU word size. */
7200 switch (size)
7201 {
7202 case 1:
7203 type = BFD_RELOC_8;
7204 break;
7205 case 2:
7206 type = BFD_RELOC_16;
7207 break;
7208 case 4:
7209 type = BFD_RELOC_32;
7210 break;
7211 case 8:
7212 type = BFD_RELOC_64;
7213 break;
7214 default:
7215 as_bad (_("cannot do %u-byte relocation"), size);
7216 type = BFD_RELOC_UNUSED;
7217 break;
7218 }
7219
7220 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7221 }
7222
/* Decide whether a fixup must be kept as a relocation for the linker.
   Returns non-zero to force the relocation into the object file.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Fall back to the generic decision for everything else.  */
  return generic_force_reloc (fixp);
}
7310
7311 #ifdef OBJ_ELF
7312
7313 const char *
7314 elf64_aarch64_target_format (void)
7315 {
7316 if (strcmp (TARGET_OS, "cloudabi") == 0)
7317 {
7318 /* FIXME: What to do for ilp32_p ? */
7319 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7320 }
7321 if (target_big_endian)
7322 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7323 else
7324 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7325 }
7326
/* Target hook wrapper: delegate per-symbol frobbing to the generic ELF
   handler.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7332 #endif
7333
7334 /* MD interface: Finalization. */
7335
7336 /* A good place to do this, although this was probably not intended
7337 for this kind of use. We need to dump the literal pool before
7338 references are made to a null symbol pointer. */
7339
7340 void
7341 aarch64_cleanup (void)
7342 {
7343 literal_pool *pool;
7344
7345 for (pool = list_of_pools; pool; pool = pool->next)
7346 {
7347 /* Put it at the end of the relevant section. */
7348 subseg_set (pool->section, pool->sub_section);
7349 s_ltorg (0);
7350 }
7351 }
7352
7353 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; walk
	 forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
7417 #endif
7418
7419 /* Adjust the symbol table. */
7420
/* Target hook: final symbol-table adjustments before writing the object.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
7431
/* Insert KEY/VALUE into TABLE, treating any failure (e.g. a duplicate
   key in one of the fixed operand tables) as an internal error.

   Previously a failure was merely printf'd to stdout, which polluted the
   assembler's output stream, never set a non-zero exit status, and let
   assembly continue with an incomplete table; route it through as_fatal
   instead so it is reported like every other fatal diagnostic.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("Internal Error: Can't hash %s"), key);
}
7441
7442 static void
7443 fill_instruction_hash_table (void)
7444 {
7445 aarch64_opcode *opcode = aarch64_opcode_table;
7446
7447 while (opcode->name != NULL)
7448 {
7449 templates *templ, *new_templ;
7450 templ = hash_find (aarch64_ops_hsh, opcode->name);
7451
7452 new_templ = (templates *) xmalloc (sizeof (templates));
7453 new_templ->opcode = opcode;
7454 new_templ->next = NULL;
7455
7456 if (!templ)
7457 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7458 else
7459 {
7460 new_templ->next = templ->next;
7461 templ->next = new_templ;
7462 }
7463 ++opcode;
7464 }
7465 }
7466
7467 static inline void
7468 convert_to_upper (char *dst, const char *src, size_t num)
7469 {
7470 unsigned int i;
7471 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7472 *dst = TOUPPER (*src);
7473 *dst = '\0';
7474 }
7475
/* Assume STR points to a lower-case string; allocate, convert and return
   the corresponding upper-case string.  The result is heap-allocated and
   owned by the caller (in practice it lives for the assembler's lifetime
   as a hash key).  */
static inline const char*
get_upper_str (const char *str)
{
  size_t len = strlen (str);
  /* xmalloc never returns NULL -- it reports a fatal error itself on
     allocation failure -- so the old "== NULL -> abort ()" check was
     dead code and has been removed.  */
  char *ret = xmalloc (len + 1);
  convert_to_upper (ret, str, len);
  return ret;
}
7488
7489 /* MD interface: Initialization. */
7490
/* MD interface hook: one-time assembler initialization.  Creates and
   populates all the operand/keyword hash tables, resolves the CPU
   feature set from the command line, and records the BFD machine.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if ((aarch64_ops_hsh = hash_new ()) == NULL
      || (aarch64_cond_hsh = hash_new ()) == NULL
      || (aarch64_shift_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_hsh = hash_new ()) == NULL
      || (aarch64_pstatefield_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
      || (aarch64_reg_hsh = hash_new ()) == NULL
      || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
      || (aarch64_nzcv_hsh = hash_new ()) == NULL
      || (aarch64_pldop_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    checked_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].name,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].name,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].name,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].name,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  /* Set the cpu variant based on the command-line options: -mcpu wins,
     then -march, then the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
7619
/* Command line processing.  */

const char *md_shortopts = "m:";

/* -EB/-EL are defined only for the endiannesses this target was
   configured to support; a bi-endian build gets both.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options handled by md_parse_option via the OPTION_* codes.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
7646
/* Descriptor for a simple command-line flag that sets an integer
   variable to a fixed value (e.g. -mbig-endian).  */
struct aarch64_option_table
{
  char *option;			/* Option name to match.  */
  char *help;			/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
7655
/* Simple flag options, consulted by md_parse_option after the
   -EB/-EL cases.  Terminated by an all-NULL entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
7669
/* Maps a -mcpu= name to the feature set it enables.  */
struct aarch64_cpu_option_table
{
  char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
7678
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL-name entry; the first
   entry ("all") is skipped by the .cpu directive handler.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
			       "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
7711
/* Maps a -march= name to the feature set it enables.  */
struct aarch64_arch_option_table
{
  char *name;
  const aarch64_feature_set value;
};
7717
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name entry; the first
   entry ("all") is skipped by the .arch directive handler.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {NULL, AARCH64_ARCH_NONE}
};
7727
/* ISA extensions.  Maps a "+ext" modifier name to the feature bits it
   adds (or, with a "no" prefix, removes).  */
struct aarch64_option_cpu_value_table
{
  char *name;
  const aarch64_feature_set value;
};
7734
/* Recognized "+ext" modifiers.  Note that "rdma" also pulls in SIMD
   and "fp16" also pulls in FP, as shown by their combined feature
   masks below.  Terminated by a NULL-name entry.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
			    | AARCH64_FEATURE_RDMA, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_FP, 0)},
  {NULL, AARCH64_ARCH_NONE}
};
7749
/* Descriptor for an option of the form -mfoo=<subopt>, dispatched to a
   dedicated sub-option parser.  */
struct aarch64_long_option_table
{
  char *option;			/* Substring to match.  */
  char *help;			/* Help information.  */
  int (*func) (char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
7757
7758 static int
7759 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7760 bfd_boolean ext_only)
7761 {
7762 /* We insist on extensions being added before being removed. We achieve
7763 this by using the ADDING_VALUE variable to indicate whether we are
7764 adding an extension (1) or removing it (0) and only allowing it to
7765 change in the order -1 -> 1 -> 0. */
7766 int adding_value = -1;
7767 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7768
7769 /* Copy the feature set, so that we can modify it. */
7770 *ext_set = **opt_p;
7771 *opt_p = ext_set;
7772
7773 while (str != NULL && *str != 0)
7774 {
7775 const struct aarch64_option_cpu_value_table *opt;
7776 char *ext = NULL;
7777 int optlen;
7778
7779 if (!ext_only)
7780 {
7781 if (*str != '+')
7782 {
7783 as_bad (_("invalid architectural extension"));
7784 return 0;
7785 }
7786
7787 ext = strchr (++str, '+');
7788 }
7789
7790 if (ext != NULL)
7791 optlen = ext - str;
7792 else
7793 optlen = strlen (str);
7794
7795 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7796 {
7797 if (adding_value != 0)
7798 adding_value = 0;
7799 optlen -= 2;
7800 str += 2;
7801 }
7802 else if (optlen > 0)
7803 {
7804 if (adding_value == -1)
7805 adding_value = 1;
7806 else if (adding_value != 1)
7807 {
7808 as_bad (_("must specify extensions to add before specifying "
7809 "those to remove"));
7810 return FALSE;
7811 }
7812 }
7813
7814 if (optlen == 0)
7815 {
7816 as_bad (_("missing architectural extension"));
7817 return 0;
7818 }
7819
7820 gas_assert (adding_value != -1);
7821
7822 for (opt = aarch64_features; opt->name != NULL; opt++)
7823 if (strncmp (opt->name, str, optlen) == 0)
7824 {
7825 /* Add or remove the extension. */
7826 if (adding_value)
7827 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7828 else
7829 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7830 break;
7831 }
7832
7833 if (opt->name == NULL)
7834 {
7835 as_bad (_("unknown architectural extension `%s'"), str);
7836 return 0;
7837 }
7838
7839 str = ext;
7840 };
7841
7842 return 1;
7843 }
7844
7845 static int
7846 aarch64_parse_cpu (char *str)
7847 {
7848 const struct aarch64_cpu_option_table *opt;
7849 char *ext = strchr (str, '+');
7850 size_t optlen;
7851
7852 if (ext != NULL)
7853 optlen = ext - str;
7854 else
7855 optlen = strlen (str);
7856
7857 if (optlen == 0)
7858 {
7859 as_bad (_("missing cpu name `%s'"), str);
7860 return 0;
7861 }
7862
7863 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7864 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7865 {
7866 mcpu_cpu_opt = &opt->value;
7867 if (ext != NULL)
7868 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7869
7870 return 1;
7871 }
7872
7873 as_bad (_("unknown cpu `%s'"), str);
7874 return 0;
7875 }
7876
7877 static int
7878 aarch64_parse_arch (char *str)
7879 {
7880 const struct aarch64_arch_option_table *opt;
7881 char *ext = strchr (str, '+');
7882 size_t optlen;
7883
7884 if (ext != NULL)
7885 optlen = ext - str;
7886 else
7887 optlen = strlen (str);
7888
7889 if (optlen == 0)
7890 {
7891 as_bad (_("missing architecture name `%s'"), str);
7892 return 0;
7893 }
7894
7895 for (opt = aarch64_archs; opt->name != NULL; opt++)
7896 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7897 {
7898 march_cpu_opt = &opt->value;
7899 if (ext != NULL)
7900 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7901
7902 return 1;
7903 }
7904
7905 as_bad (_("unknown architecture `%s'\n"), str);
7906 return 0;
7907 }
7908
/* ABIs.  Maps a -mabi= name to the corresponding ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  char *name;
  enum aarch64_abi_type value;
};

/* Recognized -mabi= values; terminated by a NULL-name entry.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
  {NULL, 0}
};
7921
7922 static int
7923 aarch64_parse_abi (char *str)
7924 {
7925 const struct aarch64_option_abi_value_table *opt;
7926 size_t optlen = strlen (str);
7927
7928 if (optlen == 0)
7929 {
7930 as_bad (_("missing abi name `%s'"), str);
7931 return 0;
7932 }
7933
7934 for (opt = aarch64_abis; opt->name != NULL; opt++)
7935 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7936 {
7937 aarch64_abi = opt->value;
7938 return 1;
7939 }
7940
7941 as_bad (_("unknown abi `%s'\n"), str);
7942 return 0;
7943 }
7944
/* Options of the form -mfoo=<subopt>, dispatched by md_parse_option to
   the sub-option parsers above.  Terminated by an all-NULL entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
7956
/* Handle a command-line option.  C is the short-option character (or an
   OPTION_* code for a long option) and ARG its argument, if any.
   Returns 1 if the option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the options that take a sub-argument (e.g. -mcpu=).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
8024
8025 void
8026 md_show_usage (FILE * fp)
8027 {
8028 struct aarch64_option_table *opt;
8029 struct aarch64_long_option_table *lopt;
8030
8031 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8032
8033 for (opt = aarch64_opts; opt->option != NULL; opt++)
8034 if (opt->help != NULL)
8035 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8036
8037 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8038 if (lopt->help != NULL)
8039 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8040
8041 #ifdef OPTION_EB
8042 fprintf (fp, _("\
8043 -EB assemble code for a big-endian cpu\n"));
8044 #endif
8045
8046 #ifdef OPTION_EL
8047 fprintf (fp, _("\
8048 -EL assemble code for a little-endian cpu\n"));
8049 #endif
8050 }
8051
8052 /* Parse a .cpu directive. */
8053
8054 static void
8055 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8056 {
8057 const struct aarch64_cpu_option_table *opt;
8058 char saved_char;
8059 char *name;
8060 char *ext;
8061 size_t optlen;
8062
8063 name = input_line_pointer;
8064 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8065 input_line_pointer++;
8066 saved_char = *input_line_pointer;
8067 *input_line_pointer = 0;
8068
8069 ext = strchr (name, '+');
8070
8071 if (ext != NULL)
8072 optlen = ext - name;
8073 else
8074 optlen = strlen (name);
8075
8076 /* Skip the first "all" entry. */
8077 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8078 if (strlen (opt->name) == optlen
8079 && strncmp (name, opt->name, optlen) == 0)
8080 {
8081 mcpu_cpu_opt = &opt->value;
8082 if (ext != NULL)
8083 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8084 return;
8085
8086 cpu_variant = *mcpu_cpu_opt;
8087
8088 *input_line_pointer = saved_char;
8089 demand_empty_rest_of_line ();
8090 return;
8091 }
8092 as_bad (_("unknown cpu `%s'"), name);
8093 *input_line_pointer = saved_char;
8094 ignore_rest_of_line ();
8095 }
8096
8097
8098 /* Parse a .arch directive. */
8099
8100 static void
8101 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8102 {
8103 const struct aarch64_arch_option_table *opt;
8104 char saved_char;
8105 char *name;
8106 char *ext;
8107 size_t optlen;
8108
8109 name = input_line_pointer;
8110 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8111 input_line_pointer++;
8112 saved_char = *input_line_pointer;
8113 *input_line_pointer = 0;
8114
8115 ext = strchr (name, '+');
8116
8117 if (ext != NULL)
8118 optlen = ext - name;
8119 else
8120 optlen = strlen (name);
8121
8122 /* Skip the first "all" entry. */
8123 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8124 if (strlen (opt->name) == optlen
8125 && strncmp (name, opt->name, optlen) == 0)
8126 {
8127 mcpu_cpu_opt = &opt->value;
8128 if (ext != NULL)
8129 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8130 return;
8131
8132 cpu_variant = *mcpu_cpu_opt;
8133
8134 *input_line_pointer = saved_char;
8135 demand_empty_rest_of_line ();
8136 return;
8137 }
8138
8139 as_bad (_("unknown architecture `%s'\n"), name);
8140 *input_line_pointer = saved_char;
8141 ignore_rest_of_line ();
8142 }
8143
8144 /* Parse a .arch_extension directive. */
8145
8146 static void
8147 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8148 {
8149 char saved_char;
8150 char *ext = input_line_pointer;;
8151
8152 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8153 input_line_pointer++;
8154 saved_char = *input_line_pointer;
8155 *input_line_pointer = 0;
8156
8157 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8158 return;
8159
8160 cpu_variant = *mcpu_cpu_opt;
8161
8162 *input_line_pointer = saved_char;
8163 demand_empty_rest_of_line ();
8164 }
8165
/* Copy symbol information.  Propagates the AArch64-specific symbol
   flags from SRC to DEST (used when one symbol is set from another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.197576 seconds and 4 git commands to generate.