Add the new Fields and Operand types for the new instructions in Armv8.4-A.
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_NONE = 0,
66 AARCH64_ABI_LP64 = 1,
67 AARCH64_ABI_ILP32 = 2
68 };
69
70 #ifndef DEFAULT_ARCH
71 #define DEFAULT_ARCH "aarch64"
72 #endif
73
74 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
75 static const char *default_arch = DEFAULT_ARCH;
76
77 /* AArch64 ABI for the output file. */
78 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
79
80 /* When non-zero, program to a 32-bit model, in which the C data types
81 int, long and all pointer types are 32-bit objects (ILP32); or to a
82 64-bit model, in which the C int type is 32-bits but the C long type
83 and all pointer types are 64-bit objects (LP64). */
84 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
85 #endif
86
87 enum vector_el_type
88 {
89 NT_invtype = -1,
90 NT_b,
91 NT_h,
92 NT_s,
93 NT_d,
94 NT_q,
95 NT_zero,
96 NT_merge
97 };
98
99 /* Bits for DEFINED field in vector_type_el. */
100 #define NTA_HASTYPE 1
101 #define NTA_HASINDEX 2
102 #define NTA_HASVARWIDTH 4
103
/* Parsed form of a vector register shape suffix (e.g. ".4s", ".d") and/or
   element index; DEFINED records which of the NTA_HAS* pieces are valid.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type, e.g. NT_b/NT_h/NT_s/NT_d/NT_q.  */
  unsigned char defined;	/* Mask of NTA_HASTYPE/NTA_HASINDEX/NTA_HASVARWIDTH.  */
  unsigned width;		/* Number of elements; 0 when unspecified.  */
  int64_t index;		/* Element index, valid when NTA_HASINDEX is set.  */
};
111
112 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
113
/* Relocation/fixup information attached to the instruction being
   assembled.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the fixup refers to.  */
  int pc_rel;				/* PC-relative? (passed to the fixup).  */
  enum aarch64_opnd opnd;		/* Operand the relocation applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Finish encoding via libopcodes?  */
};
123
124 struct aarch64_instruction
125 {
126 /* libopcodes structure for instruction intermediate representation. */
127 aarch64_inst base;
128 /* Record assembly errors found during the parsing. */
129 struct
130 {
131 enum aarch64_operand_error_kind kind;
132 const char *error;
133 } parsing_error;
134 /* The condition that appears in the assembly line. */
135 int cond;
136 /* Relocation information (including the GAS internal fixup). */
137 struct reloc reloc;
138 /* Need to generate an immediate in the literal pool. */
139 unsigned gen_lit_pool : 1;
140 };
141
142 typedef struct aarch64_instruction aarch64_instruction;
143
144 static aarch64_instruction inst;
145
146 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
147 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
148
149 /* Diagnostics inline function utilities.
150
151 These are lightweight utilities which should only be called by parse_operands
152 and other parsers. GAS processes each assembly line by parsing it against
153 instruction template(s), in the case of multiple templates (for the same
154 mnemonic name), those templates are tried one by one until one succeeds or
155 all fail. An assembly line may fail a few templates before being
156 successfully parsed; an error saved here in most cases is not a user error
157 but an error indicating the current template is not the right template.
158 Therefore it is very important that errors can be saved at a low cost during
159 the parsing; we don't want to slow down the whole parsing by recording
160 non-user errors in detail.
161
162 Remember that the objective is to help GAS pick up the most appropriate
163 error message in the case of multiple templates, e.g. FMOV which has 8
164 templates. */
165
166 static inline void
167 clear_error (void)
168 {
169 inst.parsing_error.kind = AARCH64_OPDE_NIL;
170 inst.parsing_error.error = NULL;
171 }
172
173 static inline bfd_boolean
174 error_p (void)
175 {
176 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
177 }
178
/* Return the message of the recorded parsing error; NULL when the error
   carries no specific text.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
184
/* Return the kind of the recorded parsing error (AARCH64_OPDE_NIL when
   no error is pending).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
190
191 static inline void
192 set_error (enum aarch64_operand_error_kind kind, const char *error)
193 {
194 inst.parsing_error.kind = kind;
195 inst.parsing_error.error = error;
196 }
197
/* Record a recoverable error: the current template failed but a later
   template for the same mnemonic may still match.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
203
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  /* NULL message => diagnostic code falls back to the operand's DESC.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
211
/* Record ERROR as a syntax error, replacing any previously recorded
   error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
217
218 static inline void
219 set_first_syntax_error (const char *error)
220 {
221 if (! error_p ())
222 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
223 }
224
/* Record ERROR as a fatal syntax error: no further templates should be
   tried for this assembly line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
230 \f
231 /* Number of littlenums required to hold an extended precision number. */
232 #define MAX_LITTLENUMS 6
233
234 /* Return value for certain parsers when the parsing fails; those parsers
235 return the information of the parsed result, e.g. register number, on
236 success. */
237 #define PARSE_FAIL -1
238
239 /* This is an invalid condition code that means no conditional field is
240 present. */
241 #define COND_ALWAYS 0x10
242
243 typedef struct
244 {
245 const char *template;
246 unsigned long value;
247 } asm_barrier_opt;
248
249 typedef struct
250 {
251 const char *template;
252 uint32_t value;
253 } asm_nzcv;
254
255 struct reloc_entry
256 {
257 char *name;
258 bfd_reloc_code_real_type reloc;
259 };
260
261 /* Macros to define the register types and masks for the purpose
262 of parsing. */
263
264 #undef AARCH64_REG_TYPES
265 #define AARCH64_REG_TYPES \
266 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
267 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
268 BASIC_REG_TYPE(SP_32) /* wsp */ \
269 BASIC_REG_TYPE(SP_64) /* sp */ \
270 BASIC_REG_TYPE(Z_32) /* wzr */ \
271 BASIC_REG_TYPE(Z_64) /* xzr */ \
272 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
273 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
274 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
275 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
276 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
277 BASIC_REG_TYPE(VN) /* v[0-31] */ \
278 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
279 BASIC_REG_TYPE(PN) /* p[0-15] */ \
280 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
281 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
282 /* Typecheck: same, plus SVE registers. */ \
283 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
286 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
288 /* Typecheck: same, plus SVE registers. */ \
289 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
293 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
295 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Typecheck: any [BHSDQ]P FP. */ \
300 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
301 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
302 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
305 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
306 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
307 /* Typecheck: as above, but also Zn and Pn. This should only be \
308 used for SVE instructions, since Zn and Pn are valid symbols \
309 in other contexts. */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
314 | REG_TYPE(ZN) | REG_TYPE(PN)) \
315 /* Any integer register; used for error messages only. */ \
316 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
317 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
318 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
319 /* Pseudo type to mark the end of the enumerator sequence. */ \
320 BASIC_REG_TYPE(MAX)
321
322 #undef BASIC_REG_TYPE
323 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
324 #undef MULTI_REG_TYPE
325 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
326
327 /* Register type enumerators. */
328 typedef enum aarch64_reg_type_
329 {
330 /* A list of REG_TYPE_*. */
331 AARCH64_REG_TYPES
332 } aarch64_reg_type;
333
334 #undef BASIC_REG_TYPE
335 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
336 #undef REG_TYPE
337 #define REG_TYPE(T) (1 << REG_TYPE_##T)
338 #undef MULTI_REG_TYPE
339 #define MULTI_REG_TYPE(T,V) V,
340
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name as written in the source.  */
  unsigned char number;		/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* One of REG_TYPE_*.  */
  unsigned char builtin;	/* NOTE(review): presumably non-zero for
				   predefined (non-user-defined) registers
				   — confirm against the table set-up code.  */
} reg_entry;
349
350 /* Values indexed by aarch64_reg_type to assist the type checking. */
351 static const unsigned reg_type_masks[] =
352 {
353 AARCH64_REG_TYPES
354 };
355
356 #undef BASIC_REG_TYPE
357 #undef REG_TYPE
358 #undef MULTI_REG_TYPE
359 #undef AARCH64_REG_TYPES
360
361 /* Diagnostics used when we don't get a register of the expected type.
362 Note: this has to synchronized with aarch64_reg_type definitions
363 above. */
364 static const char *
365 get_reg_expected_msg (aarch64_reg_type reg_type)
366 {
367 const char *msg;
368
369 switch (reg_type)
370 {
371 case REG_TYPE_R_32:
372 msg = N_("integer 32-bit register expected");
373 break;
374 case REG_TYPE_R_64:
375 msg = N_("integer 64-bit register expected");
376 break;
377 case REG_TYPE_R_N:
378 msg = N_("integer register expected");
379 break;
380 case REG_TYPE_R64_SP:
381 msg = N_("64-bit integer or SP register expected");
382 break;
383 case REG_TYPE_SVE_BASE:
384 msg = N_("base register expected");
385 break;
386 case REG_TYPE_R_Z:
387 msg = N_("integer or zero register expected");
388 break;
389 case REG_TYPE_SVE_OFFSET:
390 msg = N_("offset register expected");
391 break;
392 case REG_TYPE_R_SP:
393 msg = N_("integer or SP register expected");
394 break;
395 case REG_TYPE_R_Z_SP:
396 msg = N_("integer, zero or SP register expected");
397 break;
398 case REG_TYPE_FP_B:
399 msg = N_("8-bit SIMD scalar register expected");
400 break;
401 case REG_TYPE_FP_H:
402 msg = N_("16-bit SIMD scalar or floating-point half precision "
403 "register expected");
404 break;
405 case REG_TYPE_FP_S:
406 msg = N_("32-bit SIMD scalar or floating-point single precision "
407 "register expected");
408 break;
409 case REG_TYPE_FP_D:
410 msg = N_("64-bit SIMD scalar or floating-point double precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_Q:
414 msg = N_("128-bit SIMD scalar or floating-point quad precision "
415 "register expected");
416 break;
417 case REG_TYPE_R_Z_BHSDQ_V:
418 case REG_TYPE_R_Z_BHSDQ_VZP:
419 msg = N_("register expected");
420 break;
421 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
422 msg = N_("SIMD scalar or floating-point register expected");
423 break;
424 case REG_TYPE_VN: /* any V reg */
425 msg = N_("vector register expected");
426 break;
427 case REG_TYPE_ZN:
428 msg = N_("SVE vector register expected");
429 break;
430 case REG_TYPE_PN:
431 msg = N_("SVE predicate register expected");
432 break;
433 default:
434 as_fatal (_("invalid register type %d"), reg_type);
435 }
436 return msg;
437 }
438
439 /* Some well known registers that we refer to directly elsewhere. */
440 #define REG_SP 31
441
442 /* Instructions take 4 bytes in the object file. */
443 #define INSN_SIZE 4
444
445 static struct hash_control *aarch64_ops_hsh;
446 static struct hash_control *aarch64_cond_hsh;
447 static struct hash_control *aarch64_shift_hsh;
448 static struct hash_control *aarch64_sys_regs_hsh;
449 static struct hash_control *aarch64_pstatefield_hsh;
450 static struct hash_control *aarch64_sys_regs_ic_hsh;
451 static struct hash_control *aarch64_sys_regs_dc_hsh;
452 static struct hash_control *aarch64_sys_regs_at_hsh;
453 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
454 static struct hash_control *aarch64_reg_hsh;
455 static struct hash_control *aarch64_barrier_opt_hsh;
456 static struct hash_control *aarch64_nzcv_hsh;
457 static struct hash_control *aarch64_pldop_hsh;
458 static struct hash_control *aarch64_hint_opt_hsh;
459
460 /* Stuff needed to resolve the label ambiguity
461 As:
462 ...
463 label: <insn>
464 may differ from:
465 ...
466 label:
467 <insn> */
468
469 static symbolS *last_label_seen;
470
471 /* Literal pool structure. Held on a per-section
472 and per-sub-section basis. */
473
474 #define MAX_LITERAL_POOL_SIZE 1024
475 typedef struct literal_expression
476 {
477 expressionS exp;
478 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
479 LITTLENUM_TYPE * bignum;
480 } literal_expression;
481
482 typedef struct literal_pool
483 {
484 literal_expression literals[MAX_LITERAL_POOL_SIZE];
485 unsigned int next_free_entry;
486 unsigned int id;
487 symbolS *symbol;
488 segT section;
489 subsegT sub_section;
490 int size;
491 struct literal_pool *next;
492 } literal_pool;
493
494 /* Pointer to a linked list of literal pools. */
495 static literal_pool *list_of_pools = NULL;
496 \f
497 /* Pure syntax. */
498
499 /* This array holds the chars that always start a comment. If the
500 pre-processor is disabled, these aren't very useful. */
501 const char comment_chars[] = "";
502
503 /* This array holds the chars that only start a comment at the beginning of
504 a line. If the line seems to have the form '# 123 filename'
505 .line and .file directives will appear in the pre-processed output. */
506 /* Note that input_file.c hand checks for '#' at the beginning of the
507 first line of the input file. This is because the compiler outputs
508 #NO_APP at the beginning of its output. */
509 /* Also note that comments like this one will always work. */
510 const char line_comment_chars[] = "#";
511
512 const char line_separator_chars[] = ";";
513
514 /* Chars that can be used to separate mant
515 from exp in floating point numbers. */
516 const char EXP_CHARS[] = "eE";
517
518 /* Chars that mean this number is a floating point constant. */
519 /* As in 0f12.456 */
520 /* or 0d1.2345e12 */
521
522 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
523
524 /* Prefix character that indicates the start of an immediate value. */
525 #define is_immediate_prefix(C) ((C) == '#')
526
527 /* Separator character handling. */
528
529 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
530
531 static inline bfd_boolean
532 skip_past_char (char **str, char c)
533 {
534 if (**str == c)
535 {
536 (*str)++;
537 return TRUE;
538 }
539 else
540 return FALSE;
541 }
542
543 #define skip_past_comma(str) skip_past_char (str, ',')
544
545 /* Arithmetic expressions (possibly involving symbols). */
546
547 static bfd_boolean in_my_get_expression_p = FALSE;
548
549 /* Third argument to my_get_expression. */
550 #define GE_NO_PREFIX 0
551 #define GE_OPT_PREFIX 1
552
553 /* Return TRUE if the string pointed by *STR is successfully parsed
554 as an valid expression; *EP will be filled with the information of
555 such an expression. Otherwise return FALSE. */
556
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Optionally consume a leading '#' immediate prefix, depending on
     PREFIX_MODE.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily redirect
     it to *STR; in_my_get_expression_p lets md_operand flag bad
     expressions back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* An explicit '#' prefix means this really had to be an
	 expression, so the error is fatal for this line.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore the global
     input pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
619
620 /* Turn a string in input_line_pointer into a floating point constant
621 of type TYPE, and store the appropriate bytes in *LITP. The number
622 of LITTLENUMS emitted is stored in *SIZEP. An error message is
623 returned, or NULL on OK. */
624
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder; byte order follows
     target_big_endian.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
630
631 /* We handle all bad expressions here, so that we can report the faulty
632 instruction in the error message. */
633 void
634 md_operand (expressionS * exp)
635 {
636 if (in_my_get_expression_p)
637 exp->X_op = O_illegal;
638 }
639
640 /* Immediate values. */
641
642 /* Errors may be set multiple times during parsing or bit encoding
643 (particularly in the Neon bits), but usually the earliest error which is set
644 will be the most meaningful. Avoid overwriting it with later (cascading)
645 errors by calling this function. */
646
static void
first_error (const char *error)
{
  /* Keep the earliest (most meaningful) error.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
653
654 /* Similar to first_error, but this function accepts formatted error
655 message. */
656 static void
657 first_error_fmt (const char *format, ...)
658 {
659 va_list args;
660 enum
661 { size = 100 };
662 /* N.B. this single buffer will not cause error messages for different
663 instructions to pollute each other; this is because at the end of
664 processing of each assembly line, error message if any will be
665 collected by as_bad. */
666 static char buffer[size];
667
668 if (! error_p ())
669 {
670 int ret ATTRIBUTE_UNUSED;
671 va_start (args, format);
672 ret = vsnprintf (buffer, size, format, args);
673 know (ret <= size - 1 && ret >= 0);
674 va_end (args);
675 set_syntax_error (buffer);
676 }
677 }
678
679 /* Register parsing. */
680
681 /* Generic register parser which is called by other specialized
682 register parsers.
683 CCP points to what should be the beginning of a register name.
684 If it is indeed a valid register name, advance CCP over it and
685 return the reg_entry structure; otherwise return NULL.
686 It does not issue diagnostics. */
687
688 static reg_entry *
689 parse_reg (char **ccp)
690 {
691 char *start = *ccp;
692 char *p;
693 reg_entry *reg;
694
695 #ifdef REGISTER_PREFIX
696 if (*start != REGISTER_PREFIX)
697 return NULL;
698 start++;
699 #endif
700
701 p = start;
702 if (!ISALPHA (*p) || !is_name_beginner (*p))
703 return NULL;
704
705 do
706 p++;
707 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
708
709 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
710
711 if (!reg)
712 return NULL;
713
714 *ccp = p;
715 return reg;
716 }
717
718 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
719 return FALSE. */
720 static bfd_boolean
721 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
722 {
723 return (reg_type_masks[type] & (1 << reg->type)) != 0;
724 }
725
726 /* Try to parse a base or offset register. Allow SVE base and offset
727 registers if REG_TYPE includes SVE registers. Return the register
728 entry on success, setting *QUALIFIER to the register qualifier.
729 Return null otherwise.
730
731 Note that this function does not issue any diagnostics. */
732
733 static const reg_entry *
734 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
735 aarch64_opnd_qualifier_t *qualifier)
736 {
737 char *str = *ccp;
738 const reg_entry *reg = parse_reg (&str);
739
740 if (reg == NULL)
741 return NULL;
742
743 switch (reg->type)
744 {
745 case REG_TYPE_R_32:
746 case REG_TYPE_SP_32:
747 case REG_TYPE_Z_32:
748 *qualifier = AARCH64_OPND_QLF_W;
749 break;
750
751 case REG_TYPE_R_64:
752 case REG_TYPE_SP_64:
753 case REG_TYPE_Z_64:
754 *qualifier = AARCH64_OPND_QLF_X;
755 break;
756
757 case REG_TYPE_ZN:
758 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
759 || str[0] != '.')
760 return NULL;
761 switch (TOLOWER (str[1]))
762 {
763 case 's':
764 *qualifier = AARCH64_OPND_QLF_S_S;
765 break;
766 case 'd':
767 *qualifier = AARCH64_OPND_QLF_S_D;
768 break;
769 default:
770 return NULL;
771 }
772 str += 2;
773 break;
774
775 default:
776 return NULL;
777 }
778
779 *ccp = str;
780
781 return reg;
782 }
783
784 /* Try to parse a base or offset register. Return the register entry
785 on success, setting *QUALIFIER to the register qualifier. Return null
786 otherwise.
787
788 Note that this function does not issue any diagnostics. */
789
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* REG_TYPE_R_Z_SP covers W/X general registers, WSP/SP and WZR/XZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
795
796 /* Parse the qualifier of a vector register or vector element of type
797 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
798 succeeds; otherwise return FALSE.
799
800 Accept only one occurrence of:
801 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
802 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers and plain "b/h/s/d/q" suffixes carry no element
     count; record width 0 and go straight to the element size.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only the counts used by the AdvSIMD arrangements are accepted.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE registers or as the scalar "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A non-zero count must describe a 64- or 128-bit vector, or one of
     the short 2h/4b arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
881
882 /* *STR contains an SVE zero/merge predication suffix. Parse it into
883 *PARSED_TYPE and point *STR at the end of the suffix. */
884
885 static bfd_boolean
886 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
887 {
888 char *ptr = *str;
889
890 /* Skip '/'. */
891 gas_assert (*ptr == '/');
892 ptr++;
893 switch (TOLOWER (*ptr))
894 {
895 case 'z':
896 parsed_type->type = NT_zero;
897 break;
898 case 'm':
899 parsed_type->type = NT_merge;
900 break;
901 default:
902 if (*ptr != '\0' && *ptr != ',')
903 first_error_fmt (_("unexpected character `%c' in predication type"),
904 *ptr);
905 else
906 first_error (_("missing predication type"));
907 return FALSE;
908 }
909 parsed_type->width = 0;
910 *str = ptr + 1;
911 return TRUE;
912 }
913
914 /* Parse a register of the type TYPE.
915
916 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
917 name or the parsed register is not of TYPE.
918
919 Otherwise return the register number, and optionally fill in the actual
920 type of the register in *RTYPE when multiple alternatives were given, and
921 return the register shape and element index information in *TYPEINFO.
922
923 IN_REG_LIST should be set with TRUE if the caller is parsing a register
924 list. */
925
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from "no shape, no index" defaults.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual type for the checks below.  */
  type = reg->type;

  /* V/Z registers may have a ".<T>" shape suffix; P registers may also
     have a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note the error is
     recorded but the register number is still returned.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1049
1050 /* Parse register.
1051
1052 Return the register number on success; return PARSE_FAIL otherwise.
1053
1054 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1055 the register (e.g. NEON double or quad reg when either has been requested).
1056
1057 If this is a NEON vector register with additional type information, fill
1058 in the struct pointed to by VECTYPE (if non-NULL).
1059
1060 This parser does not handle register list. */
1061
1062 static int
1063 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1064 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1065 {
1066 struct vector_type_el atype;
1067 char *str = *ccp;
1068 int reg = parse_typed_reg (&str, type, rtype, &atype,
1069 /*in_reg_list= */ FALSE);
1070
1071 if (reg == PARSE_FAIL)
1072 return PARSE_FAIL;
1073
1074 if (vectype)
1075 *vectype = atype;
1076
1077 *ccp = str;
1078
1079 return reg;
1080 }
1081
1082 static inline bfd_boolean
1083 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1084 {
1085 return
1086 e1.type == e2.type
1087 && e1.defined == e2.defined
1088 && e1.width == e2.width && e1.index == e2.index;
1089 }
1090
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   17-21 |   12-16   |   7-11    |    2-6    |   0-1
     4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   (The Nth register number is accumulated at bit 5*(N-1) of ret_val,
   which is finally shifted left by 2 and combined with nb_regs - 1;
   see the end of this function.)

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* A "Va-Vb" range: the second register number must not be
	     smaller than the first.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers after the first must have the same shape and
	     (where present) the same element index.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Accumulate each register number of the (possibly one-element)
	   range into the encoded result, 5 bits per register.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* If any register carried an element index, a trailing [index] that
     applies to the whole list is required.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1251
1252 /* Directives: register aliases. */
1253
1254 static reg_entry *
1255 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1256 {
1257 reg_entry *new;
1258 const char *name;
1259
1260 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1261 {
1262 if (new->builtin)
1263 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1264 str);
1265
1266 /* Only warn about a redefinition if it's not defined as the
1267 same register. */
1268 else if (new->number != number || new->type != type)
1269 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1270
1271 return NULL;
1272 }
1273
1274 name = xstrdup (str);
1275 new = XNEW (reg_entry);
1276
1277 new->name = name;
1278 new->number = number;
1279 new->type = type;
1280 new->builtin = FALSE;
1281
1282 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1283 abort ();
1284
1285 return new;
1286 }
1287
1288 /* Look for the .req directive. This is of the form:
1289
1290 new_register_name .req existing_register_name
1291
1292 If we find one, or if it looks sufficiently like one that we want to
1293 handle any error here, return TRUE. Otherwise return FALSE. */
1294
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must be an already-known register or alias.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Work on a NUL-terminated private copy so we can case-convert it.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case variant if it differs from the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1367
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  If the scrubber
   does hand us a line starting with .req, the syntax is necessarily
   wrong, so just report it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1375
1376 /* The .unreq directive deletes an alias which was previously defined
1377 by .req. For example:
1378
1379 my_alias .req r11
1380 .unreq my_alias */
1381
1382 static void
1383 s_unreq (int a ATTRIBUTE_UNUSED)
1384 {
1385 char *name;
1386 char saved_char;
1387
1388 name = input_line_pointer;
1389
1390 while (*input_line_pointer != 0
1391 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1392 ++input_line_pointer;
1393
1394 saved_char = *input_line_pointer;
1395 *input_line_pointer = 0;
1396
1397 if (!*name)
1398 as_bad (_("invalid syntax for .unreq directive"));
1399 else
1400 {
1401 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1402
1403 if (!reg)
1404 as_bad (_("unknown register alias '%s'"), name);
1405 else if (reg->builtin)
1406 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1407 name);
1408 else
1409 {
1410 char *p;
1411 char *nbuf;
1412
1413 hash_delete (aarch64_reg_hsh, name, FALSE);
1414 free ((char *) reg->name);
1415 free (reg);
1416
1417 /* Also locate the all upper case and all lower case versions.
1418 Do not complain if we cannot find one or the other as it
1419 was probably deleted above. */
1420
1421 nbuf = strdup (name);
1422 for (p = nbuf; *p; p++)
1423 *p = TOUPPER (*p);
1424 reg = hash_find (aarch64_reg_hsh, nbuf);
1425 if (reg)
1426 {
1427 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1428 free ((char *) reg->name);
1429 free (reg);
1430 }
1431
1432 for (p = nbuf; *p; p++)
1433 *p = TOLOWER (*p);
1434 reg = hash_find (aarch64_reg_hsh, nbuf);
1435 if (reg)
1436 {
1437 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1438 free ((char *) reg->name);
1439 free (reg);
1440 }
1441
1442 free (nbuf);
1443 }
1444 }
1445
1446 *input_line_pointer = saved_char;
1447 demand_empty_rest_of_line ();
1448 }
1449
1450 /* Directives: Instruction set selection. */
1451
1452 #ifdef OBJ_ELF
1453 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1454 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1455 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1456 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1457
1458 /* Create a new mapping symbol for the transition to STATE. */
1459
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name: "$d" marks data, "$x" marks
     A64 instructions.  Any other state is a caller bug.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  /* Mapping symbols are local and carry no other flags.  */
  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be added in address order;
	 a duplicate at the same offset replaces the old one.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1513
1514 /* We must sometimes convert a region marked as code to data during
1515 code alignment, if an odd number of bytes have to be padded. The
1516 code mapping symbol is pushed to an aligned address. */
1517
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If the removed symbol sat at offset 0 it was also the frag's
	 first mapping symbol; clear that record too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding itself as data, then restore STATE right after
     the BYTES of padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1541
1542 static void mapping_state_2 (enum mstate state, int max_chars);
1543
1544 /* Set the mapping state to STATE. Only call this when about to
1545 emit some STATE bytes to the file. */
1546
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  (The argument is a log2 alignment: 2 means 4 bytes.)  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE at the current position.  */
  mapping_state_2 (state, 0);
}
1585
1586 /* Same as mapping_state, but MAX_CHARS bytes have already been
1587 allocated. Put the mapping symbol that far back. */
1588
1589 static void
1590 mapping_state_2 (enum mstate state, int max_chars)
1591 {
1592 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1593
1594 if (!SEG_NORMAL (now_seg))
1595 return;
1596
1597 if (mapstate == state)
1598 /* The mapping symbol has already been emitted.
1599 There is nothing else to do. */
1600 return;
1601
1602 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1603 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1604 }
1605 #else
1606 #define mapping_state(x) /* nothing */
1607 #define mapping_state_2(x, y) /* nothing */
1608 #endif
1609
1610 /* Directives: sectioning and alignment. */
1611
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Whatever follows in .bss is data, not instructions.  */
  mapping_state (MAP_DATA);
}
1621
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  (1 is a log2 alignment,
     i.e. pad to a 2-byte boundary.)  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1633
1634 /* Directives: Literal pools. */
1635
1636 static literal_pool *
1637 find_literal_pool (int size)
1638 {
1639 literal_pool *pool;
1640
1641 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1642 {
1643 if (pool->section == now_seg
1644 && pool->sub_section == now_subseg && pool->size == size)
1645 break;
1646 }
1647
1648 return pool;
1649 }
1650
1651 static literal_pool *
1652 find_or_make_literal_pool (int size)
1653 {
1654 /* Next literal pool ID number. */
1655 static unsigned int latest_pool_num = 1;
1656 literal_pool *pool;
1657
1658 pool = find_literal_pool (size);
1659
1660 if (pool == NULL)
1661 {
1662 /* Create a new pool. */
1663 pool = XNEW (literal_pool);
1664 if (!pool)
1665 return NULL;
1666
1667 /* Currently we always put the literal pool in the current text
1668 section. If we were generating "small" model code where we
1669 knew that all code and initialised data was within 1MB then
1670 we could output literals to mergeable, read-only data
1671 sections. */
1672
1673 pool->next_free_entry = 0;
1674 pool->section = now_seg;
1675 pool->sub_section = now_subseg;
1676 pool->size = size;
1677 pool->next = list_of_pools;
1678 pool->symbol = NULL;
1679
1680 /* Add it to the list. */
1681 list_of_pools = pool;
1682 }
1683
1684 /* New pools, and emptied pools, will have a NULL symbol. */
1685 if (pool->symbol == NULL)
1686 {
1687 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1688 (valueT) 0, &zero_address_frag);
1689 pool->id = latest_pool_num++;
1690 }
1691
1692 /* Done. */
1693 return pool;
1694 }
1695
1696 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1697 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool; reuse the
     entry if so.  Constants match on value/signedness, symbolic
     expressions on symbol identity and addend.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to reference the pool slot: the caller's
     operand becomes "pool symbol + byte offset of the entry".  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1755
1756 /* Can't use symbol_new here, so have to create a symbol and then at
1757 a later date assign it a value. That's what these functions do. */
1758
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object format and the target a chance to decorate the
     freshly placed symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1806
1807
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (entry sizes 4, 8 and 16 bytes) for the current section at the
   current location, then mark the pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the supported entry sizes: 1 << align for align 2..4.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name makes the label impossible to write in
	 source, so it cannot clash with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's symbol (created lazily earlier) to this spot.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1866
1867 #ifdef OBJ_ELF
1868 /* Forward declarations for functions below, in the MD interface
1869 section. */
1870 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1871 static struct reloc_table_entry * find_reloc_table_entry (char **);
1872
1873 /* Directives: Data. */
1874 /* N.B. the support for relocation suffix in this directive needs to be
1875 implemented properly. */
1876
/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8).
   A relocation suffix of the form :name: after a symbol is recognized
   but currently rejected as unimplemented.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Everything emitted here is data.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_suffix:" after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1928
1929 #endif /* OBJ_ELF */
1930
1931 /* Output a 32-bit word, but mark as an instruction. */
1932
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* emit_expr writes in target byte order, but the value here is an
	 instruction word; pre-swap on big-endian targets so the bytes
	 land in instruction order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1983
1984 #ifdef OBJ_ELF
1985 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1986
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix is attached to the position the next
     instruction will occupy.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
1999
2000 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2001
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2019
2020 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2021
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix is attached to the position the next
     instruction will occupy.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2034 #endif /* OBJ_ELF */
2035
2036 static void s_aarch64_arch (int);
2037 static void s_aarch64_cpu (int);
2038 static void s_aarch64_arch_extension (int);
2039
2040 /* This table describes all the machine specific pseudo-ops the assembler
2041 has to support. The fields are:
2042 pseudo-op name without dot
2043 function to call to execute this pseudo-op
2044 Integer arg to pass to the function. */
2045
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Override the generic data directives so relocation suffixes can be
     diagnosed; the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2069 \f
2070
2071 /* Check whether STR points to a register name followed by a comma or the
2072 end of line; REG_TYPE indicates which register types are checked
2073 against. Return TRUE if STR is such a register name; otherwise return
2074 FALSE. The function does not intend to produce any diagnostics, but since
2075 the register parser aarch64_reg_parse, which is called by this function,
2076 does produce diagnostics, we call clear_error to clear any diagnostics
2077 that may be generated by aarch64_reg_parse.
2078 Also, the function returns FALSE directly if there is any user error
2079 present at the function entry. This prevents the existing diagnostics
2080 state from being spoiled.
2081 The function currently serves parse_constant_immediate and
2082 parse_big_immediate only. */
2083 static bfd_boolean
2084 reg_name_p (char *str, aarch64_reg_type reg_type)
2085 {
2086 int reg;
2087
2088 /* Prevent the diagnostics state from being spoiled. */
2089 if (error_p ())
2090 return FALSE;
2091
2092 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2093
2094 /* Clear the parsing error that may be set by the reg parser. */
2095 clear_error ();
2096
2097 if (reg == PARSE_FAIL)
2098 return FALSE;
2099
2100 skip_whitespace (str);
2101 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2102 return TRUE;
2103
2104 return FALSE;
2105 }
2106
2107 /* Parser functions used exclusively in instruction operands. */
2108
2109 /* Parse an immediate expression which may not be constant.
2110
2111 To prevent the expression parser from pushing a register name
2112 into the symbol table as an undefined symbol, firstly a check is
2113 done to find out whether STR is a register of type REG_TYPE followed
2114 by a comma or the end of line. Return FALSE if STR is such a string. */
2115
static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  /* Refuse a bare register name so the expression parser does not
     enter it into the symbol table as an undefined symbol.  */
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  /* The leading '#' is optional here (GE_OPT_PREFIX).  */
  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}
2136
2137 /* Constant immediate-value read function for use in insn parsing.
2138 STR points to the beginning of the immediate (with the optional
2139 leading #); *VAL receives the value. REG_TYPE says which register
2140 names should be treated as registers rather than as symbolic immediates.
2141
2142 Return TRUE on success; otherwise return FALSE. */
2143
static bfd_boolean
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
{
  expressionS exp;

  /* First parse it as a general immediate expression...  */
  if (! parse_immediate_expression (str, &exp, reg_type))
    return FALSE;

  /* ...then insist that it resolved to a constant.  */
  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return FALSE;
    }

  *val = exp.X_add_number;
  return TRUE;
}
2161
/* Convert the IEEE754 single-precision bit pattern IMM to the AArch64
   8-bit floating-point immediate encoding: the sign bit b[31] becomes
   b[7] and bits b[25:19] become b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */

  return sign_bit | exp_frac;
}
2168
2169 /* Return TRUE if the single-precision floating-point value encoded in IMM
2170 can be expressed in the AArch64 8-bit signed floating-point format with
2171 3-bit exponent and normalized 4 bits of precision; in other words, the
2172 floating-point value must be expressable as
2173 (+/-) n / 16 * power (2, r)
2174 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2175
2176 static bfd_boolean
2177 aarch64_imm_float_p (uint32_t imm)
2178 {
2179 /* If a single-precision floating-point value has the following bit
2180 pattern, it can be expressed in the AArch64 8-bit floating-point
2181 format:
2182
2183 3 32222222 2221111111111
2184 1 09876543 21098765432109876543210
2185 n Eeeeeexx xxxx0000000000000000000
2186
2187 where n, e and each x are either 0 or 1 independently, with
2188 E == ~ e. */
2189
2190 uint32_t pattern;
2191
2192 /* Prepare the pattern for 'Eeeeee'. */
2193 if (((imm >> 30) & 0x1) == 0)
2194 pattern = 0x3e000000;
2195 else
2196 pattern = 0x40000000;
2197
2198 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2199 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2200 }
2201
2202 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2203 as an IEEE float without any loss of precision. Store the value in
2204 *FPWORD if so. */
2205
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.

     *FPWORD is written only when the function returns TRUE.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack the surviving bits into single-precision layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2249
2250 /* Return true if we should treat OPERAND as a double-precision
2251 floating-point operand rather than a single-precision one. */
2252 static bfd_boolean
2253 double_precision_operand_p (const aarch64_opnd_info *operand)
2254 {
2255 /* Check for unsuffixed SVE registers, which are allowed
2256 for LDR and STR but not in instructions that require an
2257 immediate. We get better error messages if we arbitrarily
2258 pick one size, parse the immediate normally, and then
2259 report the match failure in the normal way. */
2260 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2261 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2262 }
2263
2264 /* Parse a floating-point immediate. Return TRUE on success and return the
2265 value in *IMMED in the format of IEEE754 single-precision encoding.
2266 *CCP points to the start of the string; DP_P is TRUE when the immediate
2267 is expected to be in double-precision (N.B. this only matters when
2268 hexadecimal representation is involved). REG_TYPE says which register
2269 names should be treated as registers rather than as symbolic immediates.
2270
2271 This routine accepts any IEEE float; it is up to the callers to reject
2272 invalid ones. */
2273
2274 static bfd_boolean
2275 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2276 aarch64_reg_type reg_type)
2277 {
2278 char *str = *ccp;
2279 char *fpnum;
2280 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2281 int found_fpchar = 0;
2282 int64_t val = 0;
2283 unsigned fpword = 0;
2284 bfd_boolean hex_p = FALSE;
2285
2286 skip_past_char (&str, '#');
2287
2288 fpnum = str;
2289 skip_whitespace (fpnum);
2290
2291 if (strncmp (fpnum, "0x", 2) == 0)
2292 {
2293 /* Support the hexadecimal representation of the IEEE754 encoding.
2294 Double-precision is expected when DP_P is TRUE, otherwise the
2295 representation should be in single-precision. */
2296 if (! parse_constant_immediate (&str, &val, reg_type))
2297 goto invalid_fp;
2298
2299 if (dp_p)
2300 {
2301 if (!can_convert_double_to_float (val, &fpword))
2302 goto invalid_fp;
2303 }
2304 else if ((uint64_t) val > 0xffffffff)
2305 goto invalid_fp;
2306 else
2307 fpword = val;
2308
2309 hex_p = TRUE;
2310 }
2311 else
2312 {
2313 if (reg_name_p (str, reg_type))
2314 {
2315 set_recoverable_error (_("immediate operand required"));
2316 return FALSE;
2317 }
2318
2319 /* We must not accidentally parse an integer as a floating-point number.
2320 Make sure that the value we parse is not an integer by checking for
2321 special characters '.' or 'e'. */
2322 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2323 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2324 {
2325 found_fpchar = 1;
2326 break;
2327 }
2328
2329 if (!found_fpchar)
2330 return FALSE;
2331 }
2332
2333 if (! hex_p)
2334 {
2335 int i;
2336
2337 if ((str = atof_ieee (str, 's', words)) == NULL)
2338 goto invalid_fp;
2339
2340 /* Our FP word must be 32 bits (single-precision FP). */
2341 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2342 {
2343 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2344 fpword |= words[i];
2345 }
2346 }
2347
2348 *immed = fpword;
2349 *ccp = str;
2350 return TRUE;
2351
2352 invalid_fp:
2353 set_fatal_syntax_error (_("invalid floating-point constant"));
2354 return FALSE;
2355 }
2356
2357 /* Less-generic immediate-value read function with the possibility of loading
2358 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2359 instructions.
2360
2361 To prevent the expression parser from pushing a register name into the
2362 symbol table as an undefined symbol, a check is firstly done to find
2363 out whether STR is a register of type REG_TYPE followed by a comma or
2364 the end of line. Return FALSE if STR is such a register. */
2365
2366 static bfd_boolean
2367 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2368 {
2369 char *ptr = *str;
2370
2371 if (reg_name_p (ptr, reg_type))
2372 {
2373 set_syntax_error (_("immediate operand required"));
2374 return FALSE;
2375 }
2376
2377 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2378
2379 if (inst.reloc.exp.X_op == O_constant)
2380 *imm = inst.reloc.exp.X_add_number;
2381
2382 *str = ptr;
2383
2384 return TRUE;
2385 }
2386
2387 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2388 if NEED_LIBOPCODES is non-zero, the fixup will need
2389 assistance from the libopcodes. */
2390
2391 static inline void
2392 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2393 const aarch64_opnd_info *operand,
2394 int need_libopcodes_p)
2395 {
2396 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2397 reloc->opnd = operand->type;
2398 if (need_libopcodes_p)
2399 reloc->need_libopcodes_p = 1;
2400 };
2401
2402 /* Return TRUE if the instruction needs to be fixed up later internally by
2403 the GAS; otherwise return FALSE. */
2404
2405 static inline bfd_boolean
2406 aarch64_gas_internal_fixup_p (void)
2407 {
2408 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2409 }
2410
2411 /* Assign the immediate value to the relevant field in *OPERAND if
2412 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2413 needs an internal fixup in a later stage.
2414 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2415 IMM.VALUE that may get assigned with the constant. */
2416 static inline void
2417 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2418 aarch64_opnd_info *operand,
2419 int addr_off_p,
2420 int need_libopcodes_p,
2421 int skip_p)
2422 {
2423 if (reloc->exp.X_op == O_constant)
2424 {
2425 if (addr_off_p)
2426 operand->addr.offset.imm = reloc->exp.X_add_number;
2427 else
2428 operand->imm.value = reloc->exp.X_add_number;
2429 reloc->type = BFD_RELOC_UNUSED;
2430 }
2431 else
2432 {
2433 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2434 /* Tell libopcodes to ignore this operand or not. This is helpful
2435 when one of the operands needs to be fixed up later but we need
2436 libopcodes to check the other operands. */
2437 operand->skip = skip_p;
2438 }
2439 }
2440
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier text, without the colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  /* The relocation to emit for each class of instruction the modifier
     may be attached to; 0 means the modifier is not allowed there.  */
  bfd_reloc_code_real_type adr_type;	/* ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2460
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2925
2926 /* Given the address of a pointer pointing to the textual name of a
2927 relocation as may appear in assembler source, attempt to find its
2928 details in reloc_table. The pointer will be updated to the character
2929 after the trailing colon. On failure, NULL will be returned;
2930 otherwise return the reloc_table_entry. */
2931
2932 static struct reloc_table_entry *
2933 find_reloc_table_entry (char **str)
2934 {
2935 unsigned int i;
2936 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2937 {
2938 int length = strlen (reloc_table[i].name);
2939
2940 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2941 && (*str)[length] == ':')
2942 {
2943 *str += (length + 1);
2944 return &reloc_table[i];
2945 }
2946 }
2947
2948 return NULL;
2949 }
2950
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2965
2966 /* Parse a <shift> operator on an AArch64 data processing instruction.
2967 Return TRUE on success; otherwise return FALSE. */
2968 static bfd_boolean
2969 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2970 {
2971 const struct aarch64_name_value_pair *shift_op;
2972 enum aarch64_modifier_kind kind;
2973 expressionS exp;
2974 int exp_has_prefix;
2975 char *s = *str;
2976 char *p = s;
2977
2978 for (p = *str; ISALPHA (*p); p++)
2979 ;
2980
2981 if (p == *str)
2982 {
2983 set_syntax_error (_("shift expression expected"));
2984 return FALSE;
2985 }
2986
2987 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2988
2989 if (shift_op == NULL)
2990 {
2991 set_syntax_error (_("shift operator expected"));
2992 return FALSE;
2993 }
2994
2995 kind = aarch64_get_operand_modifier (shift_op);
2996
2997 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2998 {
2999 set_syntax_error (_("invalid use of 'MSL'"));
3000 return FALSE;
3001 }
3002
3003 if (kind == AARCH64_MOD_MUL
3004 && mode != SHIFTED_MUL
3005 && mode != SHIFTED_MUL_VL)
3006 {
3007 set_syntax_error (_("invalid use of 'MUL'"));
3008 return FALSE;
3009 }
3010
3011 switch (mode)
3012 {
3013 case SHIFTED_LOGIC_IMM:
3014 if (aarch64_extend_operator_p (kind))
3015 {
3016 set_syntax_error (_("extending shift is not permitted"));
3017 return FALSE;
3018 }
3019 break;
3020
3021 case SHIFTED_ARITH_IMM:
3022 if (kind == AARCH64_MOD_ROR)
3023 {
3024 set_syntax_error (_("'ROR' shift is not permitted"));
3025 return FALSE;
3026 }
3027 break;
3028
3029 case SHIFTED_LSL:
3030 if (kind != AARCH64_MOD_LSL)
3031 {
3032 set_syntax_error (_("only 'LSL' shift is permitted"));
3033 return FALSE;
3034 }
3035 break;
3036
3037 case SHIFTED_MUL:
3038 if (kind != AARCH64_MOD_MUL)
3039 {
3040 set_syntax_error (_("only 'MUL' is permitted"));
3041 return FALSE;
3042 }
3043 break;
3044
3045 case SHIFTED_MUL_VL:
3046 /* "MUL VL" consists of two separate tokens. Require the first
3047 token to be "MUL" and look for a following "VL". */
3048 if (kind == AARCH64_MOD_MUL)
3049 {
3050 skip_whitespace (p);
3051 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3052 {
3053 p += 2;
3054 kind = AARCH64_MOD_MUL_VL;
3055 break;
3056 }
3057 }
3058 set_syntax_error (_("only 'MUL VL' is permitted"));
3059 return FALSE;
3060
3061 case SHIFTED_REG_OFFSET:
3062 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3063 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3064 {
3065 set_fatal_syntax_error
3066 (_("invalid shift for the register offset addressing mode"));
3067 return FALSE;
3068 }
3069 break;
3070
3071 case SHIFTED_LSL_MSL:
3072 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3073 {
3074 set_syntax_error (_("invalid shift operator"));
3075 return FALSE;
3076 }
3077 break;
3078
3079 default:
3080 abort ();
3081 }
3082
3083 /* Whitespace can appear here if the next thing is a bare digit. */
3084 skip_whitespace (p);
3085
3086 /* Parse shift amount. */
3087 exp_has_prefix = 0;
3088 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3089 exp.X_op = O_absent;
3090 else
3091 {
3092 if (is_immediate_prefix (*p))
3093 {
3094 p++;
3095 exp_has_prefix = 1;
3096 }
3097 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3098 }
3099 if (kind == AARCH64_MOD_MUL_VL)
3100 /* For consistency, give MUL VL the same shift amount as an implicit
3101 MUL #1. */
3102 operand->shifter.amount = 1;
3103 else if (exp.X_op == O_absent)
3104 {
3105 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3106 {
3107 set_syntax_error (_("missing shift amount"));
3108 return FALSE;
3109 }
3110 operand->shifter.amount = 0;
3111 }
3112 else if (exp.X_op != O_constant)
3113 {
3114 set_syntax_error (_("constant shift amount required"));
3115 return FALSE;
3116 }
3117 /* For parsing purposes, MUL #n has no inherent range. The range
3118 depends on the operand and will be checked by operand-specific
3119 routines. */
3120 else if (kind != AARCH64_MOD_MUL
3121 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3122 {
3123 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3124 return FALSE;
3125 }
3126 else
3127 {
3128 operand->shifter.amount = exp.X_add_number;
3129 operand->shifter.amount_present = 1;
3130 }
3131
3132 operand->shifter.operator_present = 1;
3133 operand->shifter.kind = kind;
3134
3135 *str = p;
3136 return TRUE;
3137 }
3138
3139 /* Parse a <shifter_operand> for a data processing instruction:
3140
3141 #<immediate>
3142 #<immediate>, LSL #imm
3143
3144 Validation of immediate operands is deferred to md_apply_fix.
3145
3146 Return TRUE on success; otherwise return FALSE. */
3147
3148 static bfd_boolean
3149 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3150 enum parse_shift_mode mode)
3151 {
3152 char *p;
3153
3154 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3155 return FALSE;
3156
3157 p = *str;
3158
3159 /* Accept an immediate expression. */
3160 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3161 return FALSE;
3162
3163 /* Accept optional LSL for arithmetic immediate values. */
3164 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3165 if (! parse_shift (&p, operand, SHIFTED_LSL))
3166 return FALSE;
3167
3168 /* Not accept any shifter for logical immediate values. */
3169 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3170 && parse_shift (&p, operand, mode))
3171 {
3172 set_syntax_error (_("unexpected shift operator"));
3173 return FALSE;
3174 }
3175
3176 *str = p;
3177 return TRUE;
3178 }
3179
3180 /* Parse a <shifter_operand> for a data processing instruction:
3181
3182 <Rm>
3183 <Rm>, <shift>
3184 #<immediate>
3185 #<immediate>, LSL #imm
3186
3187 where <shift> is handled by parse_shift above, and the last two
3188 cases are handled by the function above.
3189
3190 Validation of immediate operands is deferred to md_apply_fix.
3191
3192 Return TRUE on success; otherwise return FALSE. */
3193
3194 static bfd_boolean
3195 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3196 enum parse_shift_mode mode)
3197 {
3198 const reg_entry *reg;
3199 aarch64_opnd_qualifier_t qualifier;
3200 enum aarch64_operand_class opd_class
3201 = aarch64_get_operand_class (operand->type);
3202
3203 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3204 if (reg)
3205 {
3206 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3207 {
3208 set_syntax_error (_("unexpected register in the immediate operand"));
3209 return FALSE;
3210 }
3211
3212 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3213 {
3214 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3215 return FALSE;
3216 }
3217
3218 operand->reg.regno = reg->number;
3219 operand->qualifier = qualifier;
3220
3221 /* Accept optional shift operation on register. */
3222 if (! skip_past_comma (str))
3223 return TRUE;
3224
3225 if (! parse_shift (str, operand, mode))
3226 return FALSE;
3227
3228 return TRUE;
3229 }
3230 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3231 {
3232 set_syntax_error
3233 (_("integer register expected in the extended/shifted operand "
3234 "register"));
3235 return FALSE;
3236 }
3237
3238 /* We have a shifted immediate variable. */
3239 return parse_shifter_operand_imm (str, operand, mode);
3240 }
3241
3242 /* Return TRUE on success; return FALSE otherwise. */
3243
3244 static bfd_boolean
3245 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3246 enum parse_shift_mode mode)
3247 {
3248 char *p = *str;
3249
3250 /* Determine if we have the sequence of characters #: or just :
3251 coming next. If we do, then we check for a :rello: relocation
3252 modifier. If we don't, punt the whole lot to
3253 parse_shifter_operand. */
3254
3255 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3256 {
3257 struct reloc_table_entry *entry;
3258
3259 if (p[0] == '#')
3260 p += 2;
3261 else
3262 p++;
3263 *str = p;
3264
3265 /* Try to parse a relocation. Anything else is an error. */
3266 if (!(entry = find_reloc_table_entry (str)))
3267 {
3268 set_syntax_error (_("unknown relocation modifier"));
3269 return FALSE;
3270 }
3271
3272 if (entry->add_type == 0)
3273 {
3274 set_syntax_error
3275 (_("this relocation modifier is not allowed on this instruction"));
3276 return FALSE;
3277 }
3278
3279 /* Save str before we decompose it. */
3280 p = *str;
3281
3282 /* Next, we parse the expression. */
3283 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3284 return FALSE;
3285
3286 /* Record the relocation type (use the ADD variant here). */
3287 inst.reloc.type = entry->add_type;
3288 inst.reloc.pc_rel = entry->pc_rel;
3289
3290 /* If str is empty, we've reached the end, stop here. */
3291 if (**str == '\0')
3292 return TRUE;
3293
3294 /* Otherwise, we have a shifted reloc modifier, so rewind to
3295 recover the variable name and continue parsing for the shifter. */
3296 *str = p;
3297 return parse_shifter_operand_imm (str, operand, mode);
3298 }
3299
3300 return parse_shifter_operand (str, operand, mode);
3301 }
3302
3303 /* Parse all forms of an address expression. Information is written
3304 to *OPERAND and/or inst.reloc.
3305
3306 The A64 instruction set has the following addressing modes:
3307
3308 Offset
3309 [base] // in SIMD ld/st structure
3310 [base{,#0}] // in ld/st exclusive
3311 [base{,#imm}]
3312 [base,Xm{,LSL #imm}]
3313 [base,Xm,SXTX {#imm}]
3314 [base,Wm,(S|U)XTW {#imm}]
3315 Pre-indexed
3316 [base,#imm]!
3317 Post-indexed
3318 [base],#imm
3319 [base],Xm // in SIMD ld/st structure
3320 PC-relative (literal)
3321 label
3322 SVE:
3323 [base,#imm,MUL VL]
3324 [base,Zm.D{,LSL #imm}]
3325 [base,Zm.S,(S|U)XTW {#imm}]
3326 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3327 [Zn.S,#imm]
3328 [Zn.D,#imm]
3329 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3330 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3331 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3332
3333 (As a convenience, the notation "=immediate" is permitted in conjunction
3334 with the pc-relative literal load instructions to automatically place an
3335 immediate value or symbolic address in a nearby literal pool and generate
3336 a hidden label which references it.)
3337
3338 Upon a successful parsing, the address structure in *OPERAND will be
3339 filled in the following way:
3340
3341 .base_regno = <base>
3342 .offset.is_reg // 1 if the offset is a register
3343 .offset.imm = <imm>
3344 .offset.regno = <Rm>
3345
3346 For different addressing modes defined in the A64 ISA:
3347
3348 Offset
3349 .pcrel=0; .preind=1; .postind=0; .writeback=0
3350 Pre-indexed
3351 .pcrel=0; .preind=1; .postind=0; .writeback=1
3352 Post-indexed
3353 .pcrel=0; .preind=0; .postind=1; .writeback=1
3354 PC-relative (literal)
3355 .pcrel=1; .preind=1; .postind=0; .writeback=0
3356
3357 The shift/extension information, if any, will be stored in .shifter.
3358 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3359 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3360 corresponding register.
3361
3362 BASE_TYPE says which types of base register should be accepted and
3363 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3364 is the type of shifter that is allowed for immediate offsets,
3365 or SHIFTED_NONE if none.
3366
3367 In all other respects, it is the caller's responsibility to check
3368 for addressing modes not supported by the instruction, and to set
3369 inst.reloc.type. */
3370
3371 static bfd_boolean
3372 parse_address_main (char **str, aarch64_opnd_info *operand,
3373 aarch64_opnd_qualifier_t *base_qualifier,
3374 aarch64_opnd_qualifier_t *offset_qualifier,
3375 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3376 enum parse_shift_mode imm_shift_mode)
3377 {
3378 char *p = *str;
3379 const reg_entry *reg;
3380 expressionS *exp = &inst.reloc.exp;
3381
3382 *base_qualifier = AARCH64_OPND_QLF_NIL;
3383 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3384 if (! skip_past_char (&p, '['))
3385 {
3386 /* =immediate or label. */
3387 operand->addr.pcrel = 1;
3388 operand->addr.preind = 1;
3389
3390 /* #:<reloc_op>:<symbol> */
3391 skip_past_char (&p, '#');
3392 if (skip_past_char (&p, ':'))
3393 {
3394 bfd_reloc_code_real_type ty;
3395 struct reloc_table_entry *entry;
3396
3397 /* Try to parse a relocation modifier. Anything else is
3398 an error. */
3399 entry = find_reloc_table_entry (&p);
3400 if (! entry)
3401 {
3402 set_syntax_error (_("unknown relocation modifier"));
3403 return FALSE;
3404 }
3405
3406 switch (operand->type)
3407 {
3408 case AARCH64_OPND_ADDR_PCREL21:
3409 /* adr */
3410 ty = entry->adr_type;
3411 break;
3412
3413 default:
3414 ty = entry->ld_literal_type;
3415 break;
3416 }
3417
3418 if (ty == 0)
3419 {
3420 set_syntax_error
3421 (_("this relocation modifier is not allowed on this "
3422 "instruction"));
3423 return FALSE;
3424 }
3425
3426 /* #:<reloc_op>: */
3427 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3428 {
3429 set_syntax_error (_("invalid relocation expression"));
3430 return FALSE;
3431 }
3432
3433 /* #:<reloc_op>:<expr> */
3434 /* Record the relocation type. */
3435 inst.reloc.type = ty;
3436 inst.reloc.pc_rel = entry->pc_rel;
3437 }
3438 else
3439 {
3440
3441 if (skip_past_char (&p, '='))
3442 /* =immediate; need to generate the literal in the literal pool. */
3443 inst.gen_lit_pool = 1;
3444
3445 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3446 {
3447 set_syntax_error (_("invalid address"));
3448 return FALSE;
3449 }
3450 }
3451
3452 *str = p;
3453 return TRUE;
3454 }
3455
3456 /* [ */
3457
3458 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3459 if (!reg || !aarch64_check_reg_type (reg, base_type))
3460 {
3461 set_syntax_error (_(get_reg_expected_msg (base_type)));
3462 return FALSE;
3463 }
3464 operand->addr.base_regno = reg->number;
3465
3466 /* [Xn */
3467 if (skip_past_comma (&p))
3468 {
3469 /* [Xn, */
3470 operand->addr.preind = 1;
3471
3472 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3473 if (reg)
3474 {
3475 if (!aarch64_check_reg_type (reg, offset_type))
3476 {
3477 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3478 return FALSE;
3479 }
3480
3481 /* [Xn,Rm */
3482 operand->addr.offset.regno = reg->number;
3483 operand->addr.offset.is_reg = 1;
3484 /* Shifted index. */
3485 if (skip_past_comma (&p))
3486 {
3487 /* [Xn,Rm, */
3488 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3489 /* Use the diagnostics set in parse_shift, so not set new
3490 error message here. */
3491 return FALSE;
3492 }
3493 /* We only accept:
3494 [base,Xm{,LSL #imm}]
3495 [base,Xm,SXTX {#imm}]
3496 [base,Wm,(S|U)XTW {#imm}] */
3497 if (operand->shifter.kind == AARCH64_MOD_NONE
3498 || operand->shifter.kind == AARCH64_MOD_LSL
3499 || operand->shifter.kind == AARCH64_MOD_SXTX)
3500 {
3501 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3502 {
3503 set_syntax_error (_("invalid use of 32-bit register offset"));
3504 return FALSE;
3505 }
3506 if (aarch64_get_qualifier_esize (*base_qualifier)
3507 != aarch64_get_qualifier_esize (*offset_qualifier))
3508 {
3509 set_syntax_error (_("offset has different size from base"));
3510 return FALSE;
3511 }
3512 }
3513 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3514 {
3515 set_syntax_error (_("invalid use of 64-bit register offset"));
3516 return FALSE;
3517 }
3518 }
3519 else
3520 {
3521 /* [Xn,#:<reloc_op>:<symbol> */
3522 skip_past_char (&p, '#');
3523 if (skip_past_char (&p, ':'))
3524 {
3525 struct reloc_table_entry *entry;
3526
3527 /* Try to parse a relocation modifier. Anything else is
3528 an error. */
3529 if (!(entry = find_reloc_table_entry (&p)))
3530 {
3531 set_syntax_error (_("unknown relocation modifier"));
3532 return FALSE;
3533 }
3534
3535 if (entry->ldst_type == 0)
3536 {
3537 set_syntax_error
3538 (_("this relocation modifier is not allowed on this "
3539 "instruction"));
3540 return FALSE;
3541 }
3542
3543 /* [Xn,#:<reloc_op>: */
3544 /* We now have the group relocation table entry corresponding to
3545 the name in the assembler source. Next, we parse the
3546 expression. */
3547 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3548 {
3549 set_syntax_error (_("invalid relocation expression"));
3550 return FALSE;
3551 }
3552
3553 /* [Xn,#:<reloc_op>:<expr> */
3554 /* Record the load/store relocation type. */
3555 inst.reloc.type = entry->ldst_type;
3556 inst.reloc.pc_rel = entry->pc_rel;
3557 }
3558 else
3559 {
3560 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3561 {
3562 set_syntax_error (_("invalid expression in the address"));
3563 return FALSE;
3564 }
3565 /* [Xn,<expr> */
3566 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3567 /* [Xn,<expr>,<shifter> */
3568 if (! parse_shift (&p, operand, imm_shift_mode))
3569 return FALSE;
3570 }
3571 }
3572 }
3573
3574 if (! skip_past_char (&p, ']'))
3575 {
3576 set_syntax_error (_("']' expected"));
3577 return FALSE;
3578 }
3579
3580 if (skip_past_char (&p, '!'))
3581 {
3582 if (operand->addr.preind && operand->addr.offset.is_reg)
3583 {
3584 set_syntax_error (_("register offset not allowed in pre-indexed "
3585 "addressing mode"));
3586 return FALSE;
3587 }
3588 /* [Xn]! */
3589 operand->addr.writeback = 1;
3590 }
3591 else if (skip_past_comma (&p))
3592 {
3593 /* [Xn], */
3594 operand->addr.postind = 1;
3595 operand->addr.writeback = 1;
3596
3597 if (operand->addr.preind)
3598 {
3599 set_syntax_error (_("cannot combine pre- and post-indexing"));
3600 return FALSE;
3601 }
3602
3603 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3604 if (reg)
3605 {
3606 /* [Xn],Xm */
3607 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3608 {
3609 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3610 return FALSE;
3611 }
3612
3613 operand->addr.offset.regno = reg->number;
3614 operand->addr.offset.is_reg = 1;
3615 }
3616 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3617 {
3618 /* [Xn],#expr */
3619 set_syntax_error (_("invalid expression in the address"));
3620 return FALSE;
3621 }
3622 }
3623
3624 /* If at this point neither .preind nor .postind is set, we have a
3625 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3626 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3627 {
3628 if (operand->addr.writeback)
3629 {
3630 /* Reject [Rn]! */
3631 set_syntax_error (_("missing offset in the pre-indexed address"));
3632 return FALSE;
3633 }
3634 operand->addr.preind = 1;
3635 inst.reloc.exp.X_op = O_constant;
3636 inst.reloc.exp.X_add_number = 0;
3637 }
3638
3639 *str = p;
3640 return TRUE;
3641 }
3642
3643 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3644 on success. */
3645 static bfd_boolean
3646 parse_address (char **str, aarch64_opnd_info *operand)
3647 {
3648 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3649 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3650 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3651 }
3652
3653 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3654 The arguments have the same meaning as for parse_address_main.
3655 Return TRUE on success. */
3656 static bfd_boolean
3657 parse_sve_address (char **str, aarch64_opnd_info *operand,
3658 aarch64_opnd_qualifier_t *base_qualifier,
3659 aarch64_opnd_qualifier_t *offset_qualifier)
3660 {
3661 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3662 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3663 SHIFTED_MUL_VL);
3664 }
3665
3666 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3667 Return TRUE on success; otherwise return FALSE. */
3668 static bfd_boolean
3669 parse_half (char **str, int *internal_fixup_p)
3670 {
3671 char *p = *str;
3672
3673 skip_past_char (&p, '#');
3674
3675 gas_assert (internal_fixup_p);
3676 *internal_fixup_p = 0;
3677
3678 if (*p == ':')
3679 {
3680 struct reloc_table_entry *entry;
3681
3682 /* Try to parse a relocation. Anything else is an error. */
3683 ++p;
3684 if (!(entry = find_reloc_table_entry (&p)))
3685 {
3686 set_syntax_error (_("unknown relocation modifier"));
3687 return FALSE;
3688 }
3689
3690 if (entry->movw_type == 0)
3691 {
3692 set_syntax_error
3693 (_("this relocation modifier is not allowed on this instruction"));
3694 return FALSE;
3695 }
3696
3697 inst.reloc.type = entry->movw_type;
3698 }
3699 else
3700 *internal_fixup_p = 1;
3701
3702 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3703 return FALSE;
3704
3705 *str = p;
3706 return TRUE;
3707 }
3708
3709 /* Parse an operand for an ADRP instruction:
3710 ADRP <Xd>, <label>
3711 Return TRUE on success; otherwise return FALSE. */
3712
3713 static bfd_boolean
3714 parse_adrp (char **str)
3715 {
3716 char *p;
3717
3718 p = *str;
3719 if (*p == ':')
3720 {
3721 struct reloc_table_entry *entry;
3722
3723 /* Try to parse a relocation. Anything else is an error. */
3724 ++p;
3725 if (!(entry = find_reloc_table_entry (&p)))
3726 {
3727 set_syntax_error (_("unknown relocation modifier"));
3728 return FALSE;
3729 }
3730
3731 if (entry->adrp_type == 0)
3732 {
3733 set_syntax_error
3734 (_("this relocation modifier is not allowed on this instruction"));
3735 return FALSE;
3736 }
3737
3738 inst.reloc.type = entry->adrp_type;
3739 }
3740 else
3741 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3742
3743 inst.reloc.pc_rel = 1;
3744
3745 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3746 return FALSE;
3747
3748 *str = p;
3749 return TRUE;
3750 }
3751
3752 /* Miscellaneous. */
3753
3754 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3755 of SIZE tokens in which index I gives the token for field value I,
3756 or is null if field value I is invalid. REG_TYPE says which register
3757 names should be treated as registers rather than as symbolic immediates.
3758
3759 Return true on success, moving *STR past the operand and storing the
3760 field value in *VAL. */
3761
3762 static int
3763 parse_enum_string (char **str, int64_t *val, const char *const *array,
3764 size_t size, aarch64_reg_type reg_type)
3765 {
3766 expressionS exp;
3767 char *p, *q;
3768 size_t i;
3769
3770 /* Match C-like tokens. */
3771 p = q = *str;
3772 while (ISALNUM (*q))
3773 q++;
3774
3775 for (i = 0; i < size; ++i)
3776 if (array[i]
3777 && strncasecmp (array[i], p, q - p) == 0
3778 && array[i][q - p] == 0)
3779 {
3780 *val = i;
3781 *str = q;
3782 return TRUE;
3783 }
3784
3785 if (!parse_immediate_expression (&p, &exp, reg_type))
3786 return FALSE;
3787
3788 if (exp.X_op == O_constant
3789 && (uint64_t) exp.X_add_number < size)
3790 {
3791 *val = exp.X_add_number;
3792 *str = p;
3793 return TRUE;
3794 }
3795
3796 /* Use the default error for this operand. */
3797 return FALSE;
3798 }
3799
3800 /* Parse an option for a preload instruction. Returns the encoding for the
3801 option, or PARSE_FAIL. */
3802
3803 static int
3804 parse_pldop (char **str)
3805 {
3806 char *p, *q;
3807 const struct aarch64_name_value_pair *o;
3808
3809 p = q = *str;
3810 while (ISALNUM (*q))
3811 q++;
3812
3813 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3814 if (!o)
3815 return PARSE_FAIL;
3816
3817 *str = q;
3818 return o->value;
3819 }
3820
3821 /* Parse an option for a barrier instruction. Returns the encoding for the
3822 option, or PARSE_FAIL. */
3823
3824 static int
3825 parse_barrier (char **str)
3826 {
3827 char *p, *q;
3828 const asm_barrier_opt *o;
3829
3830 p = q = *str;
3831 while (ISALPHA (*q))
3832 q++;
3833
3834 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3835 if (!o)
3836 return PARSE_FAIL;
3837
3838 *str = q;
3839 return o->value;
3840 }
3841
3842 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3843 return 0 if successful. Otherwise return PARSE_FAIL. */
3844
3845 static int
3846 parse_barrier_psb (char **str,
3847 const struct aarch64_name_value_pair ** hint_opt)
3848 {
3849 char *p, *q;
3850 const struct aarch64_name_value_pair *o;
3851
3852 p = q = *str;
3853 while (ISALPHA (*q))
3854 q++;
3855
3856 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3857 if (!o)
3858 {
3859 set_fatal_syntax_error
3860 ( _("unknown or missing option to PSB"));
3861 return PARSE_FAIL;
3862 }
3863
3864 if (o->value != 0x11)
3865 {
3866 /* PSB only accepts option name 'CSYNC'. */
3867 set_syntax_error
3868 (_("the specified option is not accepted for PSB"));
3869 return PARSE_FAIL;
3870 }
3871
3872 *str = q;
3873 *hint_opt = o;
3874 return 0;
3875 }
3876
3877 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3878 Returns the encoding for the option, or PARSE_FAIL.
3879
3880 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3881 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3882
3883 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3884 field, otherwise as a system register.
3885 */
3886
3887 static int
3888 parse_sys_reg (char **str, struct hash_control *sys_regs,
3889 int imple_defined_p, int pstatefield_p)
3890 {
3891 char *p, *q;
3892 char buf[32];
3893 const aarch64_sys_reg *o;
3894 int value;
3895
3896 p = buf;
3897 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3898 if (p < buf + 31)
3899 *p++ = TOLOWER (*q);
3900 *p = '\0';
3901 /* Assert that BUF be large enough. */
3902 gas_assert (p - buf == q - *str);
3903
3904 o = hash_find (sys_regs, buf);
3905 if (!o)
3906 {
3907 if (!imple_defined_p)
3908 return PARSE_FAIL;
3909 else
3910 {
3911 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3912 unsigned int op0, op1, cn, cm, op2;
3913
3914 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3915 != 5)
3916 return PARSE_FAIL;
3917 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3918 return PARSE_FAIL;
3919 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3920 }
3921 }
3922 else
3923 {
3924 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3925 as_bad (_("selected processor does not support PSTATE field "
3926 "name '%s'"), buf);
3927 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3928 as_bad (_("selected processor does not support system register "
3929 "name '%s'"), buf);
3930 if (aarch64_sys_reg_deprecated_p (o))
3931 as_warn (_("system register name '%s' is deprecated and may be "
3932 "removed in a future release"), buf);
3933 value = o->value;
3934 }
3935
3936 *str = q;
3937 return value;
3938 }
3939
3940 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3941 for the option, or NULL. */
3942
3943 static const aarch64_sys_ins_reg *
3944 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3945 {
3946 char *p, *q;
3947 char buf[32];
3948 const aarch64_sys_ins_reg *o;
3949
3950 p = buf;
3951 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3952 if (p < buf + 31)
3953 *p++ = TOLOWER (*q);
3954 *p = '\0';
3955
3956 o = hash_find (sys_ins_regs, buf);
3957 if (!o)
3958 return NULL;
3959
3960 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3961 as_bad (_("selected processor does not support system register "
3962 "name '%s'"), buf);
3963
3964 *str = q;
3965 return o;
3966 }
3967 \f
/* Operand-parsing helper macros used by parse_operands.  Each assumes the
   enclosing function declares an input cursor `str' and a `failure' label
   to jump to on error; several also rely on the locals `val', `rtype',
   `reg', `qualifier', `info' and `imm_reg_type'.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into `val', or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its
   number and qualifier into `info', or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into `val' with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and fail if it lies outside
   [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic token (or in-range constant) from ARRAY into `val',
   or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4019 \f
/* Encode the 12-bit imm field of an add/sub immediate instruction:
   the value occupies bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int imm12_lsb = 10;
  return imm << imm12_lsb;
}
4026
/* Encode the shift-amount field of an add/sub immediate instruction:
   the count occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int shift_lsb = 22;
  return cnt << shift_lsb;
}
4033
4034
/* Encode the 21-bit imm field of an ADR instruction: the low two bits go
   to immlo (bits [30:29]) and the remaining nineteen bits to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		 /* [1:0]  -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	 /* [20:2] -> [23:5]  */
  return immlo | immhi;
}
4042
/* Encode the 16-bit immediate field of a move-wide immediate
   instruction: the value starts at bit 5 of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
4049
/* Encode the 26-bit offset of an unconditional branch: the offset is
   masked to 26 bits and placed at bit 0 of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
4056
/* Encode the 19-bit offset of a conditional branch or compare & branch:
   the offset is masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4063
/* Encode the 19-bit offset of a load-literal instruction: the offset is
   masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4070
/* Encode the 14-bit offset of a test & branch instruction: the offset is
   masked to 14 bits and placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
4077
/* Encode the 16-bit imm field of an SVC/HVC/SMC instruction: the value
   starts at bit 5 of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
4084
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   (the op bit) of the instruction word.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4091
/* Force a MOVZ/MOVN opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4097
/* Force a MOVZ/MOVN opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4103
4104 /* Overall per-instruction processing. */
4105
4106 /* We need to be able to fix up arbitrary expressions in some statements.
4107 This is so that we can handle symbols that are an arbitrary distance from
4108 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4109 which returns part of an address in a form which will be valid for
4110 a data instruction. We do this by pushing the expression into a symbol
4111 in the expr_section, and creating a fix for that. */
4112
4113 static fixS *
4114 fix_new_aarch64 (fragS * frag,
4115 int where,
4116 short int size, expressionS * exp, int pc_rel, int reloc)
4117 {
4118 fixS *new_fix;
4119
4120 switch (exp->X_op)
4121 {
4122 case O_constant:
4123 case O_symbol:
4124 case O_add:
4125 case O_subtract:
4126 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4127 break;
4128
4129 default:
4130 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4131 pc_rel, reloc);
4132 break;
4133 }
4134 return new_fix;
4135 }
4136 \f
4137 /* Diagnostics on operands errors. */
4138
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Human-readable names
   for enum aarch64_operand_error_kind; the entries must stay in the same
   order as the enum declaration.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4158
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The `lhs > rhs' comparison below relies on the enum values being
     declared in ascending order of severity; assert the full expected
     ordering so that a change to the enum cannot silently invalidate
     the comparison.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4179
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy up to the first 31 bytes (the stale comment used to say 15) and
     assume that the full mnemonic is included; snprintf guarantees
     NUL-termination, unlike strncpy.  */
  snprintf (mnemonic, sizeof (mnemonic), "%s", str);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name (*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to a truncated long name.  */
  if (ptr - mnemonic == sizeof (mnemonic) - 1)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4208
4209 static void
4210 reset_aarch64_instruction (aarch64_instruction *instruction)
4211 {
4212 memset (instruction, '\0', sizeof (aarch64_instruction));
4213 instruction->reloc.type = BFD_RELOC_UNUSED;
4214 }
4215
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tagged with the instruction template
   (opcode) against which it was diagnosed.  Records form a singly-linked
   list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template this error belongs to.  */
  aarch64_operand_error detail;		/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* List of operand error records with head and tail pointers; the tail
   pointer allows the whole list to be spliced onto the free list in
   constant time.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled rather than freed when a new line of
   assembly begins (see init_operand_error_report).  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4247
4248 /* Initialize the data structure that stores the operand mismatch
4249 information on assembling one line of the assembly code. */
4250 static void
4251 init_operand_error_report (void)
4252 {
4253 if (operand_error_report.head != NULL)
4254 {
4255 gas_assert (operand_error_report.tail != NULL);
4256 operand_error_report.tail->next = free_opnd_error_record_nodes;
4257 free_opnd_error_record_nodes = operand_error_report.head;
4258 operand_error_report.head = NULL;
4259 operand_error_report.tail = NULL;
4260 return;
4261 }
4262 gas_assert (operand_error_report.tail == NULL);
4263 }
4264
4265 /* Return TRUE if some operand error has been recorded during the
4266 parsing of the current assembly line using the opcode *OPCODE;
4267 otherwise return FALSE. */
4268 static inline bfd_boolean
4269 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4270 {
4271 operand_error_record *record = operand_error_report.head;
4272 return record && record->opcode == opcode;
4273 }
4274
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is the list head (records
     are always inserted at the head); otherwise RECORD is repointed to
     a freshly prepared node below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a node from the free list when
	 possible, otherwise allocate a new one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the detail of the record for OPCODE.  */
  record->detail = new_record->detail;
}
4326
4327 static inline void
4328 record_operand_error_info (const aarch64_opcode *opcode,
4329 aarch64_operand_error *error_info)
4330 {
4331 operand_error_record record;
4332 record.opcode = opcode;
4333 record.detail = *error_info;
4334 add_operand_error_record (&record);
4335 }
4336
4337 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4338 error message *ERROR, for operand IDX (count from 0). */
4339
4340 static void
4341 record_operand_error (const aarch64_opcode *opcode, int idx,
4342 enum aarch64_operand_error_kind kind,
4343 const char* error)
4344 {
4345 aarch64_operand_error info;
4346 memset(&info, 0, sizeof (info));
4347 info.index = idx;
4348 info.kind = kind;
4349 info.error = error;
4350 record_operand_error_info (opcode, &info);
4351 }
4352
4353 static void
4354 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4355 enum aarch64_operand_error_kind kind,
4356 const char* error, const int *extra_data)
4357 {
4358 aarch64_operand_error info;
4359 info.index = idx;
4360 info.kind = kind;
4361 info.error = error;
4362 info.data[0] = extra_data[0];
4363 info.data[1] = extra_data[1];
4364 info.data[2] = extra_data[2];
4365 record_operand_error_info (opcode, &info);
4366 }
4367
4368 static void
4369 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4370 const char* error, int lower_bound,
4371 int upper_bound)
4372 {
4373 int data[3] = {lower_bound, upper_bound, 0};
4374 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4375 error, data);
4376 }
4377
4378 /* Remove the operand error record for *OPCODE. */
4379 static void ATTRIBUTE_UNUSED
4380 remove_operand_error_record (const aarch64_opcode *opcode)
4381 {
4382 if (opcode_has_operand_error_p (opcode))
4383 {
4384 operand_error_record* record = operand_error_report.head;
4385 gas_assert (record != NULL && operand_error_report.tail != NULL);
4386 operand_error_report.head = record->next;
4387 record->next = free_opnd_error_record_nodes;
4388 free_opnd_error_record_nodes = record;
4389 if (operand_error_report.head == NULL)
4390 {
4391 gas_assert (operand_error_report.tail == record);
4392 operand_error_report.tail = NULL;
4393 }
4394 }
4395 }
4396
4397 /* Given the instruction in *INSTR, return the index of the best matched
4398 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4399
4400 Return -1 if there is no qualifier sequence; return the first match
4401 if there is multiple matches found. */
4402
4403 static int
4404 find_best_match (const aarch64_inst *instr,
4405 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4406 {
4407 int i, num_opnds, max_num_matched, idx;
4408
4409 num_opnds = aarch64_num_of_operands (instr->opcode);
4410 if (num_opnds == 0)
4411 {
4412 DEBUG_TRACE ("no operand");
4413 return -1;
4414 }
4415
4416 max_num_matched = 0;
4417 idx = 0;
4418
4419 /* For each pattern. */
4420 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4421 {
4422 int j, num_matched;
4423 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4424
4425 /* Most opcodes has much fewer patterns in the list. */
4426 if (empty_qualifier_sequence_p (qualifiers))
4427 {
4428 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4429 break;
4430 }
4431
4432 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4433 if (*qualifiers == instr->operands[j].qualifier)
4434 ++num_matched;
4435
4436 if (num_matched > max_num_matched)
4437 {
4438 max_num_matched = num_matched;
4439 idx = i;
4440 }
4441 }
4442
4443 DEBUG_TRACE ("return with %d", idx);
4444 return idx;
4445 }
4446
4447 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4448 corresponding operands in *INSTR. */
4449
4450 static inline void
4451 assign_qualifier_sequence (aarch64_inst *instr,
4452 const aarch64_opnd_qualifier_t *qualifiers)
4453 {
4454 int i = 0;
4455 int num_opnds = aarch64_num_of_operands (instr->opcode);
4456 gas_assert (num_opnds);
4457 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4458 instr->operands[i].qualifier = *qualifiers;
4459 }
4460
4461 /* Print operands for the diagnosis purpose. */
4462
4463 static void
4464 print_operands (char *buf, const aarch64_opcode *opcode,
4465 const aarch64_opnd_info *opnds)
4466 {
4467 int i;
4468
4469 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4470 {
4471 char str[128];
4472
4473 /* We regard the opcode operand info more, however we also look into
4474 the inst->operands to support the disassembling of the optional
4475 operand.
4476 The two operand code should be the same in all cases, apart from
4477 when the operand can be optional. */
4478 if (opcode->operands[i] == AARCH64_OPND_NIL
4479 || opnds[i].type == AARCH64_OPND_NIL)
4480 break;
4481
4482 /* Generate the operand string in STR. */
4483 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4484
4485 /* Delimiter. */
4486 if (str[0] != '\0')
4487 strcat (buf, i == 0 ? " " : ", ");
4488
4489 /* Append the operand string. */
4490 strcat (buf, str);
4491 }
4492 }
4493
/* Send a printf-style informational message to stderr, prefixed with the
   current file name (and line number when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4517
/* Output one operand error record.

   RECORD describes a single mismatch between the assembly line STR and
   one opcode template; STR is quoted back to the user in the message.
   For AARCH64_OPDE_INVALID_VARIANT, when verbose errors are enabled,
   also re-parse the line and print the corrected variant plus all other
   valid qualifier variants as hints.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX < 0 means the error is not tied to a particular operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here, since the operands were
	     already found to mismatch this opcode's qualifier lists.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  There is at least
	     one other variant when the best match is not the first entry,
	     or when a second qualifier sequence exists.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
	      detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s must be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      as_bad (_("immediate value must be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4692
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   STR is the assembly line being diagnosed and is quoted in the
   output.  Selection is two-pass: first find the most severe error
   kind present, then among records of that kind pick the one with the
   highest operand index (preferring a record with a prepared message
   on ties).  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* Only records of the most severe kind found above are candidates.  */
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4764 \f
/* Store the 32-bit instruction word INSN at BUF, always in
   little-endian byte order regardless of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4775
/* Read a 32-bit instruction word from BUF, which holds it in
   little-endian byte order, and return it as a host value.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
4784
/* Emit the instruction currently assembled in the global INST into the
   output frag, and create a relocation fix-up for it when one is
   pending.  NEW_INST, when non-NULL, is recorded on the fix-up so the
   instruction can be re-encoded later (e.g. in md_apply_fix).  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always stored little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fixups carry the operand code and flags so the
	     backend can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
4818
/* Link together opcodes of the same name.  Each mnemonic maps to a
   singly-linked chain of candidate opcode table entries.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same mnemonic.  */
};

typedef struct templates templates;
4828
4829 static templates *
4830 lookup_mnemonic (const char *start, int len)
4831 {
4832 templates *templ = NULL;
4833
4834 templ = hash_find_n (aarch64_ops_hsh, start, len);
4835 return templ;
4836 }
4837
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success, advance *STR past the mnemonic (and any ".cond" suffix)
   and return the matching template chain; on failure return 0.  Sets
   inst.cond as a side effect (COND_ALWAYS when no condition suffix is
   present).  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* The text after the first dot should name a condition code.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition: leave *STR at the dot so the caller can
	     diagnose the suffix.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* CONDNAME[16] holds up to 13 mnemonic bytes plus ".c"; no NUL is
	 needed because lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
4901
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Return AARCH64_OPND_QLF_NIL (after recording a syntax error via
   first_error) when the arrangement is absent or unrepresentable.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base (smallest-width) vector qualifier for each element type
     B, H, S, D, Q respectively.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Zeroing/merging types map directly to predicate qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    /* Vector element register.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total vector sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      /* NOTE: this relies on the qualifier enum listing the vector
	 arrangements contiguously from V_8B through V_1Q.  */
      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4966
4967 /* Process an optional operand that is found omitted from the assembly line.
4968 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4969 instruction's opcode entry while IDX is the index of this omitted operand.
4970 */
4971
4972 static void
4973 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4974 int idx, aarch64_opnd_info *operand)
4975 {
4976 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4977 gas_assert (optional_operand_p (opcode, idx));
4978 gas_assert (!operand->present);
4979
4980 switch (type)
4981 {
4982 case AARCH64_OPND_Rd:
4983 case AARCH64_OPND_Rn:
4984 case AARCH64_OPND_Rm:
4985 case AARCH64_OPND_Rt:
4986 case AARCH64_OPND_Rt2:
4987 case AARCH64_OPND_Rs:
4988 case AARCH64_OPND_Ra:
4989 case AARCH64_OPND_Rt_SYS:
4990 case AARCH64_OPND_Rd_SP:
4991 case AARCH64_OPND_Rn_SP:
4992 case AARCH64_OPND_Rm_SP:
4993 case AARCH64_OPND_Fd:
4994 case AARCH64_OPND_Fn:
4995 case AARCH64_OPND_Fm:
4996 case AARCH64_OPND_Fa:
4997 case AARCH64_OPND_Ft:
4998 case AARCH64_OPND_Ft2:
4999 case AARCH64_OPND_Sd:
5000 case AARCH64_OPND_Sn:
5001 case AARCH64_OPND_Sm:
5002 case AARCH64_OPND_Va:
5003 case AARCH64_OPND_Vd:
5004 case AARCH64_OPND_Vn:
5005 case AARCH64_OPND_Vm:
5006 case AARCH64_OPND_VdD1:
5007 case AARCH64_OPND_VnD1:
5008 operand->reg.regno = default_value;
5009 break;
5010
5011 case AARCH64_OPND_Ed:
5012 case AARCH64_OPND_En:
5013 case AARCH64_OPND_Em:
5014 case AARCH64_OPND_SM3_IMM2:
5015 operand->reglane.regno = default_value;
5016 break;
5017
5018 case AARCH64_OPND_IDX:
5019 case AARCH64_OPND_BIT_NUM:
5020 case AARCH64_OPND_IMMR:
5021 case AARCH64_OPND_IMMS:
5022 case AARCH64_OPND_SHLL_IMM:
5023 case AARCH64_OPND_IMM_VLSL:
5024 case AARCH64_OPND_IMM_VLSR:
5025 case AARCH64_OPND_CCMP_IMM:
5026 case AARCH64_OPND_FBITS:
5027 case AARCH64_OPND_UIMM4:
5028 case AARCH64_OPND_UIMM3_OP1:
5029 case AARCH64_OPND_UIMM3_OP2:
5030 case AARCH64_OPND_IMM:
5031 case AARCH64_OPND_IMM_2:
5032 case AARCH64_OPND_WIDTH:
5033 case AARCH64_OPND_UIMM7:
5034 case AARCH64_OPND_NZCV:
5035 case AARCH64_OPND_SVE_PATTERN:
5036 case AARCH64_OPND_SVE_PRFOP:
5037 operand->imm.value = default_value;
5038 break;
5039
5040 case AARCH64_OPND_SVE_PATTERN_SCALED:
5041 operand->imm.value = default_value;
5042 operand->shifter.kind = AARCH64_MOD_MUL;
5043 operand->shifter.amount = 1;
5044 break;
5045
5046 case AARCH64_OPND_EXCEPTION:
5047 inst.reloc.type = BFD_RELOC_UNUSED;
5048 break;
5049
5050 case AARCH64_OPND_BARRIER_ISB:
5051 operand->barrier = aarch64_barrier_options + default_value;
5052
5053 default:
5054 break;
5055 }
5056 }
5057
/* Process the relocation type for move wide instructions.

   Derive the implicit shift amount (0, 16, 32 or 48) for operand 1
   from the relocation type recorded in inst.reloc.type, rejecting
   relocation/instruction combinations that are invalid (signed "G*_S"
   relocations on MOVK, G2/G3 relocations on 32-bit registers).

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed (_S) relocations choose between MOVZ/MOVN and so make
     no sense with MOVK, which keeps the rest of the register.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 group: no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: shift by 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: shift by 32; only meaningful for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* G3: shift by 48; only meaningful for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5148
/* A primitive log calculator.

   Return log2 of SIZE for the power-of-two access sizes 1, 2, 4, 8 and
   16 bytes.  Any other SIZE is a caller bug and trips an assertion.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks entries for
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Guard SIZE == 0 too: ls[size - 1] would otherwise read out of
     bounds (undefined behaviour) before any assertion fires.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5164
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real type is selected by the pseudo type (row) and the log2 of
   the transfer size implied by operand 1's qualifier (column).  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: LDST_LO12, TLSLD_LDST_DTPREL_LO12, TLSLD_LDST_DTPREL_LO12_NC.
     Columns: 8-, 16-, 32-, 64- and 128-bit accesses.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 is unqualified, infer its qualifier from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS DTPREL rows have no 128-bit entry.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5223
5224 /* Check whether a register list REGINFO is valid. The registers must be
5225 numbered in increasing order (modulo 32), in increments of one or two.
5226
5227 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5228 increments of two.
5229
5230 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5231
5232 static bfd_boolean
5233 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5234 {
5235 uint32_t i, nb_regs, prev_regno, incr;
5236
5237 nb_regs = 1 + (reginfo & 0x3);
5238 reginfo >>= 2;
5239 prev_regno = reginfo & 0x1f;
5240 incr = accept_alternate ? 2 : 1;
5241
5242 for (i = 1; i < nb_regs; ++i)
5243 {
5244 uint32_t curr_regno;
5245 reginfo >>= 5;
5246 curr_regno = reginfo & 0x1f;
5247 if (curr_regno != ((prev_regno + incr) & 0x1f))
5248 return FALSE;
5249 prev_regno = curr_regno;
5250 }
5251
5252 return TRUE;
5253 }
5254
5255 /* Generic instruction operand parser. This does no encoding and no
5256 semantic validation; it merely squirrels values away in the inst
5257 structure. Returns TRUE or FALSE depending on whether the
5258 specified grammar matched. */
5259
5260 static bfd_boolean
5261 parse_operands (char *str, const aarch64_opcode *opcode)
5262 {
5263 int i;
5264 char *backtrack_pos = 0;
5265 const enum aarch64_opnd *operands = opcode->operands;
5266 aarch64_reg_type imm_reg_type;
5267
5268 clear_error ();
5269 skip_whitespace (str);
5270
5271 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5272 imm_reg_type = REG_TYPE_R_Z_BHSDQ_VZP;
5273 else
5274 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5275
5276 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5277 {
5278 int64_t val;
5279 const reg_entry *reg;
5280 int comma_skipped_p = 0;
5281 aarch64_reg_type rtype;
5282 struct vector_type_el vectype;
5283 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5284 aarch64_opnd_info *info = &inst.base.operands[i];
5285 aarch64_reg_type reg_type;
5286
5287 DEBUG_TRACE ("parse operand %d", i);
5288
5289 /* Assign the operand code. */
5290 info->type = operands[i];
5291
5292 if (optional_operand_p (opcode, i))
5293 {
5294 /* Remember where we are in case we need to backtrack. */
5295 gas_assert (!backtrack_pos);
5296 backtrack_pos = str;
5297 }
5298
5299 /* Expect comma between operands; the backtrack mechanism will take
5300 care of cases of omitted optional operand. */
5301 if (i > 0 && ! skip_past_char (&str, ','))
5302 {
5303 set_syntax_error (_("comma expected between operands"));
5304 goto failure;
5305 }
5306 else
5307 comma_skipped_p = 1;
5308
5309 switch (operands[i])
5310 {
5311 case AARCH64_OPND_Rd:
5312 case AARCH64_OPND_Rn:
5313 case AARCH64_OPND_Rm:
5314 case AARCH64_OPND_Rt:
5315 case AARCH64_OPND_Rt2:
5316 case AARCH64_OPND_Rs:
5317 case AARCH64_OPND_Ra:
5318 case AARCH64_OPND_Rt_SYS:
5319 case AARCH64_OPND_PAIRREG:
5320 case AARCH64_OPND_SVE_Rm:
5321 po_int_reg_or_fail (REG_TYPE_R_Z);
5322 break;
5323
5324 case AARCH64_OPND_Rd_SP:
5325 case AARCH64_OPND_Rn_SP:
5326 case AARCH64_OPND_SVE_Rn_SP:
5327 case AARCH64_OPND_Rm_SP:
5328 po_int_reg_or_fail (REG_TYPE_R_SP);
5329 break;
5330
5331 case AARCH64_OPND_Rm_EXT:
5332 case AARCH64_OPND_Rm_SFT:
5333 po_misc_or_fail (parse_shifter_operand
5334 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5335 ? SHIFTED_ARITH_IMM
5336 : SHIFTED_LOGIC_IMM)));
5337 if (!info->shifter.operator_present)
5338 {
5339 /* Default to LSL if not present. Libopcodes prefers shifter
5340 kind to be explicit. */
5341 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5342 info->shifter.kind = AARCH64_MOD_LSL;
5343 /* For Rm_EXT, libopcodes will carry out further check on whether
5344 or not stack pointer is used in the instruction (Recall that
5345 "the extend operator is not optional unless at least one of
5346 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5347 }
5348 break;
5349
5350 case AARCH64_OPND_Fd:
5351 case AARCH64_OPND_Fn:
5352 case AARCH64_OPND_Fm:
5353 case AARCH64_OPND_Fa:
5354 case AARCH64_OPND_Ft:
5355 case AARCH64_OPND_Ft2:
5356 case AARCH64_OPND_Sd:
5357 case AARCH64_OPND_Sn:
5358 case AARCH64_OPND_Sm:
5359 case AARCH64_OPND_SVE_VZn:
5360 case AARCH64_OPND_SVE_Vd:
5361 case AARCH64_OPND_SVE_Vm:
5362 case AARCH64_OPND_SVE_Vn:
5363 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5364 if (val == PARSE_FAIL)
5365 {
5366 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5367 goto failure;
5368 }
5369 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5370
5371 info->reg.regno = val;
5372 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5373 break;
5374
5375 case AARCH64_OPND_SVE_Pd:
5376 case AARCH64_OPND_SVE_Pg3:
5377 case AARCH64_OPND_SVE_Pg4_5:
5378 case AARCH64_OPND_SVE_Pg4_10:
5379 case AARCH64_OPND_SVE_Pg4_16:
5380 case AARCH64_OPND_SVE_Pm:
5381 case AARCH64_OPND_SVE_Pn:
5382 case AARCH64_OPND_SVE_Pt:
5383 reg_type = REG_TYPE_PN;
5384 goto vector_reg;
5385
5386 case AARCH64_OPND_SVE_Za_5:
5387 case AARCH64_OPND_SVE_Za_16:
5388 case AARCH64_OPND_SVE_Zd:
5389 case AARCH64_OPND_SVE_Zm_5:
5390 case AARCH64_OPND_SVE_Zm_16:
5391 case AARCH64_OPND_SVE_Zn:
5392 case AARCH64_OPND_SVE_Zt:
5393 reg_type = REG_TYPE_ZN;
5394 goto vector_reg;
5395
5396 case AARCH64_OPND_Va:
5397 case AARCH64_OPND_Vd:
5398 case AARCH64_OPND_Vn:
5399 case AARCH64_OPND_Vm:
5400 reg_type = REG_TYPE_VN;
5401 vector_reg:
5402 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5403 if (val == PARSE_FAIL)
5404 {
5405 first_error (_(get_reg_expected_msg (reg_type)));
5406 goto failure;
5407 }
5408 if (vectype.defined & NTA_HASINDEX)
5409 goto failure;
5410
5411 info->reg.regno = val;
5412 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5413 && vectype.type == NT_invtype)
5414 /* Unqualified Pn and Zn registers are allowed in certain
5415 contexts. Rely on F_STRICT qualifier checking to catch
5416 invalid uses. */
5417 info->qualifier = AARCH64_OPND_QLF_NIL;
5418 else
5419 {
5420 info->qualifier = vectype_to_qualifier (&vectype);
5421 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5422 goto failure;
5423 }
5424 break;
5425
5426 case AARCH64_OPND_VdD1:
5427 case AARCH64_OPND_VnD1:
5428 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5429 if (val == PARSE_FAIL)
5430 {
5431 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5432 goto failure;
5433 }
5434 if (vectype.type != NT_d || vectype.index != 1)
5435 {
5436 set_fatal_syntax_error
5437 (_("the top half of a 128-bit FP/SIMD register is expected"));
5438 goto failure;
5439 }
5440 info->reg.regno = val;
5441 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5442 here; it is correct for the purpose of encoding/decoding since
5443 only the register number is explicitly encoded in the related
5444 instructions, although this appears a bit hacky. */
5445 info->qualifier = AARCH64_OPND_QLF_S_D;
5446 break;
5447
5448 case AARCH64_OPND_SVE_Zm3_INDEX:
5449 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5450 case AARCH64_OPND_SVE_Zm4_INDEX:
5451 case AARCH64_OPND_SVE_Zn_INDEX:
5452 reg_type = REG_TYPE_ZN;
5453 goto vector_reg_index;
5454
5455 case AARCH64_OPND_Ed:
5456 case AARCH64_OPND_En:
5457 case AARCH64_OPND_Em:
5458 case AARCH64_OPND_SM3_IMM2:
5459 reg_type = REG_TYPE_VN;
5460 vector_reg_index:
5461 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5462 if (val == PARSE_FAIL)
5463 {
5464 first_error (_(get_reg_expected_msg (reg_type)));
5465 goto failure;
5466 }
5467 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5468 goto failure;
5469
5470 info->reglane.regno = val;
5471 info->reglane.index = vectype.index;
5472 info->qualifier = vectype_to_qualifier (&vectype);
5473 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5474 goto failure;
5475 break;
5476
5477 case AARCH64_OPND_SVE_ZnxN:
5478 case AARCH64_OPND_SVE_ZtxN:
5479 reg_type = REG_TYPE_ZN;
5480 goto vector_reg_list;
5481
5482 case AARCH64_OPND_LVn:
5483 case AARCH64_OPND_LVt:
5484 case AARCH64_OPND_LVt_AL:
5485 case AARCH64_OPND_LEt:
5486 reg_type = REG_TYPE_VN;
5487 vector_reg_list:
5488 if (reg_type == REG_TYPE_ZN
5489 && get_opcode_dependent_value (opcode) == 1
5490 && *str != '{')
5491 {
5492 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5493 if (val == PARSE_FAIL)
5494 {
5495 first_error (_(get_reg_expected_msg (reg_type)));
5496 goto failure;
5497 }
5498 info->reglist.first_regno = val;
5499 info->reglist.num_regs = 1;
5500 }
5501 else
5502 {
5503 val = parse_vector_reg_list (&str, reg_type, &vectype);
5504 if (val == PARSE_FAIL)
5505 goto failure;
5506 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5507 {
5508 set_fatal_syntax_error (_("invalid register list"));
5509 goto failure;
5510 }
5511 info->reglist.first_regno = (val >> 2) & 0x1f;
5512 info->reglist.num_regs = (val & 0x3) + 1;
5513 }
5514 if (operands[i] == AARCH64_OPND_LEt)
5515 {
5516 if (!(vectype.defined & NTA_HASINDEX))
5517 goto failure;
5518 info->reglist.has_index = 1;
5519 info->reglist.index = vectype.index;
5520 }
5521 else
5522 {
5523 if (vectype.defined & NTA_HASINDEX)
5524 goto failure;
5525 if (!(vectype.defined & NTA_HASTYPE))
5526 {
5527 if (reg_type == REG_TYPE_ZN)
5528 set_fatal_syntax_error (_("missing type suffix"));
5529 goto failure;
5530 }
5531 }
5532 info->qualifier = vectype_to_qualifier (&vectype);
5533 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5534 goto failure;
5535 break;
5536
5537 case AARCH64_OPND_CRn:
5538 case AARCH64_OPND_CRm:
5539 {
5540 char prefix = *(str++);
5541 if (prefix != 'c' && prefix != 'C')
5542 goto failure;
5543
5544 po_imm_nc_or_fail ();
5545 if (val > 15)
5546 {
5547 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5548 goto failure;
5549 }
5550 info->qualifier = AARCH64_OPND_QLF_CR;
5551 info->imm.value = val;
5552 break;
5553 }
5554
5555 case AARCH64_OPND_SHLL_IMM:
5556 case AARCH64_OPND_IMM_VLSR:
5557 po_imm_or_fail (1, 64);
5558 info->imm.value = val;
5559 break;
5560
5561 case AARCH64_OPND_CCMP_IMM:
5562 case AARCH64_OPND_SIMM5:
5563 case AARCH64_OPND_FBITS:
5564 case AARCH64_OPND_UIMM4:
5565 case AARCH64_OPND_UIMM3_OP1:
5566 case AARCH64_OPND_UIMM3_OP2:
5567 case AARCH64_OPND_IMM_VLSL:
5568 case AARCH64_OPND_IMM:
5569 case AARCH64_OPND_IMM_2:
5570 case AARCH64_OPND_WIDTH:
5571 case AARCH64_OPND_SVE_INV_LIMM:
5572 case AARCH64_OPND_SVE_LIMM:
5573 case AARCH64_OPND_SVE_LIMM_MOV:
5574 case AARCH64_OPND_SVE_SHLIMM_PRED:
5575 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5576 case AARCH64_OPND_SVE_SHRIMM_PRED:
5577 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5578 case AARCH64_OPND_SVE_SIMM5:
5579 case AARCH64_OPND_SVE_SIMM5B:
5580 case AARCH64_OPND_SVE_SIMM6:
5581 case AARCH64_OPND_SVE_SIMM8:
5582 case AARCH64_OPND_SVE_UIMM3:
5583 case AARCH64_OPND_SVE_UIMM7:
5584 case AARCH64_OPND_SVE_UIMM8:
5585 case AARCH64_OPND_SVE_UIMM8_53:
5586 case AARCH64_OPND_IMM_ROT1:
5587 case AARCH64_OPND_IMM_ROT2:
5588 case AARCH64_OPND_IMM_ROT3:
5589 case AARCH64_OPND_SVE_IMM_ROT1:
5590 case AARCH64_OPND_SVE_IMM_ROT2:
5591 po_imm_nc_or_fail ();
5592 info->imm.value = val;
5593 break;
5594
5595 case AARCH64_OPND_SVE_AIMM:
5596 case AARCH64_OPND_SVE_ASIMM:
5597 po_imm_nc_or_fail ();
5598 info->imm.value = val;
5599 skip_whitespace (str);
5600 if (skip_past_comma (&str))
5601 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5602 else
5603 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5604 break;
5605
5606 case AARCH64_OPND_SVE_PATTERN:
5607 po_enum_or_fail (aarch64_sve_pattern_array);
5608 info->imm.value = val;
5609 break;
5610
5611 case AARCH64_OPND_SVE_PATTERN_SCALED:
5612 po_enum_or_fail (aarch64_sve_pattern_array);
5613 info->imm.value = val;
5614 if (skip_past_comma (&str)
5615 && !parse_shift (&str, info, SHIFTED_MUL))
5616 goto failure;
5617 if (!info->shifter.operator_present)
5618 {
5619 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5620 info->shifter.kind = AARCH64_MOD_MUL;
5621 info->shifter.amount = 1;
5622 }
5623 break;
5624
5625 case AARCH64_OPND_SVE_PRFOP:
5626 po_enum_or_fail (aarch64_sve_prfop_array);
5627 info->imm.value = val;
5628 break;
5629
5630 case AARCH64_OPND_UIMM7:
5631 po_imm_or_fail (0, 127);
5632 info->imm.value = val;
5633 break;
5634
5635 case AARCH64_OPND_IDX:
5636 case AARCH64_OPND_MASK:
5637 case AARCH64_OPND_BIT_NUM:
5638 case AARCH64_OPND_IMMR:
5639 case AARCH64_OPND_IMMS:
5640 po_imm_or_fail (0, 63);
5641 info->imm.value = val;
5642 break;
5643
5644 case AARCH64_OPND_IMM0:
5645 po_imm_nc_or_fail ();
5646 if (val != 0)
5647 {
5648 set_fatal_syntax_error (_("immediate zero expected"));
5649 goto failure;
5650 }
5651 info->imm.value = 0;
5652 break;
5653
5654 case AARCH64_OPND_FPIMM0:
5655 {
5656 int qfloat;
5657 bfd_boolean res1 = FALSE, res2 = FALSE;
5658 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5659 it is probably not worth the effort to support it. */
5660 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5661 imm_reg_type))
5662 && (error_p ()
5663 || !(res2 = parse_constant_immediate (&str, &val,
5664 imm_reg_type))))
5665 goto failure;
5666 if ((res1 && qfloat == 0) || (res2 && val == 0))
5667 {
5668 info->imm.value = 0;
5669 info->imm.is_fp = 1;
5670 break;
5671 }
5672 set_fatal_syntax_error (_("immediate zero expected"));
5673 goto failure;
5674 }
5675
5676 case AARCH64_OPND_IMM_MOV:
5677 {
5678 char *saved = str;
5679 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5680 reg_name_p (str, REG_TYPE_VN))
5681 goto failure;
5682 str = saved;
5683 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5684 GE_OPT_PREFIX, 1));
5685 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5686 later. fix_mov_imm_insn will try to determine a machine
5687 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5688 message if the immediate cannot be moved by a single
5689 instruction. */
5690 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5691 inst.base.operands[i].skip = 1;
5692 }
5693 break;
5694
5695 case AARCH64_OPND_SIMD_IMM:
5696 case AARCH64_OPND_SIMD_IMM_SFT:
5697 if (! parse_big_immediate (&str, &val, imm_reg_type))
5698 goto failure;
5699 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5700 /* addr_off_p */ 0,
5701 /* need_libopcodes_p */ 1,
5702 /* skip_p */ 1);
5703 /* Parse shift.
5704 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5705 shift, we don't check it here; we leave the checking to
5706 the libopcodes (operand_general_constraint_met_p). By
5707 doing this, we achieve better diagnostics. */
5708 if (skip_past_comma (&str)
5709 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5710 goto failure;
5711 if (!info->shifter.operator_present
5712 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5713 {
5714 /* Default to LSL if not present. Libopcodes prefers shifter
5715 kind to be explicit. */
5716 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5717 info->shifter.kind = AARCH64_MOD_LSL;
5718 }
5719 break;
5720
5721 case AARCH64_OPND_FPIMM:
5722 case AARCH64_OPND_SIMD_FPIMM:
5723 case AARCH64_OPND_SVE_FPIMM8:
5724 {
5725 int qfloat;
5726 bfd_boolean dp_p;
5727
5728 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5729 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5730 || !aarch64_imm_float_p (qfloat))
5731 {
5732 if (!error_p ())
5733 set_fatal_syntax_error (_("invalid floating-point"
5734 " constant"));
5735 goto failure;
5736 }
5737 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5738 inst.base.operands[i].imm.is_fp = 1;
5739 }
5740 break;
5741
5742 case AARCH64_OPND_SVE_I1_HALF_ONE:
5743 case AARCH64_OPND_SVE_I1_HALF_TWO:
5744 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5745 {
5746 int qfloat;
5747 bfd_boolean dp_p;
5748
5749 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5750 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5751 {
5752 if (!error_p ())
5753 set_fatal_syntax_error (_("invalid floating-point"
5754 " constant"));
5755 goto failure;
5756 }
5757 inst.base.operands[i].imm.value = qfloat;
5758 inst.base.operands[i].imm.is_fp = 1;
5759 }
5760 break;
5761
5762 case AARCH64_OPND_LIMM:
5763 po_misc_or_fail (parse_shifter_operand (&str, info,
5764 SHIFTED_LOGIC_IMM));
5765 if (info->shifter.operator_present)
5766 {
5767 set_fatal_syntax_error
5768 (_("shift not allowed for bitmask immediate"));
5769 goto failure;
5770 }
5771 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5772 /* addr_off_p */ 0,
5773 /* need_libopcodes_p */ 1,
5774 /* skip_p */ 1);
5775 break;
5776
5777 case AARCH64_OPND_AIMM:
5778 if (opcode->op == OP_ADD)
5779 /* ADD may have relocation types. */
5780 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5781 SHIFTED_ARITH_IMM));
5782 else
5783 po_misc_or_fail (parse_shifter_operand (&str, info,
5784 SHIFTED_ARITH_IMM));
5785 switch (inst.reloc.type)
5786 {
5787 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5788 info->shifter.amount = 12;
5789 break;
5790 case BFD_RELOC_UNUSED:
5791 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5792 if (info->shifter.kind != AARCH64_MOD_NONE)
5793 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5794 inst.reloc.pc_rel = 0;
5795 break;
5796 default:
5797 break;
5798 }
5799 info->imm.value = 0;
5800 if (!info->shifter.operator_present)
5801 {
5802 /* Default to LSL if not present. Libopcodes prefers shifter
5803 kind to be explicit. */
5804 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5805 info->shifter.kind = AARCH64_MOD_LSL;
5806 }
5807 break;
5808
5809 case AARCH64_OPND_HALF:
5810 {
5811 /* #<imm16> or relocation. */
5812 int internal_fixup_p;
5813 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5814 if (internal_fixup_p)
5815 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5816 skip_whitespace (str);
5817 if (skip_past_comma (&str))
5818 {
5819 /* {, LSL #<shift>} */
5820 if (! aarch64_gas_internal_fixup_p ())
5821 {
5822 set_fatal_syntax_error (_("can't mix relocation modifier "
5823 "with explicit shift"));
5824 goto failure;
5825 }
5826 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5827 }
5828 else
5829 inst.base.operands[i].shifter.amount = 0;
5830 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5831 inst.base.operands[i].imm.value = 0;
5832 if (! process_movw_reloc_info ())
5833 goto failure;
5834 }
5835 break;
5836
5837 case AARCH64_OPND_EXCEPTION:
5838 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5839 imm_reg_type));
5840 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5841 /* addr_off_p */ 0,
5842 /* need_libopcodes_p */ 0,
5843 /* skip_p */ 1);
5844 break;
5845
5846 case AARCH64_OPND_NZCV:
5847 {
5848 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5849 if (nzcv != NULL)
5850 {
5851 str += 4;
5852 info->imm.value = nzcv->value;
5853 break;
5854 }
5855 po_imm_or_fail (0, 15);
5856 info->imm.value = val;
5857 }
5858 break;
5859
5860 case AARCH64_OPND_COND:
5861 case AARCH64_OPND_COND1:
5862 {
5863 char *start = str;
5864 do
5865 str++;
5866 while (ISALPHA (*str));
5867 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5868 if (info->cond == NULL)
5869 {
5870 set_syntax_error (_("invalid condition"));
5871 goto failure;
5872 }
5873 else if (operands[i] == AARCH64_OPND_COND1
5874 && (info->cond->value & 0xe) == 0xe)
5875 {
5876 /* Do not allow AL or NV. */
5877 set_default_error ();
5878 goto failure;
5879 }
5880 }
5881 break;
5882
5883 case AARCH64_OPND_ADDR_ADRP:
5884 po_misc_or_fail (parse_adrp (&str));
5885 /* Clear the value as operand needs to be relocated. */
5886 info->imm.value = 0;
5887 break;
5888
5889 case AARCH64_OPND_ADDR_PCREL14:
5890 case AARCH64_OPND_ADDR_PCREL19:
5891 case AARCH64_OPND_ADDR_PCREL21:
5892 case AARCH64_OPND_ADDR_PCREL26:
5893 po_misc_or_fail (parse_address (&str, info));
5894 if (!info->addr.pcrel)
5895 {
5896 set_syntax_error (_("invalid pc-relative address"));
5897 goto failure;
5898 }
5899 if (inst.gen_lit_pool
5900 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5901 {
5902 /* Only permit "=value" in the literal load instructions.
5903 The literal will be generated by programmer_friendly_fixup. */
5904 set_syntax_error (_("invalid use of \"=immediate\""));
5905 goto failure;
5906 }
5907 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5908 {
5909 set_syntax_error (_("unrecognized relocation suffix"));
5910 goto failure;
5911 }
5912 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5913 {
5914 info->imm.value = inst.reloc.exp.X_add_number;
5915 inst.reloc.type = BFD_RELOC_UNUSED;
5916 }
5917 else
5918 {
5919 info->imm.value = 0;
5920 if (inst.reloc.type == BFD_RELOC_UNUSED)
5921 switch (opcode->iclass)
5922 {
5923 case compbranch:
5924 case condbranch:
5925 /* e.g. CBZ or B.COND */
5926 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5927 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5928 break;
5929 case testbranch:
5930 /* e.g. TBZ */
5931 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5932 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5933 break;
5934 case branch_imm:
5935 /* e.g. B or BL */
5936 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5937 inst.reloc.type =
5938 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5939 : BFD_RELOC_AARCH64_JUMP26;
5940 break;
5941 case loadlit:
5942 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5943 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5944 break;
5945 case pcreladdr:
5946 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5947 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5948 break;
5949 default:
5950 gas_assert (0);
5951 abort ();
5952 }
5953 inst.reloc.pc_rel = 1;
5954 }
5955 break;
5956
5957 case AARCH64_OPND_ADDR_SIMPLE:
5958 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5959 {
5960 /* [<Xn|SP>{, #<simm>}] */
5961 char *start = str;
5962 /* First use the normal address-parsing routines, to get
5963 the usual syntax errors. */
5964 po_misc_or_fail (parse_address (&str, info));
5965 if (info->addr.pcrel || info->addr.offset.is_reg
5966 || !info->addr.preind || info->addr.postind
5967 || info->addr.writeback)
5968 {
5969 set_syntax_error (_("invalid addressing mode"));
5970 goto failure;
5971 }
5972
5973 /* Then retry, matching the specific syntax of these addresses. */
5974 str = start;
5975 po_char_or_fail ('[');
5976 po_reg_or_fail (REG_TYPE_R64_SP);
5977 /* Accept optional ", #0". */
5978 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5979 && skip_past_char (&str, ','))
5980 {
5981 skip_past_char (&str, '#');
5982 if (! skip_past_char (&str, '0'))
5983 {
5984 set_fatal_syntax_error
5985 (_("the optional immediate offset can only be 0"));
5986 goto failure;
5987 }
5988 }
5989 po_char_or_fail (']');
5990 break;
5991 }
5992
5993 case AARCH64_OPND_ADDR_REGOFF:
5994 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5995 po_misc_or_fail (parse_address (&str, info));
5996 regoff_addr:
5997 if (info->addr.pcrel || !info->addr.offset.is_reg
5998 || !info->addr.preind || info->addr.postind
5999 || info->addr.writeback)
6000 {
6001 set_syntax_error (_("invalid addressing mode"));
6002 goto failure;
6003 }
6004 if (!info->shifter.operator_present)
6005 {
6006 /* Default to LSL if not present. Libopcodes prefers shifter
6007 kind to be explicit. */
6008 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6009 info->shifter.kind = AARCH64_MOD_LSL;
6010 }
6011 /* Qualifier to be deduced by libopcodes. */
6012 break;
6013
6014 case AARCH64_OPND_ADDR_SIMM7:
6015 po_misc_or_fail (parse_address (&str, info));
6016 if (info->addr.pcrel || info->addr.offset.is_reg
6017 || (!info->addr.preind && !info->addr.postind))
6018 {
6019 set_syntax_error (_("invalid addressing mode"));
6020 goto failure;
6021 }
6022 if (inst.reloc.type != BFD_RELOC_UNUSED)
6023 {
6024 set_syntax_error (_("relocation not allowed"));
6025 goto failure;
6026 }
6027 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6028 /* addr_off_p */ 1,
6029 /* need_libopcodes_p */ 1,
6030 /* skip_p */ 0);
6031 break;
6032
6033 case AARCH64_OPND_ADDR_SIMM9:
6034 case AARCH64_OPND_ADDR_SIMM9_2:
6035 po_misc_or_fail (parse_address (&str, info));
6036 if (info->addr.pcrel || info->addr.offset.is_reg
6037 || (!info->addr.preind && !info->addr.postind)
6038 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6039 && info->addr.writeback))
6040 {
6041 set_syntax_error (_("invalid addressing mode"));
6042 goto failure;
6043 }
6044 if (inst.reloc.type != BFD_RELOC_UNUSED)
6045 {
6046 set_syntax_error (_("relocation not allowed"));
6047 goto failure;
6048 }
6049 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6050 /* addr_off_p */ 1,
6051 /* need_libopcodes_p */ 1,
6052 /* skip_p */ 0);
6053 break;
6054
6055 case AARCH64_OPND_ADDR_SIMM10:
6056 case AARCH64_OPND_ADDR_OFFSET:
6057 po_misc_or_fail (parse_address (&str, info));
6058 if (info->addr.pcrel || info->addr.offset.is_reg
6059 || !info->addr.preind || info->addr.postind)
6060 {
6061 set_syntax_error (_("invalid addressing mode"));
6062 goto failure;
6063 }
6064 if (inst.reloc.type != BFD_RELOC_UNUSED)
6065 {
6066 set_syntax_error (_("relocation not allowed"));
6067 goto failure;
6068 }
6069 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6070 /* addr_off_p */ 1,
6071 /* need_libopcodes_p */ 1,
6072 /* skip_p */ 0);
6073 break;
6074
6075 case AARCH64_OPND_ADDR_UIMM12:
6076 po_misc_or_fail (parse_address (&str, info));
6077 if (info->addr.pcrel || info->addr.offset.is_reg
6078 || !info->addr.preind || info->addr.writeback)
6079 {
6080 set_syntax_error (_("invalid addressing mode"));
6081 goto failure;
6082 }
6083 if (inst.reloc.type == BFD_RELOC_UNUSED)
6084 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6085 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6086 || (inst.reloc.type
6087 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6088 || (inst.reloc.type
6089 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6090 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6091 /* Leave qualifier to be determined by libopcodes. */
6092 break;
6093
6094 case AARCH64_OPND_SIMD_ADDR_POST:
6095 /* [<Xn|SP>], <Xm|#<amount>> */
6096 po_misc_or_fail (parse_address (&str, info));
6097 if (!info->addr.postind || !info->addr.writeback)
6098 {
6099 set_syntax_error (_("invalid addressing mode"));
6100 goto failure;
6101 }
6102 if (!info->addr.offset.is_reg)
6103 {
6104 if (inst.reloc.exp.X_op == O_constant)
6105 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6106 else
6107 {
6108 set_fatal_syntax_error
6109 (_("writeback value must be an immediate constant"));
6110 goto failure;
6111 }
6112 }
6113 /* No qualifier. */
6114 break;
6115
6116 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6117 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6118 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6119 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6120 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6121 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6122 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6123 case AARCH64_OPND_SVE_ADDR_RI_U6:
6124 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6125 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6126 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6127 /* [X<n>{, #imm, MUL VL}]
6128 [X<n>{, #imm}]
6129 but recognizing SVE registers. */
6130 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6131 &offset_qualifier));
6132 if (base_qualifier != AARCH64_OPND_QLF_X)
6133 {
6134 set_syntax_error (_("invalid addressing mode"));
6135 goto failure;
6136 }
6137 sve_regimm:
6138 if (info->addr.pcrel || info->addr.offset.is_reg
6139 || !info->addr.preind || info->addr.writeback)
6140 {
6141 set_syntax_error (_("invalid addressing mode"));
6142 goto failure;
6143 }
6144 if (inst.reloc.type != BFD_RELOC_UNUSED
6145 || inst.reloc.exp.X_op != O_constant)
6146 {
6147 /* Make sure this has priority over
6148 "invalid addressing mode". */
6149 set_fatal_syntax_error (_("constant offset required"));
6150 goto failure;
6151 }
6152 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6153 break;
6154
6155 case AARCH64_OPND_SVE_ADDR_RR:
6156 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6157 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6158 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6159 case AARCH64_OPND_SVE_ADDR_RX:
6160 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6161 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6162 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6163 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6164 but recognizing SVE registers. */
6165 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6166 &offset_qualifier));
6167 if (base_qualifier != AARCH64_OPND_QLF_X
6168 || offset_qualifier != AARCH64_OPND_QLF_X)
6169 {
6170 set_syntax_error (_("invalid addressing mode"));
6171 goto failure;
6172 }
6173 goto regoff_addr;
6174
6175 case AARCH64_OPND_SVE_ADDR_RZ:
6176 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6177 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6178 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6179 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6180 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6181 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6182 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6183 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6184 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6185 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6186 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6187 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6188 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6189 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6190 &offset_qualifier));
6191 if (base_qualifier != AARCH64_OPND_QLF_X
6192 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6193 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6194 {
6195 set_syntax_error (_("invalid addressing mode"));
6196 goto failure;
6197 }
6198 info->qualifier = offset_qualifier;
6199 goto regoff_addr;
6200
6201 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6202 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6203 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6204 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6205 /* [Z<n>.<T>{, #imm}] */
6206 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6207 &offset_qualifier));
6208 if (base_qualifier != AARCH64_OPND_QLF_S_S
6209 && base_qualifier != AARCH64_OPND_QLF_S_D)
6210 {
6211 set_syntax_error (_("invalid addressing mode"));
6212 goto failure;
6213 }
6214 info->qualifier = base_qualifier;
6215 goto sve_regimm;
6216
6217 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6218 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6219 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6220 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6221 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6222
6223 We don't reject:
6224
6225 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6226
6227 here since we get better error messages by leaving it to
6228 the qualifier checking routines. */
6229 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6230 &offset_qualifier));
6231 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6232 && base_qualifier != AARCH64_OPND_QLF_S_D)
6233 || offset_qualifier != base_qualifier)
6234 {
6235 set_syntax_error (_("invalid addressing mode"));
6236 goto failure;
6237 }
6238 info->qualifier = base_qualifier;
6239 goto regoff_addr;
6240
6241 case AARCH64_OPND_SYSREG:
6242 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6243 == PARSE_FAIL)
6244 {
6245 set_syntax_error (_("unknown or missing system register name"));
6246 goto failure;
6247 }
6248 inst.base.operands[i].sysreg = val;
6249 break;
6250
6251 case AARCH64_OPND_PSTATEFIELD:
6252 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6253 == PARSE_FAIL)
6254 {
6255 set_syntax_error (_("unknown or missing PSTATE field name"));
6256 goto failure;
6257 }
6258 inst.base.operands[i].pstatefield = val;
6259 break;
6260
6261 case AARCH64_OPND_SYSREG_IC:
6262 inst.base.operands[i].sysins_op =
6263 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6264 goto sys_reg_ins;
6265 case AARCH64_OPND_SYSREG_DC:
6266 inst.base.operands[i].sysins_op =
6267 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6268 goto sys_reg_ins;
6269 case AARCH64_OPND_SYSREG_AT:
6270 inst.base.operands[i].sysins_op =
6271 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6272 goto sys_reg_ins;
6273 case AARCH64_OPND_SYSREG_TLBI:
6274 inst.base.operands[i].sysins_op =
6275 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6276 sys_reg_ins:
6277 if (inst.base.operands[i].sysins_op == NULL)
6278 {
6279 set_fatal_syntax_error ( _("unknown or missing operation name"));
6280 goto failure;
6281 }
6282 break;
6283
6284 case AARCH64_OPND_BARRIER:
6285 case AARCH64_OPND_BARRIER_ISB:
6286 val = parse_barrier (&str);
6287 if (val != PARSE_FAIL
6288 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6289 {
6290 /* ISB only accepts options name 'sy'. */
6291 set_syntax_error
6292 (_("the specified option is not accepted in ISB"));
6293 /* Turn off backtrack as this optional operand is present. */
6294 backtrack_pos = 0;
6295 goto failure;
6296 }
6297 /* This is an extension to accept a 0..15 immediate. */
6298 if (val == PARSE_FAIL)
6299 po_imm_or_fail (0, 15);
6300 info->barrier = aarch64_barrier_options + val;
6301 break;
6302
6303 case AARCH64_OPND_PRFOP:
6304 val = parse_pldop (&str);
6305 /* This is an extension to accept a 0..31 immediate. */
6306 if (val == PARSE_FAIL)
6307 po_imm_or_fail (0, 31);
6308 inst.base.operands[i].prfop = aarch64_prfops + val;
6309 break;
6310
6311 case AARCH64_OPND_BARRIER_PSB:
6312 val = parse_barrier_psb (&str, &(info->hint_option));
6313 if (val == PARSE_FAIL)
6314 goto failure;
6315 break;
6316
6317 default:
6318 as_fatal (_("unhandled operand code %d"), operands[i]);
6319 }
6320
6321 /* If we get here, this operand was successfully parsed. */
6322 inst.base.operands[i].present = 1;
6323 continue;
6324
6325 failure:
6326 /* The parse routine should already have set the error, but in case
6327 not, set a default one here. */
6328 if (! error_p ())
6329 set_default_error ();
6330
6331 if (! backtrack_pos)
6332 goto parse_operands_return;
6333
6334 {
6335 /* We reach here because this operand is marked as optional, and
6336 either no operand was supplied or the operand was supplied but it
6337 was syntactically incorrect. In the latter case we report an
6338 error. In the former case we perform a few more checks before
6339 dropping through to the code to insert the default operand. */
6340
6341 char *tmp = backtrack_pos;
6342 char endchar = END_OF_INSN;
6343
6344 if (i != (aarch64_num_of_operands (opcode) - 1))
6345 endchar = ',';
6346 skip_past_char (&tmp, ',');
6347
6348 if (*tmp != endchar)
6349 /* The user has supplied an operand in the wrong format. */
6350 goto parse_operands_return;
6351
6352 /* Make sure there is not a comma before the optional operand.
6353 For example the fifth operand of 'sys' is optional:
6354
6355 sys #0,c0,c0,#0, <--- wrong
6356 sys #0,c0,c0,#0 <--- correct. */
6357 if (comma_skipped_p && i && endchar == END_OF_INSN)
6358 {
6359 set_fatal_syntax_error
6360 (_("unexpected comma before the omitted optional operand"));
6361 goto parse_operands_return;
6362 }
6363 }
6364
6365 /* Reaching here means we are dealing with an optional operand that is
6366 omitted from the assembly line. */
6367 gas_assert (optional_operand_p (opcode, i));
6368 info->present = 0;
6369 process_omitted_operand (operands[i], opcode, i, info);
6370
6371 /* Try again, skipping the optional operand at backtrack_pos. */
6372 str = backtrack_pos;
6373 backtrack_pos = 0;
6374
6375 /* Clear any error record after the omitted optional operand has been
6376 successfully handled. */
6377 clear_error ();
6378 }
6379
6380 /* Check if we have parsed all the operands. */
6381 if (*str != '\0' && ! error_p ())
6382 {
6383 /* Set I to the index of the last present operand; this is
6384 for the purpose of diagnostics. */
6385 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6386 ;
6387 set_fatal_syntax_error
6388 (_("unexpected characters following instruction"));
6389 }
6390
6391 parse_operands_return:
6392
6393 if (error_p ())
6394 {
6395 DEBUG_TRACE ("parsing FAIL: %s - %s",
6396 operand_mismatch_kind_names[get_error_kind ()],
6397 get_error_message ());
6398 /* Record the operand error properly; this is useful when there
6399 are multiple instruction templates for a mnemonic name, so that
6400 later on, we can select the error that most closely describes
6401 the problem. */
6402 record_operand_error (opcode, i, get_error_kind (),
6403 get_error_message ());
6404 return FALSE;
6405 }
6406 else
6407 {
6408 DEBUG_TRACE ("parsing SUCCESS");
6409 return TRUE;
6410 }
6411 }
6412
6413 /* It does some fix-up to provide some programmer friendly feature while
6414 keeping the libopcodes happy, i.e. libopcodes only accepts
6415 the preferred architectural syntax.
6416 Return FALSE if there is any failure; otherwise return TRUE. */
6417
6418 static bfd_boolean
6419 programmer_friendly_fixup (aarch64_instruction *instr)
6420 {
6421 aarch64_inst *base = &instr->base;
6422 const aarch64_opcode *opcode = base->opcode;
6423 enum aarch64_op op = opcode->op;
6424 aarch64_opnd_info *operands = base->operands;
6425
6426 DEBUG_TRACE ("enter");
6427
6428 switch (opcode->iclass)
6429 {
6430 case testbranch:
6431 /* TBNZ Xn|Wn, #uimm6, label
6432 Test and Branch Not Zero: conditionally jumps to label if bit number
6433 uimm6 in register Xn is not zero. The bit number implies the width of
6434 the register, which may be written and should be disassembled as Wn if
6435 uimm is less than 32. */
6436 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6437 {
6438 if (operands[1].imm.value >= 32)
6439 {
6440 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6441 0, 31);
6442 return FALSE;
6443 }
6444 operands[0].qualifier = AARCH64_OPND_QLF_X;
6445 }
6446 break;
6447 case loadlit:
6448 /* LDR Wt, label | =value
6449 As a convenience assemblers will typically permit the notation
6450 "=value" in conjunction with the pc-relative literal load instructions
6451 to automatically place an immediate value or symbolic address in a
6452 nearby literal pool and generate a hidden label which references it.
6453 ISREG has been set to 0 in the case of =value. */
6454 if (instr->gen_lit_pool
6455 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6456 {
6457 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6458 if (op == OP_LDRSW_LIT)
6459 size = 4;
6460 if (instr->reloc.exp.X_op != O_constant
6461 && instr->reloc.exp.X_op != O_big
6462 && instr->reloc.exp.X_op != O_symbol)
6463 {
6464 record_operand_error (opcode, 1,
6465 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6466 _("constant expression expected"));
6467 return FALSE;
6468 }
6469 if (! add_to_lit_pool (&instr->reloc.exp, size))
6470 {
6471 record_operand_error (opcode, 1,
6472 AARCH64_OPDE_OTHER_ERROR,
6473 _("literal pool insertion failed"));
6474 return FALSE;
6475 }
6476 }
6477 break;
6478 case log_shift:
6479 case bitfield:
6480 /* UXT[BHW] Wd, Wn
6481 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6482 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6483 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6484 A programmer-friendly assembler should accept a destination Xd in
6485 place of Wd, however that is not the preferred form for disassembly.
6486 */
6487 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6488 && operands[1].qualifier == AARCH64_OPND_QLF_W
6489 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6490 operands[0].qualifier = AARCH64_OPND_QLF_W;
6491 break;
6492
6493 case addsub_ext:
6494 {
6495 /* In the 64-bit form, the final register operand is written as Wm
6496 for all but the (possibly omitted) UXTX/LSL and SXTX
6497 operators.
6498 As a programmer-friendly assembler, we accept e.g.
6499 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6500 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6501 int idx = aarch64_operand_index (opcode->operands,
6502 AARCH64_OPND_Rm_EXT);
6503 gas_assert (idx == 1 || idx == 2);
6504 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6505 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6506 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6507 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6508 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6509 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6510 }
6511 break;
6512
6513 default:
6514 break;
6515 }
6516
6517 DEBUG_TRACE ("exit with SUCCESS");
6518 return TRUE;
6519 }
6520
6521 /* Check for loads and stores that will cause unpredictable behavior. */
6522
6523 static void
6524 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6525 {
6526 aarch64_inst *base = &instr->base;
6527 const aarch64_opcode *opcode = base->opcode;
6528 const aarch64_opnd_info *opnds = base->operands;
6529 switch (opcode->iclass)
6530 {
6531 case ldst_pos:
6532 case ldst_imm9:
6533 case ldst_imm10:
6534 case ldst_unscaled:
6535 case ldst_unpriv:
6536 /* Loading/storing the base register is unpredictable if writeback. */
6537 if ((aarch64_get_operand_class (opnds[0].type)
6538 == AARCH64_OPND_CLASS_INT_REG)
6539 && opnds[0].reg.regno == opnds[1].addr.base_regno
6540 && opnds[1].addr.base_regno != REG_SP
6541 && opnds[1].addr.writeback)
6542 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6543 break;
6544 case ldstpair_off:
6545 case ldstnapair_offs:
6546 case ldstpair_indexed:
6547 /* Loading/storing the base register is unpredictable if writeback. */
6548 if ((aarch64_get_operand_class (opnds[0].type)
6549 == AARCH64_OPND_CLASS_INT_REG)
6550 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6551 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6552 && opnds[2].addr.base_regno != REG_SP
6553 && opnds[2].addr.writeback)
6554 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6555 /* Load operations must load different registers. */
6556 if ((opcode->opcode & (1 << 22))
6557 && opnds[0].reg.regno == opnds[1].reg.regno)
6558 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6559 break;
6560 default:
6561 break;
6562 }
6563 }
6564
6565 /* A wrapper function to interface with libopcodes on encoding and
6566 record the error message if there is any.
6567
6568 Return TRUE on success; otherwise return FALSE. */
6569
6570 static bfd_boolean
6571 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6572 aarch64_insn *code)
6573 {
6574 aarch64_operand_error error_info;
6575 error_info.kind = AARCH64_OPDE_NIL;
6576 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6577 return TRUE;
6578 else
6579 {
6580 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6581 record_operand_error_info (opcode, &error_info);
6582 return FALSE;
6583 }
6584 }
6585
6586 #ifdef DEBUG_AARCH64
6587 static inline void
6588 dump_opcode_operands (const aarch64_opcode *opcode)
6589 {
6590 int i = 0;
6591 while (opcode->operands[i] != AARCH64_OPND_NIL)
6592 {
6593 aarch64_verbose ("\t\t opnd%d: %s", i,
6594 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6595 ? aarch64_get_operand_name (opcode->operands[i])
6596 : aarch64_get_operand_desc (opcode->operands[i]));
6597 ++i;
6598 }
6599 }
6600 #endif /* DEBUG_AARCH64 */
6601
6602 /* This is the guts of the machine-dependent assembler. STR points to a
6603 machine dependent instruction. This function is supposed to emit
6604 the frags/bytes it assembles to. */
6605
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic by opcode_lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* The condition code was parsed with the mnemonic; preserve it across
     the instruction reset so each template attempt sees it.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly transformations, then encode.
	 Any failure falls through to try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template failed; reset the instruction state before trying
	 the next candidate with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6735
6736 /* Various frobbings of labels and their addresses. */
6737
void
aarch64_start_line_hook (void)
{
  /* Forget any label from the previous line; md_assemble only re-anchors
     a label that was seen on the current line.  */
  last_label_seen = NULL;
}
6743
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can align it to the
     instruction it precedes.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6751
6752 int
6753 aarch64_data_in_code (void)
6754 {
6755 if (!strncmp (input_line_pointer + 1, "data:", 5))
6756 {
6757 *input_line_pointer = '/';
6758 input_line_pointer += 5;
6759 *input_line_pointer = 0;
6760 return 1;
6761 }
6762
6763 return 0;
6764 }
6765
char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Strip a trailing "/data" marker (added by aarch64_data_in_code) by
     truncating NAME in place; NAME itself is returned.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6776 \f
6777 /* Table of all register names defined by default. The user can
6778 define additional names with .req. Note that all register names
6779 should appear in both upper and lowercase variants. Some registers
6780 also have mixed-case names. */
6781
6782 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6783 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
6784 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6785 #define REGSET16(p,t) \
6786 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6787 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6788 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6789 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6790 #define REGSET31(p,t) \
6791 REGSET16(p, t), \
6792 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6793 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6794 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6795 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6796 #define REGSET(p,t) \
6797 REGSET31(p,t), REGNUM(p,31,t)
6798
6799 /* These go into aarch64_reg_hsh hash-table. */
6800 static const reg_entry reg_names[] = {
6801 /* Integer registers. */
6802 REGSET31 (x, R_64), REGSET31 (X, R_64),
6803 REGSET31 (w, R_32), REGSET31 (W, R_32),
6804
6805 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
6806 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 16, R_64),
6807 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
6808 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
6809 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6810 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6811
6812 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6813 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6814
6815 /* Floating-point single precision registers. */
6816 REGSET (s, FP_S), REGSET (S, FP_S),
6817
6818 /* Floating-point double precision registers. */
6819 REGSET (d, FP_D), REGSET (D, FP_D),
6820
6821 /* Floating-point half precision registers. */
6822 REGSET (h, FP_H), REGSET (H, FP_H),
6823
6824 /* Floating-point byte precision registers. */
6825 REGSET (b, FP_B), REGSET (B, FP_B),
6826
6827 /* Floating-point quad precision registers. */
6828 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6829
6830 /* FP/SIMD registers. */
6831 REGSET (v, VN), REGSET (V, VN),
6832
6833 /* SVE vector registers. */
6834 REGSET (z, ZN), REGSET (Z, ZN),
6835
6836 /* SVE predicate registers. */
6837 REGSET16 (p, PN), REGSET16 (P, PN)
6838 };
6839
6840 #undef REGDEF
6841 #undef REGDEF_ALIAS
6842 #undef REGNUM
6843 #undef REGSET16
6844 #undef REGSET31
6845 #undef REGSET
6846
6847 #define N 1
6848 #define n 0
6849 #define Z 1
6850 #define z 0
6851 #define C 1
6852 #define c 0
6853 #define V 1
6854 #define v 0
6855 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6856 static const asm_nzcv nzcv_names[] = {
6857 {"nzcv", B (n, z, c, v)},
6858 {"nzcV", B (n, z, c, V)},
6859 {"nzCv", B (n, z, C, v)},
6860 {"nzCV", B (n, z, C, V)},
6861 {"nZcv", B (n, Z, c, v)},
6862 {"nZcV", B (n, Z, c, V)},
6863 {"nZCv", B (n, Z, C, v)},
6864 {"nZCV", B (n, Z, C, V)},
6865 {"Nzcv", B (N, z, c, v)},
6866 {"NzcV", B (N, z, c, V)},
6867 {"NzCv", B (N, z, C, v)},
6868 {"NzCV", B (N, z, C, V)},
6869 {"NZcv", B (N, Z, c, v)},
6870 {"NZcV", B (N, Z, c, V)},
6871 {"NZCv", B (N, Z, C, v)},
6872 {"NZCV", B (N, Z, C, V)}
6873 };
6874
6875 #undef N
6876 #undef n
6877 #undef Z
6878 #undef z
6879 #undef C
6880 #undef c
6881 #undef V
6882 #undef v
6883 #undef B
6884 \f
6885 /* MD interface: bits in the object file. */
6886
6887 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6888 for use in the a.out file, and stores them in the array pointed to by buf.
6889 This knows about the endian-ness of the target machine and does
6890 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6891 2 (short) and 4 (long) Floating numbers are put out as a series of
6892 LITTLENUMS (shorts, here at least). */
6893
6894 void
6895 md_number_to_chars (char *buf, valueT val, int n)
6896 {
6897 if (target_big_endian)
6898 number_to_chars_bigendian (buf, val, n);
6899 else
6900 number_to_chars_littleendian (buf, val, n);
6901 }
6902
6903 /* MD interface: Sections. */
6904
6905 /* Estimate the size of a frag before relaxing. Assume everything fits in
6906 4 bytes. */
6907
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction occupies exactly 4 bytes, so the variable
     part of the frag is always 4.  */
  fragp->fr_var = 4;
  return 4;
}
6914
6915 /* Round up a section size to the appropriate boundary. */
6916
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; the section size is used as-is.  */
  return size;
}
6922
6923 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6924 of an rs_align_code fragment.
6925
6926 Here we fill the frag with the appropriate info for padding the
6927 output stream. The resulting frag will consist of a fixed (fr_fix)
6928 and of a repeating (fr_var) part.
6929
6930 The fixed content is always emitted before the repeating content and
6931 these two parts are used as follows in constructing the output:
6932 - the fixed part will be used to align to a valid instruction word
6933 boundary, in case that we start at a misaligned address; as no
6934 executable instruction can live at the misaligned location, we
6935 simply fill with zeros;
6936 - the variable part will be used to cover the remaining padding and
6937 we fill using the AArch64 NOP instruction.
6938
6939 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6940 enough storage space for up to 3 bytes for padding the back to a valid
6941 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6942
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must provide, and where it starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; no instruction
     can live at a misaligned address, so these are zero-filled and added
     to the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (variable) part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6980
6981 /* Perform target specific initialisation of a frag.
6982 Note - despite the name this initialisation is not done when the frag
6983 is created, but only when its type is assigned. A frag can be created
6984 and used a long time before its type is set, so beware of assuming that
6985 this initialisation is performed first. */
6986
6987 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; mapping symbols are ELF-only.  */
}
6993
6994 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* These frags hold data bytes.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with NOPs, so stays in MAP_INSN.  */
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7026 \f
7027 /* Initialize the DWARF-2 unwind information for this procedure. */
7028
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7034 #endif /* OBJ_ELF */
7035
7036 /* Convert REGNAME to a DWARF-2 register number. */
7037
7038 int
7039 tc_aarch64_regname_to_dw2regnum (char *regname)
7040 {
7041 const reg_entry *reg = parse_reg (&regname);
7042 if (reg == NULL)
7043 return -1;
7044
7045 switch (reg->type)
7046 {
7047 case REG_TYPE_SP_32:
7048 case REG_TYPE_SP_64:
7049 case REG_TYPE_R_32:
7050 case REG_TYPE_R_64:
7051 return reg->number;
7052
7053 case REG_TYPE_FP_B:
7054 case REG_TYPE_FP_H:
7055 case REG_TYPE_FP_S:
7056 case REG_TYPE_FP_D:
7057 case REG_TYPE_FP_Q:
7058 return reg->number + 64;
7059
7060 default:
7061 break;
7062 }
7063 return -1;
7064 }
7065
7066 /* Implement DWARF2_ADDR_SIZE. */
7067
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even on a 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7077
7078 /* MD interface: Symbol and relocation handling. */
7079
7080 /* Return the address within the segment that a PC-relative fixup is
7081 relative to. For AArch64 PC-relative fixups applied to instructions
7082 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7083
7084 long
7085 md_pcrel_from_section (fixS * fixP, segT seg)
7086 {
7087 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7088
7089 /* If this is pc-relative and we are going to emit a relocation
7090 then we just want to put out any pipeline compensation that the linker
7091 will need. Otherwise we want to use the calculated base. */
7092 if (fixP->fx_pcrel
7093 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7094 || aarch64_force_relocation (fixP)))
7095 base = 0;
7096
7097 /* AArch64 should be consistent for all pc-relative relocations. */
7098 return base + AARCH64_PCREL_OFFSET;
7099 }
7100
7101 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7102 Otherwise we have no need to default values of symbols. */
7103
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character pre-filter before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, exactly once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7126
7127 /* Return non-zero if the indicated VALUE has overflowed the maximum
7128 range expressible by a unsigned number with the indicated number of
7129 BITS. */
7130
7131 static bfd_boolean
7132 unsigned_overflow (valueT value, unsigned bits)
7133 {
7134 valueT lim;
7135 if (bits >= sizeof (valueT) * 8)
7136 return FALSE;
7137 lim = (valueT) 1 << bits;
7138 return (value >= lim);
7139 }
7140
7141
7142 /* Return non-zero if the indicated VALUE has overflowed the maximum
7143 range expressible by an signed number with the indicated number of
7144 BITS. */
7145
7146 static bfd_boolean
7147 signed_overflow (offsetT value, unsigned bits)
7148 {
7149 offsetT lim;
7150 if (bits >= sizeof (offsetT) * 8)
7151 return FALSE;
7152 lim = (offsetT) 1 << (bits - 1);
7153 return (value < -lim || value >= lim);
7154 }
7155
7156 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7157 unsigned immediate offset load/store instruction, try to encode it as
7158 an unscaled, 9-bit, signed immediate offset load/store instruction.
7159 Return TRUE if it is successful; otherwise return FALSE.
7160
7161 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7162 in response to the standard LDR/STR mnemonics when the immediate offset is
7163 unambiguous, i.e. when it is negative or unaligned. */
7164
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR) twin;
     anything without a twin yields OP_NIL and is rejected below.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
7217
7218 /* Called by fix_insn to fix a MOV immediate alias instruction.
7219
7220 Operand for a generic move immediate instruction, which is an alias
7221 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7222 a 32-bit/64-bit immediate value into general register. An assembler error
7223 shall result if the immediate cannot be created by a single one of these
7224 instructions. If there is a choice, then to ensure reversability an
7225 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7226
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN is the wide-negated form);
	 MOVZ is preferred over MOVN per the ordering described above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7278
7279 /* An instruction operand which is immediate related may have symbol used
7280 in the assembly, e.g.
7281
7282 mov w0, u32
7283 .set u32, 0x00ffff00
7284
7285 At the time when the assembly instruction is parsed, a referenced symbol,
7286 like 'u32' in the above example may not have been seen; a fixS is created
7287 in such a case and is handled here after symbols have been resolved.
7288 Instruction is fixed up with VALUE using the information in *FIXP plus
7289 extra information in FLAGS.
7290
7291 This function is called by md_apply_fix to fix up instructions that need
7292 a fix-up described above but does not involve any linker-time relocation. */
7293
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit exception-generation immediate, patched straight into
	 the already-emitted instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only done when
	     the source had no explicit shift of its own.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through libopcodes since the
	 bitmask encoding cannot be patched bit-by-bit.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled form failed; the unscaled LDUR/STUR twin fit.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7454
7455 /* Apply a fixup (fixP) to segment data, once it has been determined
7456 by our caller that we have all the info we need to fix it up.
7457
7458 Parameter valP is the pointer to the value of the bits. */
7459
7460 void
7461 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7462 {
7463 offsetT value = *valP;
7464 uint32_t insn;
7465 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7466 int scale;
7467 unsigned flags = fixP->fx_addnumber;
7468
7469 DEBUG_TRACE ("\n\n");
7470 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7471 DEBUG_TRACE ("Enter md_apply_fix");
7472
7473 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7474
7475 /* Note whether this will delete the relocation. */
7476
7477 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7478 fixP->fx_done = 1;
7479
7480 /* Process the relocations. */
7481 switch (fixP->fx_r_type)
7482 {
7483 case BFD_RELOC_NONE:
7484 /* This will need to go in the object file. */
7485 fixP->fx_done = 0;
7486 break;
7487
7488 case BFD_RELOC_8:
7489 case BFD_RELOC_8_PCREL:
7490 if (fixP->fx_done || !seg->use_rela_p)
7491 md_number_to_chars (buf, value, 1);
7492 break;
7493
7494 case BFD_RELOC_16:
7495 case BFD_RELOC_16_PCREL:
7496 if (fixP->fx_done || !seg->use_rela_p)
7497 md_number_to_chars (buf, value, 2);
7498 break;
7499
7500 case BFD_RELOC_32:
7501 case BFD_RELOC_32_PCREL:
7502 if (fixP->fx_done || !seg->use_rela_p)
7503 md_number_to_chars (buf, value, 4);
7504 break;
7505
7506 case BFD_RELOC_64:
7507 case BFD_RELOC_64_PCREL:
7508 if (fixP->fx_done || !seg->use_rela_p)
7509 md_number_to_chars (buf, value, 8);
7510 break;
7511
7512 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7513 /* We claim that these fixups have been processed here, even if
7514 in fact we generate an error because we do not have a reloc
7515 for them, so tc_gen_reloc() will reject them. */
7516 fixP->fx_done = 1;
7517 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7518 {
7519 as_bad_where (fixP->fx_file, fixP->fx_line,
7520 _("undefined symbol %s used as an immediate value"),
7521 S_GET_NAME (fixP->fx_addsy));
7522 goto apply_fix_return;
7523 }
7524 fix_insn (fixP, flags, value);
7525 break;
7526
7527 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7528 if (fixP->fx_done || !seg->use_rela_p)
7529 {
7530 if (value & 3)
7531 as_bad_where (fixP->fx_file, fixP->fx_line,
7532 _("pc-relative load offset not word aligned"));
7533 if (signed_overflow (value, 21))
7534 as_bad_where (fixP->fx_file, fixP->fx_line,
7535 _("pc-relative load offset out of range"));
7536 insn = get_aarch64_insn (buf);
7537 insn |= encode_ld_lit_ofs_19 (value >> 2);
7538 put_aarch64_insn (buf, insn);
7539 }
7540 break;
7541
7542 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7543 if (fixP->fx_done || !seg->use_rela_p)
7544 {
7545 if (signed_overflow (value, 21))
7546 as_bad_where (fixP->fx_file, fixP->fx_line,
7547 _("pc-relative address offset out of range"));
7548 insn = get_aarch64_insn (buf);
7549 insn |= encode_adr_imm (value);
7550 put_aarch64_insn (buf, insn);
7551 }
7552 break;
7553
7554 case BFD_RELOC_AARCH64_BRANCH19:
7555 if (fixP->fx_done || !seg->use_rela_p)
7556 {
7557 if (value & 3)
7558 as_bad_where (fixP->fx_file, fixP->fx_line,
7559 _("conditional branch target not word aligned"));
7560 if (signed_overflow (value, 21))
7561 as_bad_where (fixP->fx_file, fixP->fx_line,
7562 _("conditional branch out of range"));
7563 insn = get_aarch64_insn (buf);
7564 insn |= encode_cond_branch_ofs_19 (value >> 2);
7565 put_aarch64_insn (buf, insn);
7566 }
7567 break;
7568
7569 case BFD_RELOC_AARCH64_TSTBR14:
7570 if (fixP->fx_done || !seg->use_rela_p)
7571 {
7572 if (value & 3)
7573 as_bad_where (fixP->fx_file, fixP->fx_line,
7574 _("conditional branch target not word aligned"));
7575 if (signed_overflow (value, 16))
7576 as_bad_where (fixP->fx_file, fixP->fx_line,
7577 _("conditional branch out of range"));
7578 insn = get_aarch64_insn (buf);
7579 insn |= encode_tst_branch_ofs_14 (value >> 2);
7580 put_aarch64_insn (buf, insn);
7581 }
7582 break;
7583
7584 case BFD_RELOC_AARCH64_CALL26:
7585 case BFD_RELOC_AARCH64_JUMP26:
7586 if (fixP->fx_done || !seg->use_rela_p)
7587 {
7588 if (value & 3)
7589 as_bad_where (fixP->fx_file, fixP->fx_line,
7590 _("branch target not word aligned"));
7591 if (signed_overflow (value, 28))
7592 as_bad_where (fixP->fx_file, fixP->fx_line,
7593 _("branch out of range"));
7594 insn = get_aarch64_insn (buf);
7595 insn |= encode_branch_ofs_26 (value >> 2);
7596 put_aarch64_insn (buf, insn);
7597 }
7598 break;
7599
7600 case BFD_RELOC_AARCH64_MOVW_G0:
7601 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7602 case BFD_RELOC_AARCH64_MOVW_G0_S:
7603 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7604 scale = 0;
7605 goto movw_common;
7606 case BFD_RELOC_AARCH64_MOVW_G1:
7607 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7608 case BFD_RELOC_AARCH64_MOVW_G1_S:
7609 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7610 scale = 16;
7611 goto movw_common;
7612 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7613 scale = 0;
7614 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7615 /* Should always be exported to object file, see
7616 aarch64_force_relocation(). */
7617 gas_assert (!fixP->fx_done);
7618 gas_assert (seg->use_rela_p);
7619 goto movw_common;
7620 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7621 scale = 16;
7622 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7623 /* Should always be exported to object file, see
7624 aarch64_force_relocation(). */
7625 gas_assert (!fixP->fx_done);
7626 gas_assert (seg->use_rela_p);
7627 goto movw_common;
7628 case BFD_RELOC_AARCH64_MOVW_G2:
7629 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7630 case BFD_RELOC_AARCH64_MOVW_G2_S:
7631 scale = 32;
7632 goto movw_common;
7633 case BFD_RELOC_AARCH64_MOVW_G3:
7634 scale = 48;
7635 movw_common:
7636 if (fixP->fx_done || !seg->use_rela_p)
7637 {
7638 insn = get_aarch64_insn (buf);
7639
7640 if (!fixP->fx_done)
7641 {
7642 /* REL signed addend must fit in 16 bits */
7643 if (signed_overflow (value, 16))
7644 as_bad_where (fixP->fx_file, fixP->fx_line,
7645 _("offset out of range"));
7646 }
7647 else
7648 {
7649 /* Check for overflow and scale. */
7650 switch (fixP->fx_r_type)
7651 {
7652 case BFD_RELOC_AARCH64_MOVW_G0:
7653 case BFD_RELOC_AARCH64_MOVW_G1:
7654 case BFD_RELOC_AARCH64_MOVW_G2:
7655 case BFD_RELOC_AARCH64_MOVW_G3:
7656 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7657 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7658 if (unsigned_overflow (value, scale + 16))
7659 as_bad_where (fixP->fx_file, fixP->fx_line,
7660 _("unsigned value out of range"));
7661 break;
7662 case BFD_RELOC_AARCH64_MOVW_G0_S:
7663 case BFD_RELOC_AARCH64_MOVW_G1_S:
7664 case BFD_RELOC_AARCH64_MOVW_G2_S:
7665 /* NOTE: We can only come here with movz or movn. */
7666 if (signed_overflow (value, scale + 16))
7667 as_bad_where (fixP->fx_file, fixP->fx_line,
7668 _("signed value out of range"));
7669 if (value < 0)
7670 {
7671 /* Force use of MOVN. */
7672 value = ~value;
7673 insn = reencode_movzn_to_movn (insn);
7674 }
7675 else
7676 {
7677 /* Force use of MOVZ. */
7678 insn = reencode_movzn_to_movz (insn);
7679 }
7680 break;
7681 default:
7682 /* Unchecked relocations. */
7683 break;
7684 }
7685 value >>= scale;
7686 }
7687
7688 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7689 insn |= encode_movw_imm (value & 0xffff);
7690
7691 put_aarch64_insn (buf, insn);
7692 }
7693 break;
7694
7695 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7696 fixP->fx_r_type = (ilp32_p
7697 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7698 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7699 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7700 /* Should always be exported to object file, see
7701 aarch64_force_relocation(). */
7702 gas_assert (!fixP->fx_done);
7703 gas_assert (seg->use_rela_p);
7704 break;
7705
7706 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7707 fixP->fx_r_type = (ilp32_p
7708 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7709 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
7710 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7711 /* Should always be exported to object file, see
7712 aarch64_force_relocation(). */
7713 gas_assert (!fixP->fx_done);
7714 gas_assert (seg->use_rela_p);
7715 break;
7716
7717 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7718 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7719 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7720 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7721 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7722 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7723 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7724 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7725 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7726 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7727 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7728 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7729 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7730 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7731 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7732 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7733 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7734 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7735 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7736 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7737 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7738 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7739 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7740 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7741 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7742 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7743 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7744 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7745 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7746 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7747 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7748 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7749 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7750 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7751 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7752 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7753 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7754 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7755 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7756 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7757 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7758 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7759 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7760 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7761 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7762 /* Should always be exported to object file, see
7763 aarch64_force_relocation(). */
7764 gas_assert (!fixP->fx_done);
7765 gas_assert (seg->use_rela_p);
7766 break;
7767
7768 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7769 /* Should always be exported to object file, see
7770 aarch64_force_relocation(). */
7771 fixP->fx_r_type = (ilp32_p
7772 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7773 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7774 gas_assert (!fixP->fx_done);
7775 gas_assert (seg->use_rela_p);
7776 break;
7777
7778 case BFD_RELOC_AARCH64_ADD_LO12:
7779 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7780 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7781 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7782 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7783 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7784 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7785 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7786 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7787 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7788 case BFD_RELOC_AARCH64_LDST128_LO12:
7789 case BFD_RELOC_AARCH64_LDST16_LO12:
7790 case BFD_RELOC_AARCH64_LDST32_LO12:
7791 case BFD_RELOC_AARCH64_LDST64_LO12:
7792 case BFD_RELOC_AARCH64_LDST8_LO12:
7793 /* Should always be exported to object file, see
7794 aarch64_force_relocation(). */
7795 gas_assert (!fixP->fx_done);
7796 gas_assert (seg->use_rela_p);
7797 break;
7798
7799 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7800 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7801 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7802 break;
7803
7804 case BFD_RELOC_UNUSED:
7805 /* An error will already have been reported. */
7806 break;
7807
7808 default:
7809 as_bad_where (fixP->fx_file, fixP->fx_line,
7810 _("unexpected %s fixup"),
7811 bfd_get_reloc_code_name (fixP->fx_r_type));
7812 break;
7813 }
7814
7815 apply_fix_return:
7816 /* Free the allocated the struct aarch64_inst.
7817 N.B. currently there are very limited number of fix-up types actually use
7818 this field, so the impact on the performance should be minimal . */
7819 if (fixP->tc_fix_data.inst != NULL)
7820 free (fixP->tc_fix_data.inst);
7821
7822 return;
7823 }
7824
7825 /* Translate internal representation of relocation info to BFD target
7826 format. */
7827
7828 arelent *
7829 tc_gen_reloc (asection * section, fixS * fixp)
7830 {
7831 arelent *reloc;
7832 bfd_reloc_code_real_type code;
7833
7834 reloc = XNEW (arelent);
7835
7836 reloc->sym_ptr_ptr = XNEW (asymbol *);
7837 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7838 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7839
7840 if (fixp->fx_pcrel)
7841 {
7842 if (section->use_rela_p)
7843 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7844 else
7845 fixp->fx_offset = reloc->address;
7846 }
7847 reloc->addend = fixp->fx_offset;
7848
7849 code = fixp->fx_r_type;
7850 switch (code)
7851 {
7852 case BFD_RELOC_16:
7853 if (fixp->fx_pcrel)
7854 code = BFD_RELOC_16_PCREL;
7855 break;
7856
7857 case BFD_RELOC_32:
7858 if (fixp->fx_pcrel)
7859 code = BFD_RELOC_32_PCREL;
7860 break;
7861
7862 case BFD_RELOC_64:
7863 if (fixp->fx_pcrel)
7864 code = BFD_RELOC_64_PCREL;
7865 break;
7866
7867 default:
7868 break;
7869 }
7870
7871 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7872 if (reloc->howto == NULL)
7873 {
7874 as_bad_where (fixp->fx_file, fixp->fx_line,
7875 _
7876 ("cannot represent %s relocation in this object file format"),
7877 bfd_get_reloc_code_name (code));
7878 return NULL;
7879 }
7880
7881 return reloc;
7882 }
7883
7884 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7885
7886 void
7887 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7888 {
7889 bfd_reloc_code_real_type type;
7890 int pcrel = 0;
7891
7892 /* Pick a reloc.
7893 FIXME: @@ Should look at CPU word size. */
7894 switch (size)
7895 {
7896 case 1:
7897 type = BFD_RELOC_8;
7898 break;
7899 case 2:
7900 type = BFD_RELOC_16;
7901 break;
7902 case 4:
7903 type = BFD_RELOC_32;
7904 break;
7905 case 8:
7906 type = BFD_RELOC_64;
7907 break;
7908 default:
7909 as_bad (_("cannot do %u-byte relocation"), size);
7910 type = BFD_RELOC_UNUSED;
7911 break;
7912 }
7913
7914 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7915 }
7916
/* Decide whether the fixup FIXP must be emitted as a relocation rather
   than being resolved by the assembler.  Returns non-zero to force a
   relocation, zero to allow local resolution; otherwise defers to the
   generic rule.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, TLS- and TLSDESC-related relocations: the linker must see
       every one of these, so md_apply_fix only asserts and leaves the
       relocation in the object file.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8004
8005 #ifdef OBJ_ELF
8006
8007 /* Implement md_after_parse_args. This is the earliest time we need to decide
8008 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8009
8010 void
8011 aarch64_after_parse_args (void)
8012 {
8013 if (aarch64_abi != AARCH64_ABI_NONE)
8014 return;
8015
8016 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8017 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8018 aarch64_abi = AARCH64_ABI_ILP32;
8019 else
8020 aarch64_abi = AARCH64_ABI_LP64;
8021 }
8022
8023 const char *
8024 elf64_aarch64_target_format (void)
8025 {
8026 if (strcmp (TARGET_OS, "cloudabi") == 0)
8027 {
8028 /* FIXME: What to do for ilp32_p ? */
8029 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8030 }
8031 if (target_big_endian)
8032 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8033 else
8034 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8035 }
8036
/* Symbol-frobbing hook for ELF targets: defer entirely to the generic
   ELF implementation.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8042 #endif
8043
8044 /* MD interface: Finalization. */
8045
8046 /* A good place to do this, although this was probably not intended
8047 for this kind of use. We need to dump the literal pool before
8048 references are made to a null symbol pointer. */
8049
8050 void
8051 aarch64_cleanup (void)
8052 {
8053 literal_pool *pool;
8054
8055 for (pool = list_of_pools; pool; pool = pool->next)
8056 {
8057 /* Put it at the end of the relevant section. */
8058 subseg_set (pool->section, pool->sub_section);
8059 s_ltorg (0);
8060 }
8061 }
8062
8063 #ifdef OBJ_ELF
8064 /* Remove any excess mapping symbols generated for alignment frags in
8065 SEC. We may have created a mapping symbol before a zero byte
8066 alignment; remove it if there's a mapping symbol after the
8067 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  /* Walk every frag of SEC; for each frag whose last mapping symbol sits
     exactly at the frag's end, decide whether a later mapping symbol makes
     it redundant and remove it if so.  */
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags looking for a mapping symbol that
	 supersedes SYM, or for evidence that SYM is still needed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8127 #endif
8128
8129 /* Adjust the symbol table. */
8130
/* Final symbol-table fixups before write-out; a no-op for non-ELF.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8141
/* Insert KEY/VALUE into TABLE.  A failure to insert indicates an
   internal inconsistency (e.g. a duplicate key or exhausted memory), so
   treat it as fatal rather than printing to stdout and carrying on with
   an incomplete table, which would silently break operand lookup.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8151
8152 static void
8153 fill_instruction_hash_table (void)
8154 {
8155 aarch64_opcode *opcode = aarch64_opcode_table;
8156
8157 while (opcode->name != NULL)
8158 {
8159 templates *templ, *new_templ;
8160 templ = hash_find (aarch64_ops_hsh, opcode->name);
8161
8162 new_templ = XNEW (templates);
8163 new_templ->opcode = opcode;
8164 new_templ->next = NULL;
8165
8166 if (!templ)
8167 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8168 else
8169 {
8170 new_templ->next = templ->next;
8171 templ->next = new_templ;
8172 }
8173 ++opcode;
8174 }
8175 }
8176
/* Copy at most NUM characters of SRC into DST, upper-casing each one,
   and NUL-terminate the result.  DST must have room for NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t n = 0;

  while (n < num && src[n] != '\0')
    {
      dst[n] = TOUPPER (src[n]);
      n++;
    }
  dst[n] = '\0';
}
8185
8186 /* Assume STR point to a lower-case string, allocate, convert and return
8187 the corresponding upper-case string. */
8188 static inline const char*
8189 get_upper_str (const char *str)
8190 {
8191 char *ret;
8192 size_t len = strlen (str);
8193 ret = XNEWVEC (char, len + 1);
8194 convert_to_upper (ret, str, len);
8195 return ret;
8196 }
8197
8198 /* MD interface: Initialization. */
8199
8200 void
8201 md_begin (void)
8202 {
8203 unsigned mach;
8204 unsigned int i;
8205
8206 if ((aarch64_ops_hsh = hash_new ()) == NULL
8207 || (aarch64_cond_hsh = hash_new ()) == NULL
8208 || (aarch64_shift_hsh = hash_new ()) == NULL
8209 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8210 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8211 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8212 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8213 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8214 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8215 || (aarch64_reg_hsh = hash_new ()) == NULL
8216 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8217 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8218 || (aarch64_pldop_hsh = hash_new ()) == NULL
8219 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8220 as_fatal (_("virtual memory exhausted"));
8221
8222 fill_instruction_hash_table ();
8223
8224 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8225 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8226 (void *) (aarch64_sys_regs + i));
8227
8228 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8229 checked_hash_insert (aarch64_pstatefield_hsh,
8230 aarch64_pstatefields[i].name,
8231 (void *) (aarch64_pstatefields + i));
8232
8233 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8234 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8235 aarch64_sys_regs_ic[i].name,
8236 (void *) (aarch64_sys_regs_ic + i));
8237
8238 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8239 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8240 aarch64_sys_regs_dc[i].name,
8241 (void *) (aarch64_sys_regs_dc + i));
8242
8243 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8244 checked_hash_insert (aarch64_sys_regs_at_hsh,
8245 aarch64_sys_regs_at[i].name,
8246 (void *) (aarch64_sys_regs_at + i));
8247
8248 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8249 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8250 aarch64_sys_regs_tlbi[i].name,
8251 (void *) (aarch64_sys_regs_tlbi + i));
8252
8253 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8254 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8255 (void *) (reg_names + i));
8256
8257 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8258 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8259 (void *) (nzcv_names + i));
8260
8261 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8262 {
8263 const char *name = aarch64_operand_modifiers[i].name;
8264 checked_hash_insert (aarch64_shift_hsh, name,
8265 (void *) (aarch64_operand_modifiers + i));
8266 /* Also hash the name in the upper case. */
8267 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8268 (void *) (aarch64_operand_modifiers + i));
8269 }
8270
8271 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8272 {
8273 unsigned int j;
8274 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8275 the same condition code. */
8276 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8277 {
8278 const char *name = aarch64_conds[i].names[j];
8279 if (name == NULL)
8280 break;
8281 checked_hash_insert (aarch64_cond_hsh, name,
8282 (void *) (aarch64_conds + i));
8283 /* Also hash the name in the upper case. */
8284 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8285 (void *) (aarch64_conds + i));
8286 }
8287 }
8288
8289 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8290 {
8291 const char *name = aarch64_barrier_options[i].name;
8292 /* Skip xx00 - the unallocated values of option. */
8293 if ((i & 0x3) == 0)
8294 continue;
8295 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8296 (void *) (aarch64_barrier_options + i));
8297 /* Also hash the name in the upper case. */
8298 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8299 (void *) (aarch64_barrier_options + i));
8300 }
8301
8302 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8303 {
8304 const char* name = aarch64_prfops[i].name;
8305 /* Skip the unallocated hint encodings. */
8306 if (name == NULL)
8307 continue;
8308 checked_hash_insert (aarch64_pldop_hsh, name,
8309 (void *) (aarch64_prfops + i));
8310 /* Also hash the name in the upper case. */
8311 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8312 (void *) (aarch64_prfops + i));
8313 }
8314
8315 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8316 {
8317 const char* name = aarch64_hint_options[i].name;
8318
8319 checked_hash_insert (aarch64_hint_opt_hsh, name,
8320 (void *) (aarch64_hint_options + i));
8321 /* Also hash the name in the upper case. */
8322 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8323 (void *) (aarch64_hint_options + i));
8324 }
8325
8326 /* Set the cpu variant based on the command-line options. */
8327 if (!mcpu_cpu_opt)
8328 mcpu_cpu_opt = march_cpu_opt;
8329
8330 if (!mcpu_cpu_opt)
8331 mcpu_cpu_opt = &cpu_default;
8332
8333 cpu_variant = *mcpu_cpu_opt;
8334
8335 /* Record the CPU type. */
8336 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8337
8338 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8339 }
8340
8341 /* Command line processing. */
8342
/* Short options recognized by GAS for this target ("-m<...>").  */
const char *md_shortopts = "m:";

/* Define -EB/-EL long options only where the configuration supports the
   corresponding endianness.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8367
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple flag-style options: matching OPTION stores VALUE into *VAR.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8390
struct aarch64_cpu_option_table
{
  const char *name;		/* Name accepted by -mcpu=.  */
  const aarch64_feature_set value;	/* Features the CPU implements.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_3,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8451
struct aarch64_arch_option_table
{
  const char *name;		/* Name accepted by -march=.  */
  const aarch64_feature_set value;	/* Features implied by the arch.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {NULL, AARCH64_ARCH_NONE}	/* Sentinel.  */
};
8469
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name, e.g. "+crypto"'s "crypto".  */
  const aarch64_feature_set value;	/* Feature bits the extension adds.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};

/* Table of architectural extensions accepted after '+' in -march=/-mcpu=.
   REQUIRE entries drive the transitive enable/disable closures below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8525
/* Options whose value is parsed by a dedicated handler function.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8533
8534 /* Transitive closure of features depending on set. */
8535 static aarch64_feature_set
8536 aarch64_feature_disable_set (aarch64_feature_set set)
8537 {
8538 const struct aarch64_option_cpu_value_table *opt;
8539 aarch64_feature_set prev = 0;
8540
8541 while (prev != set) {
8542 prev = set;
8543 for (opt = aarch64_features; opt->name != NULL; opt++)
8544 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8545 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8546 }
8547 return set;
8548 }
8549
8550 /* Transitive closure of dependencies of set. */
8551 static aarch64_feature_set
8552 aarch64_feature_enable_set (aarch64_feature_set set)
8553 {
8554 const struct aarch64_option_cpu_value_table *opt;
8555 aarch64_feature_set prev = 0;
8556
8557 while (prev != set) {
8558 prev = set;
8559 for (opt = aarch64_features; opt->name != NULL; opt++)
8560 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8561 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8562 }
8563 return set;
8564 }
8565
8566 static int
8567 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8568 bfd_boolean ext_only)
8569 {
8570 /* We insist on extensions being added before being removed. We achieve
8571 this by using the ADDING_VALUE variable to indicate whether we are
8572 adding an extension (1) or removing it (0) and only allowing it to
8573 change in the order -1 -> 1 -> 0. */
8574 int adding_value = -1;
8575 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8576
8577 /* Copy the feature set, so that we can modify it. */
8578 *ext_set = **opt_p;
8579 *opt_p = ext_set;
8580
8581 while (str != NULL && *str != 0)
8582 {
8583 const struct aarch64_option_cpu_value_table *opt;
8584 const char *ext = NULL;
8585 int optlen;
8586
8587 if (!ext_only)
8588 {
8589 if (*str != '+')
8590 {
8591 as_bad (_("invalid architectural extension"));
8592 return 0;
8593 }
8594
8595 ext = strchr (++str, '+');
8596 }
8597
8598 if (ext != NULL)
8599 optlen = ext - str;
8600 else
8601 optlen = strlen (str);
8602
8603 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8604 {
8605 if (adding_value != 0)
8606 adding_value = 0;
8607 optlen -= 2;
8608 str += 2;
8609 }
8610 else if (optlen > 0)
8611 {
8612 if (adding_value == -1)
8613 adding_value = 1;
8614 else if (adding_value != 1)
8615 {
8616 as_bad (_("must specify extensions to add before specifying "
8617 "those to remove"));
8618 return FALSE;
8619 }
8620 }
8621
8622 if (optlen == 0)
8623 {
8624 as_bad (_("missing architectural extension"));
8625 return 0;
8626 }
8627
8628 gas_assert (adding_value != -1);
8629
8630 for (opt = aarch64_features; opt->name != NULL; opt++)
8631 if (strncmp (opt->name, str, optlen) == 0)
8632 {
8633 aarch64_feature_set set;
8634
8635 /* Add or remove the extension. */
8636 if (adding_value)
8637 {
8638 set = aarch64_feature_enable_set (opt->value);
8639 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8640 }
8641 else
8642 {
8643 set = aarch64_feature_disable_set (opt->value);
8644 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8645 }
8646 break;
8647 }
8648
8649 if (opt->name == NULL)
8650 {
8651 as_bad (_("unknown architectural extension `%s'"), str);
8652 return 0;
8653 }
8654
8655 str = ext;
8656 };
8657
8658 return 1;
8659 }
8660
8661 static int
8662 aarch64_parse_cpu (const char *str)
8663 {
8664 const struct aarch64_cpu_option_table *opt;
8665 const char *ext = strchr (str, '+');
8666 size_t optlen;
8667
8668 if (ext != NULL)
8669 optlen = ext - str;
8670 else
8671 optlen = strlen (str);
8672
8673 if (optlen == 0)
8674 {
8675 as_bad (_("missing cpu name `%s'"), str);
8676 return 0;
8677 }
8678
8679 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8680 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8681 {
8682 mcpu_cpu_opt = &opt->value;
8683 if (ext != NULL)
8684 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8685
8686 return 1;
8687 }
8688
8689 as_bad (_("unknown cpu `%s'"), str);
8690 return 0;
8691 }
8692
8693 static int
8694 aarch64_parse_arch (const char *str)
8695 {
8696 const struct aarch64_arch_option_table *opt;
8697 const char *ext = strchr (str, '+');
8698 size_t optlen;
8699
8700 if (ext != NULL)
8701 optlen = ext - str;
8702 else
8703 optlen = strlen (str);
8704
8705 if (optlen == 0)
8706 {
8707 as_bad (_("missing architecture name `%s'"), str);
8708 return 0;
8709 }
8710
8711 for (opt = aarch64_archs; opt->name != NULL; opt++)
8712 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8713 {
8714 march_cpu_opt = &opt->value;
8715 if (ext != NULL)
8716 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8717
8718 return 1;
8719 }
8720
8721 as_bad (_("unknown architecture `%s'\n"), str);
8722 return 0;
8723 }
8724
/* ABIs.  */

/* Maps an -mabi= option name to its enum aarch64_abi_type value.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* Option name, e.g. "lp64".  */
  enum aarch64_abi_type value;	/* Corresponding ABI.  */
};
8731
/* Recognized -mabi= values.  Iterated with ARRAY_SIZE, so no NULL
   terminator entry is needed.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8736
8737 static int
8738 aarch64_parse_abi (const char *str)
8739 {
8740 unsigned int i;
8741
8742 if (str[0] == '\0')
8743 {
8744 as_bad (_("missing abi name `%s'"), str);
8745 return 0;
8746 }
8747
8748 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8749 if (strcmp (str, aarch64_abis[i].name) == 0)
8750 {
8751 aarch64_abi = aarch64_abis[i].value;
8752 return 1;
8753 }
8754
8755 as_bad (_("unknown abi `%s'\n"), str);
8756 return 0;
8757 }
8758
8759 static struct aarch64_long_option_table aarch64_long_opts[] = {
8760 #ifdef OBJ_ELF
8761 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8762 aarch64_parse_abi, NULL},
8763 #endif /* OBJ_ELF */
8764 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8765 aarch64_parse_cpu, NULL},
8766 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8767 aarch64_parse_arch, NULL},
8768 {NULL, NULL, 0, NULL}
8769 };
8770
8771 int
8772 md_parse_option (int c, const char *arg)
8773 {
8774 struct aarch64_option_table *opt;
8775 struct aarch64_long_option_table *lopt;
8776
8777 switch (c)
8778 {
8779 #ifdef OPTION_EB
8780 case OPTION_EB:
8781 target_big_endian = 1;
8782 break;
8783 #endif
8784
8785 #ifdef OPTION_EL
8786 case OPTION_EL:
8787 target_big_endian = 0;
8788 break;
8789 #endif
8790
8791 case 'a':
8792 /* Listing option. Just ignore these, we don't support additional
8793 ones. */
8794 return 0;
8795
8796 default:
8797 for (opt = aarch64_opts; opt->option != NULL; opt++)
8798 {
8799 if (c == opt->option[0]
8800 && ((arg == NULL && opt->option[1] == 0)
8801 || streq (arg, opt->option + 1)))
8802 {
8803 /* If the option is deprecated, tell the user. */
8804 if (opt->deprecated != NULL)
8805 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8806 arg ? arg : "", _(opt->deprecated));
8807
8808 if (opt->var != NULL)
8809 *opt->var = opt->value;
8810
8811 return 1;
8812 }
8813 }
8814
8815 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8816 {
8817 /* These options are expected to have an argument. */
8818 if (c == lopt->option[0]
8819 && arg != NULL
8820 && strncmp (arg, lopt->option + 1,
8821 strlen (lopt->option + 1)) == 0)
8822 {
8823 /* If the option is deprecated, tell the user. */
8824 if (lopt->deprecated != NULL)
8825 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8826 _(lopt->deprecated));
8827
8828 /* Call the sup-option parser. */
8829 return lopt->func (arg + strlen (lopt->option) - 1);
8830 }
8831 }
8832
8833 return 0;
8834 }
8835
8836 return 1;
8837 }
8838
8839 void
8840 md_show_usage (FILE * fp)
8841 {
8842 struct aarch64_option_table *opt;
8843 struct aarch64_long_option_table *lopt;
8844
8845 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8846
8847 for (opt = aarch64_opts; opt->option != NULL; opt++)
8848 if (opt->help != NULL)
8849 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8850
8851 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8852 if (lopt->help != NULL)
8853 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8854
8855 #ifdef OPTION_EB
8856 fprintf (fp, _("\
8857 -EB assemble code for a big-endian cpu\n"));
8858 #endif
8859
8860 #ifdef OPTION_EL
8861 fprintf (fp, _("\
8862 -EL assemble code for a little-endian cpu\n"));
8863 #endif
8864 }
8865
8866 /* Parse a .cpu directive. */
8867
8868 static void
8869 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8870 {
8871 const struct aarch64_cpu_option_table *opt;
8872 char saved_char;
8873 char *name;
8874 char *ext;
8875 size_t optlen;
8876
8877 name = input_line_pointer;
8878 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8879 input_line_pointer++;
8880 saved_char = *input_line_pointer;
8881 *input_line_pointer = 0;
8882
8883 ext = strchr (name, '+');
8884
8885 if (ext != NULL)
8886 optlen = ext - name;
8887 else
8888 optlen = strlen (name);
8889
8890 /* Skip the first "all" entry. */
8891 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8892 if (strlen (opt->name) == optlen
8893 && strncmp (name, opt->name, optlen) == 0)
8894 {
8895 mcpu_cpu_opt = &opt->value;
8896 if (ext != NULL)
8897 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8898 return;
8899
8900 cpu_variant = *mcpu_cpu_opt;
8901
8902 *input_line_pointer = saved_char;
8903 demand_empty_rest_of_line ();
8904 return;
8905 }
8906 as_bad (_("unknown cpu `%s'"), name);
8907 *input_line_pointer = saved_char;
8908 ignore_rest_of_line ();
8909 }
8910
8911
8912 /* Parse a .arch directive. */
8913
8914 static void
8915 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8916 {
8917 const struct aarch64_arch_option_table *opt;
8918 char saved_char;
8919 char *name;
8920 char *ext;
8921 size_t optlen;
8922
8923 name = input_line_pointer;
8924 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8925 input_line_pointer++;
8926 saved_char = *input_line_pointer;
8927 *input_line_pointer = 0;
8928
8929 ext = strchr (name, '+');
8930
8931 if (ext != NULL)
8932 optlen = ext - name;
8933 else
8934 optlen = strlen (name);
8935
8936 /* Skip the first "all" entry. */
8937 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8938 if (strlen (opt->name) == optlen
8939 && strncmp (name, opt->name, optlen) == 0)
8940 {
8941 mcpu_cpu_opt = &opt->value;
8942 if (ext != NULL)
8943 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8944 return;
8945
8946 cpu_variant = *mcpu_cpu_opt;
8947
8948 *input_line_pointer = saved_char;
8949 demand_empty_rest_of_line ();
8950 return;
8951 }
8952
8953 as_bad (_("unknown architecture `%s'\n"), name);
8954 *input_line_pointer = saved_char;
8955 ignore_rest_of_line ();
8956 }
8957
8958 /* Parse a .arch_extension directive. */
8959
8960 static void
8961 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8962 {
8963 char saved_char;
8964 char *ext = input_line_pointer;;
8965
8966 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8967 input_line_pointer++;
8968 saved_char = *input_line_pointer;
8969 *input_line_pointer = 0;
8970
8971 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8972 return;
8973
8974 cpu_variant = *mcpu_cpu_opt;
8975
8976 *input_line_pointer = saved_char;
8977 demand_empty_rest_of_line ();
8978 }
8979
/* Copy symbol information.  */

/* GAS hook: copy the AArch64-specific flag word from symbol SRC to
   symbol DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.229548 seconds and 5 git commands to generate.