1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s); in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return information about the parsed result, e.g. the register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
343 /* Structure for a hash table entry for a register. */
344 typedef struct
345 {
346 const char *name;
347 unsigned char number;
348 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
349 unsigned char builtin;
350 } reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
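/* For illustration (derived from the expansions above): a MULTI_REG_TYPE
   entry such as R64_SP expands to the mask
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)
   which lets aarch64_check_reg_type test membership with a single AND.  */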
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
363 /* Diagnostics used when we don't get a register of the expected type.
364 Note: this has to be synchronized with the aarch64_reg_type definitions
365 above. */
366 static const char *
367 get_reg_expected_msg (aarch64_reg_type reg_type)
368 {
369 const char *msg;
370
371 switch (reg_type)
372 {
373 case REG_TYPE_R_32:
374 msg = N_("integer 32-bit register expected");
375 break;
376 case REG_TYPE_R_64:
377 msg = N_("integer 64-bit register expected");
378 break;
379 case REG_TYPE_R_N:
380 msg = N_("integer register expected");
381 break;
382 case REG_TYPE_R64_SP:
383 msg = N_("64-bit integer or SP register expected");
384 break;
385 case REG_TYPE_SVE_BASE:
386 msg = N_("base register expected");
387 break;
388 case REG_TYPE_R_Z:
389 msg = N_("integer or zero register expected");
390 break;
391 case REG_TYPE_SVE_OFFSET:
392 msg = N_("offset register expected");
393 break;
394 case REG_TYPE_R_SP:
395 msg = N_("integer or SP register expected");
396 break;
397 case REG_TYPE_R_Z_SP:
398 msg = N_("integer, zero or SP register expected");
399 break;
400 case REG_TYPE_FP_B:
401 msg = N_("8-bit SIMD scalar register expected");
402 break;
403 case REG_TYPE_FP_H:
404 msg = N_("16-bit SIMD scalar or floating-point half precision "
405 "register expected");
406 break;
407 case REG_TYPE_FP_S:
408 msg = N_("32-bit SIMD scalar or floating-point single precision "
409 "register expected");
410 break;
411 case REG_TYPE_FP_D:
412 msg = N_("64-bit SIMD scalar or floating-point double precision "
413 "register expected");
414 break;
415 case REG_TYPE_FP_Q:
416 msg = N_("128-bit SIMD scalar or floating-point quad precision "
417 "register expected");
418 break;
419 case REG_TYPE_R_Z_BHSDQ_V:
420 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
421 msg = N_("register expected");
422 break;
423 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
424 msg = N_("SIMD scalar or floating-point register expected");
425 break;
426 case REG_TYPE_VN: /* any V reg */
427 msg = N_("vector register expected");
428 break;
429 case REG_TYPE_ZN:
430 msg = N_("SVE vector register expected");
431 break;
432 case REG_TYPE_PN:
433 msg = N_("SVE predicate register expected");
434 break;
435 default:
436 as_fatal (_("invalid register type %d"), reg_type);
437 }
438 return msg;
439 }
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
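/* A single space is enough here: the input scrubber collapses interior
   whitespace to single spaces (see create_register_alias below).  */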
534
535 static inline bfd_boolean
536 skip_past_char (char **str, char c)
537 {
538 if (**str == c)
539 {
540 (*str)++;
541 return TRUE;
542 }
543 else
544 return FALSE;
545 }
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bfd_boolean in_my_get_expression_p = FALSE;
552
553 /* Third argument to my_get_expression. */
554 #define GE_NO_PREFIX 0
555 #define GE_OPT_PREFIX 1
556
557 /* Return TRUE if the string pointed to by *STR is successfully parsed
558 as a valid expression; *EP will be filled with the information of
559 such an expression. Otherwise return FALSE. */
560
561 static bfd_boolean
562 my_get_expression (expressionS * ep, char **str, int prefix_mode,
563 int reject_absent)
564 {
565 char *save_in;
566 segT seg;
567 int prefix_present_p = 0;
568
569 switch (prefix_mode)
570 {
571 case GE_NO_PREFIX:
572 break;
573 case GE_OPT_PREFIX:
574 if (is_immediate_prefix (**str))
575 {
576 (*str)++;
577 prefix_present_p = 1;
578 }
579 break;
580 default:
581 abort ();
582 }
583
584 memset (ep, 0, sizeof (expressionS));
585
586 save_in = input_line_pointer;
587 input_line_pointer = *str;
588 in_my_get_expression_p = TRUE;
589 seg = expression (ep);
590 in_my_get_expression_p = FALSE;
591
592 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
593 {
594 /* We found a bad expression in md_operand(). */
595 *str = input_line_pointer;
596 input_line_pointer = save_in;
597 if (prefix_present_p && ! error_p ())
598 set_fatal_syntax_error (_("bad expression"));
599 else
600 set_first_syntax_error (_("bad expression"));
601 return FALSE;
602 }
603
604 #ifdef OBJ_AOUT
605 if (seg != absolute_section
606 && seg != text_section
607 && seg != data_section
608 && seg != bss_section && seg != undefined_section)
609 {
610 set_syntax_error (_("bad segment"));
611 *str = input_line_pointer;
612 input_line_pointer = save_in;
613 return FALSE;
614 }
615 #else
616 (void) seg;
617 #endif
618
619 *str = input_line_pointer;
620 input_line_pointer = save_in;
621 return TRUE;
622 }
623
624 /* Turn a string in input_line_pointer into a floating point constant
625 of type TYPE, and store the appropriate bytes in *LITP. The number
626 of LITTLENUMS emitted is stored in *SIZEP. An error message is
627 returned, or NULL on OK. */
628
629 const char *
630 md_atof (int type, char *litP, int *sizeP)
631 {
632 /* If this is a bfloat16 type, then parse it slightly differently -
633 as it does not follow the IEEE standard exactly. */
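/* bfloat16 is the top half of an IEEE 754 single precision value:
   1 sign bit, 8 exponent bits and a 7-bit fraction.  */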
634 if (type == 'b')
635 {
636 char * t;
637 LITTLENUM_TYPE words[MAX_LITTLENUMS];
638 FLONUM_TYPE generic_float;
639
640 t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);
641
642 if (t)
643 input_line_pointer = t;
644 else
645 return _("invalid floating point number");
646
647 switch (generic_float.sign)
648 {
649 /* Is +Inf. */
650 case 'P':
651 words[0] = 0x7f80;
652 break;
653
654 /* Is -Inf. */
655 case 'N':
656 words[0] = 0xff80;
657 break;
658
659 /* Is NaN. */
660 /* bfloat16 has two types of NaN - quiet and signalling.
661 A quiet NaN has bit[6] == 1 && fraction != 0, whereas a
662 signalling NaN has bit[6] == 0 && fraction != 0.
663 We chose this specific encoding as it is the same form
664 as used by other IEEE 754 encodings in GAS. */
665 case 0:
666 words[0] = 0x7fff;
667 break;
668
669 default:
670 break;
671 }
672
673 *sizeP = 2;
674
675 md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));
676
677 return NULL;
678 }
679
680 return ieee_md_atof (type, litP, sizeP, target_big_endian);
681 }
682
683 /* We handle all bad expressions here, so that we can report the faulty
684 instruction in the error message. */
685 void
686 md_operand (expressionS * exp)
687 {
688 if (in_my_get_expression_p)
689 exp->X_op = O_illegal;
690 }
691
692 /* Immediate values. */
693
694 /* Errors may be set multiple times during parsing or bit encoding
695 (particularly in the Neon bits), but usually the earliest error which is set
696 will be the most meaningful. Avoid overwriting it with later (cascading)
697 errors by calling this function. */
698
699 static void
700 first_error (const char *error)
701 {
702 if (! error_p ())
703 set_syntax_error (error);
704 }
705
706 /* Similar to first_error, but this function accepts a formatted error
707 message. */
708 static void
709 first_error_fmt (const char *format, ...)
710 {
711 va_list args;
712 enum
713 { size = 100 };
714 /* N.B. this single buffer will not cause error messages for different
715 instructions to pollute each other; this is because at the end of
716 processing each assembly line, any pending error message will be
717 collected by as_bad. */
718 static char buffer[size];
719
720 if (! error_p ())
721 {
722 int ret ATTRIBUTE_UNUSED;
723 va_start (args, format);
724 ret = vsnprintf (buffer, size, format, args);
725 know (ret <= size - 1 && ret >= 0);
726 va_end (args);
727 set_syntax_error (buffer);
728 }
729 }
730
731 /* Register parsing. */
732
733 /* Generic register parser which is called by other specialized
734 register parsers.
735 CCP points to what should be the beginning of a register name.
736 If it is indeed a valid register name, advance CCP over it and
737 return the reg_entry structure; otherwise return NULL.
738 It does not issue diagnostics. */
739
740 static reg_entry *
741 parse_reg (char **ccp)
742 {
743 char *start = *ccp;
744 char *p;
745 reg_entry *reg;
746
747 #ifdef REGISTER_PREFIX
748 if (*start != REGISTER_PREFIX)
749 return NULL;
750 start++;
751 #endif
752
753 p = start;
754 if (!ISALPHA (*p) || !is_name_beginner (*p))
755 return NULL;
756
757 do
758 p++;
759 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
760
761 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
762
763 if (!reg)
764 return NULL;
765
766 *ccp = p;
767 return reg;
768 }
769
770 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
771 otherwise return FALSE. */
772 static bfd_boolean
773 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
774 {
775 return (reg_type_masks[type] & (1 << reg->type)) != 0;
776 }
777
778 /* Try to parse a base or offset register. Allow SVE base and offset
779 registers if REG_TYPE includes SVE registers. Return the register
780 entry on success, setting *QUALIFIER to the register qualifier.
781 Return null otherwise.
782
783 Note that this function does not issue any diagnostics. */
784
785 static const reg_entry *
786 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
787 aarch64_opnd_qualifier_t *qualifier)
788 {
789 char *str = *ccp;
790 const reg_entry *reg = parse_reg (&str);
791
792 if (reg == NULL)
793 return NULL;
794
795 switch (reg->type)
796 {
797 case REG_TYPE_R_32:
798 case REG_TYPE_SP_32:
799 case REG_TYPE_Z_32:
800 *qualifier = AARCH64_OPND_QLF_W;
801 break;
802
803 case REG_TYPE_R_64:
804 case REG_TYPE_SP_64:
805 case REG_TYPE_Z_64:
806 *qualifier = AARCH64_OPND_QLF_X;
807 break;
808
809 case REG_TYPE_ZN:
810 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
811 || str[0] != '.')
812 return NULL;
813 switch (TOLOWER (str[1]))
814 {
815 case 's':
816 *qualifier = AARCH64_OPND_QLF_S_S;
817 break;
818 case 'd':
819 *qualifier = AARCH64_OPND_QLF_S_D;
820 break;
821 default:
822 return NULL;
823 }
824 str += 2;
825 break;
826
827 default:
828 return NULL;
829 }
830
831 *ccp = str;
832
833 return reg;
834 }
835
836 /* Try to parse a base or offset register. Return the register entry
837 on success, setting *QUALIFIER to the register qualifier. Return null
838 otherwise.
839
840 Note that this function does not issue any diagnostics. */
841
842 static const reg_entry *
843 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
844 {
845 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
846 }
847
848 /* Parse the qualifier of a vector register or vector element of type
849 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
850 succeeds; otherwise return FALSE.
851
852 Accept only one occurrence of:
853 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
854 b h s d q */
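/* For instance, ".4s" sets *PARSED_TYPE to { NT_s, width 4 }, while the
   size-only form ".d" yields { NT_d, width 0 }.  */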
855 static bfd_boolean
856 parse_vector_type_for_operand (aarch64_reg_type reg_type,
857 struct vector_type_el *parsed_type, char **str)
858 {
859 char *ptr = *str;
860 unsigned width;
861 unsigned element_size;
862 enum vector_el_type type;
863
864 /* skip '.' */
865 gas_assert (*ptr == '.');
866 ptr++;
867
868 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
869 {
870 width = 0;
871 goto elt_size;
872 }
873 width = strtoul (ptr, &ptr, 10);
874 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
875 {
876 first_error_fmt (_("bad size %d in vector width specifier"), width);
877 return FALSE;
878 }
879
880 elt_size:
881 switch (TOLOWER (*ptr))
882 {
883 case 'b':
884 type = NT_b;
885 element_size = 8;
886 break;
887 case 'h':
888 type = NT_h;
889 element_size = 16;
890 break;
891 case 's':
892 type = NT_s;
893 element_size = 32;
894 break;
895 case 'd':
896 type = NT_d;
897 element_size = 64;
898 break;
899 case 'q':
900 if (reg_type == REG_TYPE_ZN || width == 1)
901 {
902 type = NT_q;
903 element_size = 128;
904 break;
905 }
906 /* fall through. */
907 default:
908 if (*ptr != '\0')
909 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
910 else
911 first_error (_("missing element size"));
912 return FALSE;
913 }
914 if (width != 0 && width * element_size != 64
915 && width * element_size != 128
916 && !(width == 2 && element_size == 16)
917 && !(width == 4 && element_size == 8))
918 {
919 first_error_fmt (_
920 ("invalid element size %d and vector size combination %c"),
921 width, *ptr);
922 return FALSE;
923 }
924 ptr++;
925
926 parsed_type->type = type;
927 parsed_type->width = width;
928
929 *str = ptr;
930
931 return TRUE;
932 }
933
934 /* *STR contains an SVE zero/merge predication suffix. Parse it into
935 *PARSED_TYPE and point *STR at the end of the suffix. */
936
937 static bfd_boolean
938 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
939 {
940 char *ptr = *str;
941
942 /* Skip '/'. */
943 gas_assert (*ptr == '/');
944 ptr++;
945 switch (TOLOWER (*ptr))
946 {
947 case 'z':
948 parsed_type->type = NT_zero;
949 break;
950 case 'm':
951 parsed_type->type = NT_merge;
952 break;
953 default:
954 if (*ptr != '\0' && *ptr != ',')
955 first_error_fmt (_("unexpected character `%c' in predication type"),
956 *ptr);
957 else
958 first_error (_("missing predication type"));
959 return FALSE;
960 }
961 parsed_type->width = 0;
962 *str = ptr + 1;
963 return TRUE;
964 }
965
966 /* Parse a register of the type TYPE.
967
968 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
969 name or the parsed register is not of TYPE.
970
971 Otherwise return the register number, and optionally fill in the actual
972 type of the register in *RTYPE when multiple alternatives were given, and
973 return the register shape and element index information in *TYPEINFO.
974
975 IN_REG_LIST should be set to TRUE if the caller is parsing a register
976 list. */
977
978 static int
979 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
980 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
981 {
982 char *str = *ccp;
983 const reg_entry *reg = parse_reg (&str);
984 struct vector_type_el atype;
985 struct vector_type_el parsetype;
986 bfd_boolean is_typed_vecreg = FALSE;
987
988 atype.defined = 0;
989 atype.type = NT_invtype;
990 atype.width = -1;
991 atype.index = 0;
992
993 if (reg == NULL)
994 {
995 if (typeinfo)
996 *typeinfo = atype;
997 set_default_error ();
998 return PARSE_FAIL;
999 }
1000
1001 if (! aarch64_check_reg_type (reg, type))
1002 {
1003 DEBUG_TRACE ("reg type check failed");
1004 set_default_error ();
1005 return PARSE_FAIL;
1006 }
1007 type = reg->type;
1008
1009 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
1010 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
1011 {
1012 if (*str == '.')
1013 {
1014 if (!parse_vector_type_for_operand (type, &parsetype, &str))
1015 return PARSE_FAIL;
1016 }
1017 else
1018 {
1019 if (!parse_predication_for_operand (&parsetype, &str))
1020 return PARSE_FAIL;
1021 }
1022
1023 /* Register is of the form Vn.[bhsdq]. */
1024 is_typed_vecreg = TRUE;
1025
1026 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1027 {
1028 /* The width is always variable; we don't allow an integer width
1029 to be specified. */
1030 gas_assert (parsetype.width == 0);
1031 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1032 }
1033 else if (parsetype.width == 0)
1034 /* Expect index. In the new scheme we cannot have
1035 Vn.[bhsdq] represent a scalar. Therefore any
1036 Vn.[bhsdq] should have an index following it.
1037 Except in reglists of course. */
1038 atype.defined |= NTA_HASINDEX;
1039 else
1040 atype.defined |= NTA_HASTYPE;
1041
1042 atype.type = parsetype.type;
1043 atype.width = parsetype.width;
1044 }
1045
1046 if (skip_past_char (&str, '['))
1047 {
1048 expressionS exp;
1049
1050 /* Reject Sn[index] syntax. */
1051 if (!is_typed_vecreg)
1052 {
1053 first_error (_("this type of register can't be indexed"));
1054 return PARSE_FAIL;
1055 }
1056
1057 if (in_reg_list)
1058 {
1059 first_error (_("index not allowed inside register list"));
1060 return PARSE_FAIL;
1061 }
1062
1063 atype.defined |= NTA_HASINDEX;
1064
1065 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1066
1067 if (exp.X_op != O_constant)
1068 {
1069 first_error (_("constant expression required"));
1070 return PARSE_FAIL;
1071 }
1072
1073 if (! skip_past_char (&str, ']'))
1074 return PARSE_FAIL;
1075
1076 atype.index = exp.X_add_number;
1077 }
1078 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1079 {
1080 /* Indexed vector register expected. */
1081 first_error (_("indexed vector register expected"));
1082 return PARSE_FAIL;
1083 }
1084
1085 /* A vector reg Vn should be typed or indexed. */
1086 if (type == REG_TYPE_VN && atype.defined == 0)
1087 {
1088 first_error (_("invalid use of vector register"));
1089 }
1090
1091 if (typeinfo)
1092 *typeinfo = atype;
1093
1094 if (rtype)
1095 *rtype = type;
1096
1097 *ccp = str;
1098
1099 return reg->number;
1100 }
1101
1102 /* Parse register.
1103
1104 Return the register number on success; return PARSE_FAIL otherwise.
1105
1106 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1107 the register (e.g. NEON double or quad reg when either has been requested).
1108
1109 If this is a NEON vector register with additional type information, fill
1110 in the struct pointed to by VECTYPE (if non-NULL).
1111
1112 This parser does not handle register lists. */
1113
1114 static int
1115 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1116 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1117 {
1118 struct vector_type_el atype;
1119 char *str = *ccp;
1120 int reg = parse_typed_reg (&str, type, rtype, &atype,
1121 /*in_reg_list= */ FALSE);
1122
1123 if (reg == PARSE_FAIL)
1124 return PARSE_FAIL;
1125
1126 if (vectype)
1127 *vectype = atype;
1128
1129 *ccp = str;
1130
1131 return reg;
1132 }
1133
1134 static inline bfd_boolean
1135 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1136 {
1137 return
1138 e1.type == e2.type
1139 && e1.defined == e2.defined
1140 && e1.width == e2.width && e1.index == e2.index;
1141 }
1142
1143 /* This function parses a list of vector registers of type TYPE.
1144 On success, it returns the parsed register list information in the
1145 following encoded format:
1146
1147 bits 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1148 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_regs - 1
1149
1150 The information of the register shape and/or index is returned in
1151 *VECTYPE.
1152
1153 It returns PARSE_FAIL if the register list is invalid.
1154
1155 The list contains one to four registers.
1156 Each register can be one of:
1157 <Vt>.<T>[<index>]
1158 <Vt>.<T>
1159 All <T> should be identical.
1160 All <index> should be identical.
1161 There are restrictions on <Vt> numbers which are checked later
1162 (by reg_list_valid_p). */
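/* As a concrete example of the encoding above, the list { v2.4s, v3.4s }
   is returned as
     (2 << 2) | (3 << 7) | (2 - 1) == 0x189
   i.e. the first regno in bits 2-6, the second regno in bits 7-11 and the
   register count minus one in bits 0-1.  */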
1163
1164 static int
1165 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1166 struct vector_type_el *vectype)
1167 {
1168 char *str = *ccp;
1169 int nb_regs;
1170 struct vector_type_el typeinfo, typeinfo_first;
1171 int val, val_range;
1172 int in_range;
1173 int ret_val;
1174 int i;
1175 bfd_boolean error = FALSE;
1176 bfd_boolean expect_index = FALSE;
1177
1178 if (*str != '{')
1179 {
1180 set_syntax_error (_("expecting {"));
1181 return PARSE_FAIL;
1182 }
1183 str++;
1184
1185 nb_regs = 0;
1186 typeinfo_first.defined = 0;
1187 typeinfo_first.type = NT_invtype;
1188 typeinfo_first.width = -1;
1189 typeinfo_first.index = 0;
1190 ret_val = 0;
1191 val = -1;
1192 val_range = -1;
1193 in_range = 0;
1194 do
1195 {
1196 if (in_range)
1197 {
1198 str++; /* skip over '-' */
1199 val_range = val;
1200 }
1201 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1202 /*in_reg_list= */ TRUE);
1203 if (val == PARSE_FAIL)
1204 {
1205 set_first_syntax_error (_("invalid vector register in list"));
1206 error = TRUE;
1207 continue;
1208 }
1209 /* reject [bhsd]n */
1210 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1211 {
1212 set_first_syntax_error (_("invalid scalar register in list"));
1213 error = TRUE;
1214 continue;
1215 }
1216
1217 if (typeinfo.defined & NTA_HASINDEX)
1218 expect_index = TRUE;
1219
1220 if (in_range)
1221 {
1222 if (val < val_range)
1223 {
1224 set_first_syntax_error
1225 (_("invalid range in vector register list"));
1226 error = TRUE;
1227 }
1228 val_range++;
1229 }
1230 else
1231 {
1232 val_range = val;
1233 if (nb_regs == 0)
1234 typeinfo_first = typeinfo;
1235 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1236 {
1237 set_first_syntax_error
1238 (_("type mismatch in vector register list"));
1239 error = TRUE;
1240 }
1241 }
1242 if (! error)
1243 for (i = val_range; i <= val; i++)
1244 {
1245 ret_val |= i << (5 * nb_regs);
1246 nb_regs++;
1247 }
1248 in_range = 0;
1249 }
1250 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1251
1252 skip_whitespace (str);
1253 if (*str != '}')
1254 {
1255 set_first_syntax_error (_("end of vector register list not found"));
1256 error = TRUE;
1257 }
1258 str++;
1259
1260 skip_whitespace (str);
1261
1262 if (expect_index)
1263 {
1264 if (skip_past_char (&str, '['))
1265 {
1266 expressionS exp;
1267
1268 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1269 if (exp.X_op != O_constant)
1270 {
1271 set_first_syntax_error (_("constant expression required."));
1272 error = TRUE;
1273 }
1274 if (! skip_past_char (&str, ']'))
1275 error = TRUE;
1276 else
1277 typeinfo_first.index = exp.X_add_number;
1278 }
1279 else
1280 {
1281 set_first_syntax_error (_("expected index"));
1282 error = TRUE;
1283 }
1284 }
1285
1286 if (nb_regs > 4)
1287 {
1288 set_first_syntax_error (_("too many registers in vector register list"));
1289 error = TRUE;
1290 }
1291 else if (nb_regs == 0)
1292 {
1293 set_first_syntax_error (_("empty vector register list"));
1294 error = TRUE;
1295 }
1296
1297 *ccp = str;
1298 if (! error)
1299 *vectype = typeinfo_first;
1300
1301 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1302 }
1303
1304 /* Directives: register aliases. */
1305
1306 static reg_entry *
1307 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1308 {
1309 reg_entry *new;
1310 const char *name;
1311
1312 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1313 {
1314 if (new->builtin)
1315 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1316 str);
1317
1318 /* Only warn about a redefinition if it's not defined as the
1319 same register. */
1320 else if (new->number != number || new->type != type)
1321 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1322
1323 return NULL;
1324 }
1325
1326 name = xstrdup (str);
1327 new = XNEW (reg_entry);
1328
1329 new->name = name;
1330 new->number = number;
1331 new->type = type;
1332 new->builtin = FALSE;
1333
1334 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1335
1336 return new;
1337 }
1338
1339 /* Look for the .req directive. This is of the form:
1340
1341 new_register_name .req existing_register_name
1342
1343 If we find one, or if it looks sufficiently like one that we want to
1344 handle any error here, return TRUE. Otherwise return FALSE. */
1345
1346 static bfd_boolean
1347 create_register_alias (char *newname, char *p)
1348 {
1349 const reg_entry *old;
1350 char *oldname, *nbuf;
1351 size_t nlen;
1352
1353 /* The input scrubber ensures that whitespace after the mnemonic is
1354 collapsed to single spaces. */
1355 oldname = p;
1356 if (strncmp (oldname, " .req ", 6) != 0)
1357 return FALSE;
1358
1359 oldname += 6;
1360 if (*oldname == '\0')
1361 return FALSE;
1362
1363 old = str_hash_find (aarch64_reg_hsh, oldname);
1364 if (!old)
1365 {
1366 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1367 return TRUE;
1368 }
1369
1370 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1371 the desired alias name, and p points to its end. If not, then
1372 the desired alias name is in the global original_case_string. */
1373 #ifdef TC_CASE_SENSITIVE
1374 nlen = p - newname;
1375 #else
1376 newname = original_case_string;
1377 nlen = strlen (newname);
1378 #endif
1379
1380 nbuf = xmemdup0 (newname, nlen);
1381
1382 /* Create aliases under the new name as stated; an all-lowercase
1383 version of the new name; and an all-uppercase version of the new
1384 name. */
1385 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1386 {
1387 for (p = nbuf; *p; p++)
1388 *p = TOUPPER (*p);
1389
1390 if (strncmp (nbuf, newname, nlen))
1391 {
1392 /* If this attempt to create an additional alias fails, do not bother
1393 trying to create the all-lower case alias. We will fail and issue
1394 a second, duplicate error message. This situation arises when the
1395 programmer does something like:
1396 foo .req r0
1397 Foo .req r1
1398 The second .req creates the "Foo" alias but then fails to create
1399 the artificial FOO alias because it has already been created by the
1400 first .req. */
1401 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1402 {
1403 free (nbuf);
1404 return TRUE;
1405 }
1406 }
1407
1408 for (p = nbuf; *p; p++)
1409 *p = TOLOWER (*p);
1410
1411 if (strncmp (nbuf, newname, nlen))
1412 insert_reg_alias (nbuf, old->number, old->type);
1413 }
1414
1415 free (nbuf);
1416 return TRUE;
1417 }
1418
1419 /* Should never be called, as .req goes between the alias and the
1420 register name, not at the beginning of the line. */
1421 static void
1422 s_req (int a ATTRIBUTE_UNUSED)
1423 {
1424 as_bad (_("invalid syntax for .req directive"));
1425 }
1426
1427 /* The .unreq directive deletes an alias which was previously defined
1428 by .req. For example:
1429
1430 my_alias .req r11
1431 .unreq my_alias */
1432
1433 static void
1434 s_unreq (int a ATTRIBUTE_UNUSED)
1435 {
1436 char *name;
1437 char saved_char;
1438
1439 name = input_line_pointer;
1440
1441 while (*input_line_pointer != 0
1442 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1443 ++input_line_pointer;
1444
1445 saved_char = *input_line_pointer;
1446 *input_line_pointer = 0;
1447
1448 if (!*name)
1449 as_bad (_("invalid syntax for .unreq directive"));
1450 else
1451 {
1452 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1453
1454 if (!reg)
1455 as_bad (_("unknown register alias '%s'"), name);
1456 else if (reg->builtin)
1457 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1458 name);
1459 else
1460 {
1461 char *p;
1462 char *nbuf;
1463
1464 str_hash_delete (aarch64_reg_hsh, name);
1465 free ((char *) reg->name);
1466 free (reg);
1467
1468 /* Also locate the all upper case and all lower case versions.
1469 Do not complain if we cannot find one or the other as it
1470 was probably deleted above. */
1471
1472 nbuf = strdup (name);
1473 for (p = nbuf; *p; p++)
1474 *p = TOUPPER (*p);
1475 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1476 if (reg)
1477 {
1478 str_hash_delete (aarch64_reg_hsh, nbuf);
1479 free ((char *) reg->name);
1480 free (reg);
1481 }
1482
1483 for (p = nbuf; *p; p++)
1484 *p = TOLOWER (*p);
1485 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1486 if (reg)
1487 {
1488 str_hash_delete (aarch64_reg_hsh, nbuf);
1489 free ((char *) reg->name);
1490 free (reg);
1491 }
1492
1493 free (nbuf);
1494 }
1495 }
1496
1497 *input_line_pointer = saved_char;
1498 demand_empty_rest_of_line ();
1499 }
1500
1501 /* Directives: Instruction set selection. */
1502
1503 #ifdef OBJ_ELF
1504 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1505 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1506 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1507 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1508
1509 /* Create a new mapping symbol for the transition to STATE. */
1510
1511 static void
1512 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1513 {
1514 symbolS *symbolP;
1515 const char *symname;
1516 int type;
1517
1518 switch (state)
1519 {
1520 case MAP_DATA:
1521 symname = "$d";
1522 type = BSF_NO_FLAGS;
1523 break;
1524 case MAP_INSN:
1525 symname = "$x";
1526 type = BSF_NO_FLAGS;
1527 break;
1528 default:
1529 abort ();
1530 }
1531
1532 symbolP = symbol_new (symname, now_seg, frag, value);
1533 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1534
1535 /* Save the mapping symbols for future reference. Also check that
1536 we do not place two mapping symbols at the same offset within a
1537 frag. We'll handle overlap between frags in
1538 check_mapping_symbols.
1539
1540 If .fill or another data-filling directive generates zero-sized data,
1541 the mapping symbol for the following code will have the same value
1542 as the one generated for the data filling directive. In this case,
1543 we replace the old symbol with the new one at the same address. */
1544 if (value == 0)
1545 {
1546 if (frag->tc_frag_data.first_map != NULL)
1547 {
1548 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1549 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1550 &symbol_lastP);
1551 }
1552 frag->tc_frag_data.first_map = symbolP;
1553 }
1554 if (frag->tc_frag_data.last_map != NULL)
1555 {
1556 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1557 S_GET_VALUE (symbolP));
1558 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1559 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1560 &symbol_lastP);
1561 }
1562 frag->tc_frag_data.last_map = symbolP;
1563 }
1564
1565 /* We must sometimes convert a region marked as code to data during
1566 code alignment, if an odd number of bytes have to be padded. The
1567 code mapping symbol is pushed to an aligned address. */
1568
1569 static void
1570 insert_data_mapping_symbol (enum mstate state,
1571 valueT value, fragS * frag, offsetT bytes)
1572 {
1573 /* If there was already a mapping symbol, remove it. */
1574 if (frag->tc_frag_data.last_map != NULL
1575 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1576 frag->fr_address + value)
1577 {
1578 symbolS *symp = frag->tc_frag_data.last_map;
1579
1580 if (value == 0)
1581 {
1582 know (frag->tc_frag_data.first_map == symp);
1583 frag->tc_frag_data.first_map = NULL;
1584 }
1585 frag->tc_frag_data.last_map = NULL;
1586 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1587 }
1588
1589 make_mapping_symbol (MAP_DATA, value, frag);
1590 make_mapping_symbol (state, value + bytes, frag);
1591 }
1592
1593 static void mapping_state_2 (enum mstate state, int max_chars);
1594
1595 /* Set the mapping state to STATE. Only call this when about to
1596 emit some STATE bytes to the file. */
1597
1598 void
1599 mapping_state (enum mstate state)
1600 {
1601 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1602
1603 if (state == MAP_INSN)
1604 /* AArch64 instructions require 4-byte alignment. When emitting
1605 instructions into any section, record the appropriate section
1606 alignment. */
1607 record_alignment (now_seg, 2);
1608
1609 if (mapstate == state)
1610 /* The mapping symbol has already been emitted.
1611 There is nothing else to do. */
1612 return;
1613
1614 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1615 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1616 /* A MAP_DATA mapping symbol is only needed within an executable section;
1617 for other sections there is nothing to mark, so return now. */
1618 return;
1619 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1620 {
1621 /* Only add the symbol if the offset is > 0:
1622 if we're at the first frag, check its size > 0;
1623 if we're not at the first frag, then for sure
1624 the offset is > 0. */
1625 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1626 const int add_symbol = (frag_now != frag_first)
1627 || (frag_now_fix () > 0);
1628
1629 if (add_symbol)
1630 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1631 }
1632 #undef TRANSITION
1633
1634 mapping_state_2 (state, 0);
1635 }
1636
1637 /* Same as mapping_state, but MAX_CHARS bytes have already been
1638 allocated. Put the mapping symbol that far back. */
1639
1640 static void
1641 mapping_state_2 (enum mstate state, int max_chars)
1642 {
1643 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1644
1645 if (!SEG_NORMAL (now_seg))
1646 return;
1647
1648 if (mapstate == state)
1649 /* The mapping symbol has already been emitted.
1650 There is nothing else to do. */
1651 return;
1652
1653 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1654 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1655 }
1656 #else
1657 #define mapping_state(x) /* nothing */
1658 #define mapping_state_2(x, y) /* nothing */
1659 #endif
1660
1661 /* Directives: sectioning and alignment. */
1662
1663 static void
1664 s_bss (int ignore ATTRIBUTE_UNUSED)
1665 {
1666 /* We don't support putting frags in the BSS segment; we fake it by
1667 marking in_bss, then looking at s_skip for clues. */
1668 subseg_set (bss_section, 0);
1669 demand_empty_rest_of_line ();
1670 mapping_state (MAP_DATA);
1671 }
1672
1673 static void
1674 s_even (int ignore ATTRIBUTE_UNUSED)
1675 {
1676 /* Never make a frag if we expect an extra pass. */
1677 if (!need_pass_2)
1678 frag_align (1, 0, 0);
1679
1680 record_alignment (now_seg, 1);
1681
1682 demand_empty_rest_of_line ();
1683 }
1684
1685 /* Directives: Literal pools. */
1686
1687 static literal_pool *
1688 find_literal_pool (int size)
1689 {
1690 literal_pool *pool;
1691
1692 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1693 {
1694 if (pool->section == now_seg
1695 && pool->sub_section == now_subseg && pool->size == size)
1696 break;
1697 }
1698
1699 return pool;
1700 }
1701
1702 static literal_pool *
1703 find_or_make_literal_pool (int size)
1704 {
1705 /* Next literal pool ID number. */
1706 static unsigned int latest_pool_num = 1;
1707 literal_pool *pool;
1708
1709 pool = find_literal_pool (size);
1710
1711 if (pool == NULL)
1712 {
1713 /* Create a new pool. */
1714 pool = XNEW (literal_pool);
1715 if (!pool)
1716 return NULL;
1717
1718 /* Currently we always put the literal pool in the current text
1719 section. If we were generating "small" model code where we
1720 knew that all code and initialised data was within 1MB then
1721 we could output literals to mergeable, read-only data
1722 sections. */
1723
1724 pool->next_free_entry = 0;
1725 pool->section = now_seg;
1726 pool->sub_section = now_subseg;
1727 pool->size = size;
1728 pool->next = list_of_pools;
1729 pool->symbol = NULL;
1730
1731 /* Add it to the list. */
1732 list_of_pools = pool;
1733 }
1734
1735 /* New pools, and emptied pools, will have a NULL symbol. */
1736 if (pool->symbol == NULL)
1737 {
1738 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1739 &zero_address_frag, 0);
1740 pool->id = latest_pool_num++;
1741 }
1742
1743 /* Done. */
1744 return pool;
1745 }
1746
1747 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1748 Return TRUE on success, otherwise return FALSE. */
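/* Illustrative example (one caller among others): an
   "ldr x0, =<large constant>" pseudo-instruction stashes its constant here;
   *EXP is rewritten into a pool-symbol-plus-offset reference which a later
   .ltorg or .pool directive emits via s_ltorg.  */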
1749 static bfd_boolean
1750 add_to_lit_pool (expressionS *exp, int size)
1751 {
1752 literal_pool *pool;
1753 unsigned int entry;
1754
1755 pool = find_or_make_literal_pool (size);
1756
1757 /* Check if this literal value is already in the pool. */
1758 for (entry = 0; entry < pool->next_free_entry; entry++)
1759 {
1760 expressionS * litexp = & pool->literals[entry].exp;
1761
1762 if ((litexp->X_op == exp->X_op)
1763 && (exp->X_op == O_constant)
1764 && (litexp->X_add_number == exp->X_add_number)
1765 && (litexp->X_unsigned == exp->X_unsigned))
1766 break;
1767
1768 if ((litexp->X_op == exp->X_op)
1769 && (exp->X_op == O_symbol)
1770 && (litexp->X_add_number == exp->X_add_number)
1771 && (litexp->X_add_symbol == exp->X_add_symbol)
1772 && (litexp->X_op_symbol == exp->X_op_symbol))
1773 break;
1774 }
1775
1776 /* Do we need to create a new entry? */
1777 if (entry == pool->next_free_entry)
1778 {
1779 if (entry >= MAX_LITERAL_POOL_SIZE)
1780 {
1781 set_syntax_error (_("literal pool overflow"));
1782 return FALSE;
1783 }
1784
1785 pool->literals[entry].exp = *exp;
1786 pool->next_free_entry += 1;
1787 if (exp->X_op == O_big)
1788 {
1789 /* PR 16688: Bignums are held in a single global array. We must
1790 copy and preserve that value now, before it is overwritten. */
1791 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1792 exp->X_add_number);
1793 memcpy (pool->literals[entry].bignum, generic_bignum,
1794 CHARS_PER_LITTLENUM * exp->X_add_number);
1795 }
1796 else
1797 pool->literals[entry].bignum = NULL;
1798 }
1799
1800 exp->X_op = O_symbol;
1801 exp->X_add_number = ((int) entry) * size;
1802 exp->X_add_symbol = pool->symbol;
1803
1804 return TRUE;
1805 }
1806
1807 /* Can't use symbol_new here, so have to create a symbol and then at
1808 a later date assign it a value. That's what these functions do. */
1809
1810 static void
1811 symbol_locate (symbolS * symbolP,
1812 const char *name,/* It is copied, the caller can modify. */
1813 segT segment, /* Segment identifier (SEG_<something>). */
1814 valueT valu, /* Symbol value. */
1815 fragS * frag) /* Associated fragment. */
1816 {
1817 size_t name_length;
1818 char *preserved_copy_of_name;
1819
1820 name_length = strlen (name) + 1; /* +1 for \0. */
1821 obstack_grow (&notes, name, name_length);
1822 preserved_copy_of_name = obstack_finish (&notes);
1823
1824 #ifdef tc_canonicalize_symbol_name
1825 preserved_copy_of_name =
1826 tc_canonicalize_symbol_name (preserved_copy_of_name);
1827 #endif
1828
1829 S_SET_NAME (symbolP, preserved_copy_of_name);
1830
1831 S_SET_SEGMENT (symbolP, segment);
1832 S_SET_VALUE (symbolP, valu);
1833 symbol_clear_list_pointers (symbolP);
1834
1835 symbol_set_frag (symbolP, frag);
1836
1837 /* Link to end of symbol chain. */
1838 {
1839 extern int symbol_table_frozen;
1840
1841 if (symbol_table_frozen)
1842 abort ();
1843 }
1844
1845 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1846
1847 obj_symbol_new_hook (symbolP);
1848
1849 #ifdef tc_symbol_new_hook
1850 tc_symbol_new_hook (symbolP);
1851 #endif
1852
1853 #ifdef DEBUG_SYMS
1854 verify_symbol_chain (symbol_rootP, symbol_lastP);
1855 #endif /* DEBUG_SYMS */
1856 }
1857
1858
1859 static void
1860 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1861 {
1862 unsigned int entry;
1863 literal_pool *pool;
1864 char sym_name[20];
1865 int align;
1866
1867 for (align = 2; align <= 4; align++)
1868 {
1869 int size = 1 << align;
1870
1871 pool = find_literal_pool (size);
1872 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1873 continue;
1874
1875 /* Align the pool to the size of its entries.
1876 Only make a frag if we have to. */
1877 if (!need_pass_2)
1878 frag_align (align, 0, 0);
1879
1880 mapping_state (MAP_DATA);
1881
1882 record_alignment (now_seg, align);
1883
1884 sprintf (sym_name, "$$lit_\002%x", pool->id);
1885
1886 symbol_locate (pool->symbol, sym_name, now_seg,
1887 (valueT) frag_now_fix (), frag_now);
1888 symbol_table_insert (pool->symbol);
1889
1890 for (entry = 0; entry < pool->next_free_entry; entry++)
1891 {
1892 expressionS * exp = & pool->literals[entry].exp;
1893
1894 if (exp->X_op == O_big)
1895 {
1896 /* PR 16688: Restore the global bignum value. */
1897 gas_assert (pool->literals[entry].bignum != NULL);
1898 memcpy (generic_bignum, pool->literals[entry].bignum,
1899 CHARS_PER_LITTLENUM * exp->X_add_number);
1900 }
1901
1902 /* First output the expression in the instruction to the pool. */
1903 emit_expr (exp, size); /* .word|.xword */
1904
1905 if (exp->X_op == O_big)
1906 {
1907 free (pool->literals[entry].bignum);
1908 pool->literals[entry].bignum = NULL;
1909 }
1910 }
1911
1912 /* Mark the pool as empty. */
1913 pool->next_free_entry = 0;
1914 pool->symbol = NULL;
1915 }
1916 }
1917
1918 #ifdef OBJ_ELF
1919 /* Forward declarations for functions below, in the MD interface
1920 section. */
1921 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1922 static struct reloc_table_entry * find_reloc_table_entry (char **);
1923
1924 /* Directives: Data. */
1925 /* N.B. the support for relocation suffixes in this directive needs to be
1926 implemented properly. */
1927
1928 static void
1929 s_aarch64_elf_cons (int nbytes)
1930 {
1931 expressionS exp;
1932
1933 #ifdef md_flush_pending_output
1934 md_flush_pending_output ();
1935 #endif
1936
1937 if (is_it_end_of_statement ())
1938 {
1939 demand_empty_rest_of_line ();
1940 return;
1941 }
1942
1943 #ifdef md_cons_align
1944 md_cons_align (nbytes);
1945 #endif
1946
1947 mapping_state (MAP_DATA);
1948 do
1949 {
1950 struct reloc_table_entry *reloc;
1951
1952 expression (&exp);
1953
1954 if (exp.X_op != O_symbol)
1955 emit_expr (&exp, (unsigned int) nbytes);
1956 else
1957 {
1958 skip_past_char (&input_line_pointer, '#');
1959 if (skip_past_char (&input_line_pointer, ':'))
1960 {
1961 reloc = find_reloc_table_entry (&input_line_pointer);
1962 if (reloc == NULL)
1963 as_bad (_("unrecognized relocation suffix"));
1964 else
1965 as_bad (_("unimplemented relocation suffix"));
1966 ignore_rest_of_line ();
1967 return;
1968 }
1969 else
1970 emit_expr (&exp, (unsigned int) nbytes);
1971 }
1972 }
1973 while (*input_line_pointer++ == ',');
1974
1975 /* Put terminator back into stream. */
1976 input_line_pointer--;
1977 demand_empty_rest_of_line ();
1978 }
1979
1980 /* Mark a symbol as following the variant PCS calling convention. */
1981
1982 static void
1983 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1984 {
1985 char *name;
1986 char c;
1987 symbolS *sym;
1988 asymbol *bfdsym;
1989 elf_symbol_type *elfsym;
1990
1991 c = get_symbol_name (&name);
1992 if (!*name)
1993 as_bad (_("Missing symbol name in directive"));
1994 sym = symbol_find_or_make (name);
1995 restore_line_pointer (c);
1996 demand_empty_rest_of_line ();
1997 bfdsym = symbol_get_bfdsym (sym);
1998 elfsym = elf_symbol_from (bfdsym);
1999 gas_assert (elfsym);
2000 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2001 }
2002 #endif /* OBJ_ELF */
2003
2004 /* Output a 32-bit word, but mark as an instruction. */
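/* Illustrative example:
     .inst 0xd503201f
   emits the 32-bit NOP encoding and records it under an instruction ($x)
   mapping symbol rather than a data ($d) one.  */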
2005
2006 static void
2007 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2008 {
2009 expressionS exp;
2010
2011 #ifdef md_flush_pending_output
2012 md_flush_pending_output ();
2013 #endif
2014
2015 if (is_it_end_of_statement ())
2016 {
2017 demand_empty_rest_of_line ();
2018 return;
2019 }
2020
2021 /* Sections are assumed to start aligned. In an executable section there
2022 is no MAP_DATA symbol pending, so we only align the address during a
2023 MAP_DATA --> MAP_INSN transition. For other sections, this alignment
2024 is not guaranteed. */
2025 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2026 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2027 frag_align_code (2, 0);
2028
2029 #ifdef OBJ_ELF
2030 mapping_state (MAP_INSN);
2031 #endif
2032
2033 do
2034 {
2035 expression (&exp);
2036 if (exp.X_op != O_constant)
2037 {
2038 as_bad (_("constant expression required"));
2039 ignore_rest_of_line ();
2040 return;
2041 }
2042
2043 if (target_big_endian)
2044 {
2045 unsigned int val = exp.X_add_number;
2046 exp.X_add_number = SWAP_32 (val);
2047 }
2048 emit_expr (&exp, 4);
2049 }
2050 while (*input_line_pointer++ == ',');
2051
2052 /* Put terminator back into stream. */
2053 input_line_pointer--;
2054 demand_empty_rest_of_line ();
2055 }
2056
2057 static void
2058 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2059 {
2060 demand_empty_rest_of_line ();
2061 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2062 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2063 }
2064
2065 #ifdef OBJ_ELF
2066 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2067
2068 static void
2069 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2070 {
2071 expressionS exp;
2072
2073 expression (&exp);
2074 frag_grow (4);
2075 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2076 BFD_RELOC_AARCH64_TLSDESC_ADD);
2077
2078 demand_empty_rest_of_line ();
2079 }
2080
2081 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
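/* A typical general-dynamic TLS descriptor sequence looks something like:

     adrp  x0, :tlsdesc:var
     ldr   x1, [x0, #:tlsdesc_lo12:var]
     add   x0, x0, #:tlsdesc_lo12:var
     .tlsdesccall var
     blr   x1

   where the directive attaches the relocation to the immediately
   following blr.  */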
2082
2083 static void
2084 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2085 {
2086 expressionS exp;
2087
2088 /* Since we're just labelling the code, there's no need to define a
2089 mapping symbol. */
2090 expression (&exp);
2091 /* Make sure there is enough room in this frag for the following
2092 blr. This trick only works if the blr follows immediately after
2093 the .tlsdesccall directive. */
2094 frag_grow (4);
2095 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2096 BFD_RELOC_AARCH64_TLSDESC_CALL);
2097
2098 demand_empty_rest_of_line ();
2099 }
2100
2101 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2102
2103 static void
2104 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2105 {
2106 expressionS exp;
2107
2108 expression (&exp);
2109 frag_grow (4);
2110 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2111 BFD_RELOC_AARCH64_TLSDESC_LDR);
2112
2113 demand_empty_rest_of_line ();
2114 }
2115 #endif /* OBJ_ELF */
2116
2117 static void s_aarch64_arch (int);
2118 static void s_aarch64_cpu (int);
2119 static void s_aarch64_arch_extension (int);
2120
2121 /* This table describes all the machine specific pseudo-ops the assembler
2122 has to support. The fields are:
2123 pseudo-op name without dot
2124 function to call to execute this pseudo-op
2125 Integer arg to pass to the function. */
2126
2127 const pseudo_typeS md_pseudo_table[] = {
2128 /* Never called because '.req' does not start a line. */
2129 {"req", s_req, 0},
2130 {"unreq", s_unreq, 0},
2131 {"bss", s_bss, 0},
2132 {"even", s_even, 0},
2133 {"ltorg", s_ltorg, 0},
2134 {"pool", s_ltorg, 0},
2135 {"cpu", s_aarch64_cpu, 0},
2136 {"arch", s_aarch64_arch, 0},
2137 {"arch_extension", s_aarch64_arch_extension, 0},
2138 {"inst", s_aarch64_inst, 0},
2139 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2140 #ifdef OBJ_ELF
2141 {"tlsdescadd", s_tlsdescadd, 0},
2142 {"tlsdesccall", s_tlsdesccall, 0},
2143 {"tlsdescldr", s_tlsdescldr, 0},
2144 {"word", s_aarch64_elf_cons, 4},
2145 {"long", s_aarch64_elf_cons, 4},
2146 {"xword", s_aarch64_elf_cons, 8},
2147 {"dword", s_aarch64_elf_cons, 8},
2148 {"variant_pcs", s_variant_pcs, 0},
2149 #endif
2150 {"float16", float_cons, 'h'},
2151 {"bfloat16", float_cons, 'b'},
2152 {0, 0, 0}
2153 };
2154 \f
2155
2156 /* Check whether STR points to a register name followed by a comma or the
2157 end of the line; REG_TYPE indicates which register types are checked
2158 against. Return TRUE if STR is such a register name; otherwise return
2159 FALSE. The function is not meant to produce any diagnostics, but since
2160 the register parser aarch64_reg_parse, which is called by this function,
2161 does produce diagnostics, we call clear_error to discard any diagnostics
2162 that aarch64_reg_parse may have generated.
2163 Also, the function returns FALSE immediately if any user error is
2164 already pending at the function entry, so that the existing diagnostics
2165 state is not spoiled.
2166 The function currently serves parse_constant_immediate and
2167 parse_big_immediate only. */
2168 static bfd_boolean
2169 reg_name_p (char *str, aarch64_reg_type reg_type)
2170 {
2171 int reg;
2172
2173 /* Prevent the diagnostics state from being spoiled. */
2174 if (error_p ())
2175 return FALSE;
2176
2177 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2178
2179 /* Clear the parsing error that may be set by the reg parser. */
2180 clear_error ();
2181
2182 if (reg == PARSE_FAIL)
2183 return FALSE;
2184
2185 skip_whitespace (str);
2186 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2187 return TRUE;
2188
2189 return FALSE;
2190 }
2191
2192 /* Parser functions used exclusively in instruction operands. */
2193
2194 /* Parse an immediate expression, which need not be constant.
2195
2196 To prevent the expression parser from pushing a register name
2197 into the symbol table as an undefined symbol, first check whether
2198 STR is a register of type REG_TYPE followed by a comma or the end
2199 of the line. Return FALSE if STR is such a string. */
2200
2201 static bfd_boolean
2202 parse_immediate_expression (char **str, expressionS *exp,
2203 aarch64_reg_type reg_type)
2204 {
2205 if (reg_name_p (*str, reg_type))
2206 {
2207 set_recoverable_error (_("immediate operand required"));
2208 return FALSE;
2209 }
2210
2211 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2212
2213 if (exp->X_op == O_absent)
2214 {
2215 set_fatal_syntax_error (_("missing immediate expression"));
2216 return FALSE;
2217 }
2218
2219 return TRUE;
2220 }
2221
2222 /* Constant immediate-value read function for use in insn parsing.
2223 STR points to the beginning of the immediate (with the optional
2224 leading #); *VAL receives the value. REG_TYPE says which register
2225 names should be treated as registers rather than as symbolic immediates.
2226
2227 Return TRUE on success; otherwise return FALSE. */
2228
2229 static bfd_boolean
2230 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2231 {
2232 expressionS exp;
2233
2234 if (! parse_immediate_expression (str, &exp, reg_type))
2235 return FALSE;
2236
2237 if (exp.X_op != O_constant)
2238 {
2239 set_syntax_error (_("constant expression required"));
2240 return FALSE;
2241 }
2242
2243 *val = exp.X_add_number;
2244 return TRUE;
2245 }
2246
2247 static uint32_t
2248 encode_imm_float_bits (uint32_t imm)
2249 {
2250 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2251 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2252 }
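/* For example, 1.0f has the IEEE754 single-precision encoding 0x3f800000;
   bits [25:19] are 0x70 and bit [31] is clear, so the function returns the
   8-bit immediate 0x70.  */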
2253
2254 /* Return TRUE if the single-precision floating-point value encoded in IMM
2255 can be expressed in the AArch64 8-bit signed floating-point format with
2256 3-bit exponent and normalized 4 bits of precision; in other words, the
2257 floating-point value must be expressible as
2258 (+/-) n / 16 * power (2, r)
2259 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2260
2261 static bfd_boolean
2262 aarch64_imm_float_p (uint32_t imm)
2263 {
2264 /* If a single-precision floating-point value has the following bit
2265 pattern, it can be expressed in the AArch64 8-bit floating-point
2266 format:
2267
2268 3 32222222 2221111111111
2269 1 09876543 21098765432109876543210
2270 n Eeeeeexx xxxx0000000000000000000
2271
2272 where n, e and each x are either 0 or 1 independently, with
2273 E == ~ e. */
2274
2275 uint32_t pattern;
2276
2277 /* Prepare the pattern for 'Eeeeee'. */
2278 if (((imm >> 30) & 0x1) == 0)
2279 pattern = 0x3e000000;
2280 else
2281 pattern = 0x40000000;
2282
2283 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2284 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2285 }
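/* For example, 2.0f is encoded as 0x40000000: the low 19 bits are zero and
   bits [29:25] are the inverse of bit [30], so it is representable
   (it is 16/16 * 2^1).  By contrast, 0.1f (0x3dcccccd) fails the
   low-19-bit test.  */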
2286
2287 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2288 as an IEEE float without any loss of precision. Store the value in
2289 *FPWORD if so. */
2290
2291 static bfd_boolean
2292 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2293 {
2294 /* If a double-precision floating-point value has the following bit
2295 pattern, it can be expressed in a float:
2296
2297 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2298 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2299 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2300
2301 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2302 if Eeee_eeee != 1111_1111
2303
2304 where n, e, s and S are either 0 or 1 independently and where ~ is the
2305 inverse of E. */
2306
2307 uint32_t pattern;
2308 uint32_t high32 = imm >> 32;
2309 uint32_t low32 = imm;
2310
2311 /* Lower 29 bits need to be 0s. */
2312 if ((imm & 0x1fffffff) != 0)
2313 return FALSE;
2314
2315 /* Prepare the pattern for 'Eeeeeeeee'. */
2316 if (((high32 >> 30) & 0x1) == 0)
2317 pattern = 0x38000000;
2318 else
2319 pattern = 0x40000000;
2320
2321 /* Check E~~~. */
2322 if ((high32 & 0x78000000) != pattern)
2323 return FALSE;
2324
2325 /* Check Eeee_eeee != 1111_1111. */
2326 if ((high32 & 0x7ff00000) == 0x47f00000)
2327 return FALSE;
2328
2329 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2330 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2331 | (low32 >> 29)); /* 3 S bits. */
2332 return TRUE;
2333 }
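/* For example, the double 1.0 (0x3ff0000000000000) has all of its low 29
   bits clear and an exponent in range, so it converts exactly to the float
   0x3f800000; 0.1 (0x3fb999999999999a) does not, because its low bits are
   non-zero.  */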
2334
2335 /* Return true if we should treat OPERAND as a double-precision
2336 floating-point operand rather than a single-precision one. */
2337 static bfd_boolean
2338 double_precision_operand_p (const aarch64_opnd_info *operand)
2339 {
2340 /* Check for unsuffixed SVE registers, which are allowed
2341 for LDR and STR but not in instructions that require an
2342 immediate. We get better error messages if we arbitrarily
2343 pick one size, parse the immediate normally, and then
2344 report the match failure in the normal way. */
2345 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2346 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2347 }
2348
2349 /* Parse a floating-point immediate. Return TRUE on success and return the
2350 value in *IMMED in the format of IEEE754 single-precision encoding.
2351 *CCP points to the start of the string; DP_P is TRUE when the immediate
2352 is expected to be in double-precision (N.B. this only matters when
2353 hexadecimal representation is involved). REG_TYPE says which register
2354 names should be treated as registers rather than as symbolic immediates.
2355
2356 This routine accepts any IEEE float; it is up to the callers to reject
2357 invalid ones. */
2358
2359 static bfd_boolean
2360 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2361 aarch64_reg_type reg_type)
2362 {
2363 char *str = *ccp;
2364 char *fpnum;
2365 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2366 int64_t val = 0;
2367 unsigned fpword = 0;
2368 bfd_boolean hex_p = FALSE;
2369
2370 skip_past_char (&str, '#');
2371
2372 fpnum = str;
2373 skip_whitespace (fpnum);
2374
2375 if (strncmp (fpnum, "0x", 2) == 0)
2376 {
2377 /* Support the hexadecimal representation of the IEEE754 encoding.
2378 Double-precision is expected when DP_P is TRUE, otherwise the
2379 representation should be in single-precision. */
2380 if (! parse_constant_immediate (&str, &val, reg_type))
2381 goto invalid_fp;
2382
2383 if (dp_p)
2384 {
2385 if (!can_convert_double_to_float (val, &fpword))
2386 goto invalid_fp;
2387 }
2388 else if ((uint64_t) val > 0xffffffff)
2389 goto invalid_fp;
2390 else
2391 fpword = val;
2392
2393 hex_p = TRUE;
2394 }
2395 else if (reg_name_p (str, reg_type))
2396 {
2397 set_recoverable_error (_("immediate operand required"));
2398 return FALSE;
2399 }
2400
2401 if (! hex_p)
2402 {
2403 int i;
2404
2405 if ((str = atof_ieee (str, 's', words)) == NULL)
2406 goto invalid_fp;
2407
2408 /* Our FP word must be 32 bits (single-precision FP). */
2409 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2410 {
2411 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2412 fpword |= words[i];
2413 }
2414 }
2415
2416 *immed = fpword;
2417 *ccp = str;
2418 return TRUE;
2419
2420 invalid_fp:
2421 set_fatal_syntax_error (_("invalid floating-point constant"));
2422 return FALSE;
2423 }
2424
2425 /* Less-generic immediate-value read function with the possibility of loading
2426 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2427 instructions.
2428
2429 To prevent the expression parser from pushing a register name into the
2430 symbol table as an undefined symbol, first check whether STR is a
2431 register of type REG_TYPE followed by a comma or the end of the line.
2432 Return FALSE if STR is such a register. */
2433
2434 static bfd_boolean
2435 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2436 {
2437 char *ptr = *str;
2438
2439 if (reg_name_p (ptr, reg_type))
2440 {
2441 set_syntax_error (_("immediate operand required"));
2442 return FALSE;
2443 }
2444
2445 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2446
2447 if (inst.reloc.exp.X_op == O_constant)
2448 *imm = inst.reloc.exp.X_add_number;
2449
2450 *str = ptr;
2451
2452 return TRUE;
2453 }
2454
2455 /* Record in *RELOC that operand OPERAND needs a GAS internal fixup.
2456 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2457 assistance from libopcodes. */
2458
2459 static inline void
2460 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2461 const aarch64_opnd_info *operand,
2462 int need_libopcodes_p)
2463 {
2464 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2465 reloc->opnd = operand->type;
2466 if (need_libopcodes_p)
2467 reloc->need_libopcodes_p = 1;
2468 }
2469
2470 /* Return TRUE if the instruction needs to be fixed up later internally by
2471 GAS; otherwise return FALSE. */
2472
2473 static inline bfd_boolean
2474 aarch64_gas_internal_fixup_p (void)
2475 {
2476 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2477 }
2478
2479 /* Assign the immediate value to the relevant field in *OPERAND if
2480 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2481 needs an internal fixup in a later stage.
2482 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2483 IMM.VALUE that may get assigned with the constant. */
2484 static inline void
2485 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2486 aarch64_opnd_info *operand,
2487 int addr_off_p,
2488 int need_libopcodes_p,
2489 int skip_p)
2490 {
2491 if (reloc->exp.X_op == O_constant)
2492 {
2493 if (addr_off_p)
2494 operand->addr.offset.imm = reloc->exp.X_add_number;
2495 else
2496 operand->imm.value = reloc->exp.X_add_number;
2497 reloc->type = BFD_RELOC_UNUSED;
2498 }
2499 else
2500 {
2501 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2502 /* Tell libopcodes to ignore this operand or not. This is helpful
2503 when one of the operands needs to be fixed up later but we need
2504 libopcodes to check the other operands. */
2505 operand->skip = skip_p;
2506 }
2507 }
2508
2509 /* Relocation modifiers. Each entry in the table contains the textual
2510 name for the relocation, which may be placed before a symbol used as
2511 a load/store offset or an add immediate. The name must be surrounded
2512 by a leading and trailing colon, for example:
2513
2514 ldr x0, [x1, #:rello:varsym]
2515 add x0, x1, #:rello:varsym */
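/* For instance, with the "lo12" entry below,
   "add x0, x1, #:lo12:varsym" selects BFD_RELOC_AARCH64_ADD_LO12 (the
   add_type field) and "ldr x0, [x1, #:lo12:varsym]" selects
   BFD_RELOC_AARCH64_LDST_LO12 (the ldst_type field).  */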
2516
2517 struct reloc_table_entry
2518 {
2519 const char *name;
2520 int pc_rel;
2521 bfd_reloc_code_real_type adr_type;
2522 bfd_reloc_code_real_type adrp_type;
2523 bfd_reloc_code_real_type movw_type;
2524 bfd_reloc_code_real_type add_type;
2525 bfd_reloc_code_real_type ldst_type;
2526 bfd_reloc_code_real_type ld_literal_type;
2527 };
2528
2529 static struct reloc_table_entry reloc_table[] = {
2530 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2531 {"lo12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_ADD_LO12,
2536 BFD_RELOC_AARCH64_LDST_LO12,
2537 0},
2538
2539 /* Higher 21 bits of pc-relative page offset: ADRP */
2540 {"pg_hi21", 1,
2541 0, /* adr_type */
2542 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2543 0,
2544 0,
2545 0,
2546 0},
2547
2548 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2549 {"pg_hi21_nc", 1,
2550 0, /* adr_type */
2551 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2552 0,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2558 {"abs_g0", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_MOVW_G0,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2567 {"abs_g0_s", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_MOVW_G0_S,
2571 0,
2572 0,
2573 0},
2574
2575 /* Less significant bits 0-15 of address/value: MOVK, no check */
2576 {"abs_g0_nc", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_MOVW_G0_NC,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2585 {"abs_g1", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_MOVW_G1,
2589 0,
2590 0,
2591 0},
2592
2593 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2594 {"abs_g1_s", 0,
2595 0, /* adr_type */
2596 0,
2597 BFD_RELOC_AARCH64_MOVW_G1_S,
2598 0,
2599 0,
2600 0},
2601
2602 /* Less significant bits 16-31 of address/value: MOVK, no check */
2603 {"abs_g1_nc", 0,
2604 0, /* adr_type */
2605 0,
2606 BFD_RELOC_AARCH64_MOVW_G1_NC,
2607 0,
2608 0,
2609 0},
2610
2611 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2612 {"abs_g2", 0,
2613 0, /* adr_type */
2614 0,
2615 BFD_RELOC_AARCH64_MOVW_G2,
2616 0,
2617 0,
2618 0},
2619
2620 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2621 {"abs_g2_s", 0,
2622 0, /* adr_type */
2623 0,
2624 BFD_RELOC_AARCH64_MOVW_G2_S,
2625 0,
2626 0,
2627 0},
2628
2629 /* Less significant bits 32-47 of address/value: MOVK, no check */
2630 {"abs_g2_nc", 0,
2631 0, /* adr_type */
2632 0,
2633 BFD_RELOC_AARCH64_MOVW_G2_NC,
2634 0,
2635 0,
2636 0},
2637
2638 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2639 {"abs_g3", 0,
2640 0, /* adr_type */
2641 0,
2642 BFD_RELOC_AARCH64_MOVW_G3,
2643 0,
2644 0,
2645 0},
2646
2647 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2648 {"prel_g0", 1,
2649 0, /* adr_type */
2650 0,
2651 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2652 0,
2653 0,
2654 0},
2655
2656 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2657 {"prel_g0_nc", 1,
2658 0, /* adr_type */
2659 0,
2660 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2661 0,
2662 0,
2663 0},
2664
2665 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2666 {"prel_g1", 1,
2667 0, /* adr_type */
2668 0,
2669 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2670 0,
2671 0,
2672 0},
2673
2674 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2675 {"prel_g1_nc", 1,
2676 0, /* adr_type */
2677 0,
2678 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2679 0,
2680 0,
2681 0},
2682
2683 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2684 {"prel_g2", 1,
2685 0, /* adr_type */
2686 0,
2687 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2688 0,
2689 0,
2690 0},
2691
2692 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2693 {"prel_g2_nc", 1,
2694 0, /* adr_type */
2695 0,
2696 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2697 0,
2698 0,
2699 0},
2700
2701 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2702 {"prel_g3", 1,
2703 0, /* adr_type */
2704 0,
2705 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2706 0,
2707 0,
2708 0},
2709
2710 /* Get to the page containing GOT entry for a symbol. */
2711 {"got", 1,
2712 0, /* adr_type */
2713 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2714 0,
2715 0,
2716 0,
2717 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2718
2719 /* 12 bit offset into the page containing GOT entry for that symbol. */
2720 {"got_lo12", 0,
2721 0, /* adr_type */
2722 0,
2723 0,
2724 0,
2725 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2726 0},
2727
2728 /* 0-15 bits of address/value: MOVK, no check. */
2729 {"gotoff_g0_nc", 0,
2730 0, /* adr_type */
2731 0,
2732 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2733 0,
2734 0,
2735 0},
2736
2737 /* Most significant bits 16-31 of address/value: MOVZ. */
2738 {"gotoff_g1", 0,
2739 0, /* adr_type */
2740 0,
2741 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2742 0,
2743 0,
2744 0},
2745
2746 /* 15 bit offset into the page containing GOT entry for that symbol. */
2747 {"gotoff_lo15", 0,
2748 0, /* adr_type */
2749 0,
2750 0,
2751 0,
2752 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2753 0},
2754
2755 /* Get to the page containing GOT TLS entry for a symbol */
2756 {"gottprel_g0_nc", 0,
2757 0, /* adr_type */
2758 0,
2759 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2760 0,
2761 0,
2762 0},
2763
2764 /* Get to the page containing GOT TLS entry for a symbol */
2765 {"gottprel_g1", 0,
2766 0, /* adr_type */
2767 0,
2768 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2769 0,
2770 0,
2771 0},
2772
2773 /* Get to the page containing GOT TLS entry for a symbol */
2774 {"tlsgd", 0,
2775 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2776 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2777 0,
2778 0,
2779 0,
2780 0},
2781
2782 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2783 {"tlsgd_lo12", 0,
2784 0, /* adr_type */
2785 0,
2786 0,
2787 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2788 0,
2789 0},
2790
2791 /* Lower 16 bits of address/value: MOVK. */
2792 {"tlsgd_g0_nc", 0,
2793 0, /* adr_type */
2794 0,
2795 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2796 0,
2797 0,
2798 0},
2799
2800 /* Most significant bits 16-31 of address/value: MOVZ. */
2801 {"tlsgd_g1", 0,
2802 0, /* adr_type */
2803 0,
2804 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2805 0,
2806 0,
2807 0},
2808
2809 /* Get to the page containing GOT TLS entry for a symbol */
2810 {"tlsdesc", 0,
2811 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2812 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2813 0,
2814 0,
2815 0,
2816 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2817
2818 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2819 {"tlsdesc_lo12", 0,
2820 0, /* adr_type */
2821 0,
2822 0,
2823 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2824 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2825 0},
2826
2827 /* Get to the page containing the GOT TLS entry for a symbol.
2828 As with GD, we allocate two consecutive GOT slots for the
2829 module index and module offset; the only difference from GD
2830 is that the module offset should be initialized to zero,
2831 without any outstanding runtime relocation. */
2832 {"tlsldm", 0,
2833 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2834 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2835 0,
2836 0,
2837 0,
2838 0},
2839
2840 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2841 {"tlsldm_lo12_nc", 0,
2842 0, /* adr_type */
2843 0,
2844 0,
2845 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2846 0,
2847 0},
2848
2849 /* 12 bit offset into the module TLS base address. */
2850 {"dtprel_lo12", 0,
2851 0, /* adr_type */
2852 0,
2853 0,
2854 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2855 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2856 0},
2857
2858 /* Same as dtprel_lo12, no overflow check. */
2859 {"dtprel_lo12_nc", 0,
2860 0, /* adr_type */
2861 0,
2862 0,
2863 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2864 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2865 0},
2866
2867 /* bits[23:12] of offset to the module TLS base address. */
2868 {"dtprel_hi12", 0,
2869 0, /* adr_type */
2870 0,
2871 0,
2872 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2873 0,
2874 0},
2875
2876 /* bits[15:0] of offset to the module TLS base address. */
2877 {"dtprel_g0", 0,
2878 0, /* adr_type */
2879 0,
2880 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2881 0,
2882 0,
2883 0},
2884
2885 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2886 {"dtprel_g0_nc", 0,
2887 0, /* adr_type */
2888 0,
2889 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2890 0,
2891 0,
2892 0},
2893
2894 /* bits[31:16] of offset to the module TLS base address. */
2895 {"dtprel_g1", 0,
2896 0, /* adr_type */
2897 0,
2898 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2899 0,
2900 0,
2901 0},
2902
2903 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2904 {"dtprel_g1_nc", 0,
2905 0, /* adr_type */
2906 0,
2907 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2908 0,
2909 0,
2910 0},
2911
2912 /* bits[47:32] of offset to the module TLS base address. */
2913 {"dtprel_g2", 0,
2914 0, /* adr_type */
2915 0,
2916 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2917 0,
2918 0,
2919 0},
2920
2921 /* Lower 16 bit offset into GOT entry for a symbol */
2922 {"tlsdesc_off_g0_nc", 0,
2923 0, /* adr_type */
2924 0,
2925 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2926 0,
2927 0,
2928 0},
2929
2930 /* Higher 16 bit offset into GOT entry for a symbol */
2931 {"tlsdesc_off_g1", 0,
2932 0, /* adr_type */
2933 0,
2934 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2935 0,
2936 0,
2937 0},
2938
2939 /* Get to the page containing GOT TLS entry for a symbol */
2940 {"gottprel", 0,
2941 0, /* adr_type */
2942 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2943 0,
2944 0,
2945 0,
2946 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2947
2948 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2949 {"gottprel_lo12", 0,
2950 0, /* adr_type */
2951 0,
2952 0,
2953 0,
2954 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2955 0},
2956
2957 /* Get tp offset for a symbol. */
2958 {"tprel", 0,
2959 0, /* adr_type */
2960 0,
2961 0,
2962 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2963 0,
2964 0},
2965
2966 /* Get tp offset for a symbol. */
2967 {"tprel_lo12", 0,
2968 0, /* adr_type */
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2972 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2973 0},
2974
2975 /* Get tp offset for a symbol. */
2976 {"tprel_hi12", 0,
2977 0, /* adr_type */
2978 0,
2979 0,
2980 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2981 0,
2982 0},
2983
2984 /* Get tp offset for a symbol. */
2985 {"tprel_lo12_nc", 0,
2986 0, /* adr_type */
2987 0,
2988 0,
2989 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2990 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2991 0},
2992
2993 /* Most significant bits 32-47 of address/value: MOVZ. */
2994 {"tprel_g2", 0,
2995 0, /* adr_type */
2996 0,
2997 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2998 0,
2999 0,
3000 0},
3001
3002 /* Most significant bits 16-31 of address/value: MOVZ. */
3003 {"tprel_g1", 0,
3004 0, /* adr_type */
3005 0,
3006 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3007 0,
3008 0,
3009 0},
3010
3011 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3012 {"tprel_g1_nc", 0,
3013 0, /* adr_type */
3014 0,
3015 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3016 0,
3017 0,
3018 0},
3019
3020 /* Most significant bits 0-15 of address/value: MOVZ. */
3021 {"tprel_g0", 0,
3022 0, /* adr_type */
3023 0,
3024 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3025 0,
3026 0,
3027 0},
3028
3029 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3030 {"tprel_g0_nc", 0,
3031 0, /* adr_type */
3032 0,
3033 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3034 0,
3035 0,
3036 0},
3037
3038 /* 15-bit offset from GOT entry to base address of GOT table. */
3039 {"gotpage_lo15", 0,
3040 0,
3041 0,
3042 0,
3043 0,
3044 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3045 0},
3046
3047 /* 14-bit offset from GOT entry to base address of GOT table. */
3048 {"gotpage_lo14", 0,
3049 0,
3050 0,
3051 0,
3052 0,
3053 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3054 0},
3055 };
3056
3057 /* Given the address of a pointer pointing to the textual name of a
3058 relocation as may appear in assembler source, attempt to find its
3059 details in reloc_table. The pointer will be updated to the character
3060 after the trailing colon. On failure, NULL will be returned;
3061 otherwise return the reloc_table_entry. */
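/* For example, if *STR points at "lo12:varsym", the "lo12" entry is
   returned and *STR is advanced to point at "varsym".  */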
3062
3063 static struct reloc_table_entry *
3064 find_reloc_table_entry (char **str)
3065 {
3066 unsigned int i;
3067 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3068 {
3069 int length = strlen (reloc_table[i].name);
3070
3071 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3072 && (*str)[length] == ':')
3073 {
3074 *str += (length + 1);
3075 return &reloc_table[i];
3076 }
3077 }
3078
3079 return NULL;
3080 }
3081
3082 /* Mode argument to parse_shift and parse_shifter_operand. */
3083 enum parse_shift_mode
3084 {
3085 SHIFTED_NONE, /* no shifter allowed */
3086 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3087 "#imm{,lsl #n}" */
3088 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3089 "#imm" */
3090 SHIFTED_LSL, /* bare "lsl #n" */
3091 SHIFTED_MUL, /* bare "mul #n" */
3092 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3093 SHIFTED_MUL_VL, /* "mul vl" */
3094 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3095 };
3096
3097 /* Parse a <shift> operator on an AArch64 data processing instruction.
3098 Return TRUE on success; otherwise return FALSE. */
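/* For example, depending on MODE, this accepts operands such as "LSL #3"
   (SHIFTED_LSL), "UXTW #2" (SHIFTED_REG_OFFSET), "MSL #8" (SHIFTED_LSL_MSL)
   and "MUL VL" (SHIFTED_MUL_VL).  */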
3099 static bfd_boolean
3100 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3101 {
3102 const struct aarch64_name_value_pair *shift_op;
3103 enum aarch64_modifier_kind kind;
3104 expressionS exp;
3105 int exp_has_prefix;
3106 char *s = *str;
3107 char *p = s;
3108
3109 for (p = *str; ISALPHA (*p); p++)
3110 ;
3111
3112 if (p == *str)
3113 {
3114 set_syntax_error (_("shift expression expected"));
3115 return FALSE;
3116 }
3117
3118 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3119
3120 if (shift_op == NULL)
3121 {
3122 set_syntax_error (_("shift operator expected"));
3123 return FALSE;
3124 }
3125
3126 kind = aarch64_get_operand_modifier (shift_op);
3127
3128 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3129 {
3130 set_syntax_error (_("invalid use of 'MSL'"));
3131 return FALSE;
3132 }
3133
3134 if (kind == AARCH64_MOD_MUL
3135 && mode != SHIFTED_MUL
3136 && mode != SHIFTED_MUL_VL)
3137 {
3138 set_syntax_error (_("invalid use of 'MUL'"));
3139 return FALSE;
3140 }
3141
3142 switch (mode)
3143 {
3144 case SHIFTED_LOGIC_IMM:
3145 if (aarch64_extend_operator_p (kind))
3146 {
3147 set_syntax_error (_("extending shift is not permitted"));
3148 return FALSE;
3149 }
3150 break;
3151
3152 case SHIFTED_ARITH_IMM:
3153 if (kind == AARCH64_MOD_ROR)
3154 {
3155 set_syntax_error (_("'ROR' shift is not permitted"));
3156 return FALSE;
3157 }
3158 break;
3159
3160 case SHIFTED_LSL:
3161 if (kind != AARCH64_MOD_LSL)
3162 {
3163 set_syntax_error (_("only 'LSL' shift is permitted"));
3164 return FALSE;
3165 }
3166 break;
3167
3168 case SHIFTED_MUL:
3169 if (kind != AARCH64_MOD_MUL)
3170 {
3171 set_syntax_error (_("only 'MUL' is permitted"));
3172 return FALSE;
3173 }
3174 break;
3175
3176 case SHIFTED_MUL_VL:
3177 /* "MUL VL" consists of two separate tokens. Require the first
3178 token to be "MUL" and look for a following "VL". */
3179 if (kind == AARCH64_MOD_MUL)
3180 {
3181 skip_whitespace (p);
3182 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3183 {
3184 p += 2;
3185 kind = AARCH64_MOD_MUL_VL;
3186 break;
3187 }
3188 }
3189 set_syntax_error (_("only 'MUL VL' is permitted"));
3190 return FALSE;
3191
3192 case SHIFTED_REG_OFFSET:
3193 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3194 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3195 {
3196 set_fatal_syntax_error
3197 (_("invalid shift for the register offset addressing mode"));
3198 return FALSE;
3199 }
3200 break;
3201
3202 case SHIFTED_LSL_MSL:
3203 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3204 {
3205 set_syntax_error (_("invalid shift operator"));
3206 return FALSE;
3207 }
3208 break;
3209
3210 default:
3211 abort ();
3212 }
3213
3214 /* Whitespace can appear here if the next thing is a bare digit. */
3215 skip_whitespace (p);
3216
3217 /* Parse shift amount. */
3218 exp_has_prefix = 0;
3219 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3220 exp.X_op = O_absent;
3221 else
3222 {
3223 if (is_immediate_prefix (*p))
3224 {
3225 p++;
3226 exp_has_prefix = 1;
3227 }
3228 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3229 }
3230 if (kind == AARCH64_MOD_MUL_VL)
3231 /* For consistency, give MUL VL the same shift amount as an implicit
3232 MUL #1. */
3233 operand->shifter.amount = 1;
3234 else if (exp.X_op == O_absent)
3235 {
3236 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3237 {
3238 set_syntax_error (_("missing shift amount"));
3239 return FALSE;
3240 }
3241 operand->shifter.amount = 0;
3242 }
3243 else if (exp.X_op != O_constant)
3244 {
3245 set_syntax_error (_("constant shift amount required"));
3246 return FALSE;
3247 }
3248 /* For parsing purposes, MUL #n has no inherent range. The range
3249 depends on the operand and will be checked by operand-specific
3250 routines. */
3251 else if (kind != AARCH64_MOD_MUL
3252 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3253 {
3254 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3255 return FALSE;
3256 }
3257 else
3258 {
3259 operand->shifter.amount = exp.X_add_number;
3260 operand->shifter.amount_present = 1;
3261 }
3262
3263 operand->shifter.operator_present = 1;
3264 operand->shifter.kind = kind;
3265
3266 *str = p;
3267 return TRUE;
3268 }
3269
3270 /* Parse a <shifter_operand> for a data processing instruction:
3271
3272 #<immediate>
3273 #<immediate>, LSL #imm
3274
3275 Validation of immediate operands is deferred to md_apply_fix.
3276
3277 Return TRUE on success; otherwise return FALSE. */
3278
3279 static bfd_boolean
3280 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3281 enum parse_shift_mode mode)
3282 {
3283 char *p;
3284
3285 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3286 return FALSE;
3287
3288 p = *str;
3289
3290 /* Accept an immediate expression. */
3291 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3292 return FALSE;
3293
3294 /* Accept optional LSL for arithmetic immediate values. */
3295 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3296 if (! parse_shift (&p, operand, SHIFTED_LSL))
3297 return FALSE;
3298
3299 /* Do not accept any shifter for logical immediate values. */
3300 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3301 && parse_shift (&p, operand, mode))
3302 {
3303 set_syntax_error (_("unexpected shift operator"));
3304 return FALSE;
3305 }
3306
3307 *str = p;
3308 return TRUE;
3309 }
3310
3311 /* Parse a <shifter_operand> for a data processing instruction:
3312
3313 <Rm>
3314 <Rm>, <shift>
3315 #<immediate>
3316 #<immediate>, LSL #imm
3317
3318 where <shift> is handled by parse_shift above, and the last two
3319 cases are handled by the function above.
3320
3321 Validation of immediate operands is deferred to md_apply_fix.
3322
3323 Return TRUE on success; otherwise return FALSE. */
3324
3325 static bfd_boolean
3326 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3327 enum parse_shift_mode mode)
3328 {
3329 const reg_entry *reg;
3330 aarch64_opnd_qualifier_t qualifier;
3331 enum aarch64_operand_class opd_class
3332 = aarch64_get_operand_class (operand->type);
3333
3334 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3335 if (reg)
3336 {
3337 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3338 {
3339 set_syntax_error (_("unexpected register in the immediate operand"));
3340 return FALSE;
3341 }
3342
3343 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3344 {
3345 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3346 return FALSE;
3347 }
3348
3349 operand->reg.regno = reg->number;
3350 operand->qualifier = qualifier;
3351
3352 /* Accept optional shift operation on register. */
3353 if (! skip_past_comma (str))
3354 return TRUE;
3355
3356 if (! parse_shift (str, operand, mode))
3357 return FALSE;
3358
3359 return TRUE;
3360 }
3361 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3362 {
3363 set_syntax_error
3364 (_("integer register expected in the extended/shifted operand "
3365 "register"));
3366 return FALSE;
3367 }
3368
3369 /* We have a shifted immediate variable. */
3370 return parse_shifter_operand_imm (str, operand, mode);
3371 }
3372
3373 /* Return TRUE on success; return FALSE otherwise. */
3374
3375 static bfd_boolean
3376 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3377 enum parse_shift_mode mode)
3378 {
3379 char *p = *str;
3380
3381 /* Determine if we have the sequence of characters #: or just :
3382 coming next. If we do, then we check for a :rello: relocation
3383 modifier. If we don't, punt the whole lot to
3384 parse_shifter_operand. */
3385
3386 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3387 {
3388 struct reloc_table_entry *entry;
3389
3390 if (p[0] == '#')
3391 p += 2;
3392 else
3393 p++;
3394 *str = p;
3395
3396 /* Try to parse a relocation. Anything else is an error. */
3397 if (!(entry = find_reloc_table_entry (str)))
3398 {
3399 set_syntax_error (_("unknown relocation modifier"));
3400 return FALSE;
3401 }
3402
3403 if (entry->add_type == 0)
3404 {
3405 set_syntax_error
3406 (_("this relocation modifier is not allowed on this instruction"));
3407 return FALSE;
3408 }
3409
3410 /* Save str before we decompose it. */
3411 p = *str;
3412
3413 /* Next, we parse the expression. */
3414 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3415 return FALSE;
3416
3417 /* Record the relocation type (use the ADD variant here). */
3418 inst.reloc.type = entry->add_type;
3419 inst.reloc.pc_rel = entry->pc_rel;
3420
3421 /* If str is empty, we've reached the end; stop here. */
3422 if (**str == '\0')
3423 return TRUE;
3424
3425 /* Otherwise, we have a shifted reloc modifier, so rewind to
3426 recover the variable name and continue parsing for the shifter. */
3427 *str = p;
3428 return parse_shifter_operand_imm (str, operand, mode);
3429 }
3430
3431 return parse_shifter_operand (str, operand, mode);
3432 }
3433
3434 /* Parse all forms of an address expression. Information is written
3435 to *OPERAND and/or inst.reloc.
3436
3437 The A64 instruction set has the following addressing modes:
3438
3439 Offset
3440 [base] // in SIMD ld/st structure
3441 [base{,#0}] // in ld/st exclusive
3442 [base{,#imm}]
3443 [base,Xm{,LSL #imm}]
3444 [base,Xm,SXTX {#imm}]
3445 [base,Wm,(S|U)XTW {#imm}]
3446 Pre-indexed
3447 [base]! // in ldraa/ldrab exclusive
3448 [base,#imm]!
3449 Post-indexed
3450 [base],#imm
3451 [base],Xm // in SIMD ld/st structure
3452 PC-relative (literal)
3453 label
3454 SVE:
3455 [base,#imm,MUL VL]
3456 [base,Zm.D{,LSL #imm}]
3457 [base,Zm.S,(S|U)XTW {#imm}]
3458 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3459 [Zn.S,#imm]
3460 [Zn.D,#imm]
3461 [Zn.S{, Xm}]
3462 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3463 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3464 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3465
3466 (As a convenience, the notation "=immediate" is permitted in conjunction
3467 with the pc-relative literal load instructions to automatically place an
3468 immediate value or symbolic address in a nearby literal pool and generate
3469 a hidden label which references it.)
3470
3471 Upon a successful parsing, the address structure in *OPERAND will be
3472 filled in the following way:
3473
3474 .base_regno = <base>
3475 .offset.is_reg // 1 if the offset is a register
3476 .offset.imm = <imm>
3477 .offset.regno = <Rm>
3478
3479 For different addressing modes defined in the A64 ISA:
3480
3481 Offset
3482 .pcrel=0; .preind=1; .postind=0; .writeback=0
3483 Pre-indexed
3484 .pcrel=0; .preind=1; .postind=0; .writeback=1
3485 Post-indexed
3486 .pcrel=0; .preind=0; .postind=1; .writeback=1
3487 PC-relative (literal)
3488 .pcrel=1; .preind=1; .postind=0; .writeback=0
3489
3490 The shift/extension information, if any, will be stored in .shifter.
3491 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3492 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3493 corresponding register.
3494
3495 BASE_TYPE says which types of base register should be accepted and
3496 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3497 is the type of shifter that is allowed for immediate offsets,
3498 or SHIFTED_NONE if none.
3499
3500 In all other respects, it is the caller's responsibility to check
3501 for addressing modes not supported by the instruction, and to set
3502 inst.reloc.type. */
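/* For example, for a plain load/store, "[x1, #8]!" is parsed with
   .base_regno = 1, .offset.imm = 8, .preind = 1 and .writeback = 1, while
   "[x1], x2" sets .offset.regno = 2, .offset.is_reg = 1, .postind = 1 and
   .writeback = 1.  */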
3503
3504 static bfd_boolean
3505 parse_address_main (char **str, aarch64_opnd_info *operand,
3506 aarch64_opnd_qualifier_t *base_qualifier,
3507 aarch64_opnd_qualifier_t *offset_qualifier,
3508 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3509 enum parse_shift_mode imm_shift_mode)
3510 {
3511 char *p = *str;
3512 const reg_entry *reg;
3513 expressionS *exp = &inst.reloc.exp;
3514
3515 *base_qualifier = AARCH64_OPND_QLF_NIL;
3516 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3517 if (! skip_past_char (&p, '['))
3518 {
3519 /* =immediate or label. */
3520 operand->addr.pcrel = 1;
3521 operand->addr.preind = 1;
3522
3523 /* #:<reloc_op>:<symbol> */
3524 skip_past_char (&p, '#');
3525 if (skip_past_char (&p, ':'))
3526 {
3527 bfd_reloc_code_real_type ty;
3528 struct reloc_table_entry *entry;
3529
3530 /* Try to parse a relocation modifier. Anything else is
3531 an error. */
3532 entry = find_reloc_table_entry (&p);
3533 if (! entry)
3534 {
3535 set_syntax_error (_("unknown relocation modifier"));
3536 return FALSE;
3537 }
3538
3539 switch (operand->type)
3540 {
3541 case AARCH64_OPND_ADDR_PCREL21:
3542 /* adr */
3543 ty = entry->adr_type;
3544 break;
3545
3546 default:
3547 ty = entry->ld_literal_type;
3548 break;
3549 }
3550
3551 if (ty == 0)
3552 {
3553 set_syntax_error
3554 (_("this relocation modifier is not allowed on this "
3555 "instruction"));
3556 return FALSE;
3557 }
3558
3559 /* #:<reloc_op>: */
3560 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3561 {
3562 set_syntax_error (_("invalid relocation expression"));
3563 return FALSE;
3564 }
3565
3566 /* #:<reloc_op>:<expr> */
3567 /* Record the relocation type. */
3568 inst.reloc.type = ty;
3569 inst.reloc.pc_rel = entry->pc_rel;
3570 }
3571 else
3572 {
3573
3574 if (skip_past_char (&p, '='))
3575 /* =immediate; need to generate the literal in the literal pool. */
3576 inst.gen_lit_pool = 1;
3577
3578 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3579 {
3580 set_syntax_error (_("invalid address"));
3581 return FALSE;
3582 }
3583 }
3584
3585 *str = p;
3586 return TRUE;
3587 }
3588
3589 /* [ */
3590
3591 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3592 if (!reg || !aarch64_check_reg_type (reg, base_type))
3593 {
3594 set_syntax_error (_(get_reg_expected_msg (base_type)));
3595 return FALSE;
3596 }
3597 operand->addr.base_regno = reg->number;
3598
3599 /* [Xn */
3600 if (skip_past_comma (&p))
3601 {
3602 /* [Xn, */
3603 operand->addr.preind = 1;
3604
3605 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3606 if (reg)
3607 {
3608 if (!aarch64_check_reg_type (reg, offset_type))
3609 {
3610 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3611 return FALSE;
3612 }
3613
3614 /* [Xn,Rm */
3615 operand->addr.offset.regno = reg->number;
3616 operand->addr.offset.is_reg = 1;
3617 /* Shifted index. */
3618 if (skip_past_comma (&p))
3619 {
3620 /* [Xn,Rm, */
3621 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3622 /* Use the diagnostics set in parse_shift, so do not set a new
3623 error message here. */
3624 return FALSE;
3625 }
3626 /* We only accept:
3627 [base,Xm] # For vector plus scalar SVE2 indexing.
3628 [base,Xm{,LSL #imm}]
3629 [base,Xm,SXTX {#imm}]
3630 [base,Wm,(S|U)XTW {#imm}] */
3631 if (operand->shifter.kind == AARCH64_MOD_NONE
3632 || operand->shifter.kind == AARCH64_MOD_LSL
3633 || operand->shifter.kind == AARCH64_MOD_SXTX)
3634 {
3635 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3636 {
3637 set_syntax_error (_("invalid use of 32-bit register offset"));
3638 return FALSE;
3639 }
3640 if (aarch64_get_qualifier_esize (*base_qualifier)
3641 != aarch64_get_qualifier_esize (*offset_qualifier)
3642 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3643 || *base_qualifier != AARCH64_OPND_QLF_S_S
3644 || *offset_qualifier != AARCH64_OPND_QLF_X))
3645 {
3646 set_syntax_error (_("offset has different size from base"));
3647 return FALSE;
3648 }
3649 }
3650 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3651 {
3652 set_syntax_error (_("invalid use of 64-bit register offset"));
3653 return FALSE;
3654 }
3655 }
3656 else
3657 {
3658 /* [Xn,#:<reloc_op>:<symbol> */
3659 skip_past_char (&p, '#');
3660 if (skip_past_char (&p, ':'))
3661 {
3662 struct reloc_table_entry *entry;
3663
3664 /* Try to parse a relocation modifier. Anything else is
3665 an error. */
3666 if (!(entry = find_reloc_table_entry (&p)))
3667 {
3668 set_syntax_error (_("unknown relocation modifier"));
3669 return FALSE;
3670 }
3671
3672 if (entry->ldst_type == 0)
3673 {
3674 set_syntax_error
3675 (_("this relocation modifier is not allowed on this "
3676 "instruction"));
3677 return FALSE;
3678 }
3679
3680 /* [Xn,#:<reloc_op>: */
3681 /* We now have the group relocation table entry corresponding to
3682 the name in the assembler source. Next, we parse the
3683 expression. */
3684 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3685 {
3686 set_syntax_error (_("invalid relocation expression"));
3687 return FALSE;
3688 }
3689
3690 /* [Xn,#:<reloc_op>:<expr> */
3691 /* Record the load/store relocation type. */
3692 inst.reloc.type = entry->ldst_type;
3693 inst.reloc.pc_rel = entry->pc_rel;
3694 }
3695 else
3696 {
3697 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3698 {
3699 set_syntax_error (_("invalid expression in the address"));
3700 return FALSE;
3701 }
3702 /* [Xn,<expr> */
3703 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3704 /* [Xn,<expr>,<shifter> */
3705 if (! parse_shift (&p, operand, imm_shift_mode))
3706 return FALSE;
3707 }
3708 }
3709 }
3710
3711 if (! skip_past_char (&p, ']'))
3712 {
3713 set_syntax_error (_("']' expected"));
3714 return FALSE;
3715 }
3716
3717 if (skip_past_char (&p, '!'))
3718 {
3719 if (operand->addr.preind && operand->addr.offset.is_reg)
3720 {
3721 set_syntax_error (_("register offset not allowed in pre-indexed "
3722 "addressing mode"));
3723 return FALSE;
3724 }
3725 /* [Xn]! */
3726 operand->addr.writeback = 1;
3727 }
3728 else if (skip_past_comma (&p))
3729 {
3730 /* [Xn], */
3731 operand->addr.postind = 1;
3732 operand->addr.writeback = 1;
3733
3734 if (operand->addr.preind)
3735 {
3736 set_syntax_error (_("cannot combine pre- and post-indexing"));
3737 return FALSE;
3738 }
3739
3740 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3741 if (reg)
3742 {
3743 /* [Xn],Xm */
3744 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3745 {
3746 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3747 return FALSE;
3748 }
3749
3750 operand->addr.offset.regno = reg->number;
3751 operand->addr.offset.is_reg = 1;
3752 }
3753 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3754 {
3755 /* [Xn],#expr */
3756 set_syntax_error (_("invalid expression in the address"));
3757 return FALSE;
3758 }
3759 }
3760
3761 /* If at this point neither .preind nor .postind is set, we have a
3762 bare [Rn]{!}; accept [Rn]! as a shorthand for [Rn,#0]! only for ldraa
3763 and ldrab, and accept [Rn] as a shorthand for [Rn,#0].
3764 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3765 [Zn.<T>, xzr]. */
3766 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3767 {
3768 if (operand->addr.writeback)
3769 {
3770 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3771 {
3772 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
3773 operand->addr.offset.is_reg = 0;
3774 operand->addr.offset.imm = 0;
3775 operand->addr.preind = 1;
3776 }
3777 else
3778 {
3779 /* Reject [Rn]! */
3780 set_syntax_error (_("missing offset in the pre-indexed address"));
3781 return FALSE;
3782 }
3783 }
3784 else
3785 {
3786 operand->addr.preind = 1;
3787 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3788 {
3789 operand->addr.offset.is_reg = 1;
3790 operand->addr.offset.regno = REG_ZR;
3791 *offset_qualifier = AARCH64_OPND_QLF_X;
3792 }
3793 else
3794 {
3795 inst.reloc.exp.X_op = O_constant;
3796 inst.reloc.exp.X_add_number = 0;
3797 }
3798 }
3799 }
3800
3801 *str = p;
3802 return TRUE;
3803 }
3804
3805 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3806 on success. */
3807 static bfd_boolean
3808 parse_address (char **str, aarch64_opnd_info *operand)
3809 {
3810 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3811 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3812 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3813 }
3814
3815 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3816 The arguments have the same meaning as for parse_address_main.
3817 Return TRUE on success. */
3818 static bfd_boolean
3819 parse_sve_address (char **str, aarch64_opnd_info *operand,
3820 aarch64_opnd_qualifier_t *base_qualifier,
3821 aarch64_opnd_qualifier_t *offset_qualifier)
3822 {
3823 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3824 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3825 SHIFTED_MUL_VL);
3826 }
3827
3828 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3829 Return TRUE on success; otherwise return FALSE. */
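/* For example, the operand "#:abs_g1_nc:var" of a MOVK sets inst.reloc.type
   to BFD_RELOC_AARCH64_MOVW_G1_NC (the movw_type of the "abs_g1_nc" entry
   above), while a plain "#1234" sets *INTERNAL_FIXUP_P instead.  */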
3830 static bfd_boolean
3831 parse_half (char **str, int *internal_fixup_p)
3832 {
3833 char *p = *str;
3834
3835 skip_past_char (&p, '#');
3836
3837 gas_assert (internal_fixup_p);
3838 *internal_fixup_p = 0;
3839
3840 if (*p == ':')
3841 {
3842 struct reloc_table_entry *entry;
3843
3844 /* Try to parse a relocation. Anything else is an error. */
3845 ++p;
3846 if (!(entry = find_reloc_table_entry (&p)))
3847 {
3848 set_syntax_error (_("unknown relocation modifier"));
3849 return FALSE;
3850 }
3851
3852 if (entry->movw_type == 0)
3853 {
3854 set_syntax_error
3855 (_("this relocation modifier is not allowed on this instruction"));
3856 return FALSE;
3857 }
3858
3859 inst.reloc.type = entry->movw_type;
3860 }
3861 else
3862 *internal_fixup_p = 1;
3863
3864 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3865 return FALSE;
3866
3867 *str = p;
3868 return TRUE;
3869 }
3870
3871 /* Parse an operand for an ADRP instruction:
3872 ADRP <Xd>, <label>
3873 Return TRUE on success; otherwise return FALSE. */
3874
3875 static bfd_boolean
3876 parse_adrp (char **str)
3877 {
3878 char *p;
3879
3880 p = *str;
3881 if (*p == ':')
3882 {
3883 struct reloc_table_entry *entry;
3884
3885 /* Try to parse a relocation. Anything else is an error. */
3886 ++p;
3887 if (!(entry = find_reloc_table_entry (&p)))
3888 {
3889 set_syntax_error (_("unknown relocation modifier"));
3890 return FALSE;
3891 }
3892
3893 if (entry->adrp_type == 0)
3894 {
3895 set_syntax_error
3896 (_("this relocation modifier is not allowed on this instruction"));
3897 return FALSE;
3898 }
3899
3900 inst.reloc.type = entry->adrp_type;
3901 }
3902 else
3903 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3904
3905 inst.reloc.pc_rel = 1;
3906
3907 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3908 return FALSE;
3909
3910 *str = p;
3911 return TRUE;
3912 }
3913
3914 /* Miscellaneous. */
3915
3916 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3917 of SIZE tokens in which index I gives the token for field value I,
3918 or is null if field value I is invalid. REG_TYPE says which register
3919 names should be treated as registers rather than as symbolic immediates.
3920
3921 Return true on success, moving *STR past the operand and storing the
3922 field value in *VAL. */
3923
3924 static int
3925 parse_enum_string (char **str, int64_t *val, const char *const *array,
3926 size_t size, aarch64_reg_type reg_type)
3927 {
3928 expressionS exp;
3929 char *p, *q;
3930 size_t i;
3931
3932 /* Match C-like tokens. */
3933 p = q = *str;
3934 while (ISALNUM (*q))
3935 q++;
3936
3937 for (i = 0; i < size; ++i)
3938 if (array[i]
3939 && strncasecmp (array[i], p, q - p) == 0
3940 && array[i][q - p] == 0)
3941 {
3942 *val = i;
3943 *str = q;
3944 return TRUE;
3945 }
3946
3947 if (!parse_immediate_expression (&p, &exp, reg_type))
3948 return FALSE;
3949
3950 if (exp.X_op == O_constant
3951 && (uint64_t) exp.X_add_number < size)
3952 {
3953 *val = exp.X_add_number;
3954 *str = p;
3955 return TRUE;
3956 }
3957
3958 /* Use the default error for this operand. */
3959 return FALSE;
3960 }
3961
3962 /* Parse an option for a preload instruction. Returns the encoding for the
3963 option, or PARSE_FAIL. */
3964
3965 static int
3966 parse_pldop (char **str)
3967 {
3968 char *p, *q;
3969 const struct aarch64_name_value_pair *o;
3970
3971 p = q = *str;
3972 while (ISALNUM (*q))
3973 q++;
3974
3975 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3976 if (!o)
3977 return PARSE_FAIL;
3978
3979 *str = q;
3980 return o->value;
3981 }
3982
3983 /* Parse an option for a barrier instruction. Returns the encoding for the
3984 option, or PARSE_FAIL. */
3985
3986 static int
3987 parse_barrier (char **str)
3988 {
3989 char *p, *q;
3990 const struct aarch64_name_value_pair *o;
3991
3992 p = q = *str;
3993 while (ISALPHA (*q))
3994 q++;
3995
3996 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3997 if (!o)
3998 return PARSE_FAIL;
3999
4000 *str = q;
4001 return o->value;
4002 }
4003
4004 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4005 and return 0 if successful. Otherwise return PARSE_FAIL. */
4006
4007 static int
4008 parse_barrier_psb (char **str,
4009 const struct aarch64_name_value_pair ** hint_opt)
4010 {
4011 char *p, *q;
4012 const struct aarch64_name_value_pair *o;
4013
4014 p = q = *str;
4015 while (ISALPHA (*q))
4016 q++;
4017
4018 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4019 if (!o)
4020 {
4021 set_fatal_syntax_error
4022 ( _("unknown or missing option to PSB/TSB"));
4023 return PARSE_FAIL;
4024 }
4025
4026 if (o->value != 0x11)
4027 {
4028 /* PSB only accepts option name 'CSYNC'. */
4029 set_syntax_error
4030 (_("the specified option is not accepted for PSB/TSB"));
4031 return PARSE_FAIL;
4032 }
4033
4034 *str = q;
4035 *hint_opt = o;
4036 return 0;
4037 }
4038
4039 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4040 and return 0 if successful. Otherwise return PARSE_FAIL. */
4041
4042 static int
4043 parse_bti_operand (char **str,
4044 const struct aarch64_name_value_pair ** hint_opt)
4045 {
4046 char *p, *q;
4047 const struct aarch64_name_value_pair *o;
4048
4049 p = q = *str;
4050 while (ISALPHA (*q))
4051 q++;
4052
4053 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4054 if (!o)
4055 {
4056 set_fatal_syntax_error
4057 ( _("unknown option to BTI"));
4058 return PARSE_FAIL;
4059 }
4060
4061 switch (o->value)
4062 {
4063 /* Valid BTI operands. */
4064 case HINT_OPD_C:
4065 case HINT_OPD_J:
4066 case HINT_OPD_JC:
4067 break;
4068
4069 default:
4070 set_syntax_error
4071 (_("unknown option to BTI"));
4072 return PARSE_FAIL;
4073 }
4074
4075 *str = q;
4076 *hint_opt = o;
4077 return 0;
4078 }
4079
4080 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4081 Returns the encoding for the option, or PARSE_FAIL.
4082
4083 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4084 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4085
4086 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4087 field, otherwise as a system register.
4088 */
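/* For example, the implementation-defined name "s3_0_c4_c2_1" is accepted
   when IMPLE_DEFINED_P is non-zero and encodes as
   (3 << 14) | (0 << 11) | (4 << 7) | (2 << 3) | 1 == 0xc211.  */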
4089
4090 static int
4091 parse_sys_reg (char **str, htab_t sys_regs,
4092 int imple_defined_p, int pstatefield_p,
4093 uint32_t* flags)
4094 {
4095 char *p, *q;
4096 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4097 const aarch64_sys_reg *o;
4098 int value;
4099
4100 p = buf;
4101 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4102 if (p < buf + (sizeof (buf) - 1))
4103 *p++ = TOLOWER (*q);
4104 *p = '\0';
4105
4106 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4107 valid system register. This is enforced by construction of the hash
4108 table. */
4109 if (p - buf != q - *str)
4110 return PARSE_FAIL;
4111
4112 o = str_hash_find (sys_regs, buf);
4113 if (!o)
4114 {
4115 if (!imple_defined_p)
4116 return PARSE_FAIL;
4117 else
4118 {
4119 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4120 unsigned int op0, op1, cn, cm, op2;
4121
4122 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4123 != 5)
4124 return PARSE_FAIL;
4125 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4126 return PARSE_FAIL;
4127 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4128 if (flags)
4129 *flags = 0;
4130 }
4131 }
4132 else
4133 {
4134 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4135 as_bad (_("selected processor does not support PSTATE field "
4136 "name '%s'"), buf);
4137 if (!pstatefield_p
4138 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4139 o->value, o->flags, o->features))
4140 as_bad (_("selected processor does not support system register "
4141 "name '%s'"), buf);
4142 if (aarch64_sys_reg_deprecated_p (o->flags))
4143 as_warn (_("system register name '%s' is deprecated and may be "
4144 "removed in a future release"), buf);
4145 value = o->value;
4146 if (flags)
4147 *flags = o->flags;
4148 }
4149
4150 *str = q;
4151 return value;
4152 }
4153
4154 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4155 for the option, or NULL. */
4156
4157 static const aarch64_sys_ins_reg *
4158 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4159 {
4160 char *p, *q;
4161 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4162 const aarch64_sys_ins_reg *o;
4163
4164 p = buf;
4165 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4166 if (p < buf + (sizeof (buf) - 1))
4167 *p++ = TOLOWER (*q);
4168 *p = '\0';
4169
4170 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4171 valid system register. This is enforced by construction of the hash
4172 table. */
4173 if (p - buf != q - *str)
4174 return NULL;
4175
4176 o = str_hash_find (sys_ins_regs, buf);
4177 if (!o)
4178 return NULL;
4179
4180 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4181 o->name, o->value, o->flags, 0))
4182 as_bad (_("selected processor does not support system register "
4183 "name '%s'"), buf);
4184 if (aarch64_sys_reg_deprecated_p (o->flags))
4185 as_warn (_("system register name '%s' is deprecated and may be "
4186 "removed in a future release"), buf);
4187
4188 *str = q;
4189 return o;
4190 }
4191 \f
4192 #define po_char_or_fail(chr) do { \
4193 if (! skip_past_char (&str, chr)) \
4194 goto failure; \
4195 } while (0)
4196
4197 #define po_reg_or_fail(regtype) do { \
4198 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4199 if (val == PARSE_FAIL) \
4200 { \
4201 set_default_error (); \
4202 goto failure; \
4203 } \
4204 } while (0)
4205
4206 #define po_int_reg_or_fail(reg_type) do { \
4207 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4208 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4209 { \
4210 set_default_error (); \
4211 goto failure; \
4212 } \
4213 info->reg.regno = reg->number; \
4214 info->qualifier = qualifier; \
4215 } while (0)
4216
4217 #define po_imm_nc_or_fail() do { \
4218 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4219 goto failure; \
4220 } while (0)
4221
4222 #define po_imm_or_fail(min, max) do { \
4223 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4224 goto failure; \
4225 if (val < min || val > max) \
4226 { \
4227 set_fatal_syntax_error (_("immediate value out of range "\
4228 #min " to "#max)); \
4229 goto failure; \
4230 } \
4231 } while (0)
4232
4233 #define po_enum_or_fail(array) do { \
4234 if (!parse_enum_string (&str, &val, array, \
4235 ARRAY_SIZE (array), imm_reg_type)) \
4236 goto failure; \
4237 } while (0)
4238
4239 #define po_misc_or_fail(expr) do { \
4240 if (!expr) \
4241 goto failure; \
4242 } while (0)
4243 \f
4244 /* Encode the 12-bit imm field of Add/sub immediate. */
4245 static inline uint32_t
4246 encode_addsub_imm (uint32_t imm)
4247 {
4248 return imm << 10;
4249 }
4250
4251 /* Encode the shift amount field of Add/sub immediate. */
4252 static inline uint32_t
4253 encode_addsub_imm_shift_amount (uint32_t cnt)
4254 {
4255 return cnt << 22;
4256 }
4257
4258
4259 /* Encode the imm field of the Adr instruction. */
4260 static inline uint32_t
4261 encode_adr_imm (uint32_t imm)
4262 {
4263 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4264 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4265 }
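/* For example, encode_adr_imm (5) maps the low two bits (0b01) to bits
   [30:29] and bit 2 to bit 5, giving 0x20000020.  */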
4266
4267 /* Encode the immediate field of Move wide immediate. */
4268 static inline uint32_t
4269 encode_movw_imm (uint32_t imm)
4270 {
4271 return imm << 5;
4272 }
4273
4274 /* Encode the 26-bit offset of unconditional branch. */
4275 static inline uint32_t
4276 encode_branch_ofs_26 (uint32_t ofs)
4277 {
4278 return ofs & ((1 << 26) - 1);
4279 }
4280
4281 /* Encode the 19-bit offset of conditional branch and compare & branch. */
4282 static inline uint32_t
4283 encode_cond_branch_ofs_19 (uint32_t ofs)
4284 {
4285 return (ofs & ((1 << 19) - 1)) << 5;
4286 }
4287
4288 /* Encode the 19-bit offset of ld literal. */
4289 static inline uint32_t
4290 encode_ld_lit_ofs_19 (uint32_t ofs)
4291 {
4292 return (ofs & ((1 << 19) - 1)) << 5;
4293 }
4294
4295 /* Encode the 14-bit offset of test & branch. */
4296 static inline uint32_t
4297 encode_tst_branch_ofs_14 (uint32_t ofs)
4298 {
4299 return (ofs & ((1 << 14) - 1)) << 5;
4300 }
4301
4302 /* Encode the 16-bit imm field of svc/hvc/smc. */
4303 static inline uint32_t
4304 encode_svc_imm (uint32_t imm)
4305 {
4306 return imm << 5;
4307 }
4308
4309 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4310 static inline uint32_t
4311 reencode_addsub_switch_add_sub (uint32_t opcode)
4312 {
4313 return opcode ^ (1 << 30);
4314 }
4315
4316 static inline uint32_t
4317 reencode_movzn_to_movz (uint32_t opcode)
4318 {
4319 return opcode | (1 << 30);
4320 }
4321
4322 static inline uint32_t
4323 reencode_movzn_to_movn (uint32_t opcode)
4324 {
4325 return opcode & ~(1 << 30);
4326 }
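/* These helpers rely on bit 30 selecting between the paired encodings;
   e.g. flipping it turns the 64-bit "add x0, x0, #0" word 0x91000000 into
   the corresponding "sub" word 0xd1000000, and setting or clearing it
   switches between MOVZ and MOVN in the move-wide group.  */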
4327
4328 /* Overall per-instruction processing. */
4329
4330 /* We need to be able to fix up arbitrary expressions in some statements.
4331 This is so that we can handle symbols that are an arbitrary distance from
4332 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4333 which returns part of an address in a form which will be valid for
4334 a data instruction. We do this by pushing the expression into a symbol
4335 in the expr_section, and creating a fix for that. */
4336
4337 static fixS *
4338 fix_new_aarch64 (fragS * frag,
4339 int where,
4340 short int size,
4341 expressionS * exp,
4342 int pc_rel,
4343 int reloc)
4344 {
4345 fixS *new_fix;
4346
4347 switch (exp->X_op)
4348 {
4349 case O_constant:
4350 case O_symbol:
4351 case O_add:
4352 case O_subtract:
4353 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4354 break;
4355
4356 default:
4357 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4358 pc_rel, reloc);
4359 break;
4360 }
4361 return new_fix;
4362 }
4363 \f
4364 /* Diagnostics on operands errors. */
4365
4366 /* By default, output a verbose error message.
4367 Disable it with -mno-verbose-error. */
4368 static int verbose_error_p = 1;
4369
4370 #ifdef DEBUG_AARCH64
4371 /* N.B. this is only for the purpose of debugging. */
4372 const char* operand_mismatch_kind_names[] =
4373 {
4374 "AARCH64_OPDE_NIL",
4375 "AARCH64_OPDE_RECOVERABLE",
4376 "AARCH64_OPDE_SYNTAX_ERROR",
4377 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4378 "AARCH64_OPDE_INVALID_VARIANT",
4379 "AARCH64_OPDE_OUT_OF_RANGE",
4380 "AARCH64_OPDE_UNALIGNED",
4381 "AARCH64_OPDE_REG_LIST",
4382 "AARCH64_OPDE_OTHER_ERROR",
4383 };
4384 #endif /* DEBUG_AARCH64 */
4385
4386 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4387
4388 When multiple errors of different kinds are found in the same assembly
4389 line, only the error of the highest severity will be picked up for
4390 issuing the diagnostics. */
4391
4392 static inline bfd_boolean
4393 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4394 enum aarch64_operand_error_kind rhs)
4395 {
4396 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4397 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4398 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4399 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4400 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4401 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4402 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4403 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4404 return lhs > rhs;
4405 }
4406
4407 /* Helper routine to get the mnemonic name from the assembly instruction
4408 line; it should only be called for diagnostic purposes, as a string
4409 copy operation is involved, which may affect runtime performance if
4410 used elsewhere. */
4411
4412 static const char*
4413 get_mnemonic_name (const char *str)
4414 {
4415 static char mnemonic[32];
4416 char *ptr;
4417
4418 /* Get the first 31 bytes and assume that the full name is included. */
4419 strncpy (mnemonic, str, 31);
4420 mnemonic[31] = '\0';
4421
4422 /* Scan up to the end of the mnemonic, which must end in white space,
4423 '.', or end of string. */
4424 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4425 ;
4426
4427 *ptr = '\0';
4428
4429 /* Append '...' to the truncated long name. */
4430 if (ptr - mnemonic == 31)
4431 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4432
4433 return mnemonic;
4434 }
4435
4436 static void
4437 reset_aarch64_instruction (aarch64_instruction *instruction)
4438 {
4439 memset (instruction, '\0', sizeof (aarch64_instruction));
4440 instruction->reloc.type = BFD_RELOC_UNUSED;
4441 }
4442
4443 /* Data structures storing one user error in the assembly code related to
4444 operands. */
4445
4446 struct operand_error_record
4447 {
4448 const aarch64_opcode *opcode;
4449 aarch64_operand_error detail;
4450 struct operand_error_record *next;
4451 };
4452
4453 typedef struct operand_error_record operand_error_record;
4454
4455 struct operand_errors
4456 {
4457 operand_error_record *head;
4458 operand_error_record *tail;
4459 };
4460
4461 typedef struct operand_errors operand_errors;
4462
4463 /* Top-level data structure reporting user errors for the current line of
4464 the assembly code.
4465 The way md_assemble works is that all opcodes sharing the same mnemonic
4466 name are iterated to find a match to the assembly line. In this data
4467 structure, each of these opcodes will have one operand_error_record
4468 allocated and inserted. In other words, excess errors related to
4469 a single opcode are disregarded. */
4470 operand_errors operand_error_report;
4471
4472 /* Free record nodes. */
4473 static operand_error_record *free_opnd_error_record_nodes = NULL;
4474
4475 /* Initialize the data structure that stores the operand mismatch
4476 information when assembling one line of the assembly code. */
4477 static void
4478 init_operand_error_report (void)
4479 {
4480 if (operand_error_report.head != NULL)
4481 {
4482 gas_assert (operand_error_report.tail != NULL);
4483 operand_error_report.tail->next = free_opnd_error_record_nodes;
4484 free_opnd_error_record_nodes = operand_error_report.head;
4485 operand_error_report.head = NULL;
4486 operand_error_report.tail = NULL;
4487 return;
4488 }
4489 gas_assert (operand_error_report.tail == NULL);
4490 }
4491
4492 /* Return TRUE if some operand error has been recorded during the
4493 parsing of the current assembly line using the opcode *OPCODE;
4494 otherwise return FALSE. */
4495 static inline bfd_boolean
4496 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4497 {
4498 operand_error_record *record = operand_error_report.head;
4499 return record && record->opcode == opcode;
4500 }
4501
4502 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4503 OPCODE field is initialized with OPCODE.
4504 N.B. there is only one record for each opcode, i.e. at most one error is
4505 recorded for each instruction template. */
4506
4507 static void
4508 add_operand_error_record (const operand_error_record* new_record)
4509 {
4510 const aarch64_opcode *opcode = new_record->opcode;
4511 operand_error_record* record = operand_error_report.head;
4512
4513 /* The record may have been created for this opcode. If not, we need
4514 to prepare one. */
4515 if (! opcode_has_operand_error_p (opcode))
4516 {
4517 /* Get one empty record. */
4518 if (free_opnd_error_record_nodes == NULL)
4519 {
4520 record = XNEW (operand_error_record);
4521 }
4522 else
4523 {
4524 record = free_opnd_error_record_nodes;
4525 free_opnd_error_record_nodes = record->next;
4526 }
4527 record->opcode = opcode;
4528 /* Insert at the head. */
4529 record->next = operand_error_report.head;
4530 operand_error_report.head = record;
4531 if (operand_error_report.tail == NULL)
4532 operand_error_report.tail = record;
4533 }
4534 else if (record->detail.kind != AARCH64_OPDE_NIL
4535 && record->detail.index <= new_record->detail.index
4536 && operand_error_higher_severity_p (record->detail.kind,
4537 new_record->detail.kind))
4538 {
4539 /* In the case of multiple errors found on operands related with a
4540 single opcode, only record the error of the leftmost operand and
4541 only if the error is of higher severity. */
4542 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4543 " the existing error %s on operand %d",
4544 operand_mismatch_kind_names[new_record->detail.kind],
4545 new_record->detail.index,
4546 operand_mismatch_kind_names[record->detail.kind],
4547 record->detail.index);
4548 return;
4549 }
4550
4551 record->detail = new_record->detail;
4552 }
4553
4554 static inline void
4555 record_operand_error_info (const aarch64_opcode *opcode,
4556 aarch64_operand_error *error_info)
4557 {
4558 operand_error_record record;
4559 record.opcode = opcode;
4560 record.detail = *error_info;
4561 add_operand_error_record (&record);
4562 }
4563
4564 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4565 error message *ERROR, for operand IDX (count from 0). */
4566
4567 static void
4568 record_operand_error (const aarch64_opcode *opcode, int idx,
4569 enum aarch64_operand_error_kind kind,
4570 const char* error)
4571 {
4572 aarch64_operand_error info;
4573 memset (&info, 0, sizeof (info));
4574 info.index = idx;
4575 info.kind = kind;
4576 info.error = error;
4577 info.non_fatal = FALSE;
4578 record_operand_error_info (opcode, &info);
4579 }
4580
4581 static void
4582 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4583 enum aarch64_operand_error_kind kind,
4584 const char* error, const int *extra_data)
4585 {
4586 aarch64_operand_error info;
4587 info.index = idx;
4588 info.kind = kind;
4589 info.error = error;
4590 info.data[0] = extra_data[0];
4591 info.data[1] = extra_data[1];
4592 info.data[2] = extra_data[2];
4593 info.non_fatal = FALSE;
4594 record_operand_error_info (opcode, &info);
4595 }
4596
4597 static void
4598 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4599 const char* error, int lower_bound,
4600 int upper_bound)
4601 {
4602 int data[3] = {lower_bound, upper_bound, 0};
4603 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4604 error, data);
4605 }
4606
4607 /* Remove the operand error record for *OPCODE. */
4608 static void ATTRIBUTE_UNUSED
4609 remove_operand_error_record (const aarch64_opcode *opcode)
4610 {
4611 if (opcode_has_operand_error_p (opcode))
4612 {
4613 operand_error_record* record = operand_error_report.head;
4614 gas_assert (record != NULL && operand_error_report.tail != NULL);
4615 operand_error_report.head = record->next;
4616 record->next = free_opnd_error_record_nodes;
4617 free_opnd_error_record_nodes = record;
4618 if (operand_error_report.head == NULL)
4619 {
4620 gas_assert (operand_error_report.tail == record);
4621 operand_error_report.tail = NULL;
4622 }
4623 }
4624 }
4625
4626 /* Given the instruction in *INSTR, return the index of the best matched
4627 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4628
4629 Return -1 if there is no qualifier sequence; return the first match
4630 if multiple matches are found. */
4631
4632 static int
4633 find_best_match (const aarch64_inst *instr,
4634 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4635 {
4636 int i, num_opnds, max_num_matched, idx;
4637
4638 num_opnds = aarch64_num_of_operands (instr->opcode);
4639 if (num_opnds == 0)
4640 {
4641 DEBUG_TRACE ("no operand");
4642 return -1;
4643 }
4644
4645 max_num_matched = 0;
4646 idx = 0;
4647
4648 /* For each pattern. */
4649 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4650 {
4651 int j, num_matched;
4652 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4653
4654 /* Most opcodes have far fewer patterns in the list. */
4655 if (empty_qualifier_sequence_p (qualifiers))
4656 {
4657 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4658 break;
4659 }
4660
4661 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4662 if (*qualifiers == instr->operands[j].qualifier)
4663 ++num_matched;
4664
4665 if (num_matched > max_num_matched)
4666 {
4667 max_num_matched = num_matched;
4668 idx = i;
4669 }
4670 }
4671
4672 DEBUG_TRACE ("return with %d", idx);
4673 return idx;
4674 }
4675
4676 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4677 corresponding operands in *INSTR. */
4678
4679 static inline void
4680 assign_qualifier_sequence (aarch64_inst *instr,
4681 const aarch64_opnd_qualifier_t *qualifiers)
4682 {
4683 int i = 0;
4684 int num_opnds = aarch64_num_of_operands (instr->opcode);
4685 gas_assert (num_opnds);
4686 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4687 instr->operands[i].qualifier = *qualifiers;
4688 }
4689
4690 /* Print operands for the diagnosis purpose. */
4691
4692 static void
4693 print_operands (char *buf, const aarch64_opcode *opcode,
4694 const aarch64_opnd_info *opnds)
4695 {
4696 int i;
4697
4698 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4699 {
4700 char str[128];
4701
4702 /* We rely primarily on the opcode's operand info, but we also look into
4703 inst->operands so that an optional operand that was supplied can be
4704 printed.
4705 The two operand codes should be the same in all cases, apart from
4706 when the operand can be optional. */
4707 if (opcode->operands[i] == AARCH64_OPND_NIL
4708 || opnds[i].type == AARCH64_OPND_NIL)
4709 break;
4710
4711 /* Generate the operand string in STR. */
4712 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4713 NULL, cpu_variant);
4714
4715 /* Delimiter. */
4716 if (str[0] != '\0')
4717 strcat (buf, i == 0 ? " " : ", ");
4718
4719 /* Append the operand string. */
4720 strcat (buf, str);
4721 }
4722 }
4723
4724 /* Send to stderr a string as information. */
4725
4726 static void
4727 output_info (const char *format, ...)
4728 {
4729 const char *file;
4730 unsigned int line;
4731 va_list args;
4732
4733 file = as_where (&line);
4734 if (file)
4735 {
4736 if (line != 0)
4737 fprintf (stderr, "%s:%u: ", file, line);
4738 else
4739 fprintf (stderr, "%s: ", file);
4740 }
4741 fprintf (stderr, _("Info: "));
4742 va_start (args, format);
4743 vfprintf (stderr, format, args);
4744 va_end (args);
4745 (void) putc ('\n', stderr);
4746 }
4747
4748 /* Output one operand error record. */
4749
4750 static void
4751 output_operand_error_record (const operand_error_record *record, char *str)
4752 {
4753 const aarch64_operand_error *detail = &record->detail;
4754 int idx = detail->index;
4755 const aarch64_opcode *opcode = record->opcode;
4756 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4757 : AARCH64_OPND_NIL);
4758
4759 typedef void (*handler_t)(const char *format, ...);
4760 handler_t handler = detail->non_fatal ? as_warn : as_bad;
4761
4762 switch (detail->kind)
4763 {
4764 case AARCH64_OPDE_NIL:
4765 gas_assert (0);
4766 break;
4767 case AARCH64_OPDE_SYNTAX_ERROR:
4768 case AARCH64_OPDE_RECOVERABLE:
4769 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4770 case AARCH64_OPDE_OTHER_ERROR:
4771 /* Use the prepared error message if there is one, otherwise use the
4772 operand description string to describe the error. */
4773 if (detail->error != NULL)
4774 {
4775 if (idx < 0)
4776 handler (_("%s -- `%s'"), detail->error, str);
4777 else
4778 handler (_("%s at operand %d -- `%s'"),
4779 detail->error, idx + 1, str);
4780 }
4781 else
4782 {
4783 gas_assert (idx >= 0);
4784 handler (_("operand %d must be %s -- `%s'"), idx + 1,
4785 aarch64_get_operand_desc (opd_code), str);
4786 }
4787 break;
4788
4789 case AARCH64_OPDE_INVALID_VARIANT:
4790 handler (_("operand mismatch -- `%s'"), str);
4791 if (verbose_error_p)
4792 {
4793 /* We will try to correct the erroneous instruction and also provide
4794 more information e.g. all other valid variants.
4795
4796 The string representation of the corrected instruction and other
4797 valid variants are generated by
4798
4799 1) obtaining the intermediate representation of the erroneous
4800 instruction;
4801 2) manipulating the IR, e.g. replacing the operand qualifier;
4802 3) printing out the instruction by calling the printer functions
4803 shared with the disassembler.
4804
4805 The limitation of this method is that the exact input assembly
4806 line cannot be accurately reproduced in some cases, for example an
4807 optional operand present in the actual assembly line will be
4808 omitted in the output; likewise for the optional syntax rules,
4809 e.g. the # before the immediate. Another limitation is that the
4810 assembly symbols and relocation operations in the assembly line
4811 currently cannot be printed out in the error report. Last but not
4812 least, when other errors co-exist with this error, the
4813 'corrected' instruction may still be incorrect, e.g. given
4814 'ldnp h0,h1,[x0,#6]!'
4815 this diagnosis will provide the version:
4816 'ldnp s0,s1,[x0,#6]!'
4817 which is still not right. */
4818 size_t len = strlen (get_mnemonic_name (str));
4819 int i, qlf_idx;
4820 bfd_boolean result;
4821 char buf[2048];
4822 aarch64_inst *inst_base = &inst.base;
4823 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4824
4825 /* Init inst. */
4826 reset_aarch64_instruction (&inst);
4827 inst_base->opcode = opcode;
4828
4829 /* Reset the error report so that there is no side effect on the
4830 following operand parsing. */
4831 init_operand_error_report ();
4832
4833 /* Fill inst. */
4834 result = parse_operands (str + len, opcode)
4835 && programmer_friendly_fixup (&inst);
4836 gas_assert (result);
4837 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4838 NULL, NULL, insn_sequence);
4839 gas_assert (!result);
4840
4841 /* Find the most matched qualifier sequence. */
4842 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4843 gas_assert (qlf_idx > -1);
4844
4845 /* Assign the qualifiers. */
4846 assign_qualifier_sequence (inst_base,
4847 opcode->qualifiers_list[qlf_idx]);
4848
4849 /* Print the hint. */
4850 output_info (_(" did you mean this?"));
4851 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4852 print_operands (buf, opcode, inst_base->operands);
4853 output_info (_(" %s"), buf);
4854
4855 /* Print out other variant(s) if there are any. */
4856 if (qlf_idx != 0 ||
4857 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4858 output_info (_(" other valid variant(s):"));
4859
4860 /* For each pattern. */
4861 qualifiers_list = opcode->qualifiers_list;
4862 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4863 {
4864 /* Most opcodes have far fewer patterns in the list.
4865 The first NIL qualifier indicates the end of the list. */
4866 if (empty_qualifier_sequence_p (*qualifiers_list))
4867 break;
4868
4869 if (i != qlf_idx)
4870 {
4871 /* Mnemonics name. */
4872 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4873
4874 /* Assign the qualifiers. */
4875 assign_qualifier_sequence (inst_base, *qualifiers_list);
4876
4877 /* Print instruction. */
4878 print_operands (buf, opcode, inst_base->operands);
4879
4880 output_info (_(" %s"), buf);
4881 }
4882 }
4883 }
4884 break;
4885
4886 case AARCH64_OPDE_UNTIED_OPERAND:
4887 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4888 detail->index + 1, str);
4889 break;
4890
4891 case AARCH64_OPDE_OUT_OF_RANGE:
4892 if (detail->data[0] != detail->data[1])
4893 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4894 detail->error ? detail->error : _("immediate value"),
4895 detail->data[0], detail->data[1], idx + 1, str);
4896 else
4897 handler (_("%s must be %d at operand %d -- `%s'"),
4898 detail->error ? detail->error : _("immediate value"),
4899 detail->data[0], idx + 1, str);
4900 break;
4901
4902 case AARCH64_OPDE_REG_LIST:
4903 if (detail->data[0] == 1)
4904 handler (_("invalid number of registers in the list; "
4905 "only 1 register is expected at operand %d -- `%s'"),
4906 idx + 1, str);
4907 else
4908 handler (_("invalid number of registers in the list; "
4909 "%d registers are expected at operand %d -- `%s'"),
4910 detail->data[0], idx + 1, str);
4911 break;
4912
4913 case AARCH64_OPDE_UNALIGNED:
4914 handler (_("immediate value must be a multiple of "
4915 "%d at operand %d -- `%s'"),
4916 detail->data[0], idx + 1, str);
4917 break;
4918
4919 default:
4920 gas_assert (0);
4921 break;
4922 }
4923 }
4924
4925 /* Process and output the error message about the operand mismatching.
4926
4927 When this function is called, the operand error information has
4928 been collected for an assembly line, and there may be multiple
4929 errors in the case of multiple instruction templates; output the
4930 error message that most closely describes the problem.
4931
4932 The errors to be printed can be filtered to show either all errors
4933 or only non-fatal errors. This distinction has to be made because
4934 the error buffer may already be filled with fatal errors, accumulated
4935 while trying the different instruction templates, that we do not want to print. */
4936
4937 static void
4938 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4939 {
4940 int largest_error_pos;
4941 const char *msg = NULL;
4942 enum aarch64_operand_error_kind kind;
4943 operand_error_record *curr;
4944 operand_error_record *head = operand_error_report.head;
4945 operand_error_record *record = NULL;
4946
4947 /* No error to report. */
4948 if (head == NULL)
4949 return;
4950
4951 gas_assert (head != NULL && operand_error_report.tail != NULL);
4952
4953 /* Only one error. */
4954 if (head == operand_error_report.tail)
4955 {
4956 /* If the only error is a non-fatal one and we don't want to print it,
4957 just exit. */
4958 if (!non_fatal_only || head->detail.non_fatal)
4959 {
4960 DEBUG_TRACE ("single opcode entry with error kind: %s",
4961 operand_mismatch_kind_names[head->detail.kind]);
4962 output_operand_error_record (head, str);
4963 }
4964 return;
4965 }
4966
4967 /* Find the error kind of the highest severity. */
4968 DEBUG_TRACE ("multiple opcode entries with error kind");
4969 kind = AARCH64_OPDE_NIL;
4970 for (curr = head; curr != NULL; curr = curr->next)
4971 {
4972 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4973 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4974 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4975 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4976 kind = curr->detail.kind;
4977 }
4978
4979 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4980
4981 /* Pick up one of errors of KIND to report. */
4982 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4983 for (curr = head; curr != NULL; curr = curr->next)
4984 {
4985 /* If we don't want to print non-fatal errors then don't consider them
4986 at all. */
4987 if (curr->detail.kind != kind
4988 || (non_fatal_only && !curr->detail.non_fatal))
4989 continue;
4990 /* If there are multiple errors, pick up the one with the highest
4991 mismatching operand index. In the case of multiple errors with
4992 the equally highest operand index, pick up the first one or the
4993 first one with non-NULL error message. */
4994 if (curr->detail.index > largest_error_pos
4995 || (curr->detail.index == largest_error_pos && msg == NULL
4996 && curr->detail.error != NULL))
4997 {
4998 largest_error_pos = curr->detail.index;
4999 record = curr;
5000 msg = record->detail.error;
5001 }
5002 }
5003
5004 /* The way errors are collected in the back-end is a bit non-intuitive. But
5005 essentially, because each operand template is tried recursively, you may
5006 always have errors collected from previously tried operands. These are
5007 usually skipped if there is one successful match. However, now that
5008 non-fatal errors exist, we have to ignore those previously collected hard
5009 errors when we're only interested in printing the non-fatal ones. This
5010 condition prevents us from printing errors that are not appropriate, since
5011 we did match a template, but the match also produced warnings to print. */
5012 if (non_fatal_only && !record)
5013 return;
5014
5015 gas_assert (largest_error_pos != -2 && record != NULL);
5016 DEBUG_TRACE ("Pick up error kind %s to report",
5017 operand_mismatch_kind_names[record->detail.kind]);
5018
5019 /* Output. */
5020 output_operand_error_record (record, str);
5021 }
5022 \f
5023 /* Write an AARCH64 instruction to buf - always little-endian. */
5024 static void
5025 put_aarch64_insn (char *buf, uint32_t insn)
5026 {
5027 unsigned char *where = (unsigned char *) buf;
5028 where[0] = insn;
5029 where[1] = insn >> 8;
5030 where[2] = insn >> 16;
5031 where[3] = insn >> 24;
5032 }
5033
5034 static uint32_t
5035 get_aarch64_insn (char *buf)
5036 {
5037 unsigned char *where = (unsigned char *) buf;
5038 uint32_t result;
5039 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5040 | ((uint32_t) where[3] << 24)));
5041 return result;
5042 }
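/* For example, the NOP encoding 0xd503201f is written to the buffer as
   the byte sequence 1f 20 03 d5 and read back identically above.  */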
5043
5044 static void
5045 output_inst (struct aarch64_inst *new_inst)
5046 {
5047 char *to = NULL;
5048
5049 to = frag_more (INSN_SIZE);
5050
5051 frag_now->tc_frag_data.recorded = 1;
5052
5053 put_aarch64_insn (to, inst.base.value);
5054
5055 if (inst.reloc.type != BFD_RELOC_UNUSED)
5056 {
5057 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5058 INSN_SIZE, &inst.reloc.exp,
5059 inst.reloc.pc_rel,
5060 inst.reloc.type);
5061 DEBUG_TRACE ("Prepared relocation fix up");
5062 /* Don't check the addend value against the instruction size,
5063 that's the job of our code in md_apply_fix(). */
5064 fixp->fx_no_overflow = 1;
5065 if (new_inst != NULL)
5066 fixp->tc_fix_data.inst = new_inst;
5067 if (aarch64_gas_internal_fixup_p ())
5068 {
5069 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5070 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5071 fixp->fx_addnumber = inst.reloc.flags;
5072 }
5073 }
5074
5075 dwarf2_emit_insn (INSN_SIZE);
5076 }
5077
5078 /* Link together opcodes of the same name. */
5079
5080 struct templates
5081 {
5082 aarch64_opcode *opcode;
5083 struct templates *next;
5084 };
5085
5086 typedef struct templates templates;
5087
5088 static templates *
5089 lookup_mnemonic (const char *start, int len)
5090 {
5091 templates *templ = NULL;
5092
5093 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5094 return templ;
5095 }
5096
5097 /* Subroutine of md_assemble, responsible for looking up the primary
5098 opcode from the mnemonic the user wrote. STR points to the
5099 beginning of the mnemonic. */
5100
5101 static templates *
5102 opcode_lookup (char **str)
5103 {
5104 char *end, *base, *dot;
5105 const aarch64_cond *cond;
5106 char condname[16];
5107 int len;
5108
5109 /* Scan up to the end of the mnemonic, which must end in white space,
5110 '.', or end of string. */
5111 dot = 0;
5112 for (base = end = *str; is_part_of_name(*end); end++)
5113 if (*end == '.' && !dot)
5114 dot = end;
5115
5116 if (end == base || dot == base)
5117 return 0;
5118
5119 inst.cond = COND_ALWAYS;
5120
5121 /* Handle a possible condition. */
5122 if (dot)
5123 {
5124 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5125 if (cond)
5126 {
5127 inst.cond = cond->value;
5128 *str = end;
5129 }
5130 else
5131 {
5132 *str = dot;
5133 return 0;
5134 }
5135 len = dot - base;
5136 }
5137 else
5138 {
5139 *str = end;
5140 len = end - base;
5141 }
5142
5143 if (inst.cond == COND_ALWAYS)
5144 {
5145 /* Look for unaffixed mnemonic. */
5146 return lookup_mnemonic (base, len);
5147 }
5148 else if (len <= 13)
5149 {
5150 /* Append ".c" to the mnemonic if it is conditional. */
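/* For example, "b.eq" is split above into the base "b" and the condition
   "eq"; the lookup is then performed on the two-character-suffixed key
   "b.c" of length 3.  */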
5151 memcpy (condname, base, len);
5152 memcpy (condname + len, ".c", 2);
5153 base = condname;
5154 len += 2;
5155 return lookup_mnemonic (base, len);
5156 }
5157
5158 return NULL;
5159 }
5160
5161 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5162 to a corresponding operand qualifier. */
5163
5164 static inline aarch64_opnd_qualifier_t
5165 vectype_to_qualifier (const struct vector_type_el *vectype)
5166 {
5167 /* Element size in bytes indexed by vector_el_type. */
5168 const unsigned char ele_size[5]
5169 = {1, 2, 4, 8, 16};
5170 const unsigned int ele_base [5] =
5171 {
5172 AARCH64_OPND_QLF_V_4B,
5173 AARCH64_OPND_QLF_V_2H,
5174 AARCH64_OPND_QLF_V_2S,
5175 AARCH64_OPND_QLF_V_1D,
5176 AARCH64_OPND_QLF_V_1Q
5177 };
5178
5179 if (!vectype->defined || vectype->type == NT_invtype)
5180 goto vectype_conversion_fail;
5181
5182 if (vectype->type == NT_zero)
5183 return AARCH64_OPND_QLF_P_Z;
5184 if (vectype->type == NT_merge)
5185 return AARCH64_OPND_QLF_P_M;
5186
5187 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5188
5189 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5190 {
5191 /* Special case S_4B. */
5192 if (vectype->type == NT_b && vectype->width == 4)
5193 return AARCH64_OPND_QLF_S_4B;
5194
5195 /* Special case S_2H. */
5196 if (vectype->type == NT_h && vectype->width == 2)
5197 return AARCH64_OPND_QLF_S_2H;
5198
5199 /* Vector element register. */
5200 return AARCH64_OPND_QLF_S_B + vectype->type;
5201 }
5202 else
5203 {
5204 /* Vector register. */
5205 int reg_size = ele_size[vectype->type] * vectype->width;
5206 unsigned offset;
5207 unsigned shift;
5208 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5209 goto vectype_conversion_fail;
5210
5211 /* The conversion is done by calculating the offset from the base operand
5212 qualifier for the vector type. The operand qualifiers are regular
5213 enough that the offset can be established by shifting the vector width by
5214 a vector-type dependent amount. */
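/* For instance, the arrangement .4S (type NT_s, width 4) uses shift 2,
   so the offset is ele_base[NT_s] + (4 >> 2), one step past
   AARCH64_OPND_QLF_V_2S; this relies on the V_* qualifiers being
   declared consecutively in the libopcodes enum.  */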
5215 shift = 0;
5216 if (vectype->type == NT_b)
5217 shift = 3;
5218 else if (vectype->type == NT_h || vectype->type == NT_s)
5219 shift = 2;
5220 else if (vectype->type >= NT_d)
5221 shift = 1;
5222 else
5223 gas_assert (0);
5224
5225 offset = ele_base [vectype->type] + (vectype->width >> shift);
5226 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5227 && offset <= AARCH64_OPND_QLF_V_1Q);
5228 return offset;
5229 }
5230
5231 vectype_conversion_fail:
5232 first_error (_("bad vector arrangement type"));
5233 return AARCH64_OPND_QLF_NIL;
5234 }
5235
5236 /* Process an optional operand that has been omitted from the assembly line.
5237 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5238 instruction's opcode entry while IDX is the index of this omitted operand.
5239 */
5240
5241 static void
5242 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5243 int idx, aarch64_opnd_info *operand)
5244 {
5245 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5246 gas_assert (optional_operand_p (opcode, idx));
5247 gas_assert (!operand->present);
5248
5249 switch (type)
5250 {
5251 case AARCH64_OPND_Rd:
5252 case AARCH64_OPND_Rn:
5253 case AARCH64_OPND_Rm:
5254 case AARCH64_OPND_Rt:
5255 case AARCH64_OPND_Rt2:
5256 case AARCH64_OPND_Rt_SP:
5257 case AARCH64_OPND_Rs:
5258 case AARCH64_OPND_Ra:
5259 case AARCH64_OPND_Rt_SYS:
5260 case AARCH64_OPND_Rd_SP:
5261 case AARCH64_OPND_Rn_SP:
5262 case AARCH64_OPND_Rm_SP:
5263 case AARCH64_OPND_Fd:
5264 case AARCH64_OPND_Fn:
5265 case AARCH64_OPND_Fm:
5266 case AARCH64_OPND_Fa:
5267 case AARCH64_OPND_Ft:
5268 case AARCH64_OPND_Ft2:
5269 case AARCH64_OPND_Sd:
5270 case AARCH64_OPND_Sn:
5271 case AARCH64_OPND_Sm:
5272 case AARCH64_OPND_Va:
5273 case AARCH64_OPND_Vd:
5274 case AARCH64_OPND_Vn:
5275 case AARCH64_OPND_Vm:
5276 case AARCH64_OPND_VdD1:
5277 case AARCH64_OPND_VnD1:
5278 operand->reg.regno = default_value;
5279 break;
5280
5281 case AARCH64_OPND_Ed:
5282 case AARCH64_OPND_En:
5283 case AARCH64_OPND_Em:
5284 case AARCH64_OPND_Em16:
5285 case AARCH64_OPND_SM3_IMM2:
5286 operand->reglane.regno = default_value;
5287 break;
5288
5289 case AARCH64_OPND_IDX:
5290 case AARCH64_OPND_BIT_NUM:
5291 case AARCH64_OPND_IMMR:
5292 case AARCH64_OPND_IMMS:
5293 case AARCH64_OPND_SHLL_IMM:
5294 case AARCH64_OPND_IMM_VLSL:
5295 case AARCH64_OPND_IMM_VLSR:
5296 case AARCH64_OPND_CCMP_IMM:
5297 case AARCH64_OPND_FBITS:
5298 case AARCH64_OPND_UIMM4:
5299 case AARCH64_OPND_UIMM3_OP1:
5300 case AARCH64_OPND_UIMM3_OP2:
5301 case AARCH64_OPND_IMM:
5302 case AARCH64_OPND_IMM_2:
5303 case AARCH64_OPND_WIDTH:
5304 case AARCH64_OPND_UIMM7:
5305 case AARCH64_OPND_NZCV:
5306 case AARCH64_OPND_SVE_PATTERN:
5307 case AARCH64_OPND_SVE_PRFOP:
5308 operand->imm.value = default_value;
5309 break;
5310
5311 case AARCH64_OPND_SVE_PATTERN_SCALED:
5312 operand->imm.value = default_value;
5313 operand->shifter.kind = AARCH64_MOD_MUL;
5314 operand->shifter.amount = 1;
5315 break;
5316
5317 case AARCH64_OPND_EXCEPTION:
5318 inst.reloc.type = BFD_RELOC_UNUSED;
5319 break;
5320
5321 case AARCH64_OPND_BARRIER_ISB:
5322 operand->barrier = aarch64_barrier_options + default_value;
5323 break;
5324
5325 case AARCH64_OPND_BTI_TARGET:
5326 operand->hint_option = aarch64_hint_options + default_value;
5327 break;
5328
5329 default:
5330 break;
5331 }
5332 }
5333
5334 /* Process the relocation type for move wide instructions.
5335 Return TRUE on success; otherwise return FALSE. */
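/* As a sketch of the common case: "movz x0, #:abs_g1:sym" reaches this
   point with inst.reloc.type == BFD_RELOC_AARCH64_MOVW_G1, so the switch
   below selects a shift amount of 16 for the operand.  */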
5336
5337 static bfd_boolean
5338 process_movw_reloc_info (void)
5339 {
5340 int is32;
5341 unsigned shift;
5342
5343 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5344
5345 if (inst.base.opcode->op == OP_MOVK)
5346 switch (inst.reloc.type)
5347 {
5348 case BFD_RELOC_AARCH64_MOVW_G0_S:
5349 case BFD_RELOC_AARCH64_MOVW_G1_S:
5350 case BFD_RELOC_AARCH64_MOVW_G2_S:
5351 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5352 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5353 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5354 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5355 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5356 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5357 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5358 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5359 set_syntax_error
5360 (_("the specified relocation type is not allowed for MOVK"));
5361 return FALSE;
5362 default:
5363 break;
5364 }
5365
5366 switch (inst.reloc.type)
5367 {
5368 case BFD_RELOC_AARCH64_MOVW_G0:
5369 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5370 case BFD_RELOC_AARCH64_MOVW_G0_S:
5371 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5372 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5373 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5374 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5375 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5376 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5377 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5378 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5379 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5380 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5381 shift = 0;
5382 break;
5383 case BFD_RELOC_AARCH64_MOVW_G1:
5384 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5385 case BFD_RELOC_AARCH64_MOVW_G1_S:
5386 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5387 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5388 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5389 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5390 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5391 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5392 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5393 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5394 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5395 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5396 shift = 16;
5397 break;
5398 case BFD_RELOC_AARCH64_MOVW_G2:
5399 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5400 case BFD_RELOC_AARCH64_MOVW_G2_S:
5401 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5402 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5403 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5404 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5405 if (is32)
5406 {
5407 set_fatal_syntax_error
5408 (_("the specified relocation type is not allowed for 32-bit "
5409 "register"));
5410 return FALSE;
5411 }
5412 shift = 32;
5413 break;
5414 case BFD_RELOC_AARCH64_MOVW_G3:
5415 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5416 if (is32)
5417 {
5418 set_fatal_syntax_error
5419 (_("the specified relocation type is not allowed for 32-bit "
5420 "register"));
5421 return FALSE;
5422 }
5423 shift = 48;
5424 break;
5425 default:
5426 /* More cases should be added when more MOVW-related relocation types
5427 are supported in GAS. */
5428 gas_assert (aarch64_gas_internal_fixup_p ());
5429 /* The shift amount should have already been set by the parser. */
5430 return TRUE;
5431 }
5432 inst.base.operands[1].shifter.amount = shift;
5433 return TRUE;
5434 }
5435
5436 /* A primitive log calculator. */
5437
5438 static inline unsigned int
5439 get_logsz (unsigned int size)
5440 {
5441 const unsigned char ls[16] =
5442 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5443 if (size > 16)
5444 {
5445 gas_assert (0);
5446 return -1;
5447 }
5448 gas_assert (ls[size - 1] != (unsigned char)-1);
5449 return ls[size - 1];
5450 }
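/* E.g. get_logsz (4) == 2 and get_logsz (16) == 4; sizes that are not a
   power of two trip the assertion above.  */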
5451
5452 /* Determine and return the real reloc type code for an instruction
5453 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5454
5455 static inline bfd_reloc_code_real_type
5456 ldst_lo12_determine_real_reloc_type (void)
5457 {
5458 unsigned logsz;
5459 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5460 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5461
5462 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5463 {
5464 BFD_RELOC_AARCH64_LDST8_LO12,
5465 BFD_RELOC_AARCH64_LDST16_LO12,
5466 BFD_RELOC_AARCH64_LDST32_LO12,
5467 BFD_RELOC_AARCH64_LDST64_LO12,
5468 BFD_RELOC_AARCH64_LDST128_LO12
5469 },
5470 {
5471 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5472 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5473 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5474 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5475 BFD_RELOC_AARCH64_NONE
5476 },
5477 {
5478 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5479 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5480 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5481 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5482 BFD_RELOC_AARCH64_NONE
5483 },
5484 {
5485 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5486 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5487 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5488 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5489 BFD_RELOC_AARCH64_NONE
5490 },
5491 {
5492 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5493 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5494 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5495 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5496 BFD_RELOC_AARCH64_NONE
5497 }
5498 };
5499
5500 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5501 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5502 || (inst.reloc.type
5503 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5504 || (inst.reloc.type
5505 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5506 || (inst.reloc.type
5507 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5508 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5509
5510 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5511 opd1_qlf =
5512 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5513 1, opd0_qlf, 0);
5514 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5515
5516 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5517 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5518 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5519 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5520 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5521 gas_assert (logsz <= 3);
5522 else
5523 gas_assert (logsz <= 4);
5524
5525 /* In reloc.c, these pseudo relocation types should be defined in the same
5526 order as the rows of the reloc_ldst_lo12 array above, because the array
5527 index calculation below relies on this. */
5528 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5529 }
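/* For example, "ldr w0, [x1, #:lo12:sym]" transfers 4 bytes, so logsz is
   2 and the pseudo type BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST32_LO12 by the table lookup above.  */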
5530
5531 /* Check whether a register list REGINFO is valid. The registers must be
5532 numbered in increasing order (modulo 32), in increments of one or two.
5533
5534 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5535 increments of two.
5536
5537 Return FALSE if such a register list is invalid, otherwise return TRUE. */
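/* A sketch of the packed REGINFO layout consumed here: bits [1:0] hold
   the number of registers minus one and each subsequent 5-bit field holds
   a register number, lowest first; e.g. the list { v2, v3, v4 } is
   (4 << 12) | (3 << 7) | (2 << 2) | 2 = 0x418a.  */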
5538
5539 static bfd_boolean
5540 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5541 {
5542 uint32_t i, nb_regs, prev_regno, incr;
5543
5544 nb_regs = 1 + (reginfo & 0x3);
5545 reginfo >>= 2;
5546 prev_regno = reginfo & 0x1f;
5547 incr = accept_alternate ? 2 : 1;
5548
5549 for (i = 1; i < nb_regs; ++i)
5550 {
5551 uint32_t curr_regno;
5552 reginfo >>= 5;
5553 curr_regno = reginfo & 0x1f;
5554 if (curr_regno != ((prev_regno + incr) & 0x1f))
5555 return FALSE;
5556 prev_regno = curr_regno;
5557 }
5558
5559 return TRUE;
5560 }
5561
5562 /* Generic instruction operand parser. This does no encoding and no
5563 semantic validation; it merely squirrels values away in the inst
5564 structure. Returns TRUE or FALSE depending on whether the
5565 specified grammar matched. */
5566
5567 static bfd_boolean
5568 parse_operands (char *str, const aarch64_opcode *opcode)
5569 {
5570 int i;
5571 char *backtrack_pos = 0;
5572 const enum aarch64_opnd *operands = opcode->operands;
5573 aarch64_reg_type imm_reg_type;
5574
5575 clear_error ();
5576 skip_whitespace (str);
5577
5578 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5579 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5580 else
5581 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5582
5583 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5584 {
5585 int64_t val;
5586 const reg_entry *reg;
5587 int comma_skipped_p = 0;
5588 aarch64_reg_type rtype;
5589 struct vector_type_el vectype;
5590 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5591 aarch64_opnd_info *info = &inst.base.operands[i];
5592 aarch64_reg_type reg_type;
5593
5594 DEBUG_TRACE ("parse operand %d", i);
5595
5596 /* Assign the operand code. */
5597 info->type = operands[i];
5598
5599 if (optional_operand_p (opcode, i))
5600 {
5601 /* Remember where we are in case we need to backtrack. */
5602 gas_assert (!backtrack_pos);
5603 backtrack_pos = str;
5604 }
5605
5606 /* Expect comma between operands; the backtrack mechanism will take
5607 care of cases of omitted optional operand. */
5608 if (i > 0 && ! skip_past_char (&str, ','))
5609 {
5610 set_syntax_error (_("comma expected between operands"));
5611 goto failure;
5612 }
5613 else
5614 comma_skipped_p = 1;
5615
5616 switch (operands[i])
5617 {
5618 case AARCH64_OPND_Rd:
5619 case AARCH64_OPND_Rn:
5620 case AARCH64_OPND_Rm:
5621 case AARCH64_OPND_Rt:
5622 case AARCH64_OPND_Rt2:
5623 case AARCH64_OPND_Rs:
5624 case AARCH64_OPND_Ra:
5625 case AARCH64_OPND_Rt_SYS:
5626 case AARCH64_OPND_PAIRREG:
5627 case AARCH64_OPND_SVE_Rm:
5628 po_int_reg_or_fail (REG_TYPE_R_Z);
5629 break;
5630
5631 case AARCH64_OPND_Rd_SP:
5632 case AARCH64_OPND_Rn_SP:
5633 case AARCH64_OPND_Rt_SP:
5634 case AARCH64_OPND_SVE_Rn_SP:
5635 case AARCH64_OPND_Rm_SP:
5636 po_int_reg_or_fail (REG_TYPE_R_SP);
5637 break;
5638
5639 case AARCH64_OPND_Rm_EXT:
5640 case AARCH64_OPND_Rm_SFT:
5641 po_misc_or_fail (parse_shifter_operand
5642 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5643 ? SHIFTED_ARITH_IMM
5644 : SHIFTED_LOGIC_IMM)));
5645 if (!info->shifter.operator_present)
5646 {
5647 /* Default to LSL if not present. Libopcodes prefers shifter
5648 kind to be explicit. */
5649 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5650 info->shifter.kind = AARCH64_MOD_LSL;
5651 /* For Rm_EXT, libopcodes will carry out further check on whether
5652 or not stack pointer is used in the instruction (Recall that
5653 "the extend operator is not optional unless at least one of
5654 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5655 }
5656 break;
5657
5658 case AARCH64_OPND_Fd:
5659 case AARCH64_OPND_Fn:
5660 case AARCH64_OPND_Fm:
5661 case AARCH64_OPND_Fa:
5662 case AARCH64_OPND_Ft:
5663 case AARCH64_OPND_Ft2:
5664 case AARCH64_OPND_Sd:
5665 case AARCH64_OPND_Sn:
5666 case AARCH64_OPND_Sm:
5667 case AARCH64_OPND_SVE_VZn:
5668 case AARCH64_OPND_SVE_Vd:
5669 case AARCH64_OPND_SVE_Vm:
5670 case AARCH64_OPND_SVE_Vn:
5671 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5672 if (val == PARSE_FAIL)
5673 {
5674 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5675 goto failure;
5676 }
5677 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5678
5679 info->reg.regno = val;
5680 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5681 break;
5682
5683 case AARCH64_OPND_SVE_Pd:
5684 case AARCH64_OPND_SVE_Pg3:
5685 case AARCH64_OPND_SVE_Pg4_5:
5686 case AARCH64_OPND_SVE_Pg4_10:
5687 case AARCH64_OPND_SVE_Pg4_16:
5688 case AARCH64_OPND_SVE_Pm:
5689 case AARCH64_OPND_SVE_Pn:
5690 case AARCH64_OPND_SVE_Pt:
5691 reg_type = REG_TYPE_PN;
5692 goto vector_reg;
5693
5694 case AARCH64_OPND_SVE_Za_5:
5695 case AARCH64_OPND_SVE_Za_16:
5696 case AARCH64_OPND_SVE_Zd:
5697 case AARCH64_OPND_SVE_Zm_5:
5698 case AARCH64_OPND_SVE_Zm_16:
5699 case AARCH64_OPND_SVE_Zn:
5700 case AARCH64_OPND_SVE_Zt:
5701 reg_type = REG_TYPE_ZN;
5702 goto vector_reg;
5703
5704 case AARCH64_OPND_Va:
5705 case AARCH64_OPND_Vd:
5706 case AARCH64_OPND_Vn:
5707 case AARCH64_OPND_Vm:
5708 reg_type = REG_TYPE_VN;
5709 vector_reg:
5710 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5711 if (val == PARSE_FAIL)
5712 {
5713 first_error (_(get_reg_expected_msg (reg_type)));
5714 goto failure;
5715 }
5716 if (vectype.defined & NTA_HASINDEX)
5717 goto failure;
5718
5719 info->reg.regno = val;
5720 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5721 && vectype.type == NT_invtype)
5722 /* Unqualified Pn and Zn registers are allowed in certain
5723 contexts. Rely on F_STRICT qualifier checking to catch
5724 invalid uses. */
5725 info->qualifier = AARCH64_OPND_QLF_NIL;
5726 else
5727 {
5728 info->qualifier = vectype_to_qualifier (&vectype);
5729 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5730 goto failure;
5731 }
5732 break;
5733
5734 case AARCH64_OPND_VdD1:
5735 case AARCH64_OPND_VnD1:
5736 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5737 if (val == PARSE_FAIL)
5738 {
5739 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5740 goto failure;
5741 }
5742 if (vectype.type != NT_d || vectype.index != 1)
5743 {
5744 set_fatal_syntax_error
5745 (_("the top half of a 128-bit FP/SIMD register is expected"));
5746 goto failure;
5747 }
5748 info->reg.regno = val;
5749 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5750 here; it is correct for the purpose of encoding/decoding since
5751 only the register number is explicitly encoded in the related
5752 instructions, although this appears a bit hacky. */
5753 info->qualifier = AARCH64_OPND_QLF_S_D;
5754 break;
5755
5756 case AARCH64_OPND_SVE_Zm3_INDEX:
5757 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5758 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5759 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5760 case AARCH64_OPND_SVE_Zm4_INDEX:
5761 case AARCH64_OPND_SVE_Zn_INDEX:
5762 reg_type = REG_TYPE_ZN;
5763 goto vector_reg_index;
5764
5765 case AARCH64_OPND_Ed:
5766 case AARCH64_OPND_En:
5767 case AARCH64_OPND_Em:
5768 case AARCH64_OPND_Em16:
5769 case AARCH64_OPND_SM3_IMM2:
5770 reg_type = REG_TYPE_VN;
5771 vector_reg_index:
5772 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5773 if (val == PARSE_FAIL)
5774 {
5775 first_error (_(get_reg_expected_msg (reg_type)));
5776 goto failure;
5777 }
5778 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5779 goto failure;
5780
5781 info->reglane.regno = val;
5782 info->reglane.index = vectype.index;
5783 info->qualifier = vectype_to_qualifier (&vectype);
5784 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5785 goto failure;
5786 break;
5787
5788 case AARCH64_OPND_SVE_ZnxN:
5789 case AARCH64_OPND_SVE_ZtxN:
5790 reg_type = REG_TYPE_ZN;
5791 goto vector_reg_list;
5792
5793 case AARCH64_OPND_LVn:
5794 case AARCH64_OPND_LVt:
5795 case AARCH64_OPND_LVt_AL:
5796 case AARCH64_OPND_LEt:
5797 reg_type = REG_TYPE_VN;
5798 vector_reg_list:
5799 if (reg_type == REG_TYPE_ZN
5800 && get_opcode_dependent_value (opcode) == 1
5801 && *str != '{')
5802 {
5803 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5804 if (val == PARSE_FAIL)
5805 {
5806 first_error (_(get_reg_expected_msg (reg_type)));
5807 goto failure;
5808 }
5809 info->reglist.first_regno = val;
5810 info->reglist.num_regs = 1;
5811 }
5812 else
5813 {
5814 val = parse_vector_reg_list (&str, reg_type, &vectype);
5815 if (val == PARSE_FAIL)
5816 goto failure;
5817
5818 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5819 {
5820 set_fatal_syntax_error (_("invalid register list"));
5821 goto failure;
5822 }
5823
5824 if (vectype.width != 0 && *str != ',')
5825 {
5826 set_fatal_syntax_error
5827 (_("expected element type rather than vector type"));
5828 goto failure;
5829 }
5830
5831 info->reglist.first_regno = (val >> 2) & 0x1f;
5832 info->reglist.num_regs = (val & 0x3) + 1;
5833 }
5834 if (operands[i] == AARCH64_OPND_LEt)
5835 {
5836 if (!(vectype.defined & NTA_HASINDEX))
5837 goto failure;
5838 info->reglist.has_index = 1;
5839 info->reglist.index = vectype.index;
5840 }
5841 else
5842 {
5843 if (vectype.defined & NTA_HASINDEX)
5844 goto failure;
5845 if (!(vectype.defined & NTA_HASTYPE))
5846 {
5847 if (reg_type == REG_TYPE_ZN)
5848 set_fatal_syntax_error (_("missing type suffix"));
5849 goto failure;
5850 }
5851 }
5852 info->qualifier = vectype_to_qualifier (&vectype);
5853 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5854 goto failure;
5855 break;
5856
5857 case AARCH64_OPND_CRn:
5858 case AARCH64_OPND_CRm:
5859 {
5860 char prefix = *(str++);
5861 if (prefix != 'c' && prefix != 'C')
5862 goto failure;
5863
5864 po_imm_nc_or_fail ();
5865 if (val > 15)
5866 {
5867 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5868 goto failure;
5869 }
5870 info->qualifier = AARCH64_OPND_QLF_CR;
5871 info->imm.value = val;
5872 break;
5873 }
5874
5875 case AARCH64_OPND_SHLL_IMM:
5876 case AARCH64_OPND_IMM_VLSR:
5877 po_imm_or_fail (1, 64);
5878 info->imm.value = val;
5879 break;
5880
5881 case AARCH64_OPND_CCMP_IMM:
5882 case AARCH64_OPND_SIMM5:
5883 case AARCH64_OPND_FBITS:
5884 case AARCH64_OPND_TME_UIMM16:
5885 case AARCH64_OPND_UIMM4:
5886 case AARCH64_OPND_UIMM4_ADDG:
5887 case AARCH64_OPND_UIMM10:
5888 case AARCH64_OPND_UIMM3_OP1:
5889 case AARCH64_OPND_UIMM3_OP2:
5890 case AARCH64_OPND_IMM_VLSL:
5891 case AARCH64_OPND_IMM:
5892 case AARCH64_OPND_IMM_2:
5893 case AARCH64_OPND_WIDTH:
5894 case AARCH64_OPND_SVE_INV_LIMM:
5895 case AARCH64_OPND_SVE_LIMM:
5896 case AARCH64_OPND_SVE_LIMM_MOV:
5897 case AARCH64_OPND_SVE_SHLIMM_PRED:
5898 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5899 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5900 case AARCH64_OPND_SVE_SHRIMM_PRED:
5901 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5902 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5903 case AARCH64_OPND_SVE_SIMM5:
5904 case AARCH64_OPND_SVE_SIMM5B:
5905 case AARCH64_OPND_SVE_SIMM6:
5906 case AARCH64_OPND_SVE_SIMM8:
5907 case AARCH64_OPND_SVE_UIMM3:
5908 case AARCH64_OPND_SVE_UIMM7:
5909 case AARCH64_OPND_SVE_UIMM8:
5910 case AARCH64_OPND_SVE_UIMM8_53:
5911 case AARCH64_OPND_IMM_ROT1:
5912 case AARCH64_OPND_IMM_ROT2:
5913 case AARCH64_OPND_IMM_ROT3:
5914 case AARCH64_OPND_SVE_IMM_ROT1:
5915 case AARCH64_OPND_SVE_IMM_ROT2:
5916 case AARCH64_OPND_SVE_IMM_ROT3:
5917 po_imm_nc_or_fail ();
5918 info->imm.value = val;
5919 break;
5920
5921 case AARCH64_OPND_SVE_AIMM:
5922 case AARCH64_OPND_SVE_ASIMM:
5923 po_imm_nc_or_fail ();
5924 info->imm.value = val;
5925 skip_whitespace (str);
5926 if (skip_past_comma (&str))
5927 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5928 else
5929 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5930 break;
5931
5932 case AARCH64_OPND_SVE_PATTERN:
5933 po_enum_or_fail (aarch64_sve_pattern_array);
5934 info->imm.value = val;
5935 break;
5936
5937 case AARCH64_OPND_SVE_PATTERN_SCALED:
5938 po_enum_or_fail (aarch64_sve_pattern_array);
5939 info->imm.value = val;
5940 if (skip_past_comma (&str)
5941 && !parse_shift (&str, info, SHIFTED_MUL))
5942 goto failure;
5943 if (!info->shifter.operator_present)
5944 {
5945 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5946 info->shifter.kind = AARCH64_MOD_MUL;
5947 info->shifter.amount = 1;
5948 }
5949 break;
5950
5951 case AARCH64_OPND_SVE_PRFOP:
5952 po_enum_or_fail (aarch64_sve_prfop_array);
5953 info->imm.value = val;
5954 break;
5955
5956 case AARCH64_OPND_UIMM7:
5957 po_imm_or_fail (0, 127);
5958 info->imm.value = val;
5959 break;
5960
5961 case AARCH64_OPND_IDX:
5962 case AARCH64_OPND_MASK:
5963 case AARCH64_OPND_BIT_NUM:
5964 case AARCH64_OPND_IMMR:
5965 case AARCH64_OPND_IMMS:
5966 po_imm_or_fail (0, 63);
5967 info->imm.value = val;
5968 break;
5969
5970 case AARCH64_OPND_IMM0:
5971 po_imm_nc_or_fail ();
5972 if (val != 0)
5973 {
5974 set_fatal_syntax_error (_("immediate zero expected"));
5975 goto failure;
5976 }
5977 info->imm.value = 0;
5978 break;
5979
5980 case AARCH64_OPND_FPIMM0:
5981 {
5982 int qfloat;
5983 bfd_boolean res1 = FALSE, res2 = FALSE;
5984 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5985 it is probably not worth the effort to support it. */
5986 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5987 imm_reg_type))
5988 && (error_p ()
5989 || !(res2 = parse_constant_immediate (&str, &val,
5990 imm_reg_type))))
5991 goto failure;
5992 if ((res1 && qfloat == 0) || (res2 && val == 0))
5993 {
5994 info->imm.value = 0;
5995 info->imm.is_fp = 1;
5996 break;
5997 }
5998 set_fatal_syntax_error (_("immediate zero expected"));
5999 goto failure;
6000 }
6001
6002 case AARCH64_OPND_IMM_MOV:
6003 {
6004 char *saved = str;
6005 if (reg_name_p (str, REG_TYPE_R_Z_SP)
6006 || reg_name_p (str, REG_TYPE_VN))
6007 goto failure;
6008 str = saved;
6009 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6010 GE_OPT_PREFIX, 1));
6011 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6012 later. fix_mov_imm_insn will try to determine a machine
6013 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6014 message if the immediate cannot be moved by a single
6015 instruction. */
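/* Illustrative examples (not from the original source): "mov x0, #0x10000"
   ends up as a MOVZ, "mov x0, #-1" as a MOVN, and
   "mov x0, #0xff00ff00ff00ff00" as an ORR with a bitmask immediate; a value
   reachable by none of these forms is rejected at fix-up time.  */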
6016 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6017 inst.base.operands[i].skip = 1;
6018 }
6019 break;
6020
6021 case AARCH64_OPND_SIMD_IMM:
6022 case AARCH64_OPND_SIMD_IMM_SFT:
6023 if (! parse_big_immediate (&str, &val, imm_reg_type))
6024 goto failure;
6025 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6026 /* addr_off_p */ 0,
6027 /* need_libopcodes_p */ 1,
6028 /* skip_p */ 1);
6029 /* Parse shift.
6030 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6031 shift, we don't check it here; we leave the checking to
6032 the libopcodes (operand_general_constraint_met_p). By
6033 doing this, we achieve better diagnostics. */
6034 if (skip_past_comma (&str)
6035 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6036 goto failure;
6037 if (!info->shifter.operator_present
6038 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6039 {
6040 /* Default to LSL if not present. Libopcodes prefers shifter
6041 kind to be explicit. */
6042 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6043 info->shifter.kind = AARCH64_MOD_LSL;
6044 }
6045 break;
6046
6047 case AARCH64_OPND_FPIMM:
6048 case AARCH64_OPND_SIMD_FPIMM:
6049 case AARCH64_OPND_SVE_FPIMM8:
6050 {
6051 int qfloat;
6052 bfd_boolean dp_p;
6053
6054 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6055 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6056 || !aarch64_imm_float_p (qfloat))
6057 {
6058 if (!error_p ())
6059 set_fatal_syntax_error (_("invalid floating-point"
6060 " constant"));
6061 goto failure;
6062 }
6063 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6064 inst.base.operands[i].imm.is_fp = 1;
6065 }
6066 break;
6067
6068 case AARCH64_OPND_SVE_I1_HALF_ONE:
6069 case AARCH64_OPND_SVE_I1_HALF_TWO:
6070 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6071 {
6072 int qfloat;
6073 bfd_boolean dp_p;
6074
6075 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6076 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6077 {
6078 if (!error_p ())
6079 set_fatal_syntax_error (_("invalid floating-point"
6080 " constant"));
6081 goto failure;
6082 }
6083 inst.base.operands[i].imm.value = qfloat;
6084 inst.base.operands[i].imm.is_fp = 1;
6085 }
6086 break;
6087
6088 case AARCH64_OPND_LIMM:
6089 po_misc_or_fail (parse_shifter_operand (&str, info,
6090 SHIFTED_LOGIC_IMM));
6091 if (info->shifter.operator_present)
6092 {
6093 set_fatal_syntax_error
6094 (_("shift not allowed for bitmask immediate"));
6095 goto failure;
6096 }
6097 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6098 /* addr_off_p */ 0,
6099 /* need_libopcodes_p */ 1,
6100 /* skip_p */ 1);
6101 break;
6102
6103 case AARCH64_OPND_AIMM:
6104 if (opcode->op == OP_ADD)
6105 /* ADD may have relocation types. */
6106 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6107 SHIFTED_ARITH_IMM));
6108 else
6109 po_misc_or_fail (parse_shifter_operand (&str, info,
6110 SHIFTED_ARITH_IMM));
6111 switch (inst.reloc.type)
6112 {
6113 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6114 info->shifter.amount = 12;
6115 break;
6116 case BFD_RELOC_UNUSED:
6117 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6118 if (info->shifter.kind != AARCH64_MOD_NONE)
6119 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6120 inst.reloc.pc_rel = 0;
6121 break;
6122 default:
6123 break;
6124 }
6125 info->imm.value = 0;
6126 if (!info->shifter.operator_present)
6127 {
6128 /* Default to LSL if not present. Libopcodes prefers shifter
6129 kind to be explicit. */
6130 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6131 info->shifter.kind = AARCH64_MOD_LSL;
6132 }
6133 break;
6134
6135 case AARCH64_OPND_HALF:
6136 {
6137 /* #<imm16> or relocation. */
6138 int internal_fixup_p;
6139 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6140 if (internal_fixup_p)
6141 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6142 skip_whitespace (str);
6143 if (skip_past_comma (&str))
6144 {
6145 /* {, LSL #<shift>} */
6146 if (! aarch64_gas_internal_fixup_p ())
6147 {
6148 set_fatal_syntax_error (_("can't mix relocation modifier "
6149 "with explicit shift"));
6150 goto failure;
6151 }
6152 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6153 }
6154 else
6155 inst.base.operands[i].shifter.amount = 0;
6156 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6157 inst.base.operands[i].imm.value = 0;
6158 if (! process_movw_reloc_info ())
6159 goto failure;
6160 }
6161 break;
6162
6163 case AARCH64_OPND_EXCEPTION:
6164 case AARCH64_OPND_UNDEFINED:
6165 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6166 imm_reg_type));
6167 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6168 /* addr_off_p */ 0,
6169 /* need_libopcodes_p */ 0,
6170 /* skip_p */ 1);
6171 break;
6172
6173 case AARCH64_OPND_NZCV:
6174 {
6175 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6176 if (nzcv != NULL)
6177 {
6178 str += 4;
6179 info->imm.value = nzcv->value;
6180 break;
6181 }
6182 po_imm_or_fail (0, 15);
6183 info->imm.value = val;
6184 }
6185 break;
6186
6187 case AARCH64_OPND_COND:
6188 case AARCH64_OPND_COND1:
6189 {
6190 char *start = str;
6191 do
6192 str++;
6193 while (ISALPHA (*str));
6194 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6195 if (info->cond == NULL)
6196 {
6197 set_syntax_error (_("invalid condition"));
6198 goto failure;
6199 }
6200 else if (operands[i] == AARCH64_OPND_COND1
6201 && (info->cond->value & 0xe) == 0xe)
6202 {
6203 /* Do not allow AL or NV. */
6204 set_default_error ();
6205 goto failure;
6206 }
6207 }
6208 break;
6209
6210 case AARCH64_OPND_ADDR_ADRP:
6211 po_misc_or_fail (parse_adrp (&str));
6212 /* Clear the value as the operand needs to be relocated. */
6213 info->imm.value = 0;
6214 break;
6215
6216 case AARCH64_OPND_ADDR_PCREL14:
6217 case AARCH64_OPND_ADDR_PCREL19:
6218 case AARCH64_OPND_ADDR_PCREL21:
6219 case AARCH64_OPND_ADDR_PCREL26:
6220 po_misc_or_fail (parse_address (&str, info));
6221 if (!info->addr.pcrel)
6222 {
6223 set_syntax_error (_("invalid pc-relative address"));
6224 goto failure;
6225 }
6226 if (inst.gen_lit_pool
6227 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6228 {
6229 /* Only permit "=value" in the literal load instructions.
6230 The literal will be generated by programmer_friendly_fixup. */
6231 set_syntax_error (_("invalid use of \"=immediate\""));
6232 goto failure;
6233 }
6234 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6235 {
6236 set_syntax_error (_("unrecognized relocation suffix"));
6237 goto failure;
6238 }
6239 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6240 {
6241 info->imm.value = inst.reloc.exp.X_add_number;
6242 inst.reloc.type = BFD_RELOC_UNUSED;
6243 }
6244 else
6245 {
6246 info->imm.value = 0;
6247 if (inst.reloc.type == BFD_RELOC_UNUSED)
6248 switch (opcode->iclass)
6249 {
6250 case compbranch:
6251 case condbranch:
6252 /* e.g. CBZ or B.COND */
6253 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6254 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6255 break;
6256 case testbranch:
6257 /* e.g. TBZ */
6258 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6259 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6260 break;
6261 case branch_imm:
6262 /* e.g. B or BL */
6263 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6264 inst.reloc.type =
6265 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6266 : BFD_RELOC_AARCH64_JUMP26;
6267 break;
6268 case loadlit:
6269 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6270 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6271 break;
6272 case pcreladdr:
6273 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6274 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6275 break;
6276 default:
6277 gas_assert (0);
6278 abort ();
6279 }
6280 inst.reloc.pc_rel = 1;
6281 }
6282 break;
6283
6284 case AARCH64_OPND_ADDR_SIMPLE:
6285 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6286 {
6287 /* [<Xn|SP>{, #<simm>}] */
6288 char *start = str;
6289 /* First use the normal address-parsing routines, to get
6290 the usual syntax errors. */
6291 po_misc_or_fail (parse_address (&str, info));
6292 if (info->addr.pcrel || info->addr.offset.is_reg
6293 || !info->addr.preind || info->addr.postind
6294 || info->addr.writeback)
6295 {
6296 set_syntax_error (_("invalid addressing mode"));
6297 goto failure;
6298 }
6299
6300 /* Then retry, matching the specific syntax of these addresses. */
6301 str = start;
6302 po_char_or_fail ('[');
6303 po_reg_or_fail (REG_TYPE_R64_SP);
6304 /* Accept optional ", #0". */
6305 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6306 && skip_past_char (&str, ','))
6307 {
6308 skip_past_char (&str, '#');
6309 if (! skip_past_char (&str, '0'))
6310 {
6311 set_fatal_syntax_error
6312 (_("the optional immediate offset can only be 0"));
6313 goto failure;
6314 }
6315 }
6316 po_char_or_fail (']');
6317 break;
6318 }
6319
6320 case AARCH64_OPND_ADDR_REGOFF:
6321 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6322 po_misc_or_fail (parse_address (&str, info));
6323 regoff_addr:
6324 if (info->addr.pcrel || !info->addr.offset.is_reg
6325 || !info->addr.preind || info->addr.postind
6326 || info->addr.writeback)
6327 {
6328 set_syntax_error (_("invalid addressing mode"));
6329 goto failure;
6330 }
6331 if (!info->shifter.operator_present)
6332 {
6333 /* Default to LSL if not present. Libopcodes prefers shifter
6334 kind to be explicit. */
6335 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6336 info->shifter.kind = AARCH64_MOD_LSL;
6337 }
6338 /* Qualifier to be deduced by libopcodes. */
6339 break;
6340
6341 case AARCH64_OPND_ADDR_SIMM7:
6342 po_misc_or_fail (parse_address (&str, info));
6343 if (info->addr.pcrel || info->addr.offset.is_reg
6344 || (!info->addr.preind && !info->addr.postind))
6345 {
6346 set_syntax_error (_("invalid addressing mode"));
6347 goto failure;
6348 }
6349 if (inst.reloc.type != BFD_RELOC_UNUSED)
6350 {
6351 set_syntax_error (_("relocation not allowed"));
6352 goto failure;
6353 }
6354 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6355 /* addr_off_p */ 1,
6356 /* need_libopcodes_p */ 1,
6357 /* skip_p */ 0);
6358 break;
6359
6360 case AARCH64_OPND_ADDR_SIMM9:
6361 case AARCH64_OPND_ADDR_SIMM9_2:
6362 case AARCH64_OPND_ADDR_SIMM11:
6363 case AARCH64_OPND_ADDR_SIMM13:
6364 po_misc_or_fail (parse_address (&str, info));
6365 if (info->addr.pcrel || info->addr.offset.is_reg
6366 || (!info->addr.preind && !info->addr.postind)
6367 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6368 && info->addr.writeback))
6369 {
6370 set_syntax_error (_("invalid addressing mode"));
6371 goto failure;
6372 }
6373 if (inst.reloc.type != BFD_RELOC_UNUSED)
6374 {
6375 set_syntax_error (_("relocation not allowed"));
6376 goto failure;
6377 }
6378 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6379 /* addr_off_p */ 1,
6380 /* need_libopcodes_p */ 1,
6381 /* skip_p */ 0);
6382 break;
6383
6384 case AARCH64_OPND_ADDR_SIMM10:
6385 case AARCH64_OPND_ADDR_OFFSET:
6386 po_misc_or_fail (parse_address (&str, info));
6387 if (info->addr.pcrel || info->addr.offset.is_reg
6388 || !info->addr.preind || info->addr.postind)
6389 {
6390 set_syntax_error (_("invalid addressing mode"));
6391 goto failure;
6392 }
6393 if (inst.reloc.type != BFD_RELOC_UNUSED)
6394 {
6395 set_syntax_error (_("relocation not allowed"));
6396 goto failure;
6397 }
6398 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6399 /* addr_off_p */ 1,
6400 /* need_libopcodes_p */ 1,
6401 /* skip_p */ 0);
6402 break;
6403
6404 case AARCH64_OPND_ADDR_UIMM12:
6405 po_misc_or_fail (parse_address (&str, info));
6406 if (info->addr.pcrel || info->addr.offset.is_reg
6407 || !info->addr.preind || info->addr.writeback)
6408 {
6409 set_syntax_error (_("invalid addressing mode"));
6410 goto failure;
6411 }
6412 if (inst.reloc.type == BFD_RELOC_UNUSED)
6413 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6414 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6415 || (inst.reloc.type
6416 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6417 || (inst.reloc.type
6418 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6419 || (inst.reloc.type
6420 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6421 || (inst.reloc.type
6422 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6423 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6424 /* Leave qualifier to be determined by libopcodes. */
6425 break;
6426
6427 case AARCH64_OPND_SIMD_ADDR_POST:
6428 /* [<Xn|SP>], <Xm|#<amount>> */
6429 po_misc_or_fail (parse_address (&str, info));
6430 if (!info->addr.postind || !info->addr.writeback)
6431 {
6432 set_syntax_error (_("invalid addressing mode"));
6433 goto failure;
6434 }
6435 if (!info->addr.offset.is_reg)
6436 {
6437 if (inst.reloc.exp.X_op == O_constant)
6438 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6439 else
6440 {
6441 set_fatal_syntax_error
6442 (_("writeback value must be an immediate constant"));
6443 goto failure;
6444 }
6445 }
6446 /* No qualifier. */
6447 break;
6448
6449 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6450 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6451 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6452 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6453 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6454 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6455 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6456 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6457 case AARCH64_OPND_SVE_ADDR_RI_U6:
6458 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6459 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6460 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6461 /* [X<n>{, #imm, MUL VL}]
6462 [X<n>{, #imm}]
6463 but recognizing SVE registers. */
6464 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6465 &offset_qualifier));
6466 if (base_qualifier != AARCH64_OPND_QLF_X)
6467 {
6468 set_syntax_error (_("invalid addressing mode"));
6469 goto failure;
6470 }
6471 sve_regimm:
6472 if (info->addr.pcrel || info->addr.offset.is_reg
6473 || !info->addr.preind || info->addr.writeback)
6474 {
6475 set_syntax_error (_("invalid addressing mode"));
6476 goto failure;
6477 }
6478 if (inst.reloc.type != BFD_RELOC_UNUSED
6479 || inst.reloc.exp.X_op != O_constant)
6480 {
6481 /* Make sure this has priority over
6482 "invalid addressing mode". */
6483 set_fatal_syntax_error (_("constant offset required"));
6484 goto failure;
6485 }
6486 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6487 break;
6488
6489 case AARCH64_OPND_SVE_ADDR_R:
6490 /* [<Xn|SP>{, <R><m>}]
6491 but recognizing SVE registers. */
6492 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6493 &offset_qualifier));
6494 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6495 {
6496 offset_qualifier = AARCH64_OPND_QLF_X;
6497 info->addr.offset.is_reg = 1;
6498 info->addr.offset.regno = 31;
6499 }
6500 else if (base_qualifier != AARCH64_OPND_QLF_X
6501 || offset_qualifier != AARCH64_OPND_QLF_X)
6502 {
6503 set_syntax_error (_("invalid addressing mode"));
6504 goto failure;
6505 }
6506 goto regoff_addr;
6507
6508 case AARCH64_OPND_SVE_ADDR_RR:
6509 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6510 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6511 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6512 case AARCH64_OPND_SVE_ADDR_RX:
6513 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6514 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6515 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6516 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6517 but recognizing SVE registers. */
6518 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6519 &offset_qualifier));
6520 if (base_qualifier != AARCH64_OPND_QLF_X
6521 || offset_qualifier != AARCH64_OPND_QLF_X)
6522 {
6523 set_syntax_error (_("invalid addressing mode"));
6524 goto failure;
6525 }
6526 goto regoff_addr;
6527
6528 case AARCH64_OPND_SVE_ADDR_RZ:
6529 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6530 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6531 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6532 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6533 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6534 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6535 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6536 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6537 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6538 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6539 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6540 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6541 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6542 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6543 &offset_qualifier));
6544 if (base_qualifier != AARCH64_OPND_QLF_X
6545 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6546 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6547 {
6548 set_syntax_error (_("invalid addressing mode"));
6549 goto failure;
6550 }
6551 info->qualifier = offset_qualifier;
6552 goto regoff_addr;
6553
6554 case AARCH64_OPND_SVE_ADDR_ZX:
6555 /* [Zn.<T>{, <Xm>}]. */
6556 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6557 &offset_qualifier));
6558 /* Things to check:
6559 base_qualifier either S_S or S_D
6560 offset_qualifier must be X
6561 */
6562 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6563 && base_qualifier != AARCH64_OPND_QLF_S_D)
6564 || offset_qualifier != AARCH64_OPND_QLF_X)
6565 {
6566 set_syntax_error (_("invalid addressing mode"));
6567 goto failure;
6568 }
6569 info->qualifier = base_qualifier;
6570 if (!info->addr.offset.is_reg || info->addr.pcrel
6571 || !info->addr.preind || info->addr.writeback
6572 || info->shifter.operator_present != 0)
6573 {
6574 set_syntax_error (_("invalid addressing mode"));
6575 goto failure;
6576 }
6577 info->shifter.kind = AARCH64_MOD_LSL;
6578 break;
6579
6581 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6582 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6583 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6584 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6585 /* [Z<n>.<T>{, #imm}] */
6586 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6587 &offset_qualifier));
6588 if (base_qualifier != AARCH64_OPND_QLF_S_S
6589 && base_qualifier != AARCH64_OPND_QLF_S_D)
6590 {
6591 set_syntax_error (_("invalid addressing mode"));
6592 goto failure;
6593 }
6594 info->qualifier = base_qualifier;
6595 goto sve_regimm;
6596
6597 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6598 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6599 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6600 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6601 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6602
6603 We don't reject:
6604
6605 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6606
6607 here since we get better error messages by leaving it to
6608 the qualifier checking routines. */
6609 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6610 &offset_qualifier));
6611 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6612 && base_qualifier != AARCH64_OPND_QLF_S_D)
6613 || offset_qualifier != base_qualifier)
6614 {
6615 set_syntax_error (_("invalid addressing mode"));
6616 goto failure;
6617 }
6618 info->qualifier = base_qualifier;
6619 goto regoff_addr;
6620
6621 case AARCH64_OPND_SYSREG:
6622 {
6623 uint32_t sysreg_flags;
6624 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6625 &sysreg_flags)) == PARSE_FAIL)
6626 {
6627 set_syntax_error (_("unknown or missing system register name"));
6628 goto failure;
6629 }
6630 inst.base.operands[i].sysreg.value = val;
6631 inst.base.operands[i].sysreg.flags = sysreg_flags;
6632 break;
6633 }
6634
6635 case AARCH64_OPND_PSTATEFIELD:
6636 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6637 == PARSE_FAIL)
6638 {
6639 set_syntax_error (_("unknown or missing PSTATE field name"));
6640 goto failure;
6641 }
6642 inst.base.operands[i].pstatefield = val;
6643 break;
6644
6645 case AARCH64_OPND_SYSREG_IC:
6646 inst.base.operands[i].sysins_op =
6647 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6648 goto sys_reg_ins;
6649
6650 case AARCH64_OPND_SYSREG_DC:
6651 inst.base.operands[i].sysins_op =
6652 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6653 goto sys_reg_ins;
6654
6655 case AARCH64_OPND_SYSREG_AT:
6656 inst.base.operands[i].sysins_op =
6657 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6658 goto sys_reg_ins;
6659
6660 case AARCH64_OPND_SYSREG_SR:
6661 inst.base.operands[i].sysins_op =
6662 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6663 goto sys_reg_ins;
6664
6665 case AARCH64_OPND_SYSREG_TLBI:
6666 inst.base.operands[i].sysins_op =
6667 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6668 sys_reg_ins:
6669 if (inst.base.operands[i].sysins_op == NULL)
6670 {
6671 set_fatal_syntax_error ( _("unknown or missing operation name"));
6672 goto failure;
6673 }
6674 break;
6675
6676 case AARCH64_OPND_BARRIER:
6677 case AARCH64_OPND_BARRIER_ISB:
6678 val = parse_barrier (&str);
6679 if (val != PARSE_FAIL
6680 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6681 {
6682 /* ISB only accepts the option name 'sy'. */
6683 set_syntax_error
6684 (_("the specified option is not accepted in ISB"));
6685 /* Turn off backtrack as this optional operand is present. */
6686 backtrack_pos = 0;
6687 goto failure;
6688 }
6689 /* This is an extension to accept a 0..15 immediate. */
6690 if (val == PARSE_FAIL)
6691 po_imm_or_fail (0, 15);
6692 info->barrier = aarch64_barrier_options + val;
6693 break;
6694
6695 case AARCH64_OPND_PRFOP:
6696 val = parse_pldop (&str);
6697 /* This is an extension to accept a 0..31 immediate. */
6698 if (val == PARSE_FAIL)
6699 po_imm_or_fail (0, 31);
6700 inst.base.operands[i].prfop = aarch64_prfops + val;
6701 break;
6702
6703 case AARCH64_OPND_BARRIER_PSB:
6704 val = parse_barrier_psb (&str, &(info->hint_option));
6705 if (val == PARSE_FAIL)
6706 goto failure;
6707 break;
6708
6709 case AARCH64_OPND_BTI_TARGET:
6710 val = parse_bti_operand (&str, &(info->hint_option));
6711 if (val == PARSE_FAIL)
6712 goto failure;
6713 break;
6714
6715 default:
6716 as_fatal (_("unhandled operand code %d"), operands[i]);
6717 }
6718
6719 /* If we get here, this operand was successfully parsed. */
6720 inst.base.operands[i].present = 1;
6721 continue;
6722
6723 failure:
6724 /* The parse routine should already have set the error, but in case
6725 not, set a default one here. */
6726 if (! error_p ())
6727 set_default_error ();
6728
6729 if (! backtrack_pos)
6730 goto parse_operands_return;
6731
6732 {
6733 /* We reach here because this operand is marked as optional, and
6734 either no operand was supplied or the operand was supplied but it
6735 was syntactically incorrect. In the latter case we report an
6736 error. In the former case we perform a few more checks before
6737 dropping through to the code to insert the default operand. */
6738
6739 char *tmp = backtrack_pos;
6740 char endchar = END_OF_INSN;
6741
6742 if (i != (aarch64_num_of_operands (opcode) - 1))
6743 endchar = ',';
6744 skip_past_char (&tmp, ',');
6745
6746 if (*tmp != endchar)
6747 /* The user has supplied an operand in the wrong format. */
6748 goto parse_operands_return;
6749
6750 /* Make sure there is not a comma before the optional operand.
6751 For example the fifth operand of 'sys' is optional:
6752
6753 sys #0,c0,c0,#0, <--- wrong
6754 sys #0,c0,c0,#0 <--- correct. */
6755 if (comma_skipped_p && i && endchar == END_OF_INSN)
6756 {
6757 set_fatal_syntax_error
6758 (_("unexpected comma before the omitted optional operand"));
6759 goto parse_operands_return;
6760 }
6761 }
6762
6763 /* Reaching here means we are dealing with an optional operand that is
6764 omitted from the assembly line. */
6765 gas_assert (optional_operand_p (opcode, i));
6766 info->present = 0;
6767 process_omitted_operand (operands[i], opcode, i, info);
6768
6769 /* Try again, skipping the optional operand at backtrack_pos. */
6770 str = backtrack_pos;
6771 backtrack_pos = 0;
6772
6773 /* Clear any error record after the omitted optional operand has been
6774 successfully handled. */
6775 clear_error ();
6776 }
6777
6778 /* Check if we have parsed all the operands. */
6779 if (*str != '\0' && ! error_p ())
6780 {
6781 /* Set I to the index of the last present operand; this is
6782 for the purpose of diagnostics. */
6783 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6784 ;
6785 set_fatal_syntax_error
6786 (_("unexpected characters following instruction"));
6787 }
6788
6789 parse_operands_return:
6790
6791 if (error_p ())
6792 {
6793 DEBUG_TRACE ("parsing FAIL: %s - %s",
6794 operand_mismatch_kind_names[get_error_kind ()],
6795 get_error_message ());
6796 /* Record the operand error properly; this is useful when there
6797 are multiple instruction templates for a mnemonic name, so that
6798 later on, we can select the error that most closely describes
6799 the problem. */
6800 record_operand_error (opcode, i, get_error_kind (),
6801 get_error_message ());
6802 return FALSE;
6803 }
6804 else
6805 {
6806 DEBUG_TRACE ("parsing SUCCESS");
6807 return TRUE;
6808 }
6809 }
6810
6811 /* Perform fix-ups to provide programmer-friendly features while keeping
6812 libopcodes happy, i.e. libopcodes only accepts the preferred
6813 architectural syntax.
6814 Return FALSE if there is any failure; otherwise return TRUE. */
6815
6816 static bfd_boolean
6817 programmer_friendly_fixup (aarch64_instruction *instr)
6818 {
6819 aarch64_inst *base = &instr->base;
6820 const aarch64_opcode *opcode = base->opcode;
6821 enum aarch64_op op = opcode->op;
6822 aarch64_opnd_info *operands = base->operands;
6823
6824 DEBUG_TRACE ("enter");
6825
6826 switch (opcode->iclass)
6827 {
6828 case testbranch:
6829 /* TBNZ Xn|Wn, #uimm6, label
6830 Test and Branch Not Zero: conditionally jumps to label if bit number
6831 uimm6 in register Xn is not zero. The bit number implies the width of
6832 the register, which may be written and should be disassembled as Wn if
6833 uimm is less than 32. */
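/* Illustrative example (not from the original source): "tbz w3, #5, lab" is
   accepted here and re-qualified to the X register form for encoding, while
   "tbz w3, #33, lab" is rejected because bit 33 lies outside a W register.  */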
6834 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6835 {
6836 if (operands[1].imm.value >= 32)
6837 {
6838 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6839 0, 31);
6840 return FALSE;
6841 }
6842 operands[0].qualifier = AARCH64_OPND_QLF_X;
6843 }
6844 break;
6845 case loadlit:
6846 /* LDR Wt, label | =value
6847 As a convenience assemblers will typically permit the notation
6848 "=value" in conjunction with the pc-relative literal load instructions
6849 to automatically place an immediate value or symbolic address in a
6850 nearby literal pool and generate a hidden label which references it.
6851 ISREG has been set to 0 in the case of =value. */
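/* Illustrative example (not from the original source): "ldr x0, =0x123456789"
   places the 64-bit constant in a nearby literal pool and is assembled as a
   PC-relative LDR (literal) of that pool entry.  */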
6852 if (instr->gen_lit_pool
6853 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6854 {
6855 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6856 if (op == OP_LDRSW_LIT)
6857 size = 4;
6858 if (instr->reloc.exp.X_op != O_constant
6859 && instr->reloc.exp.X_op != O_big
6860 && instr->reloc.exp.X_op != O_symbol)
6861 {
6862 record_operand_error (opcode, 1,
6863 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6864 _("constant expression expected"));
6865 return FALSE;
6866 }
6867 if (! add_to_lit_pool (&instr->reloc.exp, size))
6868 {
6869 record_operand_error (opcode, 1,
6870 AARCH64_OPDE_OTHER_ERROR,
6871 _("literal pool insertion failed"));
6872 return FALSE;
6873 }
6874 }
6875 break;
6876 case log_shift:
6877 case bitfield:
6878 /* UXT[BHW] Wd, Wn
6879 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6880 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6881 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6882 A programmer-friendly assembler should accept a destination Xd in
6883 place of Wd, however that is not the preferred form for disassembly.
6884 */
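/* Illustrative example (not from the original source): "uxtb x0, w1" is
   accepted and assembled as if it had been written "uxtb w0, w1".  */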
6885 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6886 && operands[1].qualifier == AARCH64_OPND_QLF_W
6887 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6888 operands[0].qualifier = AARCH64_OPND_QLF_W;
6889 break;
6890
6891 case addsub_ext:
6892 {
6893 /* In the 64-bit form, the final register operand is written as Wm
6894 for all but the (possibly omitted) UXTX/LSL and SXTX
6895 operators.
6896 As a programmer-friendly assembler, we accept e.g.
6897 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6898 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6899 int idx = aarch64_operand_index (opcode->operands,
6900 AARCH64_OPND_Rm_EXT);
6901 gas_assert (idx == 1 || idx == 2);
6902 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6903 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6904 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6905 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6906 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6907 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6908 }
6909 break;
6910
6911 default:
6912 break;
6913 }
6914
6915 DEBUG_TRACE ("exit with SUCCESS");
6916 return TRUE;
6917 }
6918
6919 /* Check for loads and stores that will cause unpredictable behavior. */
6920
6921 static void
6922 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6923 {
6924 aarch64_inst *base = &instr->base;
6925 const aarch64_opcode *opcode = base->opcode;
6926 const aarch64_opnd_info *opnds = base->operands;
6927 switch (opcode->iclass)
6928 {
6929 case ldst_pos:
6930 case ldst_imm9:
6931 case ldst_imm10:
6932 case ldst_unscaled:
6933 case ldst_unpriv:
6934 /* Loading/storing the base register is unpredictable if writeback. */
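/* Illustrative example (not from the original source): "ldr x0, [x0], #8" is
   flagged because the transfer register is also the written-back base.  */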
6935 if ((aarch64_get_operand_class (opnds[0].type)
6936 == AARCH64_OPND_CLASS_INT_REG)
6937 && opnds[0].reg.regno == opnds[1].addr.base_regno
6938 && opnds[1].addr.base_regno != REG_SP
6939 /* Exempt STG/STZG/ST2G/STZ2G. */
6940 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6941 && opnds[1].addr.writeback)
6942 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6943 break;
6944
6945 case ldstpair_off:
6946 case ldstnapair_offs:
6947 case ldstpair_indexed:
6948 /* Loading/storing the base register is unpredictable if writeback. */
6949 if ((aarch64_get_operand_class (opnds[0].type)
6950 == AARCH64_OPND_CLASS_INT_REG)
6951 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6952 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6953 && opnds[2].addr.base_regno != REG_SP
6954 /* Exempt STGP. */
6955 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6956 && opnds[2].addr.writeback)
6957 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6958 /* Load operations must load different registers. */
6959 if ((opcode->opcode & (1 << 22))
6960 && opnds[0].reg.regno == opnds[1].reg.regno)
6961 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6962 break;
6963
6964 case ldstexcl:
6965 /* It is unpredictable if the destination and status registers are the
6966 same. */
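/* Illustrative example (not from the original source): "stxr w0, w0, [x1]" is
   flagged because the status register and the transfer register are the
   same.  */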
6967 if ((aarch64_get_operand_class (opnds[0].type)
6968 == AARCH64_OPND_CLASS_INT_REG)
6969 && (aarch64_get_operand_class (opnds[1].type)
6970 == AARCH64_OPND_CLASS_INT_REG)
6971 && (opnds[0].reg.regno == opnds[1].reg.regno
6972 || opnds[0].reg.regno == opnds[2].reg.regno))
6973 as_warn (_("unpredictable: identical transfer and status registers"
6974 " -- `%s'"),
6975 str);
6976
6977 break;
6978
6979 default:
6980 break;
6981 }
6982 }
6983
6984 static void
6985 force_automatic_sequence_close (void)
6986 {
6987 if (now_instr_sequence.instr)
6988 {
6989 as_warn (_("previous `%s' sequence has not been closed"),
6990 now_instr_sequence.instr->opcode->name);
6991 init_insn_sequence (NULL, &now_instr_sequence);
6992 }
6993 }
6994
6995 /* A wrapper function that interfaces with libopcodes for encoding and
6996 records the error message, if any.
6997
6998 Return TRUE on success; otherwise return FALSE. */
6999
7000 static bfd_boolean
7001 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7002 aarch64_insn *code)
7003 {
7004 aarch64_operand_error error_info;
7005 memset (&error_info, '\0', sizeof (error_info));
7006 error_info.kind = AARCH64_OPDE_NIL;
7007 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7008 && !error_info.non_fatal)
7009 return TRUE;
7010
7011 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7012 record_operand_error_info (opcode, &error_info);
7013 return error_info.non_fatal;
7014 }
7015
7016 #ifdef DEBUG_AARCH64
7017 static inline void
7018 dump_opcode_operands (const aarch64_opcode *opcode)
7019 {
7020 int i = 0;
7021 while (opcode->operands[i] != AARCH64_OPND_NIL)
7022 {
7023 aarch64_verbose ("\t\t opnd%d: %s", i,
7024 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7025 ? aarch64_get_operand_name (opcode->operands[i])
7026 : aarch64_get_operand_desc (opcode->operands[i]));
7027 ++i;
7028 }
7029 }
7030 #endif /* DEBUG_AARCH64 */
7031
7032 /* This is the guts of the machine-dependent assembler. STR points to a
7033 machine dependent instruction. This function is supposed to emit
7034 the frags/bytes it assembles to. */
7035
7036 void
7037 md_assemble (char *str)
7038 {
7039 char *p = str;
7040 templates *template;
7041 aarch64_opcode *opcode;
7042 aarch64_inst *inst_base;
7043 unsigned saved_cond;
7044
7045 /* Align the previous label if needed. */
7046 if (last_label_seen != NULL)
7047 {
7048 symbol_set_frag (last_label_seen, frag_now);
7049 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7050 S_SET_SEGMENT (last_label_seen, now_seg);
7051 }
7052
7053 /* Update the current insn_sequence from the segment. */
7054 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
7055
7056 inst.reloc.type = BFD_RELOC_UNUSED;
7057
7058 DEBUG_TRACE ("\n\n");
7059 DEBUG_TRACE ("==============================");
7060 DEBUG_TRACE ("Enter md_assemble with %s", str);
7061
7062 template = opcode_lookup (&p);
7063 if (!template)
7064 {
7065 /* It wasn't an instruction, but it might be a register alias created
7066 by a directive of the form "alias .req reg". */
7067 if (!create_register_alias (str, p))
7068 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7069 str);
7070 return;
7071 }
7072
7073 skip_whitespace (p);
7074 if (*p == ',')
7075 {
7076 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7077 get_mnemonic_name (str), str);
7078 return;
7079 }
7080
7081 init_operand_error_report ();
7082
7083 /* Sections are assumed to start aligned. In an executable section, there is no
7084 MAP_DATA symbol pending. So we only align the address during
7085 MAP_DATA --> MAP_INSN transition.
7086 For other sections, this is not guaranteed. */
7087 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7088 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7089 frag_align_code (2, 0);
7090
7091 saved_cond = inst.cond;
7092 reset_aarch64_instruction (&inst);
7093 inst.cond = saved_cond;
7094
7095 /* Iterate through all opcode entries with the same mnemonic name. */
7096 do
7097 {
7098 opcode = template->opcode;
7099
7100 DEBUG_TRACE ("opcode %s found", opcode->name);
7101 #ifdef DEBUG_AARCH64
7102 if (debug_dump)
7103 dump_opcode_operands (opcode);
7104 #endif /* DEBUG_AARCH64 */
7105
7106 mapping_state (MAP_INSN);
7107
7108 inst_base = &inst.base;
7109 inst_base->opcode = opcode;
7110
7111 /* Truly conditionally executed instructions, e.g. b.cond. */
7112 if (opcode->flags & F_COND)
7113 {
7114 gas_assert (inst.cond != COND_ALWAYS);
7115 inst_base->cond = get_cond_from_value (inst.cond);
7116 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7117 }
7118 else if (inst.cond != COND_ALWAYS)
7119 {
7120 /* Control should not reach here: the assembly looks like a
7121 conditional instruction but the opcode found is unconditional. */
7122 gas_assert (0);
7123 continue;
7124 }
7125
7126 if (parse_operands (p, opcode)
7127 && programmer_friendly_fixup (&inst)
7128 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7129 {
7130 /* Check that this instruction is supported for this CPU. */
7131 if (!opcode->avariant
7132 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7133 {
7134 as_bad (_("selected processor does not support `%s'"), str);
7135 return;
7136 }
7137
7138 warn_unpredictable_ldst (&inst, str);
7139
7140 if (inst.reloc.type == BFD_RELOC_UNUSED
7141 || !inst.reloc.need_libopcodes_p)
7142 output_inst (NULL);
7143 else
7144 {
7145 /* If there is relocation generated for the instruction,
7146 store the instruction information for the future fix-up. */
7147 struct aarch64_inst *copy;
7148 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7149 copy = XNEW (struct aarch64_inst);
7150 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7151 output_inst (copy);
7152 }
7153
7154 /* Issue non-fatal messages if any. */
7155 output_operand_error_report (str, TRUE);
7156 return;
7157 }
7158
7159 template = template->next;
7160 if (template != NULL)
7161 {
7162 reset_aarch64_instruction (&inst);
7163 inst.cond = saved_cond;
7164 }
7165 }
7166 while (template != NULL);
7167
7168 /* Issue the error messages if any. */
7169 output_operand_error_report (str, FALSE);
7170 }
7171
7172 /* Various frobbings of labels and their addresses. */
7173
7174 void
7175 aarch64_start_line_hook (void)
7176 {
7177 last_label_seen = NULL;
7178 }
7179
7180 void
7181 aarch64_frob_label (symbolS * sym)
7182 {
7183 last_label_seen = sym;
7184
7185 dwarf2_emit_label (sym);
7186 }
7187
7188 void
7189 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7190 {
7191 /* Check to see if we have a block to close. */
7192 force_automatic_sequence_close ();
7193 }
7194
7195 int
7196 aarch64_data_in_code (void)
7197 {
7198 if (!strncmp (input_line_pointer + 1, "data:", 5))
7199 {
7200 *input_line_pointer = '/';
7201 input_line_pointer += 5;
7202 *input_line_pointer = 0;
7203 return 1;
7204 }
7205
7206 return 0;
7207 }
7208
7209 char *
7210 aarch64_canonicalize_symbol_name (char *name)
7211 {
7212 int len;
7213
7214 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7215 *(name + len - 5) = 0;
7216
7217 return name;
7218 }
7219 \f
7220 /* Table of all register names defined by default. The user can
7221 define additional names with .req. Note that all register names
7222 should appear in both upper and lowercase variants. Some registers
7223 also have mixed-case names. */
7224
7225 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
7226 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
7227 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7228 #define REGSET16(p,t) \
7229 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7230 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7231 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7232 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7233 #define REGSET31(p,t) \
7234 REGSET16(p, t), \
7235 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7236 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7237 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7238 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7239 #define REGSET(p,t) \
7240 REGSET31(p,t), REGNUM(p,31,t)
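/* Illustrative note (not from the original source): REGSET31 (x, R_64)
   defines the names x0 through x30; register number 31 is instead reached
   through the separate sp/wsp and xzr/wzr entries below.  */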
7241
7242 /* These go into aarch64_reg_hsh hash-table. */
7243 static const reg_entry reg_names[] = {
7244 /* Integer registers. */
7245 REGSET31 (x, R_64), REGSET31 (X, R_64),
7246 REGSET31 (w, R_32), REGSET31 (W, R_32),
7247
7248 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7249 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7250 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7251 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7252 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7253 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7254
7255 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7256 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7257
7258 /* Floating-point single precision registers. */
7259 REGSET (s, FP_S), REGSET (S, FP_S),
7260
7261 /* Floating-point double precision registers. */
7262 REGSET (d, FP_D), REGSET (D, FP_D),
7263
7264 /* Floating-point half precision registers. */
7265 REGSET (h, FP_H), REGSET (H, FP_H),
7266
7267 /* Floating-point byte precision registers. */
7268 REGSET (b, FP_B), REGSET (B, FP_B),
7269
7270 /* Floating-point quad precision registers. */
7271 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7272
7273 /* FP/SIMD registers. */
7274 REGSET (v, VN), REGSET (V, VN),
7275
7276 /* SVE vector registers. */
7277 REGSET (z, ZN), REGSET (Z, ZN),
7278
7279 /* SVE predicate registers. */
7280 REGSET16 (p, PN), REGSET16 (P, PN)
7281 };
7282
7283 #undef REGDEF
7284 #undef REGDEF_ALIAS
7285 #undef REGNUM
7286 #undef REGSET16
7287 #undef REGSET31
7288 #undef REGSET
7289
7290 #define N 1
7291 #define n 0
7292 #define Z 1
7293 #define z 0
7294 #define C 1
7295 #define c 0
7296 #define V 1
7297 #define v 0
7298 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
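/* Illustrative example (not from the original source): B (N, z, C, v)
   evaluates to 0b1010, so the entry "NzCv" denotes N and C set with
   Z and V clear.  */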
7299 static const asm_nzcv nzcv_names[] = {
7300 {"nzcv", B (n, z, c, v)},
7301 {"nzcV", B (n, z, c, V)},
7302 {"nzCv", B (n, z, C, v)},
7303 {"nzCV", B (n, z, C, V)},
7304 {"nZcv", B (n, Z, c, v)},
7305 {"nZcV", B (n, Z, c, V)},
7306 {"nZCv", B (n, Z, C, v)},
7307 {"nZCV", B (n, Z, C, V)},
7308 {"Nzcv", B (N, z, c, v)},
7309 {"NzcV", B (N, z, c, V)},
7310 {"NzCv", B (N, z, C, v)},
7311 {"NzCV", B (N, z, C, V)},
7312 {"NZcv", B (N, Z, c, v)},
7313 {"NZcV", B (N, Z, c, V)},
7314 {"NZCv", B (N, Z, C, v)},
7315 {"NZCV", B (N, Z, C, V)}
7316 };
7317
7318 #undef N
7319 #undef n
7320 #undef Z
7321 #undef z
7322 #undef C
7323 #undef c
7324 #undef V
7325 #undef v
7326 #undef B
7327 \f
7328 /* MD interface: bits in the object file. */
7329
7330 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7331 for use in the a.out file, and store them in the array pointed to by buf.
7332 This knows about the endian-ness of the target machine and does
7333 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
7334 2 (short) and 4 (long). Floating-point numbers are put out as a series of
7335 LITTLENUMS (shorts, here at least). */
7336
7337 void
7338 md_number_to_chars (char *buf, valueT val, int n)
7339 {
7340 if (target_big_endian)
7341 number_to_chars_bigendian (buf, val, n);
7342 else
7343 number_to_chars_littleendian (buf, val, n);
7344 }
7345
7346 /* MD interface: Sections. */
7347
7348 /* Estimate the size of a frag before relaxing. Assume everything fits in
7349 4 bytes. */
7350
7351 int
7352 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7353 {
7354 fragp->fr_var = 4;
7355 return 4;
7356 }
7357
7358 /* Round up a section size to the appropriate boundary. */
7359
7360 valueT
7361 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7362 {
7363 return size;
7364 }
7365
7366 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7367 of an rs_align_code fragment.
7368
7369 Here we fill the frag with the appropriate info for padding the
7370 output stream. The resulting frag will consist of a fixed (fr_fix)
7371 and of a repeating (fr_var) part.
7372
7373 The fixed content is always emitted before the repeating content and
7374 these two parts are used as follows in constructing the output:
7375 - the fixed part will be used to align to a valid instruction word
7376 boundary, in case that we start at a misaligned address; as no
7377 executable instruction can live at the misaligned location, we
7378 simply fill with zeros;
7379 - the variable part will be used to cover the remaining padding and
7380 we fill using the AArch64 NOP instruction.
7381
7382 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7383 enough storage space for up to 3 bytes of padding back to a valid
7384 instruction alignment and exactly 4 bytes to store the NOP pattern. */
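/* Illustrative example (not from the original source): if 6 bytes of padding
   are needed starting at a 2-byte-aligned (but not 4-byte-aligned) address,
   the first 2 bytes become fixed zeros to reach instruction alignment and
   the remaining 4 bytes are covered by one NOP in the repeating part.  */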
7385
7386 void
7387 aarch64_handle_align (fragS * fragP)
7388 {
7389 /* NOP = d503201f */
7390 /* AArch64 instructions are always little-endian. */
7391 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7392
7393 int bytes, fix, noop_size;
7394 char *p;
7395
7396 if (fragP->fr_type != rs_align_code)
7397 return;
7398
7399 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7400 p = fragP->fr_literal + fragP->fr_fix;
7401
7402 #ifdef OBJ_ELF
7403 gas_assert (fragP->tc_frag_data.recorded);
7404 #endif
7405
7406 noop_size = sizeof (aarch64_noop);
7407
7408 fix = bytes & (noop_size - 1);
7409 if (fix)
7410 {
7411 #ifdef OBJ_ELF
7412 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7413 #endif
7414 memset (p, 0, fix);
7415 p += fix;
7416 fragP->fr_fix += fix;
7417 }
7418
7419 if (noop_size)
7420 memcpy (p, aarch64_noop, noop_size);
7421 fragP->fr_var = noop_size;
7422 }
7423
7424 /* Perform target specific initialisation of a frag.
7425 Note - despite the name this initialisation is not done when the frag
7426 is created, but only when its type is assigned. A frag can be created
7427 and used a long time before its type is set, so beware of assuming that
7428 this initialisation is performed first. */
7429
7430 #ifndef OBJ_ELF
7431 void
7432 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7433 int max_chars ATTRIBUTE_UNUSED)
7434 {
7435 }
7436
7437 #else /* OBJ_ELF is defined. */
7438 void
7439 aarch64_init_frag (fragS * fragP, int max_chars)
7440 {
7441 /* Record a mapping symbol for alignment frags. We will delete this
7442 later if the alignment ends up empty. */
7443 if (!fragP->tc_frag_data.recorded)
7444 fragP->tc_frag_data.recorded = 1;
7445
7446 /* PR 21809: Do not set a mapping state for debug sections
7447 - it just confuses other tools. */
7448 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7449 return;
7450
7451 switch (fragP->fr_type)
7452 {
7453 case rs_align_test:
7454 case rs_fill:
7455 mapping_state_2 (MAP_DATA, max_chars);
7456 break;
7457 case rs_align:
7458 /* PR 20364: We can get alignment frags in code sections,
7459 so do not just assume that we should use the MAP_DATA state. */
7460 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7461 break;
7462 case rs_align_code:
7463 mapping_state_2 (MAP_INSN, max_chars);
7464 break;
7465 default:
7466 break;
7467 }
7468 }
7469 \f
7470 /* Initialize the DWARF-2 unwind information for this procedure. */
7471
7472 void
7473 tc_aarch64_frame_initial_instructions (void)
7474 {
7475 cfi_add_CFA_def_cfa (REG_SP, 0);
7476 }
7477 #endif /* OBJ_ELF */
7478
7479 /* Convert REGNAME to a DWARF-2 register number. */
7480
7481 int
7482 tc_aarch64_regname_to_dw2regnum (char *regname)
7483 {
7484 const reg_entry *reg = parse_reg (&regname);
7485 if (reg == NULL)
7486 return -1;
7487
7488 switch (reg->type)
7489 {
7490 case REG_TYPE_SP_32:
7491 case REG_TYPE_SP_64:
7492 case REG_TYPE_R_32:
7493 case REG_TYPE_R_64:
7494 return reg->number;
7495
7496 case REG_TYPE_FP_B:
7497 case REG_TYPE_FP_H:
7498 case REG_TYPE_FP_S:
7499 case REG_TYPE_FP_D:
7500 case REG_TYPE_FP_Q:
7501 return reg->number + 64;
7502
7503 default:
7504 break;
7505 }
7506 return -1;
7507 }
7508
7509 /* Implement DWARF2_ADDR_SIZE. */
7510
7511 int
7512 aarch64_dwarf2_addr_size (void)
7513 {
7514 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7515 if (ilp32_p)
7516 return 4;
7517 #endif
7518 return bfd_arch_bits_per_address (stdoutput) / 8;
7519 }
7520
7521 /* MD interface: Symbol and relocation handling. */
7522
7523 /* Return the address within the segment that a PC-relative fixup is
7524 relative to. For AArch64 PC-relative fixups applied to instructions
7525 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7526
7527 long
7528 md_pcrel_from_section (fixS * fixP, segT seg)
7529 {
7530 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7531
7532 /* If this is pc-relative and we are going to emit a relocation
7533 then we just want to put out any pipeline compensation that the linker
7534 will need. Otherwise we want to use the calculated base. */
7535 if (fixP->fx_pcrel
7536 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7537 || aarch64_force_relocation (fixP)))
7538 base = 0;
7539
7540 /* AArch64 should be consistent for all pc-relative relocations. */
7541 return base + AARCH64_PCREL_OFFSET;
7542 }
7543
7544 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
7545 Otherwise we have no need to default values of symbols. */
7546
7547 symbolS *
7548 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7549 {
7550 #ifdef OBJ_ELF
7551 if (name[0] == '_' && name[1] == 'G'
7552 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7553 {
7554 if (!GOT_symbol)
7555 {
7556 if (symbol_find (name))
7557 as_bad (_("GOT already in the symbol table"));
7558
7559 GOT_symbol = symbol_new (name, undefined_section,
7560 &zero_address_frag, 0);
7561 }
7562
7563 return GOT_symbol;
7564 }
7565 #endif
7566
7567 return 0;
7568 }
7569
7570 /* Return non-zero if the indicated VALUE has overflowed the maximum
7571 range expressible by an unsigned number with the indicated number of
7572 BITS. */
7573
7574 static bfd_boolean
7575 unsigned_overflow (valueT value, unsigned bits)
7576 {
7577 valueT lim;
7578 if (bits >= sizeof (valueT) * 8)
7579 return FALSE;
7580 lim = (valueT) 1 << bits;
7581 return (value >= lim);
7582 }
7583
7584
7585 /* Return non-zero if the indicated VALUE has overflowed the maximum
7586 range expressible by a signed number with the indicated number of
7587 BITS. */
7588
7589 static bfd_boolean
7590 signed_overflow (offsetT value, unsigned bits)
7591 {
7592 offsetT lim;
7593 if (bits >= sizeof (offsetT) * 8)
7594 return FALSE;
7595 lim = (offsetT) 1 << (bits - 1);
7596 return (value < -lim || value >= lim);
7597 }
7598
7599 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7600 unsigned immediate offset load/store instruction, try to encode it as
7601 an unscaled, 9-bit, signed immediate offset load/store instruction.
7602 Return TRUE if it is successful; otherwise return FALSE.
7603
7604 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7605 in response to the standard LDR/STR mnemonics when the immediate offset is
7606 unambiguous, i.e. when it is negative or unaligned. */
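/* Illustrative examples (not from the original source): "ldr x0, [x1, #-8]"
   cannot use the scaled form because the offset is negative, so it is
   assembled as "ldur x0, [x1, #-8]"; likewise "str w0, [x1, #3]" becomes a
   STUR because the offset is not a multiple of the transfer size.  */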
7607
7608 static bfd_boolean
7609 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7610 {
7611 int idx;
7612 enum aarch64_op new_op;
7613 const aarch64_opcode *new_opcode;
7614
7615 gas_assert (instr->opcode->iclass == ldst_pos);
7616
7617 switch (instr->opcode->op)
7618 {
7619 case OP_LDRB_POS: new_op = OP_LDURB; break;
7620 case OP_STRB_POS: new_op = OP_STURB; break;
7621 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7622 case OP_LDRH_POS: new_op = OP_LDURH; break;
7623 case OP_STRH_POS: new_op = OP_STURH; break;
7624 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7625 case OP_LDR_POS: new_op = OP_LDUR; break;
7626 case OP_STR_POS: new_op = OP_STUR; break;
7627 case OP_LDRF_POS: new_op = OP_LDURV; break;
7628 case OP_STRF_POS: new_op = OP_STURV; break;
7629 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7630 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7631 default: new_op = OP_NIL; break;
7632 }
7633
7634 if (new_op == OP_NIL)
7635 return FALSE;
7636
7637 new_opcode = aarch64_get_opcode (new_op);
7638 gas_assert (new_opcode != NULL);
7639
7640 DEBUG_TRACE ("Check programmer-friendly STRB/LDRB -> STURB/LDURB: %d == %d",
7641 instr->opcode->op, new_opcode->op);
7642
7643 aarch64_replace_opcode (instr, new_opcode);
7644
7645 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7646 qualifier matching may fail because the out-of-date qualifier will
7647 prevent the operand from being updated with a new and correct qualifier. */
7648 idx = aarch64_operand_index (instr->opcode->operands,
7649 AARCH64_OPND_ADDR_SIMM9);
7650 gas_assert (idx == 1);
7651 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7652
7653 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7654
7655 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7656 insn_sequence))
7657 return FALSE;
7658
7659 return TRUE;
7660 }
7661
7662 /* Called by fix_insn to fix a MOV immediate alias instruction.
7663
7664 Operand for a generic move immediate instruction, which is an alias
7665 instruction that generates a single MOVZ, MOVN or ORR instruction to load
7666 a 32-bit/64-bit immediate value into a general register. An assembler error
7667 shall result if the immediate cannot be created by a single one of these
7668 instructions. If there is a choice, then to ensure reversibility an
7669 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
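/* Illustrative example (not from the original source): #0xffffffffffff0000
   can be produced both by MOVN x0, #0xffff and by an ORR bitmask immediate,
   and the MOVN form is chosen; #0x12340000 is simply
   MOVZ x0, #0x1234, LSL #16.  */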
7670
7671 static void
7672 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7673 {
7674 const aarch64_opcode *opcode;
7675
7676 /* Need to check if the destination is SP/ZR. The check has to be done
7677 before any aarch64_replace_opcode. */
7678 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7679 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7680
7681 instr->operands[1].imm.value = value;
7682 instr->operands[1].skip = 0;
7683
7684 if (try_mov_wide_p)
7685 {
7686 /* Try the MOVZ alias. */
7687 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7688 aarch64_replace_opcode (instr, opcode);
7689 if (aarch64_opcode_encode (instr->opcode, instr,
7690 &instr->value, NULL, NULL, insn_sequence))
7691 {
7692 put_aarch64_insn (buf, instr->value);
7693 return;
7694 }
7695 /* Try the MOVN alias. */
7696 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7697 aarch64_replace_opcode (instr, opcode);
7698 if (aarch64_opcode_encode (instr->opcode, instr,
7699 &instr->value, NULL, NULL, insn_sequence))
7700 {
7701 put_aarch64_insn (buf, instr->value);
7702 return;
7703 }
7704 }
7705
7706 if (try_mov_bitmask_p)
7707 {
7708 /* Try the ORR alias. */
7709 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7710 aarch64_replace_opcode (instr, opcode);
7711 if (aarch64_opcode_encode (instr->opcode, instr,
7712 &instr->value, NULL, NULL, insn_sequence))
7713 {
7714 put_aarch64_insn (buf, instr->value);
7715 return;
7716 }
7717 }
7718
7719 as_bad_where (fixP->fx_file, fixP->fx_line,
7720 _("immediate cannot be moved by a single instruction"));
7721 }
7722
7723 /* An immediate-related instruction operand may use a symbol in the
7724 assembly, e.g.
7725
7726 mov w0, u32
7727 .set u32, 0x00ffff00
7728
7729 At the time the assembly instruction is parsed, a referenced symbol,
7730 like 'u32' in the above example, may not have been seen; a fixS is created
7731 in such a case and is handled here after symbols have been resolved.
7732 Instruction is fixed up with VALUE using the information in *FIXP plus
7733 extra information in FLAGS.
7734
7735 This function is called by md_apply_fix to fix up instructions that need
7736 a fix-up described above but does not involve any linker-time relocation. */
7737
7738 static void
7739 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7740 {
7741 int idx;
7742 uint32_t insn;
7743 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7744 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7745 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7746
7747 if (new_inst)
7748 {
7749 /* Now the instruction is about to be fixed-up, so the operand that
7750 was previously marked as 'ignored' needs to be unmarked in order
7751 to get the encoding done properly. */
7752 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7753 new_inst->operands[idx].skip = 0;
7754 }
7755
7756 gas_assert (opnd != AARCH64_OPND_NIL);
7757
7758 switch (opnd)
7759 {
7760 case AARCH64_OPND_EXCEPTION:
7761 case AARCH64_OPND_UNDEFINED:
7762 if (unsigned_overflow (value, 16))
7763 as_bad_where (fixP->fx_file, fixP->fx_line,
7764 _("immediate out of range"));
7765 insn = get_aarch64_insn (buf);
7766 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
7767 put_aarch64_insn (buf, insn);
7768 break;
7769
7770 case AARCH64_OPND_AIMM:
7771 /* ADD or SUB with immediate.
7772 NOTE this assumes we come here with an add/sub shifted reg encoding
7773 3 322|2222|2 2 2 21111 111111
7774 1 098|7654|3 2 1 09876 543210 98765 43210
7775 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7776 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7777 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7778 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7779 ->
7780 3 322|2222|2 2 221111111111
7781 1 098|7654|3 2 109876543210 98765 43210
7782 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7783 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7784 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7785 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7786 Fields sf Rn Rd are already set. */
7787 insn = get_aarch64_insn (buf);
7788 if (value < 0)
7789 {
7790 /* Add <-> sub. */
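/* e.g. "add x0, x1, #imm" with imm resolving to -1 is re-encoded
   as "sub x0, x1, #1". */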
7791 insn = reencode_addsub_switch_add_sub (insn);
7792 value = -value;
7793 }
7794
7795 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7796 && unsigned_overflow (value, 12))
7797 {
7798 /* Try to shift the value by 12 to make it fit. */
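/* e.g. an offset of 0x12000 is encoded as imm12 == 0x12 with LSL #12. */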
7799 if (((value >> 12) << 12) == value
7800 && ! unsigned_overflow (value, 12 + 12))
7801 {
7802 value >>= 12;
7803 insn |= encode_addsub_imm_shift_amount (1);
7804 }
7805 }
7806
7807 if (unsigned_overflow (value, 12))
7808 as_bad_where (fixP->fx_file, fixP->fx_line,
7809 _("immediate out of range"));
7810
7811 insn |= encode_addsub_imm (value);
7812
7813 put_aarch64_insn (buf, insn);
7814 break;
7815
7816 case AARCH64_OPND_SIMD_IMM:
7817 case AARCH64_OPND_SIMD_IMM_SFT:
7818 case AARCH64_OPND_LIMM:
7819 /* Bit mask immediate. */
7820 gas_assert (new_inst != NULL);
7821 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7822 new_inst->operands[idx].imm.value = value;
7823 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7824 &new_inst->value, NULL, NULL, insn_sequence))
7825 put_aarch64_insn (buf, new_inst->value);
7826 else
7827 as_bad_where (fixP->fx_file, fixP->fx_line,
7828 _("invalid immediate"));
7829 break;
7830
7831 case AARCH64_OPND_HALF:
7832 /* 16-bit unsigned immediate. */
7833 if (unsigned_overflow (value, 16))
7834 as_bad_where (fixP->fx_file, fixP->fx_line,
7835 _("immediate out of range"));
7836 insn = get_aarch64_insn (buf);
7837 insn |= encode_movw_imm (value & 0xffff);
7838 put_aarch64_insn (buf, insn);
7839 break;
7840
7841 case AARCH64_OPND_IMM_MOV:
7842 /* Operand for a generic move immediate instruction, which is
7843 an alias instruction that generates a single MOVZ, MOVN or ORR
7844 instruction to loads a 32-bit/64-bit immediate value into general
7845 register. An assembler error shall result if the immediate cannot be
7846 created by a single one of these instructions. If there is a choice,
7847 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
7848 and MOVZ or MOVN to ORR. */
7849 gas_assert (new_inst != NULL);
7850 fix_mov_imm_insn (fixP, buf, new_inst, value);
7851 break;
7852
7853 case AARCH64_OPND_ADDR_SIMM7:
7854 case AARCH64_OPND_ADDR_SIMM9:
7855 case AARCH64_OPND_ADDR_SIMM9_2:
7856 case AARCH64_OPND_ADDR_SIMM10:
7857 case AARCH64_OPND_ADDR_UIMM12:
7858 case AARCH64_OPND_ADDR_SIMM11:
7859 case AARCH64_OPND_ADDR_SIMM13:
7860 /* Immediate offset in an address. */
7861 insn = get_aarch64_insn (buf);
7862
7863 gas_assert (new_inst != NULL && new_inst->value == insn);
7864 gas_assert (new_inst->opcode->operands[1] == opnd
7865 || new_inst->opcode->operands[2] == opnd);
7866
7867 /* Get the index of the address operand. */
7868 if (new_inst->opcode->operands[1] == opnd)
7869 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7870 idx = 1;
7871 else
7872 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7873 idx = 2;
7874
7875 /* Update the resolved offset value. */
7876 new_inst->operands[idx].addr.offset.imm = value;
7877
7878 /* Encode/fix-up. */
7879 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7880 &new_inst->value, NULL, NULL, insn_sequence))
7881 {
7882 put_aarch64_insn (buf, new_inst->value);
7883 break;
7884 }
7885 else if (new_inst->opcode->iclass == ldst_pos
7886 && try_to_encode_as_unscaled_ldst (new_inst))
7887 {
7888 put_aarch64_insn (buf, new_inst->value);
7889 break;
7890 }
7891
7892 as_bad_where (fixP->fx_file, fixP->fx_line,
7893 _("immediate offset out of range"));
7894 break;
7895
7896 default:
7897 gas_assert (0);
7898 as_fatal (_("unhandled operand code %d"), opnd);
7899 }
7900 }
7901
7902 /* Apply a fixup (fixP) to segment data, once it has been determined
7903 by our caller that we have all the info we need to fix it up.
7904
7905 Parameter valP is the pointer to the value of the bits. */
7906
7907 void
7908 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7909 {
7910 offsetT value = *valP;
7911 uint32_t insn;
7912 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7913 int scale;
7914 unsigned flags = fixP->fx_addnumber;
7915
7916 DEBUG_TRACE ("\n\n");
7917 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7918 DEBUG_TRACE ("Enter md_apply_fix");
7919
7920 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7921
7922 /* Note whether this will delete the relocation. */
7923
7924 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7925 fixP->fx_done = 1;
7926
7927 /* Process the relocations. */
7928 switch (fixP->fx_r_type)
7929 {
7930 case BFD_RELOC_NONE:
7931 /* This will need to go in the object file. */
7932 fixP->fx_done = 0;
7933 break;
7934
7935 case BFD_RELOC_8:
7936 case BFD_RELOC_8_PCREL:
7937 if (fixP->fx_done || !seg->use_rela_p)
7938 md_number_to_chars (buf, value, 1);
7939 break;
7940
7941 case BFD_RELOC_16:
7942 case BFD_RELOC_16_PCREL:
7943 if (fixP->fx_done || !seg->use_rela_p)
7944 md_number_to_chars (buf, value, 2);
7945 break;
7946
7947 case BFD_RELOC_32:
7948 case BFD_RELOC_32_PCREL:
7949 if (fixP->fx_done || !seg->use_rela_p)
7950 md_number_to_chars (buf, value, 4);
7951 break;
7952
7953 case BFD_RELOC_64:
7954 case BFD_RELOC_64_PCREL:
7955 if (fixP->fx_done || !seg->use_rela_p)
7956 md_number_to_chars (buf, value, 8);
7957 break;
7958
7959 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7960 /* We claim that these fixups have been processed here, even if
7961 in fact we generate an error because we do not have a reloc
7962 for them, so tc_gen_reloc() will reject them. */
7963 fixP->fx_done = 1;
7964 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7965 {
7966 as_bad_where (fixP->fx_file, fixP->fx_line,
7967 _("undefined symbol %s used as an immediate value"),
7968 S_GET_NAME (fixP->fx_addsy));
7969 goto apply_fix_return;
7970 }
7971 fix_insn (fixP, flags, value);
7972 break;
7973
7974 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7975 if (fixP->fx_done || !seg->use_rela_p)
7976 {
7977 if (value & 3)
7978 as_bad_where (fixP->fx_file, fixP->fx_line,
7979 _("pc-relative load offset not word aligned"));
7980 if (signed_overflow (value, 21))
7981 as_bad_where (fixP->fx_file, fixP->fx_line,
7982 _("pc-relative load offset out of range"));
7983 insn = get_aarch64_insn (buf);
7984 insn |= encode_ld_lit_ofs_19 (value >> 2);
7985 put_aarch64_insn (buf, insn);
7986 }
7987 break;
7988
7989 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7990 if (fixP->fx_done || !seg->use_rela_p)
7991 {
7992 if (signed_overflow (value, 21))
7993 as_bad_where (fixP->fx_file, fixP->fx_line,
7994 _("pc-relative address offset out of range"));
7995 insn = get_aarch64_insn (buf);
7996 insn |= encode_adr_imm (value);
7997 put_aarch64_insn (buf, insn);
7998 }
7999 break;
8000
8001 case BFD_RELOC_AARCH64_BRANCH19:
8002 if (fixP->fx_done || !seg->use_rela_p)
8003 {
8004 if (value & 3)
8005 as_bad_where (fixP->fx_file, fixP->fx_line,
8006 _("conditional branch target not word aligned"));
8007 if (signed_overflow (value, 21))
8008 as_bad_where (fixP->fx_file, fixP->fx_line,
8009 _("conditional branch out of range"));
8010 insn = get_aarch64_insn (buf);
8011 insn |= encode_cond_branch_ofs_19 (value >> 2);
8012 put_aarch64_insn (buf, insn);
8013 }
8014 break;
8015
8016 case BFD_RELOC_AARCH64_TSTBR14:
8017 if (fixP->fx_done || !seg->use_rela_p)
8018 {
8019 if (value & 3)
8020 as_bad_where (fixP->fx_file, fixP->fx_line,
8021 _("conditional branch target not word aligned"));
8022 if (signed_overflow (value, 16))
8023 as_bad_where (fixP->fx_file, fixP->fx_line,
8024 _("conditional branch out of range"));
8025 insn = get_aarch64_insn (buf);
8026 insn |= encode_tst_branch_ofs_14 (value >> 2);
8027 put_aarch64_insn (buf, insn);
8028 }
8029 break;
8030
8031 case BFD_RELOC_AARCH64_CALL26:
8032 case BFD_RELOC_AARCH64_JUMP26:
8033 if (fixP->fx_done || !seg->use_rela_p)
8034 {
8035 if (value & 3)
8036 as_bad_where (fixP->fx_file, fixP->fx_line,
8037 _("branch target not word aligned"));
8038 if (signed_overflow (value, 28))
8039 as_bad_where (fixP->fx_file, fixP->fx_line,
8040 _("branch out of range"));
8041 insn = get_aarch64_insn (buf);
8042 insn |= encode_branch_ofs_26 (value >> 2);
8043 put_aarch64_insn (buf, insn);
8044 }
8045 break;
8046
8047 case BFD_RELOC_AARCH64_MOVW_G0:
8048 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8049 case BFD_RELOC_AARCH64_MOVW_G0_S:
8050 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8051 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8052 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8053 scale = 0;
8054 goto movw_common;
8055 case BFD_RELOC_AARCH64_MOVW_G1:
8056 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8057 case BFD_RELOC_AARCH64_MOVW_G1_S:
8058 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8059 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8060 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8061 scale = 16;
8062 goto movw_common;
8063 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8064 scale = 0;
8065 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8066 /* Should always be exported to object file, see
8067 aarch64_force_relocation(). */
8068 gas_assert (!fixP->fx_done);
8069 gas_assert (seg->use_rela_p);
8070 goto movw_common;
8071 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8072 scale = 16;
8073 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8074 /* Should always be exported to object file, see
8075 aarch64_force_relocation(). */
8076 gas_assert (!fixP->fx_done);
8077 gas_assert (seg->use_rela_p);
8078 goto movw_common;
8079 case BFD_RELOC_AARCH64_MOVW_G2:
8080 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8081 case BFD_RELOC_AARCH64_MOVW_G2_S:
8082 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8083 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8084 scale = 32;
8085 goto movw_common;
8086 case BFD_RELOC_AARCH64_MOVW_G3:
8087 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8088 scale = 48;
8089 movw_common:
8090 if (fixP->fx_done || !seg->use_rela_p)
8091 {
8092 insn = get_aarch64_insn (buf);
8093
8094 if (!fixP->fx_done)
8095 {
8096 /* REL signed addend must fit in 16 bits */
8097 if (signed_overflow (value, 16))
8098 as_bad_where (fixP->fx_file, fixP->fx_line,
8099 _("offset out of range"));
8100 }
8101 else
8102 {
8103 /* Check for overflow and scale. */
8104 switch (fixP->fx_r_type)
8105 {
8106 case BFD_RELOC_AARCH64_MOVW_G0:
8107 case BFD_RELOC_AARCH64_MOVW_G1:
8108 case BFD_RELOC_AARCH64_MOVW_G2:
8109 case BFD_RELOC_AARCH64_MOVW_G3:
8110 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8111 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8112 if (unsigned_overflow (value, scale + 16))
8113 as_bad_where (fixP->fx_file, fixP->fx_line,
8114 _("unsigned value out of range"));
8115 break;
8116 case BFD_RELOC_AARCH64_MOVW_G0_S:
8117 case BFD_RELOC_AARCH64_MOVW_G1_S:
8118 case BFD_RELOC_AARCH64_MOVW_G2_S:
8119 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8120 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8121 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8122 /* NOTE: We can only come here with movz or movn. */
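/* e.g. a resolved value of -2 for a G0 group is emitted as movn with
   immediate 0x1 (the bitwise inverse), so the register still ends up
   holding the intended value. */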
8123 if (signed_overflow (value, scale + 16))
8124 as_bad_where (fixP->fx_file, fixP->fx_line,
8125 _("signed value out of range"));
8126 if (value < 0)
8127 {
8128 /* Force use of MOVN. */
8129 value = ~value;
8130 insn = reencode_movzn_to_movn (insn);
8131 }
8132 else
8133 {
8134 /* Force use of MOVZ. */
8135 insn = reencode_movzn_to_movz (insn);
8136 }
8137 break;
8138 default:
8139 /* Unchecked relocations. */
8140 break;
8141 }
8142 value >>= scale;
8143 }
8144
8145 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8146 insn |= encode_movw_imm (value & 0xffff);
8147
8148 put_aarch64_insn (buf, insn);
8149 }
8150 break;
8151
8152 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8153 fixP->fx_r_type = (ilp32_p
8154 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8155 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8156 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8157 /* Should always be exported to object file, see
8158 aarch64_force_relocation(). */
8159 gas_assert (!fixP->fx_done);
8160 gas_assert (seg->use_rela_p);
8161 break;
8162
8163 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8164 fixP->fx_r_type = (ilp32_p
8165 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8166 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8167 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8168 /* Should always be exported to object file, see
8169 aarch64_force_relocation(). */
8170 gas_assert (!fixP->fx_done);
8171 gas_assert (seg->use_rela_p);
8172 break;
8173
8174 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8175 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8176 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8177 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8178 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8179 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8180 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8181 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8182 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8183 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8184 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8185 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8186 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8187 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8188 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8189 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8190 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8191 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8192 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8193 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8194 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8195 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8196 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8197 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8198 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8199 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8200 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8201 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8202 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8203 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8204 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8205 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8206 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8207 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8208 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8209 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8210 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8211 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8212 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8213 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8214 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8215 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8216 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8217 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8218 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8219 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8220 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8221 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8222 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8223 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8224 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8225 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8226 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8227 /* Should always be exported to object file, see
8228 aarch64_force_relocation(). */
8229 gas_assert (!fixP->fx_done);
8230 gas_assert (seg->use_rela_p);
8231 break;
8232
8233 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8234 /* Should always be exported to object file, see
8235 aarch64_force_relocation(). */
8236 fixP->fx_r_type = (ilp32_p
8237 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8238 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8239 gas_assert (!fixP->fx_done);
8240 gas_assert (seg->use_rela_p);
8241 break;
8242
8243 case BFD_RELOC_AARCH64_ADD_LO12:
8244 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8245 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8246 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8247 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8248 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8249 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8250 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8251 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8252 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8253 case BFD_RELOC_AARCH64_LDST128_LO12:
8254 case BFD_RELOC_AARCH64_LDST16_LO12:
8255 case BFD_RELOC_AARCH64_LDST32_LO12:
8256 case BFD_RELOC_AARCH64_LDST64_LO12:
8257 case BFD_RELOC_AARCH64_LDST8_LO12:
8258 /* Should always be exported to object file, see
8259 aarch64_force_relocation(). */
8260 gas_assert (!fixP->fx_done);
8261 gas_assert (seg->use_rela_p);
8262 break;
8263
8264 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8265 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8266 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8267 break;
8268
8269 case BFD_RELOC_UNUSED:
8270 /* An error will already have been reported. */
8271 break;
8272
8273 default:
8274 as_bad_where (fixP->fx_file, fixP->fx_line,
8275 _("unexpected %s fixup"),
8276 bfd_get_reloc_code_name (fixP->fx_r_type));
8277 break;
8278 }
8279
8280 apply_fix_return:
8281 /* Free the allocated struct aarch64_inst.
8282 N.B. currently only a limited number of fix-up types actually use
8283 this field, so the impact on performance should be minimal. */
8284 free (fixP->tc_fix_data.inst);
8285
8286 return;
8287 }
8288
8289 /* Translate internal representation of relocation info to BFD target
8290 format. */
8291
8292 arelent *
8293 tc_gen_reloc (asection * section, fixS * fixp)
8294 {
8295 arelent *reloc;
8296 bfd_reloc_code_real_type code;
8297
8298 reloc = XNEW (arelent);
8299
8300 reloc->sym_ptr_ptr = XNEW (asymbol *);
8301 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8302 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8303
8304 if (fixp->fx_pcrel)
8305 {
8306 if (section->use_rela_p)
8307 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8308 else
8309 fixp->fx_offset = reloc->address;
8310 }
8311 reloc->addend = fixp->fx_offset;
8312
8313 code = fixp->fx_r_type;
8314 switch (code)
8315 {
8316 case BFD_RELOC_16:
8317 if (fixp->fx_pcrel)
8318 code = BFD_RELOC_16_PCREL;
8319 break;
8320
8321 case BFD_RELOC_32:
8322 if (fixp->fx_pcrel)
8323 code = BFD_RELOC_32_PCREL;
8324 break;
8325
8326 case BFD_RELOC_64:
8327 if (fixp->fx_pcrel)
8328 code = BFD_RELOC_64_PCREL;
8329 break;
8330
8331 default:
8332 break;
8333 }
8334
8335 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8336 if (reloc->howto == NULL)
8337 {
8338 as_bad_where (fixp->fx_file, fixp->fx_line,
8339 _
8340 ("cannot represent %s relocation in this object file format"),
8341 bfd_get_reloc_code_name (code));
8342 return NULL;
8343 }
8344
8345 return reloc;
8346 }
8347
8348 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8349
8350 void
8351 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8352 {
8353 bfd_reloc_code_real_type type;
8354 int pcrel = 0;
8355
8356 /* Pick a reloc.
8357 FIXME: @@ Should look at CPU word size. */
8358 switch (size)
8359 {
8360 case 1:
8361 type = BFD_RELOC_8;
8362 break;
8363 case 2:
8364 type = BFD_RELOC_16;
8365 break;
8366 case 4:
8367 type = BFD_RELOC_32;
8368 break;
8369 case 8:
8370 type = BFD_RELOC_64;
8371 break;
8372 default:
8373 as_bad (_("cannot do %u-byte relocation"), size);
8374 type = BFD_RELOC_UNUSED;
8375 break;
8376 }
8377
8378 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8379 }
8380
8381 int
8382 aarch64_force_relocation (struct fix *fixp)
8383 {
8384 switch (fixp->fx_r_type)
8385 {
8386 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8387 /* Perform these "immediate" internal relocations
8388 even if the symbol is extern or weak. */
8389 return 0;
8390
8391 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8392 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8393 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8394 /* Pseudo relocs that need to be fixed up according to
8395 ilp32_p. */
8396 return 0;
8397
8398 case BFD_RELOC_AARCH64_ADD_LO12:
8399 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8400 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8401 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8402 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8403 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8404 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8405 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8406 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8407 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8408 case BFD_RELOC_AARCH64_LDST128_LO12:
8409 case BFD_RELOC_AARCH64_LDST16_LO12:
8410 case BFD_RELOC_AARCH64_LDST32_LO12:
8411 case BFD_RELOC_AARCH64_LDST64_LO12:
8412 case BFD_RELOC_AARCH64_LDST8_LO12:
8413 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8414 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8415 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8416 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8417 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8418 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8419 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8420 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8421 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8422 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8423 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8424 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8425 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8426 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8427 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8428 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8429 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8430 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8431 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8432 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8433 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8434 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8435 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8436 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8437 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8438 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8439 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8440 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8441 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8442 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8443 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8444 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8445 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8446 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8447 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8448 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8449 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8450 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8451 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8452 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8453 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8454 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8455 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8456 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8457 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8458 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8459 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8460 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8461 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8462 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8463 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8464 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8465 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8466 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8467 /* Always leave these relocations for the linker. */
8468 return 1;
8469
8470 default:
8471 break;
8472 }
8473
8474 return generic_force_reloc (fixp);
8475 }
8476
8477 #ifdef OBJ_ELF
8478
8479 /* Implement md_after_parse_args. This is the earliest time we need to decide
8480 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8481
8482 void
8483 aarch64_after_parse_args (void)
8484 {
8485 if (aarch64_abi != AARCH64_ABI_NONE)
8486 return;
8487
8488 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
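/* e.g. a DEFAULT_ARCH of "aarch64:32" (the "+ 7" below skips the
   "aarch64" prefix) selects ILP32, while plain "aarch64" selects LP64. */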
8489 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8490 aarch64_abi = AARCH64_ABI_ILP32;
8491 else
8492 aarch64_abi = AARCH64_ABI_LP64;
8493 }
8494
8495 const char *
8496 elf64_aarch64_target_format (void)
8497 {
8498 #ifdef TE_CLOUDABI
8499 /* FIXME: What to do for ilp32_p ? */
8500 if (target_big_endian)
8501 return "elf64-bigaarch64-cloudabi";
8502 else
8503 return "elf64-littleaarch64-cloudabi";
8504 #else
8505 if (target_big_endian)
8506 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8507 else
8508 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8509 #endif
8510 }
8511
8512 void
8513 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8514 {
8515 elf_frob_symbol (symp, puntp);
8516 }
8517 #endif
8518
8519 /* MD interface: Finalization. */
8520
8521 /* This hook is a good place to do this, although it was probably not
8522 intended for this kind of use. We need to dump the literal pool before
8523 references are made to a null symbol pointer. */
8524
8525 void
8526 aarch64_cleanup (void)
8527 {
8528 literal_pool *pool;
8529
8530 for (pool = list_of_pools; pool; pool = pool->next)
8531 {
8532 /* Put it at the end of the relevant section. */
8533 subseg_set (pool->section, pool->sub_section);
8534 s_ltorg (0);
8535 }
8536 }
8537
8538 #ifdef OBJ_ELF
8539 /* Remove any excess mapping symbols generated for alignment frags in
8540 SEC. We may have created a mapping symbol before a zero byte
8541 alignment; remove it if there's a mapping symbol after the
8542 alignment. */
8543 static void
8544 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8545 void *dummy ATTRIBUTE_UNUSED)
8546 {
8547 segment_info_type *seginfo = seg_info (sec);
8548 fragS *fragp;
8549
8550 if (seginfo == NULL || seginfo->frchainP == NULL)
8551 return;
8552
8553 for (fragp = seginfo->frchainP->frch_root;
8554 fragp != NULL; fragp = fragp->fr_next)
8555 {
8556 symbolS *sym = fragp->tc_frag_data.last_map;
8557 fragS *next = fragp->fr_next;
8558
8559 /* Variable-sized frags have been converted to fixed size by
8560 this point. But if this was variable-sized to start with,
8561 there will be a fixed-size frag after it. So don't handle
8562 next == NULL. */
8563 if (sym == NULL || next == NULL)
8564 continue;
8565
8566 if (S_GET_VALUE (sym) < next->fr_address)
8567 /* Not at the end of this frag. */
8568 continue;
8569 know (S_GET_VALUE (sym) == next->fr_address);
8570
8571 do
8572 {
8573 if (next->tc_frag_data.first_map != NULL)
8574 {
8575 /* Next frag starts with a mapping symbol. Discard this
8576 one. */
8577 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8578 break;
8579 }
8580
8581 if (next->fr_next == NULL)
8582 {
8583 /* This mapping symbol is at the end of the section. Discard
8584 it. */
8585 know (next->fr_fix == 0 && next->fr_var == 0);
8586 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8587 break;
8588 }
8589
8590 /* As long as we have empty frags without any mapping symbols,
8591 keep looking. */
8592 /* If the next frag is non-empty and does not start with a
8593 mapping symbol, then this mapping symbol is required. */
8594 if (next->fr_address != next->fr_next->fr_address)
8595 break;
8596
8597 next = next->fr_next;
8598 }
8599 while (next != NULL);
8600 }
8601 }
8602 #endif
8603
8604 /* Adjust the symbol table. */
8605
8606 void
8607 aarch64_adjust_symtab (void)
8608 {
8609 #ifdef OBJ_ELF
8610 /* Remove any overlapping mapping symbols generated by alignment frags. */
8611 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8612 /* Now do generic ELF adjustments. */
8613 elf_adjust_symtab ();
8614 #endif
8615 }
8616
8617 static void
8618 checked_hash_insert (htab_t table, const char *key, void *value)
8619 {
8620 str_hash_insert (table, key, value, 0);
8621 }
8622
8623 static void
8624 sysreg_hash_insert (htab_t table, const char *key, void *value)
8625 {
8626 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
8627 checked_hash_insert (table, key, value);
8628 }
8629
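/* Build the instruction hash table. Opcodes that share a mnemonic are
   chained together on a templates list under a single hash entry. */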
8630 static void
8631 fill_instruction_hash_table (void)
8632 {
8633 aarch64_opcode *opcode = aarch64_opcode_table;
8634
8635 while (opcode->name != NULL)
8636 {
8637 templates *templ, *new_templ;
8638 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8639
8640 new_templ = XNEW (templates);
8641 new_templ->opcode = opcode;
8642 new_templ->next = NULL;
8643
8644 if (!templ)
8645 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8646 else
8647 {
8648 new_templ->next = templ->next;
8649 templ->next = new_templ;
8650 }
8651 ++opcode;
8652 }
8653 }
8654
8655 static inline void
8656 convert_to_upper (char *dst, const char *src, size_t num)
8657 {
8658 unsigned int i;
8659 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8660 *dst = TOUPPER (*src);
8661 *dst = '\0';
8662 }
8663
8664 /* Assume STR points to a lower-case string; allocate, convert and return
8665 the corresponding upper-case string. */
8666 static inline const char*
8667 get_upper_str (const char *str)
8668 {
8669 char *ret;
8670 size_t len = strlen (str);
8671 ret = XNEWVEC (char, len + 1);
8672 convert_to_upper (ret, str, len);
8673 return ret;
8674 }
8675
8676 /* MD interface: Initialization. */
8677
8678 void
8679 md_begin (void)
8680 {
8681 unsigned mach;
8682 unsigned int i;
8683
8684 aarch64_ops_hsh = str_htab_create ();
8685 aarch64_cond_hsh = str_htab_create ();
8686 aarch64_shift_hsh = str_htab_create ();
8687 aarch64_sys_regs_hsh = str_htab_create ();
8688 aarch64_pstatefield_hsh = str_htab_create ();
8689 aarch64_sys_regs_ic_hsh = str_htab_create ();
8690 aarch64_sys_regs_dc_hsh = str_htab_create ();
8691 aarch64_sys_regs_at_hsh = str_htab_create ();
8692 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
8693 aarch64_sys_regs_sr_hsh = str_htab_create ();
8694 aarch64_reg_hsh = str_htab_create ();
8695 aarch64_barrier_opt_hsh = str_htab_create ();
8696 aarch64_nzcv_hsh = str_htab_create ();
8697 aarch64_pldop_hsh = str_htab_create ();
8698 aarch64_hint_opt_hsh = str_htab_create ();
8699
8700 fill_instruction_hash_table ();
8701
8702 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8703 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8704 (void *) (aarch64_sys_regs + i));
8705
8706 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8707 sysreg_hash_insert (aarch64_pstatefield_hsh,
8708 aarch64_pstatefields[i].name,
8709 (void *) (aarch64_pstatefields + i));
8710
8711 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8712 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
8713 aarch64_sys_regs_ic[i].name,
8714 (void *) (aarch64_sys_regs_ic + i));
8715
8716 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8717 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
8718 aarch64_sys_regs_dc[i].name,
8719 (void *) (aarch64_sys_regs_dc + i));
8720
8721 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8722 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
8723 aarch64_sys_regs_at[i].name,
8724 (void *) (aarch64_sys_regs_at + i));
8725
8726 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8727 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
8728 aarch64_sys_regs_tlbi[i].name,
8729 (void *) (aarch64_sys_regs_tlbi + i));
8730
8731 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8732 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
8733 aarch64_sys_regs_sr[i].name,
8734 (void *) (aarch64_sys_regs_sr + i));
8735
8736 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8737 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8738 (void *) (reg_names + i));
8739
8740 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8741 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8742 (void *) (nzcv_names + i));
8743
8744 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8745 {
8746 const char *name = aarch64_operand_modifiers[i].name;
8747 checked_hash_insert (aarch64_shift_hsh, name,
8748 (void *) (aarch64_operand_modifiers + i));
8749 /* Also hash the name in the upper case. */
8750 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8751 (void *) (aarch64_operand_modifiers + i));
8752 }
8753
8754 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8755 {
8756 unsigned int j;
8757 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8758 the same condition code. */
8759 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8760 {
8761 const char *name = aarch64_conds[i].names[j];
8762 if (name == NULL)
8763 break;
8764 checked_hash_insert (aarch64_cond_hsh, name,
8765 (void *) (aarch64_conds + i));
8766 /* Also hash the name in the upper case. */
8767 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8768 (void *) (aarch64_conds + i));
8769 }
8770 }
8771
8772 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8773 {
8774 const char *name = aarch64_barrier_options[i].name;
8775 /* Skip xx00 - the unallocated values of option. */
8776 if ((i & 0x3) == 0)
8777 continue;
8778 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8779 (void *) (aarch64_barrier_options + i));
8780 /* Also hash the name in the upper case. */
8781 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8782 (void *) (aarch64_barrier_options + i));
8783 }
8784
8785 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8786 {
8787 const char* name = aarch64_prfops[i].name;
8788 /* Skip the unallocated hint encodings. */
8789 if (name == NULL)
8790 continue;
8791 checked_hash_insert (aarch64_pldop_hsh, name,
8792 (void *) (aarch64_prfops + i));
8793 /* Also hash the name in the upper case. */
8794 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8795 (void *) (aarch64_prfops + i));
8796 }
8797
8798 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8799 {
8800 const char* name = aarch64_hint_options[i].name;
8801 const char* upper_name = get_upper_str(name);
8802
8803 checked_hash_insert (aarch64_hint_opt_hsh, name,
8804 (void *) (aarch64_hint_options + i));
8805
8806 /* Also hash the name in the upper case if not the same. */
8807 if (strcmp (name, upper_name) != 0)
8808 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
8809 (void *) (aarch64_hint_options + i));
8810 }
8811
8812 /* Set the cpu variant based on the command-line options. */
8813 if (!mcpu_cpu_opt)
8814 mcpu_cpu_opt = march_cpu_opt;
8815
8816 if (!mcpu_cpu_opt)
8817 mcpu_cpu_opt = &cpu_default;
8818
8819 cpu_variant = *mcpu_cpu_opt;
8820
8821 /* Record the CPU type. */
8822 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8823
8824 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8825 }
8826
8827 /* Command line processing. */
8828
8829 const char *md_shortopts = "m:";
8830
8831 #ifdef AARCH64_BI_ENDIAN
8832 #define OPTION_EB (OPTION_MD_BASE + 0)
8833 #define OPTION_EL (OPTION_MD_BASE + 1)
8834 #else
8835 #if TARGET_BYTES_BIG_ENDIAN
8836 #define OPTION_EB (OPTION_MD_BASE + 0)
8837 #else
8838 #define OPTION_EL (OPTION_MD_BASE + 1)
8839 #endif
8840 #endif
8841
8842 struct option md_longopts[] = {
8843 #ifdef OPTION_EB
8844 {"EB", no_argument, NULL, OPTION_EB},
8845 #endif
8846 #ifdef OPTION_EL
8847 {"EL", no_argument, NULL, OPTION_EL},
8848 #endif
8849 {NULL, no_argument, NULL, 0}
8850 };
8851
8852 size_t md_longopts_size = sizeof (md_longopts);
8853
8854 struct aarch64_option_table
8855 {
8856 const char *option; /* Option name to match. */
8857 const char *help; /* Help information. */
8858 int *var; /* Variable to change. */
8859 int value; /* What to change it to. */
8860 char *deprecated; /* If non-null, print this message. */
8861 };
8862
8863 static struct aarch64_option_table aarch64_opts[] = {
8864 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8865 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8866 NULL},
8867 #ifdef DEBUG_AARCH64
8868 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8869 #endif /* DEBUG_AARCH64 */
8870 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8871 NULL},
8872 {"mno-verbose-error", N_("do not output verbose error messages"),
8873 &verbose_error_p, 0, NULL},
8874 {NULL, NULL, NULL, 0, NULL}
8875 };
8876
8877 struct aarch64_cpu_option_table
8878 {
8879 const char *name;
8880 const aarch64_feature_set value;
8881 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8882 case. */
8883 const char *canonical_name;
8884 };
8885
8886 /* This list should, at a minimum, contain all the cpu names
8887 recognized by GCC. */
8888 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8889 {"all", AARCH64_ANY, NULL},
8890 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
8891 AARCH64_FEATURE_CRC), "Cortex-A34"},
8892 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8893 AARCH64_FEATURE_CRC), "Cortex-A35"},
8894 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8895 AARCH64_FEATURE_CRC), "Cortex-A53"},
8896 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8897 AARCH64_FEATURE_CRC), "Cortex-A57"},
8898 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8899 AARCH64_FEATURE_CRC), "Cortex-A72"},
8900 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8901 AARCH64_FEATURE_CRC), "Cortex-A73"},
8902 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8903 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8904 "Cortex-A55"},
8905 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8906 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8907 "Cortex-A75"},
8908 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8909 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8910 "Cortex-A76"},
8911 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8912 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8913 | AARCH64_FEATURE_DOTPROD
8914 | AARCH64_FEATURE_SSBS),
8915 "Cortex-A76AE"},
8916 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8917 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8918 | AARCH64_FEATURE_DOTPROD
8919 | AARCH64_FEATURE_SSBS),
8920 "Cortex-A77"},
8921 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8922 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8923 | AARCH64_FEATURE_DOTPROD
8924 | AARCH64_FEATURE_SSBS),
8925 "Cortex-A65"},
8926 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8927 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8928 | AARCH64_FEATURE_DOTPROD
8929 | AARCH64_FEATURE_SSBS),
8930 "Cortex-A65AE"},
8931 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8932 AARCH64_FEATURE_F16
8933 | AARCH64_FEATURE_RCPC
8934 | AARCH64_FEATURE_DOTPROD
8935 | AARCH64_FEATURE_SSBS
8936 | AARCH64_FEATURE_PROFILE),
8937 "Cortex-A78"},
8938 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8939 AARCH64_FEATURE_F16
8940 | AARCH64_FEATURE_RCPC
8941 | AARCH64_FEATURE_DOTPROD
8942 | AARCH64_FEATURE_SSBS
8943 | AARCH64_FEATURE_PROFILE),
8944 "Cortex-A78AE"},
8945 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8946 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8947 | AARCH64_FEATURE_DOTPROD
8948 | AARCH64_FEATURE_PROFILE),
8949 "Ares"},
8950 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8951 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8952 "Samsung Exynos M1"},
8953 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8954 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8955 | AARCH64_FEATURE_RDMA),
8956 "Qualcomm Falkor"},
8957 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8958 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8959 | AARCH64_FEATURE_DOTPROD
8960 | AARCH64_FEATURE_SSBS),
8961 "Neoverse E1"},
8962 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8963 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8964 | AARCH64_FEATURE_DOTPROD
8965 | AARCH64_FEATURE_PROFILE),
8966 "Neoverse N1"},
8967 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
8968 AARCH64_FEATURE_BFLOAT16
8969 | AARCH64_FEATURE_I8MM
8970 | AARCH64_FEATURE_F16
8971 | AARCH64_FEATURE_SVE
8972 | AARCH64_FEATURE_SVE2
8973 | AARCH64_FEATURE_SVE2_BITPERM
8974 | AARCH64_FEATURE_MEMTAG
8975 | AARCH64_FEATURE_RNG),
8976 "Neoverse N2"},
8977 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8978 AARCH64_FEATURE_PROFILE
8979 | AARCH64_FEATURE_CVADP
8980 | AARCH64_FEATURE_SVE
8981 | AARCH64_FEATURE_SSBS
8982 | AARCH64_FEATURE_RNG
8983 | AARCH64_FEATURE_F16
8984 | AARCH64_FEATURE_BFLOAT16
8985 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
8986 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8987 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8988 | AARCH64_FEATURE_RDMA),
8989 "Qualcomm QDF24XX"},
8990 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8991 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
8992 "Qualcomm Saphira"},
8993 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8994 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8995 "Cavium ThunderX"},
8996 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8997 AARCH64_FEATURE_CRYPTO),
8998 "Broadcom Vulcan"},
8999 /* 'xgene-1' is an older name for 'xgene1'; it was used in earlier
9000 releases and has been superseded by 'xgene1' in all
9001 tools. */
9002 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9003 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9004 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9005 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9006 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9007 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9008 AARCH64_FEATURE_F16
9009 | AARCH64_FEATURE_RCPC
9010 | AARCH64_FEATURE_DOTPROD
9011 | AARCH64_FEATURE_SSBS
9012 | AARCH64_FEATURE_PROFILE),
9013 "Cortex-X1"},
9014 {"generic", AARCH64_ARCH_V8, NULL},
9015
9016 {NULL, AARCH64_ARCH_NONE, NULL}
9017 };
9018
9019 struct aarch64_arch_option_table
9020 {
9021 const char *name;
9022 const aarch64_feature_set value;
9023 };
9024
9025 /* This list should, at a minimum, contain all the architecture names
9026 recognized by GCC. */
9027 static const struct aarch64_arch_option_table aarch64_archs[] = {
9028 {"all", AARCH64_ANY},
9029 {"armv8-a", AARCH64_ARCH_V8},
9030 {"armv8.1-a", AARCH64_ARCH_V8_1},
9031 {"armv8.2-a", AARCH64_ARCH_V8_2},
9032 {"armv8.3-a", AARCH64_ARCH_V8_3},
9033 {"armv8.4-a", AARCH64_ARCH_V8_4},
9034 {"armv8.5-a", AARCH64_ARCH_V8_5},
9035 {"armv8.6-a", AARCH64_ARCH_V8_6},
9036 {"armv8.7-a", AARCH64_ARCH_V8_7},
9037 {"armv8-r", AARCH64_ARCH_V8_R},
9038 {NULL, AARCH64_ARCH_NONE}
9039 };
9040
9041 /* ISA extensions. */
9042 struct aarch64_option_cpu_value_table
9043 {
9044 const char *name;
9045 const aarch64_feature_set value;
9046 const aarch64_feature_set require; /* Feature dependencies. */
9047 };
9048
9049 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9050 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9051 AARCH64_ARCH_NONE},
9052 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9053 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9054 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9055 AARCH64_ARCH_NONE},
9056 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9057 AARCH64_ARCH_NONE},
9058 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9059 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9060 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9061 AARCH64_ARCH_NONE},
9062 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9063 AARCH64_ARCH_NONE},
9064 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9065 AARCH64_ARCH_NONE},
9066 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9067 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9068 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9069 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9070 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9071 AARCH64_FEATURE (AARCH64_FEATURE_FP
9072 | AARCH64_FEATURE_F16, 0)},
9073 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9074 AARCH64_ARCH_NONE},
9075 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9076 AARCH64_FEATURE (AARCH64_FEATURE_F16
9077 | AARCH64_FEATURE_SIMD
9078 | AARCH64_FEATURE_COMPNUM, 0)},
9079 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9080 AARCH64_ARCH_NONE},
9081 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9082 AARCH64_FEATURE (AARCH64_FEATURE_F16
9083 | AARCH64_FEATURE_SIMD, 0)},
9084 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9085 AARCH64_ARCH_NONE},
9086 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9087 AARCH64_ARCH_NONE},
9088 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9089 AARCH64_ARCH_NONE},
9090 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9091 AARCH64_ARCH_NONE},
9092 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9093 AARCH64_ARCH_NONE},
9094 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9095 AARCH64_ARCH_NONE},
9096 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9097 AARCH64_ARCH_NONE},
9098 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9099 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9100 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9101 AARCH64_ARCH_NONE},
9102 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9103 AARCH64_ARCH_NONE},
9104 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9105 AARCH64_ARCH_NONE},
9106 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9107 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9108 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9109 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9110 | AARCH64_FEATURE_SM4, 0)},
9111 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9112 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9113 | AARCH64_FEATURE_AES, 0)},
9114 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9115 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9116 | AARCH64_FEATURE_SHA3, 0)},
9117 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9118 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9119 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9120 AARCH64_ARCH_NONE},
9121 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9122 AARCH64_ARCH_NONE},
9123 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9124 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9125 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9126 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9127 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9128 };
9129
9130 struct aarch64_long_option_table
9131 {
9132 const char *option; /* Substring to match. */
9133 const char *help; /* Help information. */
9134 int (*func) (const char *subopt); /* Function to decode sub-option. */
9135 char *deprecated; /* If non-null, print this message. */
9136 };
9137
9138 /* Transitive closure of features depending on set. */
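/* e.g. disabling "fp" also clears "simd", "fp16" and, transitively,
   every other feature in aarch64_features[] that requires them. */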
9139 static aarch64_feature_set
9140 aarch64_feature_disable_set (aarch64_feature_set set)
9141 {
9142 const struct aarch64_option_cpu_value_table *opt;
9143 aarch64_feature_set prev = 0;
9144
9145 while (prev != set) {
9146 prev = set;
9147 for (opt = aarch64_features; opt->name != NULL; opt++)
9148 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9149 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9150 }
9151 return set;
9152 }
9153
9154 /* Transitive closure of dependencies of set. */
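/* e.g. enabling "sve" also turns on "fp16", "compnum", "simd" and "fp",
   following the "require" column of aarch64_features[]. */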
9155 static aarch64_feature_set
9156 aarch64_feature_enable_set (aarch64_feature_set set)
9157 {
9158 const struct aarch64_option_cpu_value_table *opt;
9159 aarch64_feature_set prev = 0;
9160
9161 while (prev != set) {
9162 prev = set;
9163 for (opt = aarch64_features; opt->name != NULL; opt++)
9164 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9165 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9166 }
9167 return set;
9168 }
9169
9170 static int
9171 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9172 bfd_boolean ext_only)
9173 {
9174 /* We insist on extensions being added before being removed. We achieve
9175 this by using the ADDING_VALUE variable to indicate whether we are
9176 adding an extension (1) or removing it (0) and only allowing it to
9177 change in the order -1 -> 1 -> 0. */
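  /* e.g. "+sve+nocrypto" is accepted, whereas "+nocrypto+sve" is
     rejected because an addition follows a removal. */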
9178 int adding_value = -1;
9179 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9180
9181 /* Copy the feature set, so that we can modify it. */
9182 *ext_set = **opt_p;
9183 *opt_p = ext_set;
9184
9185 while (str != NULL && *str != 0)
9186 {
9187 const struct aarch64_option_cpu_value_table *opt;
9188 const char *ext = NULL;
9189 int optlen;
9190
9191 if (!ext_only)
9192 {
9193 if (*str != '+')
9194 {
9195 as_bad (_("invalid architectural extension"));
9196 return 0;
9197 }
9198
9199 ext = strchr (++str, '+');
9200 }
9201
9202 if (ext != NULL)
9203 optlen = ext - str;
9204 else
9205 optlen = strlen (str);
9206
9207 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9208 {
9209 if (adding_value != 0)
9210 adding_value = 0;
9211 optlen -= 2;
9212 str += 2;
9213 }
9214 else if (optlen > 0)
9215 {
9216 if (adding_value == -1)
9217 adding_value = 1;
9218 else if (adding_value != 1)
9219 {
9220 as_bad (_("must specify extensions to add before specifying "
9221 "those to remove"));
9222 return FALSE;
9223 }
9224 }
9225
9226 if (optlen == 0)
9227 {
9228 as_bad (_("missing architectural extension"));
9229 return 0;
9230 }
9231
9232 gas_assert (adding_value != -1);
9233
9234 for (opt = aarch64_features; opt->name != NULL; opt++)
9235 if (strncmp (opt->name, str, optlen) == 0)
9236 {
9237 aarch64_feature_set set;
9238
9239 /* Add or remove the extension. */
9240 if (adding_value)
9241 {
9242 set = aarch64_feature_enable_set (opt->value);
9243 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9244 }
9245 else
9246 {
9247 set = aarch64_feature_disable_set (opt->value);
9248 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9249 }
9250 break;
9251 }
9252
9253 if (opt->name == NULL)
9254 {
9255 as_bad (_("unknown architectural extension `%s'"), str);
9256 return 0;
9257 }
9258
9259 str = ext;
9260 };
9261
9262 return 1;
9263 }
9264
9265 static int
9266 aarch64_parse_cpu (const char *str)
9267 {
9268 const struct aarch64_cpu_option_table *opt;
9269 const char *ext = strchr (str, '+');
9270 size_t optlen;
9271
9272 if (ext != NULL)
9273 optlen = ext - str;
9274 else
9275 optlen = strlen (str);
9276
9277 if (optlen == 0)
9278 {
9279 as_bad (_("missing cpu name `%s'"), str);
9280 return 0;
9281 }
9282
9283 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9284 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9285 {
9286 mcpu_cpu_opt = &opt->value;
9287 if (ext != NULL)
9288 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9289
9290 return 1;
9291 }
9292
9293 as_bad (_("unknown cpu `%s'"), str);
9294 return 0;
9295 }
9296
9297 static int
9298 aarch64_parse_arch (const char *str)
9299 {
9300 const struct aarch64_arch_option_table *opt;
9301 const char *ext = strchr (str, '+');
9302 size_t optlen;
9303
9304 if (ext != NULL)
9305 optlen = ext - str;
9306 else
9307 optlen = strlen (str);
9308
9309 if (optlen == 0)
9310 {
9311 as_bad (_("missing architecture name `%s'"), str);
9312 return 0;
9313 }
9314
9315 for (opt = aarch64_archs; opt->name != NULL; opt++)
9316 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9317 {
9318 march_cpu_opt = &opt->value;
9319 if (ext != NULL)
9320 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9321
9322 return 1;
9323 }
9324
9325 as_bad (_("unknown architecture `%s'\n"), str);
9326 return 0;
9327 }
9328
9329 /* ABIs. */
9330 struct aarch64_option_abi_value_table
9331 {
9332 const char *name;
9333 enum aarch64_abi_type value;
9334 };
9335
9336 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
9337 {"ilp32", AARCH64_ABI_ILP32},
9338 {"lp64", AARCH64_ABI_LP64},
9339 };
9340
9341 static int
9342 aarch64_parse_abi (const char *str)
9343 {
9344 unsigned int i;
9345
9346 if (str[0] == '\0')
9347 {
9348 as_bad (_("missing abi name `%s'"), str);
9349 return 0;
9350 }
9351
9352 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9353 if (strcmp (str, aarch64_abis[i].name) == 0)
9354 {
9355 aarch64_abi = aarch64_abis[i].value;
9356 return 1;
9357 }
9358
9359 as_bad (_("unknown abi `%s'\n"), str);
9360 return 0;
9361 }
9362
9363 static struct aarch64_long_option_table aarch64_long_opts[] = {
9364 #ifdef OBJ_ELF
9365 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
9366 aarch64_parse_abi, NULL},
9367 #endif /* OBJ_ELF */
9368 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
9369 aarch64_parse_cpu, NULL},
9370 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9371 aarch64_parse_arch, NULL},
9372 {NULL, NULL, 0, NULL}
9373 };
9374
9375 int
9376 md_parse_option (int c, const char *arg)
9377 {
9378 struct aarch64_option_table *opt;
9379 struct aarch64_long_option_table *lopt;
9380
9381 switch (c)
9382 {
9383 #ifdef OPTION_EB
9384 case OPTION_EB:
9385 target_big_endian = 1;
9386 break;
9387 #endif
9388
9389 #ifdef OPTION_EL
9390 case OPTION_EL:
9391 target_big_endian = 0;
9392 break;
9393 #endif
9394
9395 case 'a':
9396 /* Listing option. Just ignore these; we don't support additional
9397 ones. */
9398 return 0;
9399
9400 default:
9401 for (opt = aarch64_opts; opt->option != NULL; opt++)
9402 {
9403 if (c == opt->option[0]
9404 && ((arg == NULL && opt->option[1] == 0)
9405 || streq (arg, opt->option + 1)))
9406 {
9407 /* If the option is deprecated, tell the user. */
9408 if (opt->deprecated != NULL)
9409 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9410 arg ? arg : "", _(opt->deprecated));
9411
9412 if (opt->var != NULL)
9413 *opt->var = opt->value;
9414
9415 return 1;
9416 }
9417 }
9418
9419 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9420 {
9421 /* These options are expected to have an argument. */
9422 if (c == lopt->option[0]
9423 && arg != NULL
9424 && strncmp (arg, lopt->option + 1,
9425 strlen (lopt->option + 1)) == 0)
9426 {
9427 /* If the option is deprecated, tell the user. */
9428 if (lopt->deprecated != NULL)
9429 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9430 _(lopt->deprecated));
9431
9432 /* Call the sub-option parser. */
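/* The "- 1" skips the part of the option already matched via ARG;
   e.g. for "-mcpu=cortex-a53", ARG is "cpu=cortex-a53" and the parser
   receives "cortex-a53". */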
9433 return lopt->func (arg + strlen (lopt->option) - 1);
9434 }
9435 }
9436
9437 return 0;
9438 }
9439
9440 return 1;
9441 }
9442
9443 void
9444 md_show_usage (FILE * fp)
9445 {
9446 struct aarch64_option_table *opt;
9447 struct aarch64_long_option_table *lopt;
9448
9449 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9450
9451 for (opt = aarch64_opts; opt->option != NULL; opt++)
9452 if (opt->help != NULL)
9453 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9454
9455 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9456 if (lopt->help != NULL)
9457 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9458
9459 #ifdef OPTION_EB
9460 fprintf (fp, _("\
9461 -EB assemble code for a big-endian cpu\n"));
9462 #endif
9463
9464 #ifdef OPTION_EL
9465 fprintf (fp, _("\
9466 -EL assemble code for a little-endian cpu\n"));
9467 #endif
9468 }
9469
9470 /* Parse a .cpu directive. */
9471
9472 static void
9473 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9474 {
9475 const struct aarch64_cpu_option_table *opt;
9476 char saved_char;
9477 char *name;
9478 char *ext;
9479 size_t optlen;
9480
9481 name = input_line_pointer;
9482 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9483 input_line_pointer++;
9484 saved_char = *input_line_pointer;
9485 *input_line_pointer = 0;
9486
9487 ext = strchr (name, '+');
9488
9489 if (ext != NULL)
9490 optlen = ext - name;
9491 else
9492 optlen = strlen (name);
9493
9494 /* Skip the first "all" entry. */
9495 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9496 if (strlen (opt->name) == optlen
9497 && strncmp (name, opt->name, optlen) == 0)
9498 {
9499 mcpu_cpu_opt = &opt->value;
9500 if (ext != NULL)
9501 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9502 return;
9503
9504 cpu_variant = *mcpu_cpu_opt;
9505
9506 *input_line_pointer = saved_char;
9507 demand_empty_rest_of_line ();
9508 return;
9509 }
9510 as_bad (_("unknown cpu `%s'"), name);
9511 *input_line_pointer = saved_char;
9512 ignore_rest_of_line ();
9513 }
9514
9515
9516 /* Parse a .arch directive. */
9517
9518 static void
9519 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9520 {
9521 const struct aarch64_arch_option_table *opt;
9522 char saved_char;
9523 char *name;
9524 char *ext;
9525 size_t optlen;
9526
9527 name = input_line_pointer;
9528 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9529 input_line_pointer++;
9530 saved_char = *input_line_pointer;
9531 *input_line_pointer = 0;
9532
9533 ext = strchr (name, '+');
9534
9535 if (ext != NULL)
9536 optlen = ext - name;
9537 else
9538 optlen = strlen (name);
9539
9540 /* Skip the first "all" entry. */
9541 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9542 if (strlen (opt->name) == optlen
9543 && strncmp (name, opt->name, optlen) == 0)
9544 {
9545 mcpu_cpu_opt = &opt->value;
9546 if (ext != NULL)
9547 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9548 return;
9549
9550 cpu_variant = *mcpu_cpu_opt;
9551
9552 *input_line_pointer = saved_char;
9553 demand_empty_rest_of_line ();
9554 return;
9555 }
9556
9557 as_bad (_("unknown architecture `%s'\n"), name);
9558 *input_line_pointer = saved_char;
9559 ignore_rest_of_line ();
9560 }
9561
9562 /* Parse a .arch_extension directive. */
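/* e.g. ".arch_extension sve2" enables SVE2 together with the features it
   requires, and ".arch_extension nosve2" disables SVE2 and anything that
   depends on it. */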
9563
9564 static void
9565 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9566 {
9567 char saved_char;
9568 char *ext = input_line_pointer;
9569
9570 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9571 input_line_pointer++;
9572 saved_char = *input_line_pointer;
9573 *input_line_pointer = 0;
9574
9575 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9576 return;
9577
9578 cpu_variant = *mcpu_cpu_opt;
9579
9580 *input_line_pointer = saved_char;
9581 demand_empty_rest_of_line ();
9582 }
9583
9584 /* Copy symbol information. */
9585
9586 void
9587 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9588 {
9589 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
9590 }
9591
9592 #ifdef OBJ_ELF
9593 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9594 This is needed so AArch64 specific st_other values can be independently
9595 specified for an IFUNC resolver (that is called by the dynamic linker)
9596 and the symbol it resolves (aliased to the resolver). In particular,
9597 if a function symbol has special st_other value set via directives,
9598 then attaching an IFUNC resolver to that symbol should not override
9599 the st_other setting. Requiring the directive on the IFUNC resolver
9600 symbol would be unexpected and problematic in C code, where the two
9601 symbols appear as two independent function declarations. */
9602
9603 void
9604 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9605 {
9606 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9607 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9608 if (srcelf->size)
9609 {
9610 if (destelf->size == NULL)
9611 destelf->size = XNEW (expressionS);
9612 *destelf->size = *srcelf->size;
9613 }
9614 else
9615 {
9616 free (destelf->size);
9617 destelf->size = NULL;
9618 }
9619 S_SET_SIZE (dest, S_GET_SIZE (src));
9620 }
9621 #endif