[AArch64][SVE 21/32] Add Zn and Pn registers
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in vector_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92 #define NTA_HASVARWIDTH 4
93
94 struct vector_type_el
95 {
96 enum vector_el_type type;
97 unsigned char defined;
98 unsigned width;
99 int64_t index;
100 };
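/* Illustrative use of the DEFINED bits by the parsers below: "v0.4s" sets
   NTA_HASTYPE, "v0.s[3]" sets NTA_HASINDEX (an indexed element), and an SVE
   register such as "z0.s" or "p0.b" sets NTA_HASVARWIDTH | NTA_HASTYPE,
   since its vector length is not fixed by the operand.  */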
101
102 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
103
104 struct reloc
105 {
106 bfd_reloc_code_real_type type;
107 expressionS exp;
108 int pc_rel;
109 enum aarch64_opnd opnd;
110 uint32_t flags;
111 unsigned need_libopcodes_p : 1;
112 };
113
114 struct aarch64_instruction
115 {
116 /* libopcodes structure for instruction intermediate representation. */
117 aarch64_inst base;
118 /* Record assembly errors found during the parsing. */
119 struct
120 {
121 enum aarch64_operand_error_kind kind;
122 const char *error;
123 } parsing_error;
124 /* The condition that appears in the assembly line. */
125 int cond;
126 /* Relocation information (including the GAS internal fixup). */
127 struct reloc reloc;
128 /* Need to generate an immediate in the literal pool. */
129 unsigned gen_lit_pool : 1;
130 };
131
132 typedef struct aarch64_instruction aarch64_instruction;
133
134 static aarch64_instruction inst;
135
136 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
137 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
138
139 /* Diagnostics inline function utilities.
140
141 These are lightweight utilities which should only be called by parse_operands
142 and other parsers. GAS processes each assembly line by parsing it against
143 instruction template(s); in the case of multiple templates (for the same
144 mnemonic name), those templates are tried one by one until one succeeds or
145 all fail. An assembly line may fail a few templates before being
146 successfully parsed; an error saved here in most cases is not a user error
147 but an error indicating that the current template is not the right template.
148 Therefore it is very important that errors can be saved at a low cost during
149 parsing; we don't want to slow down the whole parse by recording
150 non-user errors in detail.
151
152 Remember that the objective is to help GAS pick the most appropriate
153 error message in the case of multiple templates, e.g. FMOV, which has 8
154 templates. */
155
156 static inline void
157 clear_error (void)
158 {
159 inst.parsing_error.kind = AARCH64_OPDE_NIL;
160 inst.parsing_error.error = NULL;
161 }
162
163 static inline bfd_boolean
164 error_p (void)
165 {
166 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
167 }
168
169 static inline const char *
170 get_error_message (void)
171 {
172 return inst.parsing_error.error;
173 }
174
175 static inline enum aarch64_operand_error_kind
176 get_error_kind (void)
177 {
178 return inst.parsing_error.kind;
179 }
180
181 static inline void
182 set_error (enum aarch64_operand_error_kind kind, const char *error)
183 {
184 inst.parsing_error.kind = kind;
185 inst.parsing_error.error = error;
186 }
187
188 static inline void
189 set_recoverable_error (const char *error)
190 {
191 set_error (AARCH64_OPDE_RECOVERABLE, error);
192 }
193
194 /* Use the DESC field of the corresponding aarch64_operand entry to compose
195 the error message. */
196 static inline void
197 set_default_error (void)
198 {
199 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
200 }
201
202 static inline void
203 set_syntax_error (const char *error)
204 {
205 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
206 }
207
208 static inline void
209 set_first_syntax_error (const char *error)
210 {
211 if (! error_p ())
212 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
213 }
214
215 static inline void
216 set_fatal_syntax_error (const char *error)
217 {
218 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
219 }
220 \f
221 /* Number of littlenums required to hold an extended precision number. */
222 #define MAX_LITTLENUMS 6
223
224 /* Return value for certain parsers when the parsing fails; those parsers
225 return the information of the parsed result, e.g. register number, on
226 success. */
227 #define PARSE_FAIL -1
228
229 /* This is an invalid condition code that means no conditional field is
230 present. */
231 #define COND_ALWAYS 0x10
232
233 typedef struct
234 {
235 const char *template;
236 unsigned long value;
237 } asm_barrier_opt;
238
239 typedef struct
240 {
241 const char *template;
242 uint32_t value;
243 } asm_nzcv;
244
245 struct reloc_entry
246 {
247 char *name;
248 bfd_reloc_code_real_type reloc;
249 };
250
251 /* Macros to define the register types and masks for the purpose
252 of parsing. */
253
254 #undef AARCH64_REG_TYPES
255 #define AARCH64_REG_TYPES \
256 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
257 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
258 BASIC_REG_TYPE(SP_32) /* wsp */ \
259 BASIC_REG_TYPE(SP_64) /* sp */ \
260 BASIC_REG_TYPE(Z_32) /* wzr */ \
261 BASIC_REG_TYPE(Z_64) /* xzr */ \
262 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
263 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
264 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
265 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
266 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
267 BASIC_REG_TYPE(CN) /* c[0-15] */ \
268 BASIC_REG_TYPE(VN) /* v[0-31] */ \
269 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
270 BASIC_REG_TYPE(PN) /* p[0-15] */ \
271 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
272 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
273 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
274 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
275 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
276 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
277 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
278 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum aarch64_reg_type_
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Structure for a hash table entry for a register. */
318 typedef struct
319 {
320 const char *name;
321 unsigned char number;
322 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
323 unsigned char builtin;
324 } reg_entry;
325
326 /* Values indexed by aarch64_reg_type to assist the type checking. */
327 static const unsigned reg_type_masks[] =
328 {
329 AARCH64_REG_TYPES
330 };
331
332 #undef BASIC_REG_TYPE
333 #undef REG_TYPE
334 #undef MULTI_REG_TYPE
335 #undef AARCH64_REG_TYPES
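/* For illustration: after the expansions above, REG_TYPE_R_64 is a plain
   enumerator while reg_type_masks[REG_TYPE_R_SP] has the bits for R_32,
   R_64, SP_32 and SP_64 set, so an entry whose type is REG_TYPE_SP_64
   ("sp") is accepted when R_SP is requested but rejected when plain R_64
   is requested (see aarch64_check_reg_type below).  */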
336
337 /* Diagnostics used when we don't get a register of the expected type.
338 Note: this has to be kept synchronized with the aarch64_reg_type definitions
339 above. */
340 static const char *
341 get_reg_expected_msg (aarch64_reg_type reg_type)
342 {
343 const char *msg;
344
345 switch (reg_type)
346 {
347 case REG_TYPE_R_32:
348 msg = N_("integer 32-bit register expected");
349 break;
350 case REG_TYPE_R_64:
351 msg = N_("integer 64-bit register expected");
352 break;
353 case REG_TYPE_R_N:
354 msg = N_("integer register expected");
355 break;
356 case REG_TYPE_R64_SP:
357 msg = N_("64-bit integer or SP register expected");
358 break;
359 case REG_TYPE_R_Z:
360 msg = N_("integer or zero register expected");
361 break;
362 case REG_TYPE_R_SP:
363 msg = N_("integer or SP register expected");
364 break;
365 case REG_TYPE_R_Z_SP:
366 msg = N_("integer, zero or SP register expected");
367 break;
368 case REG_TYPE_FP_B:
369 msg = N_("8-bit SIMD scalar register expected");
370 break;
371 case REG_TYPE_FP_H:
372 msg = N_("16-bit SIMD scalar or floating-point half precision "
373 "register expected");
374 break;
375 case REG_TYPE_FP_S:
376 msg = N_("32-bit SIMD scalar or floating-point single precision "
377 "register expected");
378 break;
379 case REG_TYPE_FP_D:
380 msg = N_("64-bit SIMD scalar or floating-point double precision "
381 "register expected");
382 break;
383 case REG_TYPE_FP_Q:
384 msg = N_("128-bit SIMD scalar or floating-point quad precision "
385 "register expected");
386 break;
387 case REG_TYPE_CN:
388 msg = N_("C0 - C15 expected");
389 break;
390 case REG_TYPE_R_Z_BHSDQ_V:
391 msg = N_("register expected");
392 break;
393 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
394 msg = N_("SIMD scalar or floating-point register expected");
395 break;
396 case REG_TYPE_VN: /* any V reg */
397 msg = N_("vector register expected");
398 break;
399 case REG_TYPE_ZN:
400 msg = N_("SVE vector register expected");
401 break;
402 case REG_TYPE_PN:
403 msg = N_("SVE predicate register expected");
404 break;
405 default:
406 as_fatal (_("invalid register type %d"), reg_type);
407 }
408 return msg;
409 }
410
411 /* Some well known registers that we refer to directly elsewhere. */
412 #define REG_SP 31
413
414 /* Instructions take 4 bytes in the object file. */
415 #define INSN_SIZE 4
416
417 static struct hash_control *aarch64_ops_hsh;
418 static struct hash_control *aarch64_cond_hsh;
419 static struct hash_control *aarch64_shift_hsh;
420 static struct hash_control *aarch64_sys_regs_hsh;
421 static struct hash_control *aarch64_pstatefield_hsh;
422 static struct hash_control *aarch64_sys_regs_ic_hsh;
423 static struct hash_control *aarch64_sys_regs_dc_hsh;
424 static struct hash_control *aarch64_sys_regs_at_hsh;
425 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
426 static struct hash_control *aarch64_reg_hsh;
427 static struct hash_control *aarch64_barrier_opt_hsh;
428 static struct hash_control *aarch64_nzcv_hsh;
429 static struct hash_control *aarch64_pldop_hsh;
430 static struct hash_control *aarch64_hint_opt_hsh;
431
432 /* Stuff needed to resolve the label ambiguity
433 As:
434 ...
435 label: <insn>
436 may differ from:
437 ...
438 label:
439 <insn> */
440
441 static symbolS *last_label_seen;
442
443 /* Literal pool structure. Held on a per-section
444 and per-sub-section basis. */
445
446 #define MAX_LITERAL_POOL_SIZE 1024
447 typedef struct literal_expression
448 {
449 expressionS exp;
450 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
451 LITTLENUM_TYPE * bignum;
452 } literal_expression;
453
454 typedef struct literal_pool
455 {
456 literal_expression literals[MAX_LITERAL_POOL_SIZE];
457 unsigned int next_free_entry;
458 unsigned int id;
459 symbolS *symbol;
460 segT section;
461 subsegT sub_section;
462 int size;
463 struct literal_pool *next;
464 } literal_pool;
465
466 /* Pointer to a linked list of literal pools. */
467 static literal_pool *list_of_pools = NULL;
468 \f
469 /* Pure syntax. */
470
471 /* This array holds the chars that always start a comment. If the
472 pre-processor is disabled, these aren't very useful. */
473 const char comment_chars[] = "";
474
475 /* This array holds the chars that only start a comment at the beginning of
476 a line. If the line seems to have the form '# 123 filename'
477 .line and .file directives will appear in the pre-processed output. */
478 /* Note that input_file.c hand checks for '#' at the beginning of the
479 first line of the input file. This is because the compiler outputs
480 #NO_APP at the beginning of its output. */
481 /* Also note that comments like this one will always work. */
482 const char line_comment_chars[] = "#";
483
484 const char line_separator_chars[] = ";";
485
486 /* Chars that can be used to separate the mantissa
487 from the exponent in floating point numbers. */
488 const char EXP_CHARS[] = "eE";
489
490 /* Chars that mean this number is a floating point constant. */
491 /* As in 0f12.456 */
492 /* or 0d1.2345e12 */
493
494 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
495
496 /* Prefix character that indicates the start of an immediate value. */
497 #define is_immediate_prefix(C) ((C) == '#')
498
499 /* Separator character handling. */
500
501 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
502
503 static inline bfd_boolean
504 skip_past_char (char **str, char c)
505 {
506 if (**str == c)
507 {
508 (*str)++;
509 return TRUE;
510 }
511 else
512 return FALSE;
513 }
514
515 #define skip_past_comma(str) skip_past_char (str, ',')
516
517 /* Arithmetic expressions (possibly involving symbols). */
518
519 static bfd_boolean in_my_get_expression_p = FALSE;
520
521 /* Third argument to my_get_expression. */
522 #define GE_NO_PREFIX 0
523 #define GE_OPT_PREFIX 1
524
525 /* Return TRUE if the string pointed to by *STR is successfully parsed
526 as a valid expression; *EP will be filled with the information of
527 such an expression. Otherwise return FALSE. */
528
529 static bfd_boolean
530 my_get_expression (expressionS * ep, char **str, int prefix_mode,
531 int reject_absent)
532 {
533 char *save_in;
534 segT seg;
535 int prefix_present_p = 0;
536
537 switch (prefix_mode)
538 {
539 case GE_NO_PREFIX:
540 break;
541 case GE_OPT_PREFIX:
542 if (is_immediate_prefix (**str))
543 {
544 (*str)++;
545 prefix_present_p = 1;
546 }
547 break;
548 default:
549 abort ();
550 }
551
552 memset (ep, 0, sizeof (expressionS));
553
554 save_in = input_line_pointer;
555 input_line_pointer = *str;
556 in_my_get_expression_p = TRUE;
557 seg = expression (ep);
558 in_my_get_expression_p = FALSE;
559
560 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
561 {
562 /* We found a bad expression in md_operand(). */
563 *str = input_line_pointer;
564 input_line_pointer = save_in;
565 if (prefix_present_p && ! error_p ())
566 set_fatal_syntax_error (_("bad expression"));
567 else
568 set_first_syntax_error (_("bad expression"));
569 return FALSE;
570 }
571
572 #ifdef OBJ_AOUT
573 if (seg != absolute_section
574 && seg != text_section
575 && seg != data_section
576 && seg != bss_section && seg != undefined_section)
577 {
578 set_syntax_error (_("bad segment"));
579 *str = input_line_pointer;
580 input_line_pointer = save_in;
581 return FALSE;
582 }
583 #else
584 (void) seg;
585 #endif
586
587 *str = input_line_pointer;
588 input_line_pointer = save_in;
589 return TRUE;
590 }
591
592 /* Turn a string in input_line_pointer into a floating point constant
593 of type TYPE, and store the appropriate bytes in *LITP. The number
594 of LITTLENUMS emitted is stored in *SIZEP. An error message is
595 returned, or NULL on OK. */
596
597 const char *
598 md_atof (int type, char *litP, int *sizeP)
599 {
600 return ieee_md_atof (type, litP, sizeP, target_big_endian);
601 }
602
603 /* We handle all bad expressions here, so that we can report the faulty
604 instruction in the error message. */
605 void
606 md_operand (expressionS * exp)
607 {
608 if (in_my_get_expression_p)
609 exp->X_op = O_illegal;
610 }
611
612 /* Immediate values. */
613
614 /* Errors may be set multiple times during parsing or bit encoding
615 (particularly in the Neon bits), but usually the earliest error which is set
616 will be the most meaningful. Avoid overwriting it with later (cascading)
617 errors by calling this function. */
618
619 static void
620 first_error (const char *error)
621 {
622 if (! error_p ())
623 set_syntax_error (error);
624 }
625
626 /* Similar to first_error, but this function accepts a formatted error
627 message. */
628 static void
629 first_error_fmt (const char *format, ...)
630 {
631 va_list args;
632 enum
633 { size = 100 };
634 /* N.B. this single buffer will not cause error messages for different
635 instructions to pollute each other; this is because at the end of
636 processing of each assembly line, the error message, if any, will be
637 collected by as_bad. */
638 static char buffer[size];
639
640 if (! error_p ())
641 {
642 int ret ATTRIBUTE_UNUSED;
643 va_start (args, format);
644 ret = vsnprintf (buffer, size, format, args);
645 know (ret <= size - 1 && ret >= 0);
646 va_end (args);
647 set_syntax_error (buffer);
648 }
649 }
650
651 /* Register parsing. */
652
653 /* Generic register parser which is called by other specialized
654 register parsers.
655 CCP points to what should be the beginning of a register name.
656 If it is indeed a valid register name, advance CCP over it and
657 return the reg_entry structure; otherwise return NULL.
658 It does not issue diagnostics. */
659
660 static reg_entry *
661 parse_reg (char **ccp)
662 {
663 char *start = *ccp;
664 char *p;
665 reg_entry *reg;
666
667 #ifdef REGISTER_PREFIX
668 if (*start != REGISTER_PREFIX)
669 return NULL;
670 start++;
671 #endif
672
673 p = start;
674 if (!ISALPHA (*p) || !is_name_beginner (*p))
675 return NULL;
676
677 do
678 p++;
679 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
680
681 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
682
683 if (!reg)
684 return NULL;
685
686 *ccp = p;
687 return reg;
688 }
689
690 /* Return TRUE if REG->TYPE is compatible with the required register
691 type TYPE; otherwise return FALSE. */
692 static bfd_boolean
693 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
694 {
695 return (reg_type_masks[type] & (1 << reg->type)) != 0;
696 }
697
698 /* Try to parse a base or offset register. Return the register entry
699 on success, setting *QUALIFIER to the register qualifier. Return null
700 otherwise.
701
702 Note that this function does not issue any diagnostics. */
703
704 static const reg_entry *
705 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
706 {
707 char *str = *ccp;
708 const reg_entry *reg = parse_reg (&str);
709
710 if (reg == NULL)
711 return NULL;
712
713 switch (reg->type)
714 {
715 case REG_TYPE_R_32:
716 case REG_TYPE_SP_32:
717 case REG_TYPE_Z_32:
718 *qualifier = AARCH64_OPND_QLF_W;
719 break;
720
721 case REG_TYPE_R_64:
722 case REG_TYPE_SP_64:
723 case REG_TYPE_Z_64:
724 *qualifier = AARCH64_OPND_QLF_X;
725 break;
726
727 default:
728 return NULL;
729 }
730
731 *ccp = str;
732
733 return reg;
734 }
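/* Illustrative behaviour of the parser above: "w7" returns the entry for
   register 7 with *QUALIFIER set to AARCH64_OPND_QLF_W, "sp" returns
   register 31 with AARCH64_OPND_QLF_X, and "v0" returns NULL because vector
   registers are not handled by this parser.  */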
735
736 /* Parse the qualifier of a vector register or vector element of type
737 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
738 succeeds; otherwise return FALSE.
739
740 Accept only one occurrence of:
741 8b 16b 2h 4h 8h 2s 4s 1d 2d
742 b h s d q */
743 static bfd_boolean
744 parse_vector_type_for_operand (aarch64_reg_type reg_type,
745 struct vector_type_el *parsed_type, char **str)
746 {
747 char *ptr = *str;
748 unsigned width;
749 unsigned element_size;
750 enum vector_el_type type;
751
752 /* skip '.' */
753 ptr++;
754
755 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
756 {
757 width = 0;
758 goto elt_size;
759 }
760 width = strtoul (ptr, &ptr, 10);
761 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
762 {
763 first_error_fmt (_("bad size %d in vector width specifier"), width);
764 return FALSE;
765 }
766
767 elt_size:
768 switch (TOLOWER (*ptr))
769 {
770 case 'b':
771 type = NT_b;
772 element_size = 8;
773 break;
774 case 'h':
775 type = NT_h;
776 element_size = 16;
777 break;
778 case 's':
779 type = NT_s;
780 element_size = 32;
781 break;
782 case 'd':
783 type = NT_d;
784 element_size = 64;
785 break;
786 case 'q':
787 if (width == 1)
788 {
789 type = NT_q;
790 element_size = 128;
791 break;
792 }
793 /* fall through. */
794 default:
795 if (*ptr != '\0')
796 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
797 else
798 first_error (_("missing element size"));
799 return FALSE;
800 }
801 if (width != 0 && width * element_size != 64 && width * element_size != 128
802 && !(width == 2 && element_size == 16))
803 {
804 first_error_fmt (_
805 ("invalid element size %d and vector size combination %c"),
806 width, *ptr);
807 return FALSE;
808 }
809 ptr++;
810
811 parsed_type->type = type;
812 parsed_type->width = width;
813
814 *str = ptr;
815
816 return TRUE;
817 }
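/* For example: ".4s" parses to type NT_s with width 4 (4 * 32 == 128 bits),
   ".2h" is accepted as the special 32-bit case, while ".8s" is rejected.
   For SVE registers (REG_TYPE_ZN and REG_TYPE_PN) only a bare element size
   such as ".s" is accepted and the width is left as 0.  */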
818
819 /* Parse a register of the type TYPE.
820
821 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
822 name or the parsed register is not of TYPE.
823
824 Otherwise return the register number, and optionally fill in the actual
825 type of the register in *RTYPE when multiple alternatives were given, and
826 return the register shape and element index information in *TYPEINFO.
827
828 IN_REG_LIST should be set with TRUE if the caller is parsing a register
829 list. */
830
831 static int
832 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
833 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
834 {
835 char *str = *ccp;
836 const reg_entry *reg = parse_reg (&str);
837 struct vector_type_el atype;
838 struct vector_type_el parsetype;
839 bfd_boolean is_typed_vecreg = FALSE;
840
841 atype.defined = 0;
842 atype.type = NT_invtype;
843 atype.width = -1;
844 atype.index = 0;
845
846 if (reg == NULL)
847 {
848 if (typeinfo)
849 *typeinfo = atype;
850 set_default_error ();
851 return PARSE_FAIL;
852 }
853
854 if (! aarch64_check_reg_type (reg, type))
855 {
856 DEBUG_TRACE ("reg type check failed");
857 set_default_error ();
858 return PARSE_FAIL;
859 }
860 type = reg->type;
861
862 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
863 && *str == '.')
864 {
865 if (!parse_vector_type_for_operand (type, &parsetype, &str))
866 return PARSE_FAIL;
867
868 /* Register is of the form Vn.[bhsdq]. */
869 is_typed_vecreg = TRUE;
870
871 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
872 {
873 /* The width is always variable; we don't allow an integer width
874 to be specified. */
875 gas_assert (parsetype.width == 0);
876 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
877 }
878 else if (parsetype.width == 0)
879 /* Expect index. In the new scheme we cannot have
880 Vn.[bhsdq] represent a scalar. Therefore any
881 Vn.[bhsdq] should have an index following it.
882 Except in register lists, of course. */
883 atype.defined |= NTA_HASINDEX;
884 else
885 atype.defined |= NTA_HASTYPE;
886
887 atype.type = parsetype.type;
888 atype.width = parsetype.width;
889 }
890
891 if (skip_past_char (&str, '['))
892 {
893 expressionS exp;
894
895 /* Reject Sn[index] syntax. */
896 if (!is_typed_vecreg)
897 {
898 first_error (_("this type of register can't be indexed"));
899 return PARSE_FAIL;
900 }
901
902 if (in_reg_list == TRUE)
903 {
904 first_error (_("index not allowed inside register list"));
905 return PARSE_FAIL;
906 }
907
908 atype.defined |= NTA_HASINDEX;
909
910 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
911
912 if (exp.X_op != O_constant)
913 {
914 first_error (_("constant expression required"));
915 return PARSE_FAIL;
916 }
917
918 if (! skip_past_char (&str, ']'))
919 return PARSE_FAIL;
920
921 atype.index = exp.X_add_number;
922 }
923 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
924 {
925 /* Indexed vector register expected. */
926 first_error (_("indexed vector register expected"));
927 return PARSE_FAIL;
928 }
929
930 /* A vector reg Vn should be typed or indexed. */
931 if (type == REG_TYPE_VN && atype.defined == 0)
932 {
933 first_error (_("invalid use of vector register"));
934 }
935
936 if (typeinfo)
937 *typeinfo = atype;
938
939 if (rtype)
940 *rtype = type;
941
942 *ccp = str;
943
944 return reg->number;
945 }
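/* Illustrative results: "v3.4s" yields register number 3 with *TYPEINFO
   holding NT_s, NTA_HASTYPE and width 4; "v3.s[1]" yields number 3 with
   NTA_HASINDEX and index 1; "z5.d" yields number 5 with
   NTA_HASVARWIDTH | NTA_HASTYPE and width 0, since SVE registers never
   carry a fixed width.  */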
946
947 /* Parse register.
948
949 Return the register number on success; return PARSE_FAIL otherwise.
950
951 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
952 the register (e.g. NEON double or quad reg when either has been requested).
953
954 If this is a NEON vector register with additional type information, fill
955 in the struct pointed to by VECTYPE (if non-NULL).
956
957 This parser does not handle register lists. */
958
959 static int
960 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
961 aarch64_reg_type *rtype, struct vector_type_el *vectype)
962 {
963 struct vector_type_el atype;
964 char *str = *ccp;
965 int reg = parse_typed_reg (&str, type, rtype, &atype,
966 /*in_reg_list= */ FALSE);
967
968 if (reg == PARSE_FAIL)
969 return PARSE_FAIL;
970
971 if (vectype)
972 *vectype = atype;
973
974 *ccp = str;
975
976 return reg;
977 }
978
979 static inline bfd_boolean
980 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
981 {
982 return
983 e1.type == e2.type
984 && e1.defined == e2.defined
985 && e1.width == e2.width && e1.index == e2.index;
986 }
987
988 /* This function parses a list of vector registers of type TYPE.
989 On success, it returns the parsed register list information in the
990 following encoded format:
991
992 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
993 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
994
995 The information of the register shape and/or index is returned in
996 *VECTYPE.
997
998 It returns PARSE_FAIL if the register list is invalid.
999
1000 The list contains one to four registers.
1001 Each register can be one of:
1002 <Vt>.<T>[<index>]
1003 <Vt>.<T>
1004 All <T> should be identical.
1005 All <index> should be identical.
1006 There are restrictions on <Vt> numbers which are checked later
1007 (by reg_list_valid_p). */
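/* As an illustrative example of the encoding above: the list
   "{v2.4s, v3.4s, v4.4s}" is returned as (2 << 2) | (3 << 7) | (4 << 12)
   with the value 2 (number of registers minus one) in bits 0-1, while
   *VECTYPE records the ".4s" shape shared by all three registers.  */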
1008
1009 static int
1010 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1011 struct vector_type_el *vectype)
1012 {
1013 char *str = *ccp;
1014 int nb_regs;
1015 struct vector_type_el typeinfo, typeinfo_first;
1016 int val, val_range;
1017 int in_range;
1018 int ret_val;
1019 int i;
1020 bfd_boolean error = FALSE;
1021 bfd_boolean expect_index = FALSE;
1022
1023 if (*str != '{')
1024 {
1025 set_syntax_error (_("expecting {"));
1026 return PARSE_FAIL;
1027 }
1028 str++;
1029
1030 nb_regs = 0;
1031 typeinfo_first.defined = 0;
1032 typeinfo_first.type = NT_invtype;
1033 typeinfo_first.width = -1;
1034 typeinfo_first.index = 0;
1035 ret_val = 0;
1036 val = -1;
1037 val_range = -1;
1038 in_range = 0;
1039 do
1040 {
1041 if (in_range)
1042 {
1043 str++; /* skip over '-' */
1044 val_range = val;
1045 }
1046 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1047 /*in_reg_list= */ TRUE);
1048 if (val == PARSE_FAIL)
1049 {
1050 set_first_syntax_error (_("invalid vector register in list"));
1051 error = TRUE;
1052 continue;
1053 }
1054 /* reject [bhsd]n */
1055 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1056 {
1057 set_first_syntax_error (_("invalid scalar register in list"));
1058 error = TRUE;
1059 continue;
1060 }
1061
1062 if (typeinfo.defined & NTA_HASINDEX)
1063 expect_index = TRUE;
1064
1065 if (in_range)
1066 {
1067 if (val < val_range)
1068 {
1069 set_first_syntax_error
1070 (_("invalid range in vector register list"));
1071 error = TRUE;
1072 }
1073 val_range++;
1074 }
1075 else
1076 {
1077 val_range = val;
1078 if (nb_regs == 0)
1079 typeinfo_first = typeinfo;
1080 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1081 {
1082 set_first_syntax_error
1083 (_("type mismatch in vector register list"));
1084 error = TRUE;
1085 }
1086 }
1087 if (! error)
1088 for (i = val_range; i <= val; i++)
1089 {
1090 ret_val |= i << (5 * nb_regs);
1091 nb_regs++;
1092 }
1093 in_range = 0;
1094 }
1095 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1096
1097 skip_whitespace (str);
1098 if (*str != '}')
1099 {
1100 set_first_syntax_error (_("end of vector register list not found"));
1101 error = TRUE;
1102 }
1103 str++;
1104
1105 skip_whitespace (str);
1106
1107 if (expect_index)
1108 {
1109 if (skip_past_char (&str, '['))
1110 {
1111 expressionS exp;
1112
1113 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1114 if (exp.X_op != O_constant)
1115 {
1116 set_first_syntax_error (_("constant expression required."));
1117 error = TRUE;
1118 }
1119 if (! skip_past_char (&str, ']'))
1120 error = TRUE;
1121 else
1122 typeinfo_first.index = exp.X_add_number;
1123 }
1124 else
1125 {
1126 set_first_syntax_error (_("expected index"));
1127 error = TRUE;
1128 }
1129 }
1130
1131 if (nb_regs > 4)
1132 {
1133 set_first_syntax_error (_("too many registers in vector register list"));
1134 error = TRUE;
1135 }
1136 else if (nb_regs == 0)
1137 {
1138 set_first_syntax_error (_("empty vector register list"));
1139 error = TRUE;
1140 }
1141
1142 *ccp = str;
1143 if (! error)
1144 *vectype = typeinfo_first;
1145
1146 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1147 }
1148
1149 /* Directives: register aliases. */
1150
1151 static reg_entry *
1152 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1153 {
1154 reg_entry *new;
1155 const char *name;
1156
1157 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1158 {
1159 if (new->builtin)
1160 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1161 str);
1162
1163 /* Only warn about a redefinition if it's not defined as the
1164 same register. */
1165 else if (new->number != number || new->type != type)
1166 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1167
1168 return NULL;
1169 }
1170
1171 name = xstrdup (str);
1172 new = XNEW (reg_entry);
1173
1174 new->name = name;
1175 new->number = number;
1176 new->type = type;
1177 new->builtin = FALSE;
1178
1179 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1180 abort ();
1181
1182 return new;
1183 }
1184
1185 /* Look for the .req directive. This is of the form:
1186
1187 new_register_name .req existing_register_name
1188
1189 If we find one, or if it looks sufficiently like one that we want to
1190 handle any error here, return TRUE. Otherwise return FALSE. */
1191
1192 static bfd_boolean
1193 create_register_alias (char *newname, char *p)
1194 {
1195 const reg_entry *old;
1196 char *oldname, *nbuf;
1197 size_t nlen;
1198
1199 /* The input scrubber ensures that whitespace after the mnemonic is
1200 collapsed to single spaces. */
1201 oldname = p;
1202 if (strncmp (oldname, " .req ", 6) != 0)
1203 return FALSE;
1204
1205 oldname += 6;
1206 if (*oldname == '\0')
1207 return FALSE;
1208
1209 old = hash_find (aarch64_reg_hsh, oldname);
1210 if (!old)
1211 {
1212 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1213 return TRUE;
1214 }
1215
1216 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1217 the desired alias name, and p points to its end. If not, then
1218 the desired alias name is in the global original_case_string. */
1219 #ifdef TC_CASE_SENSITIVE
1220 nlen = p - newname;
1221 #else
1222 newname = original_case_string;
1223 nlen = strlen (newname);
1224 #endif
1225
1226 nbuf = xmemdup0 (newname, nlen);
1227
1228 /* Create aliases under the new name as stated; an all-lowercase
1229 version of the new name; and an all-uppercase version of the new
1230 name. */
1231 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1232 {
1233 for (p = nbuf; *p; p++)
1234 *p = TOUPPER (*p);
1235
1236 if (strncmp (nbuf, newname, nlen))
1237 {
1238 /* If this attempt to create an additional alias fails, do not bother
1239 trying to create the all-lower case alias. We will fail and issue
1240 a second, duplicate error message. This situation arises when the
1241 programmer does something like:
1242 foo .req r0
1243 Foo .req r1
1244 The second .req creates the "Foo" alias but then fails to create
1245 the artificial FOO alias because it has already been created by the
1246 first .req. */
1247 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1248 {
1249 free (nbuf);
1250 return TRUE;
1251 }
1252 }
1253
1254 for (p = nbuf; *p; p++)
1255 *p = TOLOWER (*p);
1256
1257 if (strncmp (nbuf, newname, nlen))
1258 insert_reg_alias (nbuf, old->number, old->type);
1259 }
1260
1261 free (nbuf);
1262 return TRUE;
1263 }
1264
1265 /* Should never be called, as .req goes between the alias and the
1266 register name, not at the beginning of the line. */
1267 static void
1268 s_req (int a ATTRIBUTE_UNUSED)
1269 {
1270 as_bad (_("invalid syntax for .req directive"));
1271 }
1272
1273 /* The .unreq directive deletes an alias which was previously defined
1274 by .req. For example:
1275
1276 my_alias .req r11
1277 .unreq my_alias */
1278
1279 static void
1280 s_unreq (int a ATTRIBUTE_UNUSED)
1281 {
1282 char *name;
1283 char saved_char;
1284
1285 name = input_line_pointer;
1286
1287 while (*input_line_pointer != 0
1288 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1289 ++input_line_pointer;
1290
1291 saved_char = *input_line_pointer;
1292 *input_line_pointer = 0;
1293
1294 if (!*name)
1295 as_bad (_("invalid syntax for .unreq directive"));
1296 else
1297 {
1298 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1299
1300 if (!reg)
1301 as_bad (_("unknown register alias '%s'"), name);
1302 else if (reg->builtin)
1303 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1304 name);
1305 else
1306 {
1307 char *p;
1308 char *nbuf;
1309
1310 hash_delete (aarch64_reg_hsh, name, FALSE);
1311 free ((char *) reg->name);
1312 free (reg);
1313
1314 /* Also locate the all upper case and all lower case versions.
1315 Do not complain if we cannot find one or the other as it
1316 was probably deleted above. */
1317
1318 nbuf = strdup (name);
1319 for (p = nbuf; *p; p++)
1320 *p = TOUPPER (*p);
1321 reg = hash_find (aarch64_reg_hsh, nbuf);
1322 if (reg)
1323 {
1324 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327 }
1328
1329 for (p = nbuf; *p; p++)
1330 *p = TOLOWER (*p);
1331 reg = hash_find (aarch64_reg_hsh, nbuf);
1332 if (reg)
1333 {
1334 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1335 free ((char *) reg->name);
1336 free (reg);
1337 }
1338
1339 free (nbuf);
1340 }
1341 }
1342
1343 *input_line_pointer = saved_char;
1344 demand_empty_rest_of_line ();
1345 }
1346
1347 /* Directives: Instruction set selection. */
1348
1349 #ifdef OBJ_ELF
1350 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1351 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1352 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1353 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1354
1355 /* Create a new mapping symbol for the transition to STATE. */
1356
1357 static void
1358 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1359 {
1360 symbolS *symbolP;
1361 const char *symname;
1362 int type;
1363
1364 switch (state)
1365 {
1366 case MAP_DATA:
1367 symname = "$d";
1368 type = BSF_NO_FLAGS;
1369 break;
1370 case MAP_INSN:
1371 symname = "$x";
1372 type = BSF_NO_FLAGS;
1373 break;
1374 default:
1375 abort ();
1376 }
1377
1378 symbolP = symbol_new (symname, now_seg, value, frag);
1379 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1380
1381 /* Save the mapping symbols for future reference. Also check that
1382 we do not place two mapping symbols at the same offset within a
1383 frag. We'll handle overlap between frags in
1384 check_mapping_symbols.
1385
1386 If .fill or other data filling directive generates zero sized data,
1387 the mapping symbol for the following code will have the same value
1388 as the one generated for the data filling directive. In this case,
1389 we replace the old symbol with the new one at the same address. */
1390 if (value == 0)
1391 {
1392 if (frag->tc_frag_data.first_map != NULL)
1393 {
1394 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1395 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1396 &symbol_lastP);
1397 }
1398 frag->tc_frag_data.first_map = symbolP;
1399 }
1400 if (frag->tc_frag_data.last_map != NULL)
1401 {
1402 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1403 S_GET_VALUE (symbolP));
1404 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1405 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1406 &symbol_lastP);
1407 }
1408 frag->tc_frag_data.last_map = symbolP;
1409 }
1410
1411 /* We must sometimes convert a region marked as code to data during
1412 code alignment, if an odd number of bytes have to be padded. The
1413 code mapping symbol is pushed to an aligned address. */
1414
1415 static void
1416 insert_data_mapping_symbol (enum mstate state,
1417 valueT value, fragS * frag, offsetT bytes)
1418 {
1419 /* If there was already a mapping symbol, remove it. */
1420 if (frag->tc_frag_data.last_map != NULL
1421 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1422 frag->fr_address + value)
1423 {
1424 symbolS *symp = frag->tc_frag_data.last_map;
1425
1426 if (value == 0)
1427 {
1428 know (frag->tc_frag_data.first_map == symp);
1429 frag->tc_frag_data.first_map = NULL;
1430 }
1431 frag->tc_frag_data.last_map = NULL;
1432 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1433 }
1434
1435 make_mapping_symbol (MAP_DATA, value, frag);
1436 make_mapping_symbol (state, value + bytes, frag);
1437 }
1438
1439 static void mapping_state_2 (enum mstate state, int max_chars);
1440
1441 /* Set the mapping state to STATE. Only call this when about to
1442 emit some STATE bytes to the file. */
1443
1444 void
1445 mapping_state (enum mstate state)
1446 {
1447 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1448
1449 if (state == MAP_INSN)
1450 /* AArch64 instructions require 4-byte alignment. When emitting
1451 instructions into any section, record the appropriate section
1452 alignment. */
1453 record_alignment (now_seg, 2);
1454
1455 if (mapstate == state)
1456 /* The mapping symbol has already been emitted.
1457 There is nothing else to do. */
1458 return;
1459
1460 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1461 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1462 /* Emit MAP_DATA within an executable section in order. Otherwise, it
1463 will be evaluated later in the next else branch. */
1464 return;
1465 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1466 {
1467 /* Only add the symbol if the offset is > 0:
1468 if we're at the first frag, check its size is > 0;
1469 if we're not at the first frag, then for sure
1470 the offset is > 0. */
1471 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1472 const int add_symbol = (frag_now != frag_first)
1473 || (frag_now_fix () > 0);
1474
1475 if (add_symbol)
1476 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1477 }
1478 #undef TRANSITION
1479
1480 mapping_state_2 (state, 0);
1481 }
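/* Illustrative effect of the mapping-state machinery: a section that
   interleaves data and instructions, e.g.

     .text
     .word 0x1234        // "$d" mapping symbol emitted here
     add x0, x1, x2      // "$x" mapping symbol emitted here

   carries "$d" and "$x" symbols so that tools such as objdump can switch
   between data and instruction views while disassembling.  */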
1482
1483 /* Same as mapping_state, but MAX_CHARS bytes have already been
1484 allocated. Put the mapping symbol that far back. */
1485
1486 static void
1487 mapping_state_2 (enum mstate state, int max_chars)
1488 {
1489 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1490
1491 if (!SEG_NORMAL (now_seg))
1492 return;
1493
1494 if (mapstate == state)
1495 /* The mapping symbol has already been emitted.
1496 There is nothing else to do. */
1497 return;
1498
1499 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1500 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1501 }
1502 #else
1503 #define mapping_state(x) /* nothing */
1504 #define mapping_state_2(x, y) /* nothing */
1505 #endif
1506
1507 /* Directives: sectioning and alignment. */
1508
1509 static void
1510 s_bss (int ignore ATTRIBUTE_UNUSED)
1511 {
1512 /* We don't support putting frags in the BSS segment, we fake it by
1513 marking in_bss, then looking at s_skip for clues. */
1514 subseg_set (bss_section, 0);
1515 demand_empty_rest_of_line ();
1516 mapping_state (MAP_DATA);
1517 }
1518
1519 static void
1520 s_even (int ignore ATTRIBUTE_UNUSED)
1521 {
1522 /* Never make a frag if we expect an extra pass. */
1523 if (!need_pass_2)
1524 frag_align (1, 0, 0);
1525
1526 record_alignment (now_seg, 1);
1527
1528 demand_empty_rest_of_line ();
1529 }
1530
1531 /* Directives: Literal pools. */
1532
1533 static literal_pool *
1534 find_literal_pool (int size)
1535 {
1536 literal_pool *pool;
1537
1538 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1539 {
1540 if (pool->section == now_seg
1541 && pool->sub_section == now_subseg && pool->size == size)
1542 break;
1543 }
1544
1545 return pool;
1546 }
1547
1548 static literal_pool *
1549 find_or_make_literal_pool (int size)
1550 {
1551 /* Next literal pool ID number. */
1552 static unsigned int latest_pool_num = 1;
1553 literal_pool *pool;
1554
1555 pool = find_literal_pool (size);
1556
1557 if (pool == NULL)
1558 {
1559 /* Create a new pool. */
1560 pool = XNEW (literal_pool);
1561 if (!pool)
1562 return NULL;
1563
1564 /* Currently we always put the literal pool in the current text
1565 section. If we were generating "small" model code where we
1566 knew that all code and initialised data was within 1MB then
1567 we could output literals to mergeable, read-only data
1568 sections. */
1569
1570 pool->next_free_entry = 0;
1571 pool->section = now_seg;
1572 pool->sub_section = now_subseg;
1573 pool->size = size;
1574 pool->next = list_of_pools;
1575 pool->symbol = NULL;
1576
1577 /* Add it to the list. */
1578 list_of_pools = pool;
1579 }
1580
1581 /* New pools, and emptied pools, will have a NULL symbol. */
1582 if (pool->symbol == NULL)
1583 {
1584 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1585 (valueT) 0, &zero_address_frag);
1586 pool->id = latest_pool_num++;
1587 }
1588
1589 /* Done. */
1590 return pool;
1591 }
1592
1593 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1594 Return TRUE on success, otherwise return FALSE. */
1595 static bfd_boolean
1596 add_to_lit_pool (expressionS *exp, int size)
1597 {
1598 literal_pool *pool;
1599 unsigned int entry;
1600
1601 pool = find_or_make_literal_pool (size);
1602
1603 /* Check if this literal value is already in the pool. */
1604 for (entry = 0; entry < pool->next_free_entry; entry++)
1605 {
1606 expressionS * litexp = & pool->literals[entry].exp;
1607
1608 if ((litexp->X_op == exp->X_op)
1609 && (exp->X_op == O_constant)
1610 && (litexp->X_add_number == exp->X_add_number)
1611 && (litexp->X_unsigned == exp->X_unsigned))
1612 break;
1613
1614 if ((litexp->X_op == exp->X_op)
1615 && (exp->X_op == O_symbol)
1616 && (litexp->X_add_number == exp->X_add_number)
1617 && (litexp->X_add_symbol == exp->X_add_symbol)
1618 && (litexp->X_op_symbol == exp->X_op_symbol))
1619 break;
1620 }
1621
1622 /* Do we need to create a new entry? */
1623 if (entry == pool->next_free_entry)
1624 {
1625 if (entry >= MAX_LITERAL_POOL_SIZE)
1626 {
1627 set_syntax_error (_("literal pool overflow"));
1628 return FALSE;
1629 }
1630
1631 pool->literals[entry].exp = *exp;
1632 pool->next_free_entry += 1;
1633 if (exp->X_op == O_big)
1634 {
1635 /* PR 16688: Bignums are held in a single global array. We must
1636 copy and preserve that value now, before it is overwritten. */
1637 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1638 exp->X_add_number);
1639 memcpy (pool->literals[entry].bignum, generic_bignum,
1640 CHARS_PER_LITTLENUM * exp->X_add_number);
1641 }
1642 else
1643 pool->literals[entry].bignum = NULL;
1644 }
1645
1646 exp->X_op = O_symbol;
1647 exp->X_add_number = ((int) entry) * size;
1648 exp->X_add_symbol = pool->symbol;
1649
1650 return TRUE;
1651 }
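/* For example: assembling "ldr x0, =0xcafef00dcafef00d" reaches this
   function with SIZE == 8; the constant is recorded in the size-8 pool and
   *EXP is rewritten as pool->symbol + entry * 8, so the load becomes a
   PC-relative literal reference that s_ltorg (.ltorg/.pool) materialises
   later.  */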
1652
1653 /* Can't use symbol_new here, so have to create a symbol and then at
1654 a later date assign it a value. That's what these functions do. */
1655
1656 static void
1657 symbol_locate (symbolS * symbolP,
1658 const char *name,/* It is copied, the caller can modify. */
1659 segT segment, /* Segment identifier (SEG_<something>). */
1660 valueT valu, /* Symbol value. */
1661 fragS * frag) /* Associated fragment. */
1662 {
1663 size_t name_length;
1664 char *preserved_copy_of_name;
1665
1666 name_length = strlen (name) + 1; /* +1 for \0. */
1667 obstack_grow (&notes, name, name_length);
1668 preserved_copy_of_name = obstack_finish (&notes);
1669
1670 #ifdef tc_canonicalize_symbol_name
1671 preserved_copy_of_name =
1672 tc_canonicalize_symbol_name (preserved_copy_of_name);
1673 #endif
1674
1675 S_SET_NAME (symbolP, preserved_copy_of_name);
1676
1677 S_SET_SEGMENT (symbolP, segment);
1678 S_SET_VALUE (symbolP, valu);
1679 symbol_clear_list_pointers (symbolP);
1680
1681 symbol_set_frag (symbolP, frag);
1682
1683 /* Link to end of symbol chain. */
1684 {
1685 extern int symbol_table_frozen;
1686
1687 if (symbol_table_frozen)
1688 abort ();
1689 }
1690
1691 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1692
1693 obj_symbol_new_hook (symbolP);
1694
1695 #ifdef tc_symbol_new_hook
1696 tc_symbol_new_hook (symbolP);
1697 #endif
1698
1699 #ifdef DEBUG_SYMS
1700 verify_symbol_chain (symbol_rootP, symbol_lastP);
1701 #endif /* DEBUG_SYMS */
1702 }
1703
1704
1705 static void
1706 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1707 {
1708 unsigned int entry;
1709 literal_pool *pool;
1710 char sym_name[20];
1711 int align;
1712
1713 for (align = 2; align <= 4; align++)
1714 {
1715 int size = 1 << align;
1716
1717 pool = find_literal_pool (size);
1718 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1719 continue;
1720
1721 /* Align the pool for word accesses.
1722 Only make a frag if we have to. */
1723 if (!need_pass_2)
1724 frag_align (align, 0, 0);
1725
1726 mapping_state (MAP_DATA);
1727
1728 record_alignment (now_seg, align);
1729
1730 sprintf (sym_name, "$$lit_\002%x", pool->id);
1731
1732 symbol_locate (pool->symbol, sym_name, now_seg,
1733 (valueT) frag_now_fix (), frag_now);
1734 symbol_table_insert (pool->symbol);
1735
1736 for (entry = 0; entry < pool->next_free_entry; entry++)
1737 {
1738 expressionS * exp = & pool->literals[entry].exp;
1739
1740 if (exp->X_op == O_big)
1741 {
1742 /* PR 16688: Restore the global bignum value. */
1743 gas_assert (pool->literals[entry].bignum != NULL);
1744 memcpy (generic_bignum, pool->literals[entry].bignum,
1745 CHARS_PER_LITTLENUM * exp->X_add_number);
1746 }
1747
1748 /* First output the expression in the instruction to the pool. */
1749 emit_expr (exp, size); /* .word|.xword */
1750
1751 if (exp->X_op == O_big)
1752 {
1753 free (pool->literals[entry].bignum);
1754 pool->literals[entry].bignum = NULL;
1755 }
1756 }
1757
1758 /* Mark the pool as empty. */
1759 pool->next_free_entry = 0;
1760 pool->symbol = NULL;
1761 }
1762 }
1763
1764 #ifdef OBJ_ELF
1765 /* Forward declarations for functions below, in the MD interface
1766 section. */
1767 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1768 static struct reloc_table_entry * find_reloc_table_entry (char **);
1769
1770 /* Directives: Data. */
1771 /* N.B. the support for relocation suffix in this directive needs to be
1772 implemented properly. */
1773
1774 static void
1775 s_aarch64_elf_cons (int nbytes)
1776 {
1777 expressionS exp;
1778
1779 #ifdef md_flush_pending_output
1780 md_flush_pending_output ();
1781 #endif
1782
1783 if (is_it_end_of_statement ())
1784 {
1785 demand_empty_rest_of_line ();
1786 return;
1787 }
1788
1789 #ifdef md_cons_align
1790 md_cons_align (nbytes);
1791 #endif
1792
1793 mapping_state (MAP_DATA);
1794 do
1795 {
1796 struct reloc_table_entry *reloc;
1797
1798 expression (&exp);
1799
1800 if (exp.X_op != O_symbol)
1801 emit_expr (&exp, (unsigned int) nbytes);
1802 else
1803 {
1804 skip_past_char (&input_line_pointer, '#');
1805 if (skip_past_char (&input_line_pointer, ':'))
1806 {
1807 reloc = find_reloc_table_entry (&input_line_pointer);
1808 if (reloc == NULL)
1809 as_bad (_("unrecognized relocation suffix"));
1810 else
1811 as_bad (_("unimplemented relocation suffix"));
1812 ignore_rest_of_line ();
1813 return;
1814 }
1815 else
1816 emit_expr (&exp, (unsigned int) nbytes);
1817 }
1818 }
1819 while (*input_line_pointer++ == ',');
1820
1821 /* Put terminator back into stream. */
1822 input_line_pointer--;
1823 demand_empty_rest_of_line ();
1824 }
1825
1826 #endif /* OBJ_ELF */
1827
1828 /* Output a 32-bit word, but mark as an instruction. */
1829
1830 static void
1831 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1832 {
1833 expressionS exp;
1834
1835 #ifdef md_flush_pending_output
1836 md_flush_pending_output ();
1837 #endif
1838
1839 if (is_it_end_of_statement ())
1840 {
1841 demand_empty_rest_of_line ();
1842 return;
1843 }
1844
1845 /* Sections are assumed to start aligned. In an executable section, there
1846 is no MAP_DATA symbol pending. So we only align the address during the
1847 MAP_DATA --> MAP_INSN transition.
1848 For other sections, this is not guaranteed. */
1849 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1850 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1851 frag_align_code (2, 0);
1852
1853 #ifdef OBJ_ELF
1854 mapping_state (MAP_INSN);
1855 #endif
1856
1857 do
1858 {
1859 expression (&exp);
1860 if (exp.X_op != O_constant)
1861 {
1862 as_bad (_("constant expression required"));
1863 ignore_rest_of_line ();
1864 return;
1865 }
1866
1867 if (target_big_endian)
1868 {
1869 unsigned int val = exp.X_add_number;
1870 exp.X_add_number = SWAP_32 (val);
1871 }
1872 emit_expr (&exp, 4);
1873 }
1874 while (*input_line_pointer++ == ',');
1875
1876 /* Put terminator back into stream. */
1877 input_line_pointer--;
1878 demand_empty_rest_of_line ();
1879 }
1880
1881 #ifdef OBJ_ELF
1882 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1883
1884 static void
1885 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1886 {
1887 expressionS exp;
1888
1889 expression (&exp);
1890 frag_grow (4);
1891 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1892 BFD_RELOC_AARCH64_TLSDESC_ADD);
1893
1894 demand_empty_rest_of_line ();
1895 }
1896
1897 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1898
1899 static void
1900 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1901 {
1902 expressionS exp;
1903
1904 /* Since we're just labelling the code, there's no need to define a
1905 mapping symbol. */
1906 expression (&exp);
1907 /* Make sure there is enough room in this frag for the following
1908 blr. This trick only works if the blr follows immediately after
1909 the .tlsdesc directive. */
1910 frag_grow (4);
1911 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1912 BFD_RELOC_AARCH64_TLSDESC_CALL);
1913
1914 demand_empty_rest_of_line ();
1915 }
1916
1917 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1918
1919 static void
1920 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1921 {
1922 expressionS exp;
1923
1924 expression (&exp);
1925 frag_grow (4);
1926 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1927 BFD_RELOC_AARCH64_TLSDESC_LDR);
1928
1929 demand_empty_rest_of_line ();
1930 }
1931 #endif /* OBJ_ELF */
1932
1933 static void s_aarch64_arch (int);
1934 static void s_aarch64_cpu (int);
1935 static void s_aarch64_arch_extension (int);
1936
1937 /* This table describes all the machine specific pseudo-ops the assembler
1938 has to support. The fields are:
1939 pseudo-op name without dot
1940 function to call to execute this pseudo-op
1941 Integer arg to pass to the function. */
1942
1943 const pseudo_typeS md_pseudo_table[] = {
1944 /* Never called because '.req' does not start a line. */
1945 {"req", s_req, 0},
1946 {"unreq", s_unreq, 0},
1947 {"bss", s_bss, 0},
1948 {"even", s_even, 0},
1949 {"ltorg", s_ltorg, 0},
1950 {"pool", s_ltorg, 0},
1951 {"cpu", s_aarch64_cpu, 0},
1952 {"arch", s_aarch64_arch, 0},
1953 {"arch_extension", s_aarch64_arch_extension, 0},
1954 {"inst", s_aarch64_inst, 0},
1955 #ifdef OBJ_ELF
1956 {"tlsdescadd", s_tlsdescadd, 0},
1957 {"tlsdesccall", s_tlsdesccall, 0},
1958 {"tlsdescldr", s_tlsdescldr, 0},
1959 {"word", s_aarch64_elf_cons, 4},
1960 {"long", s_aarch64_elf_cons, 4},
1961 {"xword", s_aarch64_elf_cons, 8},
1962 {"dword", s_aarch64_elf_cons, 8},
1963 #endif
1964 {0, 0, 0}
1965 };
1966 \f
1967
1968 /* Check whether STR points to a register name followed by a comma or the
1969 end of line; REG_TYPE indicates which register types are checked
1970 against. Return TRUE if STR is such a register name; otherwise return
1971 FALSE. The function does not intend to produce any diagnostics, but since
1972 the register parser aarch64_reg_parse, which is called by this function,
1973 does produce diagnostics, we call clear_error to clear any diagnostics
1974 that may be generated by aarch64_reg_parse.
1975 Also, the function returns FALSE directly if there is any user error
1976 present at the function entry. This prevents the existing diagnostics
1977 state from being spoiled.
1978 The function currently serves parse_constant_immediate and
1979 parse_big_immediate only. */
1980 static bfd_boolean
1981 reg_name_p (char *str, aarch64_reg_type reg_type)
1982 {
1983 int reg;
1984
1985 /* Prevent the diagnostics state from being spoiled. */
1986 if (error_p ())
1987 return FALSE;
1988
1989 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1990
1991 /* Clear the parsing error that may be set by the reg parser. */
1992 clear_error ();
1993
1994 if (reg == PARSE_FAIL)
1995 return FALSE;
1996
1997 skip_whitespace (str);
1998 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1999 return TRUE;
2000
2001 return FALSE;
2002 }
2003
2004 /* Parser functions used exclusively in instruction operands. */
2005
2006 /* Parse an immediate expression which may not be constant.
2007
2008 To prevent the expression parser from pushing a register name
2009 into the symbol table as an undefined symbol, firstly a check is
2010 done to find out whether STR is a register of type REG_TYPE followed
2011 by a comma or the end of line. Return FALSE if STR is such a string. */
2012
2013 static bfd_boolean
2014 parse_immediate_expression (char **str, expressionS *exp,
2015 aarch64_reg_type reg_type)
2016 {
2017 if (reg_name_p (*str, reg_type))
2018 {
2019 set_recoverable_error (_("immediate operand required"));
2020 return FALSE;
2021 }
2022
2023 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2024
2025 if (exp->X_op == O_absent)
2026 {
2027 set_fatal_syntax_error (_("missing immediate expression"));
2028 return FALSE;
2029 }
2030
2031 return TRUE;
2032 }
2033
2034 /* Constant immediate-value read function for use in insn parsing.
2035 STR points to the beginning of the immediate (with the optional
2036 leading #); *VAL receives the value. REG_TYPE says which register
2037 names should be treated as registers rather than as symbolic immediates.
2038
2039 Return TRUE on success; otherwise return FALSE. */
2040
2041 static bfd_boolean
2042 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2043 {
2044 expressionS exp;
2045
2046 if (! parse_immediate_expression (str, &exp, reg_type))
2047 return FALSE;
2048
2049 if (exp.X_op != O_constant)
2050 {
2051 set_syntax_error (_("constant expression required"));
2052 return FALSE;
2053 }
2054
2055 *val = exp.X_add_number;
2056 return TRUE;
2057 }
2058
2059 static uint32_t
2060 encode_imm_float_bits (uint32_t imm)
2061 {
2062 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2063 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2064 }
2065
2066 /* Return TRUE if the single-precision floating-point value encoded in IMM
2067 can be expressed in the AArch64 8-bit signed floating-point format with
2068 3-bit exponent and normalized 4 bits of precision; in other words, the
2069 floating-point value must be expressible as
2070 (+/-) n / 16 * power (2, r)
2071 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2072
2073 static bfd_boolean
2074 aarch64_imm_float_p (uint32_t imm)
2075 {
2076 /* If a single-precision floating-point value has the following bit
2077 pattern, it can be expressed in the AArch64 8-bit floating-point
2078 format:
2079
2080 3 32222222 2221111111111
2081 1 09876543 21098765432109876543210
2082 n Eeeeeexx xxxx0000000000000000000
2083
2084 where n, e and each x are either 0 or 1 independently, with
2085 E == ~ e. */
2086
2087 uint32_t pattern;
2088
2089 /* Prepare the pattern for 'Eeeeee'. */
2090 if (((imm >> 30) & 0x1) == 0)
2091 pattern = 0x3e000000;
2092 else
2093 pattern = 0x40000000;
2094
2095 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2096 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2097 }
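
/* A hedged, editorial self-check sketch for the two helpers above (not
   called anywhere; the function name is new here and the values are worked
   by hand from the IEEE754 single-precision encodings of 2.0f, -1.0f and
   3.14159f).  */

static void ATTRIBUTE_UNUSED
aarch64_imm_float_examples (void)
{
  gas_assert (aarch64_imm_float_p (0x40000000));	     /* 2.0f  */
  gas_assert (encode_imm_float_bits (0x40000000) == 0x00);
  gas_assert (aarch64_imm_float_p (0xbf800000));	     /* -1.0f  */
  gas_assert (encode_imm_float_bits (0xbf800000) == 0xf0);
  /* 3.14159f needs more than 4 bits of precision.  */
  gas_assert (!aarch64_imm_float_p (0x40490fdb));
}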
2098
2099 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2100 as an IEEE float without any loss of precision. Store the value in
2101 *FPWORD if so. */
2102
2103 static bfd_boolean
2104 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2105 {
2106 /* If a double-precision floating-point value has the following bit
2107 pattern, it can be expressed in a float:
2108
2109 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2110 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2111 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2112
2113 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2114 if Eeee_eeee != 1111_1111
2115
2116 where n, e, s and S are either 0 or 1 independently and where ~ is the
2117 inverse of E. */
2118
2119 uint32_t pattern;
2120 uint32_t high32 = imm >> 32;
2121 uint32_t low32 = imm;
2122
2123 /* Lower 29 bits need to be 0s. */
2124 if ((imm & 0x1fffffff) != 0)
2125 return FALSE;
2126
2127 /* Prepare the pattern for 'Eeeeeeeee'. */
2128 if (((high32 >> 30) & 0x1) == 0)
2129 pattern = 0x38000000;
2130 else
2131 pattern = 0x40000000;
2132
2133 /* Check E~~~. */
2134 if ((high32 & 0x78000000) != pattern)
2135 return FALSE;
2136
2137 /* Check Eeee_eeee != 1111_1111. */
2138 if ((high32 & 0x7ff00000) == 0x47f00000)
2139 return FALSE;
2140
2141 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2142 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2143 | (low32 >> 29)); /* 3 S bits. */
2144 return TRUE;
2145 }
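
/* A worked example for the conversion above (a sketch, worked by hand from
   the IEEE754 layouts): the double 1.0 is 0x3ff0000000000000; its low
   29 bits are zero and its exponent survives the range checks, so the
   function returns TRUE with *FPWORD set to 0x3f800000 (1.0f).  In
   contrast, 0x3ff0000000000001 fails the low-29-bit test and the function
   returns FALSE.  */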
2146
2147 /* Parse a floating-point immediate. Return TRUE on success and return the
2148 value in *IMMED in the format of IEEE754 single-precision encoding.
2149 *CCP points to the start of the string; DP_P is TRUE when the immediate
2150 is expected to be in double-precision (N.B. this only matters when
2151 hexadecimal representation is involved). REG_TYPE says which register
2152 names should be treated as registers rather than as symbolic immediates.
2153
2154 This routine accepts any IEEE float; it is up to the callers to reject
2155 invalid ones. */
2156
2157 static bfd_boolean
2158 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2159 aarch64_reg_type reg_type)
2160 {
2161 char *str = *ccp;
2162 char *fpnum;
2163 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2164 int found_fpchar = 0;
2165 int64_t val = 0;
2166 unsigned fpword = 0;
2167 bfd_boolean hex_p = FALSE;
2168
2169 skip_past_char (&str, '#');
2170
2171 fpnum = str;
2172 skip_whitespace (fpnum);
2173
2174 if (strncmp (fpnum, "0x", 2) == 0)
2175 {
2176 /* Support the hexadecimal representation of the IEEE754 encoding.
2177 Double-precision is expected when DP_P is TRUE, otherwise the
2178 representation should be in single-precision. */
2179 if (! parse_constant_immediate (&str, &val, reg_type))
2180 goto invalid_fp;
2181
2182 if (dp_p)
2183 {
2184 if (!can_convert_double_to_float (val, &fpword))
2185 goto invalid_fp;
2186 }
2187 else if ((uint64_t) val > 0xffffffff)
2188 goto invalid_fp;
2189 else
2190 fpword = val;
2191
2192 hex_p = TRUE;
2193 }
2194 else
2195 {
2196 if (reg_name_p (str, reg_type))
2197 {
2198 set_recoverable_error (_("immediate operand required"));
2199 return FALSE;
2200 }
2201
2202 /* We must not accidentally parse an integer as a floating-point number.
2203 Make sure that the value we parse is not an integer by checking for
2204 special characters '.', 'e' or 'E'. */
2205 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2206 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2207 {
2208 found_fpchar = 1;
2209 break;
2210 }
2211
2212 if (!found_fpchar)
2213 return FALSE;
2214 }
2215
2216 if (! hex_p)
2217 {
2218 int i;
2219
2220 if ((str = atof_ieee (str, 's', words)) == NULL)
2221 goto invalid_fp;
2222
2223 /* Our FP word must be 32 bits (single-precision FP). */
2224 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2225 {
2226 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2227 fpword |= words[i];
2228 }
2229 }
2230
2231 *immed = fpword;
2232 *ccp = str;
2233 return TRUE;
2234
2235 invalid_fp:
2236 set_fatal_syntax_error (_("invalid floating-point constant"));
2237 return FALSE;
2238 }
2239
2240 /* Less-generic immediate-value read function with the possibility of loading
2241 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2242 instructions.
2243
2244 To prevent the expression parser from pushing a register name into the
2245 symbol table as an undefined symbol, a check is first done to find
2246 out whether STR is a register of type REG_TYPE followed by a comma or
2247 the end of line. Return FALSE if STR is such a register. */
2248
2249 static bfd_boolean
2250 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2251 {
2252 char *ptr = *str;
2253
2254 if (reg_name_p (ptr, reg_type))
2255 {
2256 set_syntax_error (_("immediate operand required"));
2257 return FALSE;
2258 }
2259
2260 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2261
2262 if (inst.reloc.exp.X_op == O_constant)
2263 *imm = inst.reloc.exp.X_add_number;
2264
2265 *str = ptr;
2266
2267 return TRUE;
2268 }
2269
2270 /* Record in *RELOC that OPERAND needs a GAS internal fixup.
2271 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2272 assistance from libopcodes. */
2273
2274 static inline void
2275 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2276 const aarch64_opnd_info *operand,
2277 int need_libopcodes_p)
2278 {
2279 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2280 reloc->opnd = operand->type;
2281 if (need_libopcodes_p)
2282 reloc->need_libopcodes_p = 1;
2283 }
2284
2285 /* Return TRUE if the instruction needs to be fixed up later internally by
2286 the GAS; otherwise return FALSE. */
2287
2288 static inline bfd_boolean
2289 aarch64_gas_internal_fixup_p (void)
2290 {
2291 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2292 }
2293
2294 /* Assign the immediate value to the relevant field in *OPERAND if
2295 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2296 needs an internal fixup in a later stage.
2297 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2298 IMM.VALUE that may get assigned with the constant. */
2299 static inline void
2300 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2301 aarch64_opnd_info *operand,
2302 int addr_off_p,
2303 int need_libopcodes_p,
2304 int skip_p)
2305 {
2306 if (reloc->exp.X_op == O_constant)
2307 {
2308 if (addr_off_p)
2309 operand->addr.offset.imm = reloc->exp.X_add_number;
2310 else
2311 operand->imm.value = reloc->exp.X_add_number;
2312 reloc->type = BFD_RELOC_UNUSED;
2313 }
2314 else
2315 {
2316 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2317 /* Tell libopcodes to ignore this operand or not. This is helpful
2318 when one of the operands needs to be fixed up later but we need
2319 libopcodes to check the other operands. */
2320 operand->skip = skip_p;
2321 }
2322 }
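
/* A hedged sketch of how the helper above is typically invoked once an
   immediate operand has been parsed into inst.reloc.exp (the variable and
   argument names here are illustrative, not prescriptive):

     assign_imm_if_const_or_fixup_later (&inst.reloc, info,
                                         addr_off_p,
                                         need_libopcodes_p,
                                         skip_p);

   For a constant such as "#8" the value is copied straight into
   info->imm.value (or info->addr.offset.imm) and the reloc type is reset
   to BFD_RELOC_UNUSED; a symbolic expression instead marks the operand for
   a later GAS-internal fixup via BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP.  */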
2323
2324 /* Relocation modifiers. Each entry in the table contains the textual
2325 name for the relocation which may be placed before a symbol used as
2326 a load/store offset, or add immediate. It must be surrounded by a
2327 leading and trailing colon, for example:
2328
2329 ldr x0, [x1, #:rello:varsym]
2330 add x0, x1, #:rello:varsym */
2331
2332 struct reloc_table_entry
2333 {
2334 const char *name;
2335 int pc_rel;
2336 bfd_reloc_code_real_type adr_type;
2337 bfd_reloc_code_real_type adrp_type;
2338 bfd_reloc_code_real_type movw_type;
2339 bfd_reloc_code_real_type add_type;
2340 bfd_reloc_code_real_type ldst_type;
2341 bfd_reloc_code_real_type ld_literal_type;
2342 };
2343
2344 static struct reloc_table_entry reloc_table[] = {
2345 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2346 {"lo12", 0,
2347 0, /* adr_type */
2348 0,
2349 0,
2350 BFD_RELOC_AARCH64_ADD_LO12,
2351 BFD_RELOC_AARCH64_LDST_LO12,
2352 0},
2353
2354 /* Higher 21 bits of pc-relative page offset: ADRP */
2355 {"pg_hi21", 1,
2356 0, /* adr_type */
2357 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2358 0,
2359 0,
2360 0,
2361 0},
2362
2363 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2364 {"pg_hi21_nc", 1,
2365 0, /* adr_type */
2366 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2367 0,
2368 0,
2369 0,
2370 0},
2371
2372 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2373 {"abs_g0", 0,
2374 0, /* adr_type */
2375 0,
2376 BFD_RELOC_AARCH64_MOVW_G0,
2377 0,
2378 0,
2379 0},
2380
2381 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2382 {"abs_g0_s", 0,
2383 0, /* adr_type */
2384 0,
2385 BFD_RELOC_AARCH64_MOVW_G0_S,
2386 0,
2387 0,
2388 0},
2389
2390 /* Less significant bits 0-15 of address/value: MOVK, no check */
2391 {"abs_g0_nc", 0,
2392 0, /* adr_type */
2393 0,
2394 BFD_RELOC_AARCH64_MOVW_G0_NC,
2395 0,
2396 0,
2397 0},
2398
2399 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2400 {"abs_g1", 0,
2401 0, /* adr_type */
2402 0,
2403 BFD_RELOC_AARCH64_MOVW_G1,
2404 0,
2405 0,
2406 0},
2407
2408 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2409 {"abs_g1_s", 0,
2410 0, /* adr_type */
2411 0,
2412 BFD_RELOC_AARCH64_MOVW_G1_S,
2413 0,
2414 0,
2415 0},
2416
2417 /* Less significant bits 16-31 of address/value: MOVK, no check */
2418 {"abs_g1_nc", 0,
2419 0, /* adr_type */
2420 0,
2421 BFD_RELOC_AARCH64_MOVW_G1_NC,
2422 0,
2423 0,
2424 0},
2425
2426 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2427 {"abs_g2", 0,
2428 0, /* adr_type */
2429 0,
2430 BFD_RELOC_AARCH64_MOVW_G2,
2431 0,
2432 0,
2433 0},
2434
2435 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2436 {"abs_g2_s", 0,
2437 0, /* adr_type */
2438 0,
2439 BFD_RELOC_AARCH64_MOVW_G2_S,
2440 0,
2441 0,
2442 0},
2443
2444 /* Less significant bits 32-47 of address/value: MOVK, no check */
2445 {"abs_g2_nc", 0,
2446 0, /* adr_type */
2447 0,
2448 BFD_RELOC_AARCH64_MOVW_G2_NC,
2449 0,
2450 0,
2451 0},
2452
2453 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2454 {"abs_g3", 0,
2455 0, /* adr_type */
2456 0,
2457 BFD_RELOC_AARCH64_MOVW_G3,
2458 0,
2459 0,
2460 0},
2461
2462 /* Get to the page containing GOT entry for a symbol. */
2463 {"got", 1,
2464 0, /* adr_type */
2465 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2466 0,
2467 0,
2468 0,
2469 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2470
2471 /* 12 bit offset into the page containing GOT entry for that symbol. */
2472 {"got_lo12", 0,
2473 0, /* adr_type */
2474 0,
2475 0,
2476 0,
2477 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2478 0},
2479
2480 /* Bits 0-15 of address/value: MOVK, no check. */
2481 {"gotoff_g0_nc", 0,
2482 0, /* adr_type */
2483 0,
2484 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2485 0,
2486 0,
2487 0},
2488
2489 /* Most significant bits 16-31 of address/value: MOVZ. */
2490 {"gotoff_g1", 0,
2491 0, /* adr_type */
2492 0,
2493 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2494 0,
2495 0,
2496 0},
2497
2498 /* 15 bit offset into the page containing GOT entry for that symbol. */
2499 {"gotoff_lo15", 0,
2500 0, /* adr_type */
2501 0,
2502 0,
2503 0,
2504 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2505 0},
2506
2507 /* Get to the page containing GOT TLS entry for a symbol */
2508 {"gottprel_g0_nc", 0,
2509 0, /* adr_type */
2510 0,
2511 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2512 0,
2513 0,
2514 0},
2515
2516 /* Get to the page containing GOT TLS entry for a symbol */
2517 {"gottprel_g1", 0,
2518 0, /* adr_type */
2519 0,
2520 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2521 0,
2522 0,
2523 0},
2524
2525 /* Get to the page containing GOT TLS entry for a symbol */
2526 {"tlsgd", 0,
2527 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2528 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2529 0,
2530 0,
2531 0,
2532 0},
2533
2534 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2535 {"tlsgd_lo12", 0,
2536 0, /* adr_type */
2537 0,
2538 0,
2539 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2540 0,
2541 0},
2542
2543 /* Lower 16 bits of address/value: MOVK. */
2544 {"tlsgd_g0_nc", 0,
2545 0, /* adr_type */
2546 0,
2547 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2548 0,
2549 0,
2550 0},
2551
2552 /* Most significant bits 16-31 of address/value: MOVZ. */
2553 {"tlsgd_g1", 0,
2554 0, /* adr_type */
2555 0,
2556 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2557 0,
2558 0,
2559 0},
2560
2561 /* Get to the page containing GOT TLS entry for a symbol */
2562 {"tlsdesc", 0,
2563 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2564 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2565 0,
2566 0,
2567 0,
2568 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2569
2570 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2571 {"tlsdesc_lo12", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2576 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2577 0},
2578
2579 /* Get to the page containing GOT TLS entry for a symbol.
2580 As with GD, we allocate two consecutive GOT slots
2581 for module index and module offset; the only difference
2582 from GD is that the module offset should be initialized to
2583 zero without any outstanding runtime relocation. */
2584 {"tlsldm", 0,
2585 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2586 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2587 0,
2588 0,
2589 0,
2590 0},
2591
2592 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2593 {"tlsldm_lo12_nc", 0,
2594 0, /* adr_type */
2595 0,
2596 0,
2597 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2598 0,
2599 0},
2600
2601 /* 12 bit offset into the module TLS base address. */
2602 {"dtprel_lo12", 0,
2603 0, /* adr_type */
2604 0,
2605 0,
2606 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2607 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2608 0},
2609
2610 /* Same as dtprel_lo12, no overflow check. */
2611 {"dtprel_lo12_nc", 0,
2612 0, /* adr_type */
2613 0,
2614 0,
2615 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2616 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2617 0},
2618
2619 /* bits[23:12] of offset to the module TLS base address. */
2620 {"dtprel_hi12", 0,
2621 0, /* adr_type */
2622 0,
2623 0,
2624 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2625 0,
2626 0},
2627
2628 /* bits[15:0] of offset to the module TLS base address. */
2629 {"dtprel_g0", 0,
2630 0, /* adr_type */
2631 0,
2632 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2633 0,
2634 0,
2635 0},
2636
2637 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2638 {"dtprel_g0_nc", 0,
2639 0, /* adr_type */
2640 0,
2641 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2642 0,
2643 0,
2644 0},
2645
2646 /* bits[31:16] of offset to the module TLS base address. */
2647 {"dtprel_g1", 0,
2648 0, /* adr_type */
2649 0,
2650 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2651 0,
2652 0,
2653 0},
2654
2655 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2656 {"dtprel_g1_nc", 0,
2657 0, /* adr_type */
2658 0,
2659 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2660 0,
2661 0,
2662 0},
2663
2664 /* bits[47:32] of offset to the module TLS base address. */
2665 {"dtprel_g2", 0,
2666 0, /* adr_type */
2667 0,
2668 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2669 0,
2670 0,
2671 0},
2672
2673 /* Lower 16 bit offset into GOT entry for a symbol */
2674 {"tlsdesc_off_g0_nc", 0,
2675 0, /* adr_type */
2676 0,
2677 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2678 0,
2679 0,
2680 0},
2681
2682 /* Higher 16 bit offset into GOT entry for a symbol */
2683 {"tlsdesc_off_g1", 0,
2684 0, /* adr_type */
2685 0,
2686 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2687 0,
2688 0,
2689 0},
2690
2691 /* Get to the page containing GOT TLS entry for a symbol */
2692 {"gottprel", 0,
2693 0, /* adr_type */
2694 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2695 0,
2696 0,
2697 0,
2698 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2699
2700 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2701 {"gottprel_lo12", 0,
2702 0, /* adr_type */
2703 0,
2704 0,
2705 0,
2706 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2707 0},
2708
2709 /* Get tp offset for a symbol. */
2710 {"tprel", 0,
2711 0, /* adr_type */
2712 0,
2713 0,
2714 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2715 0,
2716 0},
2717
2718 /* Get tp offset for a symbol. */
2719 {"tprel_lo12", 0,
2720 0, /* adr_type */
2721 0,
2722 0,
2723 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2724 0,
2725 0},
2726
2727 /* Get tp offset for a symbol. */
2728 {"tprel_hi12", 0,
2729 0, /* adr_type */
2730 0,
2731 0,
2732 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2733 0,
2734 0},
2735
2736 /* Get tp offset for a symbol. */
2737 {"tprel_lo12_nc", 0,
2738 0, /* adr_type */
2739 0,
2740 0,
2741 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2742 0,
2743 0},
2744
2745 /* Most significant bits 32-47 of address/value: MOVZ. */
2746 {"tprel_g2", 0,
2747 0, /* adr_type */
2748 0,
2749 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2750 0,
2751 0,
2752 0},
2753
2754 /* Most significant bits 16-31 of address/value: MOVZ. */
2755 {"tprel_g1", 0,
2756 0, /* adr_type */
2757 0,
2758 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2759 0,
2760 0,
2761 0},
2762
2763 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2764 {"tprel_g1_nc", 0,
2765 0, /* adr_type */
2766 0,
2767 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2768 0,
2769 0,
2770 0},
2771
2772 /* Most significant bits 0-15 of address/value: MOVZ. */
2773 {"tprel_g0", 0,
2774 0, /* adr_type */
2775 0,
2776 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2777 0,
2778 0,
2779 0},
2780
2781 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2782 {"tprel_g0_nc", 0,
2783 0, /* adr_type */
2784 0,
2785 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2786 0,
2787 0,
2788 0},
2789
2790 /* 15-bit offset from GOT entry to base address of GOT table. */
2791 {"gotpage_lo15", 0,
2792 0,
2793 0,
2794 0,
2795 0,
2796 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2797 0},
2798
2799 /* 14-bit offset from GOT entry to base address of GOT table. */
2800 {"gotpage_lo14", 0,
2801 0,
2802 0,
2803 0,
2804 0,
2805 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2806 0},
2807 };
2808
2809 /* Given the address of a pointer pointing to the textual name of a
2810 relocation as may appear in assembler source, attempt to find its
2811 details in reloc_table. The pointer will be updated to the character
2812 after the trailing colon. On failure, NULL will be returned;
2813 otherwise return the reloc_table_entry. */
2814
2815 static struct reloc_table_entry *
2816 find_reloc_table_entry (char **str)
2817 {
2818 unsigned int i;
2819 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2820 {
2821 int length = strlen (reloc_table[i].name);
2822
2823 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2824 && (*str)[length] == ':')
2825 {
2826 *str += (length + 1);
2827 return &reloc_table[i];
2828 }
2829 }
2830
2831 return NULL;
2832 }
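
/* A worked example for the lookup above (a sketch; the operand text is
   hypothetical): when the assembly operand is "#:lo12:var", the caller
   strips the leading "#:" and passes a pointer to "lo12:var".  The
   function matches the "lo12" entry case-insensitively, advances *STR
   past the trailing colon so that it points at "var", and returns the
   entry whose add_type is BFD_RELOC_AARCH64_ADD_LO12 and whose ldst_type
   is BFD_RELOC_AARCH64_LDST_LO12.  */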
2833
2834 /* Mode argument to parse_shift and parse_shifter_operand. */
2835 enum parse_shift_mode
2836 {
2837 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2838 "#imm{,lsl #n}" */
2839 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2840 "#imm" */
2841 SHIFTED_LSL, /* bare "lsl #n" */
2842 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2843 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2844 };
2845
2846 /* Parse a <shift> operator on an AArch64 data processing instruction.
2847 Return TRUE on success; otherwise return FALSE. */
2848 static bfd_boolean
2849 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2850 {
2851 const struct aarch64_name_value_pair *shift_op;
2852 enum aarch64_modifier_kind kind;
2853 expressionS exp;
2854 int exp_has_prefix;
2855 char *s = *str;
2856 char *p = s;
2857
2858 for (p = *str; ISALPHA (*p); p++)
2859 ;
2860
2861 if (p == *str)
2862 {
2863 set_syntax_error (_("shift expression expected"));
2864 return FALSE;
2865 }
2866
2867 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2868
2869 if (shift_op == NULL)
2870 {
2871 set_syntax_error (_("shift operator expected"));
2872 return FALSE;
2873 }
2874
2875 kind = aarch64_get_operand_modifier (shift_op);
2876
2877 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2878 {
2879 set_syntax_error (_("invalid use of 'MSL'"));
2880 return FALSE;
2881 }
2882
2883 switch (mode)
2884 {
2885 case SHIFTED_LOGIC_IMM:
2886 if (aarch64_extend_operator_p (kind) == TRUE)
2887 {
2888 set_syntax_error (_("extending shift is not permitted"));
2889 return FALSE;
2890 }
2891 break;
2892
2893 case SHIFTED_ARITH_IMM:
2894 if (kind == AARCH64_MOD_ROR)
2895 {
2896 set_syntax_error (_("'ROR' shift is not permitted"));
2897 return FALSE;
2898 }
2899 break;
2900
2901 case SHIFTED_LSL:
2902 if (kind != AARCH64_MOD_LSL)
2903 {
2904 set_syntax_error (_("only 'LSL' shift is permitted"));
2905 return FALSE;
2906 }
2907 break;
2908
2909 case SHIFTED_REG_OFFSET:
2910 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2911 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2912 {
2913 set_fatal_syntax_error
2914 (_("invalid shift for the register offset addressing mode"));
2915 return FALSE;
2916 }
2917 break;
2918
2919 case SHIFTED_LSL_MSL:
2920 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2921 {
2922 set_syntax_error (_("invalid shift operator"));
2923 return FALSE;
2924 }
2925 break;
2926
2927 default:
2928 abort ();
2929 }
2930
2931 /* Whitespace can appear here if the next thing is a bare digit. */
2932 skip_whitespace (p);
2933
2934 /* Parse shift amount. */
2935 exp_has_prefix = 0;
2936 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2937 exp.X_op = O_absent;
2938 else
2939 {
2940 if (is_immediate_prefix (*p))
2941 {
2942 p++;
2943 exp_has_prefix = 1;
2944 }
2945 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2946 }
2947 if (exp.X_op == O_absent)
2948 {
2949 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2950 {
2951 set_syntax_error (_("missing shift amount"));
2952 return FALSE;
2953 }
2954 operand->shifter.amount = 0;
2955 }
2956 else if (exp.X_op != O_constant)
2957 {
2958 set_syntax_error (_("constant shift amount required"));
2959 return FALSE;
2960 }
2961 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2962 {
2963 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2964 return FALSE;
2965 }
2966 else
2967 {
2968 operand->shifter.amount = exp.X_add_number;
2969 operand->shifter.amount_present = 1;
2970 }
2971
2972 operand->shifter.operator_present = 1;
2973 operand->shifter.kind = kind;
2974
2975 *str = p;
2976 return TRUE;
2977 }
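
/* Worked examples for parse_shift (a sketch; the operand strings are
   hypothetical): parsing "lsl #3" with MODE == SHIFTED_LSL sets
   OPERAND->shifter.kind to AARCH64_MOD_LSL, shifter.amount to 3, and both
   amount_present and operator_present to 1.  Parsing "ror #2" with
   MODE == SHIFTED_ARITH_IMM fails with "'ROR' shift is not permitted".  */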
2978
2979 /* Parse a <shifter_operand> for a data processing instruction:
2980
2981 #<immediate>
2982 #<immediate>, LSL #imm
2983
2984 Validation of immediate operands is deferred to md_apply_fix.
2985
2986 Return TRUE on success; otherwise return FALSE. */
2987
2988 static bfd_boolean
2989 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2990 enum parse_shift_mode mode)
2991 {
2992 char *p;
2993
2994 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2995 return FALSE;
2996
2997 p = *str;
2998
2999 /* Accept an immediate expression. */
3000 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3001 return FALSE;
3002
3003 /* Accept optional LSL for arithmetic immediate values. */
3004 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3005 if (! parse_shift (&p, operand, SHIFTED_LSL))
3006 return FALSE;
3007
3008 /* Don't accept any shifter for logical immediate values. */
3009 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3010 && parse_shift (&p, operand, mode))
3011 {
3012 set_syntax_error (_("unexpected shift operator"));
3013 return FALSE;
3014 }
3015
3016 *str = p;
3017 return TRUE;
3018 }
3019
3020 /* Parse a <shifter_operand> for a data processing instruction:
3021
3022 <Rm>
3023 <Rm>, <shift>
3024 #<immediate>
3025 #<immediate>, LSL #imm
3026
3027 where <shift> is handled by parse_shift above, and the last two
3028 cases are handled by the function above.
3029
3030 Validation of immediate operands is deferred to md_apply_fix.
3031
3032 Return TRUE on success; otherwise return FALSE. */
3033
3034 static bfd_boolean
3035 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3036 enum parse_shift_mode mode)
3037 {
3038 const reg_entry *reg;
3039 aarch64_opnd_qualifier_t qualifier;
3040 enum aarch64_operand_class opd_class
3041 = aarch64_get_operand_class (operand->type);
3042
3043 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3044 if (reg)
3045 {
3046 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3047 {
3048 set_syntax_error (_("unexpected register in the immediate operand"));
3049 return FALSE;
3050 }
3051
3052 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3053 {
3054 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3055 return FALSE;
3056 }
3057
3058 operand->reg.regno = reg->number;
3059 operand->qualifier = qualifier;
3060
3061 /* Accept optional shift operation on register. */
3062 if (! skip_past_comma (str))
3063 return TRUE;
3064
3065 if (! parse_shift (str, operand, mode))
3066 return FALSE;
3067
3068 return TRUE;
3069 }
3070 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3071 {
3072 set_syntax_error
3073 (_("integer register expected in the extended/shifted operand "
3074 "register"));
3075 return FALSE;
3076 }
3077
3078 /* We have a shifted immediate variable. */
3079 return parse_shifter_operand_imm (str, operand, mode);
3080 }
3081
3082 /* Return TRUE on success; return FALSE otherwise. */
3083
3084 static bfd_boolean
3085 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3086 enum parse_shift_mode mode)
3087 {
3088 char *p = *str;
3089
3090 /* Determine if we have the sequence of characters #: or just :
3091 coming next. If we do, then we check for a :rello: relocation
3092 modifier. If we don't, punt the whole lot to
3093 parse_shifter_operand. */
3094
3095 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3096 {
3097 struct reloc_table_entry *entry;
3098
3099 if (p[0] == '#')
3100 p += 2;
3101 else
3102 p++;
3103 *str = p;
3104
3105 /* Try to parse a relocation. Anything else is an error. */
3106 if (!(entry = find_reloc_table_entry (str)))
3107 {
3108 set_syntax_error (_("unknown relocation modifier"));
3109 return FALSE;
3110 }
3111
3112 if (entry->add_type == 0)
3113 {
3114 set_syntax_error
3115 (_("this relocation modifier is not allowed on this instruction"));
3116 return FALSE;
3117 }
3118
3119 /* Save str before we decompose it. */
3120 p = *str;
3121
3122 /* Next, we parse the expression. */
3123 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3124 return FALSE;
3125
3126 /* Record the relocation type (use the ADD variant here). */
3127 inst.reloc.type = entry->add_type;
3128 inst.reloc.pc_rel = entry->pc_rel;
3129
3130 /* If str is empty, we've reached the end, stop here. */
3131 if (**str == '\0')
3132 return TRUE;
3133
3134 /* Otherwise, we have a shifted reloc modifier, so rewind to
3135 recover the variable name and continue parsing for the shifter. */
3136 *str = p;
3137 return parse_shifter_operand_imm (str, operand, mode);
3138 }
3139
3140 return parse_shifter_operand (str, operand, mode);
3141 }
3142
3143 /* Parse all forms of an address expression. Information is written
3144 to *OPERAND and/or inst.reloc.
3145
3146 The A64 instruction set has the following addressing modes:
3147
3148 Offset
3149 [base] // in SIMD ld/st structure
3150 [base{,#0}] // in ld/st exclusive
3151 [base{,#imm}]
3152 [base,Xm{,LSL #imm}]
3153 [base,Xm,SXTX {#imm}]
3154 [base,Wm,(S|U)XTW {#imm}]
3155 Pre-indexed
3156 [base,#imm]!
3157 Post-indexed
3158 [base],#imm
3159 [base],Xm // in SIMD ld/st structure
3160 PC-relative (literal)
3161 label
3162 =immediate
3163
3164 (As a convenience, the notation "=immediate" is permitted in conjunction
3165 with the pc-relative literal load instructions to automatically place an
3166 immediate value or symbolic address in a nearby literal pool and generate
3167 a hidden label which references it.)
3168
3169 Upon a successful parsing, the address structure in *OPERAND will be
3170 filled in the following way:
3171
3172 .base_regno = <base>
3173 .offset.is_reg // 1 if the offset is a register
3174 .offset.imm = <imm>
3175 .offset.regno = <Rm>
3176
3177 For different addressing modes defined in the A64 ISA:
3178
3179 Offset
3180 .pcrel=0; .preind=1; .postind=0; .writeback=0
3181 Pre-indexed
3182 .pcrel=0; .preind=1; .postind=0; .writeback=1
3183 Post-indexed
3184 .pcrel=0; .preind=0; .postind=1; .writeback=1
3185 PC-relative (literal)
3186 .pcrel=1; .preind=1; .postind=0; .writeback=0
3187
3188 The shift/extension information, if any, will be stored in .shifter.
3189
3190 It is the caller's responsibility to check for addressing modes not
3191 supported by the instruction, and to set inst.reloc.type. */
3192
3193 static bfd_boolean
3194 parse_address_main (char **str, aarch64_opnd_info *operand)
3195 {
3196 char *p = *str;
3197 const reg_entry *reg;
3198 aarch64_opnd_qualifier_t base_qualifier;
3199 aarch64_opnd_qualifier_t offset_qualifier;
3200 expressionS *exp = &inst.reloc.exp;
3201
3202 if (! skip_past_char (&p, '['))
3203 {
3204 /* =immediate or label. */
3205 operand->addr.pcrel = 1;
3206 operand->addr.preind = 1;
3207
3208 /* #:<reloc_op>:<symbol> */
3209 skip_past_char (&p, '#');
3210 if (skip_past_char (&p, ':'))
3211 {
3212 bfd_reloc_code_real_type ty;
3213 struct reloc_table_entry *entry;
3214
3215 /* Try to parse a relocation modifier. Anything else is
3216 an error. */
3217 entry = find_reloc_table_entry (&p);
3218 if (! entry)
3219 {
3220 set_syntax_error (_("unknown relocation modifier"));
3221 return FALSE;
3222 }
3223
3224 switch (operand->type)
3225 {
3226 case AARCH64_OPND_ADDR_PCREL21:
3227 /* adr */
3228 ty = entry->adr_type;
3229 break;
3230
3231 default:
3232 ty = entry->ld_literal_type;
3233 break;
3234 }
3235
3236 if (ty == 0)
3237 {
3238 set_syntax_error
3239 (_("this relocation modifier is not allowed on this "
3240 "instruction"));
3241 return FALSE;
3242 }
3243
3244 /* #:<reloc_op>: */
3245 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3246 {
3247 set_syntax_error (_("invalid relocation expression"));
3248 return FALSE;
3249 }
3250
3251 /* #:<reloc_op>:<expr> */
3252 /* Record the relocation type. */
3253 inst.reloc.type = ty;
3254 inst.reloc.pc_rel = entry->pc_rel;
3255 }
3256 else
3257 {
3258
3259 if (skip_past_char (&p, '='))
3260 /* =immediate; need to generate the literal in the literal pool. */
3261 inst.gen_lit_pool = 1;
3262
3263 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3264 {
3265 set_syntax_error (_("invalid address"));
3266 return FALSE;
3267 }
3268 }
3269
3270 *str = p;
3271 return TRUE;
3272 }
3273
3274 /* [ */
3275
3276 reg = aarch64_reg_parse_32_64 (&p, &base_qualifier);
3277 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R64_SP))
3278 {
3279 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R64_SP)));
3280 return FALSE;
3281 }
3282 operand->addr.base_regno = reg->number;
3283
3284 /* [Xn */
3285 if (skip_past_comma (&p))
3286 {
3287 /* [Xn, */
3288 operand->addr.preind = 1;
3289
3290 reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
3291 if (reg)
3292 {
3293 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3294 {
3295 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3296 return FALSE;
3297 }
3298
3299 /* [Xn,Rm */
3300 operand->addr.offset.regno = reg->number;
3301 operand->addr.offset.is_reg = 1;
3302 /* Shifted index. */
3303 if (skip_past_comma (&p))
3304 {
3305 /* [Xn,Rm, */
3306 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3307 /* Use the diagnostics set in parse_shift, so don't set a new
3308 error message here. */
3309 return FALSE;
3310 }
3311 /* We only accept:
3312 [base,Xm{,LSL #imm}]
3313 [base,Xm,SXTX {#imm}]
3314 [base,Wm,(S|U)XTW {#imm}] */
3315 if (operand->shifter.kind == AARCH64_MOD_NONE
3316 || operand->shifter.kind == AARCH64_MOD_LSL
3317 || operand->shifter.kind == AARCH64_MOD_SXTX)
3318 {
3319 if (offset_qualifier == AARCH64_OPND_QLF_W)
3320 {
3321 set_syntax_error (_("invalid use of 32-bit register offset"));
3322 return FALSE;
3323 }
3324 }
3325 else if (offset_qualifier == AARCH64_OPND_QLF_X)
3326 {
3327 set_syntax_error (_("invalid use of 64-bit register offset"));
3328 return FALSE;
3329 }
3330 }
3331 else
3332 {
3333 /* [Xn,#:<reloc_op>:<symbol> */
3334 skip_past_char (&p, '#');
3335 if (skip_past_char (&p, ':'))
3336 {
3337 struct reloc_table_entry *entry;
3338
3339 /* Try to parse a relocation modifier. Anything else is
3340 an error. */
3341 if (!(entry = find_reloc_table_entry (&p)))
3342 {
3343 set_syntax_error (_("unknown relocation modifier"));
3344 return FALSE;
3345 }
3346
3347 if (entry->ldst_type == 0)
3348 {
3349 set_syntax_error
3350 (_("this relocation modifier is not allowed on this "
3351 "instruction"));
3352 return FALSE;
3353 }
3354
3355 /* [Xn,#:<reloc_op>: */
3356 /* We now have the group relocation table entry corresponding to
3357 the name in the assembler source. Next, we parse the
3358 expression. */
3359 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3360 {
3361 set_syntax_error (_("invalid relocation expression"));
3362 return FALSE;
3363 }
3364
3365 /* [Xn,#:<reloc_op>:<expr> */
3366 /* Record the load/store relocation type. */
3367 inst.reloc.type = entry->ldst_type;
3368 inst.reloc.pc_rel = entry->pc_rel;
3369 }
3370 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3371 {
3372 set_syntax_error (_("invalid expression in the address"));
3373 return FALSE;
3374 }
3375 /* [Xn,<expr> */
3376 }
3377 }
3378
3379 if (! skip_past_char (&p, ']'))
3380 {
3381 set_syntax_error (_("']' expected"));
3382 return FALSE;
3383 }
3384
3385 if (skip_past_char (&p, '!'))
3386 {
3387 if (operand->addr.preind && operand->addr.offset.is_reg)
3388 {
3389 set_syntax_error (_("register offset not allowed in pre-indexed "
3390 "addressing mode"));
3391 return FALSE;
3392 }
3393 /* [Xn]! */
3394 operand->addr.writeback = 1;
3395 }
3396 else if (skip_past_comma (&p))
3397 {
3398 /* [Xn], */
3399 operand->addr.postind = 1;
3400 operand->addr.writeback = 1;
3401
3402 if (operand->addr.preind)
3403 {
3404 set_syntax_error (_("cannot combine pre- and post-indexing"));
3405 return FALSE;
3406 }
3407
3408 reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
3409 if (reg)
3410 {
3411 /* [Xn],Xm */
3412 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3413 {
3414 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3415 return FALSE;
3416 }
3417
3418 operand->addr.offset.regno = reg->number;
3419 operand->addr.offset.is_reg = 1;
3420 }
3421 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3422 {
3423 /* [Xn],#expr */
3424 set_syntax_error (_("invalid expression in the address"));
3425 return FALSE;
3426 }
3427 }
3428
3429 /* If at this point neither .preind nor .postind is set, we have a
3430 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3431 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3432 {
3433 if (operand->addr.writeback)
3434 {
3435 /* Reject [Rn]! */
3436 set_syntax_error (_("missing offset in the pre-indexed address"));
3437 return FALSE;
3438 }
3439 operand->addr.preind = 1;
3440 inst.reloc.exp.X_op = O_constant;
3441 inst.reloc.exp.X_add_number = 0;
3442 }
3443
3444 *str = p;
3445 return TRUE;
3446 }
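
/* Worked examples of the fields filled in by parse_address_main (a
   sketch; the addresses are hypothetical):

     "[x1, #8]!"   .base_regno = 1, .preind = 1, .writeback = 1, and
                   inst.reloc.exp holds the constant 8
     "[x2], x3"    .base_regno = 2, .postind = 1, .writeback = 1,
                   .offset.regno = 3, .offset.is_reg = 1
     "[x0]"        .base_regno = 0, .preind = 1 with a zero constant
                   offset, i.e. shorthand for "[x0, #0]".  */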
3447
3448 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3449 on success. */
3450 static bfd_boolean
3451 parse_address (char **str, aarch64_opnd_info *operand)
3452 {
3453 return parse_address_main (str, operand);
3454 }
3455
3456 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3457 Return TRUE on success; otherwise return FALSE. */
3458 static bfd_boolean
3459 parse_half (char **str, int *internal_fixup_p)
3460 {
3461 char *p = *str;
3462
3463 skip_past_char (&p, '#');
3464
3465 gas_assert (internal_fixup_p);
3466 *internal_fixup_p = 0;
3467
3468 if (*p == ':')
3469 {
3470 struct reloc_table_entry *entry;
3471
3472 /* Try to parse a relocation. Anything else is an error. */
3473 ++p;
3474 if (!(entry = find_reloc_table_entry (&p)))
3475 {
3476 set_syntax_error (_("unknown relocation modifier"));
3477 return FALSE;
3478 }
3479
3480 if (entry->movw_type == 0)
3481 {
3482 set_syntax_error
3483 (_("this relocation modifier is not allowed on this instruction"));
3484 return FALSE;
3485 }
3486
3487 inst.reloc.type = entry->movw_type;
3488 }
3489 else
3490 *internal_fixup_p = 1;
3491
3492 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3493 return FALSE;
3494
3495 *str = p;
3496 return TRUE;
3497 }
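
/* Worked examples for parse_half (a sketch; the operands are
   hypothetical): for "#:abs_g1:var" the relocation modifier is consumed,
   inst.reloc.type becomes BFD_RELOC_AARCH64_MOVW_G1, *INTERNAL_FIXUP_P is
   left as 0 and "var" is parsed as the expression.  For a plain "#1234"
   no modifier is present, so *INTERNAL_FIXUP_P is set to 1 and the
   constant is parsed into inst.reloc.exp.  */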
3498
3499 /* Parse an operand for an ADRP instruction:
3500 ADRP <Xd>, <label>
3501 Return TRUE on success; otherwise return FALSE. */
3502
3503 static bfd_boolean
3504 parse_adrp (char **str)
3505 {
3506 char *p;
3507
3508 p = *str;
3509 if (*p == ':')
3510 {
3511 struct reloc_table_entry *entry;
3512
3513 /* Try to parse a relocation. Anything else is an error. */
3514 ++p;
3515 if (!(entry = find_reloc_table_entry (&p)))
3516 {
3517 set_syntax_error (_("unknown relocation modifier"));
3518 return FALSE;
3519 }
3520
3521 if (entry->adrp_type == 0)
3522 {
3523 set_syntax_error
3524 (_("this relocation modifier is not allowed on this instruction"));
3525 return FALSE;
3526 }
3527
3528 inst.reloc.type = entry->adrp_type;
3529 }
3530 else
3531 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3532
3533 inst.reloc.pc_rel = 1;
3534
3535 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3536 return FALSE;
3537
3538 *str = p;
3539 return TRUE;
3540 }
3541
3542 /* Miscellaneous. */
3543
3544 /* Parse an option for a preload instruction. Returns the encoding for the
3545 option, or PARSE_FAIL. */
3546
3547 static int
3548 parse_pldop (char **str)
3549 {
3550 char *p, *q;
3551 const struct aarch64_name_value_pair *o;
3552
3553 p = q = *str;
3554 while (ISALNUM (*q))
3555 q++;
3556
3557 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3558 if (!o)
3559 return PARSE_FAIL;
3560
3561 *str = q;
3562 return o->value;
3563 }
3564
3565 /* Parse an option for a barrier instruction. Returns the encoding for the
3566 option, or PARSE_FAIL. */
3567
3568 static int
3569 parse_barrier (char **str)
3570 {
3571 char *p, *q;
3572 const asm_barrier_opt *o;
3573
3574 p = q = *str;
3575 while (ISALPHA (*q))
3576 q++;
3577
3578 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3579 if (!o)
3580 return PARSE_FAIL;
3581
3582 *str = q;
3583 return o->value;
3584 }
3585
3586 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3587 and return 0 if successful. Otherwise return PARSE_FAIL. */
3588
3589 static int
3590 parse_barrier_psb (char **str,
3591 const struct aarch64_name_value_pair ** hint_opt)
3592 {
3593 char *p, *q;
3594 const struct aarch64_name_value_pair *o;
3595
3596 p = q = *str;
3597 while (ISALPHA (*q))
3598 q++;
3599
3600 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3601 if (!o)
3602 {
3603 set_fatal_syntax_error
3604 ( _("unknown or missing option to PSB"));
3605 return PARSE_FAIL;
3606 }
3607
3608 if (o->value != 0x11)
3609 {
3610 /* PSB only accepts option name 'CSYNC'. */
3611 set_syntax_error
3612 (_("the specified option is not accepted for PSB"));
3613 return PARSE_FAIL;
3614 }
3615
3616 *str = q;
3617 *hint_opt = o;
3618 return 0;
3619 }
3620
3621 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3622 Returns the encoding for the option, or PARSE_FAIL.
3623
3624 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3625 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3626
3627 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3628 field, otherwise as a system register.
3629 */
3630
3631 static int
3632 parse_sys_reg (char **str, struct hash_control *sys_regs,
3633 int imple_defined_p, int pstatefield_p)
3634 {
3635 char *p, *q;
3636 char buf[32];
3637 const aarch64_sys_reg *o;
3638 int value;
3639
3640 p = buf;
3641 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3642 if (p < buf + 31)
3643 *p++ = TOLOWER (*q);
3644 *p = '\0';
3645 /* Assert that BUF was large enough. */
3646 gas_assert (p - buf == q - *str);
3647
3648 o = hash_find (sys_regs, buf);
3649 if (!o)
3650 {
3651 if (!imple_defined_p)
3652 return PARSE_FAIL;
3653 else
3654 {
3655 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3656 unsigned int op0, op1, cn, cm, op2;
3657
3658 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3659 != 5)
3660 return PARSE_FAIL;
3661 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3662 return PARSE_FAIL;
3663 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3664 }
3665 }
3666 else
3667 {
3668 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3669 as_bad (_("selected processor does not support PSTATE field "
3670 "name '%s'"), buf);
3671 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3672 as_bad (_("selected processor does not support system register "
3673 "name '%s'"), buf);
3674 if (aarch64_sys_reg_deprecated_p (o))
3675 as_warn (_("system register name '%s' is deprecated and may be "
3676 "removed in a future release"), buf);
3677 value = o->value;
3678 }
3679
3680 *str = q;
3681 return value;
3682 }
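
/* A worked example of the implementation-defined form accepted above (a
   sketch; the register name is hypothetical): assuming "s3_0_c13_c0_2" is
   not found in SYS_REGS and IMPLE_DEFINED_P is non-zero, the sscanf
   pattern yields op0 = 3, op1 = 0, Cn = 13, Cm = 0, op2 = 2, all in
   range, so the returned encoding is
   (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 2 == 0xc682.  */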
3683
3684 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3685 for the option, or NULL. */
3686
3687 static const aarch64_sys_ins_reg *
3688 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3689 {
3690 char *p, *q;
3691 char buf[32];
3692 const aarch64_sys_ins_reg *o;
3693
3694 p = buf;
3695 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3696 if (p < buf + 31)
3697 *p++ = TOLOWER (*q);
3698 *p = '\0';
3699
3700 o = hash_find (sys_ins_regs, buf);
3701 if (!o)
3702 return NULL;
3703
3704 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3705 as_bad (_("selected processor does not support system register "
3706 "name '%s'"), buf);
3707
3708 *str = q;
3709 return o;
3710 }
3711 \f
3712 #define po_char_or_fail(chr) do { \
3713 if (! skip_past_char (&str, chr)) \
3714 goto failure; \
3715 } while (0)
3716
3717 #define po_reg_or_fail(regtype) do { \
3718 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3719 if (val == PARSE_FAIL) \
3720 { \
3721 set_default_error (); \
3722 goto failure; \
3723 } \
3724 } while (0)
3725
3726 #define po_int_reg_or_fail(reg_type) do { \
3727 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
3728 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
3729 { \
3730 set_default_error (); \
3731 goto failure; \
3732 } \
3733 info->reg.regno = reg->number; \
3734 info->qualifier = qualifier; \
3735 } while (0)
3736
3737 #define po_imm_nc_or_fail() do { \
3738 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3739 goto failure; \
3740 } while (0)
3741
3742 #define po_imm_or_fail(min, max) do { \
3743 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3744 goto failure; \
3745 if (val < min || val > max) \
3746 { \
3747 set_fatal_syntax_error (_("immediate value out of range "\
3748 #min " to "#max)); \
3749 goto failure; \
3750 } \
3751 } while (0)
3752
3753 #define po_misc_or_fail(expr) do { \
3754 if (!expr) \
3755 goto failure; \
3756 } while (0)
3757 \f
3758 /* Encode the 12-bit imm field of Add/sub immediate. */
3759 static inline uint32_t
3760 encode_addsub_imm (uint32_t imm)
3761 {
3762 return imm << 10;
3763 }
3764
3765 /* Encode the shift amount field of Add/sub immediate. */
3766 static inline uint32_t
3767 encode_addsub_imm_shift_amount (uint32_t cnt)
3768 {
3769 return cnt << 22;
3770 }
3771
3772
3773 /* Encode the imm field of Adr instruction. */
3774 static inline uint32_t
3775 encode_adr_imm (uint32_t imm)
3776 {
3777 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3778 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3779 }
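
/* A worked example for encode_adr_imm (a sketch, worked by hand): for
   IMM == 5 (binary 101), imm[1:0] == 1 lands in bits [30:29] and
   imm[20:2] == 1 lands in bits [23:5], so the function returns
   0x20000020.  */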
3780
3781 /* Encode the immediate field of Move wide immediate. */
3782 static inline uint32_t
3783 encode_movw_imm (uint32_t imm)
3784 {
3785 return imm << 5;
3786 }
3787
3788 /* Encode the 26-bit offset of unconditional branch. */
3789 static inline uint32_t
3790 encode_branch_ofs_26 (uint32_t ofs)
3791 {
3792 return ofs & ((1 << 26) - 1);
3793 }
3794
3795 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3796 static inline uint32_t
3797 encode_cond_branch_ofs_19 (uint32_t ofs)
3798 {
3799 return (ofs & ((1 << 19) - 1)) << 5;
3800 }
3801
3802 /* Encode the 19-bit offset of ld literal. */
3803 static inline uint32_t
3804 encode_ld_lit_ofs_19 (uint32_t ofs)
3805 {
3806 return (ofs & ((1 << 19) - 1)) << 5;
3807 }
3808
3809 /* Encode the 14-bit offset of test & branch. */
3810 static inline uint32_t
3811 encode_tst_branch_ofs_14 (uint32_t ofs)
3812 {
3813 return (ofs & ((1 << 14) - 1)) << 5;
3814 }
3815
3816 /* Encode the 16-bit imm field of svc/hvc/smc. */
3817 static inline uint32_t
3818 encode_svc_imm (uint32_t imm)
3819 {
3820 return imm << 5;
3821 }
3822
3823 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3824 static inline uint32_t
3825 reencode_addsub_switch_add_sub (uint32_t opcode)
3826 {
3827 return opcode ^ (1 << 30);
3828 }
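
/* A hedged, editorial self-check sketch for the re-encoding helper above
   (not called anywhere; the function name is new here and the two opcode
   values are worked by hand from the A64 add/sub immediate layout for
   "add x0, x0, #1" and "sub x0, x0, #1").  */

static void ATTRIBUTE_UNUSED
aarch64_addsub_switch_example (void)
{
  /* Toggling bit 30 flips ADD to SUB and back.  */
  gas_assert (reencode_addsub_switch_add_sub (0x91000400) == 0xd1000400);
  gas_assert (reencode_addsub_switch_add_sub (0xd1000400) == 0x91000400);
}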
3829
3830 static inline uint32_t
3831 reencode_movzn_to_movz (uint32_t opcode)
3832 {
3833 return opcode | (1 << 30);
3834 }
3835
3836 static inline uint32_t
3837 reencode_movzn_to_movn (uint32_t opcode)
3838 {
3839 return opcode & ~(1 << 30);
3840 }
3841
3842 /* Overall per-instruction processing. */
3843
3844 /* We need to be able to fix up arbitrary expressions in some statements.
3845 This is so that we can handle symbols that are an arbitrary distance from
3846 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3847 which returns part of an address in a form which will be valid for
3848 a data instruction. We do this by pushing the expression into a symbol
3849 in the expr_section, and creating a fix for that. */
3850
3851 static fixS *
3852 fix_new_aarch64 (fragS * frag,
3853 int where,
3854 short int size, expressionS * exp, int pc_rel, int reloc)
3855 {
3856 fixS *new_fix;
3857
3858 switch (exp->X_op)
3859 {
3860 case O_constant:
3861 case O_symbol:
3862 case O_add:
3863 case O_subtract:
3864 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3865 break;
3866
3867 default:
3868 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3869 pc_rel, reloc);
3870 break;
3871 }
3872 return new_fix;
3873 }
3874 \f
3875 /* Diagnostics on operand errors. */
3876
3877 /* By default, output a verbose error message.
3878 Disable the verbose error message with -mno-verbose-error. */
3879 static int verbose_error_p = 1;
3880
3881 #ifdef DEBUG_AARCH64
3882 /* N.B. this is only for debugging purposes. */
3883 const char* operand_mismatch_kind_names[] =
3884 {
3885 "AARCH64_OPDE_NIL",
3886 "AARCH64_OPDE_RECOVERABLE",
3887 "AARCH64_OPDE_SYNTAX_ERROR",
3888 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3889 "AARCH64_OPDE_INVALID_VARIANT",
3890 "AARCH64_OPDE_OUT_OF_RANGE",
3891 "AARCH64_OPDE_UNALIGNED",
3892 "AARCH64_OPDE_REG_LIST",
3893 "AARCH64_OPDE_OTHER_ERROR",
3894 };
3895 #endif /* DEBUG_AARCH64 */
3896
3897 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3898
3899 When multiple errors of different kinds are found in the same assembly
3900 line, only the error of the highest severity will be picked up for
3901 issuing the diagnostics. */
3902
3903 static inline bfd_boolean
3904 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3905 enum aarch64_operand_error_kind rhs)
3906 {
3907 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3908 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3909 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3910 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3911 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3912 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3913 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3914 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3915 return lhs > rhs;
3916 }
3917
3918 /* Helper routine to get the mnemonic name from the assembly instruction
3919 line; should only be called for diagnostic purposes, as a string
3920 copy operation is involved, which may affect runtime
3921 performance if used elsewhere. */
3922
3923 static const char*
3924 get_mnemonic_name (const char *str)
3925 {
3926 static char mnemonic[32];
3927 char *ptr;
3928
3929 /* Get the first 31 bytes and assume that the full name is included. */
3930 strncpy (mnemonic, str, 31);
3931 mnemonic[31] = '\0';
3932
3933 /* Scan up to the end of the mnemonic, which must end in white space,
3934 '.', or end of string. */
3935 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3936 ;
3937
3938 *ptr = '\0';
3939
3940 /* Append '...' to the truncated long name. */
3941 if (ptr - mnemonic == 31)
3942 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3943
3944 return mnemonic;
3945 }
3946
3947 static void
3948 reset_aarch64_instruction (aarch64_instruction *instruction)
3949 {
3950 memset (instruction, '\0', sizeof (aarch64_instruction));
3951 instruction->reloc.type = BFD_RELOC_UNUSED;
3952 }
3953
3954 /* Data structure storing one user error in the assembly code related to
3955 operands. */
3956
3957 struct operand_error_record
3958 {
3959 const aarch64_opcode *opcode;
3960 aarch64_operand_error detail;
3961 struct operand_error_record *next;
3962 };
3963
3964 typedef struct operand_error_record operand_error_record;
3965
3966 struct operand_errors
3967 {
3968 operand_error_record *head;
3969 operand_error_record *tail;
3970 };
3971
3972 typedef struct operand_errors operand_errors;
3973
3974 /* Top-level data structure reporting user errors for the current line of
3975 the assembly code.
3976 The way md_assemble works is that all opcodes sharing the same mnemonic
3977 name are iterated to find a match to the assembly line. In this data
3978 structure, each such opcode will have one operand_error_record
3979 allocated and inserted. In other words, excessive errors related to
3980 a single opcode are disregarded. */
3981 operand_errors operand_error_report;
3982
3983 /* Free record nodes. */
3984 static operand_error_record *free_opnd_error_record_nodes = NULL;
3985
3986 /* Initialize the data structure that stores the operand mismatch
3987 information on assembling one line of the assembly code. */
3988 static void
3989 init_operand_error_report (void)
3990 {
3991 if (operand_error_report.head != NULL)
3992 {
3993 gas_assert (operand_error_report.tail != NULL);
3994 operand_error_report.tail->next = free_opnd_error_record_nodes;
3995 free_opnd_error_record_nodes = operand_error_report.head;
3996 operand_error_report.head = NULL;
3997 operand_error_report.tail = NULL;
3998 return;
3999 }
4000 gas_assert (operand_error_report.tail == NULL);
4001 }
4002
4003 /* Return TRUE if some operand error has been recorded during the
4004 parsing of the current assembly line using the opcode *OPCODE;
4005 otherwise return FALSE. */
4006 static inline bfd_boolean
4007 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4008 {
4009 operand_error_record *record = operand_error_report.head;
4010 return record && record->opcode == opcode;
4011 }
4012
4013 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4014 OPCODE field is initialized with OPCODE.
4015 N.B. only one record for each opcode, i.e. at most one error is
4016 recorded for each instruction template. */
4017
4018 static void
4019 add_operand_error_record (const operand_error_record* new_record)
4020 {
4021 const aarch64_opcode *opcode = new_record->opcode;
4022 operand_error_record* record = operand_error_report.head;
4023
4024 /* The record may have been created for this opcode. If not, we need
4025 to prepare one. */
4026 if (! opcode_has_operand_error_p (opcode))
4027 {
4028 /* Get one empty record. */
4029 if (free_opnd_error_record_nodes == NULL)
4030 {
4031 record = XNEW (operand_error_record);
4032 }
4033 else
4034 {
4035 record = free_opnd_error_record_nodes;
4036 free_opnd_error_record_nodes = record->next;
4037 }
4038 record->opcode = opcode;
4039 /* Insert at the head. */
4040 record->next = operand_error_report.head;
4041 operand_error_report.head = record;
4042 if (operand_error_report.tail == NULL)
4043 operand_error_report.tail = record;
4044 }
4045 else if (record->detail.kind != AARCH64_OPDE_NIL
4046 && record->detail.index <= new_record->detail.index
4047 && operand_error_higher_severity_p (record->detail.kind,
4048 new_record->detail.kind))
4049 {
4050 /* In the case of multiple errors found on operands related with a
4051 single opcode, only record the error of the leftmost operand and
4052 only if the error is of higher severity. */
4053 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4054 " the existing error %s on operand %d",
4055 operand_mismatch_kind_names[new_record->detail.kind],
4056 new_record->detail.index,
4057 operand_mismatch_kind_names[record->detail.kind],
4058 record->detail.index);
4059 return;
4060 }
4061
4062 record->detail = new_record->detail;
4063 }
4064
4065 static inline void
4066 record_operand_error_info (const aarch64_opcode *opcode,
4067 aarch64_operand_error *error_info)
4068 {
4069 operand_error_record record;
4070 record.opcode = opcode;
4071 record.detail = *error_info;
4072 add_operand_error_record (&record);
4073 }
4074
4075 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4076 error message *ERROR, for operand IDX (count from 0). */
4077
4078 static void
4079 record_operand_error (const aarch64_opcode *opcode, int idx,
4080 enum aarch64_operand_error_kind kind,
4081 const char* error)
4082 {
4083 aarch64_operand_error info;
4084 memset(&info, 0, sizeof (info));
4085 info.index = idx;
4086 info.kind = kind;
4087 info.error = error;
4088 record_operand_error_info (opcode, &info);
4089 }
4090
4091 static void
4092 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4093 enum aarch64_operand_error_kind kind,
4094 const char* error, const int *extra_data)
4095 {
4096 aarch64_operand_error info;
4097 info.index = idx;
4098 info.kind = kind;
4099 info.error = error;
4100 info.data[0] = extra_data[0];
4101 info.data[1] = extra_data[1];
4102 info.data[2] = extra_data[2];
4103 record_operand_error_info (opcode, &info);
4104 }
4105
4106 static void
4107 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4108 const char* error, int lower_bound,
4109 int upper_bound)
4110 {
4111 int data[3] = {lower_bound, upper_bound, 0};
4112 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4113 error, data);
4114 }
4115
4116 /* Remove the operand error record for *OPCODE. */
4117 static void ATTRIBUTE_UNUSED
4118 remove_operand_error_record (const aarch64_opcode *opcode)
4119 {
4120 if (opcode_has_operand_error_p (opcode))
4121 {
4122 operand_error_record* record = operand_error_report.head;
4123 gas_assert (record != NULL && operand_error_report.tail != NULL);
4124 operand_error_report.head = record->next;
4125 record->next = free_opnd_error_record_nodes;
4126 free_opnd_error_record_nodes = record;
4127 if (operand_error_report.head == NULL)
4128 {
4129 gas_assert (operand_error_report.tail == record);
4130 operand_error_report.tail = NULL;
4131 }
4132 }
4133 }
4134
4135 /* Given the instruction in *INSTR, return the index of the best matched
4136 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4137
4138 Return -1 if there is no qualifier sequence; return the first match
4139 if multiple matches are found. */
4140
4141 static int
4142 find_best_match (const aarch64_inst *instr,
4143 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4144 {
4145 int i, num_opnds, max_num_matched, idx;
4146
4147 num_opnds = aarch64_num_of_operands (instr->opcode);
4148 if (num_opnds == 0)
4149 {
4150 DEBUG_TRACE ("no operand");
4151 return -1;
4152 }
4153
4154 max_num_matched = 0;
4155 idx = 0;
4156
4157 /* For each pattern. */
4158 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4159 {
4160 int j, num_matched;
4161 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4162
4163 /* Most opcodes have far fewer patterns in the list. */
4164 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4165 {
4166 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4167 break;
4168 }
4169
4170 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4171 if (*qualifiers == instr->operands[j].qualifier)
4172 ++num_matched;
4173
4174 if (num_matched > max_num_matched)
4175 {
4176 max_num_matched = num_matched;
4177 idx = i;
4178 }
4179 }
4180
4181 DEBUG_TRACE ("return with %d", idx);
4182 return idx;
4183 }
4184
4185 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4186 corresponding operands in *INSTR. */
4187
4188 static inline void
4189 assign_qualifier_sequence (aarch64_inst *instr,
4190 const aarch64_opnd_qualifier_t *qualifiers)
4191 {
4192 int i = 0;
4193 int num_opnds = aarch64_num_of_operands (instr->opcode);
4194 gas_assert (num_opnds);
4195 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4196 instr->operands[i].qualifier = *qualifiers;
4197 }
4198
4199 /* Print operands for diagnostic purposes. */
4200
4201 static void
4202 print_operands (char *buf, const aarch64_opcode *opcode,
4203 const aarch64_opnd_info *opnds)
4204 {
4205 int i;
4206
4207 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4208 {
4209 char str[128];
4210
4211 /* We rely mainly on the opcode's operand info; however, we also look
4212 into inst->operands to support the printing of an optional
4213 operand.
4214 The two operand codes should be the same in all cases, apart from
4215 when the operand can be optional. */
4216 if (opcode->operands[i] == AARCH64_OPND_NIL
4217 || opnds[i].type == AARCH64_OPND_NIL)
4218 break;
4219
4220 /* Generate the operand string in STR. */
4221 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4222
4223 /* Delimiter. */
4224 if (str[0] != '\0')
4225 strcat (buf, i == 0 ? " " : ",");
4226
4227 /* Append the operand string. */
4228 strcat (buf, str);
4229 }
4230 }
4231
4232 /* Send a string to stderr as information. */
4233
4234 static void
4235 output_info (const char *format, ...)
4236 {
4237 const char *file;
4238 unsigned int line;
4239 va_list args;
4240
4241 file = as_where (&line);
4242 if (file)
4243 {
4244 if (line != 0)
4245 fprintf (stderr, "%s:%u: ", file, line);
4246 else
4247 fprintf (stderr, "%s: ", file);
4248 }
4249 fprintf (stderr, _("Info: "));
4250 va_start (args, format);
4251 vfprintf (stderr, format, args);
4252 va_end (args);
4253 (void) putc ('\n', stderr);
4254 }
4255
4256 /* Output one operand error record. */
4257
4258 static void
4259 output_operand_error_record (const operand_error_record *record, char *str)
4260 {
4261 const aarch64_operand_error *detail = &record->detail;
4262 int idx = detail->index;
4263 const aarch64_opcode *opcode = record->opcode;
4264 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4265 : AARCH64_OPND_NIL);
4266
4267 switch (detail->kind)
4268 {
4269 case AARCH64_OPDE_NIL:
4270 gas_assert (0);
4271 break;
4272
4273 case AARCH64_OPDE_SYNTAX_ERROR:
4274 case AARCH64_OPDE_RECOVERABLE:
4275 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4276 case AARCH64_OPDE_OTHER_ERROR:
4277 /* Use the prepared error message if there is one; otherwise use the
4278 operand description string to describe the error. */
4279 if (detail->error != NULL)
4280 {
4281 if (idx < 0)
4282 as_bad (_("%s -- `%s'"), detail->error, str);
4283 else
4284 as_bad (_("%s at operand %d -- `%s'"),
4285 detail->error, idx + 1, str);
4286 }
4287 else
4288 {
4289 gas_assert (idx >= 0);
4290 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4291 aarch64_get_operand_desc (opd_code), str);
4292 }
4293 break;
4294
4295 case AARCH64_OPDE_INVALID_VARIANT:
4296 as_bad (_("operand mismatch -- `%s'"), str);
4297 if (verbose_error_p)
4298 {
4299 /* We will try to correct the erroneous instruction and also provide
4300 more information e.g. all other valid variants.
4301
4302 The string representation of the corrected instruction and other
4303 valid variants are generated by
4304
4305 1) obtaining the intermediate representation of the erroneous
4306 instruction;
4307 2) manipulating the IR, e.g. replacing the operand qualifier;
4308 3) printing out the instruction by calling the printer functions
4309 shared with the disassembler.
4310
4311 The limitation of this method is that the exact input assembly
4312 line cannot be accurately reproduced in some cases, for example an
4313 optional operand present in the actual assembly line will be
4314 omitted in the output; likewise for the optional syntax rules,
4315 e.g. the # before the immediate. Another limitation is that the
4316 assembly symbols and relocation operations in the assembly line
4317 currently cannot be printed out in the error report. Last but not
4318 least, when other errors co-exist with this error, the 'corrected'
4319 instruction may still be incorrect, e.g. given
4320 'ldnp h0,h1,[x0,#6]!'
4321 this diagnosis will provide the version:
4322 'ldnp s0,s1,[x0,#6]!'
4323 which is still not right. */
4324 size_t len = strlen (get_mnemonic_name (str));
4325 int i, qlf_idx;
4326 bfd_boolean result;
4327 char buf[2048];
4328 aarch64_inst *inst_base = &inst.base;
4329 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4330
4331 /* Init inst. */
4332 reset_aarch64_instruction (&inst);
4333 inst_base->opcode = opcode;
4334
4335 /* Reset the error report so that there is no side effect on the
4336 following operand parsing. */
4337 init_operand_error_report ();
4338
4339 /* Fill inst. */
4340 result = parse_operands (str + len, opcode)
4341 && programmer_friendly_fixup (&inst);
4342 gas_assert (result);
4343 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4344 NULL, NULL);
4345 gas_assert (!result);
4346
4347 /* Find the most matched qualifier sequence. */
4348 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4349 gas_assert (qlf_idx > -1);
4350
4351 /* Assign the qualifiers. */
4352 assign_qualifier_sequence (inst_base,
4353 opcode->qualifiers_list[qlf_idx]);
4354
4355 /* Print the hint. */
4356 output_info (_(" did you mean this?"));
4357 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4358 print_operands (buf, opcode, inst_base->operands);
4359 output_info (_(" %s"), buf);
4360
4361 /* Print out other variant(s) if there is any. */
4362 if (qlf_idx != 0
4363 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4364 output_info (_(" other valid variant(s):"));
4365
4366 /* For each pattern. */
4367 qualifiers_list = opcode->qualifiers_list;
4368 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4369 {
4370 /* Most opcodes have far fewer patterns in the list.
4371 The first NIL qualifier indicates the end of the list. */
4372 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4373 break;
4374
4375 if (i != qlf_idx)
4376 {
4377 /* Mnemonics name. */
4378 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4379
4380 /* Assign the qualifiers. */
4381 assign_qualifier_sequence (inst_base, *qualifiers_list);
4382
4383 /* Print instruction. */
4384 print_operands (buf, opcode, inst_base->operands);
4385
4386 output_info (_(" %s"), buf);
4387 }
4388 }
4389 }
4390 break;
4391
4392 case AARCH64_OPDE_UNTIED_OPERAND:
4393 as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
4394 detail->index + 1, str);
4395 break;
4396
4397 case AARCH64_OPDE_OUT_OF_RANGE:
4398 if (detail->data[0] != detail->data[1])
4399 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4400 detail->error ? detail->error : _("immediate value"),
4401 detail->data[0], detail->data[1], idx + 1, str);
4402 else
4403 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4404 detail->error ? detail->error : _("immediate value"),
4405 detail->data[0], idx + 1, str);
4406 break;
4407
4408 case AARCH64_OPDE_REG_LIST:
4409 if (detail->data[0] == 1)
4410 as_bad (_("invalid number of registers in the list; "
4411 "only 1 register is expected at operand %d -- `%s'"),
4412 idx + 1, str);
4413 else
4414 as_bad (_("invalid number of registers in the list; "
4415 "%d registers are expected at operand %d -- `%s'"),
4416 detail->data[0], idx + 1, str);
4417 break;
4418
4419 case AARCH64_OPDE_UNALIGNED:
4420 as_bad (_("immediate value should be a multiple of "
4421 "%d at operand %d -- `%s'"),
4422 detail->data[0], idx + 1, str);
4423 break;
4424
4425 default:
4426 gas_assert (0);
4427 break;
4428 }
4429 }
4430
4431 /* Process and output the error message about the operand mismatch.
4432 
4433 When this function is called, the operand error information has
4434 been collected for an assembly line and there will be multiple
4435 errors in the case of multiple instruction templates; output the
4436 error message that most closely describes the problem. */
4437
4438 static void
4439 output_operand_error_report (char *str)
4440 {
4441 int largest_error_pos;
4442 const char *msg = NULL;
4443 enum aarch64_operand_error_kind kind;
4444 operand_error_record *curr;
4445 operand_error_record *head = operand_error_report.head;
4446 operand_error_record *record = NULL;
4447
4448 /* No error to report. */
4449 if (head == NULL)
4450 return;
4451
4452 gas_assert (head != NULL && operand_error_report.tail != NULL);
4453
4454 /* Only one error. */
4455 if (head == operand_error_report.tail)
4456 {
4457 DEBUG_TRACE ("single opcode entry with error kind: %s",
4458 operand_mismatch_kind_names[head->detail.kind]);
4459 output_operand_error_record (head, str);
4460 return;
4461 }
4462
4463 /* Find the error kind of the highest severity. */
4464 DEBUG_TRACE ("multiple opcode entries with error kind");
4465 kind = AARCH64_OPDE_NIL;
4466 for (curr = head; curr != NULL; curr = curr->next)
4467 {
4468 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4469 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4470 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4471 kind = curr->detail.kind;
4472 }
4473 gas_assert (kind != AARCH64_OPDE_NIL);
4474
4475 /* Pick up one of errors of KIND to report. */
4476 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4477 for (curr = head; curr != NULL; curr = curr->next)
4478 {
4479 if (curr->detail.kind != kind)
4480 continue;
4481 /* If there are multiple errors, pick up the one with the highest
4482 mismatching operand index. In the case of multiple errors with
4483 the same highest operand index, pick the first one, or the first
4484 one with a non-NULL error message. */
4485 if (curr->detail.index > largest_error_pos
4486 || (curr->detail.index == largest_error_pos && msg == NULL
4487 && curr->detail.error != NULL))
4488 {
4489 largest_error_pos = curr->detail.index;
4490 record = curr;
4491 msg = record->detail.error;
4492 }
4493 }
4494
4495 gas_assert (largest_error_pos != -2 && record != NULL);
4496 DEBUG_TRACE ("Pick up error kind %s to report",
4497 operand_mismatch_kind_names[record->detail.kind]);
4498
4499 /* Output. */
4500 output_operand_error_record (record, str);
4501 }
4502 \f
4503 /* Write an AARCH64 instruction to buf - always little-endian. */
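/* For example, the NOP encoding 0xd503201f is emitted as the byte
   sequence 1f 20 03 d5.  */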
4504 static void
4505 put_aarch64_insn (char *buf, uint32_t insn)
4506 {
4507 unsigned char *where = (unsigned char *) buf;
4508 where[0] = insn;
4509 where[1] = insn >> 8;
4510 where[2] = insn >> 16;
4511 where[3] = insn >> 24;
4512 }
4513
4514 static uint32_t
4515 get_aarch64_insn (char *buf)
4516 {
4517 unsigned char *where = (unsigned char *) buf;
4518 uint32_t result;
4519 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4520 return result;
4521 }
4522
4523 static void
4524 output_inst (struct aarch64_inst *new_inst)
4525 {
4526 char *to = NULL;
4527
4528 to = frag_more (INSN_SIZE);
4529
4530 frag_now->tc_frag_data.recorded = 1;
4531
4532 put_aarch64_insn (to, inst.base.value);
4533
4534 if (inst.reloc.type != BFD_RELOC_UNUSED)
4535 {
4536 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4537 INSN_SIZE, &inst.reloc.exp,
4538 inst.reloc.pc_rel,
4539 inst.reloc.type);
4540 DEBUG_TRACE ("Prepared relocation fix up");
4541 /* Don't check the addend value against the instruction size,
4542 that's the job of our code in md_apply_fix(). */
4543 fixp->fx_no_overflow = 1;
4544 if (new_inst != NULL)
4545 fixp->tc_fix_data.inst = new_inst;
4546 if (aarch64_gas_internal_fixup_p ())
4547 {
4548 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4549 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4550 fixp->fx_addnumber = inst.reloc.flags;
4551 }
4552 }
4553
4554 dwarf2_emit_insn (INSN_SIZE);
4555 }
4556
4557 /* Link together opcodes of the same name. */
4558
4559 struct templates
4560 {
4561 aarch64_opcode *opcode;
4562 struct templates *next;
4563 };
4564
4565 typedef struct templates templates;
4566
4567 static templates *
4568 lookup_mnemonic (const char *start, int len)
4569 {
4570 templates *templ = NULL;
4571
4572 templ = hash_find_n (aarch64_ops_hsh, start, len);
4573 return templ;
4574 }
4575
4576 /* Subroutine of md_assemble, responsible for looking up the primary
4577 opcode from the mnemonic the user wrote. STR points to the
4578 beginning of the mnemonic. */
4579
4580 static templates *
4581 opcode_lookup (char **str)
4582 {
4583 char *end, *base;
4584 const aarch64_cond *cond;
4585 char condname[16];
4586 int len;
4587
4588 /* Scan up to the end of the mnemonic, which must end in white space,
4589 '.', or end of string. */
4590 for (base = end = *str; is_part_of_name(*end); end++)
4591 if (*end == '.')
4592 break;
4593
4594 if (end == base)
4595 return 0;
4596
4597 inst.cond = COND_ALWAYS;
4598
4599 /* Handle a possible condition. */
4600 if (end[0] == '.')
4601 {
4602 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4603 if (cond)
4604 {
4605 inst.cond = cond->value;
4606 *str = end + 3;
4607 }
4608 else
4609 {
4610 *str = end;
4611 return 0;
4612 }
4613 }
4614 else
4615 *str = end;
4616
4617 len = end - base;
4618
4619 if (inst.cond == COND_ALWAYS)
4620 {
4621 /* Look for unaffixed mnemonic. */
4622 return lookup_mnemonic (base, len);
4623 }
4624 else if (len <= 13)
4625 {
4626 /* Append ".c" to the mnemonic if conditional. */
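/* For example, "b.eq" is looked up here with the key "b.c", while
   inst.cond already holds the value of the EQ condition.  */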
4627 memcpy (condname, base, len);
4628 memcpy (condname + len, ".c", 2);
4629 base = condname;
4630 len += 2;
4631 return lookup_mnemonic (base, len);
4632 }
4633
4634 return NULL;
4635 }
4636
4637 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4638 to a corresponding operand qualifier. */
4639
4640 static inline aarch64_opnd_qualifier_t
4641 vectype_to_qualifier (const struct vector_type_el *vectype)
4642 {
4643 /* Element size in bytes indexed by vector_el_type. */
4644 const unsigned char ele_size[5]
4645 = {1, 2, 4, 8, 16};
4646 const unsigned int ele_base [5] =
4647 {
4648 AARCH64_OPND_QLF_V_8B,
4649 AARCH64_OPND_QLF_V_2H,
4650 AARCH64_OPND_QLF_V_2S,
4651 AARCH64_OPND_QLF_V_1D,
4652 AARCH64_OPND_QLF_V_1Q
4653 };
4654
4655 if (!vectype->defined || vectype->type == NT_invtype)
4656 goto vectype_conversion_fail;
4657
4658 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4659
4660 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
4661 /* Vector element register. */
4662 return AARCH64_OPND_QLF_S_B + vectype->type;
4663 else
4664 {
4665 /* Vector register. */
4666 int reg_size = ele_size[vectype->type] * vectype->width;
4667 unsigned offset;
4668 unsigned shift;
4669 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4670 goto vectype_conversion_fail;
4671
4672 /* The conversion is by calculating the offset from the base operand
4673 qualifier for the vector type. The operand qualifiers are regular
4674 enough that the offset can be established by shifting the vector width by
4675 a vector-type dependent amount. */
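/* Worked example (assuming the usual ordering of the vector operand
   qualifiers): ".4s" has type NT_s and width 4, so reg_size is 16,
   shift is 2 and the result is AARCH64_OPND_QLF_V_2S + (4 >> 2),
   i.e. QLF_V_4S; ".8b" has type NT_b and width 8, so reg_size is 8,
   shift is 4 and the result is AARCH64_OPND_QLF_V_8B + (8 >> 4),
   i.e. QLF_V_8B.  */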
4676 shift = 0;
4677 if (vectype->type == NT_b)
4678 shift = 4;
4679 else if (vectype->type == NT_h || vectype->type == NT_s)
4680 shift = 2;
4681 else if (vectype->type >= NT_d)
4682 shift = 1;
4683 else
4684 gas_assert (0);
4685
4686 offset = ele_base [vectype->type] + (vectype->width >> shift);
4687 gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4688 && offset <= AARCH64_OPND_QLF_V_1Q);
4689 return offset;
4690 }
4691
4692 vectype_conversion_fail:
4693 first_error (_("bad vector arrangement type"));
4694 return AARCH64_OPND_QLF_NIL;
4695 }
4696
4697 /* Process an optional operand that has been omitted from the assembly line.
4698 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4699 instruction's opcode entry while IDX is the index of this omitted operand.
4700 */
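/* For instance, when "ret" is written without its optional register
   operand, the omitted Rn is filled in here from the opcode's default
   value (normally 30, i.e. X30).  */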
4701
4702 static void
4703 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4704 int idx, aarch64_opnd_info *operand)
4705 {
4706 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4707 gas_assert (optional_operand_p (opcode, idx));
4708 gas_assert (!operand->present);
4709
4710 switch (type)
4711 {
4712 case AARCH64_OPND_Rd:
4713 case AARCH64_OPND_Rn:
4714 case AARCH64_OPND_Rm:
4715 case AARCH64_OPND_Rt:
4716 case AARCH64_OPND_Rt2:
4717 case AARCH64_OPND_Rs:
4718 case AARCH64_OPND_Ra:
4719 case AARCH64_OPND_Rt_SYS:
4720 case AARCH64_OPND_Rd_SP:
4721 case AARCH64_OPND_Rn_SP:
4722 case AARCH64_OPND_Fd:
4723 case AARCH64_OPND_Fn:
4724 case AARCH64_OPND_Fm:
4725 case AARCH64_OPND_Fa:
4726 case AARCH64_OPND_Ft:
4727 case AARCH64_OPND_Ft2:
4728 case AARCH64_OPND_Sd:
4729 case AARCH64_OPND_Sn:
4730 case AARCH64_OPND_Sm:
4731 case AARCH64_OPND_Vd:
4732 case AARCH64_OPND_Vn:
4733 case AARCH64_OPND_Vm:
4734 case AARCH64_OPND_VdD1:
4735 case AARCH64_OPND_VnD1:
4736 operand->reg.regno = default_value;
4737 break;
4738
4739 case AARCH64_OPND_Ed:
4740 case AARCH64_OPND_En:
4741 case AARCH64_OPND_Em:
4742 operand->reglane.regno = default_value;
4743 break;
4744
4745 case AARCH64_OPND_IDX:
4746 case AARCH64_OPND_BIT_NUM:
4747 case AARCH64_OPND_IMMR:
4748 case AARCH64_OPND_IMMS:
4749 case AARCH64_OPND_SHLL_IMM:
4750 case AARCH64_OPND_IMM_VLSL:
4751 case AARCH64_OPND_IMM_VLSR:
4752 case AARCH64_OPND_CCMP_IMM:
4753 case AARCH64_OPND_FBITS:
4754 case AARCH64_OPND_UIMM4:
4755 case AARCH64_OPND_UIMM3_OP1:
4756 case AARCH64_OPND_UIMM3_OP2:
4757 case AARCH64_OPND_IMM:
4758 case AARCH64_OPND_WIDTH:
4759 case AARCH64_OPND_UIMM7:
4760 case AARCH64_OPND_NZCV:
4761 operand->imm.value = default_value;
4762 break;
4763
4764 case AARCH64_OPND_EXCEPTION:
4765 inst.reloc.type = BFD_RELOC_UNUSED;
4766 break;
4767
4768 case AARCH64_OPND_BARRIER_ISB:
4769 operand->barrier = aarch64_barrier_options + default_value;
4770
4771 default:
4772 break;
4773 }
4774 }
4775
4776 /* Process the relocation type for move wide instructions.
4777 Return TRUE on success; otherwise return FALSE. */
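/* For example, a MOVZ written with the :abs_g1: relocation modifier
   reaches this function with BFD_RELOC_AARCH64_MOVW_G1 and is given a
   shift amount of 16 below.  */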
4778
4779 static bfd_boolean
4780 process_movw_reloc_info (void)
4781 {
4782 int is32;
4783 unsigned shift;
4784
4785 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4786
4787 if (inst.base.opcode->op == OP_MOVK)
4788 switch (inst.reloc.type)
4789 {
4790 case BFD_RELOC_AARCH64_MOVW_G0_S:
4791 case BFD_RELOC_AARCH64_MOVW_G1_S:
4792 case BFD_RELOC_AARCH64_MOVW_G2_S:
4793 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4794 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4795 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4796 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4797 set_syntax_error
4798 (_("the specified relocation type is not allowed for MOVK"));
4799 return FALSE;
4800 default:
4801 break;
4802 }
4803
4804 switch (inst.reloc.type)
4805 {
4806 case BFD_RELOC_AARCH64_MOVW_G0:
4807 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4808 case BFD_RELOC_AARCH64_MOVW_G0_S:
4809 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4810 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4811 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4812 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4813 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4814 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4815 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4816 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4817 shift = 0;
4818 break;
4819 case BFD_RELOC_AARCH64_MOVW_G1:
4820 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4821 case BFD_RELOC_AARCH64_MOVW_G1_S:
4822 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4823 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4824 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4825 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4826 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4827 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4828 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4829 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4830 shift = 16;
4831 break;
4832 case BFD_RELOC_AARCH64_MOVW_G2:
4833 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4834 case BFD_RELOC_AARCH64_MOVW_G2_S:
4835 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4836 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4837 if (is32)
4838 {
4839 set_fatal_syntax_error
4840 (_("the specified relocation type is not allowed for 32-bit "
4841 "register"));
4842 return FALSE;
4843 }
4844 shift = 32;
4845 break;
4846 case BFD_RELOC_AARCH64_MOVW_G3:
4847 if (is32)
4848 {
4849 set_fatal_syntax_error
4850 (_("the specified relocation type is not allowed for 32-bit "
4851 "register"));
4852 return FALSE;
4853 }
4854 shift = 48;
4855 break;
4856 default:
4857 /* More cases should be added when more MOVW-related relocation types
4858 are supported in GAS. */
4859 gas_assert (aarch64_gas_internal_fixup_p ());
4860 /* The shift amount should have already been set by the parser. */
4861 return TRUE;
4862 }
4863 inst.base.operands[1].shifter.amount = shift;
4864 return TRUE;
4865 }
4866
4867 /* A primitive base-2 log calculator. */
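/* For example, get_logsz (1) is 0, get_logsz (4) is 2 and
   get_logsz (16) is 4; sizes that are not powers of two trip the
   assertion below.  */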
4868
4869 static inline unsigned int
4870 get_logsz (unsigned int size)
4871 {
4872 const unsigned char ls[16] =
4873 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4874 if (size > 16)
4875 {
4876 gas_assert (0);
4877 return -1;
4878 }
4879 gas_assert (ls[size - 1] != (unsigned char)-1);
4880 return ls[size - 1];
4881 }
4882
4883 /* Determine and return the real reloc type code for an instruction
4884 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
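/* For example, for "ldr x0, [x1, #:lo12:sym]" the transfer size is 8
   bytes, so the pseudo reloc type is narrowed down to
   BFD_RELOC_AARCH64_LDST64_LO12.  */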
4885
4886 static inline bfd_reloc_code_real_type
4887 ldst_lo12_determine_real_reloc_type (void)
4888 {
4889 unsigned logsz;
4890 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4891 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4892
4893 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
4894 {
4895 BFD_RELOC_AARCH64_LDST8_LO12,
4896 BFD_RELOC_AARCH64_LDST16_LO12,
4897 BFD_RELOC_AARCH64_LDST32_LO12,
4898 BFD_RELOC_AARCH64_LDST64_LO12,
4899 BFD_RELOC_AARCH64_LDST128_LO12
4900 },
4901 {
4902 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4903 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4904 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4905 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4906 BFD_RELOC_AARCH64_NONE
4907 },
4908 {
4909 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4910 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4911 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4912 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4913 BFD_RELOC_AARCH64_NONE
4914 }
4915 };
4916
4917 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4918 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4919 || (inst.reloc.type
4920 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4921 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4922
4923 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4924 opd1_qlf =
4925 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4926 1, opd0_qlf, 0);
4927 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4928
4929 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4930 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4931 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4932 gas_assert (logsz <= 3);
4933 else
4934 gas_assert (logsz <= 4);
4935
4936 /* In reloc.c, these pseudo relocation types should be defined in the same
4937 order as the reloc_ldst_lo12 array above, because the array index
4938 calculation below relies on this. */
4939 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4940 }
4941
4942 /* Check whether a register list REGINFO is valid. The registers must be
4943 numbered in increasing order (modulo 32), in increments of one or two.
4944
4945 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4946 increments of two.
4947
4948 Return FALSE if such a register list is invalid, otherwise return TRUE. */
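/* The packing decoded below places the number of registers minus one in
   bits [1:0] and the register numbers in successive 5-bit fields
   starting at bit 2; e.g. the list { v1, v2 } corresponds to
   (2 << 7) | (1 << 2) | 1, which is accepted with ACCEPT_ALTERNATE
   of 0.  */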
4949
4950 static bfd_boolean
4951 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4952 {
4953 uint32_t i, nb_regs, prev_regno, incr;
4954
4955 nb_regs = 1 + (reginfo & 0x3);
4956 reginfo >>= 2;
4957 prev_regno = reginfo & 0x1f;
4958 incr = accept_alternate ? 2 : 1;
4959
4960 for (i = 1; i < nb_regs; ++i)
4961 {
4962 uint32_t curr_regno;
4963 reginfo >>= 5;
4964 curr_regno = reginfo & 0x1f;
4965 if (curr_regno != ((prev_regno + incr) & 0x1f))
4966 return FALSE;
4967 prev_regno = curr_regno;
4968 }
4969
4970 return TRUE;
4971 }
4972
4973 /* Generic instruction operand parser. This does no encoding and no
4974 semantic validation; it merely squirrels values away in the inst
4975 structure. Returns TRUE or FALSE depending on whether the
4976 specified grammar matched. */
4977
4978 static bfd_boolean
4979 parse_operands (char *str, const aarch64_opcode *opcode)
4980 {
4981 int i;
4982 char *backtrack_pos = 0;
4983 const enum aarch64_opnd *operands = opcode->operands;
4984 aarch64_reg_type imm_reg_type;
4985
4986 clear_error ();
4987 skip_whitespace (str);
4988
4989 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
4990
4991 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4992 {
4993 int64_t val;
4994 const reg_entry *reg;
4995 int comma_skipped_p = 0;
4996 aarch64_reg_type rtype;
4997 struct vector_type_el vectype;
4998 aarch64_opnd_qualifier_t qualifier;
4999 aarch64_opnd_info *info = &inst.base.operands[i];
5000 aarch64_reg_type reg_type;
5001
5002 DEBUG_TRACE ("parse operand %d", i);
5003
5004 /* Assign the operand code. */
5005 info->type = operands[i];
5006
5007 if (optional_operand_p (opcode, i))
5008 {
5009 /* Remember where we are in case we need to backtrack. */
5010 gas_assert (!backtrack_pos);
5011 backtrack_pos = str;
5012 }
5013
5014 /* Expect a comma between operands; the backtrack mechanism will take
5015 care of cases where an optional operand is omitted. */
5016 if (i > 0 && ! skip_past_char (&str, ','))
5017 {
5018 set_syntax_error (_("comma expected between operands"));
5019 goto failure;
5020 }
5021 else
5022 comma_skipped_p = 1;
5023
5024 switch (operands[i])
5025 {
5026 case AARCH64_OPND_Rd:
5027 case AARCH64_OPND_Rn:
5028 case AARCH64_OPND_Rm:
5029 case AARCH64_OPND_Rt:
5030 case AARCH64_OPND_Rt2:
5031 case AARCH64_OPND_Rs:
5032 case AARCH64_OPND_Ra:
5033 case AARCH64_OPND_Rt_SYS:
5034 case AARCH64_OPND_PAIRREG:
5035 po_int_reg_or_fail (REG_TYPE_R_Z);
5036 break;
5037
5038 case AARCH64_OPND_Rd_SP:
5039 case AARCH64_OPND_Rn_SP:
5040 po_int_reg_or_fail (REG_TYPE_R_SP);
5041 break;
5042
5043 case AARCH64_OPND_Rm_EXT:
5044 case AARCH64_OPND_Rm_SFT:
5045 po_misc_or_fail (parse_shifter_operand
5046 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5047 ? SHIFTED_ARITH_IMM
5048 : SHIFTED_LOGIC_IMM)));
5049 if (!info->shifter.operator_present)
5050 {
5051 /* Default to LSL if not present. Libopcodes prefers shifter
5052 kind to be explicit. */
5053 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5054 info->shifter.kind = AARCH64_MOD_LSL;
5055 /* For Rm_EXT, libopcodes will carry out a further check on whether
5056 or not the stack pointer is used in the instruction (recall that
5057 "the extend operator is not optional unless at least one of
5058 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5059 }
5060 break;
5061
5062 case AARCH64_OPND_Fd:
5063 case AARCH64_OPND_Fn:
5064 case AARCH64_OPND_Fm:
5065 case AARCH64_OPND_Fa:
5066 case AARCH64_OPND_Ft:
5067 case AARCH64_OPND_Ft2:
5068 case AARCH64_OPND_Sd:
5069 case AARCH64_OPND_Sn:
5070 case AARCH64_OPND_Sm:
5071 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5072 if (val == PARSE_FAIL)
5073 {
5074 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5075 goto failure;
5076 }
5077 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5078
5079 info->reg.regno = val;
5080 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5081 break;
5082
5083 case AARCH64_OPND_SVE_Pd:
5084 case AARCH64_OPND_SVE_Pg3:
5085 case AARCH64_OPND_SVE_Pg4_5:
5086 case AARCH64_OPND_SVE_Pg4_10:
5087 case AARCH64_OPND_SVE_Pg4_16:
5088 case AARCH64_OPND_SVE_Pm:
5089 case AARCH64_OPND_SVE_Pn:
5090 case AARCH64_OPND_SVE_Pt:
5091 reg_type = REG_TYPE_PN;
5092 goto vector_reg;
5093
5094 case AARCH64_OPND_SVE_Za_5:
5095 case AARCH64_OPND_SVE_Za_16:
5096 case AARCH64_OPND_SVE_Zd:
5097 case AARCH64_OPND_SVE_Zm_5:
5098 case AARCH64_OPND_SVE_Zm_16:
5099 case AARCH64_OPND_SVE_Zn:
5100 case AARCH64_OPND_SVE_Zt:
5101 reg_type = REG_TYPE_ZN;
5102 goto vector_reg;
5103
5104 case AARCH64_OPND_Vd:
5105 case AARCH64_OPND_Vn:
5106 case AARCH64_OPND_Vm:
5107 reg_type = REG_TYPE_VN;
5108 vector_reg:
5109 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5110 if (val == PARSE_FAIL)
5111 {
5112 first_error (_(get_reg_expected_msg (reg_type)));
5113 goto failure;
5114 }
5115 if (vectype.defined & NTA_HASINDEX)
5116 goto failure;
5117
5118 info->reg.regno = val;
5119 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5120 && vectype.type == NT_invtype)
5121 /* Unqualified Pn and Zn registers are allowed in certain
5122 contexts. Rely on F_STRICT qualifier checking to catch
5123 invalid uses. */
5124 info->qualifier = AARCH64_OPND_QLF_NIL;
5125 else
5126 {
5127 info->qualifier = vectype_to_qualifier (&vectype);
5128 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5129 goto failure;
5130 }
5131 break;
5132
5133 case AARCH64_OPND_VdD1:
5134 case AARCH64_OPND_VnD1:
5135 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5136 if (val == PARSE_FAIL)
5137 {
5138 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5139 goto failure;
5140 }
5141 if (vectype.type != NT_d || vectype.index != 1)
5142 {
5143 set_fatal_syntax_error
5144 (_("the top half of a 128-bit FP/SIMD register is expected"));
5145 goto failure;
5146 }
5147 info->reg.regno = val;
5148 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5149 here; it is correct for the purpose of encoding/decoding since
5150 only the register number is explicitly encoded in the related
5151 instructions, although this appears a bit hacky. */
5152 info->qualifier = AARCH64_OPND_QLF_S_D;
5153 break;
5154
5155 case AARCH64_OPND_SVE_Zn_INDEX:
5156 reg_type = REG_TYPE_ZN;
5157 goto vector_reg_index;
5158
5159 case AARCH64_OPND_Ed:
5160 case AARCH64_OPND_En:
5161 case AARCH64_OPND_Em:
5162 reg_type = REG_TYPE_VN;
5163 vector_reg_index:
5164 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5165 if (val == PARSE_FAIL)
5166 {
5167 first_error (_(get_reg_expected_msg (reg_type)));
5168 goto failure;
5169 }
5170 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5171 goto failure;
5172
5173 info->reglane.regno = val;
5174 info->reglane.index = vectype.index;
5175 info->qualifier = vectype_to_qualifier (&vectype);
5176 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5177 goto failure;
5178 break;
5179
5180 case AARCH64_OPND_SVE_ZnxN:
5181 case AARCH64_OPND_SVE_ZtxN:
5182 reg_type = REG_TYPE_ZN;
5183 goto vector_reg_list;
5184
5185 case AARCH64_OPND_LVn:
5186 case AARCH64_OPND_LVt:
5187 case AARCH64_OPND_LVt_AL:
5188 case AARCH64_OPND_LEt:
5189 reg_type = REG_TYPE_VN;
5190 vector_reg_list:
5191 if (reg_type == REG_TYPE_ZN
5192 && get_opcode_dependent_value (opcode) == 1
5193 && *str != '{')
5194 {
5195 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5196 if (val == PARSE_FAIL)
5197 {
5198 first_error (_(get_reg_expected_msg (reg_type)));
5199 goto failure;
5200 }
5201 info->reglist.first_regno = val;
5202 info->reglist.num_regs = 1;
5203 }
5204 else
5205 {
5206 val = parse_vector_reg_list (&str, reg_type, &vectype);
5207 if (val == PARSE_FAIL)
5208 goto failure;
5209 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5210 {
5211 set_fatal_syntax_error (_("invalid register list"));
5212 goto failure;
5213 }
5214 info->reglist.first_regno = (val >> 2) & 0x1f;
5215 info->reglist.num_regs = (val & 0x3) + 1;
5216 }
5217 if (operands[i] == AARCH64_OPND_LEt)
5218 {
5219 if (!(vectype.defined & NTA_HASINDEX))
5220 goto failure;
5221 info->reglist.has_index = 1;
5222 info->reglist.index = vectype.index;
5223 }
5224 else
5225 {
5226 if (vectype.defined & NTA_HASINDEX)
5227 goto failure;
5228 if (!(vectype.defined & NTA_HASTYPE))
5229 {
5230 if (reg_type == REG_TYPE_ZN)
5231 set_fatal_syntax_error (_("missing type suffix"));
5232 goto failure;
5233 }
5234 }
5235 info->qualifier = vectype_to_qualifier (&vectype);
5236 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5237 goto failure;
5238 break;
5239
5240 case AARCH64_OPND_Cn:
5241 case AARCH64_OPND_Cm:
5242 po_reg_or_fail (REG_TYPE_CN);
5243 if (val > 15)
5244 {
5245 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5246 goto failure;
5247 }
5248 inst.base.operands[i].reg.regno = val;
5249 break;
5250
5251 case AARCH64_OPND_SHLL_IMM:
5252 case AARCH64_OPND_IMM_VLSR:
5253 po_imm_or_fail (1, 64);
5254 info->imm.value = val;
5255 break;
5256
5257 case AARCH64_OPND_CCMP_IMM:
5258 case AARCH64_OPND_FBITS:
5259 case AARCH64_OPND_UIMM4:
5260 case AARCH64_OPND_UIMM3_OP1:
5261 case AARCH64_OPND_UIMM3_OP2:
5262 case AARCH64_OPND_IMM_VLSL:
5263 case AARCH64_OPND_IMM:
5264 case AARCH64_OPND_WIDTH:
5265 po_imm_nc_or_fail ();
5266 info->imm.value = val;
5267 break;
5268
5269 case AARCH64_OPND_UIMM7:
5270 po_imm_or_fail (0, 127);
5271 info->imm.value = val;
5272 break;
5273
5274 case AARCH64_OPND_IDX:
5275 case AARCH64_OPND_BIT_NUM:
5276 case AARCH64_OPND_IMMR:
5277 case AARCH64_OPND_IMMS:
5278 po_imm_or_fail (0, 63);
5279 info->imm.value = val;
5280 break;
5281
5282 case AARCH64_OPND_IMM0:
5283 po_imm_nc_or_fail ();
5284 if (val != 0)
5285 {
5286 set_fatal_syntax_error (_("immediate zero expected"));
5287 goto failure;
5288 }
5289 info->imm.value = 0;
5290 break;
5291
5292 case AARCH64_OPND_FPIMM0:
5293 {
5294 int qfloat;
5295 bfd_boolean res1 = FALSE, res2 = FALSE;
5296 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5297 it is probably not worth the effort to support it. */
5298 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5299 imm_reg_type))
5300 && (error_p ()
5301 || !(res2 = parse_constant_immediate (&str, &val,
5302 imm_reg_type))))
5303 goto failure;
5304 if ((res1 && qfloat == 0) || (res2 && val == 0))
5305 {
5306 info->imm.value = 0;
5307 info->imm.is_fp = 1;
5308 break;
5309 }
5310 set_fatal_syntax_error (_("immediate zero expected"));
5311 goto failure;
5312 }
5313
5314 case AARCH64_OPND_IMM_MOV:
5315 {
5316 char *saved = str;
5317 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5318 reg_name_p (str, REG_TYPE_VN))
5319 goto failure;
5320 str = saved;
5321 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5322 GE_OPT_PREFIX, 1));
5323 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5324 later. fix_mov_imm_insn will try to determine a machine
5325 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5326 message if the immediate cannot be moved by a single
5327 instruction. */
5328 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5329 inst.base.operands[i].skip = 1;
5330 }
5331 break;
5332
5333 case AARCH64_OPND_SIMD_IMM:
5334 case AARCH64_OPND_SIMD_IMM_SFT:
5335 if (! parse_big_immediate (&str, &val, imm_reg_type))
5336 goto failure;
5337 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5338 /* addr_off_p */ 0,
5339 /* need_libopcodes_p */ 1,
5340 /* skip_p */ 1);
5341 /* Parse shift.
5342 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5343 shift, we don't check it here; we leave the checking to
5344 libopcodes (operand_general_constraint_met_p). By
5345 doing this, we achieve better diagnostics. */
5346 if (skip_past_comma (&str)
5347 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5348 goto failure;
5349 if (!info->shifter.operator_present
5350 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5351 {
5352 /* Default to LSL if not present. Libopcodes prefers shifter
5353 kind to be explicit. */
5354 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5355 info->shifter.kind = AARCH64_MOD_LSL;
5356 }
5357 break;
5358
5359 case AARCH64_OPND_FPIMM:
5360 case AARCH64_OPND_SIMD_FPIMM:
5361 {
5362 int qfloat;
5363 bfd_boolean dp_p
5364 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5365 == 8);
5366 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5367 || !aarch64_imm_float_p (qfloat))
5368 {
5369 if (!error_p ())
5370 set_fatal_syntax_error (_("invalid floating-point"
5371 " constant"));
5372 goto failure;
5373 }
5374 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5375 inst.base.operands[i].imm.is_fp = 1;
5376 }
5377 break;
5378
5379 case AARCH64_OPND_LIMM:
5380 po_misc_or_fail (parse_shifter_operand (&str, info,
5381 SHIFTED_LOGIC_IMM));
5382 if (info->shifter.operator_present)
5383 {
5384 set_fatal_syntax_error
5385 (_("shift not allowed for bitmask immediate"));
5386 goto failure;
5387 }
5388 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5389 /* addr_off_p */ 0,
5390 /* need_libopcodes_p */ 1,
5391 /* skip_p */ 1);
5392 break;
5393
5394 case AARCH64_OPND_AIMM:
5395 if (opcode->op == OP_ADD)
5396 /* ADD may have relocation types. */
5397 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5398 SHIFTED_ARITH_IMM));
5399 else
5400 po_misc_or_fail (parse_shifter_operand (&str, info,
5401 SHIFTED_ARITH_IMM));
5402 switch (inst.reloc.type)
5403 {
5404 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5405 info->shifter.amount = 12;
5406 break;
5407 case BFD_RELOC_UNUSED:
5408 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5409 if (info->shifter.kind != AARCH64_MOD_NONE)
5410 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5411 inst.reloc.pc_rel = 0;
5412 break;
5413 default:
5414 break;
5415 }
5416 info->imm.value = 0;
5417 if (!info->shifter.operator_present)
5418 {
5419 /* Default to LSL if not present. Libopcodes prefers shifter
5420 kind to be explicit. */
5421 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5422 info->shifter.kind = AARCH64_MOD_LSL;
5423 }
5424 break;
5425
5426 case AARCH64_OPND_HALF:
5427 {
5428 /* #<imm16> or relocation. */
5429 int internal_fixup_p;
5430 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5431 if (internal_fixup_p)
5432 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5433 skip_whitespace (str);
5434 if (skip_past_comma (&str))
5435 {
5436 /* {, LSL #<shift>} */
5437 if (! aarch64_gas_internal_fixup_p ())
5438 {
5439 set_fatal_syntax_error (_("can't mix relocation modifier "
5440 "with explicit shift"));
5441 goto failure;
5442 }
5443 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5444 }
5445 else
5446 inst.base.operands[i].shifter.amount = 0;
5447 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5448 inst.base.operands[i].imm.value = 0;
5449 if (! process_movw_reloc_info ())
5450 goto failure;
5451 }
5452 break;
5453
5454 case AARCH64_OPND_EXCEPTION:
5455 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5456 imm_reg_type));
5457 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5458 /* addr_off_p */ 0,
5459 /* need_libopcodes_p */ 0,
5460 /* skip_p */ 1);
5461 break;
5462
5463 case AARCH64_OPND_NZCV:
5464 {
5465 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5466 if (nzcv != NULL)
5467 {
5468 str += 4;
5469 info->imm.value = nzcv->value;
5470 break;
5471 }
5472 po_imm_or_fail (0, 15);
5473 info->imm.value = val;
5474 }
5475 break;
5476
5477 case AARCH64_OPND_COND:
5478 case AARCH64_OPND_COND1:
5479 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5480 str += 2;
5481 if (info->cond == NULL)
5482 {
5483 set_syntax_error (_("invalid condition"));
5484 goto failure;
5485 }
5486 else if (operands[i] == AARCH64_OPND_COND1
5487 && (info->cond->value & 0xe) == 0xe)
5488 {
5489 /* Do not allow AL or NV. */
5490 set_default_error ();
5491 goto failure;
5492 }
5493 break;
5494
5495 case AARCH64_OPND_ADDR_ADRP:
5496 po_misc_or_fail (parse_adrp (&str));
5497 /* Clear the value as the operand needs to be relocated. */
5498 info->imm.value = 0;
5499 break;
5500
5501 case AARCH64_OPND_ADDR_PCREL14:
5502 case AARCH64_OPND_ADDR_PCREL19:
5503 case AARCH64_OPND_ADDR_PCREL21:
5504 case AARCH64_OPND_ADDR_PCREL26:
5505 po_misc_or_fail (parse_address (&str, info));
5506 if (!info->addr.pcrel)
5507 {
5508 set_syntax_error (_("invalid pc-relative address"));
5509 goto failure;
5510 }
5511 if (inst.gen_lit_pool
5512 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5513 {
5514 /* Only permit "=value" in the literal load instructions.
5515 The literal will be generated by programmer_friendly_fixup. */
5516 set_syntax_error (_("invalid use of \"=immediate\""));
5517 goto failure;
5518 }
5519 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5520 {
5521 set_syntax_error (_("unrecognized relocation suffix"));
5522 goto failure;
5523 }
5524 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5525 {
5526 info->imm.value = inst.reloc.exp.X_add_number;
5527 inst.reloc.type = BFD_RELOC_UNUSED;
5528 }
5529 else
5530 {
5531 info->imm.value = 0;
5532 if (inst.reloc.type == BFD_RELOC_UNUSED)
5533 switch (opcode->iclass)
5534 {
5535 case compbranch:
5536 case condbranch:
5537 /* e.g. CBZ or B.COND */
5538 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5539 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5540 break;
5541 case testbranch:
5542 /* e.g. TBZ */
5543 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5544 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5545 break;
5546 case branch_imm:
5547 /* e.g. B or BL */
5548 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5549 inst.reloc.type =
5550 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5551 : BFD_RELOC_AARCH64_JUMP26;
5552 break;
5553 case loadlit:
5554 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5555 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5556 break;
5557 case pcreladdr:
5558 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5559 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5560 break;
5561 default:
5562 gas_assert (0);
5563 abort ();
5564 }
5565 inst.reloc.pc_rel = 1;
5566 }
5567 break;
5568
5569 case AARCH64_OPND_ADDR_SIMPLE:
5570 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5571 {
5572 /* [<Xn|SP>{, #<simm>}] */
5573 char *start = str;
5574 /* First use the normal address-parsing routines, to get
5575 the usual syntax errors. */
5576 po_misc_or_fail (parse_address (&str, info));
5577 if (info->addr.pcrel || info->addr.offset.is_reg
5578 || !info->addr.preind || info->addr.postind
5579 || info->addr.writeback)
5580 {
5581 set_syntax_error (_("invalid addressing mode"));
5582 goto failure;
5583 }
5584
5585 /* Then retry, matching the specific syntax of these addresses. */
5586 str = start;
5587 po_char_or_fail ('[');
5588 po_reg_or_fail (REG_TYPE_R64_SP);
5589 /* Accept optional ", #0". */
5590 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5591 && skip_past_char (&str, ','))
5592 {
5593 skip_past_char (&str, '#');
5594 if (! skip_past_char (&str, '0'))
5595 {
5596 set_fatal_syntax_error
5597 (_("the optional immediate offset can only be 0"));
5598 goto failure;
5599 }
5600 }
5601 po_char_or_fail (']');
5602 break;
5603 }
5604
5605 case AARCH64_OPND_ADDR_REGOFF:
5606 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5607 po_misc_or_fail (parse_address (&str, info));
5608 if (info->addr.pcrel || !info->addr.offset.is_reg
5609 || !info->addr.preind || info->addr.postind
5610 || info->addr.writeback)
5611 {
5612 set_syntax_error (_("invalid addressing mode"));
5613 goto failure;
5614 }
5615 if (!info->shifter.operator_present)
5616 {
5617 /* Default to LSL if not present. Libopcodes prefers shifter
5618 kind to be explicit. */
5619 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5620 info->shifter.kind = AARCH64_MOD_LSL;
5621 }
5622 /* Qualifier to be deduced by libopcodes. */
5623 break;
5624
5625 case AARCH64_OPND_ADDR_SIMM7:
5626 po_misc_or_fail (parse_address (&str, info));
5627 if (info->addr.pcrel || info->addr.offset.is_reg
5628 || (!info->addr.preind && !info->addr.postind))
5629 {
5630 set_syntax_error (_("invalid addressing mode"));
5631 goto failure;
5632 }
5633 if (inst.reloc.type != BFD_RELOC_UNUSED)
5634 {
5635 set_syntax_error (_("relocation not allowed"));
5636 goto failure;
5637 }
5638 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5639 /* addr_off_p */ 1,
5640 /* need_libopcodes_p */ 1,
5641 /* skip_p */ 0);
5642 break;
5643
5644 case AARCH64_OPND_ADDR_SIMM9:
5645 case AARCH64_OPND_ADDR_SIMM9_2:
5646 po_misc_or_fail (parse_address (&str, info));
5647 if (info->addr.pcrel || info->addr.offset.is_reg
5648 || (!info->addr.preind && !info->addr.postind)
5649 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5650 && info->addr.writeback))
5651 {
5652 set_syntax_error (_("invalid addressing mode"));
5653 goto failure;
5654 }
5655 if (inst.reloc.type != BFD_RELOC_UNUSED)
5656 {
5657 set_syntax_error (_("relocation not allowed"));
5658 goto failure;
5659 }
5660 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5661 /* addr_off_p */ 1,
5662 /* need_libopcodes_p */ 1,
5663 /* skip_p */ 0);
5664 break;
5665
5666 case AARCH64_OPND_ADDR_UIMM12:
5667 po_misc_or_fail (parse_address (&str, info));
5668 if (info->addr.pcrel || info->addr.offset.is_reg
5669 || !info->addr.preind || info->addr.writeback)
5670 {
5671 set_syntax_error (_("invalid addressing mode"));
5672 goto failure;
5673 }
5674 if (inst.reloc.type == BFD_RELOC_UNUSED)
5675 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5676 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5677 || (inst.reloc.type
5678 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5679 || (inst.reloc.type
5680 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5681 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5682 /* Leave qualifier to be determined by libopcodes. */
5683 break;
5684
5685 case AARCH64_OPND_SIMD_ADDR_POST:
5686 /* [<Xn|SP>], <Xm|#<amount>> */
5687 po_misc_or_fail (parse_address (&str, info));
5688 if (!info->addr.postind || !info->addr.writeback)
5689 {
5690 set_syntax_error (_("invalid addressing mode"));
5691 goto failure;
5692 }
5693 if (!info->addr.offset.is_reg)
5694 {
5695 if (inst.reloc.exp.X_op == O_constant)
5696 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5697 else
5698 {
5699 set_fatal_syntax_error
5700 (_("writeback value should be an immediate constant"));
5701 goto failure;
5702 }
5703 }
5704 /* No qualifier. */
5705 break;
5706
5707 case AARCH64_OPND_SYSREG:
5708 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5709 == PARSE_FAIL)
5710 {
5711 set_syntax_error (_("unknown or missing system register name"));
5712 goto failure;
5713 }
5714 inst.base.operands[i].sysreg = val;
5715 break;
5716
5717 case AARCH64_OPND_PSTATEFIELD:
5718 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5719 == PARSE_FAIL)
5720 {
5721 set_syntax_error (_("unknown or missing PSTATE field name"));
5722 goto failure;
5723 }
5724 inst.base.operands[i].pstatefield = val;
5725 break;
5726
5727 case AARCH64_OPND_SYSREG_IC:
5728 inst.base.operands[i].sysins_op =
5729 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5730 goto sys_reg_ins;
5731 case AARCH64_OPND_SYSREG_DC:
5732 inst.base.operands[i].sysins_op =
5733 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5734 goto sys_reg_ins;
5735 case AARCH64_OPND_SYSREG_AT:
5736 inst.base.operands[i].sysins_op =
5737 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5738 goto sys_reg_ins;
5739 case AARCH64_OPND_SYSREG_TLBI:
5740 inst.base.operands[i].sysins_op =
5741 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5742 sys_reg_ins:
5743 if (inst.base.operands[i].sysins_op == NULL)
5744 {
5745 set_fatal_syntax_error ( _("unknown or missing operation name"));
5746 goto failure;
5747 }
5748 break;
5749
5750 case AARCH64_OPND_BARRIER:
5751 case AARCH64_OPND_BARRIER_ISB:
5752 val = parse_barrier (&str);
5753 if (val != PARSE_FAIL
5754 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5755 {
5756 /* ISB only accepts the option name 'sy'. */
5757 set_syntax_error
5758 (_("the specified option is not accepted in ISB"));
5759 /* Turn off backtrack as this optional operand is present. */
5760 backtrack_pos = 0;
5761 goto failure;
5762 }
5763 /* This is an extension to accept a 0..15 immediate. */
5764 if (val == PARSE_FAIL)
5765 po_imm_or_fail (0, 15);
5766 info->barrier = aarch64_barrier_options + val;
5767 break;
5768
5769 case AARCH64_OPND_PRFOP:
5770 val = parse_pldop (&str);
5771 /* This is an extension to accept a 0..31 immediate. */
5772 if (val == PARSE_FAIL)
5773 po_imm_or_fail (0, 31);
5774 inst.base.operands[i].prfop = aarch64_prfops + val;
5775 break;
5776
5777 case AARCH64_OPND_BARRIER_PSB:
5778 val = parse_barrier_psb (&str, &(info->hint_option));
5779 if (val == PARSE_FAIL)
5780 goto failure;
5781 break;
5782
5783 default:
5784 as_fatal (_("unhandled operand code %d"), operands[i]);
5785 }
5786
5787 /* If we get here, this operand was successfully parsed. */
5788 inst.base.operands[i].present = 1;
5789 continue;
5790
5791 failure:
5792 /* The parse routine should already have set the error, but in case
5793 not, set a default one here. */
5794 if (! error_p ())
5795 set_default_error ();
5796
5797 if (! backtrack_pos)
5798 goto parse_operands_return;
5799
5800 {
5801 /* We reach here because this operand is marked as optional, and
5802 either no operand was supplied or the operand was supplied but it
5803 was syntactically incorrect. In the latter case we report an
5804 error. In the former case we perform a few more checks before
5805 dropping through to the code to insert the default operand. */
5806
5807 char *tmp = backtrack_pos;
5808 char endchar = END_OF_INSN;
5809
5810 if (i != (aarch64_num_of_operands (opcode) - 1))
5811 endchar = ',';
5812 skip_past_char (&tmp, ',');
5813
5814 if (*tmp != endchar)
5815 /* The user has supplied an operand in the wrong format. */
5816 goto parse_operands_return;
5817
5818 /* Make sure there is not a comma before the optional operand.
5819 For example the fifth operand of 'sys' is optional:
5820
5821 sys #0,c0,c0,#0, <--- wrong
5822 sys #0,c0,c0,#0 <--- correct. */
5823 if (comma_skipped_p && i && endchar == END_OF_INSN)
5824 {
5825 set_fatal_syntax_error
5826 (_("unexpected comma before the omitted optional operand"));
5827 goto parse_operands_return;
5828 }
5829 }
5830
5831 /* Reaching here means we are dealing with an optional operand that is
5832 omitted from the assembly line. */
5833 gas_assert (optional_operand_p (opcode, i));
5834 info->present = 0;
5835 process_omitted_operand (operands[i], opcode, i, info);
5836
5837 /* Try again, skipping the optional operand at backtrack_pos. */
5838 str = backtrack_pos;
5839 backtrack_pos = 0;
5840
5841 /* Clear any error record after the omitted optional operand has been
5842 successfully handled. */
5843 clear_error ();
5844 }
5845
5846 /* Check if we have parsed all the operands. */
5847 if (*str != '\0' && ! error_p ())
5848 {
5849 /* Set I to the index of the last present operand; this is
5850 for the purpose of diagnostics. */
5851 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5852 ;
5853 set_fatal_syntax_error
5854 (_("unexpected characters following instruction"));
5855 }
5856
5857 parse_operands_return:
5858
5859 if (error_p ())
5860 {
5861 DEBUG_TRACE ("parsing FAIL: %s - %s",
5862 operand_mismatch_kind_names[get_error_kind ()],
5863 get_error_message ());
5864 /* Record the operand error properly; this is useful when there
5865 are multiple instruction templates for a mnemonic name, so that
5866 later on, we can select the error that most closely describes
5867 the problem. */
5868 record_operand_error (opcode, i, get_error_kind (),
5869 get_error_message ());
5870 return FALSE;
5871 }
5872 else
5873 {
5874 DEBUG_TRACE ("parsing SUCCESS");
5875 return TRUE;
5876 }
5877 }
5878
5879 /* Perform some fix-ups to provide programmer-friendly features while
5880 keeping libopcodes happy, i.e. libopcodes only accepts
5881 the preferred architectural syntax.
5882 Return FALSE if there is any failure; otherwise return TRUE. */
5883
5884 static bfd_boolean
5885 programmer_friendly_fixup (aarch64_instruction *instr)
5886 {
5887 aarch64_inst *base = &instr->base;
5888 const aarch64_opcode *opcode = base->opcode;
5889 enum aarch64_op op = opcode->op;
5890 aarch64_opnd_info *operands = base->operands;
5891
5892 DEBUG_TRACE ("enter");
5893
5894 switch (opcode->iclass)
5895 {
5896 case testbranch:
5897 /* TBNZ Xn|Wn, #uimm6, label
5898 Test and Branch Not Zero: conditionally jumps to label if bit number
5899 uimm6 in register Xn is not zero. The bit number implies the width of
5900 the register, which may be written as Wn and should be disassembled as
5901 Wn if uimm6 is less than 32.
5902 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5903 {
5904 if (operands[1].imm.value >= 32)
5905 {
5906 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5907 0, 31);
5908 return FALSE;
5909 }
5910 operands[0].qualifier = AARCH64_OPND_QLF_X;
5911 }
5912 break;
5913 case loadlit:
5914 /* LDR Wt, label | =value
5915 As a convenience, assemblers will typically permit the notation
5916 "=value" in conjunction with the pc-relative literal load instructions
5917 to automatically place an immediate value or symbolic address in a
5918 nearby literal pool and generate a hidden label which references it.
5919 ISREG has been set to 0 in the case of =value. */
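/* For example, "ldr x0, =0x1234" asks for the constant to be placed
   in a nearby literal pool and loaded PC-relatively.  */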
5920 if (instr->gen_lit_pool
5921 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5922 {
5923 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5924 if (op == OP_LDRSW_LIT)
5925 size = 4;
5926 if (instr->reloc.exp.X_op != O_constant
5927 && instr->reloc.exp.X_op != O_big
5928 && instr->reloc.exp.X_op != O_symbol)
5929 {
5930 record_operand_error (opcode, 1,
5931 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5932 _("constant expression expected"));
5933 return FALSE;
5934 }
5935 if (! add_to_lit_pool (&instr->reloc.exp, size))
5936 {
5937 record_operand_error (opcode, 1,
5938 AARCH64_OPDE_OTHER_ERROR,
5939 _("literal pool insertion failed"));
5940 return FALSE;
5941 }
5942 }
5943 break;
5944 case log_shift:
5945 case bitfield:
5946 /* UXT[BHW] Wd, Wn
5947 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5948 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo-instruction which is
5949 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5950 A programmer-friendly assembler should accept a destination Xd in
5951 place of Wd, however that is not the preferred form for disassembly.
5952 */
5953 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5954 && operands[1].qualifier == AARCH64_OPND_QLF_W
5955 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5956 operands[0].qualifier = AARCH64_OPND_QLF_W;
5957 break;
5958
5959 case addsub_ext:
5960 {
5961 /* In the 64-bit form, the final register operand is written as Wm
5962 for all but the (possibly omitted) UXTX/LSL and SXTX
5963 operators.
5964 As a programmer-friendly assembler, we accept e.g.
5965 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5966 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5967 int idx = aarch64_operand_index (opcode->operands,
5968 AARCH64_OPND_Rm_EXT);
5969 gas_assert (idx == 1 || idx == 2);
5970 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5971 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5972 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5973 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5974 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5975 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5976 }
5977 break;
5978
5979 default:
5980 break;
5981 }
5982
5983 DEBUG_TRACE ("exit with SUCCESS");
5984 return TRUE;
5985 }
5986
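/* A minimal sketch of the rewrites above, with hypothetical operands:

     tbnz x3, #5, lab        may also be written  tbnz w3, #5, lab; the W
                             qualifier is switched to X before encoding,
                             while  tbnz w3, #40, lab  is rejected as out
                             of range (bit numbers 32-63 require Xn)
     ldr  x0, =0x12345678    the value goes into the literal pool and the
                             load references the hidden label
     uxtb x1, w2             accepted and encoded as  uxtb w1, w2
     adds x0, sp, x2, uxtb   the extended Xm operand is narrowed to w2  */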
5987 /* Check for loads and stores that will cause unpredictable behavior. */
5988
5989 static void
5990 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5991 {
5992 aarch64_inst *base = &instr->base;
5993 const aarch64_opcode *opcode = base->opcode;
5994 const aarch64_opnd_info *opnds = base->operands;
5995 switch (opcode->iclass)
5996 {
5997 case ldst_pos:
5998 case ldst_imm9:
5999 case ldst_unscaled:
6000 case ldst_unpriv:
6001 /* Loading/storing the base register is unpredictable if writeback. */
6002 if ((aarch64_get_operand_class (opnds[0].type)
6003 == AARCH64_OPND_CLASS_INT_REG)
6004 && opnds[0].reg.regno == opnds[1].addr.base_regno
6005 && opnds[1].addr.base_regno != REG_SP
6006 && opnds[1].addr.writeback)
6007 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6008 break;
6009 case ldstpair_off:
6010 case ldstnapair_offs:
6011 case ldstpair_indexed:
6012 /* Loading/storing the base register is unpredictable if writeback. */
6013 if ((aarch64_get_operand_class (opnds[0].type)
6014 == AARCH64_OPND_CLASS_INT_REG)
6015 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6016 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6017 && opnds[2].addr.base_regno != REG_SP
6018 && opnds[2].addr.writeback)
6019 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6020 /* Load operations must load different registers. */
6021 if ((opcode->opcode & (1 << 22))
6022 && opnds[0].reg.regno == opnds[1].reg.regno)
6023 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6024 break;
6025 default:
6026 break;
6027 }
6028 }
6029
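/* Examples of the diagnostics above (hypothetical operands):

     ldr x0, [x0], #8          base register equals the transfer register
                               with writeback -> "unpredictable transfer"
     stp x0, x1, [x1, #16]!    same check for the pair forms
     ldp x2, x2, [x3]          a load pair must use distinct destination
                               registers -> "unpredictable load of register
                               pair"  */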
6030 /* A wrapper function to interface with libopcodes for encoding and
6031 to record the error message if there is any.
6032
6033 Return TRUE on success; otherwise return FALSE. */
6034
6035 static bfd_boolean
6036 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6037 aarch64_insn *code)
6038 {
6039 aarch64_operand_error error_info;
6040 error_info.kind = AARCH64_OPDE_NIL;
6041 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6042 return TRUE;
6043 else
6044 {
6045 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6046 record_operand_error_info (opcode, &error_info);
6047 return FALSE;
6048 }
6049 }
6050
6051 #ifdef DEBUG_AARCH64
6052 static inline void
6053 dump_opcode_operands (const aarch64_opcode *opcode)
6054 {
6055 int i = 0;
6056 while (opcode->operands[i] != AARCH64_OPND_NIL)
6057 {
6058 aarch64_verbose ("\t\t opnd%d: %s", i,
6059 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6060 ? aarch64_get_operand_name (opcode->operands[i])
6061 : aarch64_get_operand_desc (opcode->operands[i]));
6062 ++i;
6063 }
6064 }
6065 #endif /* DEBUG_AARCH64 */
6066
6067 /* This is the guts of the machine-dependent assembler. STR points to a
6068 machine dependent instruction. This function is supposed to emit
6069 the frags/bytes it assembles to. */
6070
6071 void
6072 md_assemble (char *str)
6073 {
6074 char *p = str;
6075 templates *template;
6076 aarch64_opcode *opcode;
6077 aarch64_inst *inst_base;
6078 unsigned saved_cond;
6079
6080 /* Align the previous label if needed. */
6081 if (last_label_seen != NULL)
6082 {
6083 symbol_set_frag (last_label_seen, frag_now);
6084 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6085 S_SET_SEGMENT (last_label_seen, now_seg);
6086 }
6087
6088 inst.reloc.type = BFD_RELOC_UNUSED;
6089
6090 DEBUG_TRACE ("\n\n");
6091 DEBUG_TRACE ("==============================");
6092 DEBUG_TRACE ("Enter md_assemble with %s", str);
6093
6094 template = opcode_lookup (&p);
6095 if (!template)
6096 {
6097 /* It wasn't an instruction, but it might be a register alias created
6098 by a directive of the form "alias .req reg". */
6099 if (!create_register_alias (str, p))
6100 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6101 str);
6102 return;
6103 }
6104
6105 skip_whitespace (p);
6106 if (*p == ',')
6107 {
6108 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6109 get_mnemonic_name (str), str);
6110 return;
6111 }
6112
6113 init_operand_error_report ();
6114
6115 /* Sections are assumed to start aligned. In an executable section, there
6116 is no MAP_DATA symbol pending, so we only align the address during the
6117 MAP_DATA --> MAP_INSN transition.
6118 For other sections, this is not guaranteed. */
6119 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6120 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6121 frag_align_code (2, 0);
6122
6123 saved_cond = inst.cond;
6124 reset_aarch64_instruction (&inst);
6125 inst.cond = saved_cond;
6126
6127 /* Iterate through all opcode entries with the same mnemonic name. */
6128 do
6129 {
6130 opcode = template->opcode;
6131
6132 DEBUG_TRACE ("opcode %s found", opcode->name);
6133 #ifdef DEBUG_AARCH64
6134 if (debug_dump)
6135 dump_opcode_operands (opcode);
6136 #endif /* DEBUG_AARCH64 */
6137
6138 mapping_state (MAP_INSN);
6139
6140 inst_base = &inst.base;
6141 inst_base->opcode = opcode;
6142
6143 /* Truly conditionally executed instructions, e.g. b.cond. */
6144 if (opcode->flags & F_COND)
6145 {
6146 gas_assert (inst.cond != COND_ALWAYS);
6147 inst_base->cond = get_cond_from_value (inst.cond);
6148 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6149 }
6150 else if (inst.cond != COND_ALWAYS)
6151 {
6152 /* We shouldn't arrive here: the assembly looks like a conditional
6153 instruction but the opcode found is unconditional. */
6154 gas_assert (0);
6155 continue;
6156 }
6157
6158 if (parse_operands (p, opcode)
6159 && programmer_friendly_fixup (&inst)
6160 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6161 {
6162 /* Check that this instruction is supported for this CPU. */
6163 if (!opcode->avariant
6164 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6165 {
6166 as_bad (_("selected processor does not support `%s'"), str);
6167 return;
6168 }
6169
6170 warn_unpredictable_ldst (&inst, str);
6171
6172 if (inst.reloc.type == BFD_RELOC_UNUSED
6173 || !inst.reloc.need_libopcodes_p)
6174 output_inst (NULL);
6175 else
6176 {
6177 /* If a relocation is generated for the instruction,
6178 store the instruction information for a future fix-up. */
6179 struct aarch64_inst *copy;
6180 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6181 copy = XNEW (struct aarch64_inst);
6182 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6183 output_inst (copy);
6184 }
6185 return;
6186 }
6187
6188 template = template->next;
6189 if (template != NULL)
6190 {
6191 reset_aarch64_instruction (&inst);
6192 inst.cond = saved_cond;
6193 }
6194 }
6195 while (template != NULL);
6196
6197 /* Issue the error messages if any. */
6198 output_operand_error_report (str);
6199 }
6200
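/* Note on the loop above: a mnemonic such as "ldr" maps to several opcode
   templates (register offset, scaled immediate offset, literal, ...).  Each
   template is tried in turn through parse_operands, programmer_friendly_fixup
   and do_encode; the first one that encodes wins, and the operand errors
   recorded for the failed templates are only reported once every template
   has been rejected.  */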
6201 /* Various frobbings of labels and their addresses. */
6202
6203 void
6204 aarch64_start_line_hook (void)
6205 {
6206 last_label_seen = NULL;
6207 }
6208
6209 void
6210 aarch64_frob_label (symbolS * sym)
6211 {
6212 last_label_seen = sym;
6213
6214 dwarf2_emit_label (sym);
6215 }
6216
6217 int
6218 aarch64_data_in_code (void)
6219 {
6220 if (!strncmp (input_line_pointer + 1, "data:", 5))
6221 {
6222 *input_line_pointer = '/';
6223 input_line_pointer += 5;
6224 *input_line_pointer = 0;
6225 return 1;
6226 }
6227
6228 return 0;
6229 }
6230
6231 char *
6232 aarch64_canonicalize_symbol_name (char *name)
6233 {
6234 int len;
6235
6236 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6237 *(name + len - 5) = 0;
6238
6239 return name;
6240 }
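/* For example, a label the data-in-code machinery records with a trailing
   "/data" suffix (say a hypothetical "foo/data") is canonicalized back to
   plain "foo" before it reaches the symbol table.  */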
6241 \f
6242 /* Table of all register names defined by default. The user can
6243 define additional names with .req. Note that all register names
6244 should appear in both upper and lowercase variants. Some registers
6245 also have mixed-case names. */
6246
6247 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6248 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6249 #define REGSET16(p,t) \
6250 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6251 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6252 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6253 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6254 #define REGSET31(p,t) \
6255 REGSET16(p, t), \
6256 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6257 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6258 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6259 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6260 #define REGSET(p,t) \
6261 REGSET31(p,t), REGNUM(p,31,t)
6262
6263 /* These go into aarch64_reg_hsh hash-table. */
6264 static const reg_entry reg_names[] = {
6265 /* Integer registers. */
6266 REGSET31 (x, R_64), REGSET31 (X, R_64),
6267 REGSET31 (w, R_32), REGSET31 (W, R_32),
6268
6269 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6270 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6271
6272 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6273 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6274
6275 /* Coprocessor register numbers. */
6276 REGSET (c, CN), REGSET (C, CN),
6277
6278 /* Floating-point single precision registers. */
6279 REGSET (s, FP_S), REGSET (S, FP_S),
6280
6281 /* Floating-point double precision registers. */
6282 REGSET (d, FP_D), REGSET (D, FP_D),
6283
6284 /* Floating-point half precision registers. */
6285 REGSET (h, FP_H), REGSET (H, FP_H),
6286
6287 /* Floating-point byte precision registers. */
6288 REGSET (b, FP_B), REGSET (B, FP_B),
6289
6290 /* Floating-point quad precision registers. */
6291 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6292
6293 /* FP/SIMD registers. */
6294 REGSET (v, VN), REGSET (V, VN),
6295
6296 /* SVE vector registers. */
6297 REGSET (z, ZN), REGSET (Z, ZN),
6298
6299 /* SVE predicate registers. */
6300 REGSET16 (p, PN), REGSET16 (P, PN)
6301 };
6302
6303 #undef REGDEF
6304 #undef REGNUM
6305 #undef REGSET16
6306 #undef REGSET31
6307 #undef REGSET
6308
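/* As an illustration of the macros above, REGNUM (x, 0, R_64) expands to
   REGDEF (x0, 0, R_64), i.e. { "x0", 0, REG_TYPE_R_64, TRUE }, and
   REGSET31 (x, R_64) therefore defines x0..x30; number 31 is left out and
   covered by the separate sp/xzr entries.  REGSET16 provides the sixteen
   SVE predicate registers p0..p15.  */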
6309 #define N 1
6310 #define n 0
6311 #define Z 1
6312 #define z 0
6313 #define C 1
6314 #define c 0
6315 #define V 1
6316 #define v 0
6317 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6318 static const asm_nzcv nzcv_names[] = {
6319 {"nzcv", B (n, z, c, v)},
6320 {"nzcV", B (n, z, c, V)},
6321 {"nzCv", B (n, z, C, v)},
6322 {"nzCV", B (n, z, C, V)},
6323 {"nZcv", B (n, Z, c, v)},
6324 {"nZcV", B (n, Z, c, V)},
6325 {"nZCv", B (n, Z, C, v)},
6326 {"nZCV", B (n, Z, C, V)},
6327 {"Nzcv", B (N, z, c, v)},
6328 {"NzcV", B (N, z, c, V)},
6329 {"NzCv", B (N, z, C, v)},
6330 {"NzCV", B (N, z, C, V)},
6331 {"NZcv", B (N, Z, c, v)},
6332 {"NZcV", B (N, Z, c, V)},
6333 {"NZCv", B (N, Z, C, v)},
6334 {"NZCV", B (N, Z, C, V)}
6335 };
6336
6337 #undef N
6338 #undef n
6339 #undef Z
6340 #undef z
6341 #undef C
6342 #undef c
6343 #undef V
6344 #undef v
6345 #undef B
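/* The flag-set immediates above are built bit by bit: for instance
   B (N, z, C, v) is (1 << 3) | (0 << 2) | (1 << 1) | 0 = 0xa, the value
   stored for the "NzCv" entry.  */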
6346 \f
6347 /* MD interface: bits in the object file. */
6348
6349 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6350 for use in the a.out file, and store them in the array pointed to by buf.
6351 This knows about the endian-ness of the target machine and does
6352 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6353 2 (short) and 4 (long). Floating numbers are put out as a series of
6354 LITTLENUMS (shorts, here at least). */
6355
6356 void
6357 md_number_to_chars (char *buf, valueT val, int n)
6358 {
6359 if (target_big_endian)
6360 number_to_chars_bigendian (buf, val, n);
6361 else
6362 number_to_chars_littleendian (buf, val, n);
6363 }
6364
6365 /* MD interface: Sections. */
6366
6367 /* Estimate the size of a frag before relaxing. Assume everything fits in
6368 4 bytes. */
6369
6370 int
6371 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6372 {
6373 fragp->fr_var = 4;
6374 return 4;
6375 }
6376
6377 /* Round up a section size to the appropriate boundary. */
6378
6379 valueT
6380 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6381 {
6382 return size;
6383 }
6384
6385 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6386 of an rs_align_code fragment.
6387
6388 Here we fill the frag with the appropriate info for padding the
6389 output stream. The resulting frag will consist of a fixed (fr_fix)
6390 and of a repeating (fr_var) part.
6391
6392 The fixed content is always emitted before the repeating content and
6393 these two parts are used as follows in constructing the output:
6394 - the fixed part will be used to align to a valid instruction word
6395 boundary, in case we start at a misaligned address; as no
6396 executable instruction can live at the misaligned location, we
6397 simply fill with zeros;
6398 - the variable part will be used to cover the remaining padding and
6399 we fill using the AArch64 NOP instruction.
6400
6401 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6402 enough storage space for up to 3 bytes for padding back to a valid
6403 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6404
6405 void
6406 aarch64_handle_align (fragS * fragP)
6407 {
6408 /* NOP = d503201f */
6409 /* AArch64 instructions are always little-endian. */
6410 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6411
6412 int bytes, fix, noop_size;
6413 char *p;
6414
6415 if (fragP->fr_type != rs_align_code)
6416 return;
6417
6418 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6419 p = fragP->fr_literal + fragP->fr_fix;
6420
6421 #ifdef OBJ_ELF
6422 gas_assert (fragP->tc_frag_data.recorded);
6423 #endif
6424
6425 noop_size = sizeof (aarch64_noop);
6426
6427 fix = bytes & (noop_size - 1);
6428 if (fix)
6429 {
6430 #ifdef OBJ_ELF
6431 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6432 #endif
6433 memset (p, 0, fix);
6434 p += fix;
6435 fragP->fr_fix += fix;
6436 }
6437
6438 if (noop_size)
6439 memcpy (p, aarch64_noop, noop_size);
6440 fragP->fr_var = noop_size;
6441 }
6442
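/* Worked example for the function above: if 10 bytes of padding are needed,
   fix = 10 & 3 = 2, so two zero bytes are emitted first to reach a 4-byte
   boundary, and the remaining 8 bytes are filled by repeating the 4-byte
   NOP pattern 1f 20 03 d5.  */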
6443 /* Perform target specific initialisation of a frag.
6444 Note - despite the name this initialisation is not done when the frag
6445 is created, but only when its type is assigned. A frag can be created
6446 and used a long time before its type is set, so beware of assuming that
6447 this initialisation is performed first. */
6448
6449 #ifndef OBJ_ELF
6450 void
6451 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6452 int max_chars ATTRIBUTE_UNUSED)
6453 {
6454 }
6455
6456 #else /* OBJ_ELF is defined. */
6457 void
6458 aarch64_init_frag (fragS * fragP, int max_chars)
6459 {
6460 /* Record a mapping symbol for alignment frags. We will delete this
6461 later if the alignment ends up empty. */
6462 if (!fragP->tc_frag_data.recorded)
6463 fragP->tc_frag_data.recorded = 1;
6464
6465 switch (fragP->fr_type)
6466 {
6467 case rs_align_test:
6468 case rs_fill:
6469 mapping_state_2 (MAP_DATA, max_chars);
6470 break;
6471 case rs_align:
6472 /* PR 20364: We can get alignment frags in code sections,
6473 so do not just assume that we should use the MAP_DATA state. */
6474 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6475 break;
6476 case rs_align_code:
6477 mapping_state_2 (MAP_INSN, max_chars);
6478 break;
6479 default:
6480 break;
6481 }
6482 }
6483 \f
6484 /* Initialize the DWARF-2 unwind information for this procedure. */
6485
6486 void
6487 tc_aarch64_frame_initial_instructions (void)
6488 {
6489 cfi_add_CFA_def_cfa (REG_SP, 0);
6490 }
6491 #endif /* OBJ_ELF */
6492
6493 /* Convert REGNAME to a DWARF-2 register number. */
6494
6495 int
6496 tc_aarch64_regname_to_dw2regnum (char *regname)
6497 {
6498 const reg_entry *reg = parse_reg (&regname);
6499 if (reg == NULL)
6500 return -1;
6501
6502 switch (reg->type)
6503 {
6504 case REG_TYPE_SP_32:
6505 case REG_TYPE_SP_64:
6506 case REG_TYPE_R_32:
6507 case REG_TYPE_R_64:
6508 return reg->number;
6509
6510 case REG_TYPE_FP_B:
6511 case REG_TYPE_FP_H:
6512 case REG_TYPE_FP_S:
6513 case REG_TYPE_FP_D:
6514 case REG_TYPE_FP_Q:
6515 return reg->number + 64;
6516
6517 default:
6518 break;
6519 }
6520 return -1;
6521 }
6522
6523 /* Implement DWARF2_ADDR_SIZE. */
6524
6525 int
6526 aarch64_dwarf2_addr_size (void)
6527 {
6528 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6529 if (ilp32_p)
6530 return 4;
6531 #endif
6532 return bfd_arch_bits_per_address (stdoutput) / 8;
6533 }
6534
6535 /* MD interface: Symbol and relocation handling. */
6536
6537 /* Return the address within the segment that a PC-relative fixup is
6538 relative to. For AArch64, PC-relative fixups applied to instructions
6539 are generally relative to the fixup location plus AARCH64_PCREL_OFFSET bytes. */
6540
6541 long
6542 md_pcrel_from_section (fixS * fixP, segT seg)
6543 {
6544 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6545
6546 /* If this is pc-relative and we are going to emit a relocation
6547 then we just want to put out any pipeline compensation that the linker
6548 will need. Otherwise we want to use the calculated base. */
6549 if (fixP->fx_pcrel
6550 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6551 || aarch64_force_relocation (fixP)))
6552 base = 0;
6553
6554 /* AArch64 should be consistent for all pc-relative relocations. */
6555 return base + AARCH64_PCREL_OFFSET;
6556 }
6557
6558 /* Under ELF we need to default the _GLOBAL_OFFSET_TABLE_ symbol.
6559 Otherwise we have no need to default values of symbols. */
6560
6561 symbolS *
6562 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6563 {
6564 #ifdef OBJ_ELF
6565 if (name[0] == '_' && name[1] == 'G'
6566 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6567 {
6568 if (!GOT_symbol)
6569 {
6570 if (symbol_find (name))
6571 as_bad (_("GOT already in the symbol table"));
6572
6573 GOT_symbol = symbol_new (name, undefined_section,
6574 (valueT) 0, &zero_address_frag);
6575 }
6576
6577 return GOT_symbol;
6578 }
6579 #endif
6580
6581 return 0;
6582 }
6583
6584 /* Return non-zero if the indicated VALUE has overflowed the maximum
6585 range expressible by an unsigned number with the indicated number of
6586 BITS. */
6587
6588 static bfd_boolean
6589 unsigned_overflow (valueT value, unsigned bits)
6590 {
6591 valueT lim;
6592 if (bits >= sizeof (valueT) * 8)
6593 return FALSE;
6594 lim = (valueT) 1 << bits;
6595 return (value >= lim);
6596 }
6597
6598
6599 /* Return non-zero if the indicated VALUE has overflowed the maximum
6600 range expressible by a signed number with the indicated number of
6601 BITS. */
6602
6603 static bfd_boolean
6604 signed_overflow (offsetT value, unsigned bits)
6605 {
6606 offsetT lim;
6607 if (bits >= sizeof (offsetT) * 8)
6608 return FALSE;
6609 lim = (offsetT) 1 << (bits - 1);
6610 return (value < -lim || value >= lim);
6611 }
6612
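/* For example, unsigned_overflow (0x1000, 12) is TRUE since the limit is
   1 << 12 = 0x1000, while signed_overflow (-2048, 12) is FALSE and
   signed_overflow (-2049, 12) is TRUE because the signed 12-bit range is
   [-2048, 2047].  */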
6613 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6614 unsigned immediate offset load/store instruction, try to encode it as
6615 an unscaled, 9-bit, signed immediate offset load/store instruction.
6616 Return TRUE if it is successful; otherwise return FALSE.
6617
6618 As a programmer-friendly assembler, we generate LDUR/STUR instructions
6619 in response to the standard LDR/STR mnemonics when the immediate offset is
6620 unambiguous, i.e. when it is negative or unaligned. */
6621
6622 static bfd_boolean
6623 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6624 {
6625 int idx;
6626 enum aarch64_op new_op;
6627 const aarch64_opcode *new_opcode;
6628
6629 gas_assert (instr->opcode->iclass == ldst_pos);
6630
6631 switch (instr->opcode->op)
6632 {
6633 case OP_LDRB_POS:new_op = OP_LDURB; break;
6634 case OP_STRB_POS: new_op = OP_STURB; break;
6635 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6636 case OP_LDRH_POS: new_op = OP_LDURH; break;
6637 case OP_STRH_POS: new_op = OP_STURH; break;
6638 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6639 case OP_LDR_POS: new_op = OP_LDUR; break;
6640 case OP_STR_POS: new_op = OP_STUR; break;
6641 case OP_LDRF_POS: new_op = OP_LDURV; break;
6642 case OP_STRF_POS: new_op = OP_STURV; break;
6643 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6644 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6645 default: new_op = OP_NIL; break;
6646 }
6647
6648 if (new_op == OP_NIL)
6649 return FALSE;
6650
6651 new_opcode = aarch64_get_opcode (new_op);
6652 gas_assert (new_opcode != NULL);
6653
6654 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6655 instr->opcode->op, new_opcode->op);
6656
6657 aarch64_replace_opcode (instr, new_opcode);
6658
6659 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6660 qualifier matching may fail because the out-of-date qualifier will
6661 prevent the operand from being updated with a new and correct qualifier. */
6662 idx = aarch64_operand_index (instr->opcode->operands,
6663 AARCH64_OPND_ADDR_SIMM9);
6664 gas_assert (idx == 1);
6665 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6666
6667 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6668
6669 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6670 return FALSE;
6671
6672 return TRUE;
6673 }
6674
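/* For instance (hypothetical operands), "ldr x0, [x1, #-8]" cannot be
   encoded with the scaled unsigned 12-bit offset form, so the opcode is
   swapped for OP_LDUR and the instruction is emitted as
   "ldur x0, [x1, #-8]"; an unaligned offset such as #3 on a word load is
   handled the same way.  */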
6675 /* Called by fix_insn to fix a MOV immediate alias instruction.
6676
6677 Operand for a generic move immediate instruction, which is an alias
6678 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6679 a 32-bit/64-bit immediate value into a general register. An assembler error
6680 shall result if the immediate cannot be created by a single one of these
6681 instructions. If there is a choice, then to ensure reversibility an
6682 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6683
6684 static void
6685 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6686 {
6687 const aarch64_opcode *opcode;
6688
6689 /* Need to check if the destination is SP/ZR. The check has to be done
6690 before any aarch64_replace_opcode. */
6691 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6692 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6693
6694 instr->operands[1].imm.value = value;
6695 instr->operands[1].skip = 0;
6696
6697 if (try_mov_wide_p)
6698 {
6699 /* Try the MOVZ alias. */
6700 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6701 aarch64_replace_opcode (instr, opcode);
6702 if (aarch64_opcode_encode (instr->opcode, instr,
6703 &instr->value, NULL, NULL))
6704 {
6705 put_aarch64_insn (buf, instr->value);
6706 return;
6707 }
6708 /* Try the MOVN alias. */
6709 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6710 aarch64_replace_opcode (instr, opcode);
6711 if (aarch64_opcode_encode (instr->opcode, instr,
6712 &instr->value, NULL, NULL))
6713 {
6714 put_aarch64_insn (buf, instr->value);
6715 return;
6716 }
6717 }
6718
6719 if (try_mov_bitmask_p)
6720 {
6721 /* Try the ORR alias. */
6722 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6723 aarch64_replace_opcode (instr, opcode);
6724 if (aarch64_opcode_encode (instr->opcode, instr,
6725 &instr->value, NULL, NULL))
6726 {
6727 put_aarch64_insn (buf, instr->value);
6728 return;
6729 }
6730 }
6731
6732 as_bad_where (fixP->fx_file, fixP->fx_line,
6733 _("immediate cannot be moved by a single instruction"));
6734 }
6735
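/* Sketch of the preference order above, with hypothetical immediates:

     mov x0, #0x10000               -> movz x0, #0x1, lsl #16
     mov x0, #-1                    -> movn x0, #0
     mov x0, #0x5555555555555555    -> orr  x0, xzr, #0x5555555555555555

   MOVZ is tried first, then MOVN, and the ORR bitmask form only when the
   destination is not the zero register.  */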
6736 /* An instruction operand which is immediate-related may use a symbol
6737 in the assembly, e.g.
6738
6739 mov w0, u32
6740 .set u32, 0x00ffff00
6741
6742 At the time when the assembly instruction is parsed, a referenced symbol,
6743 like 'u32' in the above example, may not have been seen; a fixS is created
6744 in such a case and is handled here after symbols have been resolved.
6745 The instruction is fixed up with VALUE using the information in *FIXP plus
6746 extra information in FLAGS.
6747
6748 This function is called by md_apply_fix to fix up instructions that need
6749 a fix-up described above but do not involve any linker-time relocation. */
6750
6751 static void
6752 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6753 {
6754 int idx;
6755 uint32_t insn;
6756 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6757 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6758 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6759
6760 if (new_inst)
6761 {
6762 /* Now the instruction is about to be fixed-up, so the operand that
6763 was previously marked as 'ignored' needs to be unmarked in order
6764 to get the encoding done properly. */
6765 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6766 new_inst->operands[idx].skip = 0;
6767 }
6768
6769 gas_assert (opnd != AARCH64_OPND_NIL);
6770
6771 switch (opnd)
6772 {
6773 case AARCH64_OPND_EXCEPTION:
6774 if (unsigned_overflow (value, 16))
6775 as_bad_where (fixP->fx_file, fixP->fx_line,
6776 _("immediate out of range"));
6777 insn = get_aarch64_insn (buf);
6778 insn |= encode_svc_imm (value);
6779 put_aarch64_insn (buf, insn);
6780 break;
6781
6782 case AARCH64_OPND_AIMM:
6783 /* ADD or SUB with immediate.
6784 NOTE this assumes we come here with an add/sub shifted reg encoding
6785 3 322|2222|2 2 2 21111 111111
6786 1 098|7654|3 2 1 09876 543210 98765 43210
6787 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6788 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6789 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6790 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6791 ->
6792 3 322|2222|2 2 221111111111
6793 1 098|7654|3 2 109876543210 98765 43210
6794 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6795 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6796 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6797 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6798 Fields sf Rn Rd are already set. */
6799 insn = get_aarch64_insn (buf);
6800 if (value < 0)
6801 {
6802 /* Add <-> sub. */
6803 insn = reencode_addsub_switch_add_sub (insn);
6804 value = -value;
6805 }
6806
6807 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6808 && unsigned_overflow (value, 12))
6809 {
6810 /* Try to shift the value by 12 to make it fit. */
6811 if (((value >> 12) << 12) == value
6812 && ! unsigned_overflow (value, 12 + 12))
6813 {
6814 value >>= 12;
6815 insn |= encode_addsub_imm_shift_amount (1);
6816 }
6817 }
6818
6819 if (unsigned_overflow (value, 12))
6820 as_bad_where (fixP->fx_file, fixP->fx_line,
6821 _("immediate out of range"));
6822
6823 insn |= encode_addsub_imm (value);
6824
6825 put_aarch64_insn (buf, insn);
6826 break;
6827
6828 case AARCH64_OPND_SIMD_IMM:
6829 case AARCH64_OPND_SIMD_IMM_SFT:
6830 case AARCH64_OPND_LIMM:
6831 /* Bit mask immediate. */
6832 gas_assert (new_inst != NULL);
6833 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6834 new_inst->operands[idx].imm.value = value;
6835 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6836 &new_inst->value, NULL, NULL))
6837 put_aarch64_insn (buf, new_inst->value);
6838 else
6839 as_bad_where (fixP->fx_file, fixP->fx_line,
6840 _("invalid immediate"));
6841 break;
6842
6843 case AARCH64_OPND_HALF:
6844 /* 16-bit unsigned immediate. */
6845 if (unsigned_overflow (value, 16))
6846 as_bad_where (fixP->fx_file, fixP->fx_line,
6847 _("immediate out of range"));
6848 insn = get_aarch64_insn (buf);
6849 insn |= encode_movw_imm (value & 0xffff);
6850 put_aarch64_insn (buf, insn);
6851 break;
6852
6853 case AARCH64_OPND_IMM_MOV:
6854 /* Operand for a generic move immediate instruction, which is
6855 an alias instruction that generates a single MOVZ, MOVN or ORR
6856 instruction to load a 32-bit/64-bit immediate value into a general
6857 register. An assembler error shall result if the immediate cannot be
6858 created by a single one of these instructions. If there is a choice,
6859 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6860 and MOVZ or MOVN to ORR. */
6861 gas_assert (new_inst != NULL);
6862 fix_mov_imm_insn (fixP, buf, new_inst, value);
6863 break;
6864
6865 case AARCH64_OPND_ADDR_SIMM7:
6866 case AARCH64_OPND_ADDR_SIMM9:
6867 case AARCH64_OPND_ADDR_SIMM9_2:
6868 case AARCH64_OPND_ADDR_UIMM12:
6869 /* Immediate offset in an address. */
6870 insn = get_aarch64_insn (buf);
6871
6872 gas_assert (new_inst != NULL && new_inst->value == insn);
6873 gas_assert (new_inst->opcode->operands[1] == opnd
6874 || new_inst->opcode->operands[2] == opnd);
6875
6876 /* Get the index of the address operand. */
6877 if (new_inst->opcode->operands[1] == opnd)
6878 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6879 idx = 1;
6880 else
6881 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6882 idx = 2;
6883
6884 /* Update the resolved offset value. */
6885 new_inst->operands[idx].addr.offset.imm = value;
6886
6887 /* Encode/fix-up. */
6888 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6889 &new_inst->value, NULL, NULL))
6890 {
6891 put_aarch64_insn (buf, new_inst->value);
6892 break;
6893 }
6894 else if (new_inst->opcode->iclass == ldst_pos
6895 && try_to_encode_as_unscaled_ldst (new_inst))
6896 {
6897 put_aarch64_insn (buf, new_inst->value);
6898 break;
6899 }
6900
6901 as_bad_where (fixP->fx_file, fixP->fx_line,
6902 _("immediate offset out of range"));
6903 break;
6904
6905 default:
6906 gas_assert (0);
6907 as_fatal (_("unhandled operand code %d"), opnd);
6908 }
6909 }
6910
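/* Example for the AIMM case above: a resolved value of 0x5000 does not fit
   in 12 bits, but (0x5000 >> 12) << 12 == 0x5000, so it is encoded as
   imm12 = 5 with the LSL #12 flag set; a negative value such as -16 on an
   ADD is handled by flipping the instruction to the matching SUB with +16.  */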
6911 /* Apply a fixup (fixP) to segment data, once it has been determined
6912 by our caller that we have all the info we need to fix it up.
6913
6914 Parameter valP is the pointer to the value of the bits. */
6915
6916 void
6917 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6918 {
6919 offsetT value = *valP;
6920 uint32_t insn;
6921 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6922 int scale;
6923 unsigned flags = fixP->fx_addnumber;
6924
6925 DEBUG_TRACE ("\n\n");
6926 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6927 DEBUG_TRACE ("Enter md_apply_fix");
6928
6929 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6930
6931 /* Note whether this will delete the relocation. */
6932
6933 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6934 fixP->fx_done = 1;
6935
6936 /* Process the relocations. */
6937 switch (fixP->fx_r_type)
6938 {
6939 case BFD_RELOC_NONE:
6940 /* This will need to go in the object file. */
6941 fixP->fx_done = 0;
6942 break;
6943
6944 case BFD_RELOC_8:
6945 case BFD_RELOC_8_PCREL:
6946 if (fixP->fx_done || !seg->use_rela_p)
6947 md_number_to_chars (buf, value, 1);
6948 break;
6949
6950 case BFD_RELOC_16:
6951 case BFD_RELOC_16_PCREL:
6952 if (fixP->fx_done || !seg->use_rela_p)
6953 md_number_to_chars (buf, value, 2);
6954 break;
6955
6956 case BFD_RELOC_32:
6957 case BFD_RELOC_32_PCREL:
6958 if (fixP->fx_done || !seg->use_rela_p)
6959 md_number_to_chars (buf, value, 4);
6960 break;
6961
6962 case BFD_RELOC_64:
6963 case BFD_RELOC_64_PCREL:
6964 if (fixP->fx_done || !seg->use_rela_p)
6965 md_number_to_chars (buf, value, 8);
6966 break;
6967
6968 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6969 /* We claim that these fixups have been processed here, even if
6970 in fact we generate an error because we do not have a reloc
6971 for them, so tc_gen_reloc() will reject them. */
6972 fixP->fx_done = 1;
6973 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6974 {
6975 as_bad_where (fixP->fx_file, fixP->fx_line,
6976 _("undefined symbol %s used as an immediate value"),
6977 S_GET_NAME (fixP->fx_addsy));
6978 goto apply_fix_return;
6979 }
6980 fix_insn (fixP, flags, value);
6981 break;
6982
6983 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6984 if (fixP->fx_done || !seg->use_rela_p)
6985 {
6986 if (value & 3)
6987 as_bad_where (fixP->fx_file, fixP->fx_line,
6988 _("pc-relative load offset not word aligned"));
6989 if (signed_overflow (value, 21))
6990 as_bad_where (fixP->fx_file, fixP->fx_line,
6991 _("pc-relative load offset out of range"));
6992 insn = get_aarch64_insn (buf);
6993 insn |= encode_ld_lit_ofs_19 (value >> 2);
6994 put_aarch64_insn (buf, insn);
6995 }
6996 break;
6997
6998 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6999 if (fixP->fx_done || !seg->use_rela_p)
7000 {
7001 if (signed_overflow (value, 21))
7002 as_bad_where (fixP->fx_file, fixP->fx_line,
7003 _("pc-relative address offset out of range"));
7004 insn = get_aarch64_insn (buf);
7005 insn |= encode_adr_imm (value);
7006 put_aarch64_insn (buf, insn);
7007 }
7008 break;
7009
7010 case BFD_RELOC_AARCH64_BRANCH19:
7011 if (fixP->fx_done || !seg->use_rela_p)
7012 {
7013 if (value & 3)
7014 as_bad_where (fixP->fx_file, fixP->fx_line,
7015 _("conditional branch target not word aligned"));
7016 if (signed_overflow (value, 21))
7017 as_bad_where (fixP->fx_file, fixP->fx_line,
7018 _("conditional branch out of range"));
7019 insn = get_aarch64_insn (buf);
7020 insn |= encode_cond_branch_ofs_19 (value >> 2);
7021 put_aarch64_insn (buf, insn);
7022 }
7023 break;
7024
7025 case BFD_RELOC_AARCH64_TSTBR14:
7026 if (fixP->fx_done || !seg->use_rela_p)
7027 {
7028 if (value & 3)
7029 as_bad_where (fixP->fx_file, fixP->fx_line,
7030 _("conditional branch target not word aligned"));
7031 if (signed_overflow (value, 16))
7032 as_bad_where (fixP->fx_file, fixP->fx_line,
7033 _("conditional branch out of range"));
7034 insn = get_aarch64_insn (buf);
7035 insn |= encode_tst_branch_ofs_14 (value >> 2);
7036 put_aarch64_insn (buf, insn);
7037 }
7038 break;
7039
7040 case BFD_RELOC_AARCH64_CALL26:
7041 case BFD_RELOC_AARCH64_JUMP26:
7042 if (fixP->fx_done || !seg->use_rela_p)
7043 {
7044 if (value & 3)
7045 as_bad_where (fixP->fx_file, fixP->fx_line,
7046 _("branch target not word aligned"));
7047 if (signed_overflow (value, 28))
7048 as_bad_where (fixP->fx_file, fixP->fx_line,
7049 _("branch out of range"));
7050 insn = get_aarch64_insn (buf);
7051 insn |= encode_branch_ofs_26 (value >> 2);
7052 put_aarch64_insn (buf, insn);
7053 }
7054 break;
7055
7056 case BFD_RELOC_AARCH64_MOVW_G0:
7057 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7058 case BFD_RELOC_AARCH64_MOVW_G0_S:
7059 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7060 scale = 0;
7061 goto movw_common;
7062 case BFD_RELOC_AARCH64_MOVW_G1:
7063 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7064 case BFD_RELOC_AARCH64_MOVW_G1_S:
7065 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7066 scale = 16;
7067 goto movw_common;
7068 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7069 scale = 0;
7070 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7071 /* Should always be exported to object file, see
7072 aarch64_force_relocation(). */
7073 gas_assert (!fixP->fx_done);
7074 gas_assert (seg->use_rela_p);
7075 goto movw_common;
7076 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7077 scale = 16;
7078 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7079 /* Should always be exported to object file, see
7080 aarch64_force_relocation(). */
7081 gas_assert (!fixP->fx_done);
7082 gas_assert (seg->use_rela_p);
7083 goto movw_common;
7084 case BFD_RELOC_AARCH64_MOVW_G2:
7085 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7086 case BFD_RELOC_AARCH64_MOVW_G2_S:
7087 scale = 32;
7088 goto movw_common;
7089 case BFD_RELOC_AARCH64_MOVW_G3:
7090 scale = 48;
7091 movw_common:
7092 if (fixP->fx_done || !seg->use_rela_p)
7093 {
7094 insn = get_aarch64_insn (buf);
7095
7096 if (!fixP->fx_done)
7097 {
7098 /* REL signed addend must fit in 16 bits */
7099 if (signed_overflow (value, 16))
7100 as_bad_where (fixP->fx_file, fixP->fx_line,
7101 _("offset out of range"));
7102 }
7103 else
7104 {
7105 /* Check for overflow and scale. */
7106 switch (fixP->fx_r_type)
7107 {
7108 case BFD_RELOC_AARCH64_MOVW_G0:
7109 case BFD_RELOC_AARCH64_MOVW_G1:
7110 case BFD_RELOC_AARCH64_MOVW_G2:
7111 case BFD_RELOC_AARCH64_MOVW_G3:
7112 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7113 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7114 if (unsigned_overflow (value, scale + 16))
7115 as_bad_where (fixP->fx_file, fixP->fx_line,
7116 _("unsigned value out of range"));
7117 break;
7118 case BFD_RELOC_AARCH64_MOVW_G0_S:
7119 case BFD_RELOC_AARCH64_MOVW_G1_S:
7120 case BFD_RELOC_AARCH64_MOVW_G2_S:
7121 /* NOTE: We can only come here with movz or movn. */
7122 if (signed_overflow (value, scale + 16))
7123 as_bad_where (fixP->fx_file, fixP->fx_line,
7124 _("signed value out of range"));
7125 if (value < 0)
7126 {
7127 /* Force use of MOVN. */
7128 value = ~value;
7129 insn = reencode_movzn_to_movn (insn);
7130 }
7131 else
7132 {
7133 /* Force use of MOVZ. */
7134 insn = reencode_movzn_to_movz (insn);
7135 }
7136 break;
7137 default:
7138 /* Unchecked relocations. */
7139 break;
7140 }
7141 value >>= scale;
7142 }
7143
7144 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7145 insn |= encode_movw_imm (value & 0xffff);
7146
7147 put_aarch64_insn (buf, insn);
7148 }
7149 break;
7150
7151 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7152 fixP->fx_r_type = (ilp32_p
7153 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7154 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7155 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7156 /* Should always be exported to object file, see
7157 aarch64_force_relocation(). */
7158 gas_assert (!fixP->fx_done);
7159 gas_assert (seg->use_rela_p);
7160 break;
7161
7162 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7163 fixP->fx_r_type = (ilp32_p
7164 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7165 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7166 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7167 /* Should always be exported to object file, see
7168 aarch64_force_relocation(). */
7169 gas_assert (!fixP->fx_done);
7170 gas_assert (seg->use_rela_p);
7171 break;
7172
7173 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7174 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7175 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7176 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7177 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7178 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7179 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7180 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7181 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7182 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7183 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7184 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7185 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7186 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7187 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7188 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7189 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7190 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7191 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7192 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7193 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7194 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7195 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7196 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7197 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7198 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7199 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7200 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7201 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7202 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7203 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7204 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7205 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7206 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7207 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7208 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7209 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7210 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7211 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7212 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7213 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7214 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7215 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7216 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7217 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7218 /* Should always be exported to object file, see
7219 aarch64_force_relocation(). */
7220 gas_assert (!fixP->fx_done);
7221 gas_assert (seg->use_rela_p);
7222 break;
7223
7224 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7225 /* Should always be exported to object file, see
7226 aarch64_force_relocation(). */
7227 fixP->fx_r_type = (ilp32_p
7228 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7229 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7230 gas_assert (!fixP->fx_done);
7231 gas_assert (seg->use_rela_p);
7232 break;
7233
7234 case BFD_RELOC_AARCH64_ADD_LO12:
7235 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7236 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7237 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7238 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7239 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7240 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7241 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7242 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7243 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7244 case BFD_RELOC_AARCH64_LDST128_LO12:
7245 case BFD_RELOC_AARCH64_LDST16_LO12:
7246 case BFD_RELOC_AARCH64_LDST32_LO12:
7247 case BFD_RELOC_AARCH64_LDST64_LO12:
7248 case BFD_RELOC_AARCH64_LDST8_LO12:
7249 /* Should always be exported to object file, see
7250 aarch64_force_relocation(). */
7251 gas_assert (!fixP->fx_done);
7252 gas_assert (seg->use_rela_p);
7253 break;
7254
7255 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7256 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7257 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7258 break;
7259
7260 case BFD_RELOC_UNUSED:
7261 /* An error will already have been reported. */
7262 break;
7263
7264 default:
7265 as_bad_where (fixP->fx_file, fixP->fx_line,
7266 _("unexpected %s fixup"),
7267 bfd_get_reloc_code_name (fixP->fx_r_type));
7268 break;
7269 }
7270
7271 apply_fix_return:
7272 /* Free the allocated struct aarch64_inst.
7273 N.B. currently only a limited number of fix-up types actually use
7274 this field, so the impact on performance should be minimal. */
7275 if (fixP->tc_fix_data.inst != NULL)
7276 free (fixP->tc_fix_data.inst);
7277
7278 return;
7279 }
7280
7281 /* Translate internal representation of relocation info to BFD target
7282 format. */
7283
7284 arelent *
7285 tc_gen_reloc (asection * section, fixS * fixp)
7286 {
7287 arelent *reloc;
7288 bfd_reloc_code_real_type code;
7289
7290 reloc = XNEW (arelent);
7291
7292 reloc->sym_ptr_ptr = XNEW (asymbol *);
7293 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7294 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7295
7296 if (fixp->fx_pcrel)
7297 {
7298 if (section->use_rela_p)
7299 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7300 else
7301 fixp->fx_offset = reloc->address;
7302 }
7303 reloc->addend = fixp->fx_offset;
7304
7305 code = fixp->fx_r_type;
7306 switch (code)
7307 {
7308 case BFD_RELOC_16:
7309 if (fixp->fx_pcrel)
7310 code = BFD_RELOC_16_PCREL;
7311 break;
7312
7313 case BFD_RELOC_32:
7314 if (fixp->fx_pcrel)
7315 code = BFD_RELOC_32_PCREL;
7316 break;
7317
7318 case BFD_RELOC_64:
7319 if (fixp->fx_pcrel)
7320 code = BFD_RELOC_64_PCREL;
7321 break;
7322
7323 default:
7324 break;
7325 }
7326
7327 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7328 if (reloc->howto == NULL)
7329 {
7330 as_bad_where (fixp->fx_file, fixp->fx_line,
7331 _
7332 ("cannot represent %s relocation in this object file format"),
7333 bfd_get_reloc_code_name (code));
7334 return NULL;
7335 }
7336
7337 return reloc;
7338 }
7339
7340 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7341
7342 void
7343 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7344 {
7345 bfd_reloc_code_real_type type;
7346 int pcrel = 0;
7347
7348 /* Pick a reloc.
7349 FIXME: @@ Should look at CPU word size. */
7350 switch (size)
7351 {
7352 case 1:
7353 type = BFD_RELOC_8;
7354 break;
7355 case 2:
7356 type = BFD_RELOC_16;
7357 break;
7358 case 4:
7359 type = BFD_RELOC_32;
7360 break;
7361 case 8:
7362 type = BFD_RELOC_64;
7363 break;
7364 default:
7365 as_bad (_("cannot do %u-byte relocation"), size);
7366 type = BFD_RELOC_UNUSED;
7367 break;
7368 }
7369
7370 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7371 }
7372
7373 int
7374 aarch64_force_relocation (struct fix *fixp)
7375 {
7376 switch (fixp->fx_r_type)
7377 {
7378 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7379 /* Perform these "immediate" internal relocations
7380 even if the symbol is extern or weak. */
7381 return 0;
7382
7383 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7384 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7385 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7386 /* Pseudo relocs that need to be fixed up according to
7387 ilp32_p. */
7388 return 0;
7389
7390 case BFD_RELOC_AARCH64_ADD_LO12:
7391 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7392 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7393 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7394 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7395 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7396 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7397 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7398 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7399 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7400 case BFD_RELOC_AARCH64_LDST128_LO12:
7401 case BFD_RELOC_AARCH64_LDST16_LO12:
7402 case BFD_RELOC_AARCH64_LDST32_LO12:
7403 case BFD_RELOC_AARCH64_LDST64_LO12:
7404 case BFD_RELOC_AARCH64_LDST8_LO12:
7405 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7406 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7407 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7408 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7409 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7410 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7411 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7412 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7413 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7414 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7415 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7416 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7417 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7418 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7419 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7420 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7421 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7422 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7423 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7424 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7425 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7426 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7427 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7428 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7429 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7430 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7431 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7432 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7433 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7434 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7435 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7436 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7437 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7438 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7439 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7440 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7441 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7442 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7443 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7444 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7445 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7446 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7447 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7448 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7449 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7450 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7451 /* Always leave these relocations for the linker. */
7452 return 1;
7453
7454 default:
7455 break;
7456 }
7457
7458 return generic_force_reloc (fixp);
7459 }
7460
7461 #ifdef OBJ_ELF
7462
7463 const char *
7464 elf64_aarch64_target_format (void)
7465 {
7466 if (strcmp (TARGET_OS, "cloudabi") == 0)
7467 {
7468 /* FIXME: What to do for ilp32_p ? */
7469 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7470 }
7471 if (target_big_endian)
7472 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7473 else
7474 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7475 }
7476
7477 void
7478 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7479 {
7480 elf_frob_symbol (symp, puntp);
7481 }
7482 #endif
7483
7484 /* MD interface: Finalization. */
7485
7486 /* A good place to do this, although this was probably not intended
7487 for this kind of use. We need to dump the literal pool before
7488 references are made to a null symbol pointer. */
7489
7490 void
7491 aarch64_cleanup (void)
7492 {
7493 literal_pool *pool;
7494
7495 for (pool = list_of_pools; pool; pool = pool->next)
7496 {
7497 /* Put it at the end of the relevant section. */
7498 subseg_set (pool->section, pool->sub_section);
7499 s_ltorg (0);
7500 }
7501 }
7502
7503 #ifdef OBJ_ELF
7504 /* Remove any excess mapping symbols generated for alignment frags in
7505 SEC. We may have created a mapping symbol before a zero byte
7506 alignment; remove it if there's a mapping symbol after the
7507 alignment. */
7508 static void
7509 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7510 void *dummy ATTRIBUTE_UNUSED)
7511 {
7512 segment_info_type *seginfo = seg_info (sec);
7513 fragS *fragp;
7514
7515 if (seginfo == NULL || seginfo->frchainP == NULL)
7516 return;
7517
7518 for (fragp = seginfo->frchainP->frch_root;
7519 fragp != NULL; fragp = fragp->fr_next)
7520 {
7521 symbolS *sym = fragp->tc_frag_data.last_map;
7522 fragS *next = fragp->fr_next;
7523
7524 /* Variable-sized frags have been converted to fixed size by
7525 this point. But if this was variable-sized to start with,
7526 there will be a fixed-size frag after it. So don't handle
7527 next == NULL. */
7528 if (sym == NULL || next == NULL)
7529 continue;
7530
7531 if (S_GET_VALUE (sym) < next->fr_address)
7532 /* Not at the end of this frag. */
7533 continue;
7534 know (S_GET_VALUE (sym) == next->fr_address);
7535
7536 do
7537 {
7538 if (next->tc_frag_data.first_map != NULL)
7539 {
7540 /* Next frag starts with a mapping symbol. Discard this
7541 one. */
7542 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7543 break;
7544 }
7545
7546 if (next->fr_next == NULL)
7547 {
7548 /* This mapping symbol is at the end of the section. Discard
7549 it. */
7550 know (next->fr_fix == 0 && next->fr_var == 0);
7551 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7552 break;
7553 }
7554
7555 /* As long as we have empty frags without any mapping symbols,
7556 keep looking. */
7557 /* If the next frag is non-empty and does not start with a
7558 mapping symbol, then this mapping symbol is required. */
7559 if (next->fr_address != next->fr_next->fr_address)
7560 break;
7561
7562 next = next->fr_next;
7563 }
7564 while (next != NULL);
7565 }
7566 }
7567 #endif
7568
7569 /* Adjust the symbol table. */
7570
7571 void
7572 aarch64_adjust_symtab (void)
7573 {
7574 #ifdef OBJ_ELF
7575 /* Remove any overlapping mapping symbols generated by alignment frags. */
7576 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7577 /* Now do generic ELF adjustments. */
7578 elf_adjust_symtab ();
7579 #endif
7580 }
7581
7582 static void
7583 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7584 {
7585 const char *hash_err;
7586
7587 hash_err = hash_insert (table, key, value);
7588 if (hash_err)
7589 printf ("Internal Error: Can't hash %s\n", key);
7590 }
7591
7592 static void
7593 fill_instruction_hash_table (void)
7594 {
7595 aarch64_opcode *opcode = aarch64_opcode_table;
7596
7597 while (opcode->name != NULL)
7598 {
7599 templates *templ, *new_templ;
7600 templ = hash_find (aarch64_ops_hsh, opcode->name);
7601
7602 new_templ = XNEW (templates);
7603 new_templ->opcode = opcode;
7604 new_templ->next = NULL;
7605
7606 if (!templ)
7607 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7608 else
7609 {
7610 new_templ->next = templ->next;
7611 templ->next = new_templ;
7612 }
7613 ++opcode;
7614 }
7615 }
7616
7617 static inline void
7618 convert_to_upper (char *dst, const char *src, size_t num)
7619 {
7620 unsigned int i;
7621 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7622 *dst = TOUPPER (*src);
7623 *dst = '\0';
7624 }
7625
7626 /* Assume STR points to a lower-case string; allocate, convert and return
7627 the corresponding upper-case string. */
7628 static inline const char*
7629 get_upper_str (const char *str)
7630 {
7631 char *ret;
7632 size_t len = strlen (str);
7633 ret = XNEWVEC (char, len + 1);
7634 convert_to_upper (ret, str, len);
7635 return ret;
7636 }
7637
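/* For example, get_upper_str ("uxtb") returns a freshly allocated "UXTB";
   md_begin below uses it so that both the lower-case and upper-case
   spellings of modifiers, condition codes, barrier options and prefetch
   names hash to the same table entry.  */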
7638 /* MD interface: Initialization. */
7639
7640 void
7641 md_begin (void)
7642 {
7643 unsigned mach;
7644 unsigned int i;
7645
7646 if ((aarch64_ops_hsh = hash_new ()) == NULL
7647 || (aarch64_cond_hsh = hash_new ()) == NULL
7648 || (aarch64_shift_hsh = hash_new ()) == NULL
7649 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7650 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7651 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7652 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7653 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7654 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7655 || (aarch64_reg_hsh = hash_new ()) == NULL
7656 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7657 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7658 || (aarch64_pldop_hsh = hash_new ()) == NULL
7659 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7660 as_fatal (_("virtual memory exhausted"));
7661
7662 fill_instruction_hash_table ();
7663
7664 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7665 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7666 (void *) (aarch64_sys_regs + i));
7667
7668 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7669 checked_hash_insert (aarch64_pstatefield_hsh,
7670 aarch64_pstatefields[i].name,
7671 (void *) (aarch64_pstatefields + i));
7672
7673 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7674 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7675 aarch64_sys_regs_ic[i].name,
7676 (void *) (aarch64_sys_regs_ic + i));
7677
7678 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7679 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7680 aarch64_sys_regs_dc[i].name,
7681 (void *) (aarch64_sys_regs_dc + i));
7682
7683 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7684 checked_hash_insert (aarch64_sys_regs_at_hsh,
7685 aarch64_sys_regs_at[i].name,
7686 (void *) (aarch64_sys_regs_at + i));
7687
7688 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7689 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7690 aarch64_sys_regs_tlbi[i].name,
7691 (void *) (aarch64_sys_regs_tlbi + i));
7692
7693 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7694 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7695 (void *) (reg_names + i));
7696
7697 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7698 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7699 (void *) (nzcv_names + i));
7700
7701 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7702 {
7703 const char *name = aarch64_operand_modifiers[i].name;
7704 checked_hash_insert (aarch64_shift_hsh, name,
7705 (void *) (aarch64_operand_modifiers + i));
7706 /* Also hash the name in the upper case. */
7707 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7708 (void *) (aarch64_operand_modifiers + i));
7709 }
7710
7711 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7712 {
7713 unsigned int j;
7714 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7715 the same condition code. */
7716 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7717 {
7718 const char *name = aarch64_conds[i].names[j];
7719 if (name == NULL)
7720 break;
7721 checked_hash_insert (aarch64_cond_hsh, name,
7722 (void *) (aarch64_conds + i));
7723 /* Also hash the name in the upper case. */
7724 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7725 (void *) (aarch64_conds + i));
7726 }
7727 }
7728
7729 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7730 {
7731 const char *name = aarch64_barrier_options[i].name;
7732 /* Skip xx00 - the unallocated values of the barrier option field. */
7733 if ((i & 0x3) == 0)
7734 continue;
7735 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7736 (void *) (aarch64_barrier_options + i));
7737 /* Also hash the name in upper case. */
7738 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7739 (void *) (aarch64_barrier_options + i));
7740 }
7741
7742 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7743 {
7744 const char* name = aarch64_prfops[i].name;
7745 /* Skip the unallocated hint encodings. */
7746 if (name == NULL)
7747 continue;
7748 checked_hash_insert (aarch64_pldop_hsh, name,
7749 (void *) (aarch64_prfops + i));
7750 /* Also hash the name in upper case. */
7751 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7752 (void *) (aarch64_prfops + i));
7753 }
7754
7755 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7756 {
7757 const char* name = aarch64_hint_options[i].name;
7758
7759 checked_hash_insert (aarch64_hint_opt_hsh, name,
7760 (void *) (aarch64_hint_options + i));
7761 /* Also hash the name in upper case. */
7762 checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
7763 (void *) (aarch64_hint_options + i));
7764 }
7765
7766 /* Set the cpu variant based on the command-line options. */
7767 if (!mcpu_cpu_opt)
7768 mcpu_cpu_opt = march_cpu_opt;
7769
7770 if (!mcpu_cpu_opt)
7771 mcpu_cpu_opt = &cpu_default;
7772
7773 cpu_variant = *mcpu_cpu_opt;
7774
7775 /* Record the CPU type. */
7776 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7777
7778 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7779 }
7780
7781 /* Command line processing. */
7782
7783 const char *md_shortopts = "m:";
7784
7785 #ifdef AARCH64_BI_ENDIAN
7786 #define OPTION_EB (OPTION_MD_BASE + 0)
7787 #define OPTION_EL (OPTION_MD_BASE + 1)
7788 #else
7789 #if TARGET_BYTES_BIG_ENDIAN
7790 #define OPTION_EB (OPTION_MD_BASE + 0)
7791 #else
7792 #define OPTION_EL (OPTION_MD_BASE + 1)
7793 #endif
7794 #endif
7795
7796 struct option md_longopts[] = {
7797 #ifdef OPTION_EB
7798 {"EB", no_argument, NULL, OPTION_EB},
7799 #endif
7800 #ifdef OPTION_EL
7801 {"EL", no_argument, NULL, OPTION_EL},
7802 #endif
7803 {NULL, no_argument, NULL, 0}
7804 };
7805
7806 size_t md_longopts_size = sizeof (md_longopts);
7807
7808 struct aarch64_option_table
7809 {
7810 const char *option; /* Option name to match. */
7811 const char *help; /* Help information. */
7812 int *var; /* Variable to change. */
7813 int value; /* What to change it to. */
7814 char *deprecated; /* If non-null, print this message. */
7815 };
7816
7817 static struct aarch64_option_table aarch64_opts[] = {
7818 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7819 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7820 NULL},
7821 #ifdef DEBUG_AARCH64
7822 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7823 #endif /* DEBUG_AARCH64 */
7824 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7825 NULL},
7826 {"mno-verbose-error", N_("do not output verbose error messages"),
7827 &verbose_error_p, 0, NULL},
7828 {NULL, NULL, NULL, 0, NULL}
7829 };
7830
7831 struct aarch64_cpu_option_table
7832 {
7833 const char *name;
7834 const aarch64_feature_set value;
7835 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7836 case. */
7837 const char *canonical_name;
7838 };
7839
7840 /* This list should, at a minimum, contain all the cpu names
7841 recognized by GCC. */
7842 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7843 {"all", AARCH64_ANY, NULL},
7844 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
7845 AARCH64_FEATURE_CRC), "Cortex-A35"},
7846 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7847 AARCH64_FEATURE_CRC), "Cortex-A53"},
7848 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7849 AARCH64_FEATURE_CRC), "Cortex-A57"},
7850 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7851 AARCH64_FEATURE_CRC), "Cortex-A72"},
7852 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
7853 AARCH64_FEATURE_CRC), "Cortex-A73"},
7854 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7855 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7856 "Samsung Exynos M1"},
7857 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7858 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7859 "Qualcomm QDF24XX"},
7860 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7861 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7862 "Cavium ThunderX"},
7863 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
7864 AARCH64_FEATURE_CRYPTO),
7865 "Broadcom Vulcan"},
7866 /* 'xgene-1' is an older spelling of 'xgene1'; it was used in
7867 earlier releases and is superseded by 'xgene1' in all
7868 tools. */
7869 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7870 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7871 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7872 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7873 {"generic", AARCH64_ARCH_V8, NULL},
7874
7875 {NULL, AARCH64_ARCH_NONE, NULL}
7876 };
7877
7878 struct aarch64_arch_option_table
7879 {
7880 const char *name;
7881 const aarch64_feature_set value;
7882 };
7883
7884 /* This list should, at a minimum, contain all the architecture names
7885 recognized by GCC. */
7886 static const struct aarch64_arch_option_table aarch64_archs[] = {
7887 {"all", AARCH64_ANY},
7888 {"armv8-a", AARCH64_ARCH_V8},
7889 {"armv8.1-a", AARCH64_ARCH_V8_1},
7890 {"armv8.2-a", AARCH64_ARCH_V8_2},
7891 {NULL, AARCH64_ARCH_NONE}
7892 };
7893
7894 /* ISA extensions. */
7895 struct aarch64_option_cpu_value_table
7896 {
7897 const char *name;
7898 const aarch64_feature_set value;
7899 const aarch64_feature_set require; /* Feature dependencies. */
7900 };
7901
7902 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7903 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
7904 AARCH64_ARCH_NONE},
7905 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
7906 AARCH64_ARCH_NONE},
7907 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
7908 AARCH64_ARCH_NONE},
7909 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
7910 AARCH64_ARCH_NONE},
7911 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
7912 AARCH64_ARCH_NONE},
7913 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
7914 AARCH64_ARCH_NONE},
7915 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
7916 AARCH64_ARCH_NONE},
7917 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
7918 AARCH64_ARCH_NONE},
7919 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
7920 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7921 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
7922 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7923 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
7924 AARCH64_ARCH_NONE},
7925 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
7926 };
7927
7928 struct aarch64_long_option_table
7929 {
7930 const char *option; /* Substring to match. */
7931 const char *help; /* Help information. */
7932 int (*func) (const char *subopt); /* Function to decode sub-option. */
7933 char *deprecated; /* If non-null, print this message. */
7934 };
7935
7936 /* Transitive closure of features depending on set. */
7937 static aarch64_feature_set
7938 aarch64_feature_disable_set (aarch64_feature_set set)
7939 {
7940 const struct aarch64_option_cpu_value_table *opt;
7941 aarch64_feature_set prev = 0;
7942
7943 while (prev != set) {
7944 prev = set;
7945 for (opt = aarch64_features; opt->name != NULL; opt++)
7946 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
7947 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
7948 }
7949 return set;
7950 }
7951
7952 /* Transitive closure of dependencies of set. */
7953 static aarch64_feature_set
7954 aarch64_feature_enable_set (aarch64_feature_set set)
7955 {
7956 const struct aarch64_option_cpu_value_table *opt;
7957 aarch64_feature_set prev = 0;
7958
7959 while (prev != set) {
7960 prev = set;
7961 for (opt = aarch64_features; opt->name != NULL; opt++)
7962 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
7963 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
7964 }
7965 return set;
7966 }
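The two helpers above iterate the aarch64_features table to a fixed point: aarch64_feature_disable_set grows the set with every feature that transitively requires something already in it, while aarch64_feature_enable_set grows it with everything the set transitively requires. A minimal standalone sketch of the same fixed-point idea, using plain unsigned bitmasks and a made-up three-feature dependency table rather than the real aarch64_feature_set macros:

#include <stdio.h>

/* Hypothetical feature bits - illustration only, not the real encoding.  */
#define F_FP    (1u << 0)
#define F_SIMD  (1u << 1)	/* requires F_FP */
#define F_FP16  (1u << 2)	/* requires F_FP */

struct dep { unsigned value; unsigned require; };

static const struct dep deps[] = {
  { F_SIMD, F_FP },
  { F_FP16, F_FP },
  { 0, 0 }
};

/* Closure of features depending on SET (what "+nofoo" must also remove).  */
static unsigned
disable_closure (unsigned set)
{
  unsigned prev = 0;
  while (prev != set)
    {
      int i;
      prev = set;
      for (i = 0; deps[i].value != 0; i++)
	if (set & deps[i].require)
	  set |= deps[i].value;
    }
  return set;
}

/* Closure of dependencies of SET (what "+foo" must also add).  */
static unsigned
enable_closure (unsigned set)
{
  unsigned prev = 0;
  while (prev != set)
    {
      int i;
      prev = set;
      for (i = 0; deps[i].value != 0; i++)
	if (set & deps[i].value)
	  set |= deps[i].require;
    }
  return set;
}

int
main (void)
{
  /* Enabling FP16 pulls in FP; disabling FP also drops SIMD and FP16.  */
  printf ("enable fp16: %#x\n", enable_closure (F_FP16));   /* 0x5 */
  printf ("disable fp:  %#x\n", disable_closure (F_FP));    /* 0x7 */
  return 0;
}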
7967
7968 static int
7969 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
7970 bfd_boolean ext_only)
7971 {
7972 /* We insist on extensions being added before being removed. We achieve
7973 this by using the ADDING_VALUE variable to indicate whether we are
7974 adding an extension (1) or removing it (0) and only allowing it to
7975 change in the order -1 -> 1 -> 0. */
7976 int adding_value = -1;
7977 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
7978
7979 /* Copy the feature set, so that we can modify it. */
7980 *ext_set = **opt_p;
7981 *opt_p = ext_set;
7982
7983 while (str != NULL && *str != 0)
7984 {
7985 const struct aarch64_option_cpu_value_table *opt;
7986 const char *ext = NULL;
7987 int optlen;
7988
7989 if (!ext_only)
7990 {
7991 if (*str != '+')
7992 {
7993 as_bad (_("invalid architectural extension"));
7994 return 0;
7995 }
7996
7997 ext = strchr (++str, '+');
7998 }
7999
8000 if (ext != NULL)
8001 optlen = ext - str;
8002 else
8003 optlen = strlen (str);
8004
8005 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8006 {
8007 if (adding_value != 0)
8008 adding_value = 0;
8009 optlen -= 2;
8010 str += 2;
8011 }
8012 else if (optlen > 0)
8013 {
8014 if (adding_value == -1)
8015 adding_value = 1;
8016 else if (adding_value != 1)
8017 {
8018 as_bad (_("must specify extensions to add before specifying "
8019 "those to remove"));
8020 return 0;
8021 }
8022 }
8023
8024 if (optlen == 0)
8025 {
8026 as_bad (_("missing architectural extension"));
8027 return 0;
8028 }
8029
8030 gas_assert (adding_value != -1);
8031
8032 for (opt = aarch64_features; opt->name != NULL; opt++)
8033 if (strncmp (opt->name, str, optlen) == 0)
8034 {
8035 aarch64_feature_set set;
8036
8037 /* Add or remove the extension. */
8038 if (adding_value)
8039 {
8040 set = aarch64_feature_enable_set (opt->value);
8041 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8042 }
8043 else
8044 {
8045 set = aarch64_feature_disable_set (opt->value);
8046 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8047 }
8048 break;
8049 }
8050
8051 if (opt->name == NULL)
8052 {
8053 as_bad (_("unknown architectural extension `%s'"), str);
8054 return 0;
8055 }
8056
8057 str = ext;
8058 }
8059
8060 return 1;
8061 }
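aarch64_parse_features consumes strings of the form "+ext1+ext2+noext3": each '+'-separated token either adds an extension or, with a "no" prefix, removes it together with its closure, and all additions must come before any removals. A stripped-down, hypothetical sketch of just that tokenising state machine (no feature sets, and extension names are not validated):

#include <stdio.h>
#include <string.h>

/* Walk a "+ext+noext..." extension string the way the parser above does:
   additions first, then removals.  Illustration only - nothing is recorded
   beyond a printout.  */
static int
walk_extensions (const char *str)
{
  int adding_value = -1;	/* -1: unset, 1: adding, 0: removing.  */

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t optlen;

      if (*str != '+')
	return 0;		/* Every token must start with '+'.  */
      ext = strchr (++str, '+');
      optlen = ext ? (size_t) (ext - str) : strlen (str);

      if (optlen >= 2 && strncmp (str, "no", 2) == 0)
	{
	  adding_value = 0;	/* From now on, only removals.  */
	  str += 2;
	  optlen -= 2;
	}
      else if (adding_value == 0)
	return 0;		/* An addition after a removal is rejected.  */
      else
	adding_value = 1;

      printf ("%s %.*s\n", adding_value ? "add" : "remove",
	      (int) optlen, str);
      str = ext;
    }
  return 1;
}

int
main (void)
{
  int ok1 = walk_extensions ("+crc+nofp");	/* add crc, remove fp.  */
  int ok2 = walk_extensions ("+nofp+crc");	/* fails: add after remove.  */
  printf ("first: %s, second: %s\n", ok1 ? "ok" : "rejected",
	  ok2 ? "ok" : "rejected");
  return 0;
}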
8062
8063 static int
8064 aarch64_parse_cpu (const char *str)
8065 {
8066 const struct aarch64_cpu_option_table *opt;
8067 const char *ext = strchr (str, '+');
8068 size_t optlen;
8069
8070 if (ext != NULL)
8071 optlen = ext - str;
8072 else
8073 optlen = strlen (str);
8074
8075 if (optlen == 0)
8076 {
8077 as_bad (_("missing cpu name `%s'"), str);
8078 return 0;
8079 }
8080
8081 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8082 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8083 {
8084 mcpu_cpu_opt = &opt->value;
8085 if (ext != NULL)
8086 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8087
8088 return 1;
8089 }
8090
8091 as_bad (_("unknown cpu `%s'"), str);
8092 return 0;
8093 }
8094
8095 static int
8096 aarch64_parse_arch (const char *str)
8097 {
8098 const struct aarch64_arch_option_table *opt;
8099 const char *ext = strchr (str, '+');
8100 size_t optlen;
8101
8102 if (ext != NULL)
8103 optlen = ext - str;
8104 else
8105 optlen = strlen (str);
8106
8107 if (optlen == 0)
8108 {
8109 as_bad (_("missing architecture name `%s'"), str);
8110 return 0;
8111 }
8112
8113 for (opt = aarch64_archs; opt->name != NULL; opt++)
8114 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8115 {
8116 march_cpu_opt = &opt->value;
8117 if (ext != NULL)
8118 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8119
8120 return 1;
8121 }
8122
8123 as_bad (_("unknown architecture `%s'"), str);
8124 return 0;
8125 }
8126
8127 /* ABIs. */
8128 struct aarch64_option_abi_value_table
8129 {
8130 const char *name;
8131 enum aarch64_abi_type value;
8132 };
8133
8134 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8135 {"ilp32", AARCH64_ABI_ILP32},
8136 {"lp64", AARCH64_ABI_LP64},
8137 };
8138
8139 static int
8140 aarch64_parse_abi (const char *str)
8141 {
8142 unsigned int i;
8143
8144 if (str[0] == '\0')
8145 {
8146 as_bad (_("missing abi name `%s'"), str);
8147 return 0;
8148 }
8149
8150 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8151 if (strcmp (str, aarch64_abis[i].name) == 0)
8152 {
8153 aarch64_abi = aarch64_abis[i].value;
8154 return 1;
8155 }
8156
8157 as_bad (_("unknown abi `%s'"), str);
8158 return 0;
8159 }
8160
8161 static struct aarch64_long_option_table aarch64_long_opts[] = {
8162 #ifdef OBJ_ELF
8163 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8164 aarch64_parse_abi, NULL},
8165 #endif /* OBJ_ELF */
8166 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8167 aarch64_parse_cpu, NULL},
8168 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8169 aarch64_parse_arch, NULL},
8170 {NULL, NULL, 0, NULL}
8171 };
8172
8173 int
8174 md_parse_option (int c, const char *arg)
8175 {
8176 struct aarch64_option_table *opt;
8177 struct aarch64_long_option_table *lopt;
8178
8179 switch (c)
8180 {
8181 #ifdef OPTION_EB
8182 case OPTION_EB:
8183 target_big_endian = 1;
8184 break;
8185 #endif
8186
8187 #ifdef OPTION_EL
8188 case OPTION_EL:
8189 target_big_endian = 0;
8190 break;
8191 #endif
8192
8193 case 'a':
8194 /* Listing option. Just ignore these, we don't support additional
8195 ones. */
8196 return 0;
8197
8198 default:
8199 for (opt = aarch64_opts; opt->option != NULL; opt++)
8200 {
8201 if (c == opt->option[0]
8202 && ((arg == NULL && opt->option[1] == 0)
8203 || streq (arg, opt->option + 1)))
8204 {
8205 /* If the option is deprecated, tell the user. */
8206 if (opt->deprecated != NULL)
8207 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8208 arg ? arg : "", _(opt->deprecated));
8209
8210 if (opt->var != NULL)
8211 *opt->var = opt->value;
8212
8213 return 1;
8214 }
8215 }
8216
8217 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8218 {
8219 /* These options are expected to have an argument. */
8220 if (c == lopt->option[0]
8221 && arg != NULL
8222 && strncmp (arg, lopt->option + 1,
8223 strlen (lopt->option + 1)) == 0)
8224 {
8225 /* If the option is deprecated, tell the user. */
8226 if (lopt->deprecated != NULL)
8227 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8228 _(lopt->deprecated));
8229
8230 /* Call the sub-option parser. */
8231 return lopt->func (arg + strlen (lopt->option) - 1);
8232 }
8233 }
8234
8235 return 0;
8236 }
8237
8238 return 1;
8239 }
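With md_shortopts set to "m:", a command line such as -mcpu=cortex-a53+crc reaches md_parse_option with c == 'm' and arg == "cpu=cortex-a53+crc". The long-option loop matches the table entry "mcpu=" because its first character equals c and the remainder is a prefix of arg, and arg + strlen (lopt->option) - 1 then skips past "cpu=" so the sub-option parser only sees "cortex-a53+crc". A small hypothetical check of that pointer arithmetic:

#include <assert.h>
#include <string.h>

int
main (void)
{
  const char *option = "mcpu=";			/* Table entry.  */
  const char *arg = "cpu=cortex-a53+crc";	/* What getopt passes for -m.  */

  /* The leading 'm' was already consumed as the short option; the rest of
     the table entry must be a prefix of ARG.  */
  assert (strncmp (arg, option + 1, strlen (option + 1)) == 0);

  /* strlen ("mcpu=") - 1 == 4, so this points at "cortex-a53+crc".  */
  assert (strcmp (arg + strlen (option) - 1, "cortex-a53+crc") == 0);
  return 0;
}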
8240
8241 void
8242 md_show_usage (FILE * fp)
8243 {
8244 struct aarch64_option_table *opt;
8245 struct aarch64_long_option_table *lopt;
8246
8247 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8248
8249 for (opt = aarch64_opts; opt->option != NULL; opt++)
8250 if (opt->help != NULL)
8251 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8252
8253 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8254 if (lopt->help != NULL)
8255 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8256
8257 #ifdef OPTION_EB
8258 fprintf (fp, _("\
8259 -EB assemble code for a big-endian cpu\n"));
8260 #endif
8261
8262 #ifdef OPTION_EL
8263 fprintf (fp, _("\
8264 -EL assemble code for a little-endian cpu\n"));
8265 #endif
8266 }
8267
8268 /* Parse a .cpu directive. */
8269
8270 static void
8271 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8272 {
8273 const struct aarch64_cpu_option_table *opt;
8274 char saved_char;
8275 char *name;
8276 char *ext;
8277 size_t optlen;
8278
8279 name = input_line_pointer;
8280 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8281 input_line_pointer++;
8282 saved_char = *input_line_pointer;
8283 *input_line_pointer = 0;
8284
8285 ext = strchr (name, '+');
8286
8287 if (ext != NULL)
8288 optlen = ext - name;
8289 else
8290 optlen = strlen (name);
8291
8292 /* Skip the first "all" entry. */
8293 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8294 if (strlen (opt->name) == optlen
8295 && strncmp (name, opt->name, optlen) == 0)
8296 {
8297 mcpu_cpu_opt = &opt->value;
8298 if (ext != NULL)
8299 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8300 return;
8301
8302 cpu_variant = *mcpu_cpu_opt;
8303
8304 *input_line_pointer = saved_char;
8305 demand_empty_rest_of_line ();
8306 return;
8307 }
8308 as_bad (_("unknown cpu `%s'"), name);
8309 *input_line_pointer = saved_char;
8310 ignore_rest_of_line ();
8311 }
8312
8313
8314 /* Parse a .arch directive. */
8315
8316 static void
8317 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8318 {
8319 const struct aarch64_arch_option_table *opt;
8320 char saved_char;
8321 char *name;
8322 char *ext;
8323 size_t optlen;
8324
8325 name = input_line_pointer;
8326 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8327 input_line_pointer++;
8328 saved_char = *input_line_pointer;
8329 *input_line_pointer = 0;
8330
8331 ext = strchr (name, '+');
8332
8333 if (ext != NULL)
8334 optlen = ext - name;
8335 else
8336 optlen = strlen (name);
8337
8338 /* Skip the first "all" entry. */
8339 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8340 if (strlen (opt->name) == optlen
8341 && strncmp (name, opt->name, optlen) == 0)
8342 {
8343 mcpu_cpu_opt = &opt->value;
8344 if (ext != NULL)
8345 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8346 return;
8347
8348 cpu_variant = *mcpu_cpu_opt;
8349
8350 *input_line_pointer = saved_char;
8351 demand_empty_rest_of_line ();
8352 return;
8353 }
8354
8355 as_bad (_("unknown architecture `%s'"), name);
8356 *input_line_pointer = saved_char;
8357 ignore_rest_of_line ();
8358 }
8359
8360 /* Parse a .arch_extension directive. */
8361
8362 static void
8363 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8364 {
8365 char saved_char;
8366 char *ext = input_line_pointer;
8367
8368 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8369 input_line_pointer++;
8370 saved_char = *input_line_pointer;
8371 *input_line_pointer = 0;
8372
8373 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8374 return;
8375
8376 cpu_variant = *mcpu_cpu_opt;
8377
8378 *input_line_pointer = saved_char;
8379 demand_empty_rest_of_line ();
8380 }
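The .cpu, .arch and .arch_extension handlers above all tokenise the directive argument in place: they scan to the first whitespace, save the character found there, overwrite it with a NUL so ordinary string functions can be used for the table lookup, and restore it before handing the line back to the assembler (or to ignore_rest_of_line on error). A tiny standalone illustration of that save/terminate/restore pattern, on a made-up input line:

#include <ctype.h>
#include <stdio.h>

int
main (void)
{
  char line[] = "cortex-a53+crc   rest of the source line";
  char *p = line;
  char saved;

  /* Advance to the first whitespace, as the directive handlers do.  */
  while (*p && !isspace ((unsigned char) *p))
    p++;

  saved = *p;
  *p = 0;			/* Temporarily terminate the name.  */
  printf ("name: \"%s\"\n", line);	/* -> cortex-a53+crc */

  *p = saved;			/* Restore the line for later processing.  */
  printf ("rest: \"%s\"\n", p);
  return 0;
}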
8381
8382 /* Copy symbol information. */
8383
8384 void
8385 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8386 {
8387 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8388 }