1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
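/* A minimal usage sketch (not taken from the original source) of how these
   helpers are intended to be used by an operand parser: try one template,
   record a cheap error on failure, and let the caller report the saved
   message only if every template for the mnemonic fails.

     clear_error ();
     if (! parse_operands (str, opcode))
       {
         if (! error_p ())
           set_default_error ();
         // Try the next template; get_error_message () / get_error_kind ()
         // are consulted only after all templates have been rejected.
       }
 */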
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
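/* For reference, a sketch of what the X-macro machinery above expands to
   (abbreviated; the real expansion follows AARCH64_REG_TYPES exactly):

     typedef enum
     {
       REG_TYPE_R_32, REG_TYPE_R_64, REG_TYPE_SP_32, ..., REG_TYPE_MAX
     } aarch64_reg_type;

     static const unsigned reg_type_masks[] =
     {
       1 << REG_TYPE_R_32,
       1 << REG_TYPE_R_64,
       ...
       (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64),    // REG_TYPE_R64_SP
       ...
     };

   i.e. each basic type gets its own bit and each MULTI_REG_TYPE gets the
   OR of the bits of its constituent basic types.  */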
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
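/* Typical use of the literal pools declared above, sketched in assembly
   (the "ldr Xt, =imm" pseudo-instruction is what feeds add_to_lit_pool
   further down; .ltorg or .pool then dumps the accumulated entries):

       ldr     x0, =0x1122334455667788  // too wide for a move-immediate,
                                        // so it goes into the 8-byte pool
       ...
       .ltorg                           // emit the pool here, suitably aligned
 */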
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate mant
465 from exp in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing each assembly line, any error message will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is a valid type for TYPE; otherwise
669 return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
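/* Worked example of the subset check above: for "x0" (REG_TYPE_R_64)
   checked against REG_TYPE_R_Z_SP,

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64), and
     reg_type_masks[REG_TYPE_R_Z_SP] has that bit set,

   so the AND equals reg_type_masks[REG_TYPE_R_64] and the check succeeds.
   A "c3" register (REG_TYPE_CN) fails the same check because its bit is
   not part of the R_Z_SP mask.  */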
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
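/* A few examples of the qualifiers accepted above: ".4s" gives NT_s with
   width 4 (4 x 32 == 128 bits) and ".8b" gives NT_b with width 8 (64 bits);
   ".s" alone gives width 0, which parse_typed_reg below treats as the
   indexed-element form; ".3s" fails the width check and ".4h"
   (4 x 16 == 64) is accepted.  */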
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set to TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in reglists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bits 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
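/* Worked example of the encoding described above: for "{ v2.4s, v3.4s }"
   the first register number goes into bits 2-6, the second into bits 7-11
   and num_of_reg - 1 == 1 into bits 0-1, so the function returns
   (2 << 2) | (3 << 7) | 1 == 0x189, while *VECTYPE records NT_s with a
   width of 4.  */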
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1368
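/* For example, assembling

       add     x0, x1, x2
       .word   0x12345678
       add     x3, x4, x5

   into a text section is expected to produce the mapping symbols
   $x (offset 0), $d (offset 4) and $x again (offset 8), so that
   disassemblers and other tools can tell code from data.  */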
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or another data filling directive generates zero-sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check it's size > 0;
1483 if we're at the first frag, check its size > 0;
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment; we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool, as it will be accessed in word-sized (or larger) units.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is
1859 no MAP_DATA symbol pending, so we only align the address during the
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
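/* A minimal usage sketch for the .inst directive handled above; each value
   is emitted in instruction byte order and marked as code:

       .inst   0xd503201f              // encoding of NOP
       .inst   0xd65f03c0              // encoding of RET
 */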
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1908 the .tlsdesccall directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
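/* Sketch of the code sequence .tlsdesccall is normally part of (shown only
   to illustrate why the BLR must immediately follow the directive; the
   relocation spellings follow the usual TLS descriptor convention):

       adrp    x0, :tlsdesc:var
       ldr     x1, [x0, #:tlsdesc_lo12:var]
       add     x0, x0, :tlsdesc_lo12:var
       .tlsdesccall var
       blr     x1
 */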
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function is not intended to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
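
/* Worked example: 1.0f is encoded as 0x3f800000.  Its lower 19 bits are
   zero and bits 30-25 are 011111, matching the pattern 0x3e000000, so
   aarch64_imm_float_p returns TRUE; encode_imm_float_bits then maps it to
   the 8-bit immediate 0x70 (1.0 == 16/16 * power (2, 0)).  */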
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
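
/* Worked example: the double-precision value 1.0 is 0x3ff0000000000000.
   Its lower 32 bits are zero and the high word 0x3ff00000 matches the
   pattern 0x3fc00000, so the value is FMOV-able; *FPWORD is set to the
   equivalent single-precision encoding 0x3f800000, i.e. 1.0f.  */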
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.', 'e' or 'E'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in *RELOC that OPERAND needs a GAS internal fixup;
2249 if NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset or an ADD immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* 15 bit offset into the page containing GOT entry for that symbol. */
2459 {"gotoff_lo15", 0,
2460 0, /* adr_type */
2461 0,
2462 0,
2463 0,
2464 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2465 0},
2466
2467 /* Get to the page containing GOT TLS entry for a symbol */
2468 {"tlsgd", 0,
2469 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2470 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2471 0,
2472 0,
2473 0,
2474 0},
2475
2476 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2477 {"tlsgd_lo12", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2482 0,
2483 0},
2484
2485 /* Get to the page containing GOT TLS entry for a symbol */
2486 {"tlsdesc", 0,
2487 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2488 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2489 0,
2490 0,
2491 0,
2492 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2493
2494 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2495 {"tlsdesc_lo12", 0,
2496 0, /* adr_type */
2497 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2500 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2501 0},
2502
2503 /* Get to the page containing GOT TLS entry for a symbol.
2504 As with GD, we allocate two consecutive GOT slots for the
2505 module index and module offset; the only difference from
2506 GD is that the module offset should be initialized to
2507 zero without any outstanding runtime relocation. */
2508 {"tlsldm", 0,
2509 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2510 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2511 0,
2512 0,
2513 0,
2514 0},
2515
2516 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2517 {"tlsldm_lo12_nc", 0,
2518 0, /* adr_type */
2519 0,
2520 0,
2521 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2522 0,
2523 0},
2524
2525 /* 12 bit offset into the module TLS base address. */
2526 {"dtprel_lo12", 0,
2527 0, /* adr_type */
2528 0,
2529 0,
2530 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2531 0,
2532 0},
2533
2534 /* Get to the page containing GOT TLS entry for a symbol */
2535 {"gottprel", 0,
2536 0, /* adr_type */
2537 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2538 0,
2539 0,
2540 0,
2541 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2542
2543 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2544 {"gottprel_lo12", 0,
2545 0, /* adr_type */
2546 0,
2547 0,
2548 0,
2549 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2550 0},
2551
2552 /* Get tp offset for a symbol. */
2553 {"tprel", 0,
2554 0, /* adr_type */
2555 0,
2556 0,
2557 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2558 0,
2559 0},
2560
2561 /* Get tp offset for a symbol. */
2562 {"tprel_lo12", 0,
2563 0, /* adr_type */
2564 0,
2565 0,
2566 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2567 0,
2568 0},
2569
2570 /* Get tp offset for a symbol. */
2571 {"tprel_hi12", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2576 0,
2577 0},
2578
2579 /* Get tp offset for a symbol. */
2580 {"tprel_lo12_nc", 0,
2581 0, /* adr_type */
2582 0,
2583 0,
2584 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2585 0,
2586 0},
2587
2588 /* Most significant bits 32-47 of address/value: MOVZ. */
2589 {"tprel_g2", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2593 0,
2594 0,
2595 0},
2596
2597 /* Most significant bits 16-31 of address/value: MOVZ. */
2598 {"tprel_g1", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2607 {"tprel_g1_nc", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2611 0,
2612 0,
2613 0},
2614
2615 /* Most significant bits 0-15 of address/value: MOVZ. */
2616 {"tprel_g0", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2620 0,
2621 0,
2622 0},
2623
2624 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2625 {"tprel_g0_nc", 0,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2629 0,
2630 0,
2631 0},
2632
2633 /* 15 bit offset from GOT entry to base address of GOT table. */
2634 {"gotpage_lo15", 0,
2635 0,
2636 0,
2637 0,
2638 0,
2639 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2640 0},
2641
2642 /* 14 bit offset from GOT entry to base address of GOT table. */
2643 {"gotpage_lo14", 0,
2644 0,
2645 0,
2646 0,
2647 0,
2648 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2649 0},
2650 };
2651
2652 /* Given the address of a pointer pointing to the textual name of a
2653 relocation as may appear in assembler source, attempt to find its
2654 details in reloc_table. The pointer will be updated to the character
2655 after the trailing colon. On failure, NULL will be returned;
2656 otherwise return the reloc_table_entry. */
2657
2658 static struct reloc_table_entry *
2659 find_reloc_table_entry (char **str)
2660 {
2661 unsigned int i;
2662 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2663 {
2664 int length = strlen (reloc_table[i].name);
2665
2666 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2667 && (*str)[length] == ':')
2668 {
2669 *str += (length + 1);
2670 return &reloc_table[i];
2671 }
2672 }
2673
2674 return NULL;
2675 }
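
/* For example, with *STR pointing at "lo12:sym" (the leading '#' and ':'
   having already been consumed by the caller), the "lo12" entry above is
   returned and *STR is advanced past the trailing colon to "sym"; the
   entry's add_type/ldst_type then give the relocations to be used.  */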
2676
2677 /* Mode argument to parse_shift and parser_shifter_operand. */
2678 enum parse_shift_mode
2679 {
2680 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2681 "#imm{,lsl #n}" */
2682 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2683 "#imm" */
2684 SHIFTED_LSL, /* bare "lsl #n" */
2685 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2686 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2687 };
2688
2689 /* Parse a <shift> operator on an AArch64 data processing instruction.
2690 Return TRUE on success; otherwise return FALSE. */
2691 static bfd_boolean
2692 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2693 {
2694 const struct aarch64_name_value_pair *shift_op;
2695 enum aarch64_modifier_kind kind;
2696 expressionS exp;
2697 int exp_has_prefix;
2698 char *s = *str;
2699 char *p = s;
2700
2701 for (p = *str; ISALPHA (*p); p++)
2702 ;
2703
2704 if (p == *str)
2705 {
2706 set_syntax_error (_("shift expression expected"));
2707 return FALSE;
2708 }
2709
2710 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2711
2712 if (shift_op == NULL)
2713 {
2714 set_syntax_error (_("shift operator expected"));
2715 return FALSE;
2716 }
2717
2718 kind = aarch64_get_operand_modifier (shift_op);
2719
2720 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2721 {
2722 set_syntax_error (_("invalid use of 'MSL'"));
2723 return FALSE;
2724 }
2725
2726 switch (mode)
2727 {
2728 case SHIFTED_LOGIC_IMM:
2729 if (aarch64_extend_operator_p (kind) == TRUE)
2730 {
2731 set_syntax_error (_("extending shift is not permitted"));
2732 return FALSE;
2733 }
2734 break;
2735
2736 case SHIFTED_ARITH_IMM:
2737 if (kind == AARCH64_MOD_ROR)
2738 {
2739 set_syntax_error (_("'ROR' shift is not permitted"));
2740 return FALSE;
2741 }
2742 break;
2743
2744 case SHIFTED_LSL:
2745 if (kind != AARCH64_MOD_LSL)
2746 {
2747 set_syntax_error (_("only 'LSL' shift is permitted"));
2748 return FALSE;
2749 }
2750 break;
2751
2752 case SHIFTED_REG_OFFSET:
2753 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2754 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2755 {
2756 set_fatal_syntax_error
2757 (_("invalid shift for the register offset addressing mode"));
2758 return FALSE;
2759 }
2760 break;
2761
2762 case SHIFTED_LSL_MSL:
2763 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2764 {
2765 set_syntax_error (_("invalid shift operator"));
2766 return FALSE;
2767 }
2768 break;
2769
2770 default:
2771 abort ();
2772 }
2773
2774 /* Whitespace can appear here if the next thing is a bare digit. */
2775 skip_whitespace (p);
2776
2777 /* Parse shift amount. */
2778 exp_has_prefix = 0;
2779 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2780 exp.X_op = O_absent;
2781 else
2782 {
2783 if (is_immediate_prefix (*p))
2784 {
2785 p++;
2786 exp_has_prefix = 1;
2787 }
2788 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2789 }
2790 if (exp.X_op == O_absent)
2791 {
2792 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2793 {
2794 set_syntax_error (_("missing shift amount"));
2795 return FALSE;
2796 }
2797 operand->shifter.amount = 0;
2798 }
2799 else if (exp.X_op != O_constant)
2800 {
2801 set_syntax_error (_("constant shift amount required"));
2802 return FALSE;
2803 }
2804 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2805 {
2806 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2807 return FALSE;
2808 }
2809 else
2810 {
2811 operand->shifter.amount = exp.X_add_number;
2812 operand->shifter.amount_present = 1;
2813 }
2814
2815 operand->shifter.operator_present = 1;
2816 operand->shifter.kind = kind;
2817
2818 *str = p;
2819 return TRUE;
2820 }
2821
2822 /* Parse a <shifter_operand> for a data processing instruction:
2823
2824 #<immediate>
2825 #<immediate>, LSL #imm
2826
2827 Validation of immediate operands is deferred to md_apply_fix.
2828
2829 Return TRUE on success; otherwise return FALSE. */
2830
2831 static bfd_boolean
2832 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2833 enum parse_shift_mode mode)
2834 {
2835 char *p;
2836
2837 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2838 return FALSE;
2839
2840 p = *str;
2841
2842 /* Accept an immediate expression. */
2843 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2844 return FALSE;
2845
2846 /* Accept optional LSL for arithmetic immediate values. */
2847 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2848 if (! parse_shift (&p, operand, SHIFTED_LSL))
2849 return FALSE;
2850
2851 /* Do not accept any shifter for logical immediate values. */
2852 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2853 && parse_shift (&p, operand, mode))
2854 {
2855 set_syntax_error (_("unexpected shift operator"));
2856 return FALSE;
2857 }
2858
2859 *str = p;
2860 return TRUE;
2861 }
2862
2863 /* Parse a <shifter_operand> for a data processing instruction:
2864
2865 <Rm>
2866 <Rm>, <shift>
2867 #<immediate>
2868 #<immediate>, LSL #imm
2869
2870 where <shift> is handled by parse_shift above, and the last two
2871 cases are handled by the function above.
2872
2873 Validation of immediate operands is deferred to md_apply_fix.
2874
2875 Return TRUE on success; otherwise return FALSE. */
2876
2877 static bfd_boolean
2878 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2879 enum parse_shift_mode mode)
2880 {
2881 int reg;
2882 int isreg32, isregzero;
2883 enum aarch64_operand_class opd_class
2884 = aarch64_get_operand_class (operand->type);
2885
2886 if ((reg =
2887 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2888 {
2889 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2890 {
2891 set_syntax_error (_("unexpected register in the immediate operand"));
2892 return FALSE;
2893 }
2894
2895 if (!isregzero && reg == REG_SP)
2896 {
2897 set_syntax_error (BAD_SP);
2898 return FALSE;
2899 }
2900
2901 operand->reg.regno = reg;
2902 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2903
2904 /* Accept optional shift operation on register. */
2905 if (! skip_past_comma (str))
2906 return TRUE;
2907
2908 if (! parse_shift (str, operand, mode))
2909 return FALSE;
2910
2911 return TRUE;
2912 }
2913 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2914 {
2915 set_syntax_error
2916 (_("integer register expected in the extended/shifted operand "
2917 "register"));
2918 return FALSE;
2919 }
2920
2921 /* We have a shifted immediate variant. */
2922 return parse_shifter_operand_imm (str, operand, mode);
2923 }
2924
2925 /* Return TRUE on success; return FALSE otherwise. */
2926
2927 static bfd_boolean
2928 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2929 enum parse_shift_mode mode)
2930 {
2931 char *p = *str;
2932
2933 /* Determine if we have the sequence of characters #: or just :
2934 coming next. If we do, then we check for a :rello: relocation
2935 modifier. If we don't, punt the whole lot to
2936 parse_shifter_operand. */
2937
2938 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2939 {
2940 struct reloc_table_entry *entry;
2941
2942 if (p[0] == '#')
2943 p += 2;
2944 else
2945 p++;
2946 *str = p;
2947
2948 /* Try to parse a relocation. Anything else is an error. */
2949 if (!(entry = find_reloc_table_entry (str)))
2950 {
2951 set_syntax_error (_("unknown relocation modifier"));
2952 return FALSE;
2953 }
2954
2955 if (entry->add_type == 0)
2956 {
2957 set_syntax_error
2958 (_("this relocation modifier is not allowed on this instruction"));
2959 return FALSE;
2960 }
2961
2962 /* Save str before we decompose it. */
2963 p = *str;
2964
2965 /* Next, we parse the expression. */
2966 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2967 return FALSE;
2968
2969 /* Record the relocation type (use the ADD variant here). */
2970 inst.reloc.type = entry->add_type;
2971 inst.reloc.pc_rel = entry->pc_rel;
2972
2973 /* If str is empty, we've reached the end, stop here. */
2974 if (**str == '\0')
2975 return TRUE;
2976
2977 /* Otherwise, we have a shifted reloc modifier, so rewind to
2978 recover the variable name and continue parsing for the shifter. */
2979 *str = p;
2980 return parse_shifter_operand_imm (str, operand, mode);
2981 }
2982
2983 return parse_shifter_operand (str, operand, mode);
2984 }
2985
2986 /* Parse all forms of an address expression. Information is written
2987 to *OPERAND and/or inst.reloc.
2988
2989 The A64 instruction set has the following addressing modes:
2990
2991 Offset
2992 [base] // in SIMD ld/st structure
2993 [base{,#0}] // in ld/st exclusive
2994 [base{,#imm}]
2995 [base,Xm{,LSL #imm}]
2996 [base,Xm,SXTX {#imm}]
2997 [base,Wm,(S|U)XTW {#imm}]
2998 Pre-indexed
2999 [base,#imm]!
3000 Post-indexed
3001 [base],#imm
3002 [base],Xm // in SIMD ld/st structure
3003 PC-relative (literal)
3004 label
3005 =immediate
3006
3007 (As a convenience, the notation "=immediate" is permitted in conjunction
3008 with the pc-relative literal load instructions to automatically place an
3009 immediate value or symbolic address in a nearby literal pool and generate
3010 a hidden label which references it.)
3011
3012 Upon a successful parsing, the address structure in *OPERAND will be
3013 filled in the following way:
3014
3015 .base_regno = <base>
3016 .offset.is_reg // 1 if the offset is a register
3017 .offset.imm = <imm>
3018 .offset.regno = <Rm>
3019
3020 For different addressing modes defined in the A64 ISA:
3021
3022 Offset
3023 .pcrel=0; .preind=1; .postind=0; .writeback=0
3024 Pre-indexed
3025 .pcrel=0; .preind=1; .postind=0; .writeback=1
3026 Post-indexed
3027 .pcrel=0; .preind=0; .postind=1; .writeback=1
3028 PC-relative (literal)
3029 .pcrel=1; .preind=1; .postind=0; .writeback=0
3030
3031 The shift/extension information, if any, will be stored in .shifter.
3032
3033 It is the caller's responsibility to check for addressing modes not
3034 supported by the instruction, and to set inst.reloc.type. */
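
/* For example, parsing "[x1,w2,sxtw #2]" sets .base_regno = 1,
   .offset.regno = 2, .offset.is_reg = 1, .shifter.kind = AARCH64_MOD_SXTW,
   .shifter.amount = 2, .preind = 1, .postind = 0 and .writeback = 0.  */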
3035
3036 static bfd_boolean
3037 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3038 int accept_reg_post_index)
3039 {
3040 char *p = *str;
3041 int reg;
3042 int isreg32, isregzero;
3043 expressionS *exp = &inst.reloc.exp;
3044
3045 if (! skip_past_char (&p, '['))
3046 {
3047 /* =immediate or label. */
3048 operand->addr.pcrel = 1;
3049 operand->addr.preind = 1;
3050
3051 /* #:<reloc_op>:<symbol> */
3052 skip_past_char (&p, '#');
3053 if (reloc && skip_past_char (&p, ':'))
3054 {
3055 bfd_reloc_code_real_type ty;
3056 struct reloc_table_entry *entry;
3057
3058 /* Try to parse a relocation modifier. Anything else is
3059 an error. */
3060 entry = find_reloc_table_entry (&p);
3061 if (! entry)
3062 {
3063 set_syntax_error (_("unknown relocation modifier"));
3064 return FALSE;
3065 }
3066
3067 switch (operand->type)
3068 {
3069 case AARCH64_OPND_ADDR_PCREL21:
3070 /* adr */
3071 ty = entry->adr_type;
3072 break;
3073
3074 default:
3075 ty = entry->ld_literal_type;
3076 break;
3077 }
3078
3079 if (ty == 0)
3080 {
3081 set_syntax_error
3082 (_("this relocation modifier is not allowed on this "
3083 "instruction"));
3084 return FALSE;
3085 }
3086
3087 /* #:<reloc_op>: */
3088 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3089 {
3090 set_syntax_error (_("invalid relocation expression"));
3091 return FALSE;
3092 }
3093
3094 /* #:<reloc_op>:<expr> */
3095 /* Record the relocation type. */
3096 inst.reloc.type = ty;
3097 inst.reloc.pc_rel = entry->pc_rel;
3098 }
3099 else
3100 {
3101
3102 if (skip_past_char (&p, '='))
3103 /* =immediate; need to generate the literal in the literal pool. */
3104 inst.gen_lit_pool = 1;
3105
3106 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3107 {
3108 set_syntax_error (_("invalid address"));
3109 return FALSE;
3110 }
3111 }
3112
3113 *str = p;
3114 return TRUE;
3115 }
3116
3117 /* [ */
3118
3119 /* Accept SP and reject ZR */
3120 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3121 if (reg == PARSE_FAIL || isreg32)
3122 {
3123 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3124 return FALSE;
3125 }
3126 operand->addr.base_regno = reg;
3127
3128 /* [Xn */
3129 if (skip_past_comma (&p))
3130 {
3131 /* [Xn, */
3132 operand->addr.preind = 1;
3133
3134 /* Reject SP and accept ZR */
3135 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3136 if (reg != PARSE_FAIL)
3137 {
3138 /* [Xn,Rm */
3139 operand->addr.offset.regno = reg;
3140 operand->addr.offset.is_reg = 1;
3141 /* Shifted index. */
3142 if (skip_past_comma (&p))
3143 {
3144 /* [Xn,Rm, */
3145 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3146 /* Use the diagnostics set in parse_shift, so do not set a
3147 new error message here. */
3148 return FALSE;
3149 }
3150 /* We only accept:
3151 [base,Xm{,LSL #imm}]
3152 [base,Xm,SXTX {#imm}]
3153 [base,Wm,(S|U)XTW {#imm}] */
3154 if (operand->shifter.kind == AARCH64_MOD_NONE
3155 || operand->shifter.kind == AARCH64_MOD_LSL
3156 || operand->shifter.kind == AARCH64_MOD_SXTX)
3157 {
3158 if (isreg32)
3159 {
3160 set_syntax_error (_("invalid use of 32-bit register offset"));
3161 return FALSE;
3162 }
3163 }
3164 else if (!isreg32)
3165 {
3166 set_syntax_error (_("invalid use of 64-bit register offset"));
3167 return FALSE;
3168 }
3169 }
3170 else
3171 {
3172 /* [Xn,#:<reloc_op>:<symbol> */
3173 skip_past_char (&p, '#');
3174 if (reloc && skip_past_char (&p, ':'))
3175 {
3176 struct reloc_table_entry *entry;
3177
3178 /* Try to parse a relocation modifier. Anything else is
3179 an error. */
3180 if (!(entry = find_reloc_table_entry (&p)))
3181 {
3182 set_syntax_error (_("unknown relocation modifier"));
3183 return FALSE;
3184 }
3185
3186 if (entry->ldst_type == 0)
3187 {
3188 set_syntax_error
3189 (_("this relocation modifier is not allowed on this "
3190 "instruction"));
3191 return FALSE;
3192 }
3193
3194 /* [Xn,#:<reloc_op>: */
3195 /* We now have the group relocation table entry corresponding to
3196 the name in the assembler source. Next, we parse the
3197 expression. */
3198 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3199 {
3200 set_syntax_error (_("invalid relocation expression"));
3201 return FALSE;
3202 }
3203
3204 /* [Xn,#:<reloc_op>:<expr> */
3205 /* Record the load/store relocation type. */
3206 inst.reloc.type = entry->ldst_type;
3207 inst.reloc.pc_rel = entry->pc_rel;
3208 }
3209 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3210 {
3211 set_syntax_error (_("invalid expression in the address"));
3212 return FALSE;
3213 }
3214 /* [Xn,<expr> */
3215 }
3216 }
3217
3218 if (! skip_past_char (&p, ']'))
3219 {
3220 set_syntax_error (_("']' expected"));
3221 return FALSE;
3222 }
3223
3224 if (skip_past_char (&p, '!'))
3225 {
3226 if (operand->addr.preind && operand->addr.offset.is_reg)
3227 {
3228 set_syntax_error (_("register offset not allowed in pre-indexed "
3229 "addressing mode"));
3230 return FALSE;
3231 }
3232 /* [Xn]! */
3233 operand->addr.writeback = 1;
3234 }
3235 else if (skip_past_comma (&p))
3236 {
3237 /* [Xn], */
3238 operand->addr.postind = 1;
3239 operand->addr.writeback = 1;
3240
3241 if (operand->addr.preind)
3242 {
3243 set_syntax_error (_("cannot combine pre- and post-indexing"));
3244 return FALSE;
3245 }
3246
3247 if (accept_reg_post_index
3248 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3249 &isregzero)) != PARSE_FAIL)
3250 {
3251 /* [Xn],Xm */
3252 if (isreg32)
3253 {
3254 set_syntax_error (_("invalid 32-bit register offset"));
3255 return FALSE;
3256 }
3257 operand->addr.offset.regno = reg;
3258 operand->addr.offset.is_reg = 1;
3259 }
3260 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3261 {
3262 /* [Xn],#expr */
3263 set_syntax_error (_("invalid expression in the address"));
3264 return FALSE;
3265 }
3266 }
3267
3268 /* If at this point neither .preind nor .postind is set, we have a
3269 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3270 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3271 {
3272 if (operand->addr.writeback)
3273 {
3274 /* Reject [Rn]! */
3275 set_syntax_error (_("missing offset in the pre-indexed address"));
3276 return FALSE;
3277 }
3278 operand->addr.preind = 1;
3279 inst.reloc.exp.X_op = O_constant;
3280 inst.reloc.exp.X_add_number = 0;
3281 }
3282
3283 *str = p;
3284 return TRUE;
3285 }
3286
3287 /* Return TRUE on success; otherwise return FALSE. */
3288 static bfd_boolean
3289 parse_address (char **str, aarch64_opnd_info *operand,
3290 int accept_reg_post_index)
3291 {
3292 return parse_address_main (str, operand, 0, accept_reg_post_index);
3293 }
3294
3295 /* Return TRUE on success; otherwise return FALSE. */
3296 static bfd_boolean
3297 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3298 {
3299 return parse_address_main (str, operand, 1, 0);
3300 }
3301
3302 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3303 Return TRUE on success; otherwise return FALSE. */
3304 static bfd_boolean
3305 parse_half (char **str, int *internal_fixup_p)
3306 {
3307 char *p, *saved;
3308 int dummy;
3309
3310 p = *str;
3311 skip_past_char (&p, '#');
3312
3313 gas_assert (internal_fixup_p);
3314 *internal_fixup_p = 0;
3315
3316 if (*p == ':')
3317 {
3318 struct reloc_table_entry *entry;
3319
3320 /* Try to parse a relocation. Anything else is an error. */
3321 ++p;
3322 if (!(entry = find_reloc_table_entry (&p)))
3323 {
3324 set_syntax_error (_("unknown relocation modifier"));
3325 return FALSE;
3326 }
3327
3328 if (entry->movw_type == 0)
3329 {
3330 set_syntax_error
3331 (_("this relocation modifier is not allowed on this instruction"));
3332 return FALSE;
3333 }
3334
3335 inst.reloc.type = entry->movw_type;
3336 }
3337 else
3338 *internal_fixup_p = 1;
3339
3340 /* Avoid parsing a register as a general symbol. */
3341 saved = p;
3342 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3343 return FALSE;
3344 p = saved;
3345
3346 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3347 return FALSE;
3348
3349 *str = p;
3350 return TRUE;
3351 }
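
/* For example, in "movz x0, #:abs_g1:sym" the operand parsed here selects
   the "abs_g1" movw_type BFD_RELOC_AARCH64_MOVW_G1, whereas a plain
   "#imm" operand leaves inst.reloc.type alone and sets *INTERNAL_FIXUP_P
   instead.  */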
3352
3353 /* Parse an operand for an ADRP instruction:
3354 ADRP <Xd>, <label>
3355 Return TRUE on success; otherwise return FALSE. */
3356
3357 static bfd_boolean
3358 parse_adrp (char **str)
3359 {
3360 char *p;
3361
3362 p = *str;
3363 if (*p == ':')
3364 {
3365 struct reloc_table_entry *entry;
3366
3367 /* Try to parse a relocation. Anything else is an error. */
3368 ++p;
3369 if (!(entry = find_reloc_table_entry (&p)))
3370 {
3371 set_syntax_error (_("unknown relocation modifier"));
3372 return FALSE;
3373 }
3374
3375 if (entry->adrp_type == 0)
3376 {
3377 set_syntax_error
3378 (_("this relocation modifier is not allowed on this instruction"));
3379 return FALSE;
3380 }
3381
3382 inst.reloc.type = entry->adrp_type;
3383 }
3384 else
3385 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3386
3387 inst.reloc.pc_rel = 1;
3388
3389 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3390 return FALSE;
3391
3392 *str = p;
3393 return TRUE;
3394 }
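
/* For example, "adrp x0, :got:sym" selects the "got" adrp_type
   BFD_RELOC_AARCH64_ADR_GOT_PAGE, while a bare "adrp x0, sym" falls back
   to BFD_RELOC_AARCH64_ADR_HI21_PCREL; pc_rel is set either way.  */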
3395
3396 /* Miscellaneous. */
3397
3398 /* Parse an option for a preload instruction. Returns the encoding for the
3399 option, or PARSE_FAIL. */
3400
3401 static int
3402 parse_pldop (char **str)
3403 {
3404 char *p, *q;
3405 const struct aarch64_name_value_pair *o;
3406
3407 p = q = *str;
3408 while (ISALNUM (*q))
3409 q++;
3410
3411 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3412 if (!o)
3413 return PARSE_FAIL;
3414
3415 *str = q;
3416 return o->value;
3417 }
3418
3419 /* Parse an option for a barrier instruction. Returns the encoding for the
3420 option, or PARSE_FAIL. */
3421
3422 static int
3423 parse_barrier (char **str)
3424 {
3425 char *p, *q;
3426 const asm_barrier_opt *o;
3427
3428 p = q = *str;
3429 while (ISALPHA (*q))
3430 q++;
3431
3432 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3433 if (!o)
3434 return PARSE_FAIL;
3435
3436 *str = q;
3437 return o->value;
3438 }
3439
3440 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3441 Returns the encoding for the option, or PARSE_FAIL.
3442
3443 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3444 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3445
3446 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3447 field, otherwise as a system register.
3448 */
3449
3450 static int
3451 parse_sys_reg (char **str, struct hash_control *sys_regs,
3452 int imple_defined_p, int pstatefield_p)
3453 {
3454 char *p, *q;
3455 char buf[32];
3456 const aarch64_sys_reg *o;
3457 int value;
3458
3459 p = buf;
3460 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3461 if (p < buf + 31)
3462 *p++ = TOLOWER (*q);
3463 *p = '\0';
3464 /* Assert that BUF is large enough. */
3465 gas_assert (p - buf == q - *str);
3466
3467 o = hash_find (sys_regs, buf);
3468 if (!o)
3469 {
3470 if (!imple_defined_p)
3471 return PARSE_FAIL;
3472 else
3473 {
3474 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3475 unsigned int op0, op1, cn, cm, op2;
3476
3477 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3478 != 5)
3479 return PARSE_FAIL;
3480 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3481 return PARSE_FAIL;
3482 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3483 }
3484 }
3485 else
3486 {
3487 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3488 as_bad (_("selected processor does not support PSTATE field "
3489 "name '%s'"), buf);
3490 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3491 as_bad (_("selected processor does not support system register "
3492 "name '%s'"), buf);
3493 if (aarch64_sys_reg_deprecated_p (o))
3494 as_warn (_("system register name '%s' is deprecated and may be "
3495 "removed in a future release"), buf);
3496 value = o->value;
3497 }
3498
3499 *str = q;
3500 return value;
3501 }
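
/* For example, an implementation defined name such as "s3_0_c15_c2_0"
   that is not found in SYS_REGS is decomposed into op0=3, op1=0, Cn=15,
   Cm=2, op2=0 and encoded as (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3)
   | 0, i.e. 0xc790.  */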
3502
3503 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3504 for the option, or NULL. */
3505
3506 static const aarch64_sys_ins_reg *
3507 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3508 {
3509 char *p, *q;
3510 char buf[32];
3511 const aarch64_sys_ins_reg *o;
3512
3513 p = buf;
3514 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3515 if (p < buf + 31)
3516 *p++ = TOLOWER (*q);
3517 *p = '\0';
3518
3519 o = hash_find (sys_ins_regs, buf);
3520 if (!o)
3521 return NULL;
3522
3523 *str = q;
3524 return o;
3525 }
3526 \f
3527 #define po_char_or_fail(chr) do { \
3528 if (! skip_past_char (&str, chr)) \
3529 goto failure; \
3530 } while (0)
3531
3532 #define po_reg_or_fail(regtype) do { \
3533 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3534 if (val == PARSE_FAIL) \
3535 { \
3536 set_default_error (); \
3537 goto failure; \
3538 } \
3539 } while (0)
3540
3541 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3542 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3543 &isreg32, &isregzero); \
3544 if (val == PARSE_FAIL) \
3545 { \
3546 set_default_error (); \
3547 goto failure; \
3548 } \
3549 info->reg.regno = val; \
3550 if (isreg32) \
3551 info->qualifier = AARCH64_OPND_QLF_W; \
3552 else \
3553 info->qualifier = AARCH64_OPND_QLF_X; \
3554 } while (0)
3555
3556 #define po_imm_nc_or_fail() do { \
3557 if (! parse_constant_immediate (&str, &val)) \
3558 goto failure; \
3559 } while (0)
3560
3561 #define po_imm_or_fail(min, max) do { \
3562 if (! parse_constant_immediate (&str, &val)) \
3563 goto failure; \
3564 if (val < min || val > max) \
3565 { \
3566 set_fatal_syntax_error (_("immediate value out of range "\
3567 #min " to "#max)); \
3568 goto failure; \
3569 } \
3570 } while (0)
3571
3572 #define po_misc_or_fail(expr) do { \
3573 if (!expr) \
3574 goto failure; \
3575 } while (0)
3576 \f
3577 /* Encode the 12-bit imm field of Add/sub immediate. */
3578 static inline uint32_t
3579 encode_addsub_imm (uint32_t imm)
3580 {
3581 return imm << 10;
3582 }
3583
3584 /* Encode the shift amount field of Add/sub immediate. */
3585 static inline uint32_t
3586 encode_addsub_imm_shift_amount (uint32_t cnt)
3587 {
3588 return cnt << 22;
3589 }
3590
3591
3592 /* Encode the imm field of Adr instruction. */
3593 static inline uint32_t
3594 encode_adr_imm (uint32_t imm)
3595 {
3596 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3597 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3598 }
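
/* Worked example: encode_adr_imm (0x12345) yields 0x20091a20; immlo
   (bits [1:0] == 0x1) is placed in bits [30:29] and immhi (bits [20:2]
   == 0x48d1) is placed in bits [23:5].  */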
3599
3600 /* Encode the immediate field of Move wide immediate. */
3601 static inline uint32_t
3602 encode_movw_imm (uint32_t imm)
3603 {
3604 return imm << 5;
3605 }
3606
3607 /* Encode the 26-bit offset of unconditional branch. */
3608 static inline uint32_t
3609 encode_branch_ofs_26 (uint32_t ofs)
3610 {
3611 return ofs & ((1 << 26) - 1);
3612 }
3613
3614 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3615 static inline uint32_t
3616 encode_cond_branch_ofs_19 (uint32_t ofs)
3617 {
3618 return (ofs & ((1 << 19) - 1)) << 5;
3619 }
3620
3621 /* Encode the 19-bit offset of ld literal. */
3622 static inline uint32_t
3623 encode_ld_lit_ofs_19 (uint32_t ofs)
3624 {
3625 return (ofs & ((1 << 19) - 1)) << 5;
3626 }
3627
3628 /* Encode the 14-bit offset of test & branch. */
3629 static inline uint32_t
3630 encode_tst_branch_ofs_14 (uint32_t ofs)
3631 {
3632 return (ofs & ((1 << 14) - 1)) << 5;
3633 }
3634
3635 /* Encode the 16-bit imm field of svc/hvc/smc. */
3636 static inline uint32_t
3637 encode_svc_imm (uint32_t imm)
3638 {
3639 return imm << 5;
3640 }
3641
3642 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3643 static inline uint32_t
3644 reencode_addsub_switch_add_sub (uint32_t opcode)
3645 {
3646 return opcode ^ (1 << 30);
3647 }
3648
3649 static inline uint32_t
3650 reencode_movzn_to_movz (uint32_t opcode)
3651 {
3652 return opcode | (1 << 30);
3653 }
3654
3655 static inline uint32_t
3656 reencode_movzn_to_movn (uint32_t opcode)
3657 {
3658 return opcode & ~(1 << 30);
3659 }
3660
3661 /* Overall per-instruction processing. */
3662
3663 /* We need to be able to fix up arbitrary expressions in some statements.
3664 This is so that we can handle symbols that are an arbitrary distance from
3665 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3666 which returns part of an address in a form which will be valid for
3667 a data instruction. We do this by pushing the expression into a symbol
3668 in the expr_section, and creating a fix for that. */
3669
3670 static fixS *
3671 fix_new_aarch64 (fragS * frag,
3672 int where,
3673 short int size, expressionS * exp, int pc_rel, int reloc)
3674 {
3675 fixS *new_fix;
3676
3677 switch (exp->X_op)
3678 {
3679 case O_constant:
3680 case O_symbol:
3681 case O_add:
3682 case O_subtract:
3683 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3684 break;
3685
3686 default:
3687 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3688 pc_rel, reloc);
3689 break;
3690 }
3691 return new_fix;
3692 }
3693 \f
3694 /* Diagnostics on operand errors. */
3695
3696 /* By default, output a verbose error message.
3697 Disable it with -mno-verbose-error. */
3698 static int verbose_error_p = 1;
3699
3700 #ifdef DEBUG_AARCH64
3701 /* N.B. this is only for the purpose of debugging. */
3702 const char* operand_mismatch_kind_names[] =
3703 {
3704 "AARCH64_OPDE_NIL",
3705 "AARCH64_OPDE_RECOVERABLE",
3706 "AARCH64_OPDE_SYNTAX_ERROR",
3707 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3708 "AARCH64_OPDE_INVALID_VARIANT",
3709 "AARCH64_OPDE_OUT_OF_RANGE",
3710 "AARCH64_OPDE_UNALIGNED",
3711 "AARCH64_OPDE_REG_LIST",
3712 "AARCH64_OPDE_OTHER_ERROR",
3713 };
3714 #endif /* DEBUG_AARCH64 */
3715
3716 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3717
3718 When multiple errors of different kinds are found in the same assembly
3719 line, only the error of the highest severity will be picked up for
3720 issuing the diagnostics. */
3721
3722 static inline bfd_boolean
3723 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3724 enum aarch64_operand_error_kind rhs)
3725 {
3726 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3727 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3728 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3729 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3730 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3731 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3732 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3733 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3734 return lhs > rhs;
3735 }
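
/* For example, if both a syntax error and an out-of-range error are found
   on the same assembly line, only the AARCH64_OPDE_OUT_OF_RANGE diagnostic
   is issued, since it is of higher severity.  */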
3736
3737 /* Helper routine to get the mnemonic name from the assembly instruction
3738 line; it should only be called for diagnostic purposes, as a string
3739 copy operation is involved, which may affect the runtime
3740 performance if used elsewhere. */
3741
3742 static const char*
3743 get_mnemonic_name (const char *str)
3744 {
3745 static char mnemonic[32];
3746 char *ptr;
3747
3748 /* Get the first 31 bytes and assume that the full name is included. */
3749 strncpy (mnemonic, str, 31);
3750 mnemonic[31] = '\0';
3751
3752 /* Scan up to the end of the mnemonic, which must end in white space,
3753 '.', or end of string. */
3754 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3755 ;
3756
3757 *ptr = '\0';
3758
3759 /* Append '...' to the truncated long name. */
3760 if (ptr - mnemonic == 31)
3761 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3762
3763 return mnemonic;
3764 }
3765
3766 static void
3767 reset_aarch64_instruction (aarch64_instruction *instruction)
3768 {
3769 memset (instruction, '\0', sizeof (aarch64_instruction));
3770 instruction->reloc.type = BFD_RELOC_UNUSED;
3771 }
3772
3773 /* Data structures storing one user error in the assembly code related to
3774 operands. */
3775
3776 struct operand_error_record
3777 {
3778 const aarch64_opcode *opcode;
3779 aarch64_operand_error detail;
3780 struct operand_error_record *next;
3781 };
3782
3783 typedef struct operand_error_record operand_error_record;
3784
3785 struct operand_errors
3786 {
3787 operand_error_record *head;
3788 operand_error_record *tail;
3789 };
3790
3791 typedef struct operand_errors operand_errors;
3792
3793 /* Top-level data structure reporting user errors for the current line of
3794 the assembly code.
3795 The way md_assemble works is that all opcodes sharing the same mnemonic
3796 name are iterated to find a match to the assembly line. In this data
3797 structure, each such opcode will have one operand_error_record
3798 allocated and inserted. In other words, excess errors related to
3799 a single opcode are disregarded. */
3800 operand_errors operand_error_report;
3801
3802 /* Free record nodes. */
3803 static operand_error_record *free_opnd_error_record_nodes = NULL;
3804
3805 /* Initialize the data structure that stores the operand mismatch
3806 information on assembling one line of the assembly code. */
3807 static void
3808 init_operand_error_report (void)
3809 {
3810 if (operand_error_report.head != NULL)
3811 {
3812 gas_assert (operand_error_report.tail != NULL);
3813 operand_error_report.tail->next = free_opnd_error_record_nodes;
3814 free_opnd_error_record_nodes = operand_error_report.head;
3815 operand_error_report.head = NULL;
3816 operand_error_report.tail = NULL;
3817 return;
3818 }
3819 gas_assert (operand_error_report.tail == NULL);
3820 }
3821
3822 /* Return TRUE if some operand error has been recorded during the
3823 parsing of the current assembly line using the opcode *OPCODE;
3824 otherwise return FALSE. */
3825 static inline bfd_boolean
3826 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3827 {
3828 operand_error_record *record = operand_error_report.head;
3829 return record && record->opcode == opcode;
3830 }
3831
3832 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3833 OPCODE field is initialized with OPCODE.
3834 N.B. there is only one record for each opcode, i.e. at most one error is
3835 recorded for each instruction template. */
3836
3837 static void
3838 add_operand_error_record (const operand_error_record* new_record)
3839 {
3840 const aarch64_opcode *opcode = new_record->opcode;
3841 operand_error_record* record = operand_error_report.head;
3842
3843 /* The record may have been created for this opcode. If not, we need
3844 to prepare one. */
3845 if (! opcode_has_operand_error_p (opcode))
3846 {
3847 /* Get one empty record. */
3848 if (free_opnd_error_record_nodes == NULL)
3849 {
3850 record = xmalloc (sizeof (operand_error_record));
3851 if (record == NULL)
3852 abort ();
3853 }
3854 else
3855 {
3856 record = free_opnd_error_record_nodes;
3857 free_opnd_error_record_nodes = record->next;
3858 }
3859 record->opcode = opcode;
3860 /* Insert at the head. */
3861 record->next = operand_error_report.head;
3862 operand_error_report.head = record;
3863 if (operand_error_report.tail == NULL)
3864 operand_error_report.tail = record;
3865 }
3866 else if (record->detail.kind != AARCH64_OPDE_NIL
3867 && record->detail.index <= new_record->detail.index
3868 && operand_error_higher_severity_p (record->detail.kind,
3869 new_record->detail.kind))
3870 {
3871 /* In the case of multiple errors found on operands related to a
3872 single opcode, only record the error of the leftmost operand and
3873 only if the error is of higher severity. */
3874 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3875 " the existing error %s on operand %d",
3876 operand_mismatch_kind_names[new_record->detail.kind],
3877 new_record->detail.index,
3878 operand_mismatch_kind_names[record->detail.kind],
3879 record->detail.index);
3880 return;
3881 }
3882
3883 record->detail = new_record->detail;
3884 }
3885
3886 static inline void
3887 record_operand_error_info (const aarch64_opcode *opcode,
3888 aarch64_operand_error *error_info)
3889 {
3890 operand_error_record record;
3891 record.opcode = opcode;
3892 record.detail = *error_info;
3893 add_operand_error_record (&record);
3894 }
3895
3896 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3897 error message *ERROR, for operand IDX (count from 0). */
3898
3899 static void
3900 record_operand_error (const aarch64_opcode *opcode, int idx,
3901 enum aarch64_operand_error_kind kind,
3902 const char* error)
3903 {
3904 aarch64_operand_error info;
3905 memset(&info, 0, sizeof (info));
3906 info.index = idx;
3907 info.kind = kind;
3908 info.error = error;
3909 record_operand_error_info (opcode, &info);
3910 }
3911
3912 static void
3913 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3914 enum aarch64_operand_error_kind kind,
3915 const char* error, const int *extra_data)
3916 {
3917 aarch64_operand_error info;
3918 info.index = idx;
3919 info.kind = kind;
3920 info.error = error;
3921 info.data[0] = extra_data[0];
3922 info.data[1] = extra_data[1];
3923 info.data[2] = extra_data[2];
3924 record_operand_error_info (opcode, &info);
3925 }
3926
3927 static void
3928 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3929 const char* error, int lower_bound,
3930 int upper_bound)
3931 {
3932 int data[3] = {lower_bound, upper_bound, 0};
3933 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3934 error, data);
3935 }
3936
3937 /* Remove the operand error record for *OPCODE. */
3938 static void ATTRIBUTE_UNUSED
3939 remove_operand_error_record (const aarch64_opcode *opcode)
3940 {
3941 if (opcode_has_operand_error_p (opcode))
3942 {
3943 operand_error_record* record = operand_error_report.head;
3944 gas_assert (record != NULL && operand_error_report.tail != NULL);
3945 operand_error_report.head = record->next;
3946 record->next = free_opnd_error_record_nodes;
3947 free_opnd_error_record_nodes = record;
3948 if (operand_error_report.head == NULL)
3949 {
3950 gas_assert (operand_error_report.tail == record);
3951 operand_error_report.tail = NULL;
3952 }
3953 }
3954 }
3955
3956 /* Given the instruction in *INSTR, return the index of the best matched
3957 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3958
3959 Return -1 if there is no qualifier sequence; return the first match
3960 if multiple matches are found. */
3961
3962 static int
3963 find_best_match (const aarch64_inst *instr,
3964 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3965 {
3966 int i, num_opnds, max_num_matched, idx;
3967
3968 num_opnds = aarch64_num_of_operands (instr->opcode);
3969 if (num_opnds == 0)
3970 {
3971 DEBUG_TRACE ("no operand");
3972 return -1;
3973 }
3974
3975 max_num_matched = 0;
3976 idx = -1;
3977
3978 /* For each pattern. */
3979 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3980 {
3981 int j, num_matched;
3982 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3983
3984 /* Most opcodes have far fewer patterns in the list. */
3985 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3986 {
3987 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3988 if (i != 0 && idx == -1)
3989 /* If nothing has been matched, return the 1st sequence. */
3990 idx = 0;
3991 break;
3992 }
3993
3994 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3995 if (*qualifiers == instr->operands[j].qualifier)
3996 ++num_matched;
3997
3998 if (num_matched > max_num_matched)
3999 {
4000 max_num_matched = num_matched;
4001 idx = i;
4002 }
4003 }
4004
4005 DEBUG_TRACE ("return with %d", idx);
4006 return idx;
4007 }
4008
4009 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4010 corresponding operands in *INSTR. */
4011
4012 static inline void
4013 assign_qualifier_sequence (aarch64_inst *instr,
4014 const aarch64_opnd_qualifier_t *qualifiers)
4015 {
4016 int i = 0;
4017 int num_opnds = aarch64_num_of_operands (instr->opcode);
4018 gas_assert (num_opnds);
4019 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4020 instr->operands[i].qualifier = *qualifiers;
4021 }
4022
4023 /* Print operands for diagnostic purposes. */
4024
4025 static void
4026 print_operands (char *buf, const aarch64_opcode *opcode,
4027 const aarch64_opnd_info *opnds)
4028 {
4029 int i;
4030
4031 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4032 {
4033 const size_t size = 128;
4034 char str[size];
4035
4036 /* We rely mainly on the operand info in the opcode, but we also look
4037 into inst->operands to support the printing of the optional
4038 operand.
4039 The two operand codes should be the same in all cases, apart from
4040 when the operand can be optional. */
4041 if (opcode->operands[i] == AARCH64_OPND_NIL
4042 || opnds[i].type == AARCH64_OPND_NIL)
4043 break;
4044
4045 /* Generate the operand string in STR. */
4046 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4047
4048 /* Delimiter. */
4049 if (str[0] != '\0')
4050 strcat (buf, i == 0 ? " " : ",");
4051
4052 /* Append the operand string. */
4053 strcat (buf, str);
4054 }
4055 }
4056
4057 /* Send to stderr a string as information. */
4058
4059 static void
4060 output_info (const char *format, ...)
4061 {
4062 char *file;
4063 unsigned int line;
4064 va_list args;
4065
4066 as_where (&file, &line);
4067 if (file)
4068 {
4069 if (line != 0)
4070 fprintf (stderr, "%s:%u: ", file, line);
4071 else
4072 fprintf (stderr, "%s: ", file);
4073 }
4074 fprintf (stderr, _("Info: "));
4075 va_start (args, format);
4076 vfprintf (stderr, format, args);
4077 va_end (args);
4078 (void) putc ('\n', stderr);
4079 }
4080
4081 /* Output one operand error record. */
4082
4083 static void
4084 output_operand_error_record (const operand_error_record *record, char *str)
4085 {
4086 const aarch64_operand_error *detail = &record->detail;
4087 int idx = detail->index;
4088 const aarch64_opcode *opcode = record->opcode;
4089 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4090 : AARCH64_OPND_NIL);
4091
4092 switch (detail->kind)
4093 {
4094 case AARCH64_OPDE_NIL:
4095 gas_assert (0);
4096 break;
4097
4098 case AARCH64_OPDE_SYNTAX_ERROR:
4099 case AARCH64_OPDE_RECOVERABLE:
4100 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4101 case AARCH64_OPDE_OTHER_ERROR:
4102 /* Use the prepared error message if there is one, otherwise use the
4103 operand description string to describe the error. */
4104 if (detail->error != NULL)
4105 {
4106 if (idx < 0)
4107 as_bad (_("%s -- `%s'"), detail->error, str);
4108 else
4109 as_bad (_("%s at operand %d -- `%s'"),
4110 detail->error, idx + 1, str);
4111 }
4112 else
4113 {
4114 gas_assert (idx >= 0);
4115 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4116 aarch64_get_operand_desc (opd_code), str);
4117 }
4118 break;
4119
4120 case AARCH64_OPDE_INVALID_VARIANT:
4121 as_bad (_("operand mismatch -- `%s'"), str);
4122 if (verbose_error_p)
4123 {
4124 /* We will try to correct the erroneous instruction and also provide
4125 more information e.g. all other valid variants.
4126
4127 The string representation of the corrected instruction and other
4128 valid variants are generated by
4129
4130 1) obtaining the intermediate representation of the erroneous
4131 instruction;
4132 2) manipulating the IR, e.g. replacing the operand qualifier;
4133 3) printing out the instruction by calling the printer functions
4134 shared with the disassembler.
4135
4136 The limitation of this method is that the exact input assembly
4137 line cannot be accurately reproduced in some cases, for example an
4138 optional operand present in the actual assembly line will be
4139 omitted in the output; likewise for the optional syntax rules,
4140 e.g. the # before the immediate. Another limitation is that the
4141 assembly symbols and relocation operations in the assembly line
4142 currently cannot be printed out in the error report. Last but not
4143 least, when other errors co-exist with this error, the 'corrected'
4144 instruction may still be incorrect, e.g. given
4145 'ldnp h0,h1,[x0,#6]!'
4146 this diagnosis will provide the version:
4147 'ldnp s0,s1,[x0,#6]!'
4148 which is still not right. */
4149 size_t len = strlen (get_mnemonic_name (str));
4150 int i, qlf_idx;
4151 bfd_boolean result;
4152 const size_t size = 2048;
4153 char buf[size];
4154 aarch64_inst *inst_base = &inst.base;
4155 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4156
4157 /* Init inst. */
4158 reset_aarch64_instruction (&inst);
4159 inst_base->opcode = opcode;
4160
4161 /* Reset the error report so that there is no side effect on the
4162 following operand parsing. */
4163 init_operand_error_report ();
4164
4165 /* Fill inst. */
4166 result = parse_operands (str + len, opcode)
4167 && programmer_friendly_fixup (&inst);
4168 gas_assert (result);
4169 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4170 NULL, NULL);
4171 gas_assert (!result);
4172
4173 /* Find the most matched qualifier sequence. */
4174 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4175 gas_assert (qlf_idx > -1);
4176
4177 /* Assign the qualifiers. */
4178 assign_qualifier_sequence (inst_base,
4179 opcode->qualifiers_list[qlf_idx]);
4180
4181 /* Print the hint. */
4182 output_info (_(" did you mean this?"));
4183 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4184 print_operands (buf, opcode, inst_base->operands);
4185 output_info (_(" %s"), buf);
4186
4187 /* Print out other variant(s) if there is any. */
4188 if (qlf_idx != 0
4189     || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4190 output_info (_(" other valid variant(s):"));
4191
4192 /* For each pattern. */
4193 qualifiers_list = opcode->qualifiers_list;
4194 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4195 {
4196 /* Most opcodes have far fewer patterns in the list.
4197 The first NIL qualifier sequence indicates the end of the list. */
4198 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4199 break;
4200
4201 if (i != qlf_idx)
4202 {
4203 /* Mnemonics name. */
4204 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4205
4206 /* Assign the qualifiers. */
4207 assign_qualifier_sequence (inst_base, *qualifiers_list);
4208
4209 /* Print instruction. */
4210 print_operands (buf, opcode, inst_base->operands);
4211
4212 output_info (_(" %s"), buf);
4213 }
4214 }
4215 }
4216 break;
4217
4218 case AARCH64_OPDE_OUT_OF_RANGE:
4219 if (detail->data[0] != detail->data[1])
4220 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4221 detail->error ? detail->error : _("immediate value"),
4222 detail->data[0], detail->data[1], idx + 1, str);
4223 else
4224 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4225 detail->error ? detail->error : _("immediate value"),
4226 detail->data[0], idx + 1, str);
4227 break;
4228
4229 case AARCH64_OPDE_REG_LIST:
4230 if (detail->data[0] == 1)
4231 as_bad (_("invalid number of registers in the list; "
4232 "only 1 register is expected at operand %d -- `%s'"),
4233 idx + 1, str);
4234 else
4235 as_bad (_("invalid number of registers in the list; "
4236 "%d registers are expected at operand %d -- `%s'"),
4237 detail->data[0], idx + 1, str);
4238 break;
4239
4240 case AARCH64_OPDE_UNALIGNED:
4241 as_bad (_("immediate value should be a multiple of "
4242 "%d at operand %d -- `%s'"),
4243 detail->data[0], idx + 1, str);
4244 break;
4245
4246 default:
4247 gas_assert (0);
4248 break;
4249 }
4250 }
4251
4252 /* Process and output the error message about the operand mismatching.
4253
4254 When this function is called, the operand error information has
4255 been collected for an assembly line and there will be multiple
4256 errors in the case of multiple instruction templates; output the
4257 error message that most closely describes the problem. */
4258
4259 static void
4260 output_operand_error_report (char *str)
4261 {
4262 int largest_error_pos;
4263 const char *msg = NULL;
4264 enum aarch64_operand_error_kind kind;
4265 operand_error_record *curr;
4266 operand_error_record *head = operand_error_report.head;
4267 operand_error_record *record = NULL;
4268
4269 /* No error to report. */
4270 if (head == NULL)
4271 return;
4272
4273 gas_assert (head != NULL && operand_error_report.tail != NULL);
4274
4275 /* Only one error. */
4276 if (head == operand_error_report.tail)
4277 {
4278 DEBUG_TRACE ("single opcode entry with error kind: %s",
4279 operand_mismatch_kind_names[head->detail.kind]);
4280 output_operand_error_record (head, str);
4281 return;
4282 }
4283
4284 /* Find the error kind of the highest severity. */
4285 DEBUG_TRACE ("multiple opcode entries with error kind");
4286 kind = AARCH64_OPDE_NIL;
4287 for (curr = head; curr != NULL; curr = curr->next)
4288 {
4289 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4290 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4291 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4292 kind = curr->detail.kind;
4293 }
4294 gas_assert (kind != AARCH64_OPDE_NIL);
4295
4296 /* Pick one of the errors of KIND to report. */
4297 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4298 for (curr = head; curr != NULL; curr = curr->next)
4299 {
4300 if (curr->detail.kind != kind)
4301 continue;
4302 /* If there are multiple errors, pick the one with the highest
4303 mismatching operand index. In the case of multiple errors with
4304 the same highest operand index, pick the first one, or the first
4305 one with a non-NULL error message. */
4306 if (curr->detail.index > largest_error_pos
4307 || (curr->detail.index == largest_error_pos && msg == NULL
4308 && curr->detail.error != NULL))
4309 {
4310 largest_error_pos = curr->detail.index;
4311 record = curr;
4312 msg = record->detail.error;
4313 }
4314 }
4315
4316 gas_assert (largest_error_pos != -2 && record != NULL);
4317 DEBUG_TRACE ("Pick up error kind %s to report",
4318 operand_mismatch_kind_names[record->detail.kind]);
4319
4320 /* Output. */
4321 output_operand_error_record (record, str);
4322 }
4323 \f
4324 /* Write an AARCH64 instruction to buf - always little-endian. */
4325 static void
4326 put_aarch64_insn (char *buf, uint32_t insn)
4327 {
4328 unsigned char *where = (unsigned char *) buf;
4329 where[0] = insn;
4330 where[1] = insn >> 8;
4331 where[2] = insn >> 16;
4332 where[3] = insn >> 24;
4333 }
4334
4335 static uint32_t
4336 get_aarch64_insn (char *buf)
4337 {
4338 unsigned char *where = (unsigned char *) buf;
4339 uint32_t result;
4340 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4341 return result;
4342 }
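/* A minimal self-check sketch (an assumed, debug-only example; the function
   name is hypothetical and nothing in the assembler calls it): the NOP
   encoding 0xd503201f must be stored as the little-endian byte sequence
   1f 20 03 d5 and must read back unchanged.  */
#ifdef DEBUG_AARCH64
static void ATTRIBUTE_UNUSED
aarch64_check_insn_round_trip (void)
{
  char buf[INSN_SIZE];

  /* Write the instruction word and check the byte layout.  */
  put_aarch64_insn (buf, 0xd503201f);
  gas_assert ((unsigned char) buf[0] == 0x1f
	      && (unsigned char) buf[1] == 0x20
	      && (unsigned char) buf[2] == 0x03
	      && (unsigned char) buf[3] == 0xd5);

  /* Reading it back must recover the original value.  */
  gas_assert (get_aarch64_insn (buf) == 0xd503201f);
}
#endif /* DEBUG_AARCH64 */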
4343
4344 static void
4345 output_inst (struct aarch64_inst *new_inst)
4346 {
4347 char *to = NULL;
4348
4349 to = frag_more (INSN_SIZE);
4350
4351 frag_now->tc_frag_data.recorded = 1;
4352
4353 put_aarch64_insn (to, inst.base.value);
4354
4355 if (inst.reloc.type != BFD_RELOC_UNUSED)
4356 {
4357 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4358 INSN_SIZE, &inst.reloc.exp,
4359 inst.reloc.pc_rel,
4360 inst.reloc.type);
4361 DEBUG_TRACE ("Prepared relocation fix up");
4362 /* Don't check the addend value against the instruction size,
4363 that's the job of our code in md_apply_fix(). */
4364 fixp->fx_no_overflow = 1;
4365 if (new_inst != NULL)
4366 fixp->tc_fix_data.inst = new_inst;
4367 if (aarch64_gas_internal_fixup_p ())
4368 {
4369 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4370 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4371 fixp->fx_addnumber = inst.reloc.flags;
4372 }
4373 }
4374
4375 dwarf2_emit_insn (INSN_SIZE);
4376 }
4377
4378 /* Link together opcodes of the same name. */
4379
4380 struct templates
4381 {
4382 aarch64_opcode *opcode;
4383 struct templates *next;
4384 };
4385
4386 typedef struct templates templates;
4387
4388 static templates *
4389 lookup_mnemonic (const char *start, int len)
4390 {
4391 templates *templ = NULL;
4392
4393 templ = hash_find_n (aarch64_ops_hsh, start, len);
4394 return templ;
4395 }
4396
4397 /* Subroutine of md_assemble, responsible for looking up the primary
4398 opcode from the mnemonic the user wrote. STR points to the
4399 beginning of the mnemonic. */
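/* For example, given the line "b.eq target" the scan below stops at the
   '.', so BASE is "b" with LEN 1; "eq" is looked up in aarch64_cond_hsh and
   recorded in inst.cond, and the mnemonic is then looked up as "b.c" (it is
   assumed here, for illustration, that conditional opcodes are entered in
   aarch64_ops_hsh under such a ".c"-suffixed name).  */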
4400
4401 static templates *
4402 opcode_lookup (char **str)
4403 {
4404 char *end, *base;
4405 const aarch64_cond *cond;
4406 char condname[16];
4407 int len;
4408
4409 /* Scan up to the end of the mnemonic, which must end in white space,
4410 '.', or end of string. */
4411 for (base = end = *str; is_part_of_name (*end); end++)
4412 if (*end == '.')
4413 break;
4414
4415 if (end == base)
4416 return 0;
4417
4418 inst.cond = COND_ALWAYS;
4419
4420 /* Handle a possible condition. */
4421 if (end[0] == '.')
4422 {
4423 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4424 if (cond)
4425 {
4426 inst.cond = cond->value;
4427 *str = end + 3;
4428 }
4429 else
4430 {
4431 *str = end;
4432 return 0;
4433 }
4434 }
4435 else
4436 *str = end;
4437
4438 len = end - base;
4439
4440 if (inst.cond == COND_ALWAYS)
4441 {
4442 /* Look for unaffixed mnemonic. */
4443 return lookup_mnemonic (base, len);
4444 }
4445 else if (len <= 13)
4446 {
4447 /* Append ".c" to the mnemonic if conditional. */
4448 memcpy (condname, base, len);
4449 memcpy (condname + len, ".c", 2);
4450 base = condname;
4451 len += 2;
4452 return lookup_mnemonic (base, len);
4453 }
4454
4455 return NULL;
4456 }
4457
4458 /* Internal helper routine converting a vector neon_type_el structure
4459 *VECTYPE to a corresponding operand qualifier. */
4460
4461 static inline aarch64_opnd_qualifier_t
4462 vectype_to_qualifier (const struct neon_type_el *vectype)
4463 {
4464 /* Element size in bytes indexed by neon_el_type. */
4465 const unsigned char ele_size[5]
4466 = {1, 2, 4, 8, 16};
4467
4468 if (!vectype->defined || vectype->type == NT_invtype)
4469 goto vectype_conversion_fail;
4470
4471 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4472
4473 if (vectype->defined & NTA_HASINDEX)
4474 /* Vector element register. */
4475 return AARCH64_OPND_QLF_S_B + vectype->type;
4476 else
4477 {
4478 /* Vector register. */
4479 int reg_size = ele_size[vectype->type] * vectype->width;
4480 unsigned offset;
4481 if (reg_size != 16 && reg_size != 8)
4482 goto vectype_conversion_fail;
4483 /* The conversion relies on the V_* qualifiers being ordered first by
4484 vector element size and, within that, by vector register size. */
4485 offset = (vectype->type == NT_q)
4486 ? 8 : (vectype->type << 1) + (reg_size >> 4);
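      /* A worked example of the arithmetic above: ".4s" gives type NT_s (2)
	 and width 4, so reg_size is 4 * 4 = 16 and offset is
	 (2 << 1) + (16 >> 4) = 5; added to AARCH64_OPND_QLF_V_8B this yields
	 AARCH64_OPND_QLF_V_4S, assuming the V_* qualifiers are declared in
	 the order 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q, which is the ordering
	 this calculation relies on.  */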
4487 gas_assert (offset <= 8);
4488 return AARCH64_OPND_QLF_V_8B + offset;
4489 }
4490
4491 vectype_conversion_fail:
4492 first_error (_("bad vector arrangement type"));
4493 return AARCH64_OPND_QLF_NIL;
4494 }
4495
4496 /* Process an optional operand that is found omitted from the assembly line.
4497 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4498 instruction's opcode entry while IDX is the index of this omitted operand.
4499 */
4500
4501 static void
4502 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4503 int idx, aarch64_opnd_info *operand)
4504 {
4505 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4506 gas_assert (optional_operand_p (opcode, idx));
4507 gas_assert (!operand->present);
4508
4509 switch (type)
4510 {
4511 case AARCH64_OPND_Rd:
4512 case AARCH64_OPND_Rn:
4513 case AARCH64_OPND_Rm:
4514 case AARCH64_OPND_Rt:
4515 case AARCH64_OPND_Rt2:
4516 case AARCH64_OPND_Rs:
4517 case AARCH64_OPND_Ra:
4518 case AARCH64_OPND_Rt_SYS:
4519 case AARCH64_OPND_Rd_SP:
4520 case AARCH64_OPND_Rn_SP:
4521 case AARCH64_OPND_Fd:
4522 case AARCH64_OPND_Fn:
4523 case AARCH64_OPND_Fm:
4524 case AARCH64_OPND_Fa:
4525 case AARCH64_OPND_Ft:
4526 case AARCH64_OPND_Ft2:
4527 case AARCH64_OPND_Sd:
4528 case AARCH64_OPND_Sn:
4529 case AARCH64_OPND_Sm:
4530 case AARCH64_OPND_Vd:
4531 case AARCH64_OPND_Vn:
4532 case AARCH64_OPND_Vm:
4533 case AARCH64_OPND_VdD1:
4534 case AARCH64_OPND_VnD1:
4535 operand->reg.regno = default_value;
4536 break;
4537
4538 case AARCH64_OPND_Ed:
4539 case AARCH64_OPND_En:
4540 case AARCH64_OPND_Em:
4541 operand->reglane.regno = default_value;
4542 break;
4543
4544 case AARCH64_OPND_IDX:
4545 case AARCH64_OPND_BIT_NUM:
4546 case AARCH64_OPND_IMMR:
4547 case AARCH64_OPND_IMMS:
4548 case AARCH64_OPND_SHLL_IMM:
4549 case AARCH64_OPND_IMM_VLSL:
4550 case AARCH64_OPND_IMM_VLSR:
4551 case AARCH64_OPND_CCMP_IMM:
4552 case AARCH64_OPND_FBITS:
4553 case AARCH64_OPND_UIMM4:
4554 case AARCH64_OPND_UIMM3_OP1:
4555 case AARCH64_OPND_UIMM3_OP2:
4556 case AARCH64_OPND_IMM:
4557 case AARCH64_OPND_WIDTH:
4558 case AARCH64_OPND_UIMM7:
4559 case AARCH64_OPND_NZCV:
4560 operand->imm.value = default_value;
4561 break;
4562
4563 case AARCH64_OPND_EXCEPTION:
4564 inst.reloc.type = BFD_RELOC_UNUSED;
4565 break;
4566
4567 case AARCH64_OPND_BARRIER_ISB:
4568 operand->barrier = aarch64_barrier_options + default_value;
4569 break;
4570 default:
4571 break;
4572 }
4573 }
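/* For example, "ret" may be written without its optional Rt_SYS operand;
   in that case the default register number recorded for the opcode in the
   opcode table (30, i.e. x30) is filled in by the AARCH64_OPND_Rt_SYS arm
   of the switch above.  The concrete default is quoted here only as an
   illustration; it always comes from get_optional_operand_default_value.  */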
4574
4575 /* Process the relocation type for move wide instructions.
4576 Return TRUE on success; otherwise return FALSE. */
4577
4578 static bfd_boolean
4579 process_movw_reloc_info (void)
4580 {
4581 int is32;
4582 unsigned shift;
4583
4584 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4585
4586 if (inst.base.opcode->op == OP_MOVK)
4587 switch (inst.reloc.type)
4588 {
4589 case BFD_RELOC_AARCH64_MOVW_G0_S:
4590 case BFD_RELOC_AARCH64_MOVW_G1_S:
4591 case BFD_RELOC_AARCH64_MOVW_G2_S:
4592 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4593 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4594 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4595 set_syntax_error
4596 (_("the specified relocation type is not allowed for MOVK"));
4597 return FALSE;
4598 default:
4599 break;
4600 }
4601
4602 switch (inst.reloc.type)
4603 {
4604 case BFD_RELOC_AARCH64_MOVW_G0:
4605 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4606 case BFD_RELOC_AARCH64_MOVW_G0_S:
4607 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4608 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4609 shift = 0;
4610 break;
4611 case BFD_RELOC_AARCH64_MOVW_G1:
4612 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4613 case BFD_RELOC_AARCH64_MOVW_G1_S:
4614 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4615 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4616 shift = 16;
4617 break;
4618 case BFD_RELOC_AARCH64_MOVW_G2:
4619 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4620 case BFD_RELOC_AARCH64_MOVW_G2_S:
4621 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4622 if (is32)
4623 {
4624 set_fatal_syntax_error
4625 (_("the specified relocation type is not allowed for 32-bit "
4626 "register"));
4627 return FALSE;
4628 }
4629 shift = 32;
4630 break;
4631 case BFD_RELOC_AARCH64_MOVW_G3:
4632 if (is32)
4633 {
4634 set_fatal_syntax_error
4635 (_("the specified relocation type is not allowed for 32-bit "
4636 "register"));
4637 return FALSE;
4638 }
4639 shift = 48;
4640 break;
4641 default:
4642 /* More cases should be added when more MOVW-related relocation types
4643 are supported in GAS. */
4644 gas_assert (aarch64_gas_internal_fixup_p ());
4645 /* The shift amount should have already been set by the parser. */
4646 return TRUE;
4647 }
4648 inst.base.operands[1].shifter.amount = shift;
4649 return TRUE;
4650 }
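/* For example (the relocation modifier spellings below are the usual GAS
   ones and are shown only as an illustration): "movz x0, #:abs_g2:sym"
   carries BFD_RELOC_AARCH64_MOVW_G2 and therefore gets a shift amount of
   32, while the same relocation on a 32-bit destination ("movz w0, ...")
   is rejected above; likewise a signed group relocation such as
   :abs_g0_s: is refused for MOVK.  */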
4651
4652 /* A primitive logarithm calculator. */
4653
4654 static inline unsigned int
4655 get_logsz (unsigned int size)
4656 {
4657 const unsigned char ls[16] =
4658 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4659 if (size > 16)
4660 {
4661 gas_assert (0);
4662 return -1;
4663 }
4664 gas_assert (ls[size - 1] != (unsigned char)-1);
4665 return ls[size - 1];
4666 }
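/* A worked example of the table above: get_logsz (1) == 0,
   get_logsz (2) == 1, get_logsz (4) == 2, get_logsz (8) == 3 and
   get_logsz (16) == 4; any other size trips one of the assertions.  */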
4667
4668 /* Determine and return the real reloc type code for an instruction
4669 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4670
4671 static inline bfd_reloc_code_real_type
4672 ldst_lo12_determine_real_reloc_type (void)
4673 {
4674 int logsz;
4675 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4676 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4677
4678 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4679 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4680 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4681 BFD_RELOC_AARCH64_LDST128_LO12
4682 };
4683
4684 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4685 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4686
4687 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4688 opd1_qlf =
4689 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4690 1, opd0_qlf, 0);
4691 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4692
4693 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4694 gas_assert (logsz >= 0 && logsz <= 4);
4695
4696 return reloc_ldst_lo12[logsz];
4697 }
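/* For example, for "ldr x0, [x1, #:lo12:sym]" the address operand
   qualifier resolves to an 8-byte element size, so logsz is 3 and the
   pseudo relocation is narrowed to BFD_RELOC_AARCH64_LDST64_LO12.  (The
   instruction is an assumed illustration; any LDR/STR of an X register
   behaves the same way.)  */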
4698
4699 /* Check whether a register list REGINFO is valid. The registers must be
4700 numbered in increasing order (modulo 32), in increments of one or two.
4701
4702 If ACCEPT_ALTERNATE is non-zero, the increment must be two; otherwise
4703 it must be one.
4704
4705 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4706
4707 static bfd_boolean
4708 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4709 {
4710 uint32_t i, nb_regs, prev_regno, incr;
4711
4712 nb_regs = 1 + (reginfo & 0x3);
4713 reginfo >>= 2;
4714 prev_regno = reginfo & 0x1f;
4715 incr = accept_alternate ? 2 : 1;
4716
4717 for (i = 1; i < nb_regs; ++i)
4718 {
4719 uint32_t curr_regno;
4720 reginfo >>= 5;
4721 curr_regno = reginfo & 0x1f;
4722 if (curr_regno != ((prev_regno + incr) & 0x1f))
4723 return FALSE;
4724 prev_regno = curr_regno;
4725 }
4726
4727 return TRUE;
4728 }
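/* A worked example of the REGINFO encoding used above (the same layout
   that parse_operands decodes below: bits [1:0] hold the register count
   minus one and each subsequent 5-bit field holds a register number):
   the list { v1, v2, v3 } is encoded as

     (3 << 12) | (2 << 7) | (1 << 2) | 2

   and is accepted with ACCEPT_ALTERNATE of 0, whereas { v1, v3, v5 } is
   accepted only when ACCEPT_ALTERNATE is non-zero.  */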
4729
4730 /* Generic instruction operand parser. This does no encoding and no
4731 semantic validation; it merely squirrels values away in the inst
4732 structure. Returns TRUE or FALSE depending on whether the
4733 specified grammar matched. */
4734
4735 static bfd_boolean
4736 parse_operands (char *str, const aarch64_opcode *opcode)
4737 {
4738 int i;
4739 char *backtrack_pos = 0;
4740 const enum aarch64_opnd *operands = opcode->operands;
4741
4742 clear_error ();
4743 skip_whitespace (str);
4744
4745 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4746 {
4747 int64_t val;
4748 int isreg32, isregzero;
4749 int comma_skipped_p = 0;
4750 aarch64_reg_type rtype;
4751 struct neon_type_el vectype;
4752 aarch64_opnd_info *info = &inst.base.operands[i];
4753
4754 DEBUG_TRACE ("parse operand %d", i);
4755
4756 /* Assign the operand code. */
4757 info->type = operands[i];
4758
4759 if (optional_operand_p (opcode, i))
4760 {
4761 /* Remember where we are in case we need to backtrack. */
4762 gas_assert (!backtrack_pos);
4763 backtrack_pos = str;
4764 }
4765
4766 /* Expect a comma between operands; the backtrack mechanism below will
4767 take care of the case of an omitted optional operand. */
4768 if (i > 0 && ! skip_past_char (&str, ','))
4769 {
4770 set_syntax_error (_("comma expected between operands"));
4771 goto failure;
4772 }
4773 else
4774 comma_skipped_p = 1;
4775
4776 switch (operands[i])
4777 {
4778 case AARCH64_OPND_Rd:
4779 case AARCH64_OPND_Rn:
4780 case AARCH64_OPND_Rm:
4781 case AARCH64_OPND_Rt:
4782 case AARCH64_OPND_Rt2:
4783 case AARCH64_OPND_Rs:
4784 case AARCH64_OPND_Ra:
4785 case AARCH64_OPND_Rt_SYS:
4786 case AARCH64_OPND_PAIRREG:
4787 po_int_reg_or_fail (1, 0);
4788 break;
4789
4790 case AARCH64_OPND_Rd_SP:
4791 case AARCH64_OPND_Rn_SP:
4792 po_int_reg_or_fail (0, 1);
4793 break;
4794
4795 case AARCH64_OPND_Rm_EXT:
4796 case AARCH64_OPND_Rm_SFT:
4797 po_misc_or_fail (parse_shifter_operand
4798 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4799 ? SHIFTED_ARITH_IMM
4800 : SHIFTED_LOGIC_IMM)));
4801 if (!info->shifter.operator_present)
4802 {
4803 /* Default to LSL if not present. Libopcodes prefers shifter
4804 kind to be explicit. */
4805 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4806 info->shifter.kind = AARCH64_MOD_LSL;
4807 /* For Rm_EXT, libopcodes will carry out a further check on whether
4808 or not the stack pointer is used in the instruction (recall that
4809 "the extend operator is not optional unless at least one of
4810 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4811 }
4812 break;
4813
4814 case AARCH64_OPND_Fd:
4815 case AARCH64_OPND_Fn:
4816 case AARCH64_OPND_Fm:
4817 case AARCH64_OPND_Fa:
4818 case AARCH64_OPND_Ft:
4819 case AARCH64_OPND_Ft2:
4820 case AARCH64_OPND_Sd:
4821 case AARCH64_OPND_Sn:
4822 case AARCH64_OPND_Sm:
4823 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4824 if (val == PARSE_FAIL)
4825 {
4826 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4827 goto failure;
4828 }
4829 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4830
4831 info->reg.regno = val;
4832 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4833 break;
4834
4835 case AARCH64_OPND_Vd:
4836 case AARCH64_OPND_Vn:
4837 case AARCH64_OPND_Vm:
4838 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4839 if (val == PARSE_FAIL)
4840 {
4841 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4842 goto failure;
4843 }
4844 if (vectype.defined & NTA_HASINDEX)
4845 goto failure;
4846
4847 info->reg.regno = val;
4848 info->qualifier = vectype_to_qualifier (&vectype);
4849 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4850 goto failure;
4851 break;
4852
4853 case AARCH64_OPND_VdD1:
4854 case AARCH64_OPND_VnD1:
4855 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4856 if (val == PARSE_FAIL)
4857 {
4858 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4859 goto failure;
4860 }
4861 if (vectype.type != NT_d || vectype.index != 1)
4862 {
4863 set_fatal_syntax_error
4864 (_("the top half of a 128-bit FP/SIMD register is expected"));
4865 goto failure;
4866 }
4867 info->reg.regno = val;
4868 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4869 here; this is correct for the purpose of encoding/decoding since
4870 only the register number is explicitly encoded in the related
4871 instructions, although this appears a bit hacky. */
4872 info->qualifier = AARCH64_OPND_QLF_S_D;
4873 break;
4874
4875 case AARCH64_OPND_Ed:
4876 case AARCH64_OPND_En:
4877 case AARCH64_OPND_Em:
4878 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4879 if (val == PARSE_FAIL)
4880 {
4881 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4882 goto failure;
4883 }
4884 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4885 goto failure;
4886
4887 info->reglane.regno = val;
4888 info->reglane.index = vectype.index;
4889 info->qualifier = vectype_to_qualifier (&vectype);
4890 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4891 goto failure;
4892 break;
4893
4894 case AARCH64_OPND_LVn:
4895 case AARCH64_OPND_LVt:
4896 case AARCH64_OPND_LVt_AL:
4897 case AARCH64_OPND_LEt:
4898 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4899 goto failure;
4900 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4901 {
4902 set_fatal_syntax_error (_("invalid register list"));
4903 goto failure;
4904 }
4905 info->reglist.first_regno = (val >> 2) & 0x1f;
4906 info->reglist.num_regs = (val & 0x3) + 1;
4907 if (operands[i] == AARCH64_OPND_LEt)
4908 {
4909 if (!(vectype.defined & NTA_HASINDEX))
4910 goto failure;
4911 info->reglist.has_index = 1;
4912 info->reglist.index = vectype.index;
4913 }
4914 else if (!(vectype.defined & NTA_HASTYPE))
4915 goto failure;
4916 info->qualifier = vectype_to_qualifier (&vectype);
4917 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4918 goto failure;
4919 break;
4920
4921 case AARCH64_OPND_Cn:
4922 case AARCH64_OPND_Cm:
4923 po_reg_or_fail (REG_TYPE_CN);
4924 if (val > 15)
4925 {
4926 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4927 goto failure;
4928 }
4929 inst.base.operands[i].reg.regno = val;
4930 break;
4931
4932 case AARCH64_OPND_SHLL_IMM:
4933 case AARCH64_OPND_IMM_VLSR:
4934 po_imm_or_fail (1, 64);
4935 info->imm.value = val;
4936 break;
4937
4938 case AARCH64_OPND_CCMP_IMM:
4939 case AARCH64_OPND_FBITS:
4940 case AARCH64_OPND_UIMM4:
4941 case AARCH64_OPND_UIMM3_OP1:
4942 case AARCH64_OPND_UIMM3_OP2:
4943 case AARCH64_OPND_IMM_VLSL:
4944 case AARCH64_OPND_IMM:
4945 case AARCH64_OPND_WIDTH:
4946 po_imm_nc_or_fail ();
4947 info->imm.value = val;
4948 break;
4949
4950 case AARCH64_OPND_UIMM7:
4951 po_imm_or_fail (0, 127);
4952 info->imm.value = val;
4953 break;
4954
4955 case AARCH64_OPND_IDX:
4956 case AARCH64_OPND_BIT_NUM:
4957 case AARCH64_OPND_IMMR:
4958 case AARCH64_OPND_IMMS:
4959 po_imm_or_fail (0, 63);
4960 info->imm.value = val;
4961 break;
4962
4963 case AARCH64_OPND_IMM0:
4964 po_imm_nc_or_fail ();
4965 if (val != 0)
4966 {
4967 set_fatal_syntax_error (_("immediate zero expected"));
4968 goto failure;
4969 }
4970 info->imm.value = 0;
4971 break;
4972
4973 case AARCH64_OPND_FPIMM0:
4974 {
4975 int qfloat;
4976 bfd_boolean res1 = FALSE, res2 = FALSE;
4977 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4978 it is probably not worth the effort to support it. */
4979 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4980 && !(res2 = parse_constant_immediate (&str, &val)))
4981 goto failure;
4982 if ((res1 && qfloat == 0) || (res2 && val == 0))
4983 {
4984 info->imm.value = 0;
4985 info->imm.is_fp = 1;
4986 break;
4987 }
4988 set_fatal_syntax_error (_("immediate zero expected"));
4989 goto failure;
4990 }
4991
4992 case AARCH64_OPND_IMM_MOV:
4993 {
4994 char *saved = str;
4995 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4996     || reg_name_p (str, REG_TYPE_VN))
4997 goto failure;
4998 str = saved;
4999 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5000 GE_OPT_PREFIX, 1));
5001 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5002 later. fix_mov_imm_insn will try to determine a machine
5003 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5004 message if the immediate cannot be moved by a single
5005 instruction. */
5006 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5007 inst.base.operands[i].skip = 1;
5008 }
5009 break;
5010
5011 case AARCH64_OPND_SIMD_IMM:
5012 case AARCH64_OPND_SIMD_IMM_SFT:
5013 if (! parse_big_immediate (&str, &val))
5014 goto failure;
5015 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5016 /* addr_off_p */ 0,
5017 /* need_libopcodes_p */ 1,
5018 /* skip_p */ 1);
5019 /* Parse shift.
5020 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5021 shift, we don't check it here; we leave the checking to
5022 the libopcodes (operand_general_constraint_met_p). By
5023 doing this, we achieve better diagnostics. */
5024 if (skip_past_comma (&str)
5025 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5026 goto failure;
5027 if (!info->shifter.operator_present
5028 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5029 {
5030 /* Default to LSL if not present. Libopcodes prefers shifter
5031 kind to be explicit. */
5032 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5033 info->shifter.kind = AARCH64_MOD_LSL;
5034 }
5035 break;
5036
5037 case AARCH64_OPND_FPIMM:
5038 case AARCH64_OPND_SIMD_FPIMM:
5039 {
5040 int qfloat;
5041 bfd_boolean dp_p
5042 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5043 == 8);
5044 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5045 goto failure;
5046 if (qfloat == 0)
5047 {
5048 set_fatal_syntax_error (_("invalid floating-point constant"));
5049 goto failure;
5050 }
5051 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5052 inst.base.operands[i].imm.is_fp = 1;
5053 }
5054 break;
5055
5056 case AARCH64_OPND_LIMM:
5057 po_misc_or_fail (parse_shifter_operand (&str, info,
5058 SHIFTED_LOGIC_IMM));
5059 if (info->shifter.operator_present)
5060 {
5061 set_fatal_syntax_error
5062 (_("shift not allowed for bitmask immediate"));
5063 goto failure;
5064 }
5065 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5066 /* addr_off_p */ 0,
5067 /* need_libopcodes_p */ 1,
5068 /* skip_p */ 1);
5069 break;
5070
5071 case AARCH64_OPND_AIMM:
5072 if (opcode->op == OP_ADD)
5073 /* ADD may have relocation types. */
5074 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5075 SHIFTED_ARITH_IMM));
5076 else
5077 po_misc_or_fail (parse_shifter_operand (&str, info,
5078 SHIFTED_ARITH_IMM));
5079 switch (inst.reloc.type)
5080 {
5081 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5082 info->shifter.amount = 12;
5083 break;
5084 case BFD_RELOC_UNUSED:
5085 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5086 if (info->shifter.kind != AARCH64_MOD_NONE)
5087 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5088 inst.reloc.pc_rel = 0;
5089 break;
5090 default:
5091 break;
5092 }
5093 info->imm.value = 0;
5094 if (!info->shifter.operator_present)
5095 {
5096 /* Default to LSL if not present. Libopcodes prefers shifter
5097 kind to be explicit. */
5098 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5099 info->shifter.kind = AARCH64_MOD_LSL;
5100 }
5101 break;
5102
5103 case AARCH64_OPND_HALF:
5104 {
5105 /* #<imm16> or relocation. */
5106 int internal_fixup_p;
5107 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5108 if (internal_fixup_p)
5109 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5110 skip_whitespace (str);
5111 if (skip_past_comma (&str))
5112 {
5113 /* {, LSL #<shift>} */
5114 if (! aarch64_gas_internal_fixup_p ())
5115 {
5116 set_fatal_syntax_error (_("can't mix relocation modifier "
5117 "with explicit shift"));
5118 goto failure;
5119 }
5120 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5121 }
5122 else
5123 inst.base.operands[i].shifter.amount = 0;
5124 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5125 inst.base.operands[i].imm.value = 0;
5126 if (! process_movw_reloc_info ())
5127 goto failure;
5128 }
5129 break;
5130
5131 case AARCH64_OPND_EXCEPTION:
5132 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5133 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5134 /* addr_off_p */ 0,
5135 /* need_libopcodes_p */ 0,
5136 /* skip_p */ 1);
5137 break;
5138
5139 case AARCH64_OPND_NZCV:
5140 {
5141 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5142 if (nzcv != NULL)
5143 {
5144 str += 4;
5145 info->imm.value = nzcv->value;
5146 break;
5147 }
5148 po_imm_or_fail (0, 15);
5149 info->imm.value = val;
5150 }
5151 break;
5152
5153 case AARCH64_OPND_COND:
5154 case AARCH64_OPND_COND1:
5155 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5156 str += 2;
5157 if (info->cond == NULL)
5158 {
5159 set_syntax_error (_("invalid condition"));
5160 goto failure;
5161 }
5162 else if (operands[i] == AARCH64_OPND_COND1
5163 && (info->cond->value & 0xe) == 0xe)
5164 {
5165 /* Do not allow AL or NV. */
5166 set_default_error ();
5167 goto failure;
5168 }
5169 break;
5170
5171 case AARCH64_OPND_ADDR_ADRP:
5172 po_misc_or_fail (parse_adrp (&str));
5173 /* Clear the value as operand needs to be relocated. */
5174 info->imm.value = 0;
5175 break;
5176
5177 case AARCH64_OPND_ADDR_PCREL14:
5178 case AARCH64_OPND_ADDR_PCREL19:
5179 case AARCH64_OPND_ADDR_PCREL21:
5180 case AARCH64_OPND_ADDR_PCREL26:
5181 po_misc_or_fail (parse_address_reloc (&str, info));
5182 if (!info->addr.pcrel)
5183 {
5184 set_syntax_error (_("invalid pc-relative address"));
5185 goto failure;
5186 }
5187 if (inst.gen_lit_pool
5188 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5189 {
5190 /* Only permit "=value" in the literal load instructions.
5191 The literal will be generated by programmer_friendly_fixup. */
5192 set_syntax_error (_("invalid use of \"=immediate\""));
5193 goto failure;
5194 }
5195 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5196 {
5197 set_syntax_error (_("unrecognized relocation suffix"));
5198 goto failure;
5199 }
5200 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5201 {
5202 info->imm.value = inst.reloc.exp.X_add_number;
5203 inst.reloc.type = BFD_RELOC_UNUSED;
5204 }
5205 else
5206 {
5207 info->imm.value = 0;
5208 if (inst.reloc.type == BFD_RELOC_UNUSED)
5209 switch (opcode->iclass)
5210 {
5211 case compbranch:
5212 case condbranch:
5213 /* e.g. CBZ or B.COND */
5214 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5215 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5216 break;
5217 case testbranch:
5218 /* e.g. TBZ */
5219 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5220 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5221 break;
5222 case branch_imm:
5223 /* e.g. B or BL */
5224 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5225 inst.reloc.type =
5226 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5227 : BFD_RELOC_AARCH64_JUMP26;
5228 break;
5229 case loadlit:
5230 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5231 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5232 break;
5233 case pcreladdr:
5234 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5235 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5236 break;
5237 default:
5238 gas_assert (0);
5239 abort ();
5240 }
5241 inst.reloc.pc_rel = 1;
5242 }
5243 break;
5244
5245 case AARCH64_OPND_ADDR_SIMPLE:
5246 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5247 /* [<Xn|SP>{, #<simm>}] */
5248 po_char_or_fail ('[');
5249 po_reg_or_fail (REG_TYPE_R64_SP);
5250 /* Accept optional ", #0". */
5251 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5252 && skip_past_char (&str, ','))
5253 {
5254 skip_past_char (&str, '#');
5255 if (! skip_past_char (&str, '0'))
5256 {
5257 set_fatal_syntax_error
5258 (_("the optional immediate offset can only be 0"));
5259 goto failure;
5260 }
5261 }
5262 po_char_or_fail (']');
5263 info->addr.base_regno = val;
5264 break;
5265
5266 case AARCH64_OPND_ADDR_REGOFF:
5267 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5268 po_misc_or_fail (parse_address (&str, info, 0));
5269 if (info->addr.pcrel || !info->addr.offset.is_reg
5270 || !info->addr.preind || info->addr.postind
5271 || info->addr.writeback)
5272 {
5273 set_syntax_error (_("invalid addressing mode"));
5274 goto failure;
5275 }
5276 if (!info->shifter.operator_present)
5277 {
5278 /* Default to LSL if not present. Libopcodes prefers shifter
5279 kind to be explicit. */
5280 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5281 info->shifter.kind = AARCH64_MOD_LSL;
5282 }
5283 /* Qualifier to be deduced by libopcodes. */
5284 break;
5285
5286 case AARCH64_OPND_ADDR_SIMM7:
5287 po_misc_or_fail (parse_address (&str, info, 0));
5288 if (info->addr.pcrel || info->addr.offset.is_reg
5289 || (!info->addr.preind && !info->addr.postind))
5290 {
5291 set_syntax_error (_("invalid addressing mode"));
5292 goto failure;
5293 }
5294 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5295 /* addr_off_p */ 1,
5296 /* need_libopcodes_p */ 1,
5297 /* skip_p */ 0);
5298 break;
5299
5300 case AARCH64_OPND_ADDR_SIMM9:
5301 case AARCH64_OPND_ADDR_SIMM9_2:
5302 po_misc_or_fail (parse_address_reloc (&str, info));
5303 if (info->addr.pcrel || info->addr.offset.is_reg
5304 || (!info->addr.preind && !info->addr.postind)
5305 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5306 && info->addr.writeback))
5307 {
5308 set_syntax_error (_("invalid addressing mode"));
5309 goto failure;
5310 }
5311 if (inst.reloc.type != BFD_RELOC_UNUSED)
5312 {
5313 set_syntax_error (_("relocation not allowed"));
5314 goto failure;
5315 }
5316 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5317 /* addr_off_p */ 1,
5318 /* need_libopcodes_p */ 1,
5319 /* skip_p */ 0);
5320 break;
5321
5322 case AARCH64_OPND_ADDR_UIMM12:
5323 po_misc_or_fail (parse_address_reloc (&str, info));
5324 if (info->addr.pcrel || info->addr.offset.is_reg
5325 || !info->addr.preind || info->addr.writeback)
5326 {
5327 set_syntax_error (_("invalid addressing mode"));
5328 goto failure;
5329 }
5330 if (inst.reloc.type == BFD_RELOC_UNUSED)
5331 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5332 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5333 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5334 /* Leave qualifier to be determined by libopcodes. */
5335 break;
5336
5337 case AARCH64_OPND_SIMD_ADDR_POST:
5338 /* [<Xn|SP>], <Xm|#<amount>> */
5339 po_misc_or_fail (parse_address (&str, info, 1));
5340 if (!info->addr.postind || !info->addr.writeback)
5341 {
5342 set_syntax_error (_("invalid addressing mode"));
5343 goto failure;
5344 }
5345 if (!info->addr.offset.is_reg)
5346 {
5347 if (inst.reloc.exp.X_op == O_constant)
5348 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5349 else
5350 {
5351 set_fatal_syntax_error
5352 (_("writeback value should be an immediate constant"));
5353 goto failure;
5354 }
5355 }
5356 /* No qualifier. */
5357 break;
5358
5359 case AARCH64_OPND_SYSREG:
5360 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5361 == PARSE_FAIL)
5362 {
5363 set_syntax_error (_("unknown or missing system register name"));
5364 goto failure;
5365 }
5366 inst.base.operands[i].sysreg = val;
5367 break;
5368
5369 case AARCH64_OPND_PSTATEFIELD:
5370 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5371 == PARSE_FAIL)
5372 {
5373 set_syntax_error (_("unknown or missing PSTATE field name"));
5374 goto failure;
5375 }
5376 inst.base.operands[i].pstatefield = val;
5377 break;
5378
5379 case AARCH64_OPND_SYSREG_IC:
5380 inst.base.operands[i].sysins_op =
5381 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5382 goto sys_reg_ins;
5383 case AARCH64_OPND_SYSREG_DC:
5384 inst.base.operands[i].sysins_op =
5385 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5386 goto sys_reg_ins;
5387 case AARCH64_OPND_SYSREG_AT:
5388 inst.base.operands[i].sysins_op =
5389 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5390 goto sys_reg_ins;
5391 case AARCH64_OPND_SYSREG_TLBI:
5392 inst.base.operands[i].sysins_op =
5393 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5394 sys_reg_ins:
5395 if (inst.base.operands[i].sysins_op == NULL)
5396 {
5397 set_fatal_syntax_error ( _("unknown or missing operation name"));
5398 goto failure;
5399 }
5400 break;
5401
5402 case AARCH64_OPND_BARRIER:
5403 case AARCH64_OPND_BARRIER_ISB:
5404 val = parse_barrier (&str);
5405 if (val != PARSE_FAIL
5406 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5407 {
5408 /* ISB only accepts the option name 'sy'. */
5409 set_syntax_error
5410 (_("the specified option is not accepted in ISB"));
5411 /* Turn off backtrack as this optional operand is present. */
5412 backtrack_pos = 0;
5413 goto failure;
5414 }
5415 /* This is an extension to accept a 0..15 immediate. */
5416 if (val == PARSE_FAIL)
5417 po_imm_or_fail (0, 15);
5418 info->barrier = aarch64_barrier_options + val;
5419 break;
5420
5421 case AARCH64_OPND_PRFOP:
5422 val = parse_pldop (&str);
5423 /* This is an extension to accept a 0..31 immediate. */
5424 if (val == PARSE_FAIL)
5425 po_imm_or_fail (0, 31);
5426 inst.base.operands[i].prfop = aarch64_prfops + val;
5427 break;
5428
5429 default:
5430 as_fatal (_("unhandled operand code %d"), operands[i]);
5431 }
5432
5433 /* If we get here, this operand was successfully parsed. */
5434 inst.base.operands[i].present = 1;
5435 continue;
5436
5437 failure:
5438 /* The parse routine should already have set the error, but in case
5439 not, set a default one here. */
5440 if (! error_p ())
5441 set_default_error ();
5442
5443 if (! backtrack_pos)
5444 goto parse_operands_return;
5445
5446 {
5447 /* We reach here because this operand is marked as optional, and
5448 either no operand was supplied or the operand was supplied but it
5449 was syntactically incorrect. In the latter case we report an
5450 error. In the former case we perform a few more checks before
5451 dropping through to the code to insert the default operand. */
5452
5453 char *tmp = backtrack_pos;
5454 char endchar = END_OF_INSN;
5455
5456 if (i != (aarch64_num_of_operands (opcode) - 1))
5457 endchar = ',';
5458 skip_past_char (&tmp, ',');
5459
5460 if (*tmp != endchar)
5461 /* The user has supplied an operand in the wrong format. */
5462 goto parse_operands_return;
5463
5464 /* Make sure there is not a comma before the optional operand.
5465 For example the fifth operand of 'sys' is optional:
5466
5467 sys #0,c0,c0,#0, <--- wrong
5468 sys #0,c0,c0,#0 <--- correct. */
5469 if (comma_skipped_p && i && endchar == END_OF_INSN)
5470 {
5471 set_fatal_syntax_error
5472 (_("unexpected comma before the omitted optional operand"));
5473 goto parse_operands_return;
5474 }
5475 }
5476
5477 /* Reaching here means we are dealing with an optional operand that is
5478 omitted from the assembly line. */
5479 gas_assert (optional_operand_p (opcode, i));
5480 info->present = 0;
5481 process_omitted_operand (operands[i], opcode, i, info);
5482
5483 /* Try again, skipping the optional operand at backtrack_pos. */
5484 str = backtrack_pos;
5485 backtrack_pos = 0;
5486
5487 /* Clear any error record after the omitted optional operand has been
5488 successfully handled. */
5489 clear_error ();
5490 }
5491
5492 /* Check if we have parsed all the operands. */
5493 if (*str != '\0' && ! error_p ())
5494 {
5495 /* Set I to the index of the last present operand; this is
5496 for the purpose of diagnostics. */
5497 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5498 ;
5499 set_fatal_syntax_error
5500 (_("unexpected characters following instruction"));
5501 }
5502
5503 parse_operands_return:
5504
5505 if (error_p ())
5506 {
5507 DEBUG_TRACE ("parsing FAIL: %s - %s",
5508 operand_mismatch_kind_names[get_error_kind ()],
5509 get_error_message ());
5510 /* Record the operand error properly; this is useful when there
5511 are multiple instruction templates for a mnemonic name, so that
5512 later on, we can select the error that most closely describes
5513 the problem. */
5514 record_operand_error (opcode, i, get_error_kind (),
5515 get_error_message ());
5516 return FALSE;
5517 }
5518 else
5519 {
5520 DEBUG_TRACE ("parsing SUCCESS");
5521 return TRUE;
5522 }
5523 }
5524
5525 /* Perform some fix-ups to provide programmer-friendly features while
5526 keeping libopcodes happy, i.e. libopcodes only accepts the
5527 preferred architectural syntax.
5528 Return FALSE if there is any failure; otherwise return TRUE. */
5529
5530 static bfd_boolean
5531 programmer_friendly_fixup (aarch64_instruction *instr)
5532 {
5533 aarch64_inst *base = &instr->base;
5534 const aarch64_opcode *opcode = base->opcode;
5535 enum aarch64_op op = opcode->op;
5536 aarch64_opnd_info *operands = base->operands;
5537
5538 DEBUG_TRACE ("enter");
5539
5540 switch (opcode->iclass)
5541 {
5542 case testbranch:
5543 /* TBNZ Xn|Wn, #uimm6, label
5544 Test and Branch Not Zero: conditionally jumps to label if bit number
5545 uimm6 in register Xn is not zero. The bit number implies the width of
5546 the register, which may be written as, and should be disassembled as, Wn if
5547 uimm is less than 32. */
5548 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5549 {
5550 if (operands[1].imm.value >= 32)
5551 {
5552 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5553 0, 31);
5554 return FALSE;
5555 }
5556 operands[0].qualifier = AARCH64_OPND_QLF_X;
5557 }
5558 break;
5559 case loadlit:
5560 /* LDR Wt, label | =value
5561 As a convenience, assemblers will typically permit the notation
5562 "=value" in conjunction with the pc-relative literal load instructions
5563 to automatically place an immediate value or symbolic address in a
5564 nearby literal pool and generate a hidden label which references it.
5565 ISREG has been set to 0 in the case of =value. */
5566 if (instr->gen_lit_pool
5567 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5568 {
5569 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5570 if (op == OP_LDRSW_LIT)
5571 size = 4;
5572 if (instr->reloc.exp.X_op != O_constant
5573 && instr->reloc.exp.X_op != O_big
5574 && instr->reloc.exp.X_op != O_symbol)
5575 {
5576 record_operand_error (opcode, 1,
5577 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5578 _("constant expression expected"));
5579 return FALSE;
5580 }
5581 if (! add_to_lit_pool (&instr->reloc.exp, size))
5582 {
5583 record_operand_error (opcode, 1,
5584 AARCH64_OPDE_OTHER_ERROR,
5585 _("literal pool insertion failed"));
5586 return FALSE;
5587 }
5588 }
5589 break;
5590 case log_shift:
5591 case bitfield:
5592 /* UXT[BHW] Wd, Wn
5593 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5594 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5595 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5596 A programmer-friendly assembler should accept a destination Xd in
5597 place of Wd, however that is not the preferred form for disassembly.
5598 */
5599 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5600 && operands[1].qualifier == AARCH64_OPND_QLF_W
5601 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5602 operands[0].qualifier = AARCH64_OPND_QLF_W;
5603 break;
5604
5605 case addsub_ext:
5606 {
5607 /* In the 64-bit form, the final register operand is written as Wm
5608 for all but the (possibly omitted) UXTX/LSL and SXTX
5609 operators.
5610 As a programmer-friendly assembler, we accept e.g.
5611 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5612 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5613 int idx = aarch64_operand_index (opcode->operands,
5614 AARCH64_OPND_Rm_EXT);
5615 gas_assert (idx == 1 || idx == 2);
5616 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5617 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5618 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5619 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5620 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5621 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5622 }
5623 break;
5624
5625 default:
5626 break;
5627 }
5628
5629 DEBUG_TRACE ("exit with SUCCESS");
5630 return TRUE;
5631 }
5632
5633 /* Check for loads and stores that will cause unpredictable behavior. */
5634
5635 static void
5636 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5637 {
5638 aarch64_inst *base = &instr->base;
5639 const aarch64_opcode *opcode = base->opcode;
5640 const aarch64_opnd_info *opnds = base->operands;
5641 switch (opcode->iclass)
5642 {
5643 case ldst_pos:
5644 case ldst_imm9:
5645 case ldst_unscaled:
5646 case ldst_unpriv:
5647 /* Loading/storing the base register is unpredictable if writeback. */
5648 if ((aarch64_get_operand_class (opnds[0].type)
5649 == AARCH64_OPND_CLASS_INT_REG)
5650 && opnds[0].reg.regno == opnds[1].addr.base_regno
5651 && opnds[1].addr.base_regno != REG_SP
5652 && opnds[1].addr.writeback)
5653 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5654 break;
5655 case ldstpair_off:
5656 case ldstnapair_offs:
5657 case ldstpair_indexed:
5658 /* Loading/storing the base register is unpredictable if writeback. */
5659 if ((aarch64_get_operand_class (opnds[0].type)
5660 == AARCH64_OPND_CLASS_INT_REG)
5661 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5662 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5663 && opnds[2].addr.base_regno != REG_SP
5664 && opnds[2].addr.writeback)
5665 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5666 /* Load operations must load different registers. */
5667 if ((opcode->opcode & (1 << 22))
5668 && opnds[0].reg.regno == opnds[1].reg.regno)
5669 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5670 break;
5671 default:
5672 break;
5673 }
5674 }
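/* For example (illustrative assembly only), "ldr x0, [x0, #8]!" triggers
   the writeback warning above because the transfer register is also the
   base register, and "ldp x0, x0, [x1]" triggers the register-pair warning
   because a load pair must load two different registers.  */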
5675
5676 /* A wrapper function to interface with libopcodes on encoding and
5677 record the error message if there is any.
5678
5679 Return TRUE on success; otherwise return FALSE. */
5680
5681 static bfd_boolean
5682 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5683 aarch64_insn *code)
5684 {
5685 aarch64_operand_error error_info;
5686 error_info.kind = AARCH64_OPDE_NIL;
5687 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5688 return TRUE;
5689 else
5690 {
5691 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5692 record_operand_error_info (opcode, &error_info);
5693 return FALSE;
5694 }
5695 }
5696
5697 #ifdef DEBUG_AARCH64
5698 static inline void
5699 dump_opcode_operands (const aarch64_opcode *opcode)
5700 {
5701 int i = 0;
5702 while (opcode->operands[i] != AARCH64_OPND_NIL)
5703 {
5704 aarch64_verbose ("\t\t opnd%d: %s", i,
5705 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5706 ? aarch64_get_operand_name (opcode->operands[i])
5707 : aarch64_get_operand_desc (opcode->operands[i]));
5708 ++i;
5709 }
5710 }
5711 #endif /* DEBUG_AARCH64 */
5712
5713 /* This is the guts of the machine-dependent assembler. STR points to a
5714 machine dependent instruction. This function is supposed to emit
5715 the frags/bytes it assembles to. */
5716
5717 void
5718 md_assemble (char *str)
5719 {
5720 char *p = str;
5721 templates *template;
5722 aarch64_opcode *opcode;
5723 aarch64_inst *inst_base;
5724 unsigned saved_cond;
5725
5726 /* Align the previous label if needed. */
5727 if (last_label_seen != NULL)
5728 {
5729 symbol_set_frag (last_label_seen, frag_now);
5730 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5731 S_SET_SEGMENT (last_label_seen, now_seg);
5732 }
5733
5734 inst.reloc.type = BFD_RELOC_UNUSED;
5735
5736 DEBUG_TRACE ("\n\n");
5737 DEBUG_TRACE ("==============================");
5738 DEBUG_TRACE ("Enter md_assemble with %s", str);
5739
5740 template = opcode_lookup (&p);
5741 if (!template)
5742 {
5743 /* It wasn't an instruction, but it might be a register alias created
5744 by a directive of the form "alias .req reg". */
5745 if (!create_register_alias (str, p))
5746 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5747 str);
5748 return;
5749 }
5750
5751 skip_whitespace (p);
5752 if (*p == ',')
5753 {
5754 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5755 get_mnemonic_name (str), str);
5756 return;
5757 }
5758
5759 init_operand_error_report ();
5760
5761 /* Sections are assumed to start aligned. In an executable section, there is
5762 no MAP_DATA symbol pending. So we only align the address during the
5763 MAP_DATA --> MAP_INSN transition.
5764 For other sections, this is not guaranteed. */
5765 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5766 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5767 frag_align_code (2, 0);
5768
5769 saved_cond = inst.cond;
5770 reset_aarch64_instruction (&inst);
5771 inst.cond = saved_cond;
5772
5773 /* Iterate through all opcode entries with the same mnemonic name. */
5774 do
5775 {
5776 opcode = template->opcode;
5777
5778 DEBUG_TRACE ("opcode %s found", opcode->name);
5779 #ifdef DEBUG_AARCH64
5780 if (debug_dump)
5781 dump_opcode_operands (opcode);
5782 #endif /* DEBUG_AARCH64 */
5783
5784 mapping_state (MAP_INSN);
5785
5786 inst_base = &inst.base;
5787 inst_base->opcode = opcode;
5788
5789 /* Truly conditionally executed instructions, e.g. b.cond. */
5790 if (opcode->flags & F_COND)
5791 {
5792 gas_assert (inst.cond != COND_ALWAYS);
5793 inst_base->cond = get_cond_from_value (inst.cond);
5794 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5795 }
5796 else if (inst.cond != COND_ALWAYS)
5797 {
5798 /* We shouldn't get here: the assembly looks like a conditional
5799 instruction but the opcode found is unconditional. */
5800 gas_assert (0);
5801 continue;
5802 }
5803
5804 if (parse_operands (p, opcode)
5805 && programmer_friendly_fixup (&inst)
5806 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5807 {
5808 /* Check that this instruction is supported for this CPU. */
5809 if (!opcode->avariant
5810 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5811 {
5812 as_bad (_("selected processor does not support `%s'"), str);
5813 return;
5814 }
5815
5816 warn_unpredictable_ldst (&inst, str);
5817
5818 if (inst.reloc.type == BFD_RELOC_UNUSED
5819 || !inst.reloc.need_libopcodes_p)
5820 output_inst (NULL);
5821 else
5822 {
5823 /* If there is relocation generated for the instruction,
5824 store the instruction information for the future fix-up. */
5825 struct aarch64_inst *copy;
5826 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5827 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5828 abort ();
5829 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5830 output_inst (copy);
5831 }
5832 return;
5833 }
5834
5835 template = template->next;
5836 if (template != NULL)
5837 {
5838 reset_aarch64_instruction (&inst);
5839 inst.cond = saved_cond;
5840 }
5841 }
5842 while (template != NULL);
5843
5844 /* Issue the error messages if any. */
5845 output_operand_error_report (str);
5846 }
5847
5848 /* Various frobbings of labels and their addresses. */
5849
5850 void
5851 aarch64_start_line_hook (void)
5852 {
5853 last_label_seen = NULL;
5854 }
5855
5856 void
5857 aarch64_frob_label (symbolS * sym)
5858 {
5859 last_label_seen = sym;
5860
5861 dwarf2_emit_label (sym);
5862 }
5863
5864 int
5865 aarch64_data_in_code (void)
5866 {
5867 if (!strncmp (input_line_pointer + 1, "data:", 5))
5868 {
5869 *input_line_pointer = '/';
5870 input_line_pointer += 5;
5871 *input_line_pointer = 0;
5872 return 1;
5873 }
5874
5875 return 0;
5876 }
5877
5878 char *
5879 aarch64_canonicalize_symbol_name (char *name)
5880 {
5881 int len;
5882
5883 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5884 *(name + len - 5) = 0;
5885
5886 return name;
5887 }
5888 \f
5889 /* Table of all register names defined by default. The user can
5890 define additional names with .req. Note that all register names
5891 should appear in both upper and lowercase variants. Some registers
5892 also have mixed-case names. */
5893
5894 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5895 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5896 #define REGSET31(p,t) \
5897 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5898 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5899 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5900 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5901 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5902 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5903 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5904 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5905 #define REGSET(p,t) \
5906 REGSET31(p,t), REGNUM(p,31,t)
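/* For example, REGDEF (x0, 0, R_64) expands to the entry
   { "x0", 0, REG_TYPE_R_64, TRUE } and REGSET31 (x, R_64) produces the
   thirty-one entries x0 .. x30 (x31 is deliberately left out so that
   sp/xzr can be defined separately below).  */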
5907
5908 /* These go into aarch64_reg_hsh hash-table. */
5909 static const reg_entry reg_names[] = {
5910 /* Integer registers. */
5911 REGSET31 (x, R_64), REGSET31 (X, R_64),
5912 REGSET31 (w, R_32), REGSET31 (W, R_32),
5913
5914 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5915 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5916
5917 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5918 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5919
5920 /* Coprocessor register numbers. */
5921 REGSET (c, CN), REGSET (C, CN),
5922
5923 /* Floating-point single precision registers. */
5924 REGSET (s, FP_S), REGSET (S, FP_S),
5925
5926 /* Floating-point double precision registers. */
5927 REGSET (d, FP_D), REGSET (D, FP_D),
5928
5929 /* Floating-point half precision registers. */
5930 REGSET (h, FP_H), REGSET (H, FP_H),
5931
5932 /* Floating-point byte precision registers. */
5933 REGSET (b, FP_B), REGSET (B, FP_B),
5934
5935 /* Floating-point quad precision registers. */
5936 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5937
5938 /* FP/SIMD registers. */
5939 REGSET (v, VN), REGSET (V, VN),
5940 };
5941
5942 #undef REGDEF
5943 #undef REGNUM
5944 #undef REGSET
5945
5946 #define N 1
5947 #define n 0
5948 #define Z 1
5949 #define z 0
5950 #define C 1
5951 #define c 0
5952 #define V 1
5953 #define v 0
5954 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
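/* For instance, B (N, z, C, v) == 0b1010, i.e. the table entry "NzCv" below
   selects the N and C flags (illustrative reading of the encoding).  */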
5955 static const asm_nzcv nzcv_names[] = {
5956 {"nzcv", B (n, z, c, v)},
5957 {"nzcV", B (n, z, c, V)},
5958 {"nzCv", B (n, z, C, v)},
5959 {"nzCV", B (n, z, C, V)},
5960 {"nZcv", B (n, Z, c, v)},
5961 {"nZcV", B (n, Z, c, V)},
5962 {"nZCv", B (n, Z, C, v)},
5963 {"nZCV", B (n, Z, C, V)},
5964 {"Nzcv", B (N, z, c, v)},
5965 {"NzcV", B (N, z, c, V)},
5966 {"NzCv", B (N, z, C, v)},
5967 {"NzCV", B (N, z, C, V)},
5968 {"NZcv", B (N, Z, c, v)},
5969 {"NZcV", B (N, Z, c, V)},
5970 {"NZCv", B (N, Z, C, v)},
5971 {"NZCV", B (N, Z, C, V)}
5972 };
5973
5974 #undef N
5975 #undef n
5976 #undef Z
5977 #undef z
5978 #undef C
5979 #undef c
5980 #undef V
5981 #undef v
5982 #undef B
5983 \f
5984 /* MD interface: bits in the object file. */
5985
5986 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5987    for use in the a.out file, and store them in the array pointed to by buf.
5988    This knows about the endian-ness of the target machine and does
5989    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
5990    2 (short) and 4 (long).  Floating-point numbers are put out as a series
5991    of LITTLENUMS (shorts, here at least).  */
5992
5993 void
5994 md_number_to_chars (char *buf, valueT val, int n)
5995 {
5996 if (target_big_endian)
5997 number_to_chars_bigendian (buf, val, n);
5998 else
5999 number_to_chars_littleendian (buf, val, n);
6000 }
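/* For example, md_number_to_chars (buf, 0xd503201f, 4) on a little-endian
   target stores the bytes 1f 20 03 d5 (illustrative; compare the
   aarch64_noop pattern used for alignment below).  */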
6001
6002 /* MD interface: Sections. */
6003
6004 /* Estimate the size of a frag before relaxing. Assume everything fits in
6005 4 bytes. */
6006
6007 int
6008 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6009 {
6010 fragp->fr_var = 4;
6011 return 4;
6012 }
6013
6014 /* Round up a section size to the appropriate boundary. */
6015
6016 valueT
6017 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6018 {
6019 return size;
6020 }
6021
6022 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6023 of an rs_align_code fragment.
6024
6025 Here we fill the frag with the appropriate info for padding the
6026 output stream. The resulting frag will consist of a fixed (fr_fix)
6027 and of a repeating (fr_var) part.
6028
6029 The fixed content is always emitted before the repeating content and
6030 these two parts are used as follows in constructing the output:
6031 - the fixed part will be used to align to a valid instruction word
6032      boundary, in case we start at a misaligned address; as no
6033 executable instruction can live at the misaligned location, we
6034 simply fill with zeros;
6035 - the variable part will be used to cover the remaining padding and
6036 we fill using the AArch64 NOP instruction.
6037
6038    Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6039    enough storage space for up to 3 bytes of padding back to a valid
6040    instruction alignment and exactly 4 bytes to store the NOP pattern.  */
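/* For example (illustrative): when aligning to 16 bytes from an address
   ending in ...6, the 2 zero bytes of the fixed part reach the next 4-byte
   boundary and the remaining 8 bytes are covered by two copies of the NOP
   pattern emitted from the variable part.  */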
6041
6042 void
6043 aarch64_handle_align (fragS * fragP)
6044 {
6045 /* NOP = d503201f */
6046 /* AArch64 instructions are always little-endian. */
6047 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6048
6049 int bytes, fix, noop_size;
6050 char *p;
6051
6052 if (fragP->fr_type != rs_align_code)
6053 return;
6054
6055 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6056 p = fragP->fr_literal + fragP->fr_fix;
6057
6058 #ifdef OBJ_ELF
6059 gas_assert (fragP->tc_frag_data.recorded);
6060 #endif
6061
6062 noop_size = sizeof (aarch64_noop);
6063
6064 fix = bytes & (noop_size - 1);
6065 if (fix)
6066 {
6067 #ifdef OBJ_ELF
6068 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6069 #endif
6070 memset (p, 0, fix);
6071 p += fix;
6072 fragP->fr_fix += fix;
6073 }
6074
6075 if (noop_size)
6076 memcpy (p, aarch64_noop, noop_size);
6077 fragP->fr_var = noop_size;
6078 }
6079
6080 /* Perform target specific initialisation of a frag.
6081 Note - despite the name this initialisation is not done when the frag
6082 is created, but only when its type is assigned. A frag can be created
6083 and used a long time before its type is set, so beware of assuming that
6084    this initialisation is performed first.  */
6085
6086 #ifndef OBJ_ELF
6087 void
6088 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6089 int max_chars ATTRIBUTE_UNUSED)
6090 {
6091 }
6092
6093 #else /* OBJ_ELF is defined. */
6094 void
6095 aarch64_init_frag (fragS * fragP, int max_chars)
6096 {
6097 /* Record a mapping symbol for alignment frags. We will delete this
6098 later if the alignment ends up empty. */
6099 if (!fragP->tc_frag_data.recorded)
6100 fragP->tc_frag_data.recorded = 1;
6101
6102 switch (fragP->fr_type)
6103 {
6104 case rs_align:
6105 case rs_align_test:
6106 case rs_fill:
6107 mapping_state_2 (MAP_DATA, max_chars);
6108 break;
6109 case rs_align_code:
6110 mapping_state_2 (MAP_INSN, max_chars);
6111 break;
6112 default:
6113 break;
6114 }
6115 }
6116 \f
6117 /* Initialize the DWARF-2 unwind information for this procedure. */
6118
6119 void
6120 tc_aarch64_frame_initial_instructions (void)
6121 {
6122 cfi_add_CFA_def_cfa (REG_SP, 0);
6123 }
6124 #endif /* OBJ_ELF */
6125
6126 /* Convert REGNAME to a DWARF-2 register number. */
6127
6128 int
6129 tc_aarch64_regname_to_dw2regnum (char *regname)
6130 {
6131 const reg_entry *reg = parse_reg (&regname);
6132 if (reg == NULL)
6133 return -1;
6134
6135 switch (reg->type)
6136 {
6137 case REG_TYPE_SP_32:
6138 case REG_TYPE_SP_64:
6139 case REG_TYPE_R_32:
6140 case REG_TYPE_R_64:
6141 return reg->number;
6142
6143 case REG_TYPE_FP_B:
6144 case REG_TYPE_FP_H:
6145 case REG_TYPE_FP_S:
6146 case REG_TYPE_FP_D:
6147 case REG_TYPE_FP_Q:
6148 return reg->number + 64;
6149
6150 default:
6151 break;
6152 }
6153 return -1;
6154 }
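/* e.g. (illustrative) "x5" maps to DWARF register number 5, while "d5" maps
   to 5 + 64 == 69, per the switch above.  */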
6155
6156 /* Implement DWARF2_ADDR_SIZE. */
6157
6158 int
6159 aarch64_dwarf2_addr_size (void)
6160 {
6161 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6162 if (ilp32_p)
6163 return 4;
6164 #endif
6165 return bfd_arch_bits_per_address (stdoutput) / 8;
6166 }
6167
6168 /* MD interface: Symbol and relocation handling. */
6169
6170 /* Return the address within the segment that a PC-relative fixup is
6171    relative to.  For AArch64, PC-relative fixups applied to instructions
6172    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
6173
6174 long
6175 md_pcrel_from_section (fixS * fixP, segT seg)
6176 {
6177 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6178
6179 /* If this is pc-relative and we are going to emit a relocation
6180 then we just want to put out any pipeline compensation that the linker
6181 will need. Otherwise we want to use the calculated base. */
6182 if (fixP->fx_pcrel
6183 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6184 || aarch64_force_relocation (fixP)))
6185 base = 0;
6186
6187 /* AArch64 should be consistent for all pc-relative relocations. */
6188 return base + AARCH64_PCREL_OFFSET;
6189 }
6190
6191 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6192    Otherwise we have no need to provide default values for symbols.  */
6193
6194 symbolS *
6195 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6196 {
6197 #ifdef OBJ_ELF
6198 if (name[0] == '_' && name[1] == 'G'
6199 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6200 {
6201 if (!GOT_symbol)
6202 {
6203 if (symbol_find (name))
6204 as_bad (_("GOT already in the symbol table"));
6205
6206 GOT_symbol = symbol_new (name, undefined_section,
6207 (valueT) 0, &zero_address_frag);
6208 }
6209
6210 return GOT_symbol;
6211 }
6212 #endif
6213
6214 return 0;
6215 }
6216
6217 /* Return non-zero if the indicated VALUE has overflowed the maximum
6218    range expressible by an unsigned number with the indicated number of
6219 BITS. */
6220
6221 static bfd_boolean
6222 unsigned_overflow (valueT value, unsigned bits)
6223 {
6224 valueT lim;
6225 if (bits >= sizeof (valueT) * 8)
6226 return FALSE;
6227 lim = (valueT) 1 << bits;
6228 return (value >= lim);
6229 }
6230
6231
6232 /* Return non-zero if the indicated VALUE has overflowed the maximum
6233    range expressible by a signed number with the indicated number of
6234 BITS. */
6235
6236 static bfd_boolean
6237 signed_overflow (offsetT value, unsigned bits)
6238 {
6239 offsetT lim;
6240 if (bits >= sizeof (offsetT) * 8)
6241 return FALSE;
6242 lim = (offsetT) 1 << (bits - 1);
6243 return (value < -lim || value >= lim);
6244 }
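/* e.g. (illustrative) signed_overflow (value, 21) accepts values in
   [-0x100000, 0xfffff]; this is the check used for the 21-bit PC-relative
   offsets handled in md_apply_fix below.  */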
6245
6246 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6247 unsigned immediate offset load/store instruction, try to encode it as
6248 an unscaled, 9-bit, signed immediate offset load/store instruction.
6249 Return TRUE if it is successful; otherwise return FALSE.
6250
6251 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6252 in response to the standard LDR/STR mnemonics when the immediate offset is
6253 unambiguous, i.e. when it is negative or unaligned. */
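/* A rough illustration (exact diagnostics depend on the operands):

     ldr x0, [x1, #-8]   // negative offset: emitted as  ldur x0, [x1, #-8]
     ldr x0, [x1, #1]    // unaligned for an 8-byte LDR: emitted as LDUR
     ldr x0, [x1, #8]    // representable as scaled: stays an LDR

   i.e. the unscaled form is only used when the scaled encoding cannot
   represent the offset.  */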
6254
6255 static bfd_boolean
6256 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6257 {
6258 int idx;
6259 enum aarch64_op new_op;
6260 const aarch64_opcode *new_opcode;
6261
6262 gas_assert (instr->opcode->iclass == ldst_pos);
6263
6264 switch (instr->opcode->op)
6265 {
6266     case OP_LDRB_POS: new_op = OP_LDURB; break;
6267 case OP_STRB_POS: new_op = OP_STURB; break;
6268 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6269 case OP_LDRH_POS: new_op = OP_LDURH; break;
6270 case OP_STRH_POS: new_op = OP_STURH; break;
6271 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6272 case OP_LDR_POS: new_op = OP_LDUR; break;
6273 case OP_STR_POS: new_op = OP_STUR; break;
6274 case OP_LDRF_POS: new_op = OP_LDURV; break;
6275 case OP_STRF_POS: new_op = OP_STURV; break;
6276 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6277 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6278 default: new_op = OP_NIL; break;
6279 }
6280
6281 if (new_op == OP_NIL)
6282 return FALSE;
6283
6284 new_opcode = aarch64_get_opcode (new_op);
6285 gas_assert (new_opcode != NULL);
6286
6287   DEBUG_TRACE ("Check programmer-friendly STRB/LDRB -> STURB/LDURB: %d == %d",
6288 instr->opcode->op, new_opcode->op);
6289
6290 aarch64_replace_opcode (instr, new_opcode);
6291
6292 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6293 qualifier matching may fail because the out-of-date qualifier will
6294 prevent the operand being updated with a new and correct qualifier. */
6295 idx = aarch64_operand_index (instr->opcode->operands,
6296 AARCH64_OPND_ADDR_SIMM9);
6297 gas_assert (idx == 1);
6298 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6299
6300 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6301
6302 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6303 return FALSE;
6304
6305 return TRUE;
6306 }
6307
6308 /* Called by fix_insn to fix a MOV immediate alias instruction.
6309
6310 Operand for a generic move immediate instruction, which is an alias
6311    instruction that generates a single MOVZ, MOVN or ORR instruction to load
6312    a 32-bit/64-bit immediate value into a general register.  An assembler error
6313    shall result if the immediate cannot be created by a single one of these
6314    instructions.  If there is a choice, then to ensure reversibility an
6315    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
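/* Illustrative examples (not exhaustive):

     mov x0, #0x12340000          // single MOVZ (one 16-bit chunk, shifted)
     mov x0, #0xffffffffffff1234  // single MOVN
     mov x0, #0x00ff00ff00ff00ff  // single ORR with a bitmask immediate
     mov x0, #0x123456789         // none of the above: assembly error  */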
6316
6317 static void
6318 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6319 {
6320 const aarch64_opcode *opcode;
6321
6322 /* Need to check if the destination is SP/ZR. The check has to be done
6323 before any aarch64_replace_opcode. */
6324 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6325 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6326
6327 instr->operands[1].imm.value = value;
6328 instr->operands[1].skip = 0;
6329
6330 if (try_mov_wide_p)
6331 {
6332 /* Try the MOVZ alias. */
6333 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6334 aarch64_replace_opcode (instr, opcode);
6335 if (aarch64_opcode_encode (instr->opcode, instr,
6336 &instr->value, NULL, NULL))
6337 {
6338 put_aarch64_insn (buf, instr->value);
6339 return;
6340 }
6341       /* Try the MOVN alias.  */
6342 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6343 aarch64_replace_opcode (instr, opcode);
6344 if (aarch64_opcode_encode (instr->opcode, instr,
6345 &instr->value, NULL, NULL))
6346 {
6347 put_aarch64_insn (buf, instr->value);
6348 return;
6349 }
6350 }
6351
6352 if (try_mov_bitmask_p)
6353 {
6354 /* Try the ORR alias. */
6355 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6356 aarch64_replace_opcode (instr, opcode);
6357 if (aarch64_opcode_encode (instr->opcode, instr,
6358 &instr->value, NULL, NULL))
6359 {
6360 put_aarch64_insn (buf, instr->value);
6361 return;
6362 }
6363 }
6364
6365 as_bad_where (fixP->fx_file, fixP->fx_line,
6366 _("immediate cannot be moved by a single instruction"));
6367 }
6368
6369 /* An instruction operand which is immediate-related may have a symbol used
6370    in the assembly, e.g.
6371
6372 mov w0, u32
6373 .set u32, 0x00ffff00
6374
6375 At the time when the assembly instruction is parsed, a referenced symbol,
6376    like 'u32' in the above example, may not have been seen; a fixS is created
6377    in such a case and is handled here after symbols have been resolved.
6378    The instruction is fixed up with VALUE using the information in *FIXP plus
6379 extra information in FLAGS.
6380
6381 This function is called by md_apply_fix to fix up instructions that need
6382 a fix-up described above but does not involve any linker-time relocation. */
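/* To continue the example above (illustrative): once 'u32' resolves to
   0x00ffff00, both 16-bit halves are non-zero, so the MOV cannot be a single
   MOVZ/MOVN and fix_mov_imm_insn falls back to the ORR (bitmask immediate)
   alias.  */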
6383
6384 static void
6385 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6386 {
6387 int idx;
6388 uint32_t insn;
6389 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6390 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6391 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6392
6393 if (new_inst)
6394 {
6395 /* Now the instruction is about to be fixed-up, so the operand that
6396 was previously marked as 'ignored' needs to be unmarked in order
6397 to get the encoding done properly. */
6398 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6399 new_inst->operands[idx].skip = 0;
6400 }
6401
6402 gas_assert (opnd != AARCH64_OPND_NIL);
6403
6404 switch (opnd)
6405 {
6406 case AARCH64_OPND_EXCEPTION:
6407 if (unsigned_overflow (value, 16))
6408 as_bad_where (fixP->fx_file, fixP->fx_line,
6409 _("immediate out of range"));
6410 insn = get_aarch64_insn (buf);
6411 insn |= encode_svc_imm (value);
6412 put_aarch64_insn (buf, insn);
6413 break;
6414
6415 case AARCH64_OPND_AIMM:
6416 /* ADD or SUB with immediate.
6417 NOTE this assumes we come here with a add/sub shifted reg encoding
6418 3 322|2222|2 2 2 21111 111111
6419 1 098|7654|3 2 1 09876 543210 98765 43210
6420 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6421 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6422 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6423 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6424 ->
6425 3 322|2222|2 2 221111111111
6426 1 098|7654|3 2 109876543210 98765 43210
6427 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6428 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6429 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6430 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6431 Fields sf Rn Rd are already set. */
6432 insn = get_aarch64_insn (buf);
6433 if (value < 0)
6434 {
6435 /* Add <-> sub. */
6436 insn = reencode_addsub_switch_add_sub (insn);
6437 value = -value;
6438 }
6439
6440 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6441 && unsigned_overflow (value, 12))
6442 {
6443 /* Try to shift the value by 12 to make it fit. */
6444 if (((value >> 12) << 12) == value
6445 && ! unsigned_overflow (value, 12 + 12))
6446 {
6447 value >>= 12;
6448 insn |= encode_addsub_imm_shift_amount (1);
6449 }
6450 }
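	  /* e.g. (illustrative) an immediate of 0x5000 does not fit in 12 bits,
	     but 0x5000 == 0x5 << 12, so it is encoded as #0x5 with LSL #12.  */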
6451
6452 if (unsigned_overflow (value, 12))
6453 as_bad_where (fixP->fx_file, fixP->fx_line,
6454 _("immediate out of range"));
6455
6456 insn |= encode_addsub_imm (value);
6457
6458 put_aarch64_insn (buf, insn);
6459 break;
6460
6461 case AARCH64_OPND_SIMD_IMM:
6462 case AARCH64_OPND_SIMD_IMM_SFT:
6463 case AARCH64_OPND_LIMM:
6464 /* Bit mask immediate. */
6465 gas_assert (new_inst != NULL);
6466 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6467 new_inst->operands[idx].imm.value = value;
6468 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6469 &new_inst->value, NULL, NULL))
6470 put_aarch64_insn (buf, new_inst->value);
6471 else
6472 as_bad_where (fixP->fx_file, fixP->fx_line,
6473 _("invalid immediate"));
6474 break;
6475
6476 case AARCH64_OPND_HALF:
6477 /* 16-bit unsigned immediate. */
6478 if (unsigned_overflow (value, 16))
6479 as_bad_where (fixP->fx_file, fixP->fx_line,
6480 _("immediate out of range"));
6481 insn = get_aarch64_insn (buf);
6482 insn |= encode_movw_imm (value & 0xffff);
6483 put_aarch64_insn (buf, insn);
6484 break;
6485
6486 case AARCH64_OPND_IMM_MOV:
6487 /* Operand for a generic move immediate instruction, which is
6488 an alias instruction that generates a single MOVZ, MOVN or ORR
6489          instruction to load a 32-bit/64-bit immediate value into a general
6490          register.  An assembler error shall result if the immediate cannot be
6491          created by a single one of these instructions.  If there is a choice,
6492          then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6493          and MOVZ or MOVN to ORR.  */
6494 gas_assert (new_inst != NULL);
6495 fix_mov_imm_insn (fixP, buf, new_inst, value);
6496 break;
6497
6498 case AARCH64_OPND_ADDR_SIMM7:
6499 case AARCH64_OPND_ADDR_SIMM9:
6500 case AARCH64_OPND_ADDR_SIMM9_2:
6501 case AARCH64_OPND_ADDR_UIMM12:
6502 /* Immediate offset in an address. */
6503 insn = get_aarch64_insn (buf);
6504
6505 gas_assert (new_inst != NULL && new_inst->value == insn);
6506 gas_assert (new_inst->opcode->operands[1] == opnd
6507 || new_inst->opcode->operands[2] == opnd);
6508
6509 /* Get the index of the address operand. */
6510 if (new_inst->opcode->operands[1] == opnd)
6511 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6512 idx = 1;
6513 else
6514 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6515 idx = 2;
6516
6517 /* Update the resolved offset value. */
6518 new_inst->operands[idx].addr.offset.imm = value;
6519
6520 /* Encode/fix-up. */
6521 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6522 &new_inst->value, NULL, NULL))
6523 {
6524 put_aarch64_insn (buf, new_inst->value);
6525 break;
6526 }
6527 else if (new_inst->opcode->iclass == ldst_pos
6528 && try_to_encode_as_unscaled_ldst (new_inst))
6529 {
6530 put_aarch64_insn (buf, new_inst->value);
6531 break;
6532 }
6533
6534 as_bad_where (fixP->fx_file, fixP->fx_line,
6535 _("immediate offset out of range"));
6536 break;
6537
6538 default:
6539 gas_assert (0);
6540 as_fatal (_("unhandled operand code %d"), opnd);
6541 }
6542 }
6543
6544 /* Apply a fixup (fixP) to segment data, once it has been determined
6545 by our caller that we have all the info we need to fix it up.
6546
6547 Parameter valP is the pointer to the value of the bits. */
6548
6549 void
6550 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6551 {
6552 offsetT value = *valP;
6553 uint32_t insn;
6554 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6555 int scale;
6556 unsigned flags = fixP->fx_addnumber;
6557
6558 DEBUG_TRACE ("\n\n");
6559 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6560 DEBUG_TRACE ("Enter md_apply_fix");
6561
6562 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6563
6564 /* Note whether this will delete the relocation. */
6565
6566 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6567 fixP->fx_done = 1;
6568
6569 /* Process the relocations. */
6570 switch (fixP->fx_r_type)
6571 {
6572 case BFD_RELOC_NONE:
6573 /* This will need to go in the object file. */
6574 fixP->fx_done = 0;
6575 break;
6576
6577 case BFD_RELOC_8:
6578 case BFD_RELOC_8_PCREL:
6579 if (fixP->fx_done || !seg->use_rela_p)
6580 md_number_to_chars (buf, value, 1);
6581 break;
6582
6583 case BFD_RELOC_16:
6584 case BFD_RELOC_16_PCREL:
6585 if (fixP->fx_done || !seg->use_rela_p)
6586 md_number_to_chars (buf, value, 2);
6587 break;
6588
6589 case BFD_RELOC_32:
6590 case BFD_RELOC_32_PCREL:
6591 if (fixP->fx_done || !seg->use_rela_p)
6592 md_number_to_chars (buf, value, 4);
6593 break;
6594
6595 case BFD_RELOC_64:
6596 case BFD_RELOC_64_PCREL:
6597 if (fixP->fx_done || !seg->use_rela_p)
6598 md_number_to_chars (buf, value, 8);
6599 break;
6600
6601 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6602 /* We claim that these fixups have been processed here, even if
6603 in fact we generate an error because we do not have a reloc
6604 for them, so tc_gen_reloc() will reject them. */
6605 fixP->fx_done = 1;
6606 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6607 {
6608 as_bad_where (fixP->fx_file, fixP->fx_line,
6609 _("undefined symbol %s used as an immediate value"),
6610 S_GET_NAME (fixP->fx_addsy));
6611 goto apply_fix_return;
6612 }
6613 fix_insn (fixP, flags, value);
6614 break;
6615
6616 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6617 if (fixP->fx_done || !seg->use_rela_p)
6618 {
6619 if (value & 3)
6620 as_bad_where (fixP->fx_file, fixP->fx_line,
6621 _("pc-relative load offset not word aligned"));
6622 if (signed_overflow (value, 21))
6623 as_bad_where (fixP->fx_file, fixP->fx_line,
6624 _("pc-relative load offset out of range"));
6625 insn = get_aarch64_insn (buf);
6626 insn |= encode_ld_lit_ofs_19 (value >> 2);
6627 put_aarch64_insn (buf, insn);
6628 }
6629 break;
6630
6631 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6632 if (fixP->fx_done || !seg->use_rela_p)
6633 {
6634 if (signed_overflow (value, 21))
6635 as_bad_where (fixP->fx_file, fixP->fx_line,
6636 _("pc-relative address offset out of range"));
6637 insn = get_aarch64_insn (buf);
6638 insn |= encode_adr_imm (value);
6639 put_aarch64_insn (buf, insn);
6640 }
6641 break;
6642
6643 case BFD_RELOC_AARCH64_BRANCH19:
6644 if (fixP->fx_done || !seg->use_rela_p)
6645 {
6646 if (value & 3)
6647 as_bad_where (fixP->fx_file, fixP->fx_line,
6648 _("conditional branch target not word aligned"));
6649 if (signed_overflow (value, 21))
6650 as_bad_where (fixP->fx_file, fixP->fx_line,
6651 _("conditional branch out of range"));
6652 insn = get_aarch64_insn (buf);
6653 insn |= encode_cond_branch_ofs_19 (value >> 2);
6654 put_aarch64_insn (buf, insn);
6655 }
6656 break;
6657
6658 case BFD_RELOC_AARCH64_TSTBR14:
6659 if (fixP->fx_done || !seg->use_rela_p)
6660 {
6661 if (value & 3)
6662 as_bad_where (fixP->fx_file, fixP->fx_line,
6663 _("conditional branch target not word aligned"));
6664 if (signed_overflow (value, 16))
6665 as_bad_where (fixP->fx_file, fixP->fx_line,
6666 _("conditional branch out of range"));
6667 insn = get_aarch64_insn (buf);
6668 insn |= encode_tst_branch_ofs_14 (value >> 2);
6669 put_aarch64_insn (buf, insn);
6670 }
6671 break;
6672
6673 case BFD_RELOC_AARCH64_CALL26:
6674 case BFD_RELOC_AARCH64_JUMP26:
6675 if (fixP->fx_done || !seg->use_rela_p)
6676 {
6677 if (value & 3)
6678 as_bad_where (fixP->fx_file, fixP->fx_line,
6679 _("branch target not word aligned"));
6680 if (signed_overflow (value, 28))
6681 as_bad_where (fixP->fx_file, fixP->fx_line,
6682 _("branch out of range"));
6683 insn = get_aarch64_insn (buf);
6684 insn |= encode_branch_ofs_26 (value >> 2);
6685 put_aarch64_insn (buf, insn);
6686 }
6687 break;
6688
6689 case BFD_RELOC_AARCH64_MOVW_G0:
6690 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6691 case BFD_RELOC_AARCH64_MOVW_G0_S:
6692 scale = 0;
6693 goto movw_common;
6694 case BFD_RELOC_AARCH64_MOVW_G1:
6695 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6696 case BFD_RELOC_AARCH64_MOVW_G1_S:
6697 scale = 16;
6698 goto movw_common;
6699 case BFD_RELOC_AARCH64_MOVW_G2:
6700 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6701 case BFD_RELOC_AARCH64_MOVW_G2_S:
6702 scale = 32;
6703 goto movw_common;
6704 case BFD_RELOC_AARCH64_MOVW_G3:
6705 scale = 48;
6706 movw_common:
6707 if (fixP->fx_done || !seg->use_rela_p)
6708 {
6709 insn = get_aarch64_insn (buf);
6710
6711 if (!fixP->fx_done)
6712 {
6713 /* REL signed addend must fit in 16 bits */
6714 if (signed_overflow (value, 16))
6715 as_bad_where (fixP->fx_file, fixP->fx_line,
6716 _("offset out of range"));
6717 }
6718 else
6719 {
6720 /* Check for overflow and scale. */
6721 switch (fixP->fx_r_type)
6722 {
6723 case BFD_RELOC_AARCH64_MOVW_G0:
6724 case BFD_RELOC_AARCH64_MOVW_G1:
6725 case BFD_RELOC_AARCH64_MOVW_G2:
6726 case BFD_RELOC_AARCH64_MOVW_G3:
6727 if (unsigned_overflow (value, scale + 16))
6728 as_bad_where (fixP->fx_file, fixP->fx_line,
6729 _("unsigned value out of range"));
6730 break;
6731 case BFD_RELOC_AARCH64_MOVW_G0_S:
6732 case BFD_RELOC_AARCH64_MOVW_G1_S:
6733 case BFD_RELOC_AARCH64_MOVW_G2_S:
6734 /* NOTE: We can only come here with movz or movn. */
6735 if (signed_overflow (value, scale + 16))
6736 as_bad_where (fixP->fx_file, fixP->fx_line,
6737 _("signed value out of range"));
6738 if (value < 0)
6739 {
6740 /* Force use of MOVN. */
6741 value = ~value;
6742 insn = reencode_movzn_to_movn (insn);
6743 }
6744 else
6745 {
6746 /* Force use of MOVZ. */
6747 insn = reencode_movzn_to_movz (insn);
6748 }
6749 break;
6750 default:
6751 /* Unchecked relocations. */
6752 break;
6753 }
6754 value >>= scale;
6755 }
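	  /* e.g. (illustrative) if a G0_S fixup resolves to -2, the value is
	     inverted to 1 and the insn is rewritten as MOVN, since
	     MOVN Xd, #1 materialises -2.  */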
6756
6757 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6758 insn |= encode_movw_imm (value & 0xffff);
6759
6760 put_aarch64_insn (buf, insn);
6761 }
6762 break;
6763
6764 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6765 fixP->fx_r_type = (ilp32_p
6766 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6767 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6768 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6769 /* Should always be exported to object file, see
6770 aarch64_force_relocation(). */
6771 gas_assert (!fixP->fx_done);
6772 gas_assert (seg->use_rela_p);
6773 break;
6774
6775 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6776 fixP->fx_r_type = (ilp32_p
6777 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6778 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6779 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6780 /* Should always be exported to object file, see
6781 aarch64_force_relocation(). */
6782 gas_assert (!fixP->fx_done);
6783 gas_assert (seg->use_rela_p);
6784 break;
6785
6786 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6787 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6788 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6789 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6790 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6791 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6792 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6793 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6794 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6795 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6796 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6797 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6798 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6799 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6800 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6801 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6802 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6803 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6804 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6805 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6806 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6807 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6808 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6809 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6810 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6811 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6812 /* Should always be exported to object file, see
6813 aarch64_force_relocation(). */
6814 gas_assert (!fixP->fx_done);
6815 gas_assert (seg->use_rela_p);
6816 break;
6817
6818 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6819 /* Should always be exported to object file, see
6820 aarch64_force_relocation(). */
6821 fixP->fx_r_type = (ilp32_p
6822 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6823 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6824 gas_assert (!fixP->fx_done);
6825 gas_assert (seg->use_rela_p);
6826 break;
6827
6828 case BFD_RELOC_AARCH64_ADD_LO12:
6829 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6830 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6831 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6832 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6833 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6834 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6835 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6836 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6837 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6838 case BFD_RELOC_AARCH64_LDST128_LO12:
6839 case BFD_RELOC_AARCH64_LDST16_LO12:
6840 case BFD_RELOC_AARCH64_LDST32_LO12:
6841 case BFD_RELOC_AARCH64_LDST64_LO12:
6842 case BFD_RELOC_AARCH64_LDST8_LO12:
6843 /* Should always be exported to object file, see
6844 aarch64_force_relocation(). */
6845 gas_assert (!fixP->fx_done);
6846 gas_assert (seg->use_rela_p);
6847 break;
6848
6849 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6850 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6851 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6852 break;
6853
6854 case BFD_RELOC_UNUSED:
6855 /* An error will already have been reported. */
6856 break;
6857
6858 default:
6859 as_bad_where (fixP->fx_file, fixP->fx_line,
6860 _("unexpected %s fixup"),
6861 bfd_get_reloc_code_name (fixP->fx_r_type));
6862 break;
6863 }
6864
6865 apply_fix_return:
6866   /* Free the allocated struct aarch64_inst.
6867      N.B. currently only a very limited number of fix-up types actually use
6868      this field, so the impact on performance should be minimal.  */
6869 if (fixP->tc_fix_data.inst != NULL)
6870 free (fixP->tc_fix_data.inst);
6871
6872 return;
6873 }
6874
6875 /* Translate internal representation of relocation info to BFD target
6876 format. */
6877
6878 arelent *
6879 tc_gen_reloc (asection * section, fixS * fixp)
6880 {
6881 arelent *reloc;
6882 bfd_reloc_code_real_type code;
6883
6884 reloc = xmalloc (sizeof (arelent));
6885
6886 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6887 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6888 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6889
6890 if (fixp->fx_pcrel)
6891 {
6892 if (section->use_rela_p)
6893 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6894 else
6895 fixp->fx_offset = reloc->address;
6896 }
6897 reloc->addend = fixp->fx_offset;
6898
6899 code = fixp->fx_r_type;
6900 switch (code)
6901 {
6902 case BFD_RELOC_16:
6903 if (fixp->fx_pcrel)
6904 code = BFD_RELOC_16_PCREL;
6905 break;
6906
6907 case BFD_RELOC_32:
6908 if (fixp->fx_pcrel)
6909 code = BFD_RELOC_32_PCREL;
6910 break;
6911
6912 case BFD_RELOC_64:
6913 if (fixp->fx_pcrel)
6914 code = BFD_RELOC_64_PCREL;
6915 break;
6916
6917 default:
6918 break;
6919 }
6920
6921 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6922 if (reloc->howto == NULL)
6923 {
6924 as_bad_where (fixp->fx_file, fixp->fx_line,
6925 _
6926 ("cannot represent %s relocation in this object file format"),
6927 bfd_get_reloc_code_name (code));
6928 return NULL;
6929 }
6930
6931 return reloc;
6932 }
6933
6934 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6935
6936 void
6937 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6938 {
6939 bfd_reloc_code_real_type type;
6940 int pcrel = 0;
6941
6942 /* Pick a reloc.
6943 FIXME: @@ Should look at CPU word size. */
6944 switch (size)
6945 {
6946 case 1:
6947 type = BFD_RELOC_8;
6948 break;
6949 case 2:
6950 type = BFD_RELOC_16;
6951 break;
6952 case 4:
6953 type = BFD_RELOC_32;
6954 break;
6955 case 8:
6956 type = BFD_RELOC_64;
6957 break;
6958 default:
6959 as_bad (_("cannot do %u-byte relocation"), size);
6960 type = BFD_RELOC_UNUSED;
6961 break;
6962 }
6963
6964 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6965 }
6966
6967 int
6968 aarch64_force_relocation (struct fix *fixp)
6969 {
6970 switch (fixp->fx_r_type)
6971 {
6972 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6973 /* Perform these "immediate" internal relocations
6974 even if the symbol is extern or weak. */
6975 return 0;
6976
6977 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6978 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6979 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6980 /* Pseudo relocs that need to be fixed up according to
6981 ilp32_p. */
6982 return 0;
6983
6984 case BFD_RELOC_AARCH64_ADD_LO12:
6985 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6986 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6987 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6988 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6989 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6990 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6991 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6992 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6993 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6994 case BFD_RELOC_AARCH64_LDST128_LO12:
6995 case BFD_RELOC_AARCH64_LDST16_LO12:
6996 case BFD_RELOC_AARCH64_LDST32_LO12:
6997 case BFD_RELOC_AARCH64_LDST64_LO12:
6998 case BFD_RELOC_AARCH64_LDST8_LO12:
6999 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7000 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7001 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7002 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7003 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7004 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7005 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7006 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7007 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7008 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7009 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7010 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7011 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7012 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7013 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7014 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7015 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7016 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7017 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7018 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7019 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7020 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7021 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7022 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7023 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7024 /* Always leave these relocations for the linker. */
7025 return 1;
7026
7027 default:
7028 break;
7029 }
7030
7031 return generic_force_reloc (fixp);
7032 }
7033
7034 #ifdef OBJ_ELF
7035
7036 const char *
7037 elf64_aarch64_target_format (void)
7038 {
7039 if (target_big_endian)
7040 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7041 else
7042 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7043 }
7044
7045 void
7046 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7047 {
7048 elf_frob_symbol (symp, puntp);
7049 }
7050 #endif
7051
7052 /* MD interface: Finalization. */
7053
7054 /* A good place to do this, although this was probably not intended
7055 for this kind of use. We need to dump the literal pool before
7056 references are made to a null symbol pointer. */
7057
7058 void
7059 aarch64_cleanup (void)
7060 {
7061 literal_pool *pool;
7062
7063 for (pool = list_of_pools; pool; pool = pool->next)
7064 {
7065 /* Put it at the end of the relevant section. */
7066 subseg_set (pool->section, pool->sub_section);
7067 s_ltorg (0);
7068 }
7069 }
7070
7071 #ifdef OBJ_ELF
7072 /* Remove any excess mapping symbols generated for alignment frags in
7073 SEC. We may have created a mapping symbol before a zero byte
7074 alignment; remove it if there's a mapping symbol after the
7075 alignment. */
7076 static void
7077 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7078 void *dummy ATTRIBUTE_UNUSED)
7079 {
7080 segment_info_type *seginfo = seg_info (sec);
7081 fragS *fragp;
7082
7083 if (seginfo == NULL || seginfo->frchainP == NULL)
7084 return;
7085
7086 for (fragp = seginfo->frchainP->frch_root;
7087 fragp != NULL; fragp = fragp->fr_next)
7088 {
7089 symbolS *sym = fragp->tc_frag_data.last_map;
7090 fragS *next = fragp->fr_next;
7091
7092 /* Variable-sized frags have been converted to fixed size by
7093 this point. But if this was variable-sized to start with,
7094 there will be a fixed-size frag after it. So don't handle
7095 next == NULL. */
7096 if (sym == NULL || next == NULL)
7097 continue;
7098
7099 if (S_GET_VALUE (sym) < next->fr_address)
7100 /* Not at the end of this frag. */
7101 continue;
7102 know (S_GET_VALUE (sym) == next->fr_address);
7103
7104 do
7105 {
7106 if (next->tc_frag_data.first_map != NULL)
7107 {
7108 /* Next frag starts with a mapping symbol. Discard this
7109 one. */
7110 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7111 break;
7112 }
7113
7114 if (next->fr_next == NULL)
7115 {
7116 /* This mapping symbol is at the end of the section. Discard
7117 it. */
7118 know (next->fr_fix == 0 && next->fr_var == 0);
7119 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7120 break;
7121 }
7122
7123 /* As long as we have empty frags without any mapping symbols,
7124 keep looking. */
7125 /* If the next frag is non-empty and does not start with a
7126 mapping symbol, then this mapping symbol is required. */
7127 if (next->fr_address != next->fr_next->fr_address)
7128 break;
7129
7130 next = next->fr_next;
7131 }
7132 while (next != NULL);
7133 }
7134 }
7135 #endif
7136
7137 /* Adjust the symbol table. */
7138
7139 void
7140 aarch64_adjust_symtab (void)
7141 {
7142 #ifdef OBJ_ELF
7143 /* Remove any overlapping mapping symbols generated by alignment frags. */
7144 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7145 /* Now do generic ELF adjustments. */
7146 elf_adjust_symtab ();
7147 #endif
7148 }
7149
7150 static void
7151 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7152 {
7153 const char *hash_err;
7154
7155 hash_err = hash_insert (table, key, value);
7156 if (hash_err)
7157 printf ("Internal Error: Can't hash %s\n", key);
7158 }
7159
7160 static void
7161 fill_instruction_hash_table (void)
7162 {
7163 aarch64_opcode *opcode = aarch64_opcode_table;
7164
7165 while (opcode->name != NULL)
7166 {
7167 templates *templ, *new_templ;
7168 templ = hash_find (aarch64_ops_hsh, opcode->name);
7169
7170 new_templ = (templates *) xmalloc (sizeof (templates));
7171 new_templ->opcode = opcode;
7172 new_templ->next = NULL;
7173
7174 if (!templ)
7175 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7176 else
7177 {
7178 new_templ->next = templ->next;
7179 templ->next = new_templ;
7180 }
7181 ++opcode;
7182 }
7183 }
7184
7185 static inline void
7186 convert_to_upper (char *dst, const char *src, size_t num)
7187 {
7188 unsigned int i;
7189 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7190 *dst = TOUPPER (*src);
7191 *dst = '\0';
7192 }
7193
7194 /* Assume STR points to a lower-case string; allocate, convert and return
7195 the corresponding upper-case string. */
7196 static inline const char*
7197 get_upper_str (const char *str)
7198 {
7199 char *ret;
7200 size_t len = strlen (str);
7201 if ((ret = xmalloc (len + 1)) == NULL)
7202 abort ();
7203 convert_to_upper (ret, str, len);
7204 return ret;
7205 }
7206
7207 /* MD interface: Initialization. */
7208
7209 void
7210 md_begin (void)
7211 {
7212 unsigned mach;
7213 unsigned int i;
7214
7215 if ((aarch64_ops_hsh = hash_new ()) == NULL
7216 || (aarch64_cond_hsh = hash_new ()) == NULL
7217 || (aarch64_shift_hsh = hash_new ()) == NULL
7218 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7219 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7220 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7221 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7222 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7223 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7224 || (aarch64_reg_hsh = hash_new ()) == NULL
7225 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7226 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7227 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7228 as_fatal (_("virtual memory exhausted"));
7229
7230 fill_instruction_hash_table ();
7231
7232 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7233 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7234 (void *) (aarch64_sys_regs + i));
7235
7236 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7237 checked_hash_insert (aarch64_pstatefield_hsh,
7238 aarch64_pstatefields[i].name,
7239 (void *) (aarch64_pstatefields + i));
7240
7241 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7242 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7243 aarch64_sys_regs_ic[i].template,
7244 (void *) (aarch64_sys_regs_ic + i));
7245
7246 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7247 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7248 aarch64_sys_regs_dc[i].template,
7249 (void *) (aarch64_sys_regs_dc + i));
7250
7251 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7252 checked_hash_insert (aarch64_sys_regs_at_hsh,
7253 aarch64_sys_regs_at[i].template,
7254 (void *) (aarch64_sys_regs_at + i));
7255
7256 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7257 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7258 aarch64_sys_regs_tlbi[i].template,
7259 (void *) (aarch64_sys_regs_tlbi + i));
7260
7261 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7262 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7263 (void *) (reg_names + i));
7264
7265 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7266 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7267 (void *) (nzcv_names + i));
7268
7269 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7270 {
7271 const char *name = aarch64_operand_modifiers[i].name;
7272 checked_hash_insert (aarch64_shift_hsh, name,
7273 (void *) (aarch64_operand_modifiers + i));
7274 /* Also hash the name in the upper case. */
7275 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7276 (void *) (aarch64_operand_modifiers + i));
7277 }
7278
7279 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7280 {
7281 unsigned int j;
7282 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7283 the same condition code. */
7284 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7285 {
7286 const char *name = aarch64_conds[i].names[j];
7287 if (name == NULL)
7288 break;
7289 checked_hash_insert (aarch64_cond_hsh, name,
7290 (void *) (aarch64_conds + i));
7291 /* Also hash the name in the upper case. */
7292 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7293 (void *) (aarch64_conds + i));
7294 }
7295 }
7296
7297 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7298 {
7299 const char *name = aarch64_barrier_options[i].name;
7300       /* Skip xx00 - the unallocated values of the option field.  */
7301 if ((i & 0x3) == 0)
7302 continue;
7303 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7304 (void *) (aarch64_barrier_options + i));
7305 /* Also hash the name in the upper case. */
7306 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7307 (void *) (aarch64_barrier_options + i));
7308 }
7309
7310 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7311 {
7312 const char* name = aarch64_prfops[i].name;
7313 /* Skip the unallocated hint encodings. */
7314 if (name == NULL)
7315 continue;
7316 checked_hash_insert (aarch64_pldop_hsh, name,
7317 (void *) (aarch64_prfops + i));
7318 /* Also hash the name in the upper case. */
7319 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7320 (void *) (aarch64_prfops + i));
7321 }
7322
7323 /* Set the cpu variant based on the command-line options. */
7324 if (!mcpu_cpu_opt)
7325 mcpu_cpu_opt = march_cpu_opt;
7326
7327 if (!mcpu_cpu_opt)
7328 mcpu_cpu_opt = &cpu_default;
7329
7330 cpu_variant = *mcpu_cpu_opt;
7331
7332 /* Record the CPU type. */
7333 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7334
7335 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7336 }
7337
7338 /* Command line processing. */
7339
7340 const char *md_shortopts = "m:";
7341
7342 #ifdef AARCH64_BI_ENDIAN
7343 #define OPTION_EB (OPTION_MD_BASE + 0)
7344 #define OPTION_EL (OPTION_MD_BASE + 1)
7345 #else
7346 #if TARGET_BYTES_BIG_ENDIAN
7347 #define OPTION_EB (OPTION_MD_BASE + 0)
7348 #else
7349 #define OPTION_EL (OPTION_MD_BASE + 1)
7350 #endif
7351 #endif
7352
7353 struct option md_longopts[] = {
7354 #ifdef OPTION_EB
7355 {"EB", no_argument, NULL, OPTION_EB},
7356 #endif
7357 #ifdef OPTION_EL
7358 {"EL", no_argument, NULL, OPTION_EL},
7359 #endif
7360 {NULL, no_argument, NULL, 0}
7361 };
7362
7363 size_t md_longopts_size = sizeof (md_longopts);
7364
7365 struct aarch64_option_table
7366 {
7367 char *option; /* Option name to match. */
7368 char *help; /* Help information. */
7369 int *var; /* Variable to change. */
7370 int value; /* What to change it to. */
7371 char *deprecated; /* If non-null, print this message. */
7372 };
7373
7374 static struct aarch64_option_table aarch64_opts[] = {
7375 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7376 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7377 NULL},
7378 #ifdef DEBUG_AARCH64
7379 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7380 #endif /* DEBUG_AARCH64 */
7381 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7382 NULL},
7383 {"mno-verbose-error", N_("do not output verbose error messages"),
7384 &verbose_error_p, 0, NULL},
7385 {NULL, NULL, NULL, 0, NULL}
7386 };
7387
7388 struct aarch64_cpu_option_table
7389 {
7390 char *name;
7391 const aarch64_feature_set value;
7392 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7393 case. */
7394 const char *canonical_name;
7395 };
7396
7397 /* This list should, at a minimum, contain all the cpu names
7398 recognized by GCC. */
7399 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7400 {"all", AARCH64_ANY, NULL},
7401 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7402 AARCH64_FEATURE_CRC), "Cortex-A53"},
7403 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7404 AARCH64_FEATURE_CRC), "Cortex-A57"},
7405 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7406 AARCH64_FEATURE_CRC), "Cortex-A72"},
7407 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7408 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7409 "Samsung Exynos M1"},
7410 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7411 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7412 "Cavium ThunderX"},
7413   /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used in
7414      earlier releases and is superseded by 'xgene1' in all
7415      tools.  */
7416 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7417 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7418 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7419 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7420 {"generic", AARCH64_ARCH_V8, NULL},
7421
7422 {NULL, AARCH64_ARCH_NONE, NULL}
7423 };
7424
7425 struct aarch64_arch_option_table
7426 {
7427 char *name;
7428 const aarch64_feature_set value;
7429 };
7430
7431 /* This list should, at a minimum, contain all the architecture names
7432 recognized by GCC. */
7433 static const struct aarch64_arch_option_table aarch64_archs[] = {
7434 {"all", AARCH64_ANY},
7435 {"armv8-a", AARCH64_ARCH_V8},
7436 {"armv8.1-a", AARCH64_ARCH_V8_1},
7437 {NULL, AARCH64_ARCH_NONE}
7438 };
7439
7440 /* ISA extensions. */
7441 struct aarch64_option_cpu_value_table
7442 {
7443 char *name;
7444 const aarch64_feature_set value;
7445 };
7446
7447 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7448 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7449 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7450 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7451 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7452 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7453 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7454 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7455 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7456 | AARCH64_FEATURE_RDMA, 0)},
7457 {NULL, AARCH64_ARCH_NONE}
7458 };
7459
7460 struct aarch64_long_option_table
7461 {
7462 char *option; /* Substring to match. */
7463 char *help; /* Help information. */
7464 int (*func) (char *subopt); /* Function to decode sub-option. */
7465 char *deprecated; /* If non-null, print this message. */
7466 };
7467
7468 static int
7469 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7470 bfd_boolean ext_only)
7471 {
7472 /* We insist on extensions being added before being removed. We achieve
7473 this by using the ADDING_VALUE variable to indicate whether we are
7474 adding an extension (1) or removing it (0) and only allowing it to
7475 change in the order -1 -> 1 -> 0. */
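  /* e.g. (illustrative) "+crc+crypto+nofp" is accepted, whereas
     "+nofp+crc" is rejected because an extension is added after one has
     already been removed.  */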
7476 int adding_value = -1;
7477 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7478
7479 /* Copy the feature set, so that we can modify it. */
7480 *ext_set = **opt_p;
7481 *opt_p = ext_set;
7482
7483 while (str != NULL && *str != 0)
7484 {
7485 const struct aarch64_option_cpu_value_table *opt;
7486 char *ext = NULL;
7487 int optlen;
7488
7489 if (!ext_only)
7490 {
7491 if (*str != '+')
7492 {
7493 as_bad (_("invalid architectural extension"));
7494 return 0;
7495 }
7496
7497 ext = strchr (++str, '+');
7498 }
7499
7500 if (ext != NULL)
7501 optlen = ext - str;
7502 else
7503 optlen = strlen (str);
7504
7505 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7506 {
7507 if (adding_value != 0)
7508 adding_value = 0;
7509 optlen -= 2;
7510 str += 2;
7511 }
7512 else if (optlen > 0)
7513 {
7514 if (adding_value == -1)
7515 adding_value = 1;
7516 else if (adding_value != 1)
7517 {
7518 as_bad (_("must specify extensions to add before specifying "
7519 "those to remove"));
7520 	      return 0;
7521 }
7522 }
7523
7524 if (optlen == 0)
7525 {
7526 as_bad (_("missing architectural extension"));
7527 return 0;
7528 }
7529
7530 gas_assert (adding_value != -1);
7531
7532 for (opt = aarch64_features; opt->name != NULL; opt++)
7533 if (strncmp (opt->name, str, optlen) == 0)
7534 {
7535 /* Add or remove the extension. */
7536 if (adding_value)
7537 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7538 else
7539 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7540 break;
7541 }
7542
7543 if (opt->name == NULL)
7544 {
7545 as_bad (_("unknown architectural extension `%s'"), str);
7546 return 0;
7547 }
7548
7549 str = ext;
7550 };
7551
7552 return 1;
7553 }
7554
7555 static int
7556 aarch64_parse_cpu (char *str)
7557 {
7558 const struct aarch64_cpu_option_table *opt;
7559 char *ext = strchr (str, '+');
7560 size_t optlen;
7561
7562 if (ext != NULL)
7563 optlen = ext - str;
7564 else
7565 optlen = strlen (str);
7566
7567 if (optlen == 0)
7568 {
7569 as_bad (_("missing cpu name `%s'"), str);
7570 return 0;
7571 }
7572
7573 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7574 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7575 {
7576 mcpu_cpu_opt = &opt->value;
7577 if (ext != NULL)
7578 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7579
7580 return 1;
7581 }
7582
7583 as_bad (_("unknown cpu `%s'"), str);
7584 return 0;
7585 }
7586
7587 static int
7588 aarch64_parse_arch (char *str)
7589 {
7590 const struct aarch64_arch_option_table *opt;
7591 char *ext = strchr (str, '+');
7592 size_t optlen;
7593
7594 if (ext != NULL)
7595 optlen = ext - str;
7596 else
7597 optlen = strlen (str);
7598
7599 if (optlen == 0)
7600 {
7601 as_bad (_("missing architecture name `%s'"), str);
7602 return 0;
7603 }
7604
7605 for (opt = aarch64_archs; opt->name != NULL; opt++)
7606 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7607 {
7608 march_cpu_opt = &opt->value;
7609 if (ext != NULL)
7610 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7611
7612 return 1;
7613 }
7614
7615 as_bad (_("unknown architecture `%s'\n"), str);
7616 return 0;
7617 }
7618
7619 /* ABIs. */
7620 struct aarch64_option_abi_value_table
7621 {
7622 char *name;
7623 enum aarch64_abi_type value;
7624 };
7625
7626 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7627 {"ilp32", AARCH64_ABI_ILP32},
7628 {"lp64", AARCH64_ABI_LP64},
7629 {NULL, 0}
7630 };
7631
7632 static int
7633 aarch64_parse_abi (char *str)
7634 {
7635 const struct aarch64_option_abi_value_table *opt;
7636 size_t optlen = strlen (str);
7637
7638 if (optlen == 0)
7639 {
7640 as_bad (_("missing abi name `%s'"), str);
7641 return 0;
7642 }
7643
7644 for (opt = aarch64_abis; opt->name != NULL; opt++)
7645 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7646 {
7647 aarch64_abi = opt->value;
7648 return 1;
7649 }
7650
7651 as_bad (_("unknown abi `%s'\n"), str);
7652 return 0;
7653 }
7654
7655 static struct aarch64_long_option_table aarch64_long_opts[] = {
7656 #ifdef OBJ_ELF
7657   {"mabi=", N_("<abi name>\t specify the ABI <abi name>"),
7658 aarch64_parse_abi, NULL},
7659 #endif /* OBJ_ELF */
7660 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7661 aarch64_parse_cpu, NULL},
7662 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7663 aarch64_parse_arch, NULL},
7664 {NULL, NULL, 0, NULL}
7665 };
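/* Taken together, an illustrative invocation such as

     as -mabi=ilp32 -march=armv8-a+crc foo.s

   selects the ILP32 ABI and enables the CRC extension on top of ARMv8-A
   (the file name is, of course, arbitrary).  */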
7666
7667 int
7668 md_parse_option (int c, char *arg)
7669 {
7670 struct aarch64_option_table *opt;
7671 struct aarch64_long_option_table *lopt;
7672
7673 switch (c)
7674 {
7675 #ifdef OPTION_EB
7676 case OPTION_EB:
7677 target_big_endian = 1;
7678 break;
7679 #endif
7680
7681 #ifdef OPTION_EL
7682 case OPTION_EL:
7683 target_big_endian = 0;
7684 break;
7685 #endif
7686
7687 case 'a':
7688 /* Listing option. Just ignore these, we don't support additional
7689 ones. */
7690 return 0;
7691
7692 default:
7693 for (opt = aarch64_opts; opt->option != NULL; opt++)
7694 {
7695 if (c == opt->option[0]
7696 && ((arg == NULL && opt->option[1] == 0)
7697 || streq (arg, opt->option + 1)))
7698 {
7699 /* If the option is deprecated, tell the user. */
7700 if (opt->deprecated != NULL)
7701 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7702 arg ? arg : "", _(opt->deprecated));
7703
7704 if (opt->var != NULL)
7705 *opt->var = opt->value;
7706
7707 return 1;
7708 }
7709 }
7710
7711 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7712 {
7713 /* These options are expected to have an argument. */
7714 if (c == lopt->option[0]
7715 && arg != NULL
7716 && strncmp (arg, lopt->option + 1,
7717 strlen (lopt->option + 1)) == 0)
7718 {
7719 /* If the option is deprecated, tell the user. */
7720 if (lopt->deprecated != NULL)
7721 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7722 _(lopt->deprecated));
7723
7724 	      /* Call the sub-option parser.  */
7725 return lopt->func (arg + strlen (lopt->option) - 1);
7726 }
7727 }
7728
7729 return 0;
7730 }
7731
7732 return 1;
7733 }
7734
7735 void
7736 md_show_usage (FILE * fp)
7737 {
7738 struct aarch64_option_table *opt;
7739 struct aarch64_long_option_table *lopt;
7740
7741 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7742
7743 for (opt = aarch64_opts; opt->option != NULL; opt++)
7744 if (opt->help != NULL)
7745 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7746
7747 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7748 if (lopt->help != NULL)
7749 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7750
7751 #ifdef OPTION_EB
7752 fprintf (fp, _("\
7753 -EB assemble code for a big-endian cpu\n"));
7754 #endif
7755
7756 #ifdef OPTION_EL
7757 fprintf (fp, _("\
7758 -EL assemble code for a little-endian cpu\n"));
7759 #endif
7760 }
7761
7762 /* Parse a .cpu directive. */
7763
7764 static void
7765 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7766 {
7767 const struct aarch64_cpu_option_table *opt;
7768 char saved_char;
7769 char *name;
7770 char *ext;
7771 size_t optlen;
7772
7773 name = input_line_pointer;
7774 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7775 input_line_pointer++;
7776 saved_char = *input_line_pointer;
7777 *input_line_pointer = 0;
7778
7779 ext = strchr (name, '+');
7780
7781 if (ext != NULL)
7782 optlen = ext - name;
7783 else
7784 optlen = strlen (name);
7785
7786 /* Skip the first "all" entry. */
7787 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7788 if (strlen (opt->name) == optlen
7789 && strncmp (name, opt->name, optlen) == 0)
7790 {
7791 mcpu_cpu_opt = &opt->value;
7792 if (ext != NULL)
7793 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7794 return;
7795
7796 cpu_variant = *mcpu_cpu_opt;
7797
7798 *input_line_pointer = saved_char;
7799 demand_empty_rest_of_line ();
7800 return;
7801 }
7802 as_bad (_("unknown cpu `%s'"), name);
7803 *input_line_pointer = saved_char;
7804 ignore_rest_of_line ();
7805 }
7806
7807
7808 /* Parse a .arch directive. */
7809
7810 static void
7811 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7812 {
7813 const struct aarch64_arch_option_table *opt;
7814 char saved_char;
7815 char *name;
7816 char *ext;
7817 size_t optlen;
7818
7819 name = input_line_pointer;
7820 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7821 input_line_pointer++;
7822 saved_char = *input_line_pointer;
7823 *input_line_pointer = 0;
7824
7825 ext = strchr (name, '+');
7826
7827 if (ext != NULL)
7828 optlen = ext - name;
7829 else
7830 optlen = strlen (name);
7831
7832 /* Skip the first "all" entry. */
7833 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7834 if (strlen (opt->name) == optlen
7835 && strncmp (name, opt->name, optlen) == 0)
7836 {
7837 mcpu_cpu_opt = &opt->value;
7838 if (ext != NULL)
7839 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7840 return;
7841
7842 cpu_variant = *mcpu_cpu_opt;
7843
7844 *input_line_pointer = saved_char;
7845 demand_empty_rest_of_line ();
7846 return;
7847 }
7848
7849 as_bad (_("unknown architecture `%s'\n"), name);
7850 *input_line_pointer = saved_char;
7851 ignore_rest_of_line ();
7852 }
7853
7854 /* Parse a .arch_extension directive. */
7855
7856 static void
7857 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7858 {
7859 char saved_char;
7860   char *ext = input_line_pointer;
7861
7862 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7863 input_line_pointer++;
7864 saved_char = *input_line_pointer;
7865 *input_line_pointer = 0;
7866
7867 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7868 return;
7869
7870 cpu_variant = *mcpu_cpu_opt;
7871
7872 *input_line_pointer = saved_char;
7873 demand_empty_rest_of_line ();
7874 }
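/* e.g. (illustrative) a source file may contain

       .arch armv8-a+crc
       .arch_extension crypto

   to select ARMv8-A with CRC and then additionally enable the crypto
   instructions.  */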
7875
7876 /* Copy symbol information. */
7877
7878 void
7879 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7880 {
7881 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7882 }