1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the license, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63
64 /* When non-zero, program to a 32-bit model, in which the C data types
65 int, long and all pointer types are 32-bit objects (ILP32); or to a
66 64-bit model, in which the C int type is 32 bits but the C long type
67 and all pointer types are 64-bit objects (LP64). */
68 static int ilp32_p = 0;
69 #endif
70
71 enum neon_el_type
72 {
73 NT_invtype = -1,
74 NT_b,
75 NT_h,
76 NT_s,
77 NT_d,
78 NT_q
79 };
80
81 /* Bits for DEFINED field in neon_type_el. */
82 #define NTA_HASTYPE 1
83 #define NTA_HASINDEX 2
84
85 struct neon_type_el
86 {
87 enum neon_el_type type;
88 unsigned char defined;
89 unsigned width;
90 int64_t index;
91 };
92
93 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
94
95 struct reloc
96 {
97 bfd_reloc_code_real_type type;
98 expressionS exp;
99 int pc_rel;
100 enum aarch64_opnd opnd;
101 uint32_t flags;
102 unsigned need_libopcodes_p : 1;
103 };
104
105 struct aarch64_instruction
106 {
107 /* libopcodes structure for instruction intermediate representation. */
108 aarch64_inst base;
109 /* Record assembly errors found during the parsing. */
110 struct
111 {
112 enum aarch64_operand_error_kind kind;
113 const char *error;
114 } parsing_error;
115 /* The condition that appears in the assembly line. */
116 int cond;
117 /* Relocation information (including the GAS internal fixup). */
118 struct reloc reloc;
119 /* Need to generate an immediate in the literal pool. */
120 unsigned gen_lit_pool : 1;
121 };
122
123 typedef struct aarch64_instruction aarch64_instruction;
124
125 static aarch64_instruction inst;
126
127 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
128 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
129
130 /* Diagnostics inline function utilities.
131
132 These are lightweight utilities which should only be called by parse_operands
133 and other parsers. GAS processes each assembly line by parsing it against
134 instruction template(s); in the case of multiple templates (for the same
135 mnemonic name), those templates are tried one by one until one succeeds or
136 all fail. An assembly line may fail a few templates before being
137 successfully parsed; an error saved here in most cases is not a user error
138 but an error indicating that the current template is not the right template.
139 Therefore it is very important that errors can be saved at a low cost during
140 the parsing; we don't want to slow down the whole parsing by recording
141 non-user errors in detail.
142
143 Remember that the objective is to help GAS pick up the most appropriate
144 error message in the case of multiple templates, e.g. FMOV which has 8
145 templates. */
146
147 static inline void
148 clear_error (void)
149 {
150 inst.parsing_error.kind = AARCH64_OPDE_NIL;
151 inst.parsing_error.error = NULL;
152 }
153
154 static inline bfd_boolean
155 error_p (void)
156 {
157 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
158 }
159
160 static inline const char *
161 get_error_message (void)
162 {
163 return inst.parsing_error.error;
164 }
165
166 static inline void
167 set_error_message (const char *error)
168 {
169 inst.parsing_error.error = error;
170 }
171
172 static inline enum aarch64_operand_error_kind
173 get_error_kind (void)
174 {
175 return inst.parsing_error.kind;
176 }
177
178 static inline void
179 set_error_kind (enum aarch64_operand_error_kind kind)
180 {
181 inst.parsing_error.kind = kind;
182 }
183
184 static inline void
185 set_error (enum aarch64_operand_error_kind kind, const char *error)
186 {
187 inst.parsing_error.kind = kind;
188 inst.parsing_error.error = error;
189 }
190
191 static inline void
192 set_recoverable_error (const char *error)
193 {
194 set_error (AARCH64_OPDE_RECOVERABLE, error);
195 }
196
197 /* Use the DESC field of the corresponding aarch64_operand entry to compose
198 the error message. */
199 static inline void
200 set_default_error (void)
201 {
202 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
203 }
204
205 static inline void
206 set_syntax_error (const char *error)
207 {
208 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
209 }
210
211 static inline void
212 set_first_syntax_error (const char *error)
213 {
214 if (! error_p ())
215 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
216 }
217
218 static inline void
219 set_fatal_syntax_error (const char *error)
220 {
221 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
222 }
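/* Illustrative usage sketch (editor's addition, not part of the original
   source; the operand being parsed is hypothetical): a parser typically
   records a cheap error and bails out, e.g.

     if (! skip_past_char (&str, ','))
       {
         set_syntax_error (_("expected a comma"));
         return FALSE;
       }

   set_first_syntax_error only records an error when none has been set
   yet, while set_fatal_syntax_error unconditionally overwrites it.  */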
223 \f
224 /* Number of littlenums required to hold an extended precision number. */
225 #define MAX_LITTLENUMS 6
226
227 /* Return value for certain parsers when the parsing fails; those parsers
228 return the information of the parsed result, e.g. register number, on
229 success. */
230 #define PARSE_FAIL -1
231
232 /* This is an invalid condition code that means no conditional field is
233 present. */
234 #define COND_ALWAYS 0x10
235
236 typedef struct
237 {
238 const char *template;
239 unsigned long value;
240 } asm_barrier_opt;
241
242 typedef struct
243 {
244 const char *template;
245 uint32_t value;
246 } asm_nzcv;
247
248 struct reloc_entry
249 {
250 char *name;
251 bfd_reloc_code_real_type reloc;
252 };
253
254 /* Structure for a hash table entry for a register. */
255 typedef struct
256 {
257 const char *name;
258 unsigned char number;
259 unsigned char type;
260 unsigned char builtin;
261 } reg_entry;
262
263 /* Macros to define the register types and masks for the purpose
264 of parsing. */
265
266 #undef AARCH64_REG_TYPES
267 #define AARCH64_REG_TYPES \
268 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
269 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
270 BASIC_REG_TYPE(SP_32) /* wsp */ \
271 BASIC_REG_TYPE(SP_64) /* sp */ \
272 BASIC_REG_TYPE(Z_32) /* wzr */ \
273 BASIC_REG_TYPE(Z_64) /* xzr */ \
274 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
275 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
276 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
277 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
278 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
279 BASIC_REG_TYPE(CN) /* c[0-7] */ \
280 BASIC_REG_TYPE(VN) /* v[0-31] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
284 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
285 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
286 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
287 /* Typecheck: any [BHSDQ]P FP. */ \
288 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
289 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
290 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
291 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
292 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
293 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
294 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
295 /* Any integer register; used for error messages only. */ \
296 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Pseudo type to mark the end of the enumerator sequence. */ \
300 BASIC_REG_TYPE(MAX)
301
302 #undef BASIC_REG_TYPE
303 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
304 #undef MULTI_REG_TYPE
305 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
306
307 /* Register type enumerators. */
308 typedef enum
309 {
310 /* A list of REG_TYPE_*. */
311 AARCH64_REG_TYPES
312 } aarch64_reg_type;
313
314 #undef BASIC_REG_TYPE
315 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
316 #undef REG_TYPE
317 #define REG_TYPE(T) (1 << REG_TYPE_##T)
318 #undef MULTI_REG_TYPE
319 #define MULTI_REG_TYPE(T,V) V,
320
321 /* Values indexed by aarch64_reg_type to assist the type checking. */
322 static const unsigned reg_type_masks[] =
323 {
324 AARCH64_REG_TYPES
325 };
326
327 #undef BASIC_REG_TYPE
328 #undef REG_TYPE
329 #undef MULTI_REG_TYPE
330 #undef AARCH64_REG_TYPES
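/* Worked expansion (editor's note, not part of the original source): with
   the definitions above, the first pass over AARCH64_REG_TYPES turns
   BASIC_REG_TYPE(R_32) into the enumerator REG_TYPE_R_32, while the second
   pass turns it into the mask (1 << REG_TYPE_R_32) and turns an entry such
   as MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) into the
   combined mask (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64), so that
   reg_type_masks[REG_TYPE_R64_SP] lists every basic type it accepts.  */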
331
332 /* Diagnostics used when we don't get a register of the expected type.
333 Note: this has to be synchronized with the aarch64_reg_type definitions
334 above. */
335 static const char *
336 get_reg_expected_msg (aarch64_reg_type reg_type)
337 {
338 const char *msg;
339
340 switch (reg_type)
341 {
342 case REG_TYPE_R_32:
343 msg = N_("integer 32-bit register expected");
344 break;
345 case REG_TYPE_R_64:
346 msg = N_("integer 64-bit register expected");
347 break;
348 case REG_TYPE_R_N:
349 msg = N_("integer register expected");
350 break;
351 case REG_TYPE_R_Z_SP:
352 msg = N_("integer, zero or SP register expected");
353 break;
354 case REG_TYPE_FP_B:
355 msg = N_("8-bit SIMD scalar register expected");
356 break;
357 case REG_TYPE_FP_H:
358 msg = N_("16-bit SIMD scalar or floating-point half precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_S:
362 msg = N_("32-bit SIMD scalar or floating-point single precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_D:
366 msg = N_("64-bit SIMD scalar or floating-point double precision "
367 "register expected");
368 break;
369 case REG_TYPE_FP_Q:
370 msg = N_("128-bit SIMD scalar or floating-point quad precision "
371 "register expected");
372 break;
373 case REG_TYPE_CN:
374 msg = N_("C0 - C15 expected");
375 break;
376 case REG_TYPE_R_Z_BHSDQ_V:
377 msg = N_("register expected");
378 break;
379 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
380 msg = N_("SIMD scalar or floating-point register expected");
381 break;
382 case REG_TYPE_VN: /* any V reg */
383 msg = N_("vector register expected");
384 break;
385 default:
386 as_fatal (_("invalid register type %d"), reg_type);
387 }
388 return msg;
389 }
390
391 /* Some well known registers that we refer to directly elsewhere. */
392 #define REG_SP 31
393
394 /* Instructions take 4 bytes in the object file. */
395 #define INSN_SIZE 4
396
397 /* Define some common error messages. */
398 #define BAD_SP _("SP not allowed here")
399
400 static struct hash_control *aarch64_ops_hsh;
401 static struct hash_control *aarch64_cond_hsh;
402 static struct hash_control *aarch64_shift_hsh;
403 static struct hash_control *aarch64_sys_regs_hsh;
404 static struct hash_control *aarch64_pstatefield_hsh;
405 static struct hash_control *aarch64_sys_regs_ic_hsh;
406 static struct hash_control *aarch64_sys_regs_dc_hsh;
407 static struct hash_control *aarch64_sys_regs_at_hsh;
408 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
409 static struct hash_control *aarch64_reg_hsh;
410 static struct hash_control *aarch64_barrier_opt_hsh;
411 static struct hash_control *aarch64_nzcv_hsh;
412 static struct hash_control *aarch64_pldop_hsh;
413
414 /* Stuff needed to resolve the label ambiguity
415 As:
416 ...
417 label: <insn>
418 may differ from:
419 ...
420 label:
421 <insn> */
422
423 static symbolS *last_label_seen;
424
425 /* Literal pool structure. Held on a per-section
426 and per-sub-section basis. */
427
428 #define MAX_LITERAL_POOL_SIZE 1024
429 typedef struct literal_pool
430 {
431 expressionS literals[MAX_LITERAL_POOL_SIZE];
432 unsigned int next_free_entry;
433 unsigned int id;
434 symbolS *symbol;
435 segT section;
436 subsegT sub_section;
437 int size;
438 struct literal_pool *next;
439 } literal_pool;
440
441 /* Pointer to a linked list of literal pools. */
442 static literal_pool *list_of_pools = NULL;
443 \f
444 /* Pure syntax. */
445
446 /* This array holds the chars that always start a comment. If the
447 pre-processor is disabled, these aren't very useful. */
448 const char comment_chars[] = "";
449
450 /* This array holds the chars that only start a comment at the beginning of
451 a line. If the line seems to have the form '# 123 filename'
452 .line and .file directives will appear in the pre-processed output. */
453 /* Note that input_file.c hand checks for '#' at the beginning of the
454 first line of the input file. This is because the compiler outputs
455 #NO_APP at the beginning of its output. */
456 /* Also note that comments like this one will always work. */
457 const char line_comment_chars[] = "#";
458
459 const char line_separator_chars[] = ";";
460
461 /* Chars that can be used to separate the mantissa
462 from the exponent in floating point numbers. */
463 const char EXP_CHARS[] = "eE";
464
465 /* Chars that mean this number is a floating point constant. */
466 /* As in 0f12.456 */
467 /* or 0d1.2345e12 */
468
469 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
470
471 /* Prefix character that indicates the start of an immediate value. */
472 #define is_immediate_prefix(C) ((C) == '#')
473
474 /* Separator character handling. */
475
476 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
477
478 static inline bfd_boolean
479 skip_past_char (char **str, char c)
480 {
481 if (**str == c)
482 {
483 (*str)++;
484 return TRUE;
485 }
486 else
487 return FALSE;
488 }
489
490 #define skip_past_comma(str) skip_past_char (str, ',')
491
492 /* Arithmetic expressions (possibly involving symbols). */
493
494 static bfd_boolean in_my_get_expression_p = FALSE;
495
496 /* Third argument to my_get_expression. */
497 #define GE_NO_PREFIX 0
498 #define GE_OPT_PREFIX 1
499
500 /* Return TRUE if the string pointed to by *STR is successfully parsed
501 as a valid expression; *EP will be filled with the information of
502 such an expression. Otherwise return FALSE. */
503
504 static bfd_boolean
505 my_get_expression (expressionS * ep, char **str, int prefix_mode,
506 int reject_absent)
507 {
508 char *save_in;
509 segT seg;
510 int prefix_present_p = 0;
511
512 switch (prefix_mode)
513 {
514 case GE_NO_PREFIX:
515 break;
516 case GE_OPT_PREFIX:
517 if (is_immediate_prefix (**str))
518 {
519 (*str)++;
520 prefix_present_p = 1;
521 }
522 break;
523 default:
524 abort ();
525 }
526
527 memset (ep, 0, sizeof (expressionS));
528
529 save_in = input_line_pointer;
530 input_line_pointer = *str;
531 in_my_get_expression_p = TRUE;
532 seg = expression (ep);
533 in_my_get_expression_p = FALSE;
534
535 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
536 {
537 /* We found a bad expression in md_operand(). */
538 *str = input_line_pointer;
539 input_line_pointer = save_in;
540 if (prefix_present_p && ! error_p ())
541 set_fatal_syntax_error (_("bad expression"));
542 else
543 set_first_syntax_error (_("bad expression"));
544 return FALSE;
545 }
546
547 #ifdef OBJ_AOUT
548 if (seg != absolute_section
549 && seg != text_section
550 && seg != data_section
551 && seg != bss_section && seg != undefined_section)
552 {
553 set_syntax_error (_("bad segment"));
554 *str = input_line_pointer;
555 input_line_pointer = save_in;
556 return FALSE;
557 }
558 #else
559 (void) seg;
560 #endif
561
562 *str = input_line_pointer;
563 input_line_pointer = save_in;
564 return TRUE;
565 }
566
567 /* Turn a string in input_line_pointer into a floating point constant
568 of type TYPE, and store the appropriate bytes in *LITP. The number
569 of LITTLENUMS emitted is stored in *SIZEP. An error message is
570 returned, or NULL on OK. */
571
572 char *
573 md_atof (int type, char *litP, int *sizeP)
574 {
575 return ieee_md_atof (type, litP, sizeP, target_big_endian);
576 }
577
578 /* We handle all bad expressions here, so that we can report the faulty
579 instruction in the error message. */
580 void
581 md_operand (expressionS * exp)
582 {
583 if (in_my_get_expression_p)
584 exp->X_op = O_illegal;
585 }
586
587 /* Immediate values. */
588
589 /* Errors may be set multiple times during parsing or bit encoding
590 (particularly in the Neon bits), but usually the earliest error which is set
591 will be the most meaningful. Avoid overwriting it with later (cascading)
592 errors by calling this function. */
593
594 static void
595 first_error (const char *error)
596 {
597 if (! error_p ())
598 set_syntax_error (error);
599 }
600
601 /* Similar to first_error, but this function accepts a formatted error
602 message. */
603 static void
604 first_error_fmt (const char *format, ...)
605 {
606 va_list args;
607 enum
608 { size = 100 };
609 /* N.B. this single buffer will not cause error messages for different
610 instructions to pollute each other; this is because at the end of
611 processing of each assembly line, the error message, if any, will be
612 collected by as_bad. */
613 static char buffer[size];
614
615 if (! error_p ())
616 {
617 int ret ATTRIBUTE_UNUSED;
618 va_start (args, format);
619 ret = vsnprintf (buffer, size, format, args);
620 know (ret <= size - 1 && ret >= 0);
621 va_end (args);
622 set_syntax_error (buffer);
623 }
624 }
625
626 /* Register parsing. */
627
628 /* Generic register parser which is called by other specialized
629 register parsers.
630 CCP points to what should be the beginning of a register name.
631 If it is indeed a valid register name, advance CCP over it and
632 return the reg_entry structure; otherwise return NULL.
633 It does not issue diagnostics. */
634
635 static reg_entry *
636 parse_reg (char **ccp)
637 {
638 char *start = *ccp;
639 char *p;
640 reg_entry *reg;
641
642 #ifdef REGISTER_PREFIX
643 if (*start != REGISTER_PREFIX)
644 return NULL;
645 start++;
646 #endif
647
648 p = start;
649 if (!ISALPHA (*p) || !is_name_beginner (*p))
650 return NULL;
651
652 do
653 p++;
654 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
655
656 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
657
658 if (!reg)
659 return NULL;
660
661 *ccp = p;
662 return reg;
663 }
664
665 /* Return TRUE if REG->TYPE is an acceptable register of the (possibly
666 multi-valued) type TYPE; otherwise return FALSE. */
667 static bfd_boolean
668 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
669 {
670 if (reg->type == type)
671 return TRUE;
672
673 switch (type)
674 {
675 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
676 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
677 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
678 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
679 case REG_TYPE_VN: /* Vector register. */
680 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
681 return ((reg_type_masks[reg->type] & reg_type_masks[type])
682 == reg_type_masks[reg->type]);
683 default:
684 as_fatal ("unhandled type %d", type);
685 abort ();
686 }
687 }
688
689 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
690 Return the register number otherwise. *ISREG32 is set to one if the
691 register is 32-bit wide; *ISREGZERO is set to one if the register is
692 of type Z_32 or Z_64.
693 Note that this function does not issue any diagnostics. */
694
695 static int
696 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
697 int *isreg32, int *isregzero)
698 {
699 char *str = *ccp;
700 const reg_entry *reg = parse_reg (&str);
701
702 if (reg == NULL)
703 return PARSE_FAIL;
704
705 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
706 return PARSE_FAIL;
707
708 switch (reg->type)
709 {
710 case REG_TYPE_SP_32:
711 case REG_TYPE_SP_64:
712 if (reject_sp)
713 return PARSE_FAIL;
714 *isreg32 = reg->type == REG_TYPE_SP_32;
715 *isregzero = 0;
716 break;
717 case REG_TYPE_R_32:
718 case REG_TYPE_R_64:
719 *isreg32 = reg->type == REG_TYPE_R_32;
720 *isregzero = 0;
721 break;
722 case REG_TYPE_Z_32:
723 case REG_TYPE_Z_64:
724 if (reject_rz)
725 return PARSE_FAIL;
726 *isreg32 = reg->type == REG_TYPE_Z_32;
727 *isregzero = 1;
728 break;
729 default:
730 return PARSE_FAIL;
731 }
732
733 *ccp = str;
734
735 return reg->number;
736 }
737
738 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
739 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
740 otherwise return FALSE.
741
742 Accept only one occurrence of:
743 8b 16b 4h 8h 2s 4s 1d 2d
744 b h s d q */
745 static bfd_boolean
746 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
747 {
748 char *ptr = *str;
749 unsigned width;
750 unsigned element_size;
751 enum neon_el_type type;
752
753 /* skip '.' */
754 ptr++;
755
756 if (!ISDIGIT (*ptr))
757 {
758 width = 0;
759 goto elt_size;
760 }
761 width = strtoul (ptr, &ptr, 10);
762 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
763 {
764 first_error_fmt (_("bad size %d in vector width specifier"), width);
765 return FALSE;
766 }
767
768 elt_size:
769 switch (TOLOWER (*ptr))
770 {
771 case 'b':
772 type = NT_b;
773 element_size = 8;
774 break;
775 case 'h':
776 type = NT_h;
777 element_size = 16;
778 break;
779 case 's':
780 type = NT_s;
781 element_size = 32;
782 break;
783 case 'd':
784 type = NT_d;
785 element_size = 64;
786 break;
787 case 'q':
788 if (width == 1)
789 {
790 type = NT_q;
791 element_size = 128;
792 break;
793 }
794 /* fall through. */
795 default:
796 if (*ptr != '\0')
797 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
798 else
799 first_error (_("missing element size"));
800 return FALSE;
801 }
802 if (width != 0 && width * element_size != 64 && width * element_size != 128)
803 {
804 first_error_fmt (_
805 ("invalid element size %d and vector size combination %c"),
806 width, *ptr);
807 return FALSE;
808 }
809 ptr++;
810
811 parsed_type->type = type;
812 parsed_type->width = width;
813
814 *str = ptr;
815
816 return TRUE;
817 }
818
819 /* Parse a single type, e.g. ".8b", leading period included.
820 Only applicable to Vn registers.
821
822 Return TRUE on success; otherwise return FALSE. */
823 static bfd_boolean
824 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
825 {
826 char *str = *ccp;
827
828 if (*str == '.')
829 {
830 if (! parse_neon_type_for_operand (vectype, &str))
831 {
832 first_error (_("vector type expected"));
833 return FALSE;
834 }
835 }
836 else
837 return FALSE;
838
839 *ccp = str;
840
841 return TRUE;
842 }
843
844 /* Parse a register of the type TYPE.
845
846 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
847 name or the parsed register is not of TYPE.
848
849 Otherwise return the register number, and optionally fill in the actual
850 type of the register in *RTYPE when multiple alternatives were given, and
851 return the register shape and element index information in *TYPEINFO.
852
853 IN_REG_LIST should be set to TRUE if the caller is parsing a register
854 list. */
855
856 static int
857 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
858 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
859 {
860 char *str = *ccp;
861 const reg_entry *reg = parse_reg (&str);
862 struct neon_type_el atype;
863 struct neon_type_el parsetype;
864 bfd_boolean is_typed_vecreg = FALSE;
865
866 atype.defined = 0;
867 atype.type = NT_invtype;
868 atype.width = -1;
869 atype.index = 0;
870
871 if (reg == NULL)
872 {
873 if (typeinfo)
874 *typeinfo = atype;
875 set_default_error ();
876 return PARSE_FAIL;
877 }
878
879 if (! aarch64_check_reg_type (reg, type))
880 {
881 DEBUG_TRACE ("reg type check failed");
882 set_default_error ();
883 return PARSE_FAIL;
884 }
885 type = reg->type;
886
887 if (type == REG_TYPE_VN
888 && parse_neon_operand_type (&parsetype, &str))
889 {
890 /* Register is of the form Vn.[bhsdq]. */
891 is_typed_vecreg = TRUE;
892
893 if (parsetype.width == 0)
894 /* Expect index. In the new scheme we cannot have
895 Vn.[bhsdq] represent a scalar. Therefore any
896 Vn.[bhsdq] should have an index following it.
897 Except in reglists, of course. */
898 atype.defined |= NTA_HASINDEX;
899 else
900 atype.defined |= NTA_HASTYPE;
901
902 atype.type = parsetype.type;
903 atype.width = parsetype.width;
904 }
905
906 if (skip_past_char (&str, '['))
907 {
908 expressionS exp;
909
910 /* Reject Sn[index] syntax. */
911 if (!is_typed_vecreg)
912 {
913 first_error (_("this type of register can't be indexed"));
914 return PARSE_FAIL;
915 }
916
917 if (in_reg_list == TRUE)
918 {
919 first_error (_("index not allowed inside register list"));
920 return PARSE_FAIL;
921 }
922
923 atype.defined |= NTA_HASINDEX;
924
925 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
926
927 if (exp.X_op != O_constant)
928 {
929 first_error (_("constant expression required"));
930 return PARSE_FAIL;
931 }
932
933 if (! skip_past_char (&str, ']'))
934 return PARSE_FAIL;
935
936 atype.index = exp.X_add_number;
937 }
938 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
939 {
940 /* Indexed vector register expected. */
941 first_error (_("indexed vector register expected"));
942 return PARSE_FAIL;
943 }
944
945 /* A vector reg Vn should be typed or indexed. */
946 if (type == REG_TYPE_VN && atype.defined == 0)
947 {
948 first_error (_("invalid use of vector register"));
949 }
950
951 if (typeinfo)
952 *typeinfo = atype;
953
954 if (rtype)
955 *rtype = type;
956
957 *ccp = str;
958
959 return reg->number;
960 }
961
962 /* Parse register.
963
964 Return the register number on success; return PARSE_FAIL otherwise.
965
966 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
967 the register (e.g. NEON double or quad reg when either has been requested).
968
969 If this is a NEON vector register with additional type information, fill
970 in the struct pointed to by VECTYPE (if non-NULL).
971
972 This parser does not handle register lists. */
973
974 static int
975 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
976 aarch64_reg_type *rtype, struct neon_type_el *vectype)
977 {
978 struct neon_type_el atype;
979 char *str = *ccp;
980 int reg = parse_typed_reg (&str, type, rtype, &atype,
981 /*in_reg_list= */ FALSE);
982
983 if (reg == PARSE_FAIL)
984 return PARSE_FAIL;
985
986 if (vectype)
987 *vectype = atype;
988
989 *ccp = str;
990
991 return reg;
992 }
993
994 static inline bfd_boolean
995 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
996 {
997 return
998 e1.type == e2.type
999 && e1.defined == e2.defined
1000 && e1.width == e2.width && e1.index == e2.index;
1001 }
1002
1003 /* This function parses the NEON register list. On success, it returns
1004 the parsed register list information in the following encoded format:
1005
1006 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1007 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1008
1009 The information of the register shape and/or index is returned in
1010 *VECTYPE.
1011
1012 It returns PARSE_FAIL if the register list is invalid.
1013
1014 The list contains one to four registers.
1015 Each register can be one of:
1016 <Vt>.<T>[<index>]
1017 <Vt>.<T>
1018 All <T> should be identical.
1019 All <index> should be identical.
1020 There are restrictions on <Vt> numbers which are checked later
1021 (by reg_list_valid_p). */
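/* Worked example (editor's sketch, not part of the original source):
   parsing "{v2.8b - v4.8b}" (or the equivalent "{v2.8b, v3.8b, v4.8b}")
   yields nb_regs = 3 with ret_val = 2 | (3 << 5) | (4 << 10) = 0x1062,
   so the function returns (0x1062 << 2) | (3 - 1) = 0x418a:
   bits 0-1 = 2 (three registers), bits 2-6 = 2, bits 7-11 = 3,
   bits 12-16 = 4.  *VECTYPE records the shared ".8b" shape.  */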
1022
1023 static int
1024 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1025 {
1026 char *str = *ccp;
1027 int nb_regs;
1028 struct neon_type_el typeinfo, typeinfo_first;
1029 int val, val_range;
1030 int in_range;
1031 int ret_val;
1032 int i;
1033 bfd_boolean error = FALSE;
1034 bfd_boolean expect_index = FALSE;
1035
1036 if (*str != '{')
1037 {
1038 set_syntax_error (_("expecting {"));
1039 return PARSE_FAIL;
1040 }
1041 str++;
1042
1043 nb_regs = 0;
1044 typeinfo_first.defined = 0;
1045 typeinfo_first.type = NT_invtype;
1046 typeinfo_first.width = -1;
1047 typeinfo_first.index = 0;
1048 ret_val = 0;
1049 val = -1;
1050 val_range = -1;
1051 in_range = 0;
1052 do
1053 {
1054 if (in_range)
1055 {
1056 str++; /* skip over '-' */
1057 val_range = val;
1058 }
1059 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1060 /*in_reg_list= */ TRUE);
1061 if (val == PARSE_FAIL)
1062 {
1063 set_first_syntax_error (_("invalid vector register in list"));
1064 error = TRUE;
1065 continue;
1066 }
1067 /* reject [bhsd]n */
1068 if (typeinfo.defined == 0)
1069 {
1070 set_first_syntax_error (_("invalid scalar register in list"));
1071 error = TRUE;
1072 continue;
1073 }
1074
1075 if (typeinfo.defined & NTA_HASINDEX)
1076 expect_index = TRUE;
1077
1078 if (in_range)
1079 {
1080 if (val < val_range)
1081 {
1082 set_first_syntax_error
1083 (_("invalid range in vector register list"));
1084 error = TRUE;
1085 }
1086 val_range++;
1087 }
1088 else
1089 {
1090 val_range = val;
1091 if (nb_regs == 0)
1092 typeinfo_first = typeinfo;
1093 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1094 {
1095 set_first_syntax_error
1096 (_("type mismatch in vector register list"));
1097 error = TRUE;
1098 }
1099 }
1100 if (! error)
1101 for (i = val_range; i <= val; i++)
1102 {
1103 ret_val |= i << (5 * nb_regs);
1104 nb_regs++;
1105 }
1106 in_range = 0;
1107 }
1108 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1109
1110 skip_whitespace (str);
1111 if (*str != '}')
1112 {
1113 set_first_syntax_error (_("end of vector register list not found"));
1114 error = TRUE;
1115 }
1116 str++;
1117
1118 skip_whitespace (str);
1119
1120 if (expect_index)
1121 {
1122 if (skip_past_char (&str, '['))
1123 {
1124 expressionS exp;
1125
1126 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1127 if (exp.X_op != O_constant)
1128 {
1129 set_first_syntax_error (_("constant expression required."));
1130 error = TRUE;
1131 }
1132 if (! skip_past_char (&str, ']'))
1133 error = TRUE;
1134 else
1135 typeinfo_first.index = exp.X_add_number;
1136 }
1137 else
1138 {
1139 set_first_syntax_error (_("expected index"));
1140 error = TRUE;
1141 }
1142 }
1143
1144 if (nb_regs > 4)
1145 {
1146 set_first_syntax_error (_("too many registers in vector register list"));
1147 error = TRUE;
1148 }
1149 else if (nb_regs == 0)
1150 {
1151 set_first_syntax_error (_("empty vector register list"));
1152 error = TRUE;
1153 }
1154
1155 *ccp = str;
1156 if (! error)
1157 *vectype = typeinfo_first;
1158
1159 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1160 }
1161
1162 /* Directives: register aliases. */
1163
1164 static reg_entry *
1165 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1166 {
1167 reg_entry *new;
1168 const char *name;
1169
1170 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1171 {
1172 if (new->builtin)
1173 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1174 str);
1175
1176 /* Only warn about a redefinition if it's not defined as the
1177 same register. */
1178 else if (new->number != number || new->type != type)
1179 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1180
1181 return NULL;
1182 }
1183
1184 name = xstrdup (str);
1185 new = xmalloc (sizeof (reg_entry));
1186
1187 new->name = name;
1188 new->number = number;
1189 new->type = type;
1190 new->builtin = FALSE;
1191
1192 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1193 abort ();
1194
1195 return new;
1196 }
1197
1198 /* Look for the .req directive. This is of the form:
1199
1200 new_register_name .req existing_register_name
1201
1202 If we find one, or if it looks sufficiently like one that we want to
1203 handle any error here, return TRUE. Otherwise return FALSE. */
1204
1205 static bfd_boolean
1206 create_register_alias (char *newname, char *p)
1207 {
1208 const reg_entry *old;
1209 char *oldname, *nbuf;
1210 size_t nlen;
1211
1212 /* The input scrubber ensures that whitespace after the mnemonic is
1213 collapsed to single spaces. */
1214 oldname = p;
1215 if (strncmp (oldname, " .req ", 6) != 0)
1216 return FALSE;
1217
1218 oldname += 6;
1219 if (*oldname == '\0')
1220 return FALSE;
1221
1222 old = hash_find (aarch64_reg_hsh, oldname);
1223 if (!old)
1224 {
1225 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1226 return TRUE;
1227 }
1228
1229 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1230 the desired alias name, and p points to its end. If not, then
1231 the desired alias name is in the global original_case_string. */
1232 #ifdef TC_CASE_SENSITIVE
1233 nlen = p - newname;
1234 #else
1235 newname = original_case_string;
1236 nlen = strlen (newname);
1237 #endif
1238
1239 nbuf = alloca (nlen + 1);
1240 memcpy (nbuf, newname, nlen);
1241 nbuf[nlen] = '\0';
1242
1243 /* Create aliases under the new name as stated; an all-lowercase
1244 version of the new name; and an all-uppercase version of the new
1245 name. */
1246 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1247 {
1248 for (p = nbuf; *p; p++)
1249 *p = TOUPPER (*p);
1250
1251 if (strncmp (nbuf, newname, nlen))
1252 {
1253 /* If this attempt to create an additional alias fails, do not bother
1254 trying to create the all-lower case alias. We will fail and issue
1255 a second, duplicate error message. This situation arises when the
1256 programmer does something like:
1257 foo .req r0
1258 Foo .req r1
1259 The second .req creates the "Foo" alias but then fails to create
1260 the artificial FOO alias because it has already been created by the
1261 first .req. */
1262 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1263 return TRUE;
1264 }
1265
1266 for (p = nbuf; *p; p++)
1267 *p = TOLOWER (*p);
1268
1269 if (strncmp (nbuf, newname, nlen))
1270 insert_reg_alias (nbuf, old->number, old->type);
1271 }
1272
1273 return TRUE;
1274 }
1275
1276 /* Should never be called, as .req goes between the alias and the
1277 register name, not at the beginning of the line. */
1278 static void
1279 s_req (int a ATTRIBUTE_UNUSED)
1280 {
1281 as_bad (_("invalid syntax for .req directive"));
1282 }
1283
1284 /* The .unreq directive deletes an alias which was previously defined
1285 by .req. For example:
1286
1287 my_alias .req r11
1288 .unreq my_alias */
1289
1290 static void
1291 s_unreq (int a ATTRIBUTE_UNUSED)
1292 {
1293 char *name;
1294 char saved_char;
1295
1296 name = input_line_pointer;
1297
1298 while (*input_line_pointer != 0
1299 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1300 ++input_line_pointer;
1301
1302 saved_char = *input_line_pointer;
1303 *input_line_pointer = 0;
1304
1305 if (!*name)
1306 as_bad (_("invalid syntax for .unreq directive"));
1307 else
1308 {
1309 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1310
1311 if (!reg)
1312 as_bad (_("unknown register alias '%s'"), name);
1313 else if (reg->builtin)
1314 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1315 name);
1316 else
1317 {
1318 char *p;
1319 char *nbuf;
1320
1321 hash_delete (aarch64_reg_hsh, name, FALSE);
1322 free ((char *) reg->name);
1323 free (reg);
1324
1325 /* Also locate the all upper case and all lower case versions.
1326 Do not complain if we cannot find one or the other as it
1327 was probably deleted above. */
1328
1329 nbuf = strdup (name);
1330 for (p = nbuf; *p; p++)
1331 *p = TOUPPER (*p);
1332 reg = hash_find (aarch64_reg_hsh, nbuf);
1333 if (reg)
1334 {
1335 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1336 free ((char *) reg->name);
1337 free (reg);
1338 }
1339
1340 for (p = nbuf; *p; p++)
1341 *p = TOLOWER (*p);
1342 reg = hash_find (aarch64_reg_hsh, nbuf);
1343 if (reg)
1344 {
1345 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1346 free ((char *) reg->name);
1347 free (reg);
1348 }
1349
1350 free (nbuf);
1351 }
1352 }
1353
1354 *input_line_pointer = saved_char;
1355 demand_empty_rest_of_line ();
1356 }
1357
1358 /* Directives: Instruction set selection. */
1359
1360 #ifdef OBJ_ELF
1361 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1362 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1363 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1364 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
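/* Illustrative placement (editor's sketch, not from the original source):
   for input such as

       add    x0, x1, x2      // "$x" mapping symbol emitted here
       .word  0x12345678      // "$d" mapping symbol emitted here
       ret                    // "$x" again, back to code

   a "$x" symbol marks the start of a run of instructions and a "$d"
   symbol marks the start of a run of literal data within a section.  */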
1365
1366 /* Create a new mapping symbol for the transition to STATE. */
1367
1368 static void
1369 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1370 {
1371 symbolS *symbolP;
1372 const char *symname;
1373 int type;
1374
1375 switch (state)
1376 {
1377 case MAP_DATA:
1378 symname = "$d";
1379 type = BSF_NO_FLAGS;
1380 break;
1381 case MAP_INSN:
1382 symname = "$x";
1383 type = BSF_NO_FLAGS;
1384 break;
1385 default:
1386 abort ();
1387 }
1388
1389 symbolP = symbol_new (symname, now_seg, value, frag);
1390 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1391
1392 /* Save the mapping symbols for future reference. Also check that
1393 we do not place two mapping symbols at the same offset within a
1394 frag. We'll handle overlap between frags in
1395 check_mapping_symbols.
1396
1397 If .fill or other data filling directive generates zero sized data,
1398 the mapping symbol for the following code will have the same value
1399 as the one generated for the data filling directive. In this case,
1400 we replace the old symbol with the new one at the same address. */
1401 if (value == 0)
1402 {
1403 if (frag->tc_frag_data.first_map != NULL)
1404 {
1405 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1406 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1407 &symbol_lastP);
1408 }
1409 frag->tc_frag_data.first_map = symbolP;
1410 }
1411 if (frag->tc_frag_data.last_map != NULL)
1412 {
1413 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1414 S_GET_VALUE (symbolP));
1415 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1416 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1417 &symbol_lastP);
1418 }
1419 frag->tc_frag_data.last_map = symbolP;
1420 }
1421
1422 /* We must sometimes convert a region marked as code to data during
1423 code alignment, if an odd number of bytes have to be padded. The
1424 code mapping symbol is pushed to an aligned address. */
1425
1426 static void
1427 insert_data_mapping_symbol (enum mstate state,
1428 valueT value, fragS * frag, offsetT bytes)
1429 {
1430 /* If there was already a mapping symbol, remove it. */
1431 if (frag->tc_frag_data.last_map != NULL
1432 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1433 frag->fr_address + value)
1434 {
1435 symbolS *symp = frag->tc_frag_data.last_map;
1436
1437 if (value == 0)
1438 {
1439 know (frag->tc_frag_data.first_map == symp);
1440 frag->tc_frag_data.first_map = NULL;
1441 }
1442 frag->tc_frag_data.last_map = NULL;
1443 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1444 }
1445
1446 make_mapping_symbol (MAP_DATA, value, frag);
1447 make_mapping_symbol (state, value + bytes, frag);
1448 }
1449
1450 static void mapping_state_2 (enum mstate state, int max_chars);
1451
1452 /* Set the mapping state to STATE. Only call this when about to
1453 emit some STATE bytes to the file. */
1454
1455 void
1456 mapping_state (enum mstate state)
1457 {
1458 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1459
1460 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1461
1462 if (mapstate == state)
1463 /* The mapping symbol has already been emitted.
1464 There is nothing else to do. */
1465 return;
1466 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1467 /* This case will be evaluated later in the next else. */
1468 return;
1469 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1470 {
1471 /* Only add the symbol if the offset is > 0:
1472 if we're at the first frag, check its size > 0;
1473 if we're not at the first frag, then for sure
1474 the offset is > 0. */
1475 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1476 const int add_symbol = (frag_now != frag_first)
1477 || (frag_now_fix () > 0);
1478
1479 if (add_symbol)
1480 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1481 }
1482
1483 mapping_state_2 (state, 0);
1484 #undef TRANSITION
1485 }
1486
1487 /* Same as mapping_state, but MAX_CHARS bytes have already been
1488 allocated. Put the mapping symbol that far back. */
1489
1490 static void
1491 mapping_state_2 (enum mstate state, int max_chars)
1492 {
1493 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1494
1495 if (!SEG_NORMAL (now_seg))
1496 return;
1497
1498 if (mapstate == state)
1499 /* The mapping symbol has already been emitted.
1500 There is nothing else to do. */
1501 return;
1502
1503 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1504 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1505 }
1506 #else
1507 #define mapping_state(x) /* nothing */
1508 #define mapping_state_2(x, y) /* nothing */
1509 #endif
1510
1511 /* Directives: sectioning and alignment. */
1512
1513 static void
1514 s_bss (int ignore ATTRIBUTE_UNUSED)
1515 {
1516 /* We don't support putting frags in the BSS segment; we fake it by
1517 marking in_bss, then looking at s_skip for clues. */
1518 subseg_set (bss_section, 0);
1519 demand_empty_rest_of_line ();
1520 mapping_state (MAP_DATA);
1521 }
1522
1523 static void
1524 s_even (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* Never make frag if expect extra pass. */
1527 if (!need_pass_2)
1528 frag_align (1, 0, 0);
1529
1530 record_alignment (now_seg, 1);
1531
1532 demand_empty_rest_of_line ();
1533 }
1534
1535 /* Directives: Literal pools. */
1536
1537 static literal_pool *
1538 find_literal_pool (int size)
1539 {
1540 literal_pool *pool;
1541
1542 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1543 {
1544 if (pool->section == now_seg
1545 && pool->sub_section == now_subseg && pool->size == size)
1546 break;
1547 }
1548
1549 return pool;
1550 }
1551
1552 static literal_pool *
1553 find_or_make_literal_pool (int size)
1554 {
1555 /* Next literal pool ID number. */
1556 static unsigned int latest_pool_num = 1;
1557 literal_pool *pool;
1558
1559 pool = find_literal_pool (size);
1560
1561 if (pool == NULL)
1562 {
1563 /* Create a new pool. */
1564 pool = xmalloc (sizeof (*pool));
1565 if (!pool)
1566 return NULL;
1567
1568 /* Currently we always put the literal pool in the current text
1569 section. If we were generating "small" model code where we
1570 knew that all code and initialised data was within 1MB then
1571 we could output literals to mergeable, read-only data
1572 sections. */
1573
1574 pool->next_free_entry = 0;
1575 pool->section = now_seg;
1576 pool->sub_section = now_subseg;
1577 pool->size = size;
1578 pool->next = list_of_pools;
1579 pool->symbol = NULL;
1580
1581 /* Add it to the list. */
1582 list_of_pools = pool;
1583 }
1584
1585 /* New pools, and emptied pools, will have a NULL symbol. */
1586 if (pool->symbol == NULL)
1587 {
1588 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1589 (valueT) 0, &zero_address_frag);
1590 pool->id = latest_pool_num++;
1591 }
1592
1593 /* Done. */
1594 return pool;
1595 }
1596
1597 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1598 Return TRUE on success, otherwise return FALSE. */
1599 static bfd_boolean
1600 add_to_lit_pool (expressionS *exp, int size)
1601 {
1602 literal_pool *pool;
1603 unsigned int entry;
1604
1605 pool = find_or_make_literal_pool (size);
1606
1607 /* Check if this literal value is already in the pool. */
1608 for (entry = 0; entry < pool->next_free_entry; entry++)
1609 {
1610 if ((pool->literals[entry].X_op == exp->X_op)
1611 && (exp->X_op == O_constant)
1612 && (pool->literals[entry].X_add_number == exp->X_add_number)
1613 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1614 break;
1615
1616 if ((pool->literals[entry].X_op == exp->X_op)
1617 && (exp->X_op == O_symbol)
1618 && (pool->literals[entry].X_add_number == exp->X_add_number)
1619 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1620 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1621 break;
1622 }
1623
1624 /* Do we need to create a new entry? */
1625 if (entry == pool->next_free_entry)
1626 {
1627 if (entry >= MAX_LITERAL_POOL_SIZE)
1628 {
1629 set_syntax_error (_("literal pool overflow"));
1630 return FALSE;
1631 }
1632
1633 pool->literals[entry] = *exp;
1634 pool->next_free_entry += 1;
1635 }
1636
1637 exp->X_op = O_symbol;
1638 exp->X_add_number = ((int) entry) * size;
1639 exp->X_add_symbol = pool->symbol;
1640
1641 return TRUE;
1642 }
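/* Illustrative effect (editor's sketch, not part of the original source):
   when a load such as "ldr x0, =0x1122334455667788" cannot be encoded as
   an immediate, the 8-byte constant is appended to the per-section pool
   found by find_or_make_literal_pool (8) and the instruction's expression
   is rewritten as pool->symbol + entry * 8, i.e. a PC-relative literal
   load from the pool that a later ".ltorg" (or "pool") directive emits.  */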
1643
1644 /* Can't use symbol_new here, so have to create a symbol and then at
1645 a later date assign it a value. That's what these functions do. */
1646
1647 static void
1648 symbol_locate (symbolS * symbolP,
1649 const char *name,/* It is copied, the caller can modify. */
1650 segT segment, /* Segment identifier (SEG_<something>). */
1651 valueT valu, /* Symbol value. */
1652 fragS * frag) /* Associated fragment. */
1653 {
1654 unsigned int name_length;
1655 char *preserved_copy_of_name;
1656
1657 name_length = strlen (name) + 1; /* +1 for \0. */
1658 obstack_grow (&notes, name, name_length);
1659 preserved_copy_of_name = obstack_finish (&notes);
1660
1661 #ifdef tc_canonicalize_symbol_name
1662 preserved_copy_of_name =
1663 tc_canonicalize_symbol_name (preserved_copy_of_name);
1664 #endif
1665
1666 S_SET_NAME (symbolP, preserved_copy_of_name);
1667
1668 S_SET_SEGMENT (symbolP, segment);
1669 S_SET_VALUE (symbolP, valu);
1670 symbol_clear_list_pointers (symbolP);
1671
1672 symbol_set_frag (symbolP, frag);
1673
1674 /* Link to end of symbol chain. */
1675 {
1676 extern int symbol_table_frozen;
1677
1678 if (symbol_table_frozen)
1679 abort ();
1680 }
1681
1682 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1683
1684 obj_symbol_new_hook (symbolP);
1685
1686 #ifdef tc_symbol_new_hook
1687 tc_symbol_new_hook (symbolP);
1688 #endif
1689
1690 #ifdef DEBUG_SYMS
1691 verify_symbol_chain (symbol_rootP, symbol_lastP);
1692 #endif /* DEBUG_SYMS */
1693 }
1694
1695
1696 static void
1697 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1698 {
1699 unsigned int entry;
1700 literal_pool *pool;
1701 char sym_name[20];
1702 int align;
1703
1704 for (align = 2; align <= 4; align++)
1705 {
1706 int size = 1 << align;
1707
1708 pool = find_literal_pool (size);
1709 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1710 continue;
1711
1712 mapping_state (MAP_DATA);
1713
1714 /* Align the pool to the size of its literals.
1715 Only make a frag if we have to. */
1716 if (!need_pass_2)
1717 frag_align (align, 0, 0);
1718
1719 record_alignment (now_seg, align);
1720
1721 sprintf (sym_name, "$$lit_\002%x", pool->id);
1722
1723 symbol_locate (pool->symbol, sym_name, now_seg,
1724 (valueT) frag_now_fix (), frag_now);
1725 symbol_table_insert (pool->symbol);
1726
1727 for (entry = 0; entry < pool->next_free_entry; entry++)
1728 /* First output the expression in the instruction to the pool. */
1729 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1730
1731 /* Mark the pool as empty. */
1732 pool->next_free_entry = 0;
1733 pool->symbol = NULL;
1734 }
1735 }
1736
1737 #ifdef OBJ_ELF
1738 /* Forward declarations for functions below, in the MD interface
1739 section. */
1740 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1741 static struct reloc_table_entry * find_reloc_table_entry (char **);
1742
1743 /* Directives: Data. */
1744 /* N.B. the support for relocation suffix in this directive needs to be
1745 implemented properly. */
1746
1747 static void
1748 s_aarch64_elf_cons (int nbytes)
1749 {
1750 expressionS exp;
1751
1752 #ifdef md_flush_pending_output
1753 md_flush_pending_output ();
1754 #endif
1755
1756 if (is_it_end_of_statement ())
1757 {
1758 demand_empty_rest_of_line ();
1759 return;
1760 }
1761
1762 #ifdef md_cons_align
1763 md_cons_align (nbytes);
1764 #endif
1765
1766 mapping_state (MAP_DATA);
1767 do
1768 {
1769 struct reloc_table_entry *reloc;
1770
1771 expression (&exp);
1772
1773 if (exp.X_op != O_symbol)
1774 emit_expr (&exp, (unsigned int) nbytes);
1775 else
1776 {
1777 skip_past_char (&input_line_pointer, '#');
1778 if (skip_past_char (&input_line_pointer, ':'))
1779 {
1780 reloc = find_reloc_table_entry (&input_line_pointer);
1781 if (reloc == NULL)
1782 as_bad (_("unrecognized relocation suffix"));
1783 else
1784 as_bad (_("unimplemented relocation suffix"));
1785 ignore_rest_of_line ();
1786 return;
1787 }
1788 else
1789 emit_expr (&exp, (unsigned int) nbytes);
1790 }
1791 }
1792 while (*input_line_pointer++ == ',');
1793
1794 /* Put terminator back into stream. */
1795 input_line_pointer--;
1796 demand_empty_rest_of_line ();
1797 }
1798
1799 #endif /* OBJ_ELF */
1800
1801 /* Output a 32-bit word, but mark as an instruction. */
1802
1803 static void
1804 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1805 {
1806 expressionS exp;
1807
1808 #ifdef md_flush_pending_output
1809 md_flush_pending_output ();
1810 #endif
1811
1812 if (is_it_end_of_statement ())
1813 {
1814 demand_empty_rest_of_line ();
1815 return;
1816 }
1817
1818 if (!need_pass_2)
1819 frag_align_code (2, 0);
1820 #ifdef OBJ_ELF
1821 mapping_state (MAP_INSN);
1822 #endif
1823
1824 do
1825 {
1826 expression (&exp);
1827 if (exp.X_op != O_constant)
1828 {
1829 as_bad (_("constant expression required"));
1830 ignore_rest_of_line ();
1831 return;
1832 }
1833
1834 if (target_big_endian)
1835 {
1836 unsigned int val = exp.X_add_number;
1837 exp.X_add_number = SWAP_32 (val);
1838 }
1839 emit_expr (&exp, 4);
1840 }
1841 while (*input_line_pointer++ == ',');
1842
1843 /* Put terminator back into stream. */
1844 input_line_pointer--;
1845 demand_empty_rest_of_line ();
1846 }
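/* Usage sketch (editor's addition, not part of the original source):
   ".inst 0xd503201f" emits the 4-byte NOP encoding directly, byte-swapped
   on big-endian targets and, for ELF, marked with a "$x" mapping symbol
   so that it is still treated as code rather than data.  */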
1847
1848 #ifdef OBJ_ELF
1849 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1850
1851 static void
1852 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1853 {
1854 expressionS exp;
1855
1856 /* Since we're just labelling the code, there's no need to define a
1857 mapping symbol. */
1858 expression (&exp);
1859 /* Make sure there is enough room in this frag for the following
1860 blr. This trick only works if the blr follows immediately after
1861 the .tlsdesc directive. */
1862 frag_grow (4);
1863 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1864 BFD_RELOC_AARCH64_TLSDESC_CALL);
1865
1866 demand_empty_rest_of_line ();
1867 }
1868 #endif /* OBJ_ELF */
1869
1870 static void s_aarch64_arch (int);
1871 static void s_aarch64_cpu (int);
1872
1873 /* This table describes all the machine specific pseudo-ops the assembler
1874 has to support. The fields are:
1875 pseudo-op name without dot
1876 function to call to execute this pseudo-op
1877 Integer arg to pass to the function. */
1878
1879 const pseudo_typeS md_pseudo_table[] = {
1880 /* Never called because '.req' does not start a line. */
1881 {"req", s_req, 0},
1882 {"unreq", s_unreq, 0},
1883 {"bss", s_bss, 0},
1884 {"even", s_even, 0},
1885 {"ltorg", s_ltorg, 0},
1886 {"pool", s_ltorg, 0},
1887 {"cpu", s_aarch64_cpu, 0},
1888 {"arch", s_aarch64_arch, 0},
1889 {"inst", s_aarch64_inst, 0},
1890 #ifdef OBJ_ELF
1891 {"tlsdesccall", s_tlsdesccall, 0},
1892 {"word", s_aarch64_elf_cons, 4},
1893 {"long", s_aarch64_elf_cons, 4},
1894 {"xword", s_aarch64_elf_cons, 8},
1895 {"dword", s_aarch64_elf_cons, 8},
1896 #endif
1897 {0, 0, 0}
1898 };
1899 \f
1900
1901 /* Check whether STR points to a register name followed by a comma or the
1902 end of line; REG_TYPE indicates which register types are checked
1903 against. Return TRUE if STR is such a register name; otherwise return
1904 FALSE. The function does not intend to produce any diagnostics, but since
1905 the register parser aarch64_reg_parse, which is called by this function,
1906 does produce diagnostics, we call clear_error to clear any diagnostics
1907 that may be generated by aarch64_reg_parse.
1908 Also, the function returns FALSE directly if there is any user error
1909 present at the function entry. This prevents the existing diagnostics
1910 state from being spoiled.
1911 The function currently serves parse_constant_immediate and
1912 parse_big_immediate only. */
1913 static bfd_boolean
1914 reg_name_p (char *str, aarch64_reg_type reg_type)
1915 {
1916 int reg;
1917
1918 /* Prevent the diagnostics state from being spoiled. */
1919 if (error_p ())
1920 return FALSE;
1921
1922 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1923
1924 /* Clear the parsing error that may be set by the reg parser. */
1925 clear_error ();
1926
1927 if (reg == PARSE_FAIL)
1928 return FALSE;
1929
1930 skip_whitespace (str);
1931 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1932 return TRUE;
1933
1934 return FALSE;
1935 }
1936
1937 /* Parser functions used exclusively in instruction operands. */
1938
1939 /* Parse an immediate expression which may not be constant.
1940
1941 To prevent the expression parser from pushing a register name
1942 into the symbol table as an undefined symbol, firstly a check is
1943 done to find out whether STR is a valid register name followed
1944 by a comma or the end of line. Return FALSE if STR is such a
1945 string. */
1946
1947 static bfd_boolean
1948 parse_immediate_expression (char **str, expressionS *exp)
1949 {
1950 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1951 {
1952 set_recoverable_error (_("immediate operand required"));
1953 return FALSE;
1954 }
1955
1956 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1957
1958 if (exp->X_op == O_absent)
1959 {
1960 set_fatal_syntax_error (_("missing immediate expression"));
1961 return FALSE;
1962 }
1963
1964 return TRUE;
1965 }
1966
1967 /* Constant immediate-value read function for use in insn parsing.
1968 STR points to the beginning of the immediate (with the optional
1969 leading #); *VAL receives the value.
1970
1971 Return TRUE on success; otherwise return FALSE. */
1972
1973 static bfd_boolean
1974 parse_constant_immediate (char **str, int64_t * val)
1975 {
1976 expressionS exp;
1977
1978 if (! parse_immediate_expression (str, &exp))
1979 return FALSE;
1980
1981 if (exp.X_op != O_constant)
1982 {
1983 set_syntax_error (_("constant expression required"));
1984 return FALSE;
1985 }
1986
1987 *val = exp.X_add_number;
1988 return TRUE;
1989 }
1990
1991 static uint32_t
1992 encode_imm_float_bits (uint32_t imm)
1993 {
1994 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
1995 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
1996 }
1997
1998 /* Return TRUE if the single-precision floating-point value encoded in IMM
1999 can be expressed in the AArch64 8-bit signed floating-point format with
2000 3-bit exponent and normalized 4 bits of precision; in other words, the
2001 floating-point value must be expressible as
2002 (+/-) n / 16 * power (2, r)
2003 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2004
2005 static bfd_boolean
2006 aarch64_imm_float_p (uint32_t imm)
2007 {
2008 /* If a single-precision floating-point value has the following bit
2009 pattern, it can be expressed in the AArch64 8-bit floating-point
2010 format:
2011
2012 3 32222222 2221111111111
2013 1 09876543 21098765432109876543210
2014 n Eeeeeexx xxxx0000000000000000000
2015
2016 where n, e and each x are either 0 or 1 independently, with
2017 E == ~ e. */
2018
2019 uint32_t pattern;
2020
2021 /* Prepare the pattern for 'Eeeeee'. */
2022 if (((imm >> 30) & 0x1) == 0)
2023 pattern = 0x3e000000;
2024 else
2025 pattern = 0x40000000;
2026
2027 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2028 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2029 }
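/* Worked example (editor's note, not part of the original source):
   1.0f is 0x3f800000.  Bit 30 is 0, so the expected pattern is
   0x3e000000; the low 19 bits are 0 and (0x3f800000 & 0x7e000000) ==
   0x3e000000, so aarch64_imm_float_p returns TRUE.  Feeding the same
   word to encode_imm_float_bits gives ((0x3f800000 >> 19) & 0x7f) |
   ((0x3f800000 >> 24) & 0x80) = 0x70, the FMOV imm8 encoding of 1.0.  */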
2030
2031 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2032
2033 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2034 8-bit signed floating-point format with 3-bit exponent and normalized 4
2035 bits of precision (i.e. can be used in an FMOV instruction); return the
2036 equivalent single-precision encoding in *FPWORD.
2037
2038 Otherwise return FALSE. */
2039
2040 static bfd_boolean
2041 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2042 {
2043 /* If a double-precision floating-point value has the following bit
2044 pattern, it can be expressed in the AArch64 8-bit floating-point
2045 format:
2046
2047 6 66655555555 554444444...21111111111
2048 3 21098765432 109876543...098765432109876543210
2049 n Eeeeeeeeexx xxxx00000...000000000000000000000
2050
2051 where n, e and each x are either 0 or 1 independently, with
2052 E == ~ e. */
2053
2054 uint32_t pattern;
2055 uint32_t high32 = imm >> 32;
2056
2057 /* Lower 32 bits need to be 0s. */
2058 if ((imm & 0xffffffff) != 0)
2059 return FALSE;
2060
2061 /* Prepare the pattern for 'Eeeeeeeee'. */
2062 if (((high32 >> 30) & 0x1) == 0)
2063 pattern = 0x3fc00000;
2064 else
2065 pattern = 0x40000000;
2066
2067 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2068 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2069 {
2070 /* Convert to the single-precision encoding.
2071 i.e. convert
2072 n Eeeeeeeeexx xxxx00000...000000000000000000000
2073 to
2074 n Eeeeeexx xxxx0000000000000000000. */
2075 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2076 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2077 return TRUE;
2078 }
2079 else
2080 return FALSE;
2081 }
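/* For example: the double-precision value 2.0 is encoded as
   0x4000000000000000; its low 32 bits are zero and its high word
   0x40000000 matches the pattern above, so the function stores the
   equivalent single-precision encoding 0x40000000 in *FPWORD and
   returns TRUE.  */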
2082
2083 /* Parse a floating-point immediate. Return TRUE on success and return the
2084 value in *IMMED in the format of IEEE754 single-precision encoding.
2085 *CCP points to the start of the string; DP_P is TRUE when the immediate
2086 is expected to be in double-precision (N.B. this only matters when
2087 hexadecimal representation is involved).
2088
2089 N.B. 0.0 is accepted by this function. */
2090
2091 static bfd_boolean
2092 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2093 {
2094 char *str = *ccp;
2095 char *fpnum;
2096 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2097 int found_fpchar = 0;
2098 int64_t val = 0;
2099 unsigned fpword = 0;
2100 bfd_boolean hex_p = FALSE;
2101
2102 skip_past_char (&str, '#');
2103
2104 fpnum = str;
2105 skip_whitespace (fpnum);
2106
2107 if (strncmp (fpnum, "0x", 2) == 0)
2108 {
2109 /* Support the hexadecimal representation of the IEEE754 encoding.
2110 Double-precision is expected when DP_P is TRUE, otherwise the
2111 representation should be in single-precision. */
2112 if (! parse_constant_immediate (&str, &val))
2113 goto invalid_fp;
2114
2115 if (dp_p)
2116 {
2117 if (! aarch64_double_precision_fmovable (val, &fpword))
2118 goto invalid_fp;
2119 }
2120 else if ((uint64_t) val > 0xffffffff)
2121 goto invalid_fp;
2122 else
2123 fpword = val;
2124
2125 hex_p = TRUE;
2126 }
2127 else
2128 {
2129 /* We must not accidentally parse an integer as a floating-point number.
2130 Make sure that the value we parse is not an integer by checking for
2131 special characters '.', 'e' or 'E'. */
2132 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2133 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2134 {
2135 found_fpchar = 1;
2136 break;
2137 }
2138
2139 if (!found_fpchar)
2140 return FALSE;
2141 }
2142
2143 if (! hex_p)
2144 {
2145 int i;
2146
2147 if ((str = atof_ieee (str, 's', words)) == NULL)
2148 goto invalid_fp;
2149
2150 /* Our FP word must be 32 bits (single-precision FP). */
2151 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2152 {
2153 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2154 fpword |= words[i];
2155 }
2156 }
2157
2158 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2159 {
2160 *immed = fpword;
2161 *ccp = str;
2162 return TRUE;
2163 }
2164
2165 invalid_fp:
2166 set_fatal_syntax_error (_("invalid floating-point constant"));
2167 return FALSE;
2168 }
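/* For example: both "#2.0" and the hexadecimal form "#0x40000000" (with
   DP_P false) are accepted by parse_aarch64_imm_float and yield the same
   single-precision word 0x40000000 in *IMMED, whereas "#3" is rejected
   early because it contains none of '.', 'e' or 'E' and therefore looks
   like an integer rather than a floating-point constant.  */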
2169
2170 /* Less-generic immediate-value read function with the possibility of loading
2171 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2172 instructions.
2173
2174 To prevent the expression parser from pushing a register name into the
2175 symbol table as an undefined symbol, a check is first made to find
2176 out whether STR is a valid register name followed by a comma or the end
2177 of line. Return FALSE if STR is such a register. */
2178
2179 static bfd_boolean
2180 parse_big_immediate (char **str, int64_t *imm)
2181 {
2182 char *ptr = *str;
2183
2184 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2185 {
2186 set_syntax_error (_("immediate operand required"));
2187 return FALSE;
2188 }
2189
2190 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2191
2192 if (inst.reloc.exp.X_op == O_constant)
2193 *imm = inst.reloc.exp.X_add_number;
2194
2195 *str = ptr;
2196
2197 return TRUE;
2198 }
2199
2200 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2201 If NEED_LIBOPCODES is non-zero, the fixup will need
2202 assistance from libopcodes. */
2203
2204 static inline void
2205 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2206 const aarch64_opnd_info *operand,
2207 int need_libopcodes_p)
2208 {
2209 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2210 reloc->opnd = operand->type;
2211 if (need_libopcodes_p)
2212 reloc->need_libopcodes_p = 1;
2213 };
2214
2215 /* Return TRUE if the instruction needs to be fixed up later internally by
2216 the GAS; otherwise return FALSE. */
2217
2218 static inline bfd_boolean
2219 aarch64_gas_internal_fixup_p (void)
2220 {
2221 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2222 }
2223
2224 /* Assign the immediate value to the relevant field in *OPERAND if
2225 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2226 needs an internal fixup in a later stage.
2227 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2228 IMM.VALUE that may get assigned with the constant. */
2229 static inline void
2230 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2231 aarch64_opnd_info *operand,
2232 int addr_off_p,
2233 int need_libopcodes_p,
2234 int skip_p)
2235 {
2236 if (reloc->exp.X_op == O_constant)
2237 {
2238 if (addr_off_p)
2239 operand->addr.offset.imm = reloc->exp.X_add_number;
2240 else
2241 operand->imm.value = reloc->exp.X_add_number;
2242 reloc->type = BFD_RELOC_UNUSED;
2243 }
2244 else
2245 {
2246 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2247 /* Tell libopcodes to ignore this operand or not. This is helpful
2248 when one of the operands needs to be fixed up later but we need
2249 libopcodes to check the other operands. */
2250 operand->skip = skip_p;
2251 }
2252 }
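/* For example (a typical use by the operand-parsing code, which is not
   shown here): after parsing "add x0, x1, #8" the relocation expression
   is O_constant, so the value 8 is written straight into the operand and
   the reloc type is reset to BFD_RELOC_UNUSED; after "add x0, x1, #sym"
   the expression is symbolic, so the operand is marked for a GAS internal
   fixup (resolved later in md_apply_fix) and may be skipped by the
   libopcodes checks via OPERAND->skip.  */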
2253
2254 /* Relocation modifiers. Each entry in the table contains the textual
2255 name for the relocation which may be placed before a symbol used as
2256 a load/store offset or an add immediate. It must be surrounded by a
2257 leading and trailing colon, for example:
2258
2259 ldr x0, [x1, #:rello:varsym]
2260 add x0, x1, #:rello:varsym */
2261
2262 struct reloc_table_entry
2263 {
2264 const char *name;
2265 int pc_rel;
2266 bfd_reloc_code_real_type adrp_type;
2267 bfd_reloc_code_real_type movw_type;
2268 bfd_reloc_code_real_type add_type;
2269 bfd_reloc_code_real_type ldst_type;
2270 };
2271
2272 static struct reloc_table_entry reloc_table[] = {
2273 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2274 {"lo12", 0,
2275 0,
2276 0,
2277 BFD_RELOC_AARCH64_ADD_LO12,
2278 BFD_RELOC_AARCH64_LDST_LO12},
2279
2280 /* Higher 21 bits of pc-relative page offset: ADRP */
2281 {"pg_hi21", 1,
2282 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2283 0,
2284 0,
2285 0},
2286
2287 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2288 {"pg_hi21_nc", 1,
2289 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2290 0,
2291 0,
2292 0},
2293
2294 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2295 {"abs_g0", 0,
2296 0,
2297 BFD_RELOC_AARCH64_MOVW_G0,
2298 0,
2299 0},
2300
2301 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2302 {"abs_g0_s", 0,
2303 0,
2304 BFD_RELOC_AARCH64_MOVW_G0_S,
2305 0,
2306 0},
2307
2308 /* Less significant bits 0-15 of address/value: MOVK, no check */
2309 {"abs_g0_nc", 0,
2310 0,
2311 BFD_RELOC_AARCH64_MOVW_G0_NC,
2312 0,
2313 0},
2314
2315 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2316 {"abs_g1", 0,
2317 0,
2318 BFD_RELOC_AARCH64_MOVW_G1,
2319 0,
2320 0},
2321
2322 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2323 {"abs_g1_s", 0,
2324 0,
2325 BFD_RELOC_AARCH64_MOVW_G1_S,
2326 0,
2327 0},
2328
2329 /* Less significant bits 16-31 of address/value: MOVK, no check */
2330 {"abs_g1_nc", 0,
2331 0,
2332 BFD_RELOC_AARCH64_MOVW_G1_NC,
2333 0,
2334 0},
2335
2336 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2337 {"abs_g2", 0,
2338 0,
2339 BFD_RELOC_AARCH64_MOVW_G2,
2340 0,
2341 0},
2342
2343 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2344 {"abs_g2_s", 0,
2345 0,
2346 BFD_RELOC_AARCH64_MOVW_G2_S,
2347 0,
2348 0},
2349
2350 /* Less significant bits 32-47 of address/value: MOVK, no check */
2351 {"abs_g2_nc", 0,
2352 0,
2353 BFD_RELOC_AARCH64_MOVW_G2_NC,
2354 0,
2355 0},
2356
2357 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2358 {"abs_g3", 0,
2359 0,
2360 BFD_RELOC_AARCH64_MOVW_G3,
2361 0,
2362 0},
2363 /* Get to the GOT entry for a symbol. */
2364 {"got_prel19", 0,
2365 0,
2366 0,
2367 0,
2368 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2369 /* Get to the page containing GOT entry for a symbol. */
2370 {"got", 1,
2371 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2372 0,
2373 0,
2374 0},
2375 /* 12 bit offset into the page containing GOT entry for that symbol. */
2376 {"got_lo12", 0,
2377 0,
2378 0,
2379 0,
2380 BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2381
2382 /* Get to the page containing GOT TLS entry for a symbol */
2383 {"tlsgd", 0,
2384 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2385 0,
2386 0,
2387 0},
2388
2389 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2390 {"tlsgd_lo12", 0,
2391 0,
2392 0,
2393 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2394 0},
2395
2396 /* Get to the page containing GOT TLS entry for a symbol */
2397 {"tlsdesc", 0,
2398 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2399 0,
2400 0,
2401 0},
2402
2403 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2404 {"tlsdesc_lo12", 0,
2405 0,
2406 0,
2407 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2408 BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2409
2410 /* Get to the page containing GOT TLS entry for a symbol */
2411 {"gottprel", 0,
2412 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2413 0,
2414 0,
2415 0},
2416
2417 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2418 {"gottprel_lo12", 0,
2419 0,
2420 0,
2421 0,
2422 BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2423
2424 /* Get tp offset for a symbol. */
2425 {"tprel", 0,
2426 0,
2427 0,
2428 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2429 0},
2430
2431 /* Get tp offset for a symbol. */
2432 {"tprel_lo12", 0,
2433 0,
2434 0,
2435 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2436 0},
2437
2438 /* Get tp offset for a symbol. */
2439 {"tprel_hi12", 0,
2440 0,
2441 0,
2442 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2443 0},
2444
2445 /* Get tp offset for a symbol. */
2446 {"tprel_lo12_nc", 0,
2447 0,
2448 0,
2449 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2450 0},
2451
2452 /* Most significant bits 32-47 of address/value: MOVZ. */
2453 {"tprel_g2", 0,
2454 0,
2455 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2456 0,
2457 0},
2458
2459 /* Most significant bits 16-31 of address/value: MOVZ. */
2460 {"tprel_g1", 0,
2461 0,
2462 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2463 0,
2464 0},
2465
2466 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2467 {"tprel_g1_nc", 0,
2468 0,
2469 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2470 0,
2471 0},
2472
2473 /* Most significant bits 0-15 of address/value: MOVZ. */
2474 {"tprel_g0", 0,
2475 0,
2476 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2477 0,
2478 0},
2479
2480 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2481 {"tprel_g0_nc", 0,
2482 0,
2483 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2484 0,
2485 0},
2486 };
2487
2488 /* Given the address of a pointer pointing to the textual name of a
2489 relocation as may appear in assembler source, attempt to find its
2490 details in reloc_table. The pointer will be updated to the character
2491 after the trailing colon. On failure, NULL will be returned;
2492 otherwise return the reloc_table_entry. */
2493
2494 static struct reloc_table_entry *
2495 find_reloc_table_entry (char **str)
2496 {
2497 unsigned int i;
2498 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2499 {
2500 int length = strlen (reloc_table[i].name);
2501
2502 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2503 && (*str)[length] == ':')
2504 {
2505 *str += (length + 1);
2506 return &reloc_table[i];
2507 }
2508 }
2509
2510 return NULL;
2511 }
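/* For example: called with *STR pointing at "lo12:var", the loop above
   matches the "lo12" table entry, checks that the next character is the
   trailing colon, advances *STR to point at "var" and returns the entry,
   whose add_type is BFD_RELOC_AARCH64_ADD_LO12 and whose ldst_type is
   BFD_RELOC_AARCH64_LDST_LO12; the caller then picks whichever field
   matches the instruction being assembled.  */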
2512
2513 /* Mode argument to parse_shift and parse_shifter_operand. */
2514 enum parse_shift_mode
2515 {
2516 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2517 "#imm{,lsl #n}" */
2518 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2519 "#imm" */
2520 SHIFTED_LSL, /* bare "lsl #n" */
2521 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2522 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2523 };
2524
2525 /* Parse a <shift> operator on an AArch64 data processing instruction.
2526 Return TRUE on success; otherwise return FALSE. */
2527 static bfd_boolean
2528 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2529 {
2530 const struct aarch64_name_value_pair *shift_op;
2531 enum aarch64_modifier_kind kind;
2532 expressionS exp;
2533 int exp_has_prefix;
2534 char *s = *str;
2535 char *p = s;
2536
2537 for (p = *str; ISALPHA (*p); p++)
2538 ;
2539
2540 if (p == *str)
2541 {
2542 set_syntax_error (_("shift expression expected"));
2543 return FALSE;
2544 }
2545
2546 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2547
2548 if (shift_op == NULL)
2549 {
2550 set_syntax_error (_("shift operator expected"));
2551 return FALSE;
2552 }
2553
2554 kind = aarch64_get_operand_modifier (shift_op);
2555
2556 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2557 {
2558 set_syntax_error (_("invalid use of 'MSL'"));
2559 return FALSE;
2560 }
2561
2562 switch (mode)
2563 {
2564 case SHIFTED_LOGIC_IMM:
2565 if (aarch64_extend_operator_p (kind) == TRUE)
2566 {
2567 set_syntax_error (_("extending shift is not permitted"));
2568 return FALSE;
2569 }
2570 break;
2571
2572 case SHIFTED_ARITH_IMM:
2573 if (kind == AARCH64_MOD_ROR)
2574 {
2575 set_syntax_error (_("'ROR' shift is not permitted"));
2576 return FALSE;
2577 }
2578 break;
2579
2580 case SHIFTED_LSL:
2581 if (kind != AARCH64_MOD_LSL)
2582 {
2583 set_syntax_error (_("only 'LSL' shift is permitted"));
2584 return FALSE;
2585 }
2586 break;
2587
2588 case SHIFTED_REG_OFFSET:
2589 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2590 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2591 {
2592 set_fatal_syntax_error
2593 (_("invalid shift for the register offset addressing mode"));
2594 return FALSE;
2595 }
2596 break;
2597
2598 case SHIFTED_LSL_MSL:
2599 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2600 {
2601 set_syntax_error (_("invalid shift operator"));
2602 return FALSE;
2603 }
2604 break;
2605
2606 default:
2607 abort ();
2608 }
2609
2610 /* Whitespace can appear here if the next thing is a bare digit. */
2611 skip_whitespace (p);
2612
2613 /* Parse shift amount. */
2614 exp_has_prefix = 0;
2615 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2616 exp.X_op = O_absent;
2617 else
2618 {
2619 if (is_immediate_prefix (*p))
2620 {
2621 p++;
2622 exp_has_prefix = 1;
2623 }
2624 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2625 }
2626 if (exp.X_op == O_absent)
2627 {
2628 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2629 {
2630 set_syntax_error (_("missing shift amount"));
2631 return FALSE;
2632 }
2633 operand->shifter.amount = 0;
2634 }
2635 else if (exp.X_op != O_constant)
2636 {
2637 set_syntax_error (_("constant shift amount required"));
2638 return FALSE;
2639 }
2640 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2641 {
2642 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2643 return FALSE;
2644 }
2645 else
2646 {
2647 operand->shifter.amount = exp.X_add_number;
2648 operand->shifter.amount_present = 1;
2649 }
2650
2651 operand->shifter.operator_present = 1;
2652 operand->shifter.kind = kind;
2653
2654 *str = p;
2655 return TRUE;
2656 }
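/* For example: with *STR pointing at "lsl #3" and MODE set to SHIFTED_LSL,
   parse_shift looks up "lsl" in aarch64_shift_hsh, accepts it for this
   mode, parses the amount 3 and records AARCH64_MOD_LSL with
   shifter.amount == 3 in *OPERAND; "ror #3" with MODE SHIFTED_ARITH_IMM
   would instead fail with "'ROR' shift is not permitted".  */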
2657
2658 /* Parse a <shifter_operand> for a data processing instruction:
2659
2660 #<immediate>
2661 #<immediate>, LSL #imm
2662
2663 Validation of immediate operands is deferred to md_apply_fix.
2664
2665 Return TRUE on success; otherwise return FALSE. */
2666
2667 static bfd_boolean
2668 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2669 enum parse_shift_mode mode)
2670 {
2671 char *p;
2672
2673 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2674 return FALSE;
2675
2676 p = *str;
2677
2678 /* Accept an immediate expression. */
2679 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2680 return FALSE;
2681
2682 /* Accept optional LSL for arithmetic immediate values. */
2683 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2684 if (! parse_shift (&p, operand, SHIFTED_LSL))
2685 return FALSE;
2686
2687 /* Do not accept any shifter for logical immediate values. */
2688 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2689 && parse_shift (&p, operand, mode))
2690 {
2691 set_syntax_error (_("unexpected shift operator"));
2692 return FALSE;
2693 }
2694
2695 *str = p;
2696 return TRUE;
2697 }
2698
2699 /* Parse a <shifter_operand> for a data processing instruction:
2700
2701 <Rm>
2702 <Rm>, <shift>
2703 #<immediate>
2704 #<immediate>, LSL #imm
2705
2706 where <shift> is handled by parse_shift above, and the last two
2707 cases are handled by the function above.
2708
2709 Validation of immediate operands is deferred to md_apply_fix.
2710
2711 Return TRUE on success; otherwise return FALSE. */
2712
2713 static bfd_boolean
2714 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2715 enum parse_shift_mode mode)
2716 {
2717 int reg;
2718 int isreg32, isregzero;
2719 enum aarch64_operand_class opd_class
2720 = aarch64_get_operand_class (operand->type);
2721
2722 if ((reg =
2723 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2724 {
2725 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2726 {
2727 set_syntax_error (_("unexpected register in the immediate operand"));
2728 return FALSE;
2729 }
2730
2731 if (!isregzero && reg == REG_SP)
2732 {
2733 set_syntax_error (BAD_SP);
2734 return FALSE;
2735 }
2736
2737 operand->reg.regno = reg;
2738 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2739
2740 /* Accept optional shift operation on register. */
2741 if (! skip_past_comma (str))
2742 return TRUE;
2743
2744 if (! parse_shift (str, operand, mode))
2745 return FALSE;
2746
2747 return TRUE;
2748 }
2749 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2750 {
2751 set_syntax_error
2752 (_("integer register expected in the extended/shifted operand "
2753 "register"));
2754 return FALSE;
2755 }
2756
2757 /* We have a shifted immediate variable. */
2758 return parse_shifter_operand_imm (str, operand, mode);
2759 }
2760
2761 /* Return TRUE on success; return FALSE otherwise. */
2762
2763 static bfd_boolean
2764 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2765 enum parse_shift_mode mode)
2766 {
2767 char *p = *str;
2768
2769 /* Determine if we have the sequence of characters #: or just :
2770 coming next. If we do, then we check for a :rello: relocation
2771 modifier. If we don't, punt the whole lot to
2772 parse_shifter_operand. */
2773
2774 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2775 {
2776 struct reloc_table_entry *entry;
2777
2778 if (p[0] == '#')
2779 p += 2;
2780 else
2781 p++;
2782 *str = p;
2783
2784 /* Try to parse a relocation. Anything else is an error. */
2785 if (!(entry = find_reloc_table_entry (str)))
2786 {
2787 set_syntax_error (_("unknown relocation modifier"));
2788 return FALSE;
2789 }
2790
2791 if (entry->add_type == 0)
2792 {
2793 set_syntax_error
2794 (_("this relocation modifier is not allowed on this instruction"));
2795 return FALSE;
2796 }
2797
2798 /* Save str before we decompose it. */
2799 p = *str;
2800
2801 /* Next, we parse the expression. */
2802 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2803 return FALSE;
2804
2805 /* Record the relocation type (use the ADD variant here). */
2806 inst.reloc.type = entry->add_type;
2807 inst.reloc.pc_rel = entry->pc_rel;
2808
2809 /* If str is empty, we've reached the end, stop here. */
2810 if (**str == '\0')
2811 return TRUE;
2812
2813 /* Otherwise, we have a shifted reloc modifier, so rewind to
2814 recover the variable name and continue parsing for the shifter. */
2815 *str = p;
2816 return parse_shifter_operand_imm (str, operand, mode);
2817 }
2818
2819 return parse_shifter_operand (str, operand, mode);
2820 }
2821
2822 /* Parse all forms of an address expression. Information is written
2823 to *OPERAND and/or inst.reloc.
2824
2825 The A64 instruction set has the following addressing modes:
2826
2827 Offset
2828 [base] // in SIMD ld/st structure
2829 [base{,#0}] // in ld/st exclusive
2830 [base{,#imm}]
2831 [base,Xm{,LSL #imm}]
2832 [base,Xm,SXTX {#imm}]
2833 [base,Wm,(S|U)XTW {#imm}]
2834 Pre-indexed
2835 [base,#imm]!
2836 Post-indexed
2837 [base],#imm
2838 [base],Xm // in SIMD ld/st structure
2839 PC-relative (literal)
2840 label
2841 =immediate
2842
2843 (As a convenience, the notation "=immediate" is permitted in conjunction
2844 with the pc-relative literal load instructions to automatically place an
2845 immediate value or symbolic address in a nearby literal pool and generate
2846 a hidden label which references it.)
2847
2848 Upon a successful parsing, the address structure in *OPERAND will be
2849 filled in the following way:
2850
2851 .base_regno = <base>
2852 .offset.is_reg // 1 if the offset is a register
2853 .offset.imm = <imm>
2854 .offset.regno = <Rm>
2855
2856 For different addressing modes defined in the A64 ISA:
2857
2858 Offset
2859 .pcrel=0; .preind=1; .postind=0; .writeback=0
2860 Pre-indexed
2861 .pcrel=0; .preind=1; .postind=0; .writeback=1
2862 Post-indexed
2863 .pcrel=0; .preind=0; .postind=1; .writeback=1
2864 PC-relative (literal)
2865 .pcrel=1; .preind=1; .postind=0; .writeback=0
2866
2867 The shift/extension information, if any, will be stored in .shifter.
2868
2869 It is the caller's responsibility to check for addressing modes not
2870 supported by the instruction, and to set inst.reloc.type. */
2871
2872 static bfd_boolean
2873 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2874 int accept_reg_post_index)
2875 {
2876 char *p = *str;
2877 int reg;
2878 int isreg32, isregzero;
2879 expressionS *exp = &inst.reloc.exp;
2880
2881 if (! skip_past_char (&p, '['))
2882 {
2883 /* =immediate or label. */
2884 operand->addr.pcrel = 1;
2885 operand->addr.preind = 1;
2886
2887 /* #:<reloc_op>:<symbol> */
2888 skip_past_char (&p, '#');
2889 if (reloc && skip_past_char (&p, ':'))
2890 {
2891 struct reloc_table_entry *entry;
2892
2893 /* Try to parse a relocation modifier. Anything else is
2894 an error. */
2895 entry = find_reloc_table_entry (&p);
2896 if (! entry)
2897 {
2898 set_syntax_error (_("unknown relocation modifier"));
2899 return FALSE;
2900 }
2901
2902 if (entry->ldst_type == 0)
2903 {
2904 set_syntax_error
2905 (_("this relocation modifier is not allowed on this "
2906 "instruction"));
2907 return FALSE;
2908 }
2909
2910 /* #:<reloc_op>: */
2911 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2912 {
2913 set_syntax_error (_("invalid relocation expression"));
2914 return FALSE;
2915 }
2916
2917 /* #:<reloc_op>:<expr> */
2918 /* Record the load/store relocation type. */
2919 inst.reloc.type = entry->ldst_type;
2920 inst.reloc.pc_rel = entry->pc_rel;
2921 }
2922 else
2923 {
2924
2925 if (skip_past_char (&p, '='))
2926 /* =immediate; need to generate the literal in the literal pool. */
2927 inst.gen_lit_pool = 1;
2928
2929 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2930 {
2931 set_syntax_error (_("invalid address"));
2932 return FALSE;
2933 }
2934 }
2935
2936 *str = p;
2937 return TRUE;
2938 }
2939
2940 /* [ */
2941
2942 /* Accept SP and reject ZR */
2943 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2944 if (reg == PARSE_FAIL || isreg32)
2945 {
2946 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2947 return FALSE;
2948 }
2949 operand->addr.base_regno = reg;
2950
2951 /* [Xn */
2952 if (skip_past_comma (&p))
2953 {
2954 /* [Xn, */
2955 operand->addr.preind = 1;
2956
2957 /* Reject SP and accept ZR */
2958 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2959 if (reg != PARSE_FAIL)
2960 {
2961 /* [Xn,Rm */
2962 operand->addr.offset.regno = reg;
2963 operand->addr.offset.is_reg = 1;
2964 /* Shifted index. */
2965 if (skip_past_comma (&p))
2966 {
2967 /* [Xn,Rm, */
2968 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2969 /* Use the diagnostics set in parse_shift, so do not set a new
2970 error message here. */
2971 return FALSE;
2972 }
2973 /* We only accept:
2974 [base,Xm{,LSL #imm}]
2975 [base,Xm,SXTX {#imm}]
2976 [base,Wm,(S|U)XTW {#imm}] */
2977 if (operand->shifter.kind == AARCH64_MOD_NONE
2978 || operand->shifter.kind == AARCH64_MOD_LSL
2979 || operand->shifter.kind == AARCH64_MOD_SXTX)
2980 {
2981 if (isreg32)
2982 {
2983 set_syntax_error (_("invalid use of 32-bit register offset"));
2984 return FALSE;
2985 }
2986 }
2987 else if (!isreg32)
2988 {
2989 set_syntax_error (_("invalid use of 64-bit register offset"));
2990 return FALSE;
2991 }
2992 }
2993 else
2994 {
2995 /* [Xn,#:<reloc_op>:<symbol> */
2996 skip_past_char (&p, '#');
2997 if (reloc && skip_past_char (&p, ':'))
2998 {
2999 struct reloc_table_entry *entry;
3000
3001 /* Try to parse a relocation modifier. Anything else is
3002 an error. */
3003 if (!(entry = find_reloc_table_entry (&p)))
3004 {
3005 set_syntax_error (_("unknown relocation modifier"));
3006 return FALSE;
3007 }
3008
3009 if (entry->ldst_type == 0)
3010 {
3011 set_syntax_error
3012 (_("this relocation modifier is not allowed on this "
3013 "instruction"));
3014 return FALSE;
3015 }
3016
3017 /* [Xn,#:<reloc_op>: */
3018 /* We now have the group relocation table entry corresponding to
3019 the name in the assembler source. Next, we parse the
3020 expression. */
3021 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3022 {
3023 set_syntax_error (_("invalid relocation expression"));
3024 return FALSE;
3025 }
3026
3027 /* [Xn,#:<reloc_op>:<expr> */
3028 /* Record the load/store relocation type. */
3029 inst.reloc.type = entry->ldst_type;
3030 inst.reloc.pc_rel = entry->pc_rel;
3031 }
3032 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3033 {
3034 set_syntax_error (_("invalid expression in the address"));
3035 return FALSE;
3036 }
3037 /* [Xn,<expr> */
3038 }
3039 }
3040
3041 if (! skip_past_char (&p, ']'))
3042 {
3043 set_syntax_error (_("']' expected"));
3044 return FALSE;
3045 }
3046
3047 if (skip_past_char (&p, '!'))
3048 {
3049 if (operand->addr.preind && operand->addr.offset.is_reg)
3050 {
3051 set_syntax_error (_("register offset not allowed in pre-indexed "
3052 "addressing mode"));
3053 return FALSE;
3054 }
3055 /* [Xn]! */
3056 operand->addr.writeback = 1;
3057 }
3058 else if (skip_past_comma (&p))
3059 {
3060 /* [Xn], */
3061 operand->addr.postind = 1;
3062 operand->addr.writeback = 1;
3063
3064 if (operand->addr.preind)
3065 {
3066 set_syntax_error (_("cannot combine pre- and post-indexing"));
3067 return FALSE;
3068 }
3069
3070 if (accept_reg_post_index
3071 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3072 &isregzero)) != PARSE_FAIL)
3073 {
3074 /* [Xn],Xm */
3075 if (isreg32)
3076 {
3077 set_syntax_error (_("invalid 32-bit register offset"));
3078 return FALSE;
3079 }
3080 operand->addr.offset.regno = reg;
3081 operand->addr.offset.is_reg = 1;
3082 }
3083 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3084 {
3085 /* [Xn],#expr */
3086 set_syntax_error (_("invalid expression in the address"));
3087 return FALSE;
3088 }
3089 }
3090
3091 /* If at this point neither .preind nor .postind is set, we have a
3092 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3093 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3094 {
3095 if (operand->addr.writeback)
3096 {
3097 /* Reject [Rn]! */
3098 set_syntax_error (_("missing offset in the pre-indexed address"));
3099 return FALSE;
3100 }
3101 operand->addr.preind = 1;
3102 inst.reloc.exp.X_op = O_constant;
3103 inst.reloc.exp.X_add_number = 0;
3104 }
3105
3106 *str = p;
3107 return TRUE;
3108 }
3109
3110 /* Return TRUE on success; otherwise return FALSE. */
3111 static bfd_boolean
3112 parse_address (char **str, aarch64_opnd_info *operand,
3113 int accept_reg_post_index)
3114 {
3115 return parse_address_main (str, operand, 0, accept_reg_post_index);
3116 }
3117
3118 /* Return TRUE on success; otherwise return FALSE. */
3119 static bfd_boolean
3120 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3121 {
3122 return parse_address_main (str, operand, 1, 0);
3123 }
3124
3125 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3126 Return TRUE on success; otherwise return FALSE. */
3127 static bfd_boolean
3128 parse_half (char **str, int *internal_fixup_p)
3129 {
3130 char *p, *saved;
3131 int dummy;
3132
3133 p = *str;
3134 skip_past_char (&p, '#');
3135
3136 gas_assert (internal_fixup_p);
3137 *internal_fixup_p = 0;
3138
3139 if (*p == ':')
3140 {
3141 struct reloc_table_entry *entry;
3142
3143 /* Try to parse a relocation. Anything else is an error. */
3144 ++p;
3145 if (!(entry = find_reloc_table_entry (&p)))
3146 {
3147 set_syntax_error (_("unknown relocation modifier"));
3148 return FALSE;
3149 }
3150
3151 if (entry->movw_type == 0)
3152 {
3153 set_syntax_error
3154 (_("this relocation modifier is not allowed on this instruction"));
3155 return FALSE;
3156 }
3157
3158 inst.reloc.type = entry->movw_type;
3159 }
3160 else
3161 *internal_fixup_p = 1;
3162
3163 /* Avoid parsing a register as a general symbol. */
3164 saved = p;
3165 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3166 return FALSE;
3167 p = saved;
3168
3169 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3170 return FALSE;
3171
3172 *str = p;
3173 return TRUE;
3174 }
3175
3176 /* Parse an operand for an ADRP instruction:
3177 ADRP <Xd>, <label>
3178 Return TRUE on success; otherwise return FALSE. */
3179
3180 static bfd_boolean
3181 parse_adrp (char **str)
3182 {
3183 char *p;
3184
3185 p = *str;
3186 if (*p == ':')
3187 {
3188 struct reloc_table_entry *entry;
3189
3190 /* Try to parse a relocation. Anything else is an error. */
3191 ++p;
3192 if (!(entry = find_reloc_table_entry (&p)))
3193 {
3194 set_syntax_error (_("unknown relocation modifier"));
3195 return FALSE;
3196 }
3197
3198 if (entry->adrp_type == 0)
3199 {
3200 set_syntax_error
3201 (_("this relocation modifier is not allowed on this instruction"));
3202 return FALSE;
3203 }
3204
3205 inst.reloc.type = entry->adrp_type;
3206 }
3207 else
3208 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3209
3210 inst.reloc.pc_rel = 1;
3211
3212 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3213 return FALSE;
3214
3215 *str = p;
3216 return TRUE;
3217 }
3218
3219 /* Miscellaneous. */
3220
3221 /* Parse an option for a preload instruction. Returns the encoding for the
3222 option, or PARSE_FAIL. */
3223
3224 static int
3225 parse_pldop (char **str)
3226 {
3227 char *p, *q;
3228 const struct aarch64_name_value_pair *o;
3229
3230 p = q = *str;
3231 while (ISALNUM (*q))
3232 q++;
3233
3234 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3235 if (!o)
3236 return PARSE_FAIL;
3237
3238 *str = q;
3239 return o->value;
3240 }
3241
3242 /* Parse an option for a barrier instruction. Returns the encoding for the
3243 option, or PARSE_FAIL. */
3244
3245 static int
3246 parse_barrier (char **str)
3247 {
3248 char *p, *q;
3249 const asm_barrier_opt *o;
3250
3251 p = q = *str;
3252 while (ISALPHA (*q))
3253 q++;
3254
3255 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3256 if (!o)
3257 return PARSE_FAIL;
3258
3259 *str = q;
3260 return o->value;
3261 }
3262
3263 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3264 Returns the encoding for the option, or PARSE_FAIL.
3265
3266 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3267 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3268
3269 static int
3270 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3271 {
3272 char *p, *q;
3273 char buf[32];
3274 const struct aarch64_name_value_pair *o;
3275 int value;
3276
3277 p = buf;
3278 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3279 if (p < buf + 31)
3280 *p++ = TOLOWER (*q);
3281 *p = '\0';
3282 /* Assert that BUF is large enough. */
3283 gas_assert (p - buf == q - *str);
3284
3285 o = hash_find (sys_regs, buf);
3286 if (!o)
3287 {
3288 if (!imple_defined_p)
3289 return PARSE_FAIL;
3290 else
3291 {
3292 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3293 registers. */
3294 unsigned int op0, op1, cn, cm, op2;
3295 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3296 return PARSE_FAIL;
3297 /* The architecture specifies the encoding space for implementation
3298 defined registers as:
3299 op0 op1 CRn CRm op2
3300 11 xxx 1x11 xxxx xxx
3301 For convenience GAS accepts a wider encoding space, as follows:
3302 op0 op1 CRn CRm op2
3303 11 xxx xxxx xxxx xxx */
3304 if (op0 != 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3305 return PARSE_FAIL;
3306 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3307 }
3308 }
3309 else
3310 value = o->value;
3311
3312 *str = q;
3313 return value;
3314 }
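/* For example: when IMPLE_DEFINED_P is non-zero, an implementation
   defined register name such as "s3_0_c15_c1_0" is parsed as op0=3,
   op1=0, CRn=15, CRm=1, op2=0 and encoded as
   (3 << 14) | (0 << 11) | (15 << 7) | (1 << 3) | 0 == 0xc788, whereas a
   name found in SYS_REGS is simply returned from the table.  */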
3315
3316 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3317 for the option, or NULL. */
3318
3319 static const aarch64_sys_ins_reg *
3320 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3321 {
3322 char *p, *q;
3323 char buf[32];
3324 const aarch64_sys_ins_reg *o;
3325
3326 p = buf;
3327 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3328 if (p < buf + 31)
3329 *p++ = TOLOWER (*q);
3330 *p = '\0';
3331
3332 o = hash_find (sys_ins_regs, buf);
3333 if (!o)
3334 return NULL;
3335
3336 *str = q;
3337 return o;
3338 }
3339 \f
3340 #define po_char_or_fail(chr) do { \
3341 if (! skip_past_char (&str, chr)) \
3342 goto failure; \
3343 } while (0)
3344
3345 #define po_reg_or_fail(regtype) do { \
3346 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3347 if (val == PARSE_FAIL) \
3348 { \
3349 set_default_error (); \
3350 goto failure; \
3351 } \
3352 } while (0)
3353
3354 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3355 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3356 &isreg32, &isregzero); \
3357 if (val == PARSE_FAIL) \
3358 { \
3359 set_default_error (); \
3360 goto failure; \
3361 } \
3362 info->reg.regno = val; \
3363 if (isreg32) \
3364 info->qualifier = AARCH64_OPND_QLF_W; \
3365 else \
3366 info->qualifier = AARCH64_OPND_QLF_X; \
3367 } while (0)
3368
3369 #define po_imm_nc_or_fail() do { \
3370 if (! parse_constant_immediate (&str, &val)) \
3371 goto failure; \
3372 } while (0)
3373
3374 #define po_imm_or_fail(min, max) do { \
3375 if (! parse_constant_immediate (&str, &val)) \
3376 goto failure; \
3377 if (val < min || val > max) \
3378 { \
3379 set_fatal_syntax_error (_("immediate value out of range "\
3380 #min " to "#max)); \
3381 goto failure; \
3382 } \
3383 } while (0)
3384
3385 #define po_misc_or_fail(expr) do { \
3386 if (!expr) \
3387 goto failure; \
3388 } while (0)
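/* A simplified sketch of how these po_ macros are typically used by the
   operand-parsing code (illustrative only; the real parse_operands is not
   shown here).  They assume local variables such as STR, VAL, RTYPE,
   ISREG32/ISREGZERO and INFO, plus a "failure" label in the enclosing
   function:

     switch (operands[i])
       {
       case AARCH64_OPND_Rd:
         po_int_reg_or_fail (1, 0);   // reject SP, accept ZR
         break;
       case AARCH64_OPND_IMM_MOV:
         po_imm_nc_or_fail ();
         info->imm.value = val;
         break;
       ...
       }
     ...
   failure:
     ;  // record the error and try the next opcode template
*/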
3389 \f
3390 /* encode the 12-bit imm field of Add/sub immediate */
3391 static inline uint32_t
3392 encode_addsub_imm (uint32_t imm)
3393 {
3394 return imm << 10;
3395 }
3396
3397 /* encode the shift amount field of Add/sub immediate */
3398 static inline uint32_t
3399 encode_addsub_imm_shift_amount (uint32_t cnt)
3400 {
3401 return cnt << 22;
3402 }
3403
3404
3405 /* encode the imm field of Adr instruction */
3406 static inline uint32_t
3407 encode_adr_imm (uint32_t imm)
3408 {
3409 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3410 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3411 }
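/* For example: encode_adr_imm (0x12345) places the low two bits (0x1) at
   bits [30:29] and the field from bits [20:2] (0x48d1) at bits [23:5],
   giving 0x20000000 | 0x00091a20 == 0x20091a20.  */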
3412
3413 /* encode the immediate field of Move wide immediate */
3414 static inline uint32_t
3415 encode_movw_imm (uint32_t imm)
3416 {
3417 return imm << 5;
3418 }
3419
3420 /* encode the 26-bit offset of unconditional branch */
3421 static inline uint32_t
3422 encode_branch_ofs_26 (uint32_t ofs)
3423 {
3424 return ofs & ((1 << 26) - 1);
3425 }
3426
3427 /* encode the 19-bit offset of conditional branch and compare & branch */
3428 static inline uint32_t
3429 encode_cond_branch_ofs_19 (uint32_t ofs)
3430 {
3431 return (ofs & ((1 << 19) - 1)) << 5;
3432 }
3433
3434 /* encode the 19-bit offset of ld literal */
3435 static inline uint32_t
3436 encode_ld_lit_ofs_19 (uint32_t ofs)
3437 {
3438 return (ofs & ((1 << 19) - 1)) << 5;
3439 }
3440
3441 /* Encode the 14-bit offset of test & branch. */
3442 static inline uint32_t
3443 encode_tst_branch_ofs_14 (uint32_t ofs)
3444 {
3445 return (ofs & ((1 << 14) - 1)) << 5;
3446 }
3447
3448 /* Encode the 16-bit imm field of svc/hvc/smc. */
3449 static inline uint32_t
3450 encode_svc_imm (uint32_t imm)
3451 {
3452 return imm << 5;
3453 }
3454
3455 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3456 static inline uint32_t
3457 reencode_addsub_switch_add_sub (uint32_t opcode)
3458 {
3459 return opcode ^ (1 << 30);
3460 }
3461
3462 static inline uint32_t
3463 reencode_movzn_to_movz (uint32_t opcode)
3464 {
3465 return opcode | (1 << 30);
3466 }
3467
3468 static inline uint32_t
3469 reencode_movzn_to_movn (uint32_t opcode)
3470 {
3471 return opcode & ~(1 << 30);
3472 }
3473
3474 /* Overall per-instruction processing. */
3475
3476 /* We need to be able to fix up arbitrary expressions in some statements.
3477 This is so that we can handle symbols that are an arbitrary distance from
3478 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3479 which returns part of an address in a form which will be valid for
3480 a data instruction. We do this by pushing the expression into a symbol
3481 in the expr_section, and creating a fix for that. */
3482
3483 static fixS *
3484 fix_new_aarch64 (fragS * frag,
3485 int where,
3486 short int size, expressionS * exp, int pc_rel, int reloc)
3487 {
3488 fixS *new_fix;
3489
3490 switch (exp->X_op)
3491 {
3492 case O_constant:
3493 case O_symbol:
3494 case O_add:
3495 case O_subtract:
3496 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3497 break;
3498
3499 default:
3500 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3501 pc_rel, reloc);
3502 break;
3503 }
3504 return new_fix;
3505 }
3506 \f
3507 /* Diagnostics on operands errors. */
3508
3509 /* By default, output one-line error message only.
3510 Enable the verbose error message by -merror-verbose. */
3511 static int verbose_error_p = 0;
3512
3513 #ifdef DEBUG_AARCH64
3514 /* N.B. this is only for the purpose of debugging. */
3515 const char* operand_mismatch_kind_names[] =
3516 {
3517 "AARCH64_OPDE_NIL",
3518 "AARCH64_OPDE_RECOVERABLE",
3519 "AARCH64_OPDE_SYNTAX_ERROR",
3520 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3521 "AARCH64_OPDE_INVALID_VARIANT",
3522 "AARCH64_OPDE_OUT_OF_RANGE",
3523 "AARCH64_OPDE_UNALIGNED",
3524 "AARCH64_OPDE_REG_LIST",
3525 "AARCH64_OPDE_OTHER_ERROR",
3526 };
3527 #endif /* DEBUG_AARCH64 */
3528
3529 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3530
3531 When multiple errors of different kinds are found in the same assembly
3532 line, only the error of the highest severity will be picked up for
3533 issuing the diagnostics. */
3534
3535 static inline bfd_boolean
3536 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3537 enum aarch64_operand_error_kind rhs)
3538 {
3539 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3540 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3541 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3542 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3543 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3544 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3545 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3546 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3547 return lhs > rhs;
3548 }
3549
3550 /* Helper routine to get the mnemonic name from the assembly instruction
3551 line; should only be called for diagnostic purposes, as there is a
3552 string copy operation involved, which may affect the runtime
3553 performance if used elsewhere. */
3554
3555 static const char*
3556 get_mnemonic_name (const char *str)
3557 {
3558 static char mnemonic[32];
3559 char *ptr;
3560
3561 /* Get the first 31 bytes and assume that the full name is included. */
3562 strncpy (mnemonic, str, 31);
3563 mnemonic[31] = '\0';
3564
3565 /* Scan up to the end of the mnemonic, which must end in white space,
3566 '.', or end of string. */
3567 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3568 ;
3569
3570 *ptr = '\0';
3571
3572 /* Append '...' to the truncated long name. */
3573 if (ptr - mnemonic == 31)
3574 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3575
3576 return mnemonic;
3577 }
3578
3579 static void
3580 reset_aarch64_instruction (aarch64_instruction *instruction)
3581 {
3582 memset (instruction, '\0', sizeof (aarch64_instruction));
3583 instruction->reloc.type = BFD_RELOC_UNUSED;
3584 }
3585
3586 /* Data structures storing one user error in the assembly code related to
3587 operands. */
3588
3589 struct operand_error_record
3590 {
3591 const aarch64_opcode *opcode;
3592 aarch64_operand_error detail;
3593 struct operand_error_record *next;
3594 };
3595
3596 typedef struct operand_error_record operand_error_record;
3597
3598 struct operand_errors
3599 {
3600 operand_error_record *head;
3601 operand_error_record *tail;
3602 };
3603
3604 typedef struct operand_errors operand_errors;
3605
3606 /* Top-level data structure reporting user errors for the current line of
3607 the assembly code.
3608 The way md_assemble works is that all opcodes sharing the same mnemonic
3609 name are iterated to find a match to the assembly line. In this data
3610 structure, each such opcode will have one operand_error_record
3611 allocated and inserted. In other words, excessive errors related to
3612 a single opcode are disregarded. */
3613 operand_errors operand_error_report;
3614
3615 /* Free record nodes. */
3616 static operand_error_record *free_opnd_error_record_nodes = NULL;
3617
3618 /* Initialize the data structure that stores the operand mismatch
3619 information on assembling one line of the assembly code. */
3620 static void
3621 init_operand_error_report (void)
3622 {
3623 if (operand_error_report.head != NULL)
3624 {
3625 gas_assert (operand_error_report.tail != NULL);
3626 operand_error_report.tail->next = free_opnd_error_record_nodes;
3627 free_opnd_error_record_nodes = operand_error_report.head;
3628 operand_error_report.head = NULL;
3629 operand_error_report.tail = NULL;
3630 return;
3631 }
3632 gas_assert (operand_error_report.tail == NULL);
3633 }
3634
3635 /* Return TRUE if some operand error has been recorded during the
3636 parsing of the current assembly line using the opcode *OPCODE;
3637 otherwise return FALSE. */
3638 static inline bfd_boolean
3639 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3640 {
3641 operand_error_record *record = operand_error_report.head;
3642 return record && record->opcode == opcode;
3643 }
3644
3645 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3646 OPCODE field is initialized with OPCODE.
3647 N.B. only one record per opcode, i.e. at most one error is
3648 recorded for each instruction template. */
3649
3650 static void
3651 add_operand_error_record (const operand_error_record* new_record)
3652 {
3653 const aarch64_opcode *opcode = new_record->opcode;
3654 operand_error_record* record = operand_error_report.head;
3655
3656 /* The record may have been created for this opcode. If not, we need
3657 to prepare one. */
3658 if (! opcode_has_operand_error_p (opcode))
3659 {
3660 /* Get one empty record. */
3661 if (free_opnd_error_record_nodes == NULL)
3662 {
3663 record = xmalloc (sizeof (operand_error_record));
3664 if (record == NULL)
3665 abort ();
3666 }
3667 else
3668 {
3669 record = free_opnd_error_record_nodes;
3670 free_opnd_error_record_nodes = record->next;
3671 }
3672 record->opcode = opcode;
3673 /* Insert at the head. */
3674 record->next = operand_error_report.head;
3675 operand_error_report.head = record;
3676 if (operand_error_report.tail == NULL)
3677 operand_error_report.tail = record;
3678 }
3679 else if (record->detail.kind != AARCH64_OPDE_NIL
3680 && record->detail.index <= new_record->detail.index
3681 && operand_error_higher_severity_p (record->detail.kind,
3682 new_record->detail.kind))
3683 {
3684 /* In the case of multiple errors found on operands related with a
3685 single opcode, only record the error of the leftmost operand and
3686 only if the error is of higher severity. */
3687 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3688 " the existing error %s on operand %d",
3689 operand_mismatch_kind_names[new_record->detail.kind],
3690 new_record->detail.index,
3691 operand_mismatch_kind_names[record->detail.kind],
3692 record->detail.index);
3693 return;
3694 }
3695
3696 record->detail = new_record->detail;
3697 }
3698
3699 static inline void
3700 record_operand_error_info (const aarch64_opcode *opcode,
3701 aarch64_operand_error *error_info)
3702 {
3703 operand_error_record record;
3704 record.opcode = opcode;
3705 record.detail = *error_info;
3706 add_operand_error_record (&record);
3707 }
3708
3709 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3710 error message *ERROR, for operand IDX (count from 0). */
3711
3712 static void
3713 record_operand_error (const aarch64_opcode *opcode, int idx,
3714 enum aarch64_operand_error_kind kind,
3715 const char* error)
3716 {
3717 aarch64_operand_error info;
3718 memset(&info, 0, sizeof (info));
3719 info.index = idx;
3720 info.kind = kind;
3721 info.error = error;
3722 record_operand_error_info (opcode, &info);
3723 }
3724
3725 static void
3726 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3727 enum aarch64_operand_error_kind kind,
3728 const char* error, const int *extra_data)
3729 {
3730 aarch64_operand_error info;
3731 info.index = idx;
3732 info.kind = kind;
3733 info.error = error;
3734 info.data[0] = extra_data[0];
3735 info.data[1] = extra_data[1];
3736 info.data[2] = extra_data[2];
3737 record_operand_error_info (opcode, &info);
3738 }
3739
3740 static void
3741 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3742 const char* error, int lower_bound,
3743 int upper_bound)
3744 {
3745 int data[3] = {lower_bound, upper_bound, 0};
3746 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3747 error, data);
3748 }
3749
3750 /* Remove the operand error record for *OPCODE. */
3751 static void ATTRIBUTE_UNUSED
3752 remove_operand_error_record (const aarch64_opcode *opcode)
3753 {
3754 if (opcode_has_operand_error_p (opcode))
3755 {
3756 operand_error_record* record = operand_error_report.head;
3757 gas_assert (record != NULL && operand_error_report.tail != NULL);
3758 operand_error_report.head = record->next;
3759 record->next = free_opnd_error_record_nodes;
3760 free_opnd_error_record_nodes = record;
3761 if (operand_error_report.head == NULL)
3762 {
3763 gas_assert (operand_error_report.tail == record);
3764 operand_error_report.tail = NULL;
3765 }
3766 }
3767 }
3768
3769 /* Given the instruction in *INSTR, return the index of the best matched
3770 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3771
3772 Return -1 if there is no qualifier sequence; return the first match
3773 if multiple matches are found. */
3774
3775 static int
3776 find_best_match (const aarch64_inst *instr,
3777 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3778 {
3779 int i, num_opnds, max_num_matched, idx;
3780
3781 num_opnds = aarch64_num_of_operands (instr->opcode);
3782 if (num_opnds == 0)
3783 {
3784 DEBUG_TRACE ("no operand");
3785 return -1;
3786 }
3787
3788 max_num_matched = 0;
3789 idx = -1;
3790
3791 /* For each pattern. */
3792 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3793 {
3794 int j, num_matched;
3795 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3796
3797 /* Most opcodes have far fewer patterns in the list. */
3798 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3799 {
3800 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3801 if (i != 0 && idx == -1)
3802 /* If nothing has been matched, return the 1st sequence. */
3803 idx = 0;
3804 break;
3805 }
3806
3807 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3808 if (*qualifiers == instr->operands[j].qualifier)
3809 ++num_matched;
3810
3811 if (num_matched > max_num_matched)
3812 {
3813 max_num_matched = num_matched;
3814 idx = i;
3815 }
3816 }
3817
3818 DEBUG_TRACE ("return with %d", idx);
3819 return idx;
3820 }
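/* For example: if the instruction under assembly has all of its operands
   qualified as W registers while the opcode's qualifier list contains the
   sequences (X, X, X) and (W, W, W), the second sequence matches all of
   the operands, so find_best_match returns index 1.  */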
3821
3822 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3823 corresponding operands in *INSTR. */
3824
3825 static inline void
3826 assign_qualifier_sequence (aarch64_inst *instr,
3827 const aarch64_opnd_qualifier_t *qualifiers)
3828 {
3829 int i = 0;
3830 int num_opnds = aarch64_num_of_operands (instr->opcode);
3831 gas_assert (num_opnds);
3832 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3833 instr->operands[i].qualifier = *qualifiers;
3834 }
3835
3836 /* Print operands for the diagnosis purpose. */
3837
3838 static void
3839 print_operands (char *buf, const aarch64_opcode *opcode,
3840 const aarch64_opnd_info *opnds)
3841 {
3842 int i;
3843
3844 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3845 {
3846 const size_t size = 128;
3847 char str[size];
3848
3849 /* We rely primarily on the opcode operand info; however, we also look
3850 into the inst->operands to support the printing of an optional
3851 operand.
3852 The two operand codes should be the same in all cases, apart from
3853 when the operand can be optional. */
3854 if (opcode->operands[i] == AARCH64_OPND_NIL
3855 || opnds[i].type == AARCH64_OPND_NIL)
3856 break;
3857
3858 /* Generate the operand string in STR. */
3859 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3860
3861 /* Delimiter. */
3862 if (str[0] != '\0')
3863 strcat (buf, i == 0 ? " " : ",");
3864
3865 /* Append the operand string. */
3866 strcat (buf, str);
3867 }
3868 }
3869
3870 /* Send to stderr a string as information. */
3871
3872 static void
3873 output_info (const char *format, ...)
3874 {
3875 char *file;
3876 unsigned int line;
3877 va_list args;
3878
3879 as_where (&file, &line);
3880 if (file)
3881 {
3882 if (line != 0)
3883 fprintf (stderr, "%s:%u: ", file, line);
3884 else
3885 fprintf (stderr, "%s: ", file);
3886 }
3887 fprintf (stderr, _("Info: "));
3888 va_start (args, format);
3889 vfprintf (stderr, format, args);
3890 va_end (args);
3891 (void) putc ('\n', stderr);
3892 }
3893
3894 /* Output one operand error record. */
3895
3896 static void
3897 output_operand_error_record (const operand_error_record *record, char *str)
3898 {
3899 int idx = record->detail.index;
3900 const aarch64_opcode *opcode = record->opcode;
3901 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3902 : AARCH64_OPND_NIL);
3903 const aarch64_operand_error *detail = &record->detail;
3904
3905 switch (detail->kind)
3906 {
3907 case AARCH64_OPDE_NIL:
3908 gas_assert (0);
3909 break;
3910
3911 case AARCH64_OPDE_SYNTAX_ERROR:
3912 case AARCH64_OPDE_RECOVERABLE:
3913 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3914 case AARCH64_OPDE_OTHER_ERROR:
3915 gas_assert (idx >= 0);
3916 /* Use the prepared error message if there is one; otherwise use the
3917 operand description string to describe the error. */
3918 if (detail->error != NULL)
3919 {
3920 if (detail->index == -1)
3921 as_bad (_("%s -- `%s'"), detail->error, str);
3922 else
3923 as_bad (_("%s at operand %d -- `%s'"),
3924 detail->error, detail->index + 1, str);
3925 }
3926 else
3927 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3928 aarch64_get_operand_desc (opd_code), str);
3929 break;
3930
3931 case AARCH64_OPDE_INVALID_VARIANT:
3932 as_bad (_("operand mismatch -- `%s'"), str);
3933 if (verbose_error_p)
3934 {
3935 /* We will try to correct the erroneous instruction and also provide
3936 more information e.g. all other valid variants.
3937
3938 The string representation of the corrected instruction and other
3939 valid variants are generated by
3940
3941 1) obtaining the intermediate representation of the erroneous
3942 instruction;
3943 2) manipulating the IR, e.g. replacing the operand qualifier;
3944 3) printing out the instruction by calling the printer functions
3945 shared with the disassembler.
3946
3947 The limitation of this method is that the exact input assembly
3948 line cannot be accurately reproduced in some cases, for example an
3949 optional operand present in the actual assembly line will be
3950 omitted in the output; likewise for the optional syntax rules,
3951 e.g. the # before the immediate. Another limitation is that the
3952 assembly symbols and relocation operations in the assembly line
3953 currently cannot be printed out in the error report. Last but not
3954 least, when other errors co-exist with this error, the
3955 'corrected' instruction may still be incorrect, e.g. given
3956 'ldnp h0,h1,[x0,#6]!'
3957 this diagnosis will provide the version:
3958 'ldnp s0,s1,[x0,#6]!'
3959 which is still not right. */
3960 size_t len = strlen (get_mnemonic_name (str));
3961 int i, qlf_idx;
3962 bfd_boolean result;
3963 const size_t size = 2048;
3964 char buf[size];
3965 aarch64_inst *inst_base = &inst.base;
3966 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3967
3968 /* Init inst. */
3969 reset_aarch64_instruction (&inst);
3970 inst_base->opcode = opcode;
3971
3972 /* Reset the error report so that there is no side effect on the
3973 following operand parsing. */
3974 init_operand_error_report ();
3975
3976 /* Fill inst. */
3977 result = parse_operands (str + len, opcode)
3978 && programmer_friendly_fixup (&inst);
3979 gas_assert (result);
3980 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3981 NULL, NULL);
3982 gas_assert (!result);
3983
3984 /* Find the most matched qualifier sequence. */
3985 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3986 gas_assert (qlf_idx > -1);
3987
3988 /* Assign the qualifiers. */
3989 assign_qualifier_sequence (inst_base,
3990 opcode->qualifiers_list[qlf_idx]);
3991
3992 /* Print the hint. */
3993 output_info (_(" did you mean this?"));
3994 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3995 print_operands (buf, opcode, inst_base->operands);
3996 output_info (_(" %s"), buf);
3997
3998 /* Print out other variant(s) if there is any. */
3999 if (qlf_idx != 0 ||
4000 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4001 output_info (_(" other valid variant(s):"));
4002
4003 /* For each pattern. */
4004 qualifiers_list = opcode->qualifiers_list;
4005 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4006 {
4007 /* Most opcodes have far fewer patterns in the list.
4008 The first NIL qualifier indicates the end of the list. */
4009 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4010 break;
4011
4012 if (i != qlf_idx)
4013 {
4014 /* Mnemonics name. */
4015 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4016
4017 /* Assign the qualifiers. */
4018 assign_qualifier_sequence (inst_base, *qualifiers_list);
4019
4020 /* Print instruction. */
4021 print_operands (buf, opcode, inst_base->operands);
4022
4023 output_info (_(" %s"), buf);
4024 }
4025 }
4026 }
4027 break;
4028
4029 case AARCH64_OPDE_OUT_OF_RANGE:
4030 if (detail->data[0] != detail->data[1])
4031 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4032 detail->error ? detail->error : _("immediate value"),
4033 detail->data[0], detail->data[1], detail->index + 1, str);
4034 else
4035 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4036 detail->error ? detail->error : _("immediate value"),
4037 detail->data[0], detail->index + 1, str);
4038 break;
4039
4040 case AARCH64_OPDE_REG_LIST:
4041 if (detail->data[0] == 1)
4042 as_bad (_("invalid number of registers in the list; "
4043 "only 1 register is expected at operand %d -- `%s'"),
4044 detail->index + 1, str);
4045 else
4046 as_bad (_("invalid number of registers in the list; "
4047 "%d registers are expected at operand %d -- `%s'"),
4048 detail->data[0], detail->index + 1, str);
4049 break;
4050
4051 case AARCH64_OPDE_UNALIGNED:
4052 as_bad (_("immediate value should be a multiple of "
4053 "%d at operand %d -- `%s'"),
4054 detail->data[0], detail->index + 1, str);
4055 break;
4056
4057 default:
4058 gas_assert (0);
4059 break;
4060 }
4061 }
4062
4063 /* Process and output the error message about the operand mismatching.
4064
4065	   When this function is called, the operand error information has
4066	   been collected for an assembly line and there will be multiple
4067	   errors in the case of multiple instruction templates; output the
4068	   error message that most closely describes the problem.
4069
4070 static void
4071 output_operand_error_report (char *str)
4072 {
4073 int largest_error_pos;
4074 const char *msg = NULL;
4075 enum aarch64_operand_error_kind kind;
4076 operand_error_record *curr;
4077 operand_error_record *head = operand_error_report.head;
4078 operand_error_record *record = NULL;
4079
4080 /* No error to report. */
4081 if (head == NULL)
4082 return;
4083
4084 gas_assert (head != NULL && operand_error_report.tail != NULL);
4085
4086 /* Only one error. */
4087 if (head == operand_error_report.tail)
4088 {
4089 DEBUG_TRACE ("single opcode entry with error kind: %s",
4090 operand_mismatch_kind_names[head->detail.kind]);
4091 output_operand_error_record (head, str);
4092 return;
4093 }
4094
4095 /* Find the error kind of the highest severity. */
4096	  DEBUG_TRACE ("multiple opcode entries with error kind");
4097 kind = AARCH64_OPDE_NIL;
4098 for (curr = head; curr != NULL; curr = curr->next)
4099 {
4100 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4101 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4102 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4103 kind = curr->detail.kind;
4104 }
4105 gas_assert (kind != AARCH64_OPDE_NIL);
4106
4107	  /* Pick one of the errors of KIND to report.  */
4108 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4109 for (curr = head; curr != NULL; curr = curr->next)
4110 {
4111 if (curr->detail.kind != kind)
4112 continue;
4113	      /* If there are multiple errors, pick the one with the highest
4114	         mismatching operand index.  In the case of multiple errors with
4115	         the same highest operand index, pick the first one, unless a
4116	         later one has a non-NULL error message and the first does not.  */
4117 if (curr->detail.index > largest_error_pos
4118 || (curr->detail.index == largest_error_pos && msg == NULL
4119 && curr->detail.error != NULL))
4120 {
4121 largest_error_pos = curr->detail.index;
4122 record = curr;
4123 msg = record->detail.error;
4124 }
4125 }
4126
4127 gas_assert (largest_error_pos != -2 && record != NULL);
4128 DEBUG_TRACE ("Pick up error kind %s to report",
4129 operand_mismatch_kind_names[record->detail.kind]);
4130
4131 /* Output. */
4132 output_operand_error_record (record, str);
4133 }
4134 \f
4135	/* Write an AArch64 instruction to buf - always little-endian.  */
4136 static void
4137 put_aarch64_insn (char *buf, uint32_t insn)
4138 {
4139 unsigned char *where = (unsigned char *) buf;
4140 where[0] = insn;
4141 where[1] = insn >> 8;
4142 where[2] = insn >> 16;
4143 where[3] = insn >> 24;
4144 }
4145
4146 static uint32_t
4147 get_aarch64_insn (char *buf)
4148 {
4149 unsigned char *where = (unsigned char *) buf;
4150 uint32_t result;
4151 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4152 return result;
4153 }
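
/* The following is an illustrative sketch only, not part of the assembler:
   it shows the byte-order contract of put_aarch64_insn/get_aarch64_insn
   using the AArch64 NOP encoding 0xd503201f (also used by
   aarch64_handle_align below).  It assumes the two helpers above are
   visible to the caller. */
#if 0
static void
example_insn_byte_order (void)
{
  char buf[4];

  put_aarch64_insn (buf, 0xd503201f);
  /* Least significant byte first: 1f 20 03 d5.  */
  gas_assert ((unsigned char) buf[0] == 0x1f
              && (unsigned char) buf[3] == 0xd5);
  gas_assert (get_aarch64_insn (buf) == 0xd503201f);
}
#endif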
4154
4155 static void
4156 output_inst (struct aarch64_inst *new_inst)
4157 {
4158 char *to = NULL;
4159
4160 to = frag_more (INSN_SIZE);
4161
4162 frag_now->tc_frag_data.recorded = 1;
4163
4164 put_aarch64_insn (to, inst.base.value);
4165
4166 if (inst.reloc.type != BFD_RELOC_UNUSED)
4167 {
4168 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4169 INSN_SIZE, &inst.reloc.exp,
4170 inst.reloc.pc_rel,
4171 inst.reloc.type);
4172 DEBUG_TRACE ("Prepared relocation fix up");
4173 /* Don't check the addend value against the instruction size,
4174 that's the job of our code in md_apply_fix(). */
4175 fixp->fx_no_overflow = 1;
4176 if (new_inst != NULL)
4177 fixp->tc_fix_data.inst = new_inst;
4178 if (aarch64_gas_internal_fixup_p ())
4179 {
4180 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4181 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4182 fixp->fx_addnumber = inst.reloc.flags;
4183 }
4184 }
4185
4186 dwarf2_emit_insn (INSN_SIZE);
4187 }
4188
4189 /* Link together opcodes of the same name. */
4190
4191 struct templates
4192 {
4193 aarch64_opcode *opcode;
4194 struct templates *next;
4195 };
4196
4197 typedef struct templates templates;
4198
4199 static templates *
4200 lookup_mnemonic (const char *start, int len)
4201 {
4202 templates *templ = NULL;
4203
4204 templ = hash_find_n (aarch64_ops_hsh, start, len);
4205 return templ;
4206 }
4207
4208 /* Subroutine of md_assemble, responsible for looking up the primary
4209 opcode from the mnemonic the user wrote. STR points to the
4210 beginning of the mnemonic. */
4211
4212 static templates *
4213 opcode_lookup (char **str)
4214 {
4215 char *end, *base;
4216 const aarch64_cond *cond;
4217 char condname[16];
4218 int len;
4219
4220 /* Scan up to the end of the mnemonic, which must end in white space,
4221 '.', or end of string. */
4222 for (base = end = *str; is_part_of_name(*end); end++)
4223 if (*end == '.')
4224 break;
4225
4226 if (end == base)
4227 return 0;
4228
4229 inst.cond = COND_ALWAYS;
4230
4231 /* Handle a possible condition. */
4232 if (end[0] == '.')
4233 {
4234 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4235 if (cond)
4236 {
4237 inst.cond = cond->value;
4238 *str = end + 3;
4239 }
4240 else
4241 {
4242 *str = end;
4243 return 0;
4244 }
4245 }
4246 else
4247 *str = end;
4248
4249 len = end - base;
4250
4251 if (inst.cond == COND_ALWAYS)
4252 {
4253 /* Look for unaffixed mnemonic. */
4254 return lookup_mnemonic (base, len);
4255 }
4256 else if (len <= 13)
4257 {
4258	      /* Append ".c" to the mnemonic if conditional.  */
4259 memcpy (condname, base, len);
4260 memcpy (condname + len, ".c", 2);
4261 base = condname;
4262 len += 2;
4263 return lookup_mnemonic (base, len);
4264 }
4265
4266 return NULL;
4267 }
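
/* Illustrative sketch only (the input line and the helper name are made up
   for the example): how a conditional mnemonic such as "b.eq" is resolved.
   The mnemonic proper is "b"; the two-character condition is looked up in
   aarch64_cond_hsh, and the opcode table is then searched for the synthetic
   key "b.c", which is how the conditional templates appear to be keyed. */
#if 0
static void
example_conditional_lookup (void)
{
  char line[] = "b.eq target";
  char *p = line;
  templates *templ = opcode_lookup (&p);

  /* On success, inst.cond holds the value parsed for "eq", P points past
     ".eq" at " target", and TEMPL is the chain keyed by "b.c".  */
  gas_assert (templ != NULL && inst.cond != COND_ALWAYS);
}
#endif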
4268
4269 /* Internal helper routine converting a vector neon_type_el structure
4270 *VECTYPE to a corresponding operand qualifier. */
4271
4272 static inline aarch64_opnd_qualifier_t
4273 vectype_to_qualifier (const struct neon_type_el *vectype)
4274 {
4275 /* Element size in bytes indexed by neon_el_type. */
4276 const unsigned char ele_size[5]
4277 = {1, 2, 4, 8, 16};
4278
4279 if (!vectype->defined || vectype->type == NT_invtype)
4280 goto vectype_conversion_fail;
4281
4282 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4283
4284 if (vectype->defined & NTA_HASINDEX)
4285 /* Vector element register. */
4286 return AARCH64_OPND_QLF_S_B + vectype->type;
4287 else
4288 {
4289 /* Vector register. */
4290 int reg_size = ele_size[vectype->type] * vectype->width;
4291 unsigned offset;
4292 if (reg_size != 16 && reg_size != 8)
4293 goto vectype_conversion_fail;
4294 /* The conversion is calculated based on the relation of the order of
4295 qualifiers to the vector element size and vector register size. */
4296 offset = (vectype->type == NT_q)
4297 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4298 gas_assert (offset <= 8);
4299 return AARCH64_OPND_QLF_V_8B + offset;
4300 }
4301
4302 vectype_conversion_fail:
4303 first_error (_("bad vector arrangement type"));
4304 return AARCH64_OPND_QLF_NIL;
4305 }
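
/* A worked example of the offset arithmetic above (illustrative only); like
   the function itself, it assumes libopcodes orders the vector qualifiers
   8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q starting at AARCH64_OPND_QLF_V_8B.
   For a ".4s" arrangement: type NT_s (2), width 4, element size 4, so
   reg_size is 16 and offset = (2 << 1) + (16 >> 4) = 5, i.e. V_4S. */
#if 0
static void
example_vectype_to_qualifier (void)
{
  struct neon_type_el t = { NT_s, NTA_HASTYPE, 4, 0 };

  gas_assert (vectype_to_qualifier (&t) == AARCH64_OPND_QLF_V_4S);
}
#endif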
4306
4307	/* Process an optional operand that has been omitted from the assembly line.
4308 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4309 instruction's opcode entry while IDX is the index of this omitted operand.
4310 */
4311
4312 static void
4313 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4314 int idx, aarch64_opnd_info *operand)
4315 {
4316 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4317 gas_assert (optional_operand_p (opcode, idx));
4318 gas_assert (!operand->present);
4319
4320 switch (type)
4321 {
4322 case AARCH64_OPND_Rd:
4323 case AARCH64_OPND_Rn:
4324 case AARCH64_OPND_Rm:
4325 case AARCH64_OPND_Rt:
4326 case AARCH64_OPND_Rt2:
4327 case AARCH64_OPND_Rs:
4328 case AARCH64_OPND_Ra:
4329 case AARCH64_OPND_Rt_SYS:
4330 case AARCH64_OPND_Rd_SP:
4331 case AARCH64_OPND_Rn_SP:
4332 case AARCH64_OPND_Fd:
4333 case AARCH64_OPND_Fn:
4334 case AARCH64_OPND_Fm:
4335 case AARCH64_OPND_Fa:
4336 case AARCH64_OPND_Ft:
4337 case AARCH64_OPND_Ft2:
4338 case AARCH64_OPND_Sd:
4339 case AARCH64_OPND_Sn:
4340 case AARCH64_OPND_Sm:
4341 case AARCH64_OPND_Vd:
4342 case AARCH64_OPND_Vn:
4343 case AARCH64_OPND_Vm:
4344 case AARCH64_OPND_VdD1:
4345 case AARCH64_OPND_VnD1:
4346 operand->reg.regno = default_value;
4347 break;
4348
4349 case AARCH64_OPND_Ed:
4350 case AARCH64_OPND_En:
4351 case AARCH64_OPND_Em:
4352 operand->reglane.regno = default_value;
4353 break;
4354
4355 case AARCH64_OPND_IDX:
4356 case AARCH64_OPND_BIT_NUM:
4357 case AARCH64_OPND_IMMR:
4358 case AARCH64_OPND_IMMS:
4359 case AARCH64_OPND_SHLL_IMM:
4360 case AARCH64_OPND_IMM_VLSL:
4361 case AARCH64_OPND_IMM_VLSR:
4362 case AARCH64_OPND_CCMP_IMM:
4363 case AARCH64_OPND_FBITS:
4364 case AARCH64_OPND_UIMM4:
4365 case AARCH64_OPND_UIMM3_OP1:
4366 case AARCH64_OPND_UIMM3_OP2:
4367 case AARCH64_OPND_IMM:
4368 case AARCH64_OPND_WIDTH:
4369 case AARCH64_OPND_UIMM7:
4370 case AARCH64_OPND_NZCV:
4371 operand->imm.value = default_value;
4372 break;
4373
4374 case AARCH64_OPND_EXCEPTION:
4375 inst.reloc.type = BFD_RELOC_UNUSED;
4376 break;
4377
4378 case AARCH64_OPND_BARRIER_ISB:
4379	      operand->barrier = aarch64_barrier_options + default_value;
	      break;
4380
4381 default:
4382 break;
4383 }
4384 }
4385
4386 /* Process the relocation type for move wide instructions.
4387 Return TRUE on success; otherwise return FALSE. */
4388
4389 static bfd_boolean
4390 process_movw_reloc_info (void)
4391 {
4392 int is32;
4393 unsigned shift;
4394
4395 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4396
4397 if (inst.base.opcode->op == OP_MOVK)
4398 switch (inst.reloc.type)
4399 {
4400 case BFD_RELOC_AARCH64_MOVW_G0_S:
4401 case BFD_RELOC_AARCH64_MOVW_G1_S:
4402 case BFD_RELOC_AARCH64_MOVW_G2_S:
4403 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4404 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4405 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4406 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4407 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4408 set_syntax_error
4409 (_("the specified relocation type is not allowed for MOVK"));
4410 return FALSE;
4411 default:
4412 break;
4413 }
4414
4415 switch (inst.reloc.type)
4416 {
4417 case BFD_RELOC_AARCH64_MOVW_G0:
4418 case BFD_RELOC_AARCH64_MOVW_G0_S:
4419 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4420 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4421 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4422 shift = 0;
4423 break;
4424 case BFD_RELOC_AARCH64_MOVW_G1:
4425 case BFD_RELOC_AARCH64_MOVW_G1_S:
4426 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4427 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4428 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4429 shift = 16;
4430 break;
4431 case BFD_RELOC_AARCH64_MOVW_G2:
4432 case BFD_RELOC_AARCH64_MOVW_G2_S:
4433 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4434 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4435 if (is32)
4436 {
4437 set_fatal_syntax_error
4438 (_("the specified relocation type is not allowed for 32-bit "
4439 "register"));
4440 return FALSE;
4441 }
4442 shift = 32;
4443 break;
4444 case BFD_RELOC_AARCH64_MOVW_G3:
4445 if (is32)
4446 {
4447 set_fatal_syntax_error
4448 (_("the specified relocation type is not allowed for 32-bit "
4449 "register"));
4450 return FALSE;
4451 }
4452 shift = 48;
4453 break;
4454 default:
4455 /* More cases should be added when more MOVW-related relocation types
4456 are supported in GAS. */
4457 gas_assert (aarch64_gas_internal_fixup_p ());
4458 /* The shift amount should have already been set by the parser. */
4459 return TRUE;
4460 }
4461 inst.base.operands[1].shifter.amount = shift;
4462 return TRUE;
4463 }
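
/* A minimal sketch (not the assembler's own code) of the mapping that the
   switch above implements for the supported MOVW relocation groups: group
   n selects a shift of 16 * n, and groups 2 and 3 are rejected for 32-bit
   registers.  The helper name is made up for the example. */
#if 0
static unsigned
example_movw_group_to_shift (unsigned group, int is32)
{
  gas_assert (group <= 3);
  gas_assert (!(is32 && group >= 2));  /* rejected with a syntax error */
  return 16 * group;  /* e.g. :abs_g1: -> LSL #16, :abs_g2: -> LSL #32 */
}
#endif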
4464
4465	/* A primitive log calculator.  */
4466
4467 static inline unsigned int
4468 get_logsz (unsigned int size)
4469 {
4470 const unsigned char ls[16] =
4471 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4472 if (size > 16)
4473 {
4474 gas_assert (0);
4475 return -1;
4476 }
4477 gas_assert (ls[size - 1] != (unsigned char)-1);
4478 return ls[size - 1];
4479 }
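
/* Expected behaviour of get_logsz, for illustration: only power-of-two
   sizes from 1 to 16 are meaningful inputs, and the result is the base-2
   logarithm of the size. */
#if 0
static void
example_get_logsz (void)
{
  gas_assert (get_logsz (1) == 0);
  gas_assert (get_logsz (2) == 1);
  gas_assert (get_logsz (4) == 2);
  gas_assert (get_logsz (8) == 3);
  gas_assert (get_logsz (16) == 4);
}
#endif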
4480
4481 /* Determine and return the real reloc type code for an instruction
4482 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4483
4484 static inline bfd_reloc_code_real_type
4485 ldst_lo12_determine_real_reloc_type (void)
4486 {
4487 int logsz;
4488 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4489 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4490
4491 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4492 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4493 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4494 BFD_RELOC_AARCH64_LDST128_LO12
4495 };
4496
4497 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4498 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4499
4500 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4501 opd1_qlf =
4502 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4503 1, opd0_qlf, 0);
4504 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4505
4506 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4507 gas_assert (logsz >= 0 && logsz <= 4);
4508
4509 return reloc_ldst_lo12[logsz];
4510 }
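
/* Illustrative sketch of the narrowing above, mirroring (not replacing) the
   table: the access size in bytes selects the LDSTn_LO12 relocation, so
   "ldr x0, [x1, #:lo12:sym]" (an 8-byte access) ends up with
   BFD_RELOC_AARCH64_LDST64_LO12.  The helper name is made up. */
#if 0
static bfd_reloc_code_real_type
example_lo12_reloc_for_size (unsigned int size_in_bytes)
{
  static const bfd_reloc_code_real_type relocs[5] = {
    BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
    BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
    BFD_RELOC_AARCH64_LDST128_LO12
  };
  return relocs[get_logsz (size_in_bytes)];
}
#endif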
4511
4512 /* Check whether a register list REGINFO is valid. The registers must be
4513 numbered in increasing order (modulo 32), in increments of one or two.
4514
4515 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4516 increments of two.
4517
4518 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4519
4520 static bfd_boolean
4521 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4522 {
4523 uint32_t i, nb_regs, prev_regno, incr;
4524
4525 nb_regs = 1 + (reginfo & 0x3);
4526 reginfo >>= 2;
4527 prev_regno = reginfo & 0x1f;
4528 incr = accept_alternate ? 2 : 1;
4529
4530 for (i = 1; i < nb_regs; ++i)
4531 {
4532 uint32_t curr_regno;
4533 reginfo >>= 5;
4534 curr_regno = reginfo & 0x1f;
4535 if (curr_regno != ((prev_regno + incr) & 0x1f))
4536 return FALSE;
4537 prev_regno = curr_regno;
4538 }
4539
4540 return TRUE;
4541 }
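
/* Illustrative sketch of the REGINFO packing consumed above: bits [1:0]
   hold the register count minus one and each following 5-bit field holds
   one register number, lowest first.  So the list {v0, v1, v2, v3} packs
   and validates as follows (helper name made up for the example): */
#if 0
static void
example_reg_list_valid_p (void)
{
  uint32_t reginfo = 3                        /* four registers */
                     | (0 << 2) | (1 << 7)    /* v0, v1 */
                     | (2 << 12) | (3 << 17); /* v2, v3 */

  gas_assert (reg_list_valid_p (reginfo, /* accept_alternate */ 0));
  /* A list such as {v0, v2} is only accepted when ACCEPT_ALTERNATE is
     non-zero.  */
}
#endif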
4542
4543 /* Generic instruction operand parser. This does no encoding and no
4544 semantic validation; it merely squirrels values away in the inst
4545 structure. Returns TRUE or FALSE depending on whether the
4546 specified grammar matched. */
4547
4548 static bfd_boolean
4549 parse_operands (char *str, const aarch64_opcode *opcode)
4550 {
4551 int i;
4552 char *backtrack_pos = 0;
4553 const enum aarch64_opnd *operands = opcode->operands;
4554
4555 clear_error ();
4556 skip_whitespace (str);
4557
4558 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4559 {
4560 int64_t val;
4561 int isreg32, isregzero;
4562 int comma_skipped_p = 0;
4563 aarch64_reg_type rtype;
4564 struct neon_type_el vectype;
4565 aarch64_opnd_info *info = &inst.base.operands[i];
4566
4567 DEBUG_TRACE ("parse operand %d", i);
4568
4569 /* Assign the operand code. */
4570 info->type = operands[i];
4571
4572 if (optional_operand_p (opcode, i))
4573 {
4574 /* Remember where we are in case we need to backtrack. */
4575 gas_assert (!backtrack_pos);
4576 backtrack_pos = str;
4577 }
4578
4579	      /* Expect a comma between operands; the backtrack mechanism will take
4580	         care of cases of an omitted optional operand.  */
4581 if (i > 0 && ! skip_past_char (&str, ','))
4582 {
4583 set_syntax_error (_("comma expected between operands"));
4584 goto failure;
4585 }
4586 else
4587 comma_skipped_p = 1;
4588
4589 switch (operands[i])
4590 {
4591 case AARCH64_OPND_Rd:
4592 case AARCH64_OPND_Rn:
4593 case AARCH64_OPND_Rm:
4594 case AARCH64_OPND_Rt:
4595 case AARCH64_OPND_Rt2:
4596 case AARCH64_OPND_Rs:
4597 case AARCH64_OPND_Ra:
4598 case AARCH64_OPND_Rt_SYS:
4599 po_int_reg_or_fail (1, 0);
4600 break;
4601
4602 case AARCH64_OPND_Rd_SP:
4603 case AARCH64_OPND_Rn_SP:
4604 po_int_reg_or_fail (0, 1);
4605 break;
4606
4607 case AARCH64_OPND_Rm_EXT:
4608 case AARCH64_OPND_Rm_SFT:
4609 po_misc_or_fail (parse_shifter_operand
4610 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4611 ? SHIFTED_ARITH_IMM
4612 : SHIFTED_LOGIC_IMM)));
4613 if (!info->shifter.operator_present)
4614 {
4615 /* Default to LSL if not present. Libopcodes prefers shifter
4616 kind to be explicit. */
4617 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4618 info->shifter.kind = AARCH64_MOD_LSL;
4619	      /* For Rm_EXT, libopcodes will carry out a further check on whether
4620	         or not the stack pointer is used in the instruction (recall that
4621 "the extend operator is not optional unless at least one of
4622 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4623 }
4624 break;
4625
4626 case AARCH64_OPND_Fd:
4627 case AARCH64_OPND_Fn:
4628 case AARCH64_OPND_Fm:
4629 case AARCH64_OPND_Fa:
4630 case AARCH64_OPND_Ft:
4631 case AARCH64_OPND_Ft2:
4632 case AARCH64_OPND_Sd:
4633 case AARCH64_OPND_Sn:
4634 case AARCH64_OPND_Sm:
4635 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4636 if (val == PARSE_FAIL)
4637 {
4638 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4639 goto failure;
4640 }
4641 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4642
4643 info->reg.regno = val;
4644 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4645 break;
4646
4647 case AARCH64_OPND_Vd:
4648 case AARCH64_OPND_Vn:
4649 case AARCH64_OPND_Vm:
4650 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4651 if (val == PARSE_FAIL)
4652 {
4653 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4654 goto failure;
4655 }
4656 if (vectype.defined & NTA_HASINDEX)
4657 goto failure;
4658
4659 info->reg.regno = val;
4660 info->qualifier = vectype_to_qualifier (&vectype);
4661 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4662 goto failure;
4663 break;
4664
4665 case AARCH64_OPND_VdD1:
4666 case AARCH64_OPND_VnD1:
4667 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4668 if (val == PARSE_FAIL)
4669 {
4670 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4671 goto failure;
4672 }
4673 if (vectype.type != NT_d || vectype.index != 1)
4674 {
4675 set_fatal_syntax_error
4676 (_("the top half of a 128-bit FP/SIMD register is expected"));
4677 goto failure;
4678 }
4679 info->reg.regno = val;
4680 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4681 here; it is correct for the purpose of encoding/decoding since
4682 only the register number is explicitly encoded in the related
4683 instructions, although this appears a bit hacky. */
4684 info->qualifier = AARCH64_OPND_QLF_S_D;
4685 break;
4686
4687 case AARCH64_OPND_Ed:
4688 case AARCH64_OPND_En:
4689 case AARCH64_OPND_Em:
4690 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4691 if (val == PARSE_FAIL)
4692 {
4693 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4694 goto failure;
4695 }
4696 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4697 goto failure;
4698
4699 info->reglane.regno = val;
4700 info->reglane.index = vectype.index;
4701 info->qualifier = vectype_to_qualifier (&vectype);
4702 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4703 goto failure;
4704 break;
4705
4706 case AARCH64_OPND_LVn:
4707 case AARCH64_OPND_LVt:
4708 case AARCH64_OPND_LVt_AL:
4709 case AARCH64_OPND_LEt:
4710 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4711 goto failure;
4712 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4713 {
4714 set_fatal_syntax_error (_("invalid register list"));
4715 goto failure;
4716 }
4717 info->reglist.first_regno = (val >> 2) & 0x1f;
4718 info->reglist.num_regs = (val & 0x3) + 1;
4719 if (operands[i] == AARCH64_OPND_LEt)
4720 {
4721 if (!(vectype.defined & NTA_HASINDEX))
4722 goto failure;
4723 info->reglist.has_index = 1;
4724 info->reglist.index = vectype.index;
4725 }
4726 else if (!(vectype.defined & NTA_HASTYPE))
4727 goto failure;
4728 info->qualifier = vectype_to_qualifier (&vectype);
4729 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4730 goto failure;
4731 break;
4732
4733 case AARCH64_OPND_Cn:
4734 case AARCH64_OPND_Cm:
4735 po_reg_or_fail (REG_TYPE_CN);
4736 if (val > 15)
4737 {
4738 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4739 goto failure;
4740 }
4741 inst.base.operands[i].reg.regno = val;
4742 break;
4743
4744 case AARCH64_OPND_SHLL_IMM:
4745 case AARCH64_OPND_IMM_VLSR:
4746 po_imm_or_fail (1, 64);
4747 info->imm.value = val;
4748 break;
4749
4750 case AARCH64_OPND_CCMP_IMM:
4751 case AARCH64_OPND_FBITS:
4752 case AARCH64_OPND_UIMM4:
4753 case AARCH64_OPND_UIMM3_OP1:
4754 case AARCH64_OPND_UIMM3_OP2:
4755 case AARCH64_OPND_IMM_VLSL:
4756 case AARCH64_OPND_IMM:
4757 case AARCH64_OPND_WIDTH:
4758 po_imm_nc_or_fail ();
4759 info->imm.value = val;
4760 break;
4761
4762 case AARCH64_OPND_UIMM7:
4763 po_imm_or_fail (0, 127);
4764 info->imm.value = val;
4765 break;
4766
4767 case AARCH64_OPND_IDX:
4768 case AARCH64_OPND_BIT_NUM:
4769 case AARCH64_OPND_IMMR:
4770 case AARCH64_OPND_IMMS:
4771 po_imm_or_fail (0, 63);
4772 info->imm.value = val;
4773 break;
4774
4775 case AARCH64_OPND_IMM0:
4776 po_imm_nc_or_fail ();
4777 if (val != 0)
4778 {
4779 set_fatal_syntax_error (_("immediate zero expected"));
4780 goto failure;
4781 }
4782 info->imm.value = 0;
4783 break;
4784
4785 case AARCH64_OPND_FPIMM0:
4786 {
4787 int qfloat;
4788 bfd_boolean res1 = FALSE, res2 = FALSE;
4789 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4790 it is probably not worth the effort to support it. */
4791 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4792 && !(res2 = parse_constant_immediate (&str, &val)))
4793 goto failure;
4794 if ((res1 && qfloat == 0) || (res2 && val == 0))
4795 {
4796 info->imm.value = 0;
4797 info->imm.is_fp = 1;
4798 break;
4799 }
4800 set_fatal_syntax_error (_("immediate zero expected"));
4801 goto failure;
4802 }
4803
4804 case AARCH64_OPND_IMM_MOV:
4805 {
4806 char *saved = str;
4807 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4808 goto failure;
4809 str = saved;
4810 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4811 GE_OPT_PREFIX, 1));
4812 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4813 later. fix_mov_imm_insn will try to determine a machine
4814 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4815 message if the immediate cannot be moved by a single
4816 instruction. */
4817 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4818 inst.base.operands[i].skip = 1;
4819 }
4820 break;
4821
4822 case AARCH64_OPND_SIMD_IMM:
4823 case AARCH64_OPND_SIMD_IMM_SFT:
4824 if (! parse_big_immediate (&str, &val))
4825 goto failure;
4826 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4827 /* addr_off_p */ 0,
4828 /* need_libopcodes_p */ 1,
4829 /* skip_p */ 1);
4830 /* Parse shift.
4831 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4832 shift, we don't check it here; we leave the checking to
4833 the libopcodes (operand_general_constraint_met_p). By
4834 doing this, we achieve better diagnostics. */
4835 if (skip_past_comma (&str)
4836 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4837 goto failure;
4838 if (!info->shifter.operator_present
4839 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4840 {
4841 /* Default to LSL if not present. Libopcodes prefers shifter
4842 kind to be explicit. */
4843 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4844 info->shifter.kind = AARCH64_MOD_LSL;
4845 }
4846 break;
4847
4848 case AARCH64_OPND_FPIMM:
4849 case AARCH64_OPND_SIMD_FPIMM:
4850 {
4851 int qfloat;
4852 bfd_boolean dp_p
4853 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4854 == 8);
4855 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4856 goto failure;
4857 if (qfloat == 0)
4858 {
4859 set_fatal_syntax_error (_("invalid floating-point constant"));
4860 goto failure;
4861 }
4862 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4863 inst.base.operands[i].imm.is_fp = 1;
4864 }
4865 break;
4866
4867 case AARCH64_OPND_LIMM:
4868 po_misc_or_fail (parse_shifter_operand (&str, info,
4869 SHIFTED_LOGIC_IMM));
4870 if (info->shifter.operator_present)
4871 {
4872 set_fatal_syntax_error
4873 (_("shift not allowed for bitmask immediate"));
4874 goto failure;
4875 }
4876 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4877 /* addr_off_p */ 0,
4878 /* need_libopcodes_p */ 1,
4879 /* skip_p */ 1);
4880 break;
4881
4882 case AARCH64_OPND_AIMM:
4883 if (opcode->op == OP_ADD)
4884 /* ADD may have relocation types. */
4885 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4886 SHIFTED_ARITH_IMM));
4887 else
4888 po_misc_or_fail (parse_shifter_operand (&str, info,
4889 SHIFTED_ARITH_IMM));
4890 switch (inst.reloc.type)
4891 {
4892 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4893 info->shifter.amount = 12;
4894 break;
4895 case BFD_RELOC_UNUSED:
4896 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4897 if (info->shifter.kind != AARCH64_MOD_NONE)
4898 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4899 inst.reloc.pc_rel = 0;
4900 break;
4901 default:
4902 break;
4903 }
4904 info->imm.value = 0;
4905 if (!info->shifter.operator_present)
4906 {
4907 /* Default to LSL if not present. Libopcodes prefers shifter
4908 kind to be explicit. */
4909 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4910 info->shifter.kind = AARCH64_MOD_LSL;
4911 }
4912 break;
4913
4914 case AARCH64_OPND_HALF:
4915 {
4916 /* #<imm16> or relocation. */
4917 int internal_fixup_p;
4918 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4919 if (internal_fixup_p)
4920 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4921 skip_whitespace (str);
4922 if (skip_past_comma (&str))
4923 {
4924 /* {, LSL #<shift>} */
4925 if (! aarch64_gas_internal_fixup_p ())
4926 {
4927 set_fatal_syntax_error (_("can't mix relocation modifier "
4928 "with explicit shift"));
4929 goto failure;
4930 }
4931 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4932 }
4933 else
4934 inst.base.operands[i].shifter.amount = 0;
4935 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4936 inst.base.operands[i].imm.value = 0;
4937 if (! process_movw_reloc_info ())
4938 goto failure;
4939 }
4940 break;
4941
4942 case AARCH64_OPND_EXCEPTION:
4943 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4944 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4945 /* addr_off_p */ 0,
4946 /* need_libopcodes_p */ 0,
4947 /* skip_p */ 1);
4948 break;
4949
4950 case AARCH64_OPND_NZCV:
4951 {
4952 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4953 if (nzcv != NULL)
4954 {
4955 str += 4;
4956 info->imm.value = nzcv->value;
4957 break;
4958 }
4959 po_imm_or_fail (0, 15);
4960 info->imm.value = val;
4961 }
4962 break;
4963
4964 case AARCH64_OPND_COND:
4965 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4966 str += 2;
4967 if (info->cond == NULL)
4968 {
4969 set_syntax_error (_("invalid condition"));
4970 goto failure;
4971 }
4972 break;
4973
4974 case AARCH64_OPND_ADDR_ADRP:
4975 po_misc_or_fail (parse_adrp (&str));
4976	  /* Clear the value, as the operand needs to be relocated.  */
4977 info->imm.value = 0;
4978 break;
4979
4980 case AARCH64_OPND_ADDR_PCREL14:
4981 case AARCH64_OPND_ADDR_PCREL19:
4982 case AARCH64_OPND_ADDR_PCREL21:
4983 case AARCH64_OPND_ADDR_PCREL26:
4984 po_misc_or_fail (parse_address_reloc (&str, info));
4985 if (!info->addr.pcrel)
4986 {
4987 set_syntax_error (_("invalid pc-relative address"));
4988 goto failure;
4989 }
4990 if (inst.gen_lit_pool
4991 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4992 {
4993 /* Only permit "=value" in the literal load instructions.
4994 The literal will be generated by programmer_friendly_fixup. */
4995 set_syntax_error (_("invalid use of \"=immediate\""));
4996 goto failure;
4997 }
4998 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4999 {
5000 set_syntax_error (_("unrecognized relocation suffix"));
5001 goto failure;
5002 }
5003 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5004 {
5005 info->imm.value = inst.reloc.exp.X_add_number;
5006 inst.reloc.type = BFD_RELOC_UNUSED;
5007 }
5008 else
5009 {
5010 info->imm.value = 0;
5011 if (inst.reloc.type == BFD_RELOC_UNUSED)
5012 switch (opcode->iclass)
5013 {
5014 case compbranch:
5015 case condbranch:
5016 /* e.g. CBZ or B.COND */
5017 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5018 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5019 break;
5020 case testbranch:
5021 /* e.g. TBZ */
5022 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5023 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5024 break;
5025 case branch_imm:
5026 /* e.g. B or BL */
5027 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5028 inst.reloc.type =
5029 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5030 : BFD_RELOC_AARCH64_JUMP26;
5031 break;
5032 case loadlit:
5033 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5034 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5035 break;
5036 case pcreladdr:
5037 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5038 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5039 break;
5040 default:
5041 gas_assert (0);
5042 abort ();
5043 }
5044 inst.reloc.pc_rel = 1;
5045 }
5046 break;
5047
5048 case AARCH64_OPND_ADDR_SIMPLE:
5049 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5050 /* [<Xn|SP>{, #<simm>}] */
5051 po_char_or_fail ('[');
5052 po_reg_or_fail (REG_TYPE_R64_SP);
5053 /* Accept optional ", #0". */
5054 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5055 && skip_past_char (&str, ','))
5056 {
5057 skip_past_char (&str, '#');
5058 if (! skip_past_char (&str, '0'))
5059 {
5060 set_fatal_syntax_error
5061 (_("the optional immediate offset can only be 0"));
5062 goto failure;
5063 }
5064 }
5065 po_char_or_fail (']');
5066 info->addr.base_regno = val;
5067 break;
5068
5069 case AARCH64_OPND_ADDR_REGOFF:
5070 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5071 po_misc_or_fail (parse_address (&str, info, 0));
5072 if (info->addr.pcrel || !info->addr.offset.is_reg
5073 || !info->addr.preind || info->addr.postind
5074 || info->addr.writeback)
5075 {
5076 set_syntax_error (_("invalid addressing mode"));
5077 goto failure;
5078 }
5079 if (!info->shifter.operator_present)
5080 {
5081 /* Default to LSL if not present. Libopcodes prefers shifter
5082 kind to be explicit. */
5083 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5084 info->shifter.kind = AARCH64_MOD_LSL;
5085 }
5086 /* Qualifier to be deduced by libopcodes. */
5087 break;
5088
5089 case AARCH64_OPND_ADDR_SIMM7:
5090 po_misc_or_fail (parse_address (&str, info, 0));
5091 if (info->addr.pcrel || info->addr.offset.is_reg
5092 || (!info->addr.preind && !info->addr.postind))
5093 {
5094 set_syntax_error (_("invalid addressing mode"));
5095 goto failure;
5096 }
5097 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5098 /* addr_off_p */ 1,
5099 /* need_libopcodes_p */ 1,
5100 /* skip_p */ 0);
5101 break;
5102
5103 case AARCH64_OPND_ADDR_SIMM9:
5104 case AARCH64_OPND_ADDR_SIMM9_2:
5105 po_misc_or_fail (parse_address_reloc (&str, info));
5106 if (info->addr.pcrel || info->addr.offset.is_reg
5107 || (!info->addr.preind && !info->addr.postind)
5108 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5109 && info->addr.writeback))
5110 {
5111 set_syntax_error (_("invalid addressing mode"));
5112 goto failure;
5113 }
5114 if (inst.reloc.type != BFD_RELOC_UNUSED)
5115 {
5116 set_syntax_error (_("relocation not allowed"));
5117 goto failure;
5118 }
5119 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5120 /* addr_off_p */ 1,
5121 /* need_libopcodes_p */ 1,
5122 /* skip_p */ 0);
5123 break;
5124
5125 case AARCH64_OPND_ADDR_UIMM12:
5126 po_misc_or_fail (parse_address_reloc (&str, info));
5127 if (info->addr.pcrel || info->addr.offset.is_reg
5128 || !info->addr.preind || info->addr.writeback)
5129 {
5130 set_syntax_error (_("invalid addressing mode"));
5131 goto failure;
5132 }
5133 if (inst.reloc.type == BFD_RELOC_UNUSED)
5134 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5135 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5136 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5137 /* Leave qualifier to be determined by libopcodes. */
5138 break;
5139
5140 case AARCH64_OPND_SIMD_ADDR_POST:
5141 /* [<Xn|SP>], <Xm|#<amount>> */
5142 po_misc_or_fail (parse_address (&str, info, 1));
5143 if (!info->addr.postind || !info->addr.writeback)
5144 {
5145 set_syntax_error (_("invalid addressing mode"));
5146 goto failure;
5147 }
5148 if (!info->addr.offset.is_reg)
5149 {
5150 if (inst.reloc.exp.X_op == O_constant)
5151 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5152 else
5153 {
5154 set_fatal_syntax_error
5155 (_("writeback value should be an immediate constant"));
5156 goto failure;
5157 }
5158 }
5159 /* No qualifier. */
5160 break;
5161
5162 case AARCH64_OPND_SYSREG:
5163 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5164 == PARSE_FAIL)
5165 {
5166 set_syntax_error (_("unknown or missing system register name"));
5167 goto failure;
5168 }
5169 inst.base.operands[i].sysreg = val;
5170 break;
5171
5172 case AARCH64_OPND_PSTATEFIELD:
5173 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5174 == PARSE_FAIL)
5175 {
5176 set_syntax_error (_("unknown or missing PSTATE field name"));
5177 goto failure;
5178 }
5179 inst.base.operands[i].pstatefield = val;
5180 break;
5181
5182 case AARCH64_OPND_SYSREG_IC:
5183 inst.base.operands[i].sysins_op =
5184 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5185 goto sys_reg_ins;
5186 case AARCH64_OPND_SYSREG_DC:
5187 inst.base.operands[i].sysins_op =
5188 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5189 goto sys_reg_ins;
5190 case AARCH64_OPND_SYSREG_AT:
5191 inst.base.operands[i].sysins_op =
5192 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5193 goto sys_reg_ins;
5194 case AARCH64_OPND_SYSREG_TLBI:
5195 inst.base.operands[i].sysins_op =
5196 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5197 sys_reg_ins:
5198 if (inst.base.operands[i].sysins_op == NULL)
5199 {
5200 set_fatal_syntax_error ( _("unknown or missing operation name"));
5201 goto failure;
5202 }
5203 break;
5204
5205 case AARCH64_OPND_BARRIER:
5206 case AARCH64_OPND_BARRIER_ISB:
5207 val = parse_barrier (&str);
5208 if (val != PARSE_FAIL
5209 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5210 {
5211	      /* ISB only accepts the option name 'sy'.  */
5212 set_syntax_error
5213 (_("the specified option is not accepted in ISB"));
5214 /* Turn off backtrack as this optional operand is present. */
5215 backtrack_pos = 0;
5216 goto failure;
5217 }
5218 /* This is an extension to accept a 0..15 immediate. */
5219 if (val == PARSE_FAIL)
5220 po_imm_or_fail (0, 15);
5221 info->barrier = aarch64_barrier_options + val;
5222 break;
5223
5224 case AARCH64_OPND_PRFOP:
5225 val = parse_pldop (&str);
5226 /* This is an extension to accept a 0..31 immediate. */
5227 if (val == PARSE_FAIL)
5228 po_imm_or_fail (0, 31);
5229 inst.base.operands[i].prfop = aarch64_prfops + val;
5230 break;
5231
5232 default:
5233 as_fatal (_("unhandled operand code %d"), operands[i]);
5234 }
5235
5236 /* If we get here, this operand was successfully parsed. */
5237 inst.base.operands[i].present = 1;
5238 continue;
5239
5240 failure:
5241 /* The parse routine should already have set the error, but in case
5242 not, set a default one here. */
5243 if (! error_p ())
5244 set_default_error ();
5245
5246 if (! backtrack_pos)
5247 goto parse_operands_return;
5248
5249 /* Reaching here means we are dealing with an optional operand that is
5250 omitted from the assembly line. */
5251 gas_assert (optional_operand_p (opcode, i));
5252 info->present = 0;
5253 process_omitted_operand (operands[i], opcode, i, info);
5254
5255 /* Try again, skipping the optional operand at backtrack_pos. */
5256 str = backtrack_pos;
5257 backtrack_pos = 0;
5258
5259	      /* If this is the last operand, which is optional and omitted, but a
5260	         comma was nevertheless parsed before it, the comma is unexpected.  */
5261 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5262 {
5263 set_fatal_syntax_error
5264 (_("unexpected comma before the omitted optional operand"));
5265 goto parse_operands_return;
5266 }
5267
5268 /* Clear any error record after the omitted optional operand has been
5269 successfully handled. */
5270 clear_error ();
5271 }
5272
5273 /* Check if we have parsed all the operands. */
5274 if (*str != '\0' && ! error_p ())
5275 {
5276 /* Set I to the index of the last present operand; this is
5277 for the purpose of diagnostics. */
5278 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5279 ;
5280 set_fatal_syntax_error
5281 (_("unexpected characters following instruction"));
5282 }
5283
5284 parse_operands_return:
5285
5286 if (error_p ())
5287 {
5288 DEBUG_TRACE ("parsing FAIL: %s - %s",
5289 operand_mismatch_kind_names[get_error_kind ()],
5290 get_error_message ());
5291 /* Record the operand error properly; this is useful when there
5292 are multiple instruction templates for a mnemonic name, so that
5293 later on, we can select the error that most closely describes
5294 the problem. */
5295 record_operand_error (opcode, i, get_error_kind (),
5296 get_error_message ());
5297 return FALSE;
5298 }
5299 else
5300 {
5301 DEBUG_TRACE ("parsing SUCCESS");
5302 return TRUE;
5303 }
5304 }
5305
5306	/* Do some fix-ups to provide programmer-friendly features while keeping
5307	   libopcodes happy, i.e. libopcodes only accepts the preferred
5308	   architectural syntax.
5309 Return FALSE if there is any failure; otherwise return TRUE. */
5310
5311 static bfd_boolean
5312 programmer_friendly_fixup (aarch64_instruction *instr)
5313 {
5314 aarch64_inst *base = &instr->base;
5315 const aarch64_opcode *opcode = base->opcode;
5316 enum aarch64_op op = opcode->op;
5317 aarch64_opnd_info *operands = base->operands;
5318
5319 DEBUG_TRACE ("enter");
5320
5321 switch (opcode->iclass)
5322 {
5323 case testbranch:
5324 /* TBNZ Xn|Wn, #uimm6, label
5325 Test and Branch Not Zero: conditionally jumps to label if bit number
5326 uimm6 in register Xn is not zero. The bit number implies the width of
5327 the register, which may be written and should be disassembled as Wn if
5328 uimm is less than 32. */
5329 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5330 {
5331 if (operands[1].imm.value >= 32)
5332 {
5333 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5334 0, 31);
5335 return FALSE;
5336 }
5337 operands[0].qualifier = AARCH64_OPND_QLF_X;
5338 }
5339 break;
5340 case loadlit:
5341 /* LDR Wt, label | =value
5342	       As a convenience, assemblers will typically permit the notation
5343 "=value" in conjunction with the pc-relative literal load instructions
5344 to automatically place an immediate value or symbolic address in a
5345 nearby literal pool and generate a hidden label which references it.
5346 ISREG has been set to 0 in the case of =value. */
5347 if (instr->gen_lit_pool
5348 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5349 {
5350 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5351 if (op == OP_LDRSW_LIT)
5352 size = 4;
5353 if (instr->reloc.exp.X_op != O_constant
5354 && instr->reloc.exp.X_op != O_big
5355 && instr->reloc.exp.X_op != O_symbol)
5356 {
5357 record_operand_error (opcode, 1,
5358 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5359 _("constant expression expected"));
5360 return FALSE;
5361 }
5362 if (! add_to_lit_pool (&instr->reloc.exp, size))
5363 {
5364 record_operand_error (opcode, 1,
5365 AARCH64_OPDE_OTHER_ERROR,
5366 _("literal pool insertion failed"));
5367 return FALSE;
5368 }
5369 }
5370 break;
5371 case log_shift:
5372 case bitfield:
5373 /* UXT[BHW] Wd, Wn
5374	       Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5375	       for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5376 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5377 A programmer-friendly assembler should accept a destination Xd in
5378 place of Wd, however that is not the preferred form for disassembly.
5379 */
5380 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5381 && operands[1].qualifier == AARCH64_OPND_QLF_W
5382 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5383 operands[0].qualifier = AARCH64_OPND_QLF_W;
5384 break;
5385
5386 case addsub_ext:
5387 {
5388 /* In the 64-bit form, the final register operand is written as Wm
5389 for all but the (possibly omitted) UXTX/LSL and SXTX
5390 operators.
5391 As a programmer-friendly assembler, we accept e.g.
5392 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5393 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5394 int idx = aarch64_operand_index (opcode->operands,
5395 AARCH64_OPND_Rm_EXT);
5396 gas_assert (idx == 1 || idx == 2);
5397 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5398 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5399 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5400 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5401 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5402 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5403 }
5404 break;
5405
5406 default:
5407 break;
5408 }
5409
5410 DEBUG_TRACE ("exit with SUCCESS");
5411 return TRUE;
5412 }
5413
5414 /* A wrapper function to interface with libopcodes on encoding and
5415 record the error message if there is any.
5416
5417 Return TRUE on success; otherwise return FALSE. */
5418
5419 static bfd_boolean
5420 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5421 aarch64_insn *code)
5422 {
5423 aarch64_operand_error error_info;
5424 error_info.kind = AARCH64_OPDE_NIL;
5425 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5426 return TRUE;
5427 else
5428 {
5429 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5430 record_operand_error_info (opcode, &error_info);
5431 return FALSE;
5432 }
5433 }
5434
5435 #ifdef DEBUG_AARCH64
5436 static inline void
5437 dump_opcode_operands (const aarch64_opcode *opcode)
5438 {
5439 int i = 0;
5440 while (opcode->operands[i] != AARCH64_OPND_NIL)
5441 {
5442 aarch64_verbose ("\t\t opnd%d: %s", i,
5443 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5444 ? aarch64_get_operand_name (opcode->operands[i])
5445 : aarch64_get_operand_desc (opcode->operands[i]));
5446 ++i;
5447 }
5448 }
5449 #endif /* DEBUG_AARCH64 */
5450
5451 /* This is the guts of the machine-dependent assembler. STR points to a
5452 machine dependent instruction. This function is supposed to emit
5453 the frags/bytes it assembles to. */
5454
5455 void
5456 md_assemble (char *str)
5457 {
5458 char *p = str;
5459 templates *template;
5460 aarch64_opcode *opcode;
5461 aarch64_inst *inst_base;
5462 unsigned saved_cond;
5463
5464 /* Align the previous label if needed. */
5465 if (last_label_seen != NULL)
5466 {
5467 symbol_set_frag (last_label_seen, frag_now);
5468 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5469 S_SET_SEGMENT (last_label_seen, now_seg);
5470 }
5471
5472 inst.reloc.type = BFD_RELOC_UNUSED;
5473
5474 DEBUG_TRACE ("\n\n");
5475 DEBUG_TRACE ("==============================");
5476 DEBUG_TRACE ("Enter md_assemble with %s", str);
5477
5478 template = opcode_lookup (&p);
5479 if (!template)
5480 {
5481	      /* It wasn't an instruction, but it might be a register alias
5482	         created by a directive of the form "alias .req reg".  */
5483 if (!create_register_alias (str, p))
5484 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5485 str);
5486 return;
5487 }
5488
5489 skip_whitespace (p);
5490 if (*p == ',')
5491 {
5492 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5493 get_mnemonic_name (str), str);
5494 return;
5495 }
5496
5497 init_operand_error_report ();
5498
5499 saved_cond = inst.cond;
5500 reset_aarch64_instruction (&inst);
5501 inst.cond = saved_cond;
5502
5503 /* Iterate through all opcode entries with the same mnemonic name. */
5504 do
5505 {
5506 opcode = template->opcode;
5507
5508 DEBUG_TRACE ("opcode %s found", opcode->name);
5509 #ifdef DEBUG_AARCH64
5510 if (debug_dump)
5511 dump_opcode_operands (opcode);
5512 #endif /* DEBUG_AARCH64 */
5513
5514 /* Check that this instruction is supported for this CPU. */
5515 if (!opcode->avariant
5516 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5517 {
5518 as_bad (_("selected processor does not support `%s'"), str);
5519 return;
5520 }
5521
5522 mapping_state (MAP_INSN);
5523
5524 inst_base = &inst.base;
5525 inst_base->opcode = opcode;
5526
5527 /* Truly conditionally executed instructions, e.g. b.cond. */
5528 if (opcode->flags & F_COND)
5529 {
5530 gas_assert (inst.cond != COND_ALWAYS);
5531 inst_base->cond = get_cond_from_value (inst.cond);
5532 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5533 }
5534 else if (inst.cond != COND_ALWAYS)
5535 {
5536	          /* We shouldn't get here: the assembly looks like a conditional
5537	             instruction but the opcode found is unconditional.  */
5538 gas_assert (0);
5539 continue;
5540 }
5541
5542 if (parse_operands (p, opcode)
5543 && programmer_friendly_fixup (&inst)
5544 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5545 {
5546 if (inst.reloc.type == BFD_RELOC_UNUSED
5547 || !inst.reloc.need_libopcodes_p)
5548 output_inst (NULL);
5549 else
5550 {
5551 /* If there is relocation generated for the instruction,
5552 store the instruction information for the future fix-up. */
5553 struct aarch64_inst *copy;
5554 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5555 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5556 abort ();
5557 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5558 output_inst (copy);
5559 }
5560 return;
5561 }
5562
5563 template = template->next;
5564 if (template != NULL)
5565 {
5566 reset_aarch64_instruction (&inst);
5567 inst.cond = saved_cond;
5568 }
5569 }
5570 while (template != NULL);
5571
5572 /* Issue the error messages if any. */
5573 output_operand_error_report (str);
5574 }
5575
5576 /* Various frobbings of labels and their addresses. */
5577
5578 void
5579 aarch64_start_line_hook (void)
5580 {
5581 last_label_seen = NULL;
5582 }
5583
5584 void
5585 aarch64_frob_label (symbolS * sym)
5586 {
5587 last_label_seen = sym;
5588
5589 dwarf2_emit_label (sym);
5590 }
5591
5592 int
5593 aarch64_data_in_code (void)
5594 {
5595 if (!strncmp (input_line_pointer + 1, "data:", 5))
5596 {
5597 *input_line_pointer = '/';
5598 input_line_pointer += 5;
5599 *input_line_pointer = 0;
5600 return 1;
5601 }
5602
5603 return 0;
5604 }
5605
5606 char *
5607 aarch64_canonicalize_symbol_name (char *name)
5608 {
5609 int len;
5610
5611 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5612 *(name + len - 5) = 0;
5613
5614 return name;
5615 }
5616 \f
5617 /* Table of all register names defined by default. The user can
5618 define additional names with .req. Note that all register names
5619 should appear in both upper and lowercase variants. Some registers
5620 also have mixed-case names. */
5621
5622 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5623 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5624 #define REGSET31(p,t) \
5625 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5626 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5627 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5628 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5629 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5630 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5631 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5632 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5633 #define REGSET(p,t) \
5634 REGSET31(p,t), REGNUM(p,31,t)
5635
5636 /* These go into aarch64_reg_hsh hash-table. */
5637 static const reg_entry reg_names[] = {
5638 /* Integer registers. */
5639 REGSET31 (x, R_64), REGSET31 (X, R_64),
5640 REGSET31 (w, R_32), REGSET31 (W, R_32),
5641
5642 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5643 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5644
5645 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5646 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5647
5648 /* Coprocessor register numbers. */
5649 REGSET (c, CN), REGSET (C, CN),
5650
5651 /* Floating-point single precision registers. */
5652 REGSET (s, FP_S), REGSET (S, FP_S),
5653
5654 /* Floating-point double precision registers. */
5655 REGSET (d, FP_D), REGSET (D, FP_D),
5656
5657 /* Floating-point half precision registers. */
5658 REGSET (h, FP_H), REGSET (H, FP_H),
5659
5660 /* Floating-point byte precision registers. */
5661 REGSET (b, FP_B), REGSET (B, FP_B),
5662
5663 /* Floating-point quad precision registers. */
5664 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5665
5666 /* FP/SIMD registers. */
5667 REGSET (v, VN), REGSET (V, VN),
5668 };
5669
5670 #undef REGDEF
5671 #undef REGNUM
5672 #undef REGSET
5673
5674 #define N 1
5675 #define n 0
5676 #define Z 1
5677 #define z 0
5678 #define C 1
5679 #define c 0
5680 #define V 1
5681 #define v 0
5682 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5683 static const asm_nzcv nzcv_names[] = {
5684 {"nzcv", B (n, z, c, v)},
5685 {"nzcV", B (n, z, c, V)},
5686 {"nzCv", B (n, z, C, v)},
5687 {"nzCV", B (n, z, C, V)},
5688 {"nZcv", B (n, Z, c, v)},
5689 {"nZcV", B (n, Z, c, V)},
5690 {"nZCv", B (n, Z, C, v)},
5691 {"nZCV", B (n, Z, C, V)},
5692 {"Nzcv", B (N, z, c, v)},
5693 {"NzcV", B (N, z, c, V)},
5694 {"NzCv", B (N, z, C, v)},
5695 {"NzCV", B (N, z, C, V)},
5696 {"NZcv", B (N, Z, c, v)},
5697 {"NZcV", B (N, Z, c, V)},
5698 {"NZCv", B (N, Z, C, v)},
5699 {"NZCV", B (N, Z, C, V)}
5700 };
5701
5702 #undef N
5703 #undef n
5704 #undef Z
5705 #undef z
5706 #undef C
5707 #undef c
5708 #undef V
5709 #undef v
5710 #undef B
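
/* A minimal sketch of the encoding behind the table above: the flags N, Z,
   C and V occupy bits 3 down to 0, so e.g. "NzCV" encodes as 0xb and
   "nzcv" as 0.  The helper name is made up for the example. */
#if 0
static unsigned
example_nzcv_value (int n, int z, int c, int v)
{
  return (n << 3) | (z << 2) | (c << 1) | v;
}
#endif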
5711 \f
5712 /* MD interface: bits in the object file. */
5713
5714 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5715	   for use in the a.out file, and store them in the array pointed to by buf.
5716	   This knows about the endian-ness of the target machine and does
5717	   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
5718	   2 (short) and 4 (long).  Floating-point numbers are put out as a series
5719	   of LITTLENUMS (shorts, here at least).  */
5720
5721 void
5722 md_number_to_chars (char *buf, valueT val, int n)
5723 {
5724 if (target_big_endian)
5725 number_to_chars_bigendian (buf, val, n);
5726 else
5727 number_to_chars_littleendian (buf, val, n);
5728 }
5729
5730 /* MD interface: Sections. */
5731
5732 /* Estimate the size of a frag before relaxing. Assume everything fits in
5733 4 bytes. */
5734
5735 int
5736 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5737 {
5738 fragp->fr_var = 4;
5739 return 4;
5740 }
5741
5742 /* Round up a section size to the appropriate boundary. */
5743
5744 valueT
5745 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5746 {
5747 return size;
5748 }
5749
5750 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5751 of an rs_align_code fragment. */
5752
5753 void
5754 aarch64_handle_align (fragS * fragP)
5755 {
5756 /* NOP = d503201f */
5757 /* AArch64 instructions are always little-endian. */
5758 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5759
5760 int bytes, fix, noop_size;
5761 char *p;
5762 const char *noop;
5763
5764 if (fragP->fr_type != rs_align_code)
5765 return;
5766
5767 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5768 p = fragP->fr_literal + fragP->fr_fix;
5769 fix = 0;
5770
5771 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5772 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5773
5774 #ifdef OBJ_ELF
5775 gas_assert (fragP->tc_frag_data.recorded);
5776 #endif
5777
5778 noop = aarch64_noop;
5779 noop_size = sizeof (aarch64_noop);
5780 fragP->fr_var = noop_size;
5781
5782 if (bytes & (noop_size - 1))
5783 {
5784 fix = bytes & (noop_size - 1);
5785 #ifdef OBJ_ELF
5786 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5787 #endif
5788 memset (p, 0, fix);
5789 p += fix;
5790 bytes -= fix;
5791 }
5792
5793 while (bytes >= noop_size)
5794 {
5795 memcpy (p, noop, noop_size);
5796 p += noop_size;
5797 bytes -= noop_size;
5798 fix += noop_size;
5799 }
5800
5801 fragP->fr_fix += fix;
5802 }
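
/* A small worked sketch of the padding scheme above (illustrative only,
   assuming the 4-byte NOP and that BYTES has already been clamped): the
   residue modulo the NOP size is emitted as zero bytes first, so e.g. 10
   bytes of padding become 2 zero bytes followed by 2 NOPs. */
#if 0
static void
example_split_alignment_padding (int bytes, int *zero_bytes, int *noops)
{
  const int noop_size = 4;

  *zero_bytes = bytes & (noop_size - 1);  /* re-align to the insn size */
  *noops = (bytes - *zero_bytes) / noop_size;
}
#endif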
5803
5804 /* Called from md_do_align. Used to create an alignment
5805 frag in a code section. */
5806
5807 void
5808 aarch64_frag_align_code (int n, int max)
5809 {
5810 char *p;
5811
5812	  /* We assume that there will never be a requirement to support
5813	     alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
5814 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5815 as_fatal (_
5816 ("alignments greater than %d bytes not supported in .text sections"),
5817 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5818
5819 p = frag_var (rs_align_code,
5820 MAX_MEM_FOR_RS_ALIGN_CODE,
5821 1,
5822 (relax_substateT) max,
5823 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5824 *p = 0;
5825 }
5826
5827 /* Perform target specific initialisation of a frag.
5828 Note - despite the name this initialisation is not done when the frag
5829 is created, but only when its type is assigned. A frag can be created
5830 and used a long time before its type is set, so beware of assuming that
5831	   this initialisation is performed first.  */
5832
5833 #ifndef OBJ_ELF
5834 void
5835 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5836 int max_chars ATTRIBUTE_UNUSED)
5837 {
5838 }
5839
5840 #else /* OBJ_ELF is defined. */
5841 void
5842 aarch64_init_frag (fragS * fragP, int max_chars)
5843 {
5844 /* Record a mapping symbol for alignment frags. We will delete this
5845 later if the alignment ends up empty. */
5846 if (!fragP->tc_frag_data.recorded)
5847 {
5848 fragP->tc_frag_data.recorded = 1;
5849 switch (fragP->fr_type)
5850 {
5851 case rs_align:
5852 case rs_align_test:
5853 case rs_fill:
5854 mapping_state_2 (MAP_DATA, max_chars);
5855 break;
5856 case rs_align_code:
5857 mapping_state_2 (MAP_INSN, max_chars);
5858 break;
5859 default:
5860 break;
5861 }
5862 }
5863 }
5864 \f
5865 /* Initialize the DWARF-2 unwind information for this procedure. */
5866
5867 void
5868 tc_aarch64_frame_initial_instructions (void)
5869 {
5870 cfi_add_CFA_def_cfa (REG_SP, 0);
5871 }
5872 #endif /* OBJ_ELF */
5873
5874 /* Convert REGNAME to a DWARF-2 register number. */
5875
5876 int
5877 tc_aarch64_regname_to_dw2regnum (char *regname)
5878 {
5879 const reg_entry *reg = parse_reg (&regname);
5880 if (reg == NULL)
5881 return -1;
5882
5883 switch (reg->type)
5884 {
5885 case REG_TYPE_SP_32:
5886 case REG_TYPE_SP_64:
5887 case REG_TYPE_R_32:
5888 case REG_TYPE_R_64:
5889 case REG_TYPE_FP_B:
5890 case REG_TYPE_FP_H:
5891 case REG_TYPE_FP_S:
5892 case REG_TYPE_FP_D:
5893 case REG_TYPE_FP_Q:
5894 return reg->number;
5895 default:
5896 break;
5897 }
5898 return -1;
5899 }
5900
5901 /* Implement DWARF2_ADDR_SIZE. */
5902
5903 int
5904 aarch64_dwarf2_addr_size (void)
5905 {
5906 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5907 if (ilp32_p)
5908 return 4;
5909 #endif
5910 return bfd_arch_bits_per_address (stdoutput) / 8;
5911 }
5912
5913 /* MD interface: Symbol and relocation handling. */
5914
5915 /* Return the address within the segment that a PC-relative fixup is
5916	   relative to.  For AArch64, PC-relative fixups applied to instructions
5917 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5918
5919 long
5920 md_pcrel_from_section (fixS * fixP, segT seg)
5921 {
5922 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5923
5924 /* If this is pc-relative and we are going to emit a relocation
5925 then we just want to put out any pipeline compensation that the linker
5926 will need. Otherwise we want to use the calculated base. */
5927 if (fixP->fx_pcrel
5928 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5929 || aarch64_force_relocation (fixP)))
5930 base = 0;
5931
5932 /* AArch64 should be consistent for all pc-relative relocations. */
5933 return base + AARCH64_PCREL_OFFSET;
5934 }
5935
5936 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
5937 Otherwise we have no need to default values of symbols. */
5938
5939 symbolS *
5940 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5941 {
5942 #ifdef OBJ_ELF
5943 if (name[0] == '_' && name[1] == 'G'
5944 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5945 {
5946 if (!GOT_symbol)
5947 {
5948 if (symbol_find (name))
5949 as_bad (_("GOT already in the symbol table"));
5950
5951 GOT_symbol = symbol_new (name, undefined_section,
5952 (valueT) 0, &zero_address_frag);
5953 }
5954
5955 return GOT_symbol;
5956 }
5957 #endif
5958
5959 return 0;
5960 }
5961
5962 /* Return non-zero if the indicated VALUE has overflowed the maximum
5963 range expressible by an unsigned number with the indicated number of
5964 BITS. */
5965
5966 static bfd_boolean
5967 unsigned_overflow (valueT value, unsigned bits)
5968 {
5969 valueT lim;
5970 if (bits >= sizeof (valueT) * 8)
5971 return FALSE;
5972 lim = (valueT) 1 << bits;
5973 return (value >= lim);
5974 }
5975
5976
5977 /* Return non-zero if the indicated VALUE has overflowed the maximum
5978 range expressible by a signed number with the indicated number of
5979 BITS. */
5980
5981 static bfd_boolean
5982 signed_overflow (offsetT value, unsigned bits)
5983 {
5984 offsetT lim;
5985 if (bits >= sizeof (offsetT) * 8)
5986 return FALSE;
5987 lim = (offsetT) 1 << (bits - 1);
5988 return (value < -lim || value >= lim);
5989 }
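/* Worked examples with 16-bit fields (as used, e.g., for MOVZ/MOVK and
   exception immediates):

   unsigned_overflow (0xffff, 16) -> FALSE (0xffff < 0x10000)
   unsigned_overflow (0x10000, 16) -> TRUE
   signed_overflow (-0x8000, 16) -> FALSE (the smallest 16-bit value)
   signed_overflow (0x8000, 16) -> TRUE (the largest is 0x7fff)

   When BITS is at least as wide as the value type, both return FALSE. */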
5990
5991 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5992 unsigned immediate offset load/store instruction, try to encode it as
5993 an unscaled, 9-bit, signed immediate offset load/store instruction.
5994 Return TRUE if it is successful; otherwise return FALSE.
5995
5996 As a programmer-friendly convenience, LDUR/STUR instructions can be generated
5997 in response to the standard LDR/STR mnemonics when the immediate offset is
5998 unambiguous, i.e. when it is negative or unaligned. */
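/* For example (illustrative), the scaled, unsigned-offset form cannot
   represent

   ldr x0, [x1, #-8] <- negative offset
   ldr x0, [x1, #1] <- offset not a multiple of the access size

   so both are accepted here and encoded as the equivalent unscaled forms
   "ldur x0, [x1, #-8]" and "ldur x0, [x1, #1]". */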
5999
6000 static bfd_boolean
6001 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6002 {
6003 int idx;
6004 enum aarch64_op new_op;
6005 const aarch64_opcode *new_opcode;
6006
6007 gas_assert (instr->opcode->iclass == ldst_pos);
6008
6009 switch (instr->opcode->op)
6010 {
6011 case OP_LDRB_POS:new_op = OP_LDURB; break;
6012 case OP_STRB_POS: new_op = OP_STURB; break;
6013 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6014 case OP_LDRH_POS: new_op = OP_LDURH; break;
6015 case OP_STRH_POS: new_op = OP_STURH; break;
6016 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6017 case OP_LDR_POS: new_op = OP_LDUR; break;
6018 case OP_STR_POS: new_op = OP_STUR; break;
6019 case OP_LDRF_POS: new_op = OP_LDURV; break;
6020 case OP_STRF_POS: new_op = OP_STURV; break;
6021 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6022 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6023 default: new_op = OP_NIL; break;
6024 }
6025
6026 if (new_op == OP_NIL)
6027 return FALSE;
6028
6029 new_opcode = aarch64_get_opcode (new_op);
6030 gas_assert (new_opcode != NULL);
6031
6032 DEBUG_TRACE ("Check programmer-friendly STRB/LDRB -> STURB/LDURB: %d == %d",
6033 instr->opcode->op, new_opcode->op);
6034
6035 aarch64_replace_opcode (instr, new_opcode);
6036
6037 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6038 qualifier matching may fail because the out-of-date qualifier will
6039 prevent the operand from being updated with a new and correct qualifier. */
6040 idx = aarch64_operand_index (instr->opcode->operands,
6041 AARCH64_OPND_ADDR_SIMM9);
6042 gas_assert (idx == 1);
6043 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6044
6045 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6046
6047 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6048 return FALSE;
6049
6050 return TRUE;
6051 }
6052
6053 /* Called by fix_insn to fix a MOV immediate alias instruction.
6054
6055 Operand for a generic move immediate instruction, which is an alias
6056 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6057 a 32-bit/64-bit immediate value into a general register. An assembler error
6058 shall result if the immediate cannot be created by a single one of these
6059 instructions. If there is a choice, then to ensure reversibility an
6060 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
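/* A rough illustration of the preference order (the values are examples
   only; in this path the immediate usually comes from a late-resolved
   symbol):

   mov x0, #0x10000 -> movz x0, #0x1, lsl #16
   mov x0, #-1 -> movn x0, #0x0
   mov x0, #0x5555555555555555 -> orr x0, xzr, #0x5555555555555555
   mov x0, #0x12345 -> error, not expressible by a single MOVZ, MOVN or ORR */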
6061
6062 static void
6063 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6064 {
6065 const aarch64_opcode *opcode;
6066
6067 /* Need to check if the destination is SP/ZR. The check has to be done
6068 before any aarch64_replace_opcode. */
6069 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6070 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6071
6072 instr->operands[1].imm.value = value;
6073 instr->operands[1].skip = 0;
6074
6075 if (try_mov_wide_p)
6076 {
6077 /* Try the MOVZ alias. */
6078 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6079 aarch64_replace_opcode (instr, opcode);
6080 if (aarch64_opcode_encode (instr->opcode, instr,
6081 &instr->value, NULL, NULL))
6082 {
6083 put_aarch64_insn (buf, instr->value);
6084 return;
6085 }
6086 /* Try the MOVN alias. */
6087 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6088 aarch64_replace_opcode (instr, opcode);
6089 if (aarch64_opcode_encode (instr->opcode, instr,
6090 &instr->value, NULL, NULL))
6091 {
6092 put_aarch64_insn (buf, instr->value);
6093 return;
6094 }
6095 }
6096
6097 if (try_mov_bitmask_p)
6098 {
6099 /* Try the ORR alias. */
6100 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6101 aarch64_replace_opcode (instr, opcode);
6102 if (aarch64_opcode_encode (instr->opcode, instr,
6103 &instr->value, NULL, NULL))
6104 {
6105 put_aarch64_insn (buf, instr->value);
6106 return;
6107 }
6108 }
6109
6110 as_bad_where (fixP->fx_file, fixP->fx_line,
6111 _("immediate cannot be moved by a single instruction"));
6112 }
6113
6114 /* An instruction operand which is immediate-related may refer to a symbol
6115 in the assembly, e.g.
6116
6117 mov w0, u32
6118 .set u32, 0x00ffff00
6119
6120 At the time the assembly instruction is parsed, a referenced symbol,
6121 like 'u32' in the above example, may not yet have been seen; a fixS is
6122 created in such a case and is handled here after symbols have been resolved.
6123 The instruction is fixed up with VALUE using the information in *FIXP plus
6124 extra information in FLAGS.
6125
6126 This function is called by md_apply_fix to fix up instructions that need
6127 a fix-up described above but does not involve any linker-time relocation. */
6128
6129 static void
6130 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6131 {
6132 int idx;
6133 uint32_t insn;
6134 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6135 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6136 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6137
6138 if (new_inst)
6139 {
6140 /* Now the instruction is about to be fixed-up, so the operand that
6141 was previously marked as 'ignored' needs to be unmarked in order
6142 to get the encoding done properly. */
6143 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6144 new_inst->operands[idx].skip = 0;
6145 }
6146
6147 gas_assert (opnd != AARCH64_OPND_NIL);
6148
6149 switch (opnd)
6150 {
6151 case AARCH64_OPND_EXCEPTION:
6152 if (unsigned_overflow (value, 16))
6153 as_bad_where (fixP->fx_file, fixP->fx_line,
6154 _("immediate out of range"));
6155 insn = get_aarch64_insn (buf);
6156 insn |= encode_svc_imm (value);
6157 put_aarch64_insn (buf, insn);
6158 break;
6159
6160 case AARCH64_OPND_AIMM:
6161 /* ADD or SUB with immediate.
6162 NOTE this assumes we come here with an add/sub shifted reg encoding
6163 3 322|2222|2 2 2 21111 111111
6164 1 098|7654|3 2 1 09876 543210 98765 43210
6165 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6166 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6167 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6168 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6169 ->
6170 3 322|2222|2 2 221111111111
6171 1 098|7654|3 2 109876543210 98765 43210
6172 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6173 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6174 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6175 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6176 Fields sf Rn Rd are already set. */
6177 insn = get_aarch64_insn (buf);
6178 if (value < 0)
6179 {
6180 /* Add <-> sub. */
6181 insn = reencode_addsub_switch_add_sub (insn);
6182 value = -value;
6183 }
6184
6185 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6186 && unsigned_overflow (value, 12))
6187 {
6188 /* Try to shift the value by 12 to make it fit. */
6189 if (((value >> 12) << 12) == value
6190 && ! unsigned_overflow (value, 12 + 12))
6191 {
6192 value >>= 12;
6193 insn |= encode_addsub_imm_shift_amount (1);
6194 }
6195 }
6196
6197 if (unsigned_overflow (value, 12))
6198 as_bad_where (fixP->fx_file, fixP->fx_line,
6199 _("immediate out of range"));
6200
6201 insn |= encode_addsub_imm (value);
6202
6203 put_aarch64_insn (buf, insn);
6204 break;
6205
6206 case AARCH64_OPND_SIMD_IMM:
6207 case AARCH64_OPND_SIMD_IMM_SFT:
6208 case AARCH64_OPND_LIMM:
6209 /* Bit mask immediate. */
6210 gas_assert (new_inst != NULL);
6211 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6212 new_inst->operands[idx].imm.value = value;
6213 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6214 &new_inst->value, NULL, NULL))
6215 put_aarch64_insn (buf, new_inst->value);
6216 else
6217 as_bad_where (fixP->fx_file, fixP->fx_line,
6218 _("invalid immediate"));
6219 break;
6220
6221 case AARCH64_OPND_HALF:
6222 /* 16-bit unsigned immediate. */
6223 if (unsigned_overflow (value, 16))
6224 as_bad_where (fixP->fx_file, fixP->fx_line,
6225 _("immediate out of range"));
6226 insn = get_aarch64_insn (buf);
6227 insn |= encode_movw_imm (value & 0xffff);
6228 put_aarch64_insn (buf, insn);
6229 break;
6230
6231 case AARCH64_OPND_IMM_MOV:
6232 /* Operand for a generic move immediate instruction, which is
6233 an alias instruction that generates a single MOVZ, MOVN or ORR
6234 instruction to load a 32-bit/64-bit immediate value into a general
6235 register. An assembler error shall result if the immediate cannot be
6236 created by a single one of these instructions. If there is a choice,
6237 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6238 and MOVZ or MOVN to ORR. */
6239 gas_assert (new_inst != NULL);
6240 fix_mov_imm_insn (fixP, buf, new_inst, value);
6241 break;
6242
6243 case AARCH64_OPND_ADDR_SIMM7:
6244 case AARCH64_OPND_ADDR_SIMM9:
6245 case AARCH64_OPND_ADDR_SIMM9_2:
6246 case AARCH64_OPND_ADDR_UIMM12:
6247 /* Immediate offset in an address. */
6248 insn = get_aarch64_insn (buf);
6249
6250 gas_assert (new_inst != NULL && new_inst->value == insn);
6251 gas_assert (new_inst->opcode->operands[1] == opnd
6252 || new_inst->opcode->operands[2] == opnd);
6253
6254 /* Get the index of the address operand. */
6255 if (new_inst->opcode->operands[1] == opnd)
6256 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6257 idx = 1;
6258 else
6259 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6260 idx = 2;
6261
6262 /* Update the resolved offset value. */
6263 new_inst->operands[idx].addr.offset.imm = value;
6264
6265 /* Encode/fix-up. */
6266 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6267 &new_inst->value, NULL, NULL))
6268 {
6269 put_aarch64_insn (buf, new_inst->value);
6270 break;
6271 }
6272 else if (new_inst->opcode->iclass == ldst_pos
6273 && try_to_encode_as_unscaled_ldst (new_inst))
6274 {
6275 put_aarch64_insn (buf, new_inst->value);
6276 break;
6277 }
6278
6279 as_bad_where (fixP->fx_file, fixP->fx_line,
6280 _("immediate offset out of range"));
6281 break;
6282
6283 default:
6284 gas_assert (0);
6285 as_fatal (_("unhandled operand code %d"), opnd);
6286 }
6287 }
6288
6289 /* Apply a fixup (fixP) to segment data, once it has been determined
6290 by our caller that we have all the info we need to fix it up.
6291
6292 Parameter valP is the pointer to the value of the bits. */
6293
6294 void
6295 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6296 {
6297 offsetT value = *valP;
6298 uint32_t insn;
6299 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6300 int scale;
6301 unsigned flags = fixP->fx_addnumber;
6302
6303 DEBUG_TRACE ("\n\n");
6304 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6305 DEBUG_TRACE ("Enter md_apply_fix");
6306
6307 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6308
6309 /* Note whether this will delete the relocation. */
6310
6311 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6312 fixP->fx_done = 1;
6313
6314 /* Process the relocations. */
6315 switch (fixP->fx_r_type)
6316 {
6317 case BFD_RELOC_NONE:
6318 /* This will need to go in the object file. */
6319 fixP->fx_done = 0;
6320 break;
6321
6322 case BFD_RELOC_8:
6323 case BFD_RELOC_8_PCREL:
6324 if (fixP->fx_done || !seg->use_rela_p)
6325 md_number_to_chars (buf, value, 1);
6326 break;
6327
6328 case BFD_RELOC_16:
6329 case BFD_RELOC_16_PCREL:
6330 if (fixP->fx_done || !seg->use_rela_p)
6331 md_number_to_chars (buf, value, 2);
6332 break;
6333
6334 case BFD_RELOC_32:
6335 case BFD_RELOC_32_PCREL:
6336 if (fixP->fx_done || !seg->use_rela_p)
6337 md_number_to_chars (buf, value, 4);
6338 break;
6339
6340 case BFD_RELOC_64:
6341 case BFD_RELOC_64_PCREL:
6342 if (fixP->fx_done || !seg->use_rela_p)
6343 md_number_to_chars (buf, value, 8);
6344 break;
6345
6346 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6347 /* We claim that these fixups have been processed here, even if
6348 in fact we generate an error because we do not have a reloc
6349 for them, so tc_gen_reloc() will reject them. */
6350 fixP->fx_done = 1;
6351 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6352 {
6353 as_bad_where (fixP->fx_file, fixP->fx_line,
6354 _("undefined symbol %s used as an immediate value"),
6355 S_GET_NAME (fixP->fx_addsy));
6356 goto apply_fix_return;
6357 }
6358 fix_insn (fixP, flags, value);
6359 break;
6360
6361 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6362 if (fixP->fx_done || !seg->use_rela_p)
6363 {
6364 if (value & 3)
6365 as_bad_where (fixP->fx_file, fixP->fx_line,
6366 _("pc-relative load offset not word aligned"));
6367 if (signed_overflow (value, 21))
6368 as_bad_where (fixP->fx_file, fixP->fx_line,
6369 _("pc-relative load offset out of range"));
6370 insn = get_aarch64_insn (buf);
6371 insn |= encode_ld_lit_ofs_19 (value >> 2);
6372 put_aarch64_insn (buf, insn);
6373 }
6374 break;
6375
6376 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6377 if (fixP->fx_done || !seg->use_rela_p)
6378 {
6379 if (signed_overflow (value, 21))
6380 as_bad_where (fixP->fx_file, fixP->fx_line,
6381 _("pc-relative address offset out of range"));
6382 insn = get_aarch64_insn (buf);
6383 insn |= encode_adr_imm (value);
6384 put_aarch64_insn (buf, insn);
6385 }
6386 break;
6387
6388 case BFD_RELOC_AARCH64_BRANCH19:
6389 if (fixP->fx_done || !seg->use_rela_p)
6390 {
6391 if (value & 3)
6392 as_bad_where (fixP->fx_file, fixP->fx_line,
6393 _("conditional branch target not word aligned"));
6394 if (signed_overflow (value, 21))
6395 as_bad_where (fixP->fx_file, fixP->fx_line,
6396 _("conditional branch out of range"));
6397 insn = get_aarch64_insn (buf);
6398 insn |= encode_cond_branch_ofs_19 (value >> 2);
6399 put_aarch64_insn (buf, insn);
6400 }
6401 break;
6402
6403 case BFD_RELOC_AARCH64_TSTBR14:
6404 if (fixP->fx_done || !seg->use_rela_p)
6405 {
6406 if (value & 3)
6407 as_bad_where (fixP->fx_file, fixP->fx_line,
6408 _("conditional branch target not word aligned"));
6409 if (signed_overflow (value, 16))
6410 as_bad_where (fixP->fx_file, fixP->fx_line,
6411 _("conditional branch out of range"));
6412 insn = get_aarch64_insn (buf);
6413 insn |= encode_tst_branch_ofs_14 (value >> 2);
6414 put_aarch64_insn (buf, insn);
6415 }
6416 break;
6417
6418 case BFD_RELOC_AARCH64_JUMP26:
6419 case BFD_RELOC_AARCH64_CALL26:
6420 if (fixP->fx_done || !seg->use_rela_p)
6421 {
6422 if (value & 3)
6423 as_bad_where (fixP->fx_file, fixP->fx_line,
6424 _("branch target not word aligned"));
6425 if (signed_overflow (value, 28))
6426 as_bad_where (fixP->fx_file, fixP->fx_line,
6427 _("branch out of range"));
6428 insn = get_aarch64_insn (buf);
6429 insn |= encode_branch_ofs_26 (value >> 2);
6430 put_aarch64_insn (buf, insn);
6431 }
6432 break;
6433
6434 case BFD_RELOC_AARCH64_MOVW_G0:
6435 case BFD_RELOC_AARCH64_MOVW_G0_S:
6436 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6437 scale = 0;
6438 goto movw_common;
6439 case BFD_RELOC_AARCH64_MOVW_G1:
6440 case BFD_RELOC_AARCH64_MOVW_G1_S:
6441 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6442 scale = 16;
6443 goto movw_common;
6444 case BFD_RELOC_AARCH64_MOVW_G2:
6445 case BFD_RELOC_AARCH64_MOVW_G2_S:
6446 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6447 scale = 32;
6448 goto movw_common;
6449 case BFD_RELOC_AARCH64_MOVW_G3:
6450 scale = 48;
6451 movw_common:
6452 if (fixP->fx_done || !seg->use_rela_p)
6453 {
6454 insn = get_aarch64_insn (buf);
6455
6456 if (!fixP->fx_done)
6457 {
6458 /* REL signed addend must fit in 16 bits */
6459 if (signed_overflow (value, 16))
6460 as_bad_where (fixP->fx_file, fixP->fx_line,
6461 _("offset out of range"));
6462 }
6463 else
6464 {
6465 /* Check for overflow and scale. */
6466 switch (fixP->fx_r_type)
6467 {
6468 case BFD_RELOC_AARCH64_MOVW_G0:
6469 case BFD_RELOC_AARCH64_MOVW_G1:
6470 case BFD_RELOC_AARCH64_MOVW_G2:
6471 case BFD_RELOC_AARCH64_MOVW_G3:
6472 if (unsigned_overflow (value, scale + 16))
6473 as_bad_where (fixP->fx_file, fixP->fx_line,
6474 _("unsigned value out of range"));
6475 break;
6476 case BFD_RELOC_AARCH64_MOVW_G0_S:
6477 case BFD_RELOC_AARCH64_MOVW_G1_S:
6478 case BFD_RELOC_AARCH64_MOVW_G2_S:
6479 /* NOTE: We can only come here with movz or movn. */
6480 if (signed_overflow (value, scale + 16))
6481 as_bad_where (fixP->fx_file, fixP->fx_line,
6482 _("signed value out of range"));
6483 if (value < 0)
6484 {
6485 /* Force use of MOVN. */
6486 value = ~value;
6487 insn = reencode_movzn_to_movn (insn);
6488 }
6489 else
6490 {
6491 /* Force use of MOVZ. */
6492 insn = reencode_movzn_to_movz (insn);
6493 }
6494 break;
6495 default:
6496 /* Unchecked relocations. */
6497 break;
6498 }
6499 value >>= scale;
6500 }
6501
6502 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6503 insn |= encode_movw_imm (value & 0xffff);
6504
6505 put_aarch64_insn (buf, insn);
6506 }
6507 break;
6508
6509 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6510 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6511 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6512 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6513 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6514 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6515 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6516 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6517 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6518 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6519 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6520 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6521 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6522 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6523 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6524 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6525 /* Should always be exported to object file, see
6526 aarch64_force_relocation(). */
6527 gas_assert (!fixP->fx_done);
6528 gas_assert (seg->use_rela_p);
6529 break;
6530
6531 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6532 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6533 case BFD_RELOC_AARCH64_ADD_LO12:
6534 case BFD_RELOC_AARCH64_LDST8_LO12:
6535 case BFD_RELOC_AARCH64_LDST16_LO12:
6536 case BFD_RELOC_AARCH64_LDST32_LO12:
6537 case BFD_RELOC_AARCH64_LDST64_LO12:
6538 case BFD_RELOC_AARCH64_LDST128_LO12:
6539 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6540 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6541 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6542 /* Should always be exported to object file, see
6543 aarch64_force_relocation(). */
6544 gas_assert (!fixP->fx_done);
6545 gas_assert (seg->use_rela_p);
6546 break;
6547
6548 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6549 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6550 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6551 break;
6552
6553 default:
6554 as_bad_where (fixP->fx_file, fixP->fx_line,
6555 _("unexpected %s fixup"),
6556 bfd_get_reloc_code_name (fixP->fx_r_type));
6557 break;
6558 }
6559
6560 apply_fix_return:
6561 /* Free the allocated struct aarch64_inst.
6562 N.B. currently only a limited number of fix-up types actually use
6563 this field, so the impact on performance should be minimal. */
6564 if (fixP->tc_fix_data.inst != NULL)
6565 free (fixP->tc_fix_data.inst);
6566
6567 return;
6568 }
6569
6570 /* Translate internal representation of relocation info to BFD target
6571 format. */
6572
6573 arelent *
6574 tc_gen_reloc (asection * section, fixS * fixp)
6575 {
6576 arelent *reloc;
6577 bfd_reloc_code_real_type code;
6578
6579 reloc = xmalloc (sizeof (arelent));
6580
6581 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6582 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6583 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6584
6585 if (fixp->fx_pcrel)
6586 {
6587 if (section->use_rela_p)
6588 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6589 else
6590 fixp->fx_offset = reloc->address;
6591 }
6592 reloc->addend = fixp->fx_offset;
6593
6594 code = fixp->fx_r_type;
6595 switch (code)
6596 {
6597 case BFD_RELOC_16:
6598 if (fixp->fx_pcrel)
6599 code = BFD_RELOC_16_PCREL;
6600 break;
6601
6602 case BFD_RELOC_32:
6603 if (fixp->fx_pcrel)
6604 code = BFD_RELOC_32_PCREL;
6605 break;
6606
6607 case BFD_RELOC_64:
6608 if (fixp->fx_pcrel)
6609 code = BFD_RELOC_64_PCREL;
6610 break;
6611
6612 default:
6613 break;
6614 }
6615
6616 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6617 if (reloc->howto == NULL)
6618 {
6619 as_bad_where (fixp->fx_file, fixp->fx_line,
6620 _
6621 ("cannot represent %s relocation in this object file format"),
6622 bfd_get_reloc_code_name (code));
6623 return NULL;
6624 }
6625
6626 return reloc;
6627 }
6628
6629 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6630
6631 void
6632 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6633 {
6634 bfd_reloc_code_real_type type;
6635 int pcrel = 0;
6636
6637 /* Pick a reloc.
6638 FIXME: @@ Should look at CPU word size. */
6639 switch (size)
6640 {
6641 case 1:
6642 type = BFD_RELOC_8;
6643 break;
6644 case 2:
6645 type = BFD_RELOC_16;
6646 break;
6647 case 4:
6648 type = BFD_RELOC_32;
6649 break;
6650 case 8:
6651 type = BFD_RELOC_64;
6652 break;
6653 default:
6654 as_bad (_("cannot do %u-byte relocation"), size);
6655 type = BFD_RELOC_UNUSED;
6656 break;
6657 }
6658
6659 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6660 }
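/* For instance, a data definition such as

   .word sym <- 4 bytes, BFD_RELOC_32 against sym
   .xword sym <- 8 bytes, BFD_RELOC_64 against sym

   reaches this function via TC_CONS_FIX_NEW with SIZE 4 or 8 respectively. */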
6661
6662 int
6663 aarch64_force_relocation (struct fix *fixp)
6664 {
6665 switch (fixp->fx_r_type)
6666 {
6667 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6668 /* Perform these "immediate" internal relocations
6669 even if the symbol is extern or weak. */
6670 return 0;
6671
6672 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6673 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6674 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6675 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6676 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6677 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6678 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6679 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6680 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6681 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6682 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6683 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6684 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6685 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6686 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6687 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6688 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6689 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6690 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6691 case BFD_RELOC_AARCH64_ADD_LO12:
6692 case BFD_RELOC_AARCH64_LDST8_LO12:
6693 case BFD_RELOC_AARCH64_LDST16_LO12:
6694 case BFD_RELOC_AARCH64_LDST32_LO12:
6695 case BFD_RELOC_AARCH64_LDST64_LO12:
6696 case BFD_RELOC_AARCH64_LDST128_LO12:
6697 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6698 /* Always leave these relocations for the linker. */
6699 return 1;
6700
6701 default:
6702 break;
6703 }
6704
6705 return generic_force_reloc (fixp);
6706 }
6707
6708 #ifdef OBJ_ELF
6709
6710 const char *
6711 elf64_aarch64_target_format (void)
6712 {
6713 if (target_big_endian)
6714 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6715 else
6716 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6717 }
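/* So, for example, the default LP64 little-endian configuration selects
   "elf64-littleaarch64", while assembling with -milp32 -EB selects
   "elf32-bigaarch64". */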
6718
6719 void
6720 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6721 {
6722 elf_frob_symbol (symp, puntp);
6723 }
6724 #endif
6725
6726 /* MD interface: Finalization. */
6727
6728 /* A good place to do this, although this was probably not intended
6729 for this kind of use. We need to dump the literal pool before
6730 references are made to a null symbol pointer. */
6731
6732 void
6733 aarch64_cleanup (void)
6734 {
6735 literal_pool *pool;
6736
6737 for (pool = list_of_pools; pool; pool = pool->next)
6738 {
6739 /* Put it at the end of the relevant section. */
6740 subseg_set (pool->section, pool->sub_section);
6741 s_ltorg (0);
6742 }
6743 }
6744
6745 #ifdef OBJ_ELF
6746 /* Remove any excess mapping symbols generated for alignment frags in
6747 SEC. We may have created a mapping symbol before a zero byte
6748 alignment; remove it if there's a mapping symbol after the
6749 alignment. */
6750 static void
6751 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6752 void *dummy ATTRIBUTE_UNUSED)
6753 {
6754 segment_info_type *seginfo = seg_info (sec);
6755 fragS *fragp;
6756
6757 if (seginfo == NULL || seginfo->frchainP == NULL)
6758 return;
6759
6760 for (fragp = seginfo->frchainP->frch_root;
6761 fragp != NULL; fragp = fragp->fr_next)
6762 {
6763 symbolS *sym = fragp->tc_frag_data.last_map;
6764 fragS *next = fragp->fr_next;
6765
6766 /* Variable-sized frags have been converted to fixed size by
6767 this point. But if this was variable-sized to start with,
6768 there will be a fixed-size frag after it. So don't handle
6769 next == NULL. */
6770 if (sym == NULL || next == NULL)
6771 continue;
6772
6773 if (S_GET_VALUE (sym) < next->fr_address)
6774 /* Not at the end of this frag. */
6775 continue;
6776 know (S_GET_VALUE (sym) == next->fr_address);
6777
6778 do
6779 {
6780 if (next->tc_frag_data.first_map != NULL)
6781 {
6782 /* Next frag starts with a mapping symbol. Discard this
6783 one. */
6784 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6785 break;
6786 }
6787
6788 if (next->fr_next == NULL)
6789 {
6790 /* This mapping symbol is at the end of the section. Discard
6791 it. */
6792 know (next->fr_fix == 0 && next->fr_var == 0);
6793 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6794 break;
6795 }
6796
6797 /* As long as we have empty frags without any mapping symbols,
6798 keep looking. */
6799 /* If the next frag is non-empty and does not start with a
6800 mapping symbol, then this mapping symbol is required. */
6801 if (next->fr_address != next->fr_next->fr_address)
6802 break;
6803
6804 next = next->fr_next;
6805 }
6806 while (next != NULL);
6807 }
6808 }
6809 #endif
6810
6811 /* Adjust the symbol table. */
6812
6813 void
6814 aarch64_adjust_symtab (void)
6815 {
6816 #ifdef OBJ_ELF
6817 /* Remove any overlapping mapping symbols generated by alignment frags. */
6818 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6819 /* Now do generic ELF adjustments. */
6820 elf_adjust_symtab ();
6821 #endif
6822 }
6823
6824 static void
6825 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6826 {
6827 const char *hash_err;
6828
6829 hash_err = hash_insert (table, key, value);
6830 if (hash_err)
6831 printf ("Internal Error: Can't hash %s\n", key);
6832 }
6833
6834 static void
6835 fill_instruction_hash_table (void)
6836 {
6837 aarch64_opcode *opcode = aarch64_opcode_table;
6838
6839 while (opcode->name != NULL)
6840 {
6841 templates *templ, *new_templ;
6842 templ = hash_find (aarch64_ops_hsh, opcode->name);
6843
6844 new_templ = (templates *) xmalloc (sizeof (templates));
6845 new_templ->opcode = opcode;
6846 new_templ->next = NULL;
6847
6848 if (!templ)
6849 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6850 else
6851 {
6852 new_templ->next = templ->next;
6853 templ->next = new_templ;
6854 }
6855 ++opcode;
6856 }
6857 }
6858
6859 static inline void
6860 convert_to_upper (char *dst, const char *src, size_t num)
6861 {
6862 unsigned int i;
6863 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6864 *dst = TOUPPER (*src);
6865 *dst = '\0';
6866 }
6867
6868 /* Assume STR points to a lower-case string; allocate, convert and return
6869 the corresponding upper-case string. */
6870 static inline const char*
6871 get_upper_str (const char *str)
6872 {
6873 char *ret;
6874 size_t len = strlen (str);
6875 if ((ret = xmalloc (len + 1)) == NULL)
6876 abort ();
6877 convert_to_upper (ret, str, len);
6878 return ret;
6879 }
6880
6881 /* MD interface: Initialization. */
6882
6883 void
6884 md_begin (void)
6885 {
6886 unsigned mach;
6887 unsigned int i;
6888
6889 if ((aarch64_ops_hsh = hash_new ()) == NULL
6890 || (aarch64_cond_hsh = hash_new ()) == NULL
6891 || (aarch64_shift_hsh = hash_new ()) == NULL
6892 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6893 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6894 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6895 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6896 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6897 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6898 || (aarch64_reg_hsh = hash_new ()) == NULL
6899 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6900 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6901 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6902 as_fatal (_("virtual memory exhausted"));
6903
6904 fill_instruction_hash_table ();
6905
6906 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6907 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6908 (void *) (aarch64_sys_regs + i));
6909
6910 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6911 checked_hash_insert (aarch64_pstatefield_hsh,
6912 aarch64_pstatefields[i].name,
6913 (void *) (aarch64_pstatefields + i));
6914
6915 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6916 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6917 aarch64_sys_regs_ic[i].template,
6918 (void *) (aarch64_sys_regs_ic + i));
6919
6920 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6921 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6922 aarch64_sys_regs_dc[i].template,
6923 (void *) (aarch64_sys_regs_dc + i));
6924
6925 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6926 checked_hash_insert (aarch64_sys_regs_at_hsh,
6927 aarch64_sys_regs_at[i].template,
6928 (void *) (aarch64_sys_regs_at + i));
6929
6930 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6931 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6932 aarch64_sys_regs_tlbi[i].template,
6933 (void *) (aarch64_sys_regs_tlbi + i));
6934
6935 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6936 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6937 (void *) (reg_names + i));
6938
6939 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6940 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6941 (void *) (nzcv_names + i));
6942
6943 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6944 {
6945 const char *name = aarch64_operand_modifiers[i].name;
6946 checked_hash_insert (aarch64_shift_hsh, name,
6947 (void *) (aarch64_operand_modifiers + i));
6948 /* Also hash the name in the upper case. */
6949 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6950 (void *) (aarch64_operand_modifiers + i));
6951 }
6952
6953 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6954 {
6955 unsigned int j;
6956 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6957 the same condition code. */
6958 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6959 {
6960 const char *name = aarch64_conds[i].names[j];
6961 if (name == NULL)
6962 break;
6963 checked_hash_insert (aarch64_cond_hsh, name,
6964 (void *) (aarch64_conds + i));
6965 /* Also hash the name in the upper case. */
6966 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6967 (void *) (aarch64_conds + i));
6968 }
6969 }
6970
6971 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6972 {
6973 const char *name = aarch64_barrier_options[i].name;
6974 /* Skip xx00 - the unallocated option values. */
6975 if ((i & 0x3) == 0)
6976 continue;
6977 checked_hash_insert (aarch64_barrier_opt_hsh, name,
6978 (void *) (aarch64_barrier_options + i));
6979 /* Also hash the name in the upper case. */
6980 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6981 (void *) (aarch64_barrier_options + i));
6982 }
6983
6984 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6985 {
6986 const char* name = aarch64_prfops[i].name;
6987 /* Skip the unallocated hint encodings. */
6988 if (name == NULL)
6989 continue;
6990 checked_hash_insert (aarch64_pldop_hsh, name,
6991 (void *) (aarch64_prfops + i));
6992 /* Also hash the name in the upper case. */
6993 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6994 (void *) (aarch64_prfops + i));
6995 }
6996
6997 /* Set the cpu variant based on the command-line options. */
6998 if (!mcpu_cpu_opt)
6999 mcpu_cpu_opt = march_cpu_opt;
7000
7001 if (!mcpu_cpu_opt)
7002 mcpu_cpu_opt = &cpu_default;
7003
7004 cpu_variant = *mcpu_cpu_opt;
7005
7006 /* Record the CPU type. */
7007 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7008
7009 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7010 }
7011
7012 /* Command line processing. */
7013
7014 const char *md_shortopts = "m:";
7015
7016 #ifdef AARCH64_BI_ENDIAN
7017 #define OPTION_EB (OPTION_MD_BASE + 0)
7018 #define OPTION_EL (OPTION_MD_BASE + 1)
7019 #else
7020 #if TARGET_BYTES_BIG_ENDIAN
7021 #define OPTION_EB (OPTION_MD_BASE + 0)
7022 #else
7023 #define OPTION_EL (OPTION_MD_BASE + 1)
7024 #endif
7025 #endif
7026
7027 struct option md_longopts[] = {
7028 #ifdef OPTION_EB
7029 {"EB", no_argument, NULL, OPTION_EB},
7030 #endif
7031 #ifdef OPTION_EL
7032 {"EL", no_argument, NULL, OPTION_EL},
7033 #endif
7034 {NULL, no_argument, NULL, 0}
7035 };
7036
7037 size_t md_longopts_size = sizeof (md_longopts);
7038
7039 struct aarch64_option_table
7040 {
7041 char *option; /* Option name to match. */
7042 char *help; /* Help information. */
7043 int *var; /* Variable to change. */
7044 int value; /* What to change it to. */
7045 char *deprecated; /* If non-null, print this message. */
7046 };
7047
7048 static struct aarch64_option_table aarch64_opts[] = {
7049 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7050 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7051 NULL},
7052 #ifdef OBJ_ELF
7053 {"mlp64", N_("select the LP64 model"), &ilp32_p, 0, NULL},
7054 {"milp32", N_("select the ILP32 model"), &ilp32_p, 1, NULL},
7055 #endif /* OBJ_ELF */
7056 #ifdef DEBUG_AARCH64
7057 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7058 #endif /* DEBUG_AARCH64 */
7059 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7060 NULL},
7061 {NULL, NULL, NULL, 0, NULL}
7062 };
7063
7064 struct aarch64_cpu_option_table
7065 {
7066 char *name;
7067 const aarch64_feature_set value;
7068 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7069 case. */
7070 const char *canonical_name;
7071 };
7072
7073 /* This list should, at a minimum, contain all the cpu names
7074 recognized by GCC. */
7075 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7076 {"all", AARCH64_ANY, NULL},
7077 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7078 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7079 {"generic", AARCH64_ARCH_V8, NULL},
7080
7081 /* These two are example CPUs supported in GCC, once we have real
7082 CPUs they will be removed. */
7083 {"example-1", AARCH64_ARCH_V8, NULL},
7084 {"example-2", AARCH64_ARCH_V8, NULL},
7085
7086 {NULL, AARCH64_ARCH_NONE, NULL}
7087 };
7088
7089 struct aarch64_arch_option_table
7090 {
7091 char *name;
7092 const aarch64_feature_set value;
7093 };
7094
7095 /* This list should, at a minimum, contain all the architecture names
7096 recognized by GCC. */
7097 static const struct aarch64_arch_option_table aarch64_archs[] = {
7098 {"all", AARCH64_ANY},
7099 {"armv8-a", AARCH64_ARCH_V8},
7100 {NULL, AARCH64_ARCH_NONE}
7101 };
7102
7103 /* ISA extensions. */
7104 struct aarch64_option_cpu_value_table
7105 {
7106 char *name;
7107 const aarch64_feature_set value;
7108 };
7109
7110 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7111 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7112 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7113 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7114 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7115 {NULL, AARCH64_ARCH_NONE}
7116 };
7117
7118 struct aarch64_long_option_table
7119 {
7120 char *option; /* Substring to match. */
7121 char *help; /* Help information. */
7122 int (*func) (char *subopt); /* Function to decode sub-option. */
7123 char *deprecated; /* If non-null, print this message. */
7124 };
7125
7126 static int
7127 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7128 {
7129 /* We insist on extensions being added before being removed. We achieve
7130 this by using the ADDING_VALUE variable to indicate whether we are
7131 adding an extension (1) or removing it (0) and only allowing it to
7132 change in the order -1 -> 1 -> 0. */
7133 int adding_value = -1;
7134 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7135
7136 /* Copy the feature set, so that we can modify it. */
7137 *ext_set = **opt_p;
7138 *opt_p = ext_set;
7139
7140 while (str != NULL && *str != 0)
7141 {
7142 const struct aarch64_option_cpu_value_table *opt;
7143 char *ext;
7144 int optlen;
7145
7146 if (*str != '+')
7147 {
7148 as_bad (_("invalid architectural extension"));
7149 return 0;
7150 }
7151
7152 str++;
7153 ext = strchr (str, '+');
7154
7155 if (ext != NULL)
7156 optlen = ext - str;
7157 else
7158 optlen = strlen (str);
7159
7160 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7161 {
7162 if (adding_value != 0)
7163 adding_value = 0;
7164 optlen -= 2;
7165 str += 2;
7166 }
7167 else if (optlen > 0)
7168 {
7169 if (adding_value == -1)
7170 adding_value = 1;
7171 else if (adding_value != 1)
7172 {
7173 as_bad (_("must specify extensions to add before specifying "
7174 "those to remove"));
7175 return FALSE;
7176 }
7177 }
7178
7179 if (optlen == 0)
7180 {
7181 as_bad (_("missing architectural extension"));
7182 return 0;
7183 }
7184
7185 gas_assert (adding_value != -1);
7186
7187 for (opt = aarch64_features; opt->name != NULL; opt++)
7188 if (strncmp (opt->name, str, optlen) == 0)
7189 {
7190 /* Add or remove the extension. */
7191 if (adding_value)
7192 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7193 else
7194 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7195 break;
7196 }
7197
7198 if (opt->name == NULL)
7199 {
7200 as_bad (_("unknown architectural extension `%s'"), str);
7201 return 0;
7202 }
7203
7204 str = ext;
7205 };
7206
7207 return 1;
7208 }
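/* Example of the accepted extension syntax (additions must precede
   removals):

   -mcpu=cortex-a53+crypto+nofp <- add crypto, then remove fp
   -mcpu=cortex-a53+nofp+crypto <- rejected: an addition follows a removal */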
7209
7210 static int
7211 aarch64_parse_cpu (char *str)
7212 {
7213 const struct aarch64_cpu_option_table *opt;
7214 char *ext = strchr (str, '+');
7215 size_t optlen;
7216
7217 if (ext != NULL)
7218 optlen = ext - str;
7219 else
7220 optlen = strlen (str);
7221
7222 if (optlen == 0)
7223 {
7224 as_bad (_("missing cpu name `%s'"), str);
7225 return 0;
7226 }
7227
7228 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7229 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7230 {
7231 mcpu_cpu_opt = &opt->value;
7232 if (ext != NULL)
7233 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7234
7235 return 1;
7236 }
7237
7238 as_bad (_("unknown cpu `%s'"), str);
7239 return 0;
7240 }
7241
7242 static int
7243 aarch64_parse_arch (char *str)
7244 {
7245 const struct aarch64_arch_option_table *opt;
7246 char *ext = strchr (str, '+');
7247 size_t optlen;
7248
7249 if (ext != NULL)
7250 optlen = ext - str;
7251 else
7252 optlen = strlen (str);
7253
7254 if (optlen == 0)
7255 {
7256 as_bad (_("missing architecture name `%s'"), str);
7257 return 0;
7258 }
7259
7260 for (opt = aarch64_archs; opt->name != NULL; opt++)
7261 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7262 {
7263 march_cpu_opt = &opt->value;
7264 if (ext != NULL)
7265 return aarch64_parse_features (ext, &march_cpu_opt);
7266
7267 return 1;
7268 }
7269
7270 as_bad (_("unknown architecture `%s'\n"), str);
7271 return 0;
7272 }
7273
7274 static struct aarch64_long_option_table aarch64_long_opts[] = {
7275 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7276 aarch64_parse_cpu, NULL},
7277 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7278 aarch64_parse_arch, NULL},
7279 {NULL, NULL, 0, NULL}
7280 };
7281
7282 int
7283 md_parse_option (int c, char *arg)
7284 {
7285 struct aarch64_option_table *opt;
7286 struct aarch64_long_option_table *lopt;
7287
7288 switch (c)
7289 {
7290 #ifdef OPTION_EB
7291 case OPTION_EB:
7292 target_big_endian = 1;
7293 break;
7294 #endif
7295
7296 #ifdef OPTION_EL
7297 case OPTION_EL:
7298 target_big_endian = 0;
7299 break;
7300 #endif
7301
7302 case 'a':
7303 /* Listing option. Just ignore these, we don't support additional
7304 ones. */
7305 return 0;
7306
7307 default:
7308 for (opt = aarch64_opts; opt->option != NULL; opt++)
7309 {
7310 if (c == opt->option[0]
7311 && ((arg == NULL && opt->option[1] == 0)
7312 || streq (arg, opt->option + 1)))
7313 {
7314 /* If the option is deprecated, tell the user. */
7315 if (opt->deprecated != NULL)
7316 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7317 arg ? arg : "", _(opt->deprecated));
7318
7319 if (opt->var != NULL)
7320 *opt->var = opt->value;
7321
7322 return 1;
7323 }
7324 }
7325
7326 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7327 {
7328 /* These options are expected to have an argument. */
7329 if (c == lopt->option[0]
7330 && arg != NULL
7331 && strncmp (arg, lopt->option + 1,
7332 strlen (lopt->option + 1)) == 0)
7333 {
7334 /* If the option is deprecated, tell the user. */
7335 if (lopt->deprecated != NULL)
7336 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7337 _(lopt->deprecated));
7338
7339 /* Call the sub-option parser. */
7340 return lopt->func (arg + strlen (lopt->option) - 1);
7341 }
7342 }
7343
7344 return 0;
7345 }
7346
7347 return 1;
7348 }
7349
7350 void
7351 md_show_usage (FILE * fp)
7352 {
7353 struct aarch64_option_table *opt;
7354 struct aarch64_long_option_table *lopt;
7355
7356 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7357
7358 for (opt = aarch64_opts; opt->option != NULL; opt++)
7359 if (opt->help != NULL)
7360 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7361
7362 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7363 if (lopt->help != NULL)
7364 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7365
7366 #ifdef OPTION_EB
7367 fprintf (fp, _("\
7368 -EB assemble code for a big-endian cpu\n"));
7369 #endif
7370
7371 #ifdef OPTION_EL
7372 fprintf (fp, _("\
7373 -EL assemble code for a little-endian cpu\n"));
7374 #endif
7375 }
7376
7377 /* Parse a .cpu directive. */
7378
7379 static void
7380 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7381 {
7382 const struct aarch64_cpu_option_table *opt;
7383 char saved_char;
7384 char *name;
7385 char *ext;
7386 size_t optlen;
7387
7388 name = input_line_pointer;
7389 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7390 input_line_pointer++;
7391 saved_char = *input_line_pointer;
7392 *input_line_pointer = 0;
7393
7394 ext = strchr (name, '+');
7395
7396 if (ext != NULL)
7397 optlen = ext - name;
7398 else
7399 optlen = strlen (name);
7400
7401 /* Skip the first "all" entry. */
7402 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7403 if (strlen (opt->name) == optlen
7404 && strncmp (name, opt->name, optlen) == 0)
7405 {
7406 mcpu_cpu_opt = &opt->value;
7407 if (ext != NULL)
7408 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7409 return;
7410
7411 cpu_variant = *mcpu_cpu_opt;
7412
7413 *input_line_pointer = saved_char;
7414 demand_empty_rest_of_line ();
7415 return;
7416 }
7417 as_bad (_("unknown cpu `%s'"), name);
7418 *input_line_pointer = saved_char;
7419 ignore_rest_of_line ();
7420 }
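/* For example, a source file can select the target with a directive such as

   .cpu cortex-a53+crc

   which behaves like the corresponding -mcpu= command-line option for the
   code that follows. */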
7421
7422
7423 /* Parse a .arch directive. */
7424
7425 static void
7426 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7427 {
7428 const struct aarch64_arch_option_table *opt;
7429 char saved_char;
7430 char *name;
7431 char *ext;
7432 size_t optlen;
7433
7434 name = input_line_pointer;
7435 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7436 input_line_pointer++;
7437 saved_char = *input_line_pointer;
7438 *input_line_pointer = 0;
7439
7440 ext = strchr (name, '+');
7441
7442 if (ext != NULL)
7443 optlen = ext - name;
7444 else
7445 optlen = strlen (name);
7446
7447 /* Skip the first "all" entry. */
7448 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7449 if (strlen (opt->name) == optlen
7450 && strncmp (name, opt->name, optlen) == 0)
7451 {
7452 mcpu_cpu_opt = &opt->value;
7453 if (ext != NULL)
7454 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7455 return;
7456
7457 cpu_variant = *mcpu_cpu_opt;
7458
7459 *input_line_pointer = saved_char;
7460 demand_empty_rest_of_line ();
7461 return;
7462 }
7463
7464 as_bad (_("unknown architecture `%s'\n"), name);
7465 *input_line_pointer = saved_char;
7466 ignore_rest_of_line ();
7467 }
7468
7469 /* Copy symbol information. */
7470
7471 void
7472 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7473 {
7474 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7475 }