1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63 #endif
64
65 enum neon_el_type
66 {
67 NT_invtype = -1,
68 NT_b,
69 NT_h,
70 NT_s,
71 NT_d,
72 NT_q
73 };
74
75 /* Bits for DEFINED field in neon_type_el. */
76 #define NTA_HASTYPE 1
77 #define NTA_HASINDEX 2
78
79 struct neon_type_el
80 {
81 enum neon_el_type type;
82 unsigned char defined;
83 unsigned width;
84 int64_t index;
85 };
86
87 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
88
89 struct reloc
90 {
91 bfd_reloc_code_real_type type;
92 expressionS exp;
93 int pc_rel;
94 enum aarch64_opnd opnd;
95 uint32_t flags;
96 unsigned need_libopcodes_p : 1;
97 };
98
99 struct aarch64_instruction
100 {
101 /* libopcodes structure for instruction intermediate representation. */
102 aarch64_inst base;
103 /* Record assembly errors found during the parsing. */
104 struct
105 {
106 enum aarch64_operand_error_kind kind;
107 const char *error;
108 } parsing_error;
109 /* The condition that appears in the assembly line. */
110 int cond;
111 /* Relocation information (including the GAS internal fixup). */
112 struct reloc reloc;
113 /* Need to generate an immediate in the literal pool. */
114 unsigned gen_lit_pool : 1;
115 };
116
117 typedef struct aarch64_instruction aarch64_instruction;
118
119 static aarch64_instruction inst;
120
121 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
122 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
123
124 /* Diagnostics inline function utilities.
125
126 These are lightweight utilities which should only be called by parse_operands
127 and other parsers. GAS processes each assembly line by parsing it against
128 instruction template(s); in the case of multiple templates (for the same
129 mnemonic name), those templates are tried one by one until one succeeds or
130 all fail. An assembly line may fail a few templates before being
131 successfully parsed; an error saved here in most cases is not a user error
132 but an error indicating the current template is not the right template.
133 Therefore it is very important that errors can be saved at a low cost during
134 the parsing; we don't want to slow down the whole parsing by recording
135 non-user errors in detail.
136
137 Remember that the objective is to help GAS pick up the most appropriate
138 error message in the case of multiple templates, e.g. FMOV which has 8
139 templates. */
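/* For illustration only (a sketch of the intended usage, not the actual GAS
   control flow): a per-template parse loop is expected to use these helpers
   roughly as follows:

       clear_error ();
       if (parse_operands (str, opcode))
         ...                  // success; any error saved on the way is moot
       else if (error_p ())
         ...                  // get_error_kind () / get_error_message ()
                              // say why this template was rejected

   Only after every candidate template has been rejected is the most
   appropriate saved error reported to the user via as_bad.  */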
140
141 static inline void
142 clear_error (void)
143 {
144 inst.parsing_error.kind = AARCH64_OPDE_NIL;
145 inst.parsing_error.error = NULL;
146 }
147
148 static inline bfd_boolean
149 error_p (void)
150 {
151 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
152 }
153
154 static inline const char *
155 get_error_message (void)
156 {
157 return inst.parsing_error.error;
158 }
159
160 static inline void
161 set_error_message (const char *error)
162 {
163 inst.parsing_error.error = error;
164 }
165
166 static inline enum aarch64_operand_error_kind
167 get_error_kind (void)
168 {
169 return inst.parsing_error.kind;
170 }
171
172 static inline void
173 set_error_kind (enum aarch64_operand_error_kind kind)
174 {
175 inst.parsing_error.kind = kind;
176 }
177
178 static inline void
179 set_error (enum aarch64_operand_error_kind kind, const char *error)
180 {
181 inst.parsing_error.kind = kind;
182 inst.parsing_error.error = error;
183 }
184
185 static inline void
186 set_recoverable_error (const char *error)
187 {
188 set_error (AARCH64_OPDE_RECOVERABLE, error);
189 }
190
191 /* Use the DESC field of the corresponding aarch64_operand entry to compose
192 the error message. */
193 static inline void
194 set_default_error (void)
195 {
196 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
197 }
198
199 static inline void
200 set_syntax_error (const char *error)
201 {
202 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
203 }
204
205 static inline void
206 set_first_syntax_error (const char *error)
207 {
208 if (! error_p ())
209 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
210 }
211
212 static inline void
213 set_fatal_syntax_error (const char *error)
214 {
215 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
216 }
217 \f
218 /* Number of littlenums required to hold an extended precision number. */
219 #define MAX_LITTLENUMS 6
220
221 /* Return value for certain parsers when the parsing fails; those parsers
222 return the information of the parsed result, e.g. register number, on
223 success. */
224 #define PARSE_FAIL -1
225
226 /* This is an invalid condition code that means no conditional field is
227 present. */
228 #define COND_ALWAYS 0x10
229
230 typedef struct
231 {
232 const char *template;
233 unsigned long value;
234 } asm_barrier_opt;
235
236 typedef struct
237 {
238 const char *template;
239 uint32_t value;
240 } asm_nzcv;
241
242 struct reloc_entry
243 {
244 char *name;
245 bfd_reloc_code_real_type reloc;
246 };
247
248 /* Structure for a hash table entry for a register. */
249 typedef struct
250 {
251 const char *name;
252 unsigned char number;
253 unsigned char type;
254 unsigned char builtin;
255 } reg_entry;
256
257 /* Macros to define the register types and masks for the purpose
258 of parsing. */
259
260 #undef AARCH64_REG_TYPES
261 #define AARCH64_REG_TYPES \
262 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
263 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
264 BASIC_REG_TYPE(SP_32) /* wsp */ \
265 BASIC_REG_TYPE(SP_64) /* sp */ \
266 BASIC_REG_TYPE(Z_32) /* wzr */ \
267 BASIC_REG_TYPE(Z_64) /* xzr */ \
268 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
269 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
270 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
271 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
272 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
273 BASIC_REG_TYPE(CN) /* c[0-7] */ \
274 BASIC_REG_TYPE(VN) /* v[0-31] */ \
275 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
276 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
277 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
278 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
279 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
280 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
281 /* Typecheck: any [BHSDQ]P FP. */ \
282 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
283 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
284 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
285 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
286 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
287 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
288 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
289 /* Any integer register; used for error messages only. */ \
290 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
292 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
293 /* Pseudo type to mark the end of the enumerator sequence. */ \
294 BASIC_REG_TYPE(MAX)
295
296 #undef BASIC_REG_TYPE
297 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
298 #undef MULTI_REG_TYPE
299 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
300
301 /* Register type enumerators. */
302 typedef enum
303 {
304 /* A list of REG_TYPE_*. */
305 AARCH64_REG_TYPES
306 } aarch64_reg_type;
307
308 #undef BASIC_REG_TYPE
309 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
310 #undef REG_TYPE
311 #define REG_TYPE(T) (1 << REG_TYPE_##T)
312 #undef MULTI_REG_TYPE
313 #define MULTI_REG_TYPE(T,V) V,
314
315 /* Values indexed by aarch64_reg_type to assist the type checking. */
316 static const unsigned reg_type_masks[] =
317 {
318 AARCH64_REG_TYPES
319 };
320
321 #undef BASIC_REG_TYPE
322 #undef REG_TYPE
323 #undef MULTI_REG_TYPE
324 #undef AARCH64_REG_TYPES
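/* For illustration, the X-macro list above expands (abbreviated) to

       typedef enum
       {
         REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_VN,
         REG_TYPE_R64_SP, ..., REG_TYPE_MAX
       } aarch64_reg_type;

   while a multi-type mask such as

       reg_type_masks[REG_TYPE_R64_SP]

   becomes (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64); i.e. each mask is
   the bitwise OR of the single-type bits it accepts, which is what the
   type checking below relies on.  */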
325
326 /* Diagnostics used when we don't get a register of the expected type.
327 Note: this has to be synchronized with the aarch64_reg_type definitions
328 above. */
329 static const char *
330 get_reg_expected_msg (aarch64_reg_type reg_type)
331 {
332 const char *msg;
333
334 switch (reg_type)
335 {
336 case REG_TYPE_R_32:
337 msg = N_("integer 32-bit register expected");
338 break;
339 case REG_TYPE_R_64:
340 msg = N_("integer 64-bit register expected");
341 break;
342 case REG_TYPE_R_N:
343 msg = N_("integer register expected");
344 break;
345 case REG_TYPE_R_Z_SP:
346 msg = N_("integer, zero or SP register expected");
347 break;
348 case REG_TYPE_FP_B:
349 msg = N_("8-bit SIMD scalar register expected");
350 break;
351 case REG_TYPE_FP_H:
352 msg = N_("16-bit SIMD scalar or floating-point half precision "
353 "register expected");
354 break;
355 case REG_TYPE_FP_S:
356 msg = N_("32-bit SIMD scalar or floating-point single precision "
357 "register expected");
358 break;
359 case REG_TYPE_FP_D:
360 msg = N_("64-bit SIMD scalar or floating-point double precision "
361 "register expected");
362 break;
363 case REG_TYPE_FP_Q:
364 msg = N_("128-bit SIMD scalar or floating-point quad precision "
365 "register expected");
366 break;
367 case REG_TYPE_CN:
368 msg = N_("C0 - C15 expected");
369 break;
370 case REG_TYPE_R_Z_BHSDQ_V:
371 msg = N_("register expected");
372 break;
373 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
374 msg = N_("SIMD scalar or floating-point register expected");
375 break;
376 case REG_TYPE_VN: /* any V reg */
377 msg = N_("vector register expected");
378 break;
379 default:
380 as_fatal (_("invalid register type %d"), reg_type);
381 }
382 return msg;
383 }
384
385 /* Some well known registers that we refer to directly elsewhere. */
386 #define REG_SP 31
387
388 /* Instructions take 4 bytes in the object file. */
389 #define INSN_SIZE 4
390
391 /* Define some common error messages. */
392 #define BAD_SP _("SP not allowed here")
393
394 static struct hash_control *aarch64_ops_hsh;
395 static struct hash_control *aarch64_cond_hsh;
396 static struct hash_control *aarch64_shift_hsh;
397 static struct hash_control *aarch64_sys_regs_hsh;
398 static struct hash_control *aarch64_pstatefield_hsh;
399 static struct hash_control *aarch64_sys_regs_ic_hsh;
400 static struct hash_control *aarch64_sys_regs_dc_hsh;
401 static struct hash_control *aarch64_sys_regs_at_hsh;
402 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
403 static struct hash_control *aarch64_reg_hsh;
404 static struct hash_control *aarch64_barrier_opt_hsh;
405 static struct hash_control *aarch64_nzcv_hsh;
406 static struct hash_control *aarch64_pldop_hsh;
407
408 /* Stuff needed to resolve the label ambiguity
409 As:
410 ...
411 label: <insn>
412 may differ from:
413 ...
414 label:
415 <insn> */
416
417 static symbolS *last_label_seen;
418
419 /* Literal pool structure. Held on a per-section
420 and per-sub-section basis. */
421
422 #define MAX_LITERAL_POOL_SIZE 1024
423 typedef struct literal_pool
424 {
425 expressionS literals[MAX_LITERAL_POOL_SIZE];
426 unsigned int next_free_entry;
427 unsigned int id;
428 symbolS *symbol;
429 segT section;
430 subsegT sub_section;
431 int size;
432 struct literal_pool *next;
433 } literal_pool;
434
435 /* Pointer to a linked list of literal pools. */
436 static literal_pool *list_of_pools = NULL;
437 \f
438 /* Pure syntax. */
439
440 /* This array holds the chars that always start a comment. If the
441 pre-processor is disabled, these aren't very useful. */
442 const char comment_chars[] = "";
443
444 /* This array holds the chars that only start a comment at the beginning of
445 a line. If the line seems to have the form '# 123 filename'
446 .line and .file directives will appear in the pre-processed output. */
447 /* Note that input_file.c hand checks for '#' at the beginning of the
448 first line of the input file. This is because the compiler outputs
449 #NO_APP at the beginning of its output. */
450 /* Also note that comments like this one will always work. */
451 const char line_comment_chars[] = "#";
452
453 const char line_separator_chars[] = ";";
454
455 /* Chars that can be used to separate the mantissa
456 from the exponent in floating point numbers. */
457 const char EXP_CHARS[] = "eE";
458
459 /* Chars that mean this number is a floating point constant. */
460 /* As in 0f12.456 */
461 /* or 0d1.2345e12 */
462
463 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
464
465 /* Prefix character that indicates the start of an immediate value. */
466 #define is_immediate_prefix(C) ((C) == '#')
467
468 /* Separator character handling. */
469
470 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
471
472 static inline bfd_boolean
473 skip_past_char (char **str, char c)
474 {
475 if (**str == c)
476 {
477 (*str)++;
478 return TRUE;
479 }
480 else
481 return FALSE;
482 }
483
484 #define skip_past_comma(str) skip_past_char (str, ',')
485
486 /* Arithmetic expressions (possibly involving symbols). */
487
488 static bfd_boolean in_my_get_expression_p = FALSE;
489
490 /* Third argument to my_get_expression. */
491 #define GE_NO_PREFIX 0
492 #define GE_OPT_PREFIX 1
493
494 /* Return TRUE if the string pointed to by *STR is successfully parsed
495 as a valid expression; *EP will be filled with the information of
496 such an expression. Otherwise return FALSE. */
497
498 static bfd_boolean
499 my_get_expression (expressionS * ep, char **str, int prefix_mode,
500 int reject_absent)
501 {
502 char *save_in;
503 segT seg;
504 int prefix_present_p = 0;
505
506 switch (prefix_mode)
507 {
508 case GE_NO_PREFIX:
509 break;
510 case GE_OPT_PREFIX:
511 if (is_immediate_prefix (**str))
512 {
513 (*str)++;
514 prefix_present_p = 1;
515 }
516 break;
517 default:
518 abort ();
519 }
520
521 memset (ep, 0, sizeof (expressionS));
522
523 save_in = input_line_pointer;
524 input_line_pointer = *str;
525 in_my_get_expression_p = TRUE;
526 seg = expression (ep);
527 in_my_get_expression_p = FALSE;
528
529 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
530 {
531 /* We found a bad expression in md_operand(). */
532 *str = input_line_pointer;
533 input_line_pointer = save_in;
534 if (prefix_present_p && ! error_p ())
535 set_fatal_syntax_error (_("bad expression"));
536 else
537 set_first_syntax_error (_("bad expression"));
538 return FALSE;
539 }
540
541 #ifdef OBJ_AOUT
542 if (seg != absolute_section
543 && seg != text_section
544 && seg != data_section
545 && seg != bss_section && seg != undefined_section)
546 {
547 set_syntax_error (_("bad segment"));
548 *str = input_line_pointer;
549 input_line_pointer = save_in;
550 return FALSE;
551 }
552 #else
553 (void) seg;
554 #endif
555
556 *str = input_line_pointer;
557 input_line_pointer = save_in;
558 return TRUE;
559 }
560
561 /* Turn a string in input_line_pointer into a floating point constant
562 of type TYPE, and store the appropriate bytes in *LITP. The number
563 of LITTLENUMS emitted is stored in *SIZEP. An error message is
564 returned, or NULL on OK. */
565
566 char *
567 md_atof (int type, char *litP, int *sizeP)
568 {
569 return ieee_md_atof (type, litP, sizeP, target_big_endian);
570 }
571
572 /* We handle all bad expressions here, so that we can report the faulty
573 instruction in the error message. */
574 void
575 md_operand (expressionS * exp)
576 {
577 if (in_my_get_expression_p)
578 exp->X_op = O_illegal;
579 }
580
581 /* Immediate values. */
582
583 /* Errors may be set multiple times during parsing or bit encoding
584 (particularly in the Neon bits), but usually the earliest error which is set
585 will be the most meaningful. Avoid overwriting it with later (cascading)
586 errors by calling this function. */
587
588 static void
589 first_error (const char *error)
590 {
591 if (! error_p ())
592 set_syntax_error (error);
593 }
594
595 /* Similar to first_error, but this function accepts a formatted error
596 message. */
597 static void
598 first_error_fmt (const char *format, ...)
599 {
600 va_list args;
601 enum
602 { size = 100 };
603 /* N.B. this single buffer will not cause error messages for different
604 instructions to pollute each other; this is because at the end of
605 processing of each assembly line, the error message, if any, will be
606 collected by as_bad. */
607 static char buffer[size];
608
609 if (! error_p ())
610 {
611 int ret ATTRIBUTE_UNUSED;
612 va_start (args, format);
613 ret = vsnprintf (buffer, size, format, args);
614 know (ret <= size - 1 && ret >= 0);
615 va_end (args);
616 set_syntax_error (buffer);
617 }
618 }
619
620 /* Register parsing. */
621
622 /* Generic register parser which is called by other specialized
623 register parsers.
624 CCP points to what should be the beginning of a register name.
625 If it is indeed a valid register name, advance CCP over it and
626 return the reg_entry structure; otherwise return NULL.
627 It does not issue diagnostics. */
628
629 static reg_entry *
630 parse_reg (char **ccp)
631 {
632 char *start = *ccp;
633 char *p;
634 reg_entry *reg;
635
636 #ifdef REGISTER_PREFIX
637 if (*start != REGISTER_PREFIX)
638 return NULL;
639 start++;
640 #endif
641
642 p = start;
643 if (!ISALPHA (*p) || !is_name_beginner (*p))
644 return NULL;
645
646 do
647 p++;
648 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
649
650 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
651
652 if (!reg)
653 return NULL;
654
655 *ccp = p;
656 return reg;
657 }
658
659 /* Return TRUE if REG->TYPE is valid for the required register type TYPE;
660 otherwise return FALSE. */
661 static bfd_boolean
662 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
663 {
664 if (reg->type == type)
665 return TRUE;
666
667 switch (type)
668 {
669 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
670 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
671 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
672 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
673 case REG_TYPE_VN: /* Vector register. */
674 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
675 return ((reg_type_masks[reg->type] & reg_type_masks[type])
676 == reg_type_masks[reg->type]);
677 default:
678 as_fatal ("unhandled type %d", type);
679 abort ();
680 }
681 }
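/* Worked example of the subset test above: checking a plain W register
   (REG_TYPE_R_32) against REG_TYPE_R_Z_SP succeeds, because the single bit
   of the R_32 mask is contained in the R_Z_SP mask; checking the same
   register against REG_TYPE_R64_SP fails, because that mask only contains
   the R_64 and SP_64 bits.  */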
682
683 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
684 Return the register number otherwise. *ISREG32 is set to one if the
685 register is 32-bit wide; *ISREGZERO is set to one if the register is
686 of type Z_32 or Z_64.
687 Note that this function does not issue any diagnostics. */
688
689 static int
690 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
691 int *isreg32, int *isregzero)
692 {
693 char *str = *ccp;
694 const reg_entry *reg = parse_reg (&str);
695
696 if (reg == NULL)
697 return PARSE_FAIL;
698
699 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
700 return PARSE_FAIL;
701
702 switch (reg->type)
703 {
704 case REG_TYPE_SP_32:
705 case REG_TYPE_SP_64:
706 if (reject_sp)
707 return PARSE_FAIL;
708 *isreg32 = reg->type == REG_TYPE_SP_32;
709 *isregzero = 0;
710 break;
711 case REG_TYPE_R_32:
712 case REG_TYPE_R_64:
713 *isreg32 = reg->type == REG_TYPE_R_32;
714 *isregzero = 0;
715 break;
716 case REG_TYPE_Z_32:
717 case REG_TYPE_Z_64:
718 if (reject_rz)
719 return PARSE_FAIL;
720 *isreg32 = reg->type == REG_TYPE_Z_32;
721 *isregzero = 1;
722 break;
723 default:
724 return PARSE_FAIL;
725 }
726
727 *ccp = str;
728
729 return reg->number;
730 }
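/* For example (illustrative): given "w3" this returns 3 with *ISREG32 = 1
   and *ISREGZERO = 0; given "xzr" it returns 31 with *ISREG32 = 0 and
   *ISREGZERO = 1 (or PARSE_FAIL when REJECT_RZ is set); given "sp" it
   returns 31 with *ISREG32 = 0 (or PARSE_FAIL when REJECT_SP is set).  */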
731
732 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
733 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
734 otherwise return FALSE.
735
736 Accept only one occurrence of:
737 8b 16b 4h 8h 2s 4s 1d 2d
738 b h s d q */
739 static bfd_boolean
740 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
741 {
742 char *ptr = *str;
743 unsigned width;
744 unsigned element_size;
745 enum neon_el_type type;
746
747 /* skip '.' */
748 ptr++;
749
750 if (!ISDIGIT (*ptr))
751 {
752 width = 0;
753 goto elt_size;
754 }
755 width = strtoul (ptr, &ptr, 10);
756 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
757 {
758 first_error_fmt (_("bad size %d in vector width specifier"), width);
759 return FALSE;
760 }
761
762 elt_size:
763 switch (TOLOWER (*ptr))
764 {
765 case 'b':
766 type = NT_b;
767 element_size = 8;
768 break;
769 case 'h':
770 type = NT_h;
771 element_size = 16;
772 break;
773 case 's':
774 type = NT_s;
775 element_size = 32;
776 break;
777 case 'd':
778 type = NT_d;
779 element_size = 64;
780 break;
781 case 'q':
782 if (width == 1)
783 {
784 type = NT_q;
785 element_size = 128;
786 break;
787 }
788 /* fall through. */
789 default:
790 if (*ptr != '\0')
791 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
792 else
793 first_error (_("missing element size"));
794 return FALSE;
795 }
796 if (width != 0 && width * element_size != 64 && width * element_size != 128)
797 {
798 first_error_fmt (_
799 ("invalid element size %d and vector size combination %c"),
800 width, *ptr);
801 return FALSE;
802 }
803 ptr++;
804
805 parsed_type->type = type;
806 parsed_type->width = width;
807
808 *str = ptr;
809
810 return TRUE;
811 }
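/* Examples of qualifiers accepted above (illustrative): ".4s" yields type
   NT_s, width 4 (4 x 32 = 128 bits); ".8b" yields NT_b, width 8 (64 bits);
   ".s" on its own yields width 0, the form later used for element-indexed
   operands; ".2b" is rejected because 2 x 8 bits is neither 64 nor 128;
   ".1q" is the only accepted use of the "q" element size.  */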
812
813 /* Parse a single type, e.g. ".8b", leading period included.
814 Only applicable to Vn registers.
815
816 Return TRUE on success; otherwise return FALSE. */
817 static bfd_boolean
818 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
819 {
820 char *str = *ccp;
821
822 if (*str == '.')
823 {
824 if (! parse_neon_type_for_operand (vectype, &str))
825 {
826 first_error (_("vector type expected"));
827 return FALSE;
828 }
829 }
830 else
831 return FALSE;
832
833 *ccp = str;
834
835 return TRUE;
836 }
837
838 /* Parse a register of the type TYPE.
839
840 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
841 name or the parsed register is not of TYPE.
842
843 Otherwise return the register number, and optionally fill in the actual
844 type of the register in *RTYPE when multiple alternatives were given, and
845 return the register shape and element index information in *TYPEINFO.
846
847 IN_REG_LIST should be set to TRUE if the caller is parsing a register
848 list. */
849
850 static int
851 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
852 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
853 {
854 char *str = *ccp;
855 const reg_entry *reg = parse_reg (&str);
856 struct neon_type_el atype;
857 struct neon_type_el parsetype;
858 bfd_boolean is_typed_vecreg = FALSE;
859
860 atype.defined = 0;
861 atype.type = NT_invtype;
862 atype.width = -1;
863 atype.index = 0;
864
865 if (reg == NULL)
866 {
867 if (typeinfo)
868 *typeinfo = atype;
869 set_default_error ();
870 return PARSE_FAIL;
871 }
872
873 if (! aarch64_check_reg_type (reg, type))
874 {
875 DEBUG_TRACE ("reg type check failed");
876 set_default_error ();
877 return PARSE_FAIL;
878 }
879 type = reg->type;
880
881 if (type == REG_TYPE_VN
882 && parse_neon_operand_type (&parsetype, &str))
883 {
884 /* Register is of the form Vn.[bhsdq]. */
885 is_typed_vecreg = TRUE;
886
887 if (parsetype.width == 0)
888 /* Expect index. In the new scheme we cannot have
889 Vn.[bhsdq] represent a scalar. Therefore any
890 Vn.[bhsdq] should have an index following it.
891 Except in reglists, of course. */
892 atype.defined |= NTA_HASINDEX;
893 else
894 atype.defined |= NTA_HASTYPE;
895
896 atype.type = parsetype.type;
897 atype.width = parsetype.width;
898 }
899
900 if (skip_past_char (&str, '['))
901 {
902 expressionS exp;
903
904 /* Reject Sn[index] syntax. */
905 if (!is_typed_vecreg)
906 {
907 first_error (_("this type of register can't be indexed"));
908 return PARSE_FAIL;
909 }
910
911 if (in_reg_list == TRUE)
912 {
913 first_error (_("index not allowed inside register list"));
914 return PARSE_FAIL;
915 }
916
917 atype.defined |= NTA_HASINDEX;
918
919 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
920
921 if (exp.X_op != O_constant)
922 {
923 first_error (_("constant expression required"));
924 return PARSE_FAIL;
925 }
926
927 if (! skip_past_char (&str, ']'))
928 return PARSE_FAIL;
929
930 atype.index = exp.X_add_number;
931 }
932 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
933 {
934 /* Indexed vector register expected. */
935 first_error (_("indexed vector register expected"));
936 return PARSE_FAIL;
937 }
938
939 /* A vector reg Vn should be typed or indexed. */
940 if (type == REG_TYPE_VN && atype.defined == 0)
941 {
942 first_error (_("invalid use of vector register"));
943 }
944
945 if (typeinfo)
946 *typeinfo = atype;
947
948 if (rtype)
949 *rtype = type;
950
951 *ccp = str;
952
953 return reg->number;
954 }
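/* Illustrative results of the parser above: "v2.4s" gives register number 2
   with NTA_HASTYPE set (type NT_s, width 4); "v0.d[1]" gives register 0
   with NTA_HASINDEX set and index 1; a bare "v3" (no qualifier, no index)
   triggers the "invalid use of vector register" diagnostic; for non-vector
   types, e.g. "x7" parsed against REG_TYPE_R_Z_SP, the register number is
   returned and *TYPEINFO is left with nothing defined.  */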
955
956 /* Parse register.
957
958 Return the register number on success; return PARSE_FAIL otherwise.
959
960 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
961 the register (e.g. NEON double or quad reg when either has been requested).
962
963 If this is a NEON vector register with additional type information, fill
964 in the struct pointed to by VECTYPE (if non-NULL).
965
966 This parser does not handle register lists. */
967
968 static int
969 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
970 aarch64_reg_type *rtype, struct neon_type_el *vectype)
971 {
972 struct neon_type_el atype;
973 char *str = *ccp;
974 int reg = parse_typed_reg (&str, type, rtype, &atype,
975 /*in_reg_list= */ FALSE);
976
977 if (reg == PARSE_FAIL)
978 return PARSE_FAIL;
979
980 if (vectype)
981 *vectype = atype;
982
983 *ccp = str;
984
985 return reg;
986 }
987
988 static inline bfd_boolean
989 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
990 {
991 return
992 e1.type == e2.type
993 && e1.defined == e2.defined
994 && e1.width == e2.width && e1.index == e2.index;
995 }
996
997 /* This function parses the NEON register list. On success, it returns
998 the parsed register list information in the following encoded format:
999
1000 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1001 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
1002
1003 The information of the register shape and/or index is returned in
1004 *VECTYPE.
1005
1006 It returns PARSE_FAIL if the register list is invalid.
1007
1008 The list contains one to four registers.
1009 Each register can be one of:
1010 <Vt>.<T>[<index>]
1011 <Vt>.<T>
1012 All <T> should be identical.
1013 All <index> should be identical.
1014 There are restrictions on <Vt> numbers which are checked later
1015 (by reg_list_valid_p). */
1016
1017 static int
1018 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1019 {
1020 char *str = *ccp;
1021 int nb_regs;
1022 struct neon_type_el typeinfo, typeinfo_first;
1023 int val, val_range;
1024 int in_range;
1025 int ret_val;
1026 int i;
1027 bfd_boolean error = FALSE;
1028 bfd_boolean expect_index = FALSE;
1029
1030 if (*str != '{')
1031 {
1032 set_syntax_error (_("expecting {"));
1033 return PARSE_FAIL;
1034 }
1035 str++;
1036
1037 nb_regs = 0;
1038 typeinfo_first.defined = 0;
1039 typeinfo_first.type = NT_invtype;
1040 typeinfo_first.width = -1;
1041 typeinfo_first.index = 0;
1042 ret_val = 0;
1043 val = -1;
1044 val_range = -1;
1045 in_range = 0;
1046 do
1047 {
1048 if (in_range)
1049 {
1050 str++; /* skip over '-' */
1051 val_range = val;
1052 }
1053 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1054 /*in_reg_list= */ TRUE);
1055 if (val == PARSE_FAIL)
1056 {
1057 set_first_syntax_error (_("invalid vector register in list"));
1058 error = TRUE;
1059 continue;
1060 }
1061 /* reject [bhsd]n */
1062 if (typeinfo.defined == 0)
1063 {
1064 set_first_syntax_error (_("invalid scalar register in list"));
1065 error = TRUE;
1066 continue;
1067 }
1068
1069 if (typeinfo.defined & NTA_HASINDEX)
1070 expect_index = TRUE;
1071
1072 if (in_range)
1073 {
1074 if (val < val_range)
1075 {
1076 set_first_syntax_error
1077 (_("invalid range in vector register list"));
1078 error = TRUE;
1079 }
1080 val_range++;
1081 }
1082 else
1083 {
1084 val_range = val;
1085 if (nb_regs == 0)
1086 typeinfo_first = typeinfo;
1087 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1088 {
1089 set_first_syntax_error
1090 (_("type mismatch in vector register list"));
1091 error = TRUE;
1092 }
1093 }
1094 if (! error)
1095 for (i = val_range; i <= val; i++)
1096 {
1097 ret_val |= i << (5 * nb_regs);
1098 nb_regs++;
1099 }
1100 in_range = 0;
1101 }
1102 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1103
1104 skip_whitespace (str);
1105 if (*str != '}')
1106 {
1107 set_first_syntax_error (_("end of vector register list not found"));
1108 error = TRUE;
1109 }
1110 str++;
1111
1112 skip_whitespace (str);
1113
1114 if (expect_index)
1115 {
1116 if (skip_past_char (&str, '['))
1117 {
1118 expressionS exp;
1119
1120 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1121 if (exp.X_op != O_constant)
1122 {
1123 set_first_syntax_error (_("constant expression required."));
1124 error = TRUE;
1125 }
1126 if (! skip_past_char (&str, ']'))
1127 error = TRUE;
1128 else
1129 typeinfo_first.index = exp.X_add_number;
1130 }
1131 else
1132 {
1133 set_first_syntax_error (_("expected index"));
1134 error = TRUE;
1135 }
1136 }
1137
1138 if (nb_regs > 4)
1139 {
1140 set_first_syntax_error (_("too many registers in vector register list"));
1141 error = TRUE;
1142 }
1143 else if (nb_regs == 0)
1144 {
1145 set_first_syntax_error (_("empty vector register list"));
1146 error = TRUE;
1147 }
1148
1149 *ccp = str;
1150 if (! error)
1151 *vectype = typeinfo_first;
1152
1153 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1154 }
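/* Worked example of the encoding returned above: for "{v4.4s - v7.4s}" the
   registers 4, 5, 6 and 7 are collected, so the return value is

       (7 << 17) | (6 << 12) | (5 << 7) | (4 << 2) | (4 - 1)

   i.e. the four register numbers in the 5-bit fields at bits 2-6, 7-11,
   12-16 and 17-21, with (number of registers - 1) in bits 0-1, while
   *VECTYPE describes the common ".4s" shape.  */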
1155
1156 /* Directives: register aliases. */
1157
1158 static reg_entry *
1159 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1160 {
1161 reg_entry *new;
1162 const char *name;
1163
1164 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1165 {
1166 if (new->builtin)
1167 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1168 str);
1169
1170 /* Only warn about a redefinition if it's not defined as the
1171 same register. */
1172 else if (new->number != number || new->type != type)
1173 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1174
1175 return NULL;
1176 }
1177
1178 name = xstrdup (str);
1179 new = xmalloc (sizeof (reg_entry));
1180
1181 new->name = name;
1182 new->number = number;
1183 new->type = type;
1184 new->builtin = FALSE;
1185
1186 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1187 abort ();
1188
1189 return new;
1190 }
1191
1192 /* Look for the .req directive. This is of the form:
1193
1194 new_register_name .req existing_register_name
1195
1196 If we find one, or if it looks sufficiently like one that we want to
1197 handle any error here, return TRUE. Otherwise return FALSE. */
1198
1199 static bfd_boolean
1200 create_register_alias (char *newname, char *p)
1201 {
1202 const reg_entry *old;
1203 char *oldname, *nbuf;
1204 size_t nlen;
1205
1206 /* The input scrubber ensures that whitespace after the mnemonic is
1207 collapsed to single spaces. */
1208 oldname = p;
1209 if (strncmp (oldname, " .req ", 6) != 0)
1210 return FALSE;
1211
1212 oldname += 6;
1213 if (*oldname == '\0')
1214 return FALSE;
1215
1216 old = hash_find (aarch64_reg_hsh, oldname);
1217 if (!old)
1218 {
1219 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1220 return TRUE;
1221 }
1222
1223 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1224 the desired alias name, and p points to its end. If not, then
1225 the desired alias name is in the global original_case_string. */
1226 #ifdef TC_CASE_SENSITIVE
1227 nlen = p - newname;
1228 #else
1229 newname = original_case_string;
1230 nlen = strlen (newname);
1231 #endif
1232
1233 nbuf = alloca (nlen + 1);
1234 memcpy (nbuf, newname, nlen);
1235 nbuf[nlen] = '\0';
1236
1237 /* Create aliases under the new name as stated; an all-lowercase
1238 version of the new name; and an all-uppercase version of the new
1239 name. */
1240 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1241 {
1242 for (p = nbuf; *p; p++)
1243 *p = TOUPPER (*p);
1244
1245 if (strncmp (nbuf, newname, nlen))
1246 {
1247 /* If this attempt to create an additional alias fails, do not bother
1248 trying to create the all-lower case alias. We will fail and issue
1249 a second, duplicate error message. This situation arises when the
1250 programmer does something like:
1251 foo .req r0
1252 Foo .req r1
1253 The second .req creates the "Foo" alias but then fails to create
1254 the artificial FOO alias because it has already been created by the
1255 first .req. */
1256 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1257 return TRUE;
1258 }
1259
1260 for (p = nbuf; *p; p++)
1261 *p = TOLOWER (*p);
1262
1263 if (strncmp (nbuf, newname, nlen))
1264 insert_reg_alias (nbuf, old->number, old->type);
1265 }
1266
1267 return TRUE;
1268 }
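/* For example (illustrative): after

       count .req x2

   the aliases "count", "COUNT" and the all-lower-case form (here identical
   to the name as written) all refer to integer register 2 with the same
   type as x2, and may be used anywhere x2 itself would be accepted.  */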
1269
1270 /* Should never be called, as .req goes between the alias and the
1271 register name, not at the beginning of the line. */
1272 static void
1273 s_req (int a ATTRIBUTE_UNUSED)
1274 {
1275 as_bad (_("invalid syntax for .req directive"));
1276 }
1277
1278 /* The .unreq directive deletes an alias which was previously defined
1279 by .req. For example:
1280
1281 my_alias .req r11
1282 .unreq my_alias */
1283
1284 static void
1285 s_unreq (int a ATTRIBUTE_UNUSED)
1286 {
1287 char *name;
1288 char saved_char;
1289
1290 name = input_line_pointer;
1291
1292 while (*input_line_pointer != 0
1293 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1294 ++input_line_pointer;
1295
1296 saved_char = *input_line_pointer;
1297 *input_line_pointer = 0;
1298
1299 if (!*name)
1300 as_bad (_("invalid syntax for .unreq directive"));
1301 else
1302 {
1303 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1304
1305 if (!reg)
1306 as_bad (_("unknown register alias '%s'"), name);
1307 else if (reg->builtin)
1308 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1309 name);
1310 else
1311 {
1312 char *p;
1313 char *nbuf;
1314
1315 hash_delete (aarch64_reg_hsh, name, FALSE);
1316 free ((char *) reg->name);
1317 free (reg);
1318
1319 /* Also locate the all upper case and all lower case versions.
1320 Do not complain if we cannot find one or the other as it
1321 was probably deleted above. */
1322
1323 nbuf = strdup (name);
1324 for (p = nbuf; *p; p++)
1325 *p = TOUPPER (*p);
1326 reg = hash_find (aarch64_reg_hsh, nbuf);
1327 if (reg)
1328 {
1329 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1330 free ((char *) reg->name);
1331 free (reg);
1332 }
1333
1334 for (p = nbuf; *p; p++)
1335 *p = TOLOWER (*p);
1336 reg = hash_find (aarch64_reg_hsh, nbuf);
1337 if (reg)
1338 {
1339 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1340 free ((char *) reg->name);
1341 free (reg);
1342 }
1343
1344 free (nbuf);
1345 }
1346 }
1347
1348 *input_line_pointer = saved_char;
1349 demand_empty_rest_of_line ();
1350 }
1351
1352 /* Directives: Instruction set selection. */
1353
1354 #ifdef OBJ_ELF
1355 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1356 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1357 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1358 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
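/* For example (illustrative), a text section assembled from

       foo:  mov   x0, #1
             ret
             .word 0x12345678

   gets an untyped local symbol "$x" at offset 0 and "$d" at offset 8, so
   that consumers such as disassemblers know which bytes hold instructions
   and which hold data.  */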
1359
1360 /* Create a new mapping symbol for the transition to STATE. */
1361
1362 static void
1363 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1364 {
1365 symbolS *symbolP;
1366 const char *symname;
1367 int type;
1368
1369 switch (state)
1370 {
1371 case MAP_DATA:
1372 symname = "$d";
1373 type = BSF_NO_FLAGS;
1374 break;
1375 case MAP_INSN:
1376 symname = "$x";
1377 type = BSF_NO_FLAGS;
1378 break;
1379 default:
1380 abort ();
1381 }
1382
1383 symbolP = symbol_new (symname, now_seg, value, frag);
1384 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1385
1386 /* Save the mapping symbols for future reference. Also check that
1387 we do not place two mapping symbols at the same offset within a
1388 frag. We'll handle overlap between frags in
1389 check_mapping_symbols.
1390
1391 If .fill or other data filling directive generates zero sized data,
1392 the mapping symbol for the following code will have the same value
1393 as the one generated for the data filling directive. In this case,
1394 we replace the old symbol with the new one at the same address. */
1395 if (value == 0)
1396 {
1397 if (frag->tc_frag_data.first_map != NULL)
1398 {
1399 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1400 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1401 &symbol_lastP);
1402 }
1403 frag->tc_frag_data.first_map = symbolP;
1404 }
1405 if (frag->tc_frag_data.last_map != NULL)
1406 {
1407 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1408 S_GET_VALUE (symbolP));
1409 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1410 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1411 &symbol_lastP);
1412 }
1413 frag->tc_frag_data.last_map = symbolP;
1414 }
1415
1416 /* We must sometimes convert a region marked as code to data during
1417 code alignment, if an odd number of bytes have to be padded. The
1418 code mapping symbol is pushed to an aligned address. */
1419
1420 static void
1421 insert_data_mapping_symbol (enum mstate state,
1422 valueT value, fragS * frag, offsetT bytes)
1423 {
1424 /* If there was already a mapping symbol, remove it. */
1425 if (frag->tc_frag_data.last_map != NULL
1426 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1427 frag->fr_address + value)
1428 {
1429 symbolS *symp = frag->tc_frag_data.last_map;
1430
1431 if (value == 0)
1432 {
1433 know (frag->tc_frag_data.first_map == symp);
1434 frag->tc_frag_data.first_map = NULL;
1435 }
1436 frag->tc_frag_data.last_map = NULL;
1437 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1438 }
1439
1440 make_mapping_symbol (MAP_DATA, value, frag);
1441 make_mapping_symbol (state, value + bytes, frag);
1442 }
1443
1444 static void mapping_state_2 (enum mstate state, int max_chars);
1445
1446 /* Set the mapping state to STATE. Only call this when about to
1447 emit some STATE bytes to the file. */
1448
1449 void
1450 mapping_state (enum mstate state)
1451 {
1452 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1453
1454 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1455
1456 if (mapstate == state)
1457 /* The mapping symbol has already been emitted.
1458 There is nothing else to do. */
1459 return;
1460 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1461 /* This case will be evaluated later in the next else. */
1462 return;
1463 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1464 {
1465 /* Only add the symbol if the offset is > 0:
1466 if we're at the first frag, check its size > 0;
1467 if we're not at the first frag, then for sure
1468 the offset is > 0. */
1469 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1470 const int add_symbol = (frag_now != frag_first)
1471 || (frag_now_fix () > 0);
1472
1473 if (add_symbol)
1474 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1475 }
1476
1477 mapping_state_2 (state, 0);
1478 #undef TRANSITION
1479 }
1480
1481 /* Same as mapping_state, but MAX_CHARS bytes have already been
1482 allocated. Put the mapping symbol that far back. */
1483
1484 static void
1485 mapping_state_2 (enum mstate state, int max_chars)
1486 {
1487 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1488
1489 if (!SEG_NORMAL (now_seg))
1490 return;
1491
1492 if (mapstate == state)
1493 /* The mapping symbol has already been emitted.
1494 There is nothing else to do. */
1495 return;
1496
1497 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1498 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1499 }
1500 #else
1501 #define mapping_state(x) /* nothing */
1502 #define mapping_state_2(x, y) /* nothing */
1503 #endif
1504
1505 /* Directives: sectioning and alignment. */
1506
1507 static void
1508 s_bss (int ignore ATTRIBUTE_UNUSED)
1509 {
1510 /* We don't support putting frags in the BSS segment; we fake it by
1511 marking in_bss, then looking at s_skip for clues. */
1512 subseg_set (bss_section, 0);
1513 demand_empty_rest_of_line ();
1514 mapping_state (MAP_DATA);
1515 }
1516
1517 static void
1518 s_even (int ignore ATTRIBUTE_UNUSED)
1519 {
1520 /* Never make frag if expect extra pass. */
1521 if (!need_pass_2)
1522 frag_align (1, 0, 0);
1523
1524 record_alignment (now_seg, 1);
1525
1526 demand_empty_rest_of_line ();
1527 }
1528
1529 /* Directives: Literal pools. */
1530
1531 static literal_pool *
1532 find_literal_pool (int size)
1533 {
1534 literal_pool *pool;
1535
1536 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1537 {
1538 if (pool->section == now_seg
1539 && pool->sub_section == now_subseg && pool->size == size)
1540 break;
1541 }
1542
1543 return pool;
1544 }
1545
1546 static literal_pool *
1547 find_or_make_literal_pool (int size)
1548 {
1549 /* Next literal pool ID number. */
1550 static unsigned int latest_pool_num = 1;
1551 literal_pool *pool;
1552
1553 pool = find_literal_pool (size);
1554
1555 if (pool == NULL)
1556 {
1557 /* Create a new pool. */
1558 pool = xmalloc (sizeof (*pool));
1559 if (!pool)
1560 return NULL;
1561
1562 /* Currently we always put the literal pool in the current text
1563 section. If we were generating "small" model code where we
1564 knew that all code and initialised data was within 1MB then
1565 we could output literals to mergeable, read-only data
1566 sections. */
1567
1568 pool->next_free_entry = 0;
1569 pool->section = now_seg;
1570 pool->sub_section = now_subseg;
1571 pool->size = size;
1572 pool->next = list_of_pools;
1573 pool->symbol = NULL;
1574
1575 /* Add it to the list. */
1576 list_of_pools = pool;
1577 }
1578
1579 /* New pools, and emptied pools, will have a NULL symbol. */
1580 if (pool->symbol == NULL)
1581 {
1582 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1583 (valueT) 0, &zero_address_frag);
1584 pool->id = latest_pool_num++;
1585 }
1586
1587 /* Done. */
1588 return pool;
1589 }
1590
1591 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1592 Return TRUE on success, otherwise return FALSE. */
1593 static bfd_boolean
1594 add_to_lit_pool (expressionS *exp, int size)
1595 {
1596 literal_pool *pool;
1597 unsigned int entry;
1598
1599 pool = find_or_make_literal_pool (size);
1600
1601 /* Check if this literal value is already in the pool. */
1602 for (entry = 0; entry < pool->next_free_entry; entry++)
1603 {
1604 if ((pool->literals[entry].X_op == exp->X_op)
1605 && (exp->X_op == O_constant)
1606 && (pool->literals[entry].X_add_number == exp->X_add_number)
1607 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1608 break;
1609
1610 if ((pool->literals[entry].X_op == exp->X_op)
1611 && (exp->X_op == O_symbol)
1612 && (pool->literals[entry].X_add_number == exp->X_add_number)
1613 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1614 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1615 break;
1616 }
1617
1618 /* Do we need to create a new entry? */
1619 if (entry == pool->next_free_entry)
1620 {
1621 if (entry >= MAX_LITERAL_POOL_SIZE)
1622 {
1623 set_syntax_error (_("literal pool overflow"));
1624 return FALSE;
1625 }
1626
1627 pool->literals[entry] = *exp;
1628 pool->next_free_entry += 1;
1629 }
1630
1631 exp->X_op = O_symbol;
1632 exp->X_add_number = ((int) entry) * size;
1633 exp->X_add_symbol = pool->symbol;
1634
1635 return TRUE;
1636 }
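/* For example (illustrative): two requests to pool the same 8-byte constant
   share a single entry, and on return the caller's expression has been
   rewritten as

       pool_symbol + entry * size

   so the instruction (typically an "ldr Rt, =imm" converted elsewhere in
   this file into a literal load) references the pool slot rather than the
   raw value.  */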
1637
1638 /* Can't use symbol_new here, so have to create a symbol and then at
1639 a later date assign it a value. That's what these functions do. */
1640
1641 static void
1642 symbol_locate (symbolS * symbolP,
1643 const char *name,/* It is copied, the caller can modify. */
1644 segT segment, /* Segment identifier (SEG_<something>). */
1645 valueT valu, /* Symbol value. */
1646 fragS * frag) /* Associated fragment. */
1647 {
1648 unsigned int name_length;
1649 char *preserved_copy_of_name;
1650
1651 name_length = strlen (name) + 1; /* +1 for \0. */
1652 obstack_grow (&notes, name, name_length);
1653 preserved_copy_of_name = obstack_finish (&notes);
1654
1655 #ifdef tc_canonicalize_symbol_name
1656 preserved_copy_of_name =
1657 tc_canonicalize_symbol_name (preserved_copy_of_name);
1658 #endif
1659
1660 S_SET_NAME (symbolP, preserved_copy_of_name);
1661
1662 S_SET_SEGMENT (symbolP, segment);
1663 S_SET_VALUE (symbolP, valu);
1664 symbol_clear_list_pointers (symbolP);
1665
1666 symbol_set_frag (symbolP, frag);
1667
1668 /* Link to end of symbol chain. */
1669 {
1670 extern int symbol_table_frozen;
1671
1672 if (symbol_table_frozen)
1673 abort ();
1674 }
1675
1676 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1677
1678 obj_symbol_new_hook (symbolP);
1679
1680 #ifdef tc_symbol_new_hook
1681 tc_symbol_new_hook (symbolP);
1682 #endif
1683
1684 #ifdef DEBUG_SYMS
1685 verify_symbol_chain (symbol_rootP, symbol_lastP);
1686 #endif /* DEBUG_SYMS */
1687 }
1688
1689
1690 static void
1691 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1692 {
1693 unsigned int entry;
1694 literal_pool *pool;
1695 char sym_name[20];
1696 int align;
1697
1698 for (align = 2; align <= 4; align++)
1699 {
1700 int size = 1 << align;
1701
1702 pool = find_literal_pool (size);
1703 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1704 continue;
1705
1706 mapping_state (MAP_DATA);
1707
1708 /* Align pool as you have word accesses.
1709 Only make a frag if we have to. */
1710 if (!need_pass_2)
1711 frag_align (align, 0, 0);
1712
1713 record_alignment (now_seg, align);
1714
1715 sprintf (sym_name, "$$lit_\002%x", pool->id);
1716
1717 symbol_locate (pool->symbol, sym_name, now_seg,
1718 (valueT) frag_now_fix (), frag_now);
1719 symbol_table_insert (pool->symbol);
1720
1721 for (entry = 0; entry < pool->next_free_entry; entry++)
1722 /* First output the expression in the instruction to the pool. */
1723 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1724
1725 /* Mark the pool as empty. */
1726 pool->next_free_entry = 0;
1727 pool->symbol = NULL;
1728 }
1729 }
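/* Typical source-level use of the directive above (illustrative):

       ldr    x0, =0x1122334455667788   // constant deposited in a literal pool
       ret
       .ltorg                           // emit the pending literals here

   The pool contents are emitted as data (MAP_DATA), aligned to their entry
   size, under an internal "$$lit_" symbol that the pending literal
   references resolve to.  */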
1730
1731 #ifdef OBJ_ELF
1732 /* Forward declarations for functions below, in the MD interface
1733 section. */
1734 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1735 static struct reloc_table_entry * find_reloc_table_entry (char **);
1736
1737 /* Directives: Data. */
1738 /* N.B. the support for relocation suffix in this directive needs to be
1739 implemented properly. */
1740
1741 static void
1742 s_aarch64_elf_cons (int nbytes)
1743 {
1744 expressionS exp;
1745
1746 #ifdef md_flush_pending_output
1747 md_flush_pending_output ();
1748 #endif
1749
1750 if (is_it_end_of_statement ())
1751 {
1752 demand_empty_rest_of_line ();
1753 return;
1754 }
1755
1756 #ifdef md_cons_align
1757 md_cons_align (nbytes);
1758 #endif
1759
1760 mapping_state (MAP_DATA);
1761 do
1762 {
1763 struct reloc_table_entry *reloc;
1764
1765 expression (&exp);
1766
1767 if (exp.X_op != O_symbol)
1768 emit_expr (&exp, (unsigned int) nbytes);
1769 else
1770 {
1771 skip_past_char (&input_line_pointer, '#');
1772 if (skip_past_char (&input_line_pointer, ':'))
1773 {
1774 reloc = find_reloc_table_entry (&input_line_pointer);
1775 if (reloc == NULL)
1776 as_bad (_("unrecognized relocation suffix"));
1777 else
1778 as_bad (_("unimplemented relocation suffix"));
1779 ignore_rest_of_line ();
1780 return;
1781 }
1782 else
1783 emit_expr (&exp, (unsigned int) nbytes);
1784 }
1785 }
1786 while (*input_line_pointer++ == ',');
1787
1788 /* Put terminator back into stream. */
1789 input_line_pointer--;
1790 demand_empty_rest_of_line ();
1791 }
1792
1793 #endif /* OBJ_ELF */
1794
1795 /* Output a 32-bit word, but mark as an instruction. */
1796
1797 static void
1798 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1799 {
1800 expressionS exp;
1801
1802 #ifdef md_flush_pending_output
1803 md_flush_pending_output ();
1804 #endif
1805
1806 if (is_it_end_of_statement ())
1807 {
1808 demand_empty_rest_of_line ();
1809 return;
1810 }
1811
1812 if (!need_pass_2)
1813 frag_align_code (2, 0);
1814 #ifdef OBJ_ELF
1815 mapping_state (MAP_INSN);
1816 #endif
1817
1818 do
1819 {
1820 expression (&exp);
1821 if (exp.X_op != O_constant)
1822 {
1823 as_bad (_("constant expression required"));
1824 ignore_rest_of_line ();
1825 return;
1826 }
1827
1828 if (target_big_endian)
1829 {
1830 unsigned int val = exp.X_add_number;
1831 exp.X_add_number = SWAP_32 (val);
1832 }
1833 emit_expr (&exp, 4);
1834 }
1835 while (*input_line_pointer++ == ',');
1836
1837 /* Put terminator back into stream. */
1838 input_line_pointer--;
1839 demand_empty_rest_of_line ();
1840 }
1841
1842 #ifdef OBJ_ELF
1843 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1844
1845 static void
1846 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1847 {
1848 expressionS exp;
1849
1850 /* Since we're just labelling the code, there's no need to define a
1851 mapping symbol. */
1852 expression (&exp);
1853 /* Make sure there is enough room in this frag for the following
1854 blr. This trick only works if the blr follows immediately after
1855 the .tlsdesc directive. */
1856 frag_grow (4);
1857 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1858 BFD_RELOC_AARCH64_TLSDESC_CALL);
1859
1860 demand_empty_rest_of_line ();
1861 }
1862 #endif /* OBJ_ELF */
1863
1864 static void s_aarch64_arch (int);
1865 static void s_aarch64_cpu (int);
1866
1867 /* This table describes all the machine specific pseudo-ops the assembler
1868 has to support. The fields are:
1869 pseudo-op name without dot
1870 function to call to execute this pseudo-op
1871 Integer arg to pass to the function. */
1872
1873 const pseudo_typeS md_pseudo_table[] = {
1874 /* Never called because '.req' does not start a line. */
1875 {"req", s_req, 0},
1876 {"unreq", s_unreq, 0},
1877 {"bss", s_bss, 0},
1878 {"even", s_even, 0},
1879 {"ltorg", s_ltorg, 0},
1880 {"pool", s_ltorg, 0},
1881 {"cpu", s_aarch64_cpu, 0},
1882 {"arch", s_aarch64_arch, 0},
1883 {"inst", s_aarch64_inst, 0},
1884 #ifdef OBJ_ELF
1885 {"tlsdesccall", s_tlsdesccall, 0},
1886 {"word", s_aarch64_elf_cons, 4},
1887 {"long", s_aarch64_elf_cons, 4},
1888 {"xword", s_aarch64_elf_cons, 8},
1889 {"dword", s_aarch64_elf_cons, 8},
1890 #endif
1891 {0, 0, 0}
1892 };
1893 \f
1894
1895 /* Check whether STR points to a register name followed by a comma or the
1896 end of line; REG_TYPE indicates which register types are checked
1897 against. Return TRUE if STR is such a register name; otherwise return
1898 FALSE. The function does not intend to produce any diagnostics, but since
1899 the register parser aarch64_reg_parse, which is called by this function,
1900 does produce diagnostics, we call clear_error to clear any diagnostics
1901 that may be generated by aarch64_reg_parse.
1902 Also, the function returns FALSE directly if there is any user error
1903 present at the function entry. This prevents the existing diagnostics
1904 state from being spoiled.
1905 The function currently serves parse_constant_immediate and
1906 parse_big_immediate only. */
1907 static bfd_boolean
1908 reg_name_p (char *str, aarch64_reg_type reg_type)
1909 {
1910 int reg;
1911
1912 /* Prevent the diagnostics state from being spoiled. */
1913 if (error_p ())
1914 return FALSE;
1915
1916 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1917
1918 /* Clear the parsing error that may be set by the reg parser. */
1919 clear_error ();
1920
1921 if (reg == PARSE_FAIL)
1922 return FALSE;
1923
1924 skip_whitespace (str);
1925 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1926 return TRUE;
1927
1928 return FALSE;
1929 }
1930
1931 /* Parser functions used exclusively in instruction operands. */
1932
1933 /* Parse an immediate expression which may not be constant.
1934
1935 To prevent the expression parser from pushing a register name
1936 into the symbol table as an undefined symbol, a check is first
1937 done to find out whether STR is a valid register name followed
1938 by a comma or the end of line. Return FALSE if STR is such a
1939 string. */
1940
1941 static bfd_boolean
1942 parse_immediate_expression (char **str, expressionS *exp)
1943 {
1944 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1945 {
1946 set_recoverable_error (_("immediate operand required"));
1947 return FALSE;
1948 }
1949
1950 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1951
1952 if (exp->X_op == O_absent)
1953 {
1954 set_fatal_syntax_error (_("missing immediate expression"));
1955 return FALSE;
1956 }
1957
1958 return TRUE;
1959 }
1960
1961 /* Constant immediate-value read function for use in insn parsing.
1962 STR points to the beginning of the immediate (with the optional
1963 leading #); *VAL receives the value.
1964
1965 Return TRUE on success; otherwise return FALSE. */
1966
1967 static bfd_boolean
1968 parse_constant_immediate (char **str, int64_t * val)
1969 {
1970 expressionS exp;
1971
1972 if (! parse_immediate_expression (str, &exp))
1973 return FALSE;
1974
1975 if (exp.X_op != O_constant)
1976 {
1977 set_syntax_error (_("constant expression required"));
1978 return FALSE;
1979 }
1980
1981 *val = exp.X_add_number;
1982 return TRUE;
1983 }
1984
1985 static uint32_t
1986 encode_imm_float_bits (uint32_t imm)
1987 {
1988 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
1989 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
1990 }
1991
1992 /* Return TRUE if IMM is a valid floating-point immediate; return FALSE
1993 otherwise. */
1994 static bfd_boolean
1995 aarch64_imm_float_p (uint32_t imm)
1996 {
1997 /* 3 32222222 2221111111111
1998 1 09876543 21098765432109876543210
1999 n Eeeeeexx xxxx0000000000000000000 */
2000 uint32_t e;
2001
2002 e = (imm >> 30) & 0x1;
2003 if (e == 0)
2004 e = 0x3e000000;
2005 else
2006 e = 0x40000000;
2007 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */
2008 && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */
2009 }
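/* Worked example for the two helpers above: 1.0f is 0x3f800000, whose low
   19 bits are zero and whose bits 25-29 are the complement of bit 30, so
   aarch64_imm_float_p accepts it, and encode_imm_float_bits packs bits
   19-25 and bit 31 into the 8-bit immediate 0x70.  A value such as 1.1f
   fails the low-19-bits test and is rejected.  */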
2010
2011 /* Note: this accepts the floating-point 0 constant. */
2012 static bfd_boolean
2013 parse_aarch64_imm_float (char **ccp, int *immed)
2014 {
2015 char *str = *ccp;
2016 char *fpnum;
2017 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2018 int found_fpchar = 0;
2019
2020 skip_past_char (&str, '#');
2021
2022 /* We must not accidentally parse an integer as a floating-point number. Make
2023 sure that the value we parse is not an integer by checking for special
2024 characters '.' or 'e'.
2025 FIXME: This is a hack that is not very efficient, but doing better is
2026 tricky because type information isn't in a very usable state at parse
2027 time. */
2028 fpnum = str;
2029 skip_whitespace (fpnum);
2030
2031 if (strncmp (fpnum, "0x", 2) == 0)
2032 return FALSE;
2033 else
2034 {
2035 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2036 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2037 {
2038 found_fpchar = 1;
2039 break;
2040 }
2041
2042 if (!found_fpchar)
2043 return FALSE;
2044 }
2045
2046 if ((str = atof_ieee (str, 's', words)) != NULL)
2047 {
2048 unsigned fpword = 0;
2049 int i;
2050
2051 /* Our FP word must be 32 bits (single-precision FP). */
2052 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2053 {
2054 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2055 fpword |= words[i];
2056 }
2057
2058 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2059 *immed = fpword;
2060 else
2061 goto invalid_fp;
2062
2063 *ccp = str;
2064
2065 return TRUE;
2066 }
2067
2068 invalid_fp:
2069 set_fatal_syntax_error (_("invalid floating-point constant"));
2070 return FALSE;
2071 }
2072
2073 /* Less-generic immediate-value read function with the possibility of loading
2074 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2075 instructions.
2076
2077 To prevent the expression parser from pushing a register name into the
2078 symbol table as an undefined symbol, a check is first done to find
2079 out whether STR is a valid register name followed by a comma or the end
2080 of line. Return FALSE if STR is such a register. */
2081
2082 static bfd_boolean
2083 parse_big_immediate (char **str, int64_t *imm)
2084 {
2085 char *ptr = *str;
2086
2087 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2088 {
2089 set_syntax_error (_("immediate operand required"));
2090 return FALSE;
2091 }
2092
2093 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2094
2095 if (inst.reloc.exp.X_op == O_constant)
2096 *imm = inst.reloc.exp.X_add_number;
2097
2098 *str = ptr;
2099
2100 return TRUE;
2101 }
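
/* For example (illustrative operands only), an AdvSIMD modified-immediate
   instruction such as "movi v0.2d, #0xff00ff00ff00ff00" relies on the
   function above to read its full 64-bit immediate.  */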
2102
2103 /* Record in *RELOC that operand OPERAND needs a GAS internal fixup.
2104 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2105 assistance from libopcodes. */
2106
2107 static inline void
2108 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2109 const aarch64_opnd_info *operand,
2110 int need_libopcodes_p)
2111 {
2112 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2113 reloc->opnd = operand->type;
2114 if (need_libopcodes_p)
2115 reloc->need_libopcodes_p = 1;
2116 }
2117
2118 /* Return TRUE if the instruction needs to be fixed up later internally by
2119 GAS; otherwise return FALSE. */
2120
2121 static inline bfd_boolean
2122 aarch64_gas_internal_fixup_p (void)
2123 {
2124 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2125 }
2126
2127 /* Assign the immediate value to the relevant field in *OPERAND if
2128 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2129 needs an internal fixup in a later stage.
2130 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2131 IMM.VALUE that may get assigned with the constant. */
2132 static inline void
2133 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2134 aarch64_opnd_info *operand,
2135 int addr_off_p,
2136 int need_libopcodes_p,
2137 int skip_p)
2138 {
2139 if (reloc->exp.X_op == O_constant)
2140 {
2141 if (addr_off_p)
2142 operand->addr.offset.imm = reloc->exp.X_add_number;
2143 else
2144 operand->imm.value = reloc->exp.X_add_number;
2145 reloc->type = BFD_RELOC_UNUSED;
2146 }
2147 else
2148 {
2149 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2150 /* Tell libopcodes whether or not to ignore this operand. This is helpful
2151 when one of the operands needs to be fixed up later but we need
2152 libopcodes to check the other operands. */
2153 operand->skip = skip_p;
2154 }
2155 }
2156
2157 /* Relocation modifiers. Each entry in the table contains the textual
2158 name for a relocation modifier which may be placed before a symbol used as
2159 a load/store offset or an add immediate operand. It must be surrounded by a
2160 leading and trailing colon, for example:
2161
2162 ldr x0, [x1, #:rello:varsym]
2163 add x0, x1, #:rello:varsym */
2164
2165 struct reloc_table_entry
2166 {
2167 const char *name;
2168 int pc_rel;
2169 bfd_reloc_code_real_type adrp_type;
2170 bfd_reloc_code_real_type movw_type;
2171 bfd_reloc_code_real_type add_type;
2172 bfd_reloc_code_real_type ldst_type;
2173 };
2174
2175 static struct reloc_table_entry reloc_table[] = {
2176 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2177 {"lo12", 0,
2178 0,
2179 0,
2180 BFD_RELOC_AARCH64_ADD_LO12,
2181 BFD_RELOC_AARCH64_LDST_LO12},
2182
2183 /* Higher 21 bits of pc-relative page offset: ADRP */
2184 {"pg_hi21", 1,
2185 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2186 0,
2187 0,
2188 0},
2189
2190 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2191 {"pg_hi21_nc", 1,
2192 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2193 0,
2194 0,
2195 0},
2196
2197 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2198 {"abs_g0", 0,
2199 0,
2200 BFD_RELOC_AARCH64_MOVW_G0,
2201 0,
2202 0},
2203
2204 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2205 {"abs_g0_s", 0,
2206 0,
2207 BFD_RELOC_AARCH64_MOVW_G0_S,
2208 0,
2209 0},
2210
2211 /* Less significant bits 0-15 of address/value: MOVK, no check */
2212 {"abs_g0_nc", 0,
2213 0,
2214 BFD_RELOC_AARCH64_MOVW_G0_NC,
2215 0,
2216 0},
2217
2218 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2219 {"abs_g1", 0,
2220 0,
2221 BFD_RELOC_AARCH64_MOVW_G1,
2222 0,
2223 0},
2224
2225 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2226 {"abs_g1_s", 0,
2227 0,
2228 BFD_RELOC_AARCH64_MOVW_G1_S,
2229 0,
2230 0},
2231
2232 /* Less significant bits 16-31 of address/value: MOVK, no check */
2233 {"abs_g1_nc", 0,
2234 0,
2235 BFD_RELOC_AARCH64_MOVW_G1_NC,
2236 0,
2237 0},
2238
2239 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2240 {"abs_g2", 0,
2241 0,
2242 BFD_RELOC_AARCH64_MOVW_G2,
2243 0,
2244 0},
2245
2246 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2247 {"abs_g2_s", 0,
2248 0,
2249 BFD_RELOC_AARCH64_MOVW_G2_S,
2250 0,
2251 0},
2252
2253 /* Less significant bits 32-47 of address/value: MOVK, no check */
2254 {"abs_g2_nc", 0,
2255 0,
2256 BFD_RELOC_AARCH64_MOVW_G2_NC,
2257 0,
2258 0},
2259
2260 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2261 {"abs_g3", 0,
2262 0,
2263 BFD_RELOC_AARCH64_MOVW_G3,
2264 0,
2265 0},
2266 /* Get to the GOT entry for a symbol. */
2267 {"got_prel19", 0,
2268 0,
2269 0,
2270 0,
2271 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2272 /* Get to the page containing GOT entry for a symbol. */
2273 {"got", 1,
2274 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2275 0,
2276 0,
2277 0},
2278 /* 12 bit offset into the page containing GOT entry for that symbol. */
2279 {"got_lo12", 0,
2280 0,
2281 0,
2282 0,
2283 BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2284
2285 /* Get to the page containing GOT TLS entry for a symbol */
2286 {"tlsgd", 0,
2287 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2288 0,
2289 0,
2290 0},
2291
2292 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2293 {"tlsgd_lo12", 0,
2294 0,
2295 0,
2296 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2297 0},
2298
2299 /* Get to the page containing GOT TLS entry for a symbol */
2300 {"tlsdesc", 0,
2301 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
2302 0,
2303 0,
2304 0},
2305
2306 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2307 {"tlsdesc_lo12", 0,
2308 0,
2309 0,
2310 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2311 BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2312
2313 /* Get to the page containing GOT TLS entry for a symbol */
2314 {"gottprel", 0,
2315 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2316 0,
2317 0,
2318 0},
2319
2320 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2321 {"gottprel_lo12", 0,
2322 0,
2323 0,
2324 0,
2325 BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2326
2327 /* Get tp offset for a symbol. */
2328 {"tprel", 0,
2329 0,
2330 0,
2331 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2332 0},
2333
2334 /* Get tp offset for a symbol. */
2335 {"tprel_lo12", 0,
2336 0,
2337 0,
2338 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2339 0},
2340
2341 /* Get tp offset for a symbol. */
2342 {"tprel_hi12", 0,
2343 0,
2344 0,
2345 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2346 0},
2347
2348 /* Get tp offset for a symbol. */
2349 {"tprel_lo12_nc", 0,
2350 0,
2351 0,
2352 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2353 0},
2354
2355 /* Most significant bits 32-47 of address/value: MOVZ. */
2356 {"tprel_g2", 0,
2357 0,
2358 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2359 0,
2360 0},
2361
2362 /* Most significant bits 16-31 of address/value: MOVZ. */
2363 {"tprel_g1", 0,
2364 0,
2365 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2366 0,
2367 0},
2368
2369 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2370 {"tprel_g1_nc", 0,
2371 0,
2372 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2373 0,
2374 0},
2375
2376 /* Most significant bits 0-15 of address/value: MOVZ. */
2377 {"tprel_g0", 0,
2378 0,
2379 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2380 0,
2381 0},
2382
2383 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2384 {"tprel_g0_nc", 0,
2385 0,
2386 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2387 0,
2388 0},
2389 };
2390
2391 /* Given the address of a pointer pointing to the textual name of a
2392 relocation modifier as it may appear in assembler source, attempt to
2393 find its details in reloc_table. On success, update the pointer to
2394 the character after the trailing colon and return the matching
2395 reloc_table_entry; on failure return NULL. */
2396
2397 static struct reloc_table_entry *
2398 find_reloc_table_entry (char **str)
2399 {
2400 unsigned int i;
2401 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2402 {
2403 int length = strlen (reloc_table[i].name);
2404
2405 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2406 && (*str)[length] == ':')
2407 {
2408 *str += (length + 1);
2409 return &reloc_table[i];
2410 }
2411 }
2412
2413 return NULL;
2414 }
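
/* For example, given *STR pointing at "lo12:var" ("var" being just a
   hypothetical symbol name), the loop above matches the "lo12" entry,
   advances *STR past the trailing colon so that it points at "var", and
   returns that entry.  */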
2415
2416 /* Mode argument to parse_shift and parse_shifter_operand. */
2417 enum parse_shift_mode
2418 {
2419 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2420 "#imm{,lsl #n}" */
2421 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2422 "#imm" */
2423 SHIFTED_LSL, /* bare "lsl #n" */
2424 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2425 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2426 };
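
/* Some illustrative (hypothetical) uses of the modes above:

     SHIFTED_ARITH_IMM   add  x0, x1, x2, lsl #4   or   add x0, x1, #1, lsl #12
     SHIFTED_LOGIC_IMM   and  x0, x1, x2, ror #8
     SHIFTED_LSL_MSL     movi v0.4s, #0xff, msl #8
     SHIFTED_REG_OFFSET  ldr  x0, [x1, w2, sxtw #3]  */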
2427
2428 /* Parse a <shift> operator on an AArch64 data processing instruction.
2429 Return TRUE on success; otherwise return FALSE. */
2430 static bfd_boolean
2431 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2432 {
2433 const struct aarch64_name_value_pair *shift_op;
2434 enum aarch64_modifier_kind kind;
2435 expressionS exp;
2436 int exp_has_prefix;
2437 char *s = *str;
2438 char *p = s;
2439
2440 for (p = *str; ISALPHA (*p); p++)
2441 ;
2442
2443 if (p == *str)
2444 {
2445 set_syntax_error (_("shift expression expected"));
2446 return FALSE;
2447 }
2448
2449 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2450
2451 if (shift_op == NULL)
2452 {
2453 set_syntax_error (_("shift operator expected"));
2454 return FALSE;
2455 }
2456
2457 kind = aarch64_get_operand_modifier (shift_op);
2458
2459 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2460 {
2461 set_syntax_error (_("invalid use of 'MSL'"));
2462 return FALSE;
2463 }
2464
2465 switch (mode)
2466 {
2467 case SHIFTED_LOGIC_IMM:
2468 if (aarch64_extend_operator_p (kind) == TRUE)
2469 {
2470 set_syntax_error (_("extending shift is not permitted"));
2471 return FALSE;
2472 }
2473 break;
2474
2475 case SHIFTED_ARITH_IMM:
2476 if (kind == AARCH64_MOD_ROR)
2477 {
2478 set_syntax_error (_("'ROR' shift is not permitted"));
2479 return FALSE;
2480 }
2481 break;
2482
2483 case SHIFTED_LSL:
2484 if (kind != AARCH64_MOD_LSL)
2485 {
2486 set_syntax_error (_("only 'LSL' shift is permitted"));
2487 return FALSE;
2488 }
2489 break;
2490
2491 case SHIFTED_REG_OFFSET:
2492 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2493 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2494 {
2495 set_fatal_syntax_error
2496 (_("invalid shift for the register offset addressing mode"));
2497 return FALSE;
2498 }
2499 break;
2500
2501 case SHIFTED_LSL_MSL:
2502 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2503 {
2504 set_syntax_error (_("invalid shift operator"));
2505 return FALSE;
2506 }
2507 break;
2508
2509 default:
2510 abort ();
2511 }
2512
2513 /* Whitespace can appear here if the next thing is a bare digit. */
2514 skip_whitespace (p);
2515
2516 /* Parse shift amount. */
2517 exp_has_prefix = 0;
2518 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2519 exp.X_op = O_absent;
2520 else
2521 {
2522 if (is_immediate_prefix (*p))
2523 {
2524 p++;
2525 exp_has_prefix = 1;
2526 }
2527 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2528 }
2529 if (exp.X_op == O_absent)
2530 {
2531 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2532 {
2533 set_syntax_error (_("missing shift amount"));
2534 return FALSE;
2535 }
2536 operand->shifter.amount = 0;
2537 }
2538 else if (exp.X_op != O_constant)
2539 {
2540 set_syntax_error (_("constant shift amount required"));
2541 return FALSE;
2542 }
2543 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2544 {
2545 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2546 return FALSE;
2547 }
2548 else
2549 {
2550 operand->shifter.amount = exp.X_add_number;
2551 operand->shifter.amount_present = 1;
2552 }
2553
2554 operand->shifter.operator_present = 1;
2555 operand->shifter.kind = kind;
2556
2557 *str = p;
2558 return TRUE;
2559 }
2560
2561 /* Parse a <shifter_operand> for a data processing instruction:
2562
2563 #<immediate>
2564 #<immediate>, LSL #imm
2565
2566 Validation of immediate operands is deferred to md_apply_fix.
2567
2568 Return TRUE on success; otherwise return FALSE. */
2569
2570 static bfd_boolean
2571 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2572 enum parse_shift_mode mode)
2573 {
2574 char *p;
2575
2576 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2577 return FALSE;
2578
2579 p = *str;
2580
2581 /* Accept an immediate expression. */
2582 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2583 return FALSE;
2584
2585 /* Accept optional LSL for arithmetic immediate values. */
2586 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2587 if (! parse_shift (&p, operand, SHIFTED_LSL))
2588 return FALSE;
2589
2590 /* Do not accept any shifter for logical immediate values. */
2591 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2592 && parse_shift (&p, operand, mode))
2593 {
2594 set_syntax_error (_("unexpected shift operator"));
2595 return FALSE;
2596 }
2597
2598 *str = p;
2599 return TRUE;
2600 }
2601
2602 /* Parse a <shifter_operand> for a data processing instruction:
2603
2604 <Rm>
2605 <Rm>, <shift>
2606 #<immediate>
2607 #<immediate>, LSL #imm
2608
2609 where <shift> is handled by parse_shift above, and the last two
2610 cases are handled by the function above.
2611
2612 Validation of immediate operands is deferred to md_apply_fix.
2613
2614 Return TRUE on success; otherwise return FALSE. */
2615
2616 static bfd_boolean
2617 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2618 enum parse_shift_mode mode)
2619 {
2620 int reg;
2621 int isreg32, isregzero;
2622 enum aarch64_operand_class opd_class
2623 = aarch64_get_operand_class (operand->type);
2624
2625 if ((reg =
2626 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2627 {
2628 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2629 {
2630 set_syntax_error (_("unexpected register in the immediate operand"));
2631 return FALSE;
2632 }
2633
2634 if (!isregzero && reg == REG_SP)
2635 {
2636 set_syntax_error (BAD_SP);
2637 return FALSE;
2638 }
2639
2640 operand->reg.regno = reg;
2641 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2642
2643 /* Accept optional shift operation on register. */
2644 if (! skip_past_comma (str))
2645 return TRUE;
2646
2647 if (! parse_shift (str, operand, mode))
2648 return FALSE;
2649
2650 return TRUE;
2651 }
2652 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2653 {
2654 set_syntax_error
2655 (_("integer register expected in the extended/shifted operand "
2656 "register"));
2657 return FALSE;
2658 }
2659
2660 /* Otherwise we have a (possibly shifted) immediate operand. */
2661 return parse_shifter_operand_imm (str, operand, mode);
2662 }
2663
2664 /* Return TRUE on success; return FALSE otherwise. */
2665
2666 static bfd_boolean
2667 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2668 enum parse_shift_mode mode)
2669 {
2670 char *p = *str;
2671
2672 /* Determine if we have the sequence of characters #: or just :
2673 coming next. If we do, then we check for a :rello: relocation
2674 modifier. If we don't, punt the whole lot to
2675 parse_shifter_operand. */
2676
2677 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2678 {
2679 struct reloc_table_entry *entry;
2680
2681 if (p[0] == '#')
2682 p += 2;
2683 else
2684 p++;
2685 *str = p;
2686
2687 /* Try to parse a relocation. Anything else is an error. */
2688 if (!(entry = find_reloc_table_entry (str)))
2689 {
2690 set_syntax_error (_("unknown relocation modifier"));
2691 return FALSE;
2692 }
2693
2694 if (entry->add_type == 0)
2695 {
2696 set_syntax_error
2697 (_("this relocation modifier is not allowed on this instruction"));
2698 return FALSE;
2699 }
2700
2701 /* Save str before we decompose it. */
2702 p = *str;
2703
2704 /* Next, we parse the expression. */
2705 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2706 return FALSE;
2707
2708 /* Record the relocation type (use the ADD variant here). */
2709 inst.reloc.type = entry->add_type;
2710 inst.reloc.pc_rel = entry->pc_rel;
2711
2712 /* If str is empty, we've reached the end, stop here. */
2713 if (**str == '\0')
2714 return TRUE;
2715
2716 /* Otherwise, we have a shifted reloc modifier, so rewind to
2717 recover the variable name and continue parsing for the shifter. */
2718 *str = p;
2719 return parse_shifter_operand_imm (str, operand, mode);
2720 }
2721
2722 return parse_shifter_operand (str, operand, mode);
2723 }
2724
2725 /* Parse all forms of an address expression. Information is written
2726 to *OPERAND and/or inst.reloc.
2727
2728 The A64 instruction set has the following addressing modes:
2729
2730 Offset
2731 [base] // in SIMD ld/st structure
2732 [base{,#0}] // in ld/st exclusive
2733 [base{,#imm}]
2734 [base,Xm{,LSL #imm}]
2735 [base,Xm,SXTX {#imm}]
2736 [base,Wm,(S|U)XTW {#imm}]
2737 Pre-indexed
2738 [base,#imm]!
2739 Post-indexed
2740 [base],#imm
2741 [base],Xm // in SIMD ld/st structure
2742 PC-relative (literal)
2743 label
2744 =immediate
2745
2746 (As a convenience, the notation "=immediate" is permitted in conjunction
2747 with the pc-relative literal load instructions to automatically place an
2748 immediate value or symbolic address in a nearby literal pool and generate
2749 a hidden label which references it.)
2750
2751 Upon a successful parsing, the address structure in *OPERAND will be
2752 filled in the following way:
2753
2754 .base_regno = <base>
2755 .offset.is_reg // 1 if the offset is a register
2756 .offset.imm = <imm>
2757 .offset.regno = <Rm>
2758
2759 For different addressing modes defined in the A64 ISA:
2760
2761 Offset
2762 .pcrel=0; .preind=1; .postind=0; .writeback=0
2763 Pre-indexed
2764 .pcrel=0; .preind=1; .postind=0; .writeback=1
2765 Post-indexed
2766 .pcrel=0; .preind=0; .postind=1; .writeback=1
2767 PC-relative (literal)
2768 .pcrel=1; .preind=1; .postind=0; .writeback=0
2769
2770 The shift/extension information, if any, will be stored in .shifter.
2771
2772 It is the caller's responsibility to check for addressing modes not
2773 supported by the instruction, and to set inst.reloc.type. */
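
/* As a concrete (hypothetical) example of the above, "[x7, w1, sxtw #2]"
   is parsed into .base_regno = 7, .offset.regno = 1, .offset.is_reg = 1,
   .shifter.kind = AARCH64_MOD_SXTW, .shifter.amount = 2, .preind = 1,
   .postind = 0 and .writeback = 0.  */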
2774
2775 static bfd_boolean
2776 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2777 int accept_reg_post_index)
2778 {
2779 char *p = *str;
2780 int reg;
2781 int isreg32, isregzero;
2782 expressionS *exp = &inst.reloc.exp;
2783
2784 if (! skip_past_char (&p, '['))
2785 {
2786 /* =immediate or label. */
2787 operand->addr.pcrel = 1;
2788 operand->addr.preind = 1;
2789
2790 /* #:<reloc_op>:<symbol> */
2791 skip_past_char (&p, '#');
2792 if (reloc && skip_past_char (&p, ':'))
2793 {
2794 struct reloc_table_entry *entry;
2795
2796 /* Try to parse a relocation modifier. Anything else is
2797 an error. */
2798 entry = find_reloc_table_entry (&p);
2799 if (! entry)
2800 {
2801 set_syntax_error (_("unknown relocation modifier"));
2802 return FALSE;
2803 }
2804
2805 if (entry->ldst_type == 0)
2806 {
2807 set_syntax_error
2808 (_("this relocation modifier is not allowed on this "
2809 "instruction"));
2810 return FALSE;
2811 }
2812
2813 /* #:<reloc_op>: */
2814 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2815 {
2816 set_syntax_error (_("invalid relocation expression"));
2817 return FALSE;
2818 }
2819
2820 /* #:<reloc_op>:<expr> */
2821 /* Record the load/store relocation type. */
2822 inst.reloc.type = entry->ldst_type;
2823 inst.reloc.pc_rel = entry->pc_rel;
2824 }
2825 else
2826 {
2827
2828 if (skip_past_char (&p, '='))
2829 /* =immediate; need to generate the literal in the literal pool. */
2830 inst.gen_lit_pool = 1;
2831
2832 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2833 {
2834 set_syntax_error (_("invalid address"));
2835 return FALSE;
2836 }
2837 }
2838
2839 *str = p;
2840 return TRUE;
2841 }
2842
2843 /* [ */
2844
2845 /* Accept SP and reject ZR */
2846 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2847 if (reg == PARSE_FAIL || isreg32)
2848 {
2849 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2850 return FALSE;
2851 }
2852 operand->addr.base_regno = reg;
2853
2854 /* [Xn */
2855 if (skip_past_comma (&p))
2856 {
2857 /* [Xn, */
2858 operand->addr.preind = 1;
2859
2860 /* Reject SP and accept ZR */
2861 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2862 if (reg != PARSE_FAIL)
2863 {
2864 /* [Xn,Rm */
2865 operand->addr.offset.regno = reg;
2866 operand->addr.offset.is_reg = 1;
2867 /* Shifted index. */
2868 if (skip_past_comma (&p))
2869 {
2870 /* [Xn,Rm, */
2871 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2872 /* Use the diagnostics set in parse_shift, so do not set a
2873 new error message here. */
2874 return FALSE;
2875 }
2876 /* We only accept:
2877 [base,Xm{,LSL #imm}]
2878 [base,Xm,SXTX {#imm}]
2879 [base,Wm,(S|U)XTW {#imm}] */
2880 if (operand->shifter.kind == AARCH64_MOD_NONE
2881 || operand->shifter.kind == AARCH64_MOD_LSL
2882 || operand->shifter.kind == AARCH64_MOD_SXTX)
2883 {
2884 if (isreg32)
2885 {
2886 set_syntax_error (_("invalid use of 32-bit register offset"));
2887 return FALSE;
2888 }
2889 }
2890 else if (!isreg32)
2891 {
2892 set_syntax_error (_("invalid use of 64-bit register offset"));
2893 return FALSE;
2894 }
2895 }
2896 else
2897 {
2898 /* [Xn,#:<reloc_op>:<symbol> */
2899 skip_past_char (&p, '#');
2900 if (reloc && skip_past_char (&p, ':'))
2901 {
2902 struct reloc_table_entry *entry;
2903
2904 /* Try to parse a relocation modifier. Anything else is
2905 an error. */
2906 if (!(entry = find_reloc_table_entry (&p)))
2907 {
2908 set_syntax_error (_("unknown relocation modifier"));
2909 return FALSE;
2910 }
2911
2912 if (entry->ldst_type == 0)
2913 {
2914 set_syntax_error
2915 (_("this relocation modifier is not allowed on this "
2916 "instruction"));
2917 return FALSE;
2918 }
2919
2920 /* [Xn,#:<reloc_op>: */
2921 /* We now have the group relocation table entry corresponding to
2922 the name in the assembler source. Next, we parse the
2923 expression. */
2924 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2925 {
2926 set_syntax_error (_("invalid relocation expression"));
2927 return FALSE;
2928 }
2929
2930 /* [Xn,#:<reloc_op>:<expr> */
2931 /* Record the load/store relocation type. */
2932 inst.reloc.type = entry->ldst_type;
2933 inst.reloc.pc_rel = entry->pc_rel;
2934 }
2935 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2936 {
2937 set_syntax_error (_("invalid expression in the address"));
2938 return FALSE;
2939 }
2940 /* [Xn,<expr> */
2941 }
2942 }
2943
2944 if (! skip_past_char (&p, ']'))
2945 {
2946 set_syntax_error (_("']' expected"));
2947 return FALSE;
2948 }
2949
2950 if (skip_past_char (&p, '!'))
2951 {
2952 if (operand->addr.preind && operand->addr.offset.is_reg)
2953 {
2954 set_syntax_error (_("register offset not allowed in pre-indexed "
2955 "addressing mode"));
2956 return FALSE;
2957 }
2958 /* [Xn]! */
2959 operand->addr.writeback = 1;
2960 }
2961 else if (skip_past_comma (&p))
2962 {
2963 /* [Xn], */
2964 operand->addr.postind = 1;
2965 operand->addr.writeback = 1;
2966
2967 if (operand->addr.preind)
2968 {
2969 set_syntax_error (_("cannot combine pre- and post-indexing"));
2970 return FALSE;
2971 }
2972
2973 if (accept_reg_post_index
2974 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
2975 &isregzero)) != PARSE_FAIL)
2976 {
2977 /* [Xn],Xm */
2978 if (isreg32)
2979 {
2980 set_syntax_error (_("invalid 32-bit register offset"));
2981 return FALSE;
2982 }
2983 operand->addr.offset.regno = reg;
2984 operand->addr.offset.is_reg = 1;
2985 }
2986 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2987 {
2988 /* [Xn],#expr */
2989 set_syntax_error (_("invalid expression in the address"));
2990 return FALSE;
2991 }
2992 }
2993
2994 /* If at this point neither .preind nor .postind is set, we have a
2995 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
2996 if (operand->addr.preind == 0 && operand->addr.postind == 0)
2997 {
2998 if (operand->addr.writeback)
2999 {
3000 /* Reject [Rn]! */
3001 set_syntax_error (_("missing offset in the pre-indexed address"));
3002 return FALSE;
3003 }
3004 operand->addr.preind = 1;
3005 inst.reloc.exp.X_op = O_constant;
3006 inst.reloc.exp.X_add_number = 0;
3007 }
3008
3009 *str = p;
3010 return TRUE;
3011 }
3012
3013 /* Return TRUE on success; otherwise return FALSE. */
3014 static bfd_boolean
3015 parse_address (char **str, aarch64_opnd_info *operand,
3016 int accept_reg_post_index)
3017 {
3018 return parse_address_main (str, operand, 0, accept_reg_post_index);
3019 }
3020
3021 /* Return TRUE on success; otherwise return FALSE. */
3022 static bfd_boolean
3023 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3024 {
3025 return parse_address_main (str, operand, 1, 0);
3026 }
3027
3028 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3029 Return TRUE on success; otherwise return FALSE. */
3030 static bfd_boolean
3031 parse_half (char **str, int *internal_fixup_p)
3032 {
3033 char *p, *saved;
3034 int dummy;
3035
3036 p = *str;
3037 skip_past_char (&p, '#');
3038
3039 gas_assert (internal_fixup_p);
3040 *internal_fixup_p = 0;
3041
3042 if (*p == ':')
3043 {
3044 struct reloc_table_entry *entry;
3045
3046 /* Try to parse a relocation. Anything else is an error. */
3047 ++p;
3048 if (!(entry = find_reloc_table_entry (&p)))
3049 {
3050 set_syntax_error (_("unknown relocation modifier"));
3051 return FALSE;
3052 }
3053
3054 if (entry->movw_type == 0)
3055 {
3056 set_syntax_error
3057 (_("this relocation modifier is not allowed on this instruction"));
3058 return FALSE;
3059 }
3060
3061 inst.reloc.type = entry->movw_type;
3062 }
3063 else
3064 *internal_fixup_p = 1;
3065
3066 /* Avoid parsing a register as a general symbol. */
3067 saved = p;
3068 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3069 return FALSE;
3070 p = saved;
3071
3072 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3073 return FALSE;
3074
3075 *str = p;
3076 return TRUE;
3077 }
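
/* For example (with hypothetical operands): in "movz x0, #:abs_g1:sym" the
   ":abs_g1:" modifier selects BFD_RELOC_AARCH64_MOVW_G1 from the table
   above, whereas a plain "movz x0, #0x1234" takes the *INTERNAL_FIXUP_P
   path and is fixed up internally by GAS.  */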
3078
3079 /* Parse an operand for an ADRP instruction:
3080 ADRP <Xd>, <label>
3081 Return TRUE on success; otherwise return FALSE. */
3082
3083 static bfd_boolean
3084 parse_adrp (char **str)
3085 {
3086 char *p;
3087
3088 p = *str;
3089 if (*p == ':')
3090 {
3091 struct reloc_table_entry *entry;
3092
3093 /* Try to parse a relocation. Anything else is an error. */
3094 ++p;
3095 if (!(entry = find_reloc_table_entry (&p)))
3096 {
3097 set_syntax_error (_("unknown relocation modifier"));
3098 return FALSE;
3099 }
3100
3101 if (entry->adrp_type == 0)
3102 {
3103 set_syntax_error
3104 (_("this relocation modifier is not allowed on this instruction"));
3105 return FALSE;
3106 }
3107
3108 inst.reloc.type = entry->adrp_type;
3109 }
3110 else
3111 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3112
3113 inst.reloc.pc_rel = 1;
3114
3115 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3116 return FALSE;
3117
3118 *str = p;
3119 return TRUE;
3120 }
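
/* For example (with a hypothetical symbol), "adrp x0, :got:var" selects
   BFD_RELOC_AARCH64_ADR_GOT_PAGE via the "got" table entry, while a bare
   "adrp x0, var" falls back to BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */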
3121
3122 /* Miscellaneous. */
3123
3124 /* Parse an option for a preload instruction. Returns the encoding for the
3125 option, or PARSE_FAIL. */
3126
3127 static int
3128 parse_pldop (char **str)
3129 {
3130 char *p, *q;
3131 const struct aarch64_name_value_pair *o;
3132
3133 p = q = *str;
3134 while (ISALNUM (*q))
3135 q++;
3136
3137 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3138 if (!o)
3139 return PARSE_FAIL;
3140
3141 *str = q;
3142 return o->value;
3143 }
3144
3145 /* Parse an option for a barrier instruction. Returns the encoding for the
3146 option, or PARSE_FAIL. */
3147
3148 static int
3149 parse_barrier (char **str)
3150 {
3151 char *p, *q;
3152 const asm_barrier_opt *o;
3153
3154 p = q = *str;
3155 while (ISALPHA (*q))
3156 q++;
3157
3158 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3159 if (!o)
3160 return PARSE_FAIL;
3161
3162 *str = q;
3163 return o->value;
3164 }
3165
3166 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3167 Returns the encoding for the option, or PARSE_FAIL.
3168
3169 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3170 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
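
/* For instance, the made-up name "s3_1_c11_c0_2" (purely illustrative) is
   accepted by the fallback path below and encodes to
   (3 << 14) | (1 << 11) | (11 << 7) | (0 << 3) | 2 = 0xcd82; note that the
   encoding check requires CRn to be 11 or 15.  */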
3171
3172 static int
3173 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3174 {
3175 char *p, *q;
3176 char buf[32];
3177 const struct aarch64_name_value_pair *o;
3178 int value;
3179
3180 p = buf;
3181 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3182 if (p < buf + 31)
3183 *p++ = TOLOWER (*q);
3184 *p = '\0';
3185 /* Assert that BUF is large enough. */
3186 gas_assert (p - buf == q - *str);
3187
3188 o = hash_find (sys_regs, buf);
3189 if (!o)
3190 {
3191 if (!imple_defined_p)
3192 return PARSE_FAIL;
3193 else
3194 {
3195 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3196 registers. */
3197 unsigned int op0, op1, cn, cm, op2;
3198 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3199 return PARSE_FAIL;
3200 /* Register access is encoded as follows:
3201 op0 op1 CRn CRm op2
3202 11 xxx 1x11 xxxx xxx. */
3203 if (op0 != 3 || op1 > 7 || (cn | 0x4) != 0xf || cm > 15 || op2 > 7)
3204 return PARSE_FAIL;
3205 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3206 }
3207 }
3208 else
3209 value = o->value;
3210
3211 *str = q;
3212 return value;
3213 }
3214
3215 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3216 for the option, or NULL. */
3217
3218 static const aarch64_sys_ins_reg *
3219 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3220 {
3221 char *p, *q;
3222 char buf[32];
3223 const aarch64_sys_ins_reg *o;
3224
3225 p = buf;
3226 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3227 if (p < buf + 31)
3228 *p++ = TOLOWER (*q);
3229 *p = '\0';
3230
3231 o = hash_find (sys_ins_regs, buf);
3232 if (!o)
3233 return NULL;
3234
3235 *str = q;
3236 return o;
3237 }
3238 \f
3239 #define po_char_or_fail(chr) do { \
3240 if (! skip_past_char (&str, chr)) \
3241 goto failure; \
3242 } while (0)
3243
3244 #define po_reg_or_fail(regtype) do { \
3245 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3246 if (val == PARSE_FAIL) \
3247 { \
3248 set_default_error (); \
3249 goto failure; \
3250 } \
3251 } while (0)
3252
3253 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3254 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3255 &isreg32, &isregzero); \
3256 if (val == PARSE_FAIL) \
3257 { \
3258 set_default_error (); \
3259 goto failure; \
3260 } \
3261 info->reg.regno = val; \
3262 if (isreg32) \
3263 info->qualifier = AARCH64_OPND_QLF_W; \
3264 else \
3265 info->qualifier = AARCH64_OPND_QLF_X; \
3266 } while (0)
3267
3268 #define po_imm_nc_or_fail() do { \
3269 if (! parse_constant_immediate (&str, &val)) \
3270 goto failure; \
3271 } while (0)
3272
3273 #define po_imm_or_fail(min, max) do { \
3274 if (! parse_constant_immediate (&str, &val)) \
3275 goto failure; \
3276 if (val < min || val > max) \
3277 { \
3278 set_fatal_syntax_error (_("immediate value out of range "\
3279 #min " to "#max)); \
3280 goto failure; \
3281 } \
3282 } while (0)
3283
3284 #define po_misc_or_fail(expr) do { \
3285 if (!expr) \
3286 goto failure; \
3287 } while (0)
3288 \f
3289 /* encode the 12-bit imm field of Add/sub immediate */
3290 static inline uint32_t
3291 encode_addsub_imm (uint32_t imm)
3292 {
3293 return imm << 10;
3294 }
3295
3296 /* encode the shift amount field of Add/sub immediate */
3297 static inline uint32_t
3298 encode_addsub_imm_shift_amount (uint32_t cnt)
3299 {
3300 return cnt << 22;
3301 }
3302
3303
3304 /* encode the imm field of Adr instruction */
3305 static inline uint32_t
3306 encode_adr_imm (uint32_t imm)
3307 {
3308 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3309 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3310 }
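
/* For example, encode_adr_imm (5) places the low two bits (0b01) at
   bits [30:29] and the remaining bits (0b1) at bits [23:5], giving
   0x20000020.  */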
3311
3312 /* encode the immediate field of Move wide immediate */
3313 static inline uint32_t
3314 encode_movw_imm (uint32_t imm)
3315 {
3316 return imm << 5;
3317 }
3318
3319 /* encode the 26-bit offset of unconditional branch */
3320 static inline uint32_t
3321 encode_branch_ofs_26 (uint32_t ofs)
3322 {
3323 return ofs & ((1 << 26) - 1);
3324 }
3325
3326 /* encode the 19-bit offset of conditional branch and compare & branch */
3327 static inline uint32_t
3328 encode_cond_branch_ofs_19 (uint32_t ofs)
3329 {
3330 return (ofs & ((1 << 19) - 1)) << 5;
3331 }
3332
3333 /* encode the 19-bit offset of ld literal */
3334 static inline uint32_t
3335 encode_ld_lit_ofs_19 (uint32_t ofs)
3336 {
3337 return (ofs & ((1 << 19) - 1)) << 5;
3338 }
3339
3340 /* Encode the 14-bit offset of test & branch. */
3341 static inline uint32_t
3342 encode_tst_branch_ofs_14 (uint32_t ofs)
3343 {
3344 return (ofs & ((1 << 14) - 1)) << 5;
3345 }
3346
3347 /* Encode the 16-bit imm field of svc/hvc/smc. */
3348 static inline uint32_t
3349 encode_svc_imm (uint32_t imm)
3350 {
3351 return imm << 5;
3352 }
3353
3354 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3355 static inline uint32_t
3356 reencode_addsub_switch_add_sub (uint32_t opcode)
3357 {
3358 return opcode ^ (1 << 30);
3359 }
3360
3361 static inline uint32_t
3362 reencode_movzn_to_movz (uint32_t opcode)
3363 {
3364 return opcode | (1 << 30);
3365 }
3366
3367 static inline uint32_t
3368 reencode_movzn_to_movn (uint32_t opcode)
3369 {
3370 return opcode & ~(1 << 30);
3371 }
3372
3373 /* Overall per-instruction processing. */
3374
3375 /* We need to be able to fix up arbitrary expressions in some statements.
3376 This is so that we can handle symbols that are an arbitrary distance from
3377 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3378 which returns part of an address in a form which will be valid for
3379 a data instruction. We do this by pushing the expression into a symbol
3380 in the expr_section, and creating a fix for that. */
3381
3382 static fixS *
3383 fix_new_aarch64 (fragS * frag,
3384 int where,
3385 short int size, expressionS * exp, int pc_rel, int reloc)
3386 {
3387 fixS *new_fix;
3388
3389 switch (exp->X_op)
3390 {
3391 case O_constant:
3392 case O_symbol:
3393 case O_add:
3394 case O_subtract:
3395 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3396 break;
3397
3398 default:
3399 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3400 pc_rel, reloc);
3401 break;
3402 }
3403 return new_fix;
3404 }
3405 \f
3406 /* Diagnostics on operands errors. */
3407
3408 /* By default, output a one-line error message only.
3409 The verbose error message can be enabled by -merror-verbose. */
3410 static int verbose_error_p = 0;
3411
3412 #ifdef DEBUG_AARCH64
3413 /* N.B. this is only for the purpose of debugging. */
3414 const char* operand_mismatch_kind_names[] =
3415 {
3416 "AARCH64_OPDE_NIL",
3417 "AARCH64_OPDE_RECOVERABLE",
3418 "AARCH64_OPDE_SYNTAX_ERROR",
3419 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3420 "AARCH64_OPDE_INVALID_VARIANT",
3421 "AARCH64_OPDE_OUT_OF_RANGE",
3422 "AARCH64_OPDE_UNALIGNED",
3423 "AARCH64_OPDE_REG_LIST",
3424 "AARCH64_OPDE_OTHER_ERROR",
3425 };
3426 #endif /* DEBUG_AARCH64 */
3427
3428 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3429
3430 When multiple errors of different kinds are found in the same assembly
3431 line, only the error of the highest severity will be picked up for
3432 issuing the diagnostics. */
3433
3434 static inline bfd_boolean
3435 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3436 enum aarch64_operand_error_kind rhs)
3437 {
3438 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3439 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3440 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3441 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3442 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3443 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3444 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3445 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3446 return lhs > rhs;
3447 }
3448
3449 /* Helper routine to get the mnemonic name from the assembly instruction
3450 line; should only be called for diagnostic purposes, as a string
3451 copy operation is involved, which may affect the runtime
3452 performance if used elsewhere. */
3453
3454 static const char*
3455 get_mnemonic_name (const char *str)
3456 {
3457 static char mnemonic[32];
3458 char *ptr;
3459
3460 /* Get the first 31 bytes and assume that the full name is included. */
3461 strncpy (mnemonic, str, 31);
3462 mnemonic[31] = '\0';
3463
3464 /* Scan up to the end of the mnemonic, which must end in white space,
3465 '.', or end of string. */
3466 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3467 ;
3468
3469 *ptr = '\0';
3470
3471 /* Append '...' to the truncated long name. */
3472 if (ptr - mnemonic == 31)
3473 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3474
3475 return mnemonic;
3476 }
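
/* E.g. for the (hypothetical) input line "ldr x0, [x1]", the buffer above
   ends up holding just "ldr".  */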
3477
3478 static void
3479 reset_aarch64_instruction (aarch64_instruction *instruction)
3480 {
3481 memset (instruction, '\0', sizeof (aarch64_instruction));
3482 instruction->reloc.type = BFD_RELOC_UNUSED;
3483 }
3484
3485 /* Data structures storing one user error in the assembly code related to
3486 operands. */
3487
3488 struct operand_error_record
3489 {
3490 const aarch64_opcode *opcode;
3491 aarch64_operand_error detail;
3492 struct operand_error_record *next;
3493 };
3494
3495 typedef struct operand_error_record operand_error_record;
3496
3497 struct operand_errors
3498 {
3499 operand_error_record *head;
3500 operand_error_record *tail;
3501 };
3502
3503 typedef struct operand_errors operand_errors;
3504
3505 /* Top-level data structure reporting user errors for the current line of
3506 the assembly code.
3507 The way md_assemble works is that all opcodes sharing the same mnemonic
3508 name are iterated to find a match to the assembly line. In this data
3509 structure, each such opcode will have one operand_error_record
3510 allocated and inserted. In other words, excessive errors related to
3511 a single opcode are disregarded. */
3512 operand_errors operand_error_report;
3513
3514 /* Free record nodes. */
3515 static operand_error_record *free_opnd_error_record_nodes = NULL;
3516
3517 /* Initialize the data structure that stores the operand mismatch
3518 information on assembling one line of the assembly code. */
3519 static void
3520 init_operand_error_report (void)
3521 {
3522 if (operand_error_report.head != NULL)
3523 {
3524 gas_assert (operand_error_report.tail != NULL);
3525 operand_error_report.tail->next = free_opnd_error_record_nodes;
3526 free_opnd_error_record_nodes = operand_error_report.head;
3527 operand_error_report.head = NULL;
3528 operand_error_report.tail = NULL;
3529 return;
3530 }
3531 gas_assert (operand_error_report.tail == NULL);
3532 }
3533
3534 /* Return TRUE if some operand error has been recorded during the
3535 parsing of the current assembly line using the opcode *OPCODE;
3536 otherwise return FALSE. */
3537 static inline bfd_boolean
3538 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3539 {
3540 operand_error_record *record = operand_error_report.head;
3541 return record && record->opcode == opcode;
3542 }
3543
3544 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3545 OPCODE field is initialized with OPCODE.
3546 N.B. only one record for each opcode, i.e. at most one error is
3547 recorded for each instruction template. */
3548
3549 static void
3550 add_operand_error_record (const operand_error_record* new_record)
3551 {
3552 const aarch64_opcode *opcode = new_record->opcode;
3553 operand_error_record* record = operand_error_report.head;
3554
3555 /* The record may have been created for this opcode. If not, we need
3556 to prepare one. */
3557 if (! opcode_has_operand_error_p (opcode))
3558 {
3559 /* Get one empty record. */
3560 if (free_opnd_error_record_nodes == NULL)
3561 {
3562 record = xmalloc (sizeof (operand_error_record));
3563 if (record == NULL)
3564 abort ();
3565 }
3566 else
3567 {
3568 record = free_opnd_error_record_nodes;
3569 free_opnd_error_record_nodes = record->next;
3570 }
3571 record->opcode = opcode;
3572 /* Insert at the head. */
3573 record->next = operand_error_report.head;
3574 operand_error_report.head = record;
3575 if (operand_error_report.tail == NULL)
3576 operand_error_report.tail = record;
3577 }
3578 else if (record->detail.kind != AARCH64_OPDE_NIL
3579 && record->detail.index <= new_record->detail.index
3580 && operand_error_higher_severity_p (record->detail.kind,
3581 new_record->detail.kind))
3582 {
3583 /* In the case of multiple errors found on operands related to a
3584 single opcode, only record the error of the leftmost operand and
3585 only if the error is of higher severity. */
3586 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3587 " the existing error %s on operand %d",
3588 operand_mismatch_kind_names[new_record->detail.kind],
3589 new_record->detail.index,
3590 operand_mismatch_kind_names[record->detail.kind],
3591 record->detail.index);
3592 return;
3593 }
3594
3595 record->detail = new_record->detail;
3596 }
3597
3598 static inline void
3599 record_operand_error_info (const aarch64_opcode *opcode,
3600 aarch64_operand_error *error_info)
3601 {
3602 operand_error_record record;
3603 record.opcode = opcode;
3604 record.detail = *error_info;
3605 add_operand_error_record (&record);
3606 }
3607
3608 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3609 error message *ERROR, for operand IDX (count from 0). */
3610
3611 static void
3612 record_operand_error (const aarch64_opcode *opcode, int idx,
3613 enum aarch64_operand_error_kind kind,
3614 const char* error)
3615 {
3616 aarch64_operand_error info;
3617 memset(&info, 0, sizeof (info));
3618 info.index = idx;
3619 info.kind = kind;
3620 info.error = error;
3621 record_operand_error_info (opcode, &info);
3622 }
3623
3624 static void
3625 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3626 enum aarch64_operand_error_kind kind,
3627 const char* error, const int *extra_data)
3628 {
3629 aarch64_operand_error info;
3630 info.index = idx;
3631 info.kind = kind;
3632 info.error = error;
3633 info.data[0] = extra_data[0];
3634 info.data[1] = extra_data[1];
3635 info.data[2] = extra_data[2];
3636 record_operand_error_info (opcode, &info);
3637 }
3638
3639 static void
3640 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3641 const char* error, int lower_bound,
3642 int upper_bound)
3643 {
3644 int data[3] = {lower_bound, upper_bound, 0};
3645 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3646 error, data);
3647 }
3648
3649 /* Remove the operand error record for *OPCODE. */
3650 static void ATTRIBUTE_UNUSED
3651 remove_operand_error_record (const aarch64_opcode *opcode)
3652 {
3653 if (opcode_has_operand_error_p (opcode))
3654 {
3655 operand_error_record* record = operand_error_report.head;
3656 gas_assert (record != NULL && operand_error_report.tail != NULL);
3657 operand_error_report.head = record->next;
3658 record->next = free_opnd_error_record_nodes;
3659 free_opnd_error_record_nodes = record;
3660 if (operand_error_report.head == NULL)
3661 {
3662 gas_assert (operand_error_report.tail == record);
3663 operand_error_report.tail = NULL;
3664 }
3665 }
3666 }
3667
3668 /* Given the instruction in *INSTR, return the index of the best matched
3669 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3670
3671 Return -1 if there is no qualifier sequence; return the first match
3672 if multiple matches are found. */
3673
3674 static int
3675 find_best_match (const aarch64_inst *instr,
3676 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3677 {
3678 int i, num_opnds, max_num_matched, idx;
3679
3680 num_opnds = aarch64_num_of_operands (instr->opcode);
3681 if (num_opnds == 0)
3682 {
3683 DEBUG_TRACE ("no operand");
3684 return -1;
3685 }
3686
3687 max_num_matched = 0;
3688 idx = -1;
3689
3690 /* For each pattern. */
3691 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3692 {
3693 int j, num_matched;
3694 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3695
3696 /* Most opcodes have far fewer patterns in the list. */
3697 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3698 {
3699 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3700 if (i != 0 && idx == -1)
3701 /* If nothing has been matched, return the 1st sequence. */
3702 idx = 0;
3703 break;
3704 }
3705
3706 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3707 if (*qualifiers == instr->operands[j].qualifier)
3708 ++num_matched;
3709
3710 if (num_matched > max_num_matched)
3711 {
3712 max_num_matched = num_matched;
3713 idx = i;
3714 }
3715 }
3716
3717 DEBUG_TRACE ("return with %d", idx);
3718 return idx;
3719 }
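
/* A contrived example of the matching above: if an opcode lists the
   qualifier sequences {W, W, W} and {X, X, X} and the operands were
   parsed as X, X, W, the first sequence matches one qualifier and the
   second matches two, so index 1 is returned.  */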
3720
3721 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3722 corresponding operands in *INSTR. */
3723
3724 static inline void
3725 assign_qualifier_sequence (aarch64_inst *instr,
3726 const aarch64_opnd_qualifier_t *qualifiers)
3727 {
3728 int i = 0;
3729 int num_opnds = aarch64_num_of_operands (instr->opcode);
3730 gas_assert (num_opnds);
3731 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3732 instr->operands[i].qualifier = *qualifiers;
3733 }
3734
3735 /* Print operands for diagnostic purposes. */
3736
3737 static void
3738 print_operands (char *buf, const aarch64_opcode *opcode,
3739 const aarch64_opnd_info *opnds)
3740 {
3741 int i;
3742
3743 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3744 {
3745 const size_t size = 128;
3746 char str[size];
3747
3748 /* We rely mainly on the operand info in the opcode table; however, we
3749 also look into inst->operands so that an optional operand can be
3750 printed.
3751 The two operand codes should be the same in all cases, apart from
3752 when the operand can be optional. */
3753 if (opcode->operands[i] == AARCH64_OPND_NIL
3754 || opnds[i].type == AARCH64_OPND_NIL)
3755 break;
3756
3757 /* Generate the operand string in STR. */
3758 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3759
3760 /* Delimiter. */
3761 if (str[0] != '\0')
3762 strcat (buf, i == 0 ? " " : ",");
3763
3764 /* Append the operand string. */
3765 strcat (buf, str);
3766 }
3767 }
3768
3769 /* Send to stderr a string as information. */
3770
3771 static void
3772 output_info (const char *format, ...)
3773 {
3774 char *file;
3775 unsigned int line;
3776 va_list args;
3777
3778 as_where (&file, &line);
3779 if (file)
3780 {
3781 if (line != 0)
3782 fprintf (stderr, "%s:%u: ", file, line);
3783 else
3784 fprintf (stderr, "%s: ", file);
3785 }
3786 fprintf (stderr, _("Info: "));
3787 va_start (args, format);
3788 vfprintf (stderr, format, args);
3789 va_end (args);
3790 (void) putc ('\n', stderr);
3791 }
3792
3793 /* Output one operand error record. */
3794
3795 static void
3796 output_operand_error_record (const operand_error_record *record, char *str)
3797 {
3798 int idx = record->detail.index;
3799 const aarch64_opcode *opcode = record->opcode;
3800 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3801 : AARCH64_OPND_NIL);
3802 const aarch64_operand_error *detail = &record->detail;
3803
3804 switch (detail->kind)
3805 {
3806 case AARCH64_OPDE_NIL:
3807 gas_assert (0);
3808 break;
3809
3810 case AARCH64_OPDE_SYNTAX_ERROR:
3811 case AARCH64_OPDE_RECOVERABLE:
3812 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3813 case AARCH64_OPDE_OTHER_ERROR:
3814 gas_assert (idx >= 0);
3815 /* Use the prepared error message if there is one, otherwise use the
3816 operand description string to describe the error. */
3817 if (detail->error != NULL)
3818 {
3819 if (detail->index == -1)
3820 as_bad (_("%s -- `%s'"), detail->error, str);
3821 else
3822 as_bad (_("%s at operand %d -- `%s'"),
3823 detail->error, detail->index + 1, str);
3824 }
3825 else
3826 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3827 aarch64_get_operand_desc (opd_code), str);
3828 break;
3829
3830 case AARCH64_OPDE_INVALID_VARIANT:
3831 as_bad (_("operand mismatch -- `%s'"), str);
3832 if (verbose_error_p)
3833 {
3834 /* We will try to correct the erroneous instruction and also provide
3835 more information e.g. all other valid variants.
3836
3837 The string representation of the corrected instruction and other
3838 valid variants are generated by
3839
3840 1) obtaining the intermediate representation of the erroneous
3841 instruction;
3842 2) manipulating the IR, e.g. replacing the operand qualifier;
3843 3) printing out the instruction by calling the printer functions
3844 shared with the disassembler.
3845
3846 The limitation of this method is that the exact input assembly
3847 line cannot be accurately reproduced in some cases, for example an
3848 optional operand present in the actual assembly line will be
3849 omitted in the output; likewise for the optional syntax rules,
3850 e.g. the # before the immediate. Another limitation is that the
3851 assembly symbols and relocation operations in the assembly line
3852 currently cannot be printed out in the error report. Last but not
3853 least, when other errors co-exist with this error, the
3854 'corrected' instruction may still be incorrect, e.g. given
3855 'ldnp h0,h1,[x0,#6]!'
3856 this diagnosis will provide the version:
3857 'ldnp s0,s1,[x0,#6]!'
3858 which is still not right. */
3859 size_t len = strlen (get_mnemonic_name (str));
3860 int i, qlf_idx;
3861 bfd_boolean result;
3862 const size_t size = 2048;
3863 char buf[size];
3864 aarch64_inst *inst_base = &inst.base;
3865 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3866
3867 /* Init inst. */
3868 reset_aarch64_instruction (&inst);
3869 inst_base->opcode = opcode;
3870
3871 /* Reset the error report so that there is no side effect on the
3872 following operand parsing. */
3873 init_operand_error_report ();
3874
3875 /* Fill inst. */
3876 result = parse_operands (str + len, opcode)
3877 && programmer_friendly_fixup (&inst);
3878 gas_assert (result);
3879 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3880 NULL, NULL);
3881 gas_assert (!result);
3882
3883 /* Find the most matched qualifier sequence. */
3884 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3885 gas_assert (qlf_idx > -1);
3886
3887 /* Assign the qualifiers. */
3888 assign_qualifier_sequence (inst_base,
3889 opcode->qualifiers_list[qlf_idx]);
3890
3891 /* Print the hint. */
3892 output_info (_(" did you mean this?"));
3893 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3894 print_operands (buf, opcode, inst_base->operands);
3895 output_info (_(" %s"), buf);
3896
3897 /* Print out other variant(s) if there is any. */
3898 if (qlf_idx != 0 ||
3899 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
3900 output_info (_(" other valid variant(s):"));
3901
3902 /* For each pattern. */
3903 qualifiers_list = opcode->qualifiers_list;
3904 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3905 {
3906 /* Most opcodes have far fewer patterns in the list.
3907 First NIL qualifier indicates the end in the list. */
3908 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
3909 break;
3910
3911 if (i != qlf_idx)
3912 {
3913 /* Mnemonics name. */
3914 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3915
3916 /* Assign the qualifiers. */
3917 assign_qualifier_sequence (inst_base, *qualifiers_list);
3918
3919 /* Print instruction. */
3920 print_operands (buf, opcode, inst_base->operands);
3921
3922 output_info (_(" %s"), buf);
3923 }
3924 }
3925 }
3926 break;
3927
3928 case AARCH64_OPDE_OUT_OF_RANGE:
3929 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
3930 detail->error ? detail->error : _("immediate value"),
3931 detail->data[0], detail->data[1], detail->index + 1, str);
3932 break;
3933
3934 case AARCH64_OPDE_REG_LIST:
3935 if (detail->data[0] == 1)
3936 as_bad (_("invalid number of registers in the list; "
3937 "only 1 register is expected at operand %d -- `%s'"),
3938 detail->index + 1, str);
3939 else
3940 as_bad (_("invalid number of registers in the list; "
3941 "%d registers are expected at operand %d -- `%s'"),
3942 detail->data[0], detail->index + 1, str);
3943 break;
3944
3945 case AARCH64_OPDE_UNALIGNED:
3946 as_bad (_("immediate value should be a multiple of "
3947 "%d at operand %d -- `%s'"),
3948 detail->data[0], detail->index + 1, str);
3949 break;
3950
3951 default:
3952 gas_assert (0);
3953 break;
3954 }
3955 }
3956
3957 /* Process and output the error message about the operand mismatching.
3958
3959 When this function is called, the operand error information has
3960 been collected for an assembly line and there will be multiple
3961 errors in the case of multiple instruction templates; output the
3962 error message that most closely describes the problem. */
3963
3964 static void
3965 output_operand_error_report (char *str)
3966 {
3967 int largest_error_pos;
3968 const char *msg = NULL;
3969 enum aarch64_operand_error_kind kind;
3970 operand_error_record *curr;
3971 operand_error_record *head = operand_error_report.head;
3972 operand_error_record *record = NULL;
3973
3974 /* No error to report. */
3975 if (head == NULL)
3976 return;
3977
3978 gas_assert (head != NULL && operand_error_report.tail != NULL);
3979
3980 /* Only one error. */
3981 if (head == operand_error_report.tail)
3982 {
3983 DEBUG_TRACE ("single opcode entry with error kind: %s",
3984 operand_mismatch_kind_names[head->detail.kind]);
3985 output_operand_error_record (head, str);
3986 return;
3987 }
3988
3989 /* Find the error kind of the highest severity. */
3990 DEBUG_TRACE ("multiple opcode entries with error kind");
3991 kind = AARCH64_OPDE_NIL;
3992 for (curr = head; curr != NULL; curr = curr->next)
3993 {
3994 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
3995 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
3996 if (operand_error_higher_severity_p (curr->detail.kind, kind))
3997 kind = curr->detail.kind;
3998 }
3999 gas_assert (kind != AARCH64_OPDE_NIL);
4000
4001 /* Pick up one of the errors of KIND to report. */
4002 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4003 for (curr = head; curr != NULL; curr = curr->next)
4004 {
4005 if (curr->detail.kind != kind)
4006 continue;
4007 /* If there are multiple errors, pick up the one with the highest
4008 mismatching operand index. In the case of multiple errors with
4009 the same highest operand index, pick up the first one that has a
4010 non-NULL error message, or failing that simply the first one. */
4011 if (curr->detail.index > largest_error_pos
4012 || (curr->detail.index == largest_error_pos && msg == NULL
4013 && curr->detail.error != NULL))
4014 {
4015 largest_error_pos = curr->detail.index;
4016 record = curr;
4017 msg = record->detail.error;
4018 }
4019 }
4020
4021 gas_assert (largest_error_pos != -2 && record != NULL);
4022 DEBUG_TRACE ("Pick up error kind %s to report",
4023 operand_mismatch_kind_names[record->detail.kind]);
4024
4025 /* Output. */
4026 output_operand_error_record (record, str);
4027 }
4028 \f
4029 /* Write an AARCH64 instruction to buf - always little-endian. */
4030 static void
4031 put_aarch64_insn (char *buf, uint32_t insn)
4032 {
4033 unsigned char *where = (unsigned char *) buf;
4034 where[0] = insn;
4035 where[1] = insn >> 8;
4036 where[2] = insn >> 16;
4037 where[3] = insn >> 24;
4038 }
4039
4040 static uint32_t
4041 get_aarch64_insn (char *buf)
4042 {
4043 unsigned char *where = (unsigned char *) buf;
4044 uint32_t result;
4045 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4046 return result;
4047 }
4048
4049 static void
4050 output_inst (struct aarch64_inst *new_inst)
4051 {
4052 char *to = NULL;
4053
4054 to = frag_more (INSN_SIZE);
4055
4056 frag_now->tc_frag_data.recorded = 1;
4057
4058 put_aarch64_insn (to, inst.base.value);
4059
4060 if (inst.reloc.type != BFD_RELOC_UNUSED)
4061 {
4062 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4063 INSN_SIZE, &inst.reloc.exp,
4064 inst.reloc.pc_rel,
4065 inst.reloc.type);
4066 DEBUG_TRACE ("Prepared relocation fix up");
4067 /* Don't check the addend value against the instruction size,
4068 that's the job of our code in md_apply_fix(). */
4069 fixp->fx_no_overflow = 1;
4070 if (new_inst != NULL)
4071 fixp->tc_fix_data.inst = new_inst;
4072 if (aarch64_gas_internal_fixup_p ())
4073 {
4074 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4075 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4076 fixp->fx_addnumber = inst.reloc.flags;
4077 }
4078 }
4079
4080 dwarf2_emit_insn (INSN_SIZE);
4081 }
4082
4083 /* Link together opcodes of the same name. */
4084
4085 struct templates
4086 {
4087 aarch64_opcode *opcode;
4088 struct templates *next;
4089 };
4090
4091 typedef struct templates templates;
4092
4093 static templates *
4094 lookup_mnemonic (const char *start, int len)
4095 {
4096 templates *templ = NULL;
4097
4098 templ = hash_find_n (aarch64_ops_hsh, start, len);
4099 return templ;
4100 }
4101
4102 /* Subroutine of md_assemble, responsible for looking up the primary
4103 opcode from the mnemonic the user wrote. STR points to the
4104 beginning of the mnemonic. */
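/* For example, given "b.eq", BASE/END delimit "b", the two characters
   after the dot are looked up as the condition "eq", and the opcode table
   is then probed with the ".c"-suffixed name "b.c" (see the
   conditional-mnemonic handling below).  */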
4105
4106 static templates *
4107 opcode_lookup (char **str)
4108 {
4109 char *end, *base;
4110 const aarch64_cond *cond;
4111 char condname[16];
4112 int len;
4113
4114 /* Scan up to the end of the mnemonic, which must end in white space,
4115 '.', or end of string. */
4116 for (base = end = *str; is_part_of_name(*end); end++)
4117 if (*end == '.')
4118 break;
4119
4120 if (end == base)
4121 return 0;
4122
4123 inst.cond = COND_ALWAYS;
4124
4125 /* Handle a possible condition. */
4126 if (end[0] == '.')
4127 {
4128 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4129 if (cond)
4130 {
4131 inst.cond = cond->value;
4132 *str = end + 3;
4133 }
4134 else
4135 {
4136 *str = end;
4137 return 0;
4138 }
4139 }
4140 else
4141 *str = end;
4142
4143 len = end - base;
4144
4145 if (inst.cond == COND_ALWAYS)
4146 {
4147 /* Look for unaffixed mnemonic. */
4148 return lookup_mnemonic (base, len);
4149 }
4150 else if (len <= 13)
4151 {
4152 /* Append ".c" to the mnemonic if conditional. */
4153 memcpy (condname, base, len);
4154 memcpy (condname + len, ".c", 2);
4155 base = condname;
4156 len += 2;
4157 return lookup_mnemonic (base, len);
4158 }
4159
4160 return NULL;
4161 }
4162
4163 /* Internal helper routine converting a vector neon_type_el structure
4164 *VECTYPE to a corresponding operand qualifier. */
4165
4166 static inline aarch64_opnd_qualifier_t
4167 vectype_to_qualifier (const struct neon_type_el *vectype)
4168 {
4169 /* Element size in bytes indexed by neon_el_type. */
4170 const unsigned char ele_size[5]
4171 = {1, 2, 4, 8, 16};
4172
4173 if (!vectype->defined || vectype->type == NT_invtype)
4174 goto vectype_conversion_fail;
4175
4176 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4177
4178 if (vectype->defined & NTA_HASINDEX)
4179 /* Vector element register. */
4180 return AARCH64_OPND_QLF_S_B + vectype->type;
4181 else
4182 {
4183 /* Vector register. */
4184 int reg_size = ele_size[vectype->type] * vectype->width;
4185 unsigned offset;
4186 if (reg_size != 16 && reg_size != 8)
4187 goto vectype_conversion_fail;
4188 /* The conversion is calculated based on the relation of the order of
4189 qualifiers to the vector element size and vector register size. */
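/* Worked example: for ".4s" we have type == NT_s (2) and width == 4,
   so reg_size == 16 and offset == (2 << 1) + (16 >> 4) == 5, which,
   given the libopcodes ordering 8B, 16B, 4H, 8H, 2S, 4S, ..., selects
   AARCH64_OPND_QLF_V_4S.  */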
4190 offset = (vectype->type == NT_q)
4191 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4192 gas_assert (offset <= 8);
4193 return AARCH64_OPND_QLF_V_8B + offset;
4194 }
4195
4196 vectype_conversion_fail:
4197 first_error (_("bad vector arrangement type"));
4198 return AARCH64_OPND_QLF_NIL;
4199 }
4200
4201 /* Process an optional operand that has been omitted from the assembly line.
4202 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4203 instruction's opcode entry while IDX is the index of this omitted operand.
4204 */
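/* For example, a plain "ret" omits its optional register operand; the
   default value recorded in the opcode entry (30, the link register) is
   filled into *OPERAND here so that encoding can proceed as if the operand
   had been written.  */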
4205
4206 static void
4207 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4208 int idx, aarch64_opnd_info *operand)
4209 {
4210 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4211 gas_assert (optional_operand_p (opcode, idx));
4212 gas_assert (!operand->present);
4213
4214 switch (type)
4215 {
4216 case AARCH64_OPND_Rd:
4217 case AARCH64_OPND_Rn:
4218 case AARCH64_OPND_Rm:
4219 case AARCH64_OPND_Rt:
4220 case AARCH64_OPND_Rt2:
4221 case AARCH64_OPND_Rs:
4222 case AARCH64_OPND_Ra:
4223 case AARCH64_OPND_Rt_SYS:
4224 case AARCH64_OPND_Rd_SP:
4225 case AARCH64_OPND_Rn_SP:
4226 case AARCH64_OPND_Fd:
4227 case AARCH64_OPND_Fn:
4228 case AARCH64_OPND_Fm:
4229 case AARCH64_OPND_Fa:
4230 case AARCH64_OPND_Ft:
4231 case AARCH64_OPND_Ft2:
4232 case AARCH64_OPND_Sd:
4233 case AARCH64_OPND_Sn:
4234 case AARCH64_OPND_Sm:
4235 case AARCH64_OPND_Vd:
4236 case AARCH64_OPND_Vn:
4237 case AARCH64_OPND_Vm:
4238 case AARCH64_OPND_VdD1:
4239 case AARCH64_OPND_VnD1:
4240 operand->reg.regno = default_value;
4241 break;
4242
4243 case AARCH64_OPND_Ed:
4244 case AARCH64_OPND_En:
4245 case AARCH64_OPND_Em:
4246 operand->reglane.regno = default_value;
4247 break;
4248
4249 case AARCH64_OPND_IDX:
4250 case AARCH64_OPND_BIT_NUM:
4251 case AARCH64_OPND_IMMR:
4252 case AARCH64_OPND_IMMS:
4253 case AARCH64_OPND_SHLL_IMM:
4254 case AARCH64_OPND_IMM_VLSL:
4255 case AARCH64_OPND_IMM_VLSR:
4256 case AARCH64_OPND_CCMP_IMM:
4257 case AARCH64_OPND_FBITS:
4258 case AARCH64_OPND_UIMM4:
4259 case AARCH64_OPND_UIMM3_OP1:
4260 case AARCH64_OPND_UIMM3_OP2:
4261 case AARCH64_OPND_IMM:
4262 case AARCH64_OPND_WIDTH:
4263 case AARCH64_OPND_UIMM7:
4264 case AARCH64_OPND_NZCV:
4265 operand->imm.value = default_value;
4266 break;
4267
4268 case AARCH64_OPND_EXCEPTION:
4269 inst.reloc.type = BFD_RELOC_UNUSED;
4270 break;
4271
4272 case AARCH64_OPND_BARRIER_ISB:
4273 operand->barrier = aarch64_barrier_options + default_value;
4274
4275 default:
4276 break;
4277 }
4278 }
4279
4280 /* Process the relocation type for move wide instructions.
4281 Return TRUE on success; otherwise return FALSE. */
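/* For example, "movz x0, #:abs_g1:sym" carries BFD_RELOC_AARCH64_MOVW_G1
   and therefore implies an LSL #16 on the immediate, while the G2/G3
   variants are rejected for 32-bit destination registers.  */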
4282
4283 static bfd_boolean
4284 process_movw_reloc_info (void)
4285 {
4286 int is32;
4287 unsigned shift;
4288
4289 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4290
4291 if (inst.base.opcode->op == OP_MOVK)
4292 switch (inst.reloc.type)
4293 {
4294 case BFD_RELOC_AARCH64_MOVW_G0_S:
4295 case BFD_RELOC_AARCH64_MOVW_G1_S:
4296 case BFD_RELOC_AARCH64_MOVW_G2_S:
4297 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4298 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4299 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4300 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4301 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4302 set_syntax_error
4303 (_("the specified relocation type is not allowed for MOVK"));
4304 return FALSE;
4305 default:
4306 break;
4307 }
4308
4309 switch (inst.reloc.type)
4310 {
4311 case BFD_RELOC_AARCH64_MOVW_G0:
4312 case BFD_RELOC_AARCH64_MOVW_G0_S:
4313 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4314 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4315 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4316 shift = 0;
4317 break;
4318 case BFD_RELOC_AARCH64_MOVW_G1:
4319 case BFD_RELOC_AARCH64_MOVW_G1_S:
4320 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4321 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4322 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4323 shift = 16;
4324 break;
4325 case BFD_RELOC_AARCH64_MOVW_G2:
4326 case BFD_RELOC_AARCH64_MOVW_G2_S:
4327 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4328 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4329 if (is32)
4330 {
4331 set_fatal_syntax_error
4332 (_("the specified relocation type is not allowed for 32-bit "
4333 "register"));
4334 return FALSE;
4335 }
4336 shift = 32;
4337 break;
4338 case BFD_RELOC_AARCH64_MOVW_G3:
4339 if (is32)
4340 {
4341 set_fatal_syntax_error
4342 (_("the specified relocation type is not allowed for 32-bit "
4343 "register"));
4344 return FALSE;
4345 }
4346 shift = 48;
4347 break;
4348 default:
4349 /* More cases should be added when more MOVW-related relocation types
4350 are supported in GAS. */
4351 gas_assert (aarch64_gas_internal_fixup_p ());
4352 /* The shift amount should have already been set by the parser. */
4353 return TRUE;
4354 }
4355 inst.base.operands[1].shifter.amount = shift;
4356 return TRUE;
4357 }
4358
4359 /* A primitive base-2 logarithm calculator. */
4360
4361 static inline unsigned int
4362 get_logsz (unsigned int size)
4363 {
4364 const unsigned char ls[16] =
4365 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4366 if (size > 16)
4367 {
4368 gas_assert (0);
4369 return -1;
4370 }
4371 gas_assert (ls[size - 1] != (unsigned char)-1);
4372 return ls[size - 1];
4373 }
4374
4375 /* Determine and return the real reloc type code for an instruction
4376 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
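/* For example, "ldr x2, [x0, #:lo12:sym]" has a 64-bit transfer register,
   so the element size is 8, logsz is 3 and the reloc chosen below is
   BFD_RELOC_AARCH64_LDST64_LO12.  */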
4377
4378 static inline bfd_reloc_code_real_type
4379 ldst_lo12_determine_real_reloc_type (void)
4380 {
4381 int logsz;
4382 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4383 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4384
4385 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4386 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4387 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4388 BFD_RELOC_AARCH64_LDST128_LO12
4389 };
4390
4391 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4392 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4393
4394 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4395 opd1_qlf =
4396 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4397 1, opd0_qlf, 0);
4398 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4399
4400 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4401 gas_assert (logsz >= 0 && logsz <= 4);
4402
4403 return reloc_ldst_lo12[logsz];
4404 }
4405
4406 /* Check whether a register list REGINFO is valid. The registers must be
4407 numbered in increasing order (modulo 32), in increments of one or two.
4408
4409 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4410 increments of two.
4411
4412 Return FALSE if such a register list is invalid, otherwise return TRUE. */
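/* REGINFO is expected to pack the register count minus one in its two
   least significant bits, followed by successive 5-bit register numbers;
   a list such as {v1.8b, v2.8b, v3.8b} is therefore represented as
   (3 << 12) | (2 << 7) | (1 << 2) | 2.  */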
4413
4414 static bfd_boolean
4415 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4416 {
4417 uint32_t i, nb_regs, prev_regno, incr;
4418
4419 nb_regs = 1 + (reginfo & 0x3);
4420 reginfo >>= 2;
4421 prev_regno = reginfo & 0x1f;
4422 incr = accept_alternate ? 2 : 1;
4423
4424 for (i = 1; i < nb_regs; ++i)
4425 {
4426 uint32_t curr_regno;
4427 reginfo >>= 5;
4428 curr_regno = reginfo & 0x1f;
4429 if (curr_regno != ((prev_regno + incr) & 0x1f))
4430 return FALSE;
4431 prev_regno = curr_regno;
4432 }
4433
4434 return TRUE;
4435 }
4436
4437 /* Generic instruction operand parser. This does no encoding and no
4438 semantic validation; it merely squirrels values away in the inst
4439 structure. Returns TRUE or FALSE depending on whether the
4440 specified grammar matched. */
4441
4442 static bfd_boolean
4443 parse_operands (char *str, const aarch64_opcode *opcode)
4444 {
4445 int i;
4446 char *backtrack_pos = 0;
4447 const enum aarch64_opnd *operands = opcode->operands;
4448
4449 clear_error ();
4450 skip_whitespace (str);
4451
4452 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4453 {
4454 int64_t val;
4455 int isreg32, isregzero;
4456 int comma_skipped_p = 0;
4457 aarch64_reg_type rtype;
4458 struct neon_type_el vectype;
4459 aarch64_opnd_info *info = &inst.base.operands[i];
4460
4461 DEBUG_TRACE ("parse operand %d", i);
4462
4463 /* Assign the operand code. */
4464 info->type = operands[i];
4465
4466 if (optional_operand_p (opcode, i))
4467 {
4468 /* Remember where we are in case we need to backtrack. */
4469 gas_assert (!backtrack_pos);
4470 backtrack_pos = str;
4471 }
4472
4473 /* Expect comma between operands; the backtrack mechanism will take
4474 care of cases of omitted optional operands. */
4475 if (i > 0 && ! skip_past_char (&str, ','))
4476 {
4477 set_syntax_error (_("comma expected between operands"));
4478 goto failure;
4479 }
4480 else
4481 comma_skipped_p = 1;
4482
4483 switch (operands[i])
4484 {
4485 case AARCH64_OPND_Rd:
4486 case AARCH64_OPND_Rn:
4487 case AARCH64_OPND_Rm:
4488 case AARCH64_OPND_Rt:
4489 case AARCH64_OPND_Rt2:
4490 case AARCH64_OPND_Rs:
4491 case AARCH64_OPND_Ra:
4492 case AARCH64_OPND_Rt_SYS:
4493 po_int_reg_or_fail (1, 0);
4494 break;
4495
4496 case AARCH64_OPND_Rd_SP:
4497 case AARCH64_OPND_Rn_SP:
4498 po_int_reg_or_fail (0, 1);
4499 break;
4500
4501 case AARCH64_OPND_Rm_EXT:
4502 case AARCH64_OPND_Rm_SFT:
4503 po_misc_or_fail (parse_shifter_operand
4504 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4505 ? SHIFTED_ARITH_IMM
4506 : SHIFTED_LOGIC_IMM)));
4507 if (!info->shifter.operator_present)
4508 {
4509 /* Default to LSL if not present. Libopcodes prefers shifter
4510 kind to be explicit. */
4511 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4512 info->shifter.kind = AARCH64_MOD_LSL;
4513 /* For Rm_EXT, libopcodes will carry out further check on whether
4514 or not stack pointer is used in the instruction (Recall that
4515 "the extend operator is not optional unless at least one of
4516 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4517 }
4518 break;
4519
4520 case AARCH64_OPND_Fd:
4521 case AARCH64_OPND_Fn:
4522 case AARCH64_OPND_Fm:
4523 case AARCH64_OPND_Fa:
4524 case AARCH64_OPND_Ft:
4525 case AARCH64_OPND_Ft2:
4526 case AARCH64_OPND_Sd:
4527 case AARCH64_OPND_Sn:
4528 case AARCH64_OPND_Sm:
4529 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4530 if (val == PARSE_FAIL)
4531 {
4532 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4533 goto failure;
4534 }
4535 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4536
4537 info->reg.regno = val;
4538 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4539 break;
4540
4541 case AARCH64_OPND_Vd:
4542 case AARCH64_OPND_Vn:
4543 case AARCH64_OPND_Vm:
4544 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4545 if (val == PARSE_FAIL)
4546 {
4547 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4548 goto failure;
4549 }
4550 if (vectype.defined & NTA_HASINDEX)
4551 goto failure;
4552
4553 info->reg.regno = val;
4554 info->qualifier = vectype_to_qualifier (&vectype);
4555 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4556 goto failure;
4557 break;
4558
4559 case AARCH64_OPND_VdD1:
4560 case AARCH64_OPND_VnD1:
4561 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4562 if (val == PARSE_FAIL)
4563 {
4564 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4565 goto failure;
4566 }
4567 if (vectype.type != NT_d || vectype.index != 1)
4568 {
4569 set_fatal_syntax_error
4570 (_("the top half of a 128-bit FP/SIMD register is expected"));
4571 goto failure;
4572 }
4573 info->reg.regno = val;
4574 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4575 here; it is correct for the purpose of encoding/decoding since
4576 only the register number is explicitly encoded in the related
4577 instructions, although this appears a bit hacky. */
4578 info->qualifier = AARCH64_OPND_QLF_S_D;
4579 break;
4580
4581 case AARCH64_OPND_Ed:
4582 case AARCH64_OPND_En:
4583 case AARCH64_OPND_Em:
4584 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4585 if (val == PARSE_FAIL)
4586 {
4587 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4588 goto failure;
4589 }
4590 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4591 goto failure;
4592
4593 info->reglane.regno = val;
4594 info->reglane.index = vectype.index;
4595 info->qualifier = vectype_to_qualifier (&vectype);
4596 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4597 goto failure;
4598 break;
4599
4600 case AARCH64_OPND_LVn:
4601 case AARCH64_OPND_LVt:
4602 case AARCH64_OPND_LVt_AL:
4603 case AARCH64_OPND_LEt:
4604 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4605 goto failure;
4606 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4607 {
4608 set_fatal_syntax_error (_("invalid register list"));
4609 goto failure;
4610 }
4611 info->reglist.first_regno = (val >> 2) & 0x1f;
4612 info->reglist.num_regs = (val & 0x3) + 1;
4613 if (operands[i] == AARCH64_OPND_LEt)
4614 {
4615 if (!(vectype.defined & NTA_HASINDEX))
4616 goto failure;
4617 info->reglist.has_index = 1;
4618 info->reglist.index = vectype.index;
4619 }
4620 else if (!(vectype.defined & NTA_HASTYPE))
4621 goto failure;
4622 info->qualifier = vectype_to_qualifier (&vectype);
4623 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4624 goto failure;
4625 break;
4626
4627 case AARCH64_OPND_Cn:
4628 case AARCH64_OPND_Cm:
4629 po_reg_or_fail (REG_TYPE_CN);
4630 if (val > 15)
4631 {
4632 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4633 goto failure;
4634 }
4635 inst.base.operands[i].reg.regno = val;
4636 break;
4637
4638 case AARCH64_OPND_SHLL_IMM:
4639 case AARCH64_OPND_IMM_VLSR:
4640 po_imm_or_fail (1, 64);
4641 info->imm.value = val;
4642 break;
4643
4644 case AARCH64_OPND_CCMP_IMM:
4645 case AARCH64_OPND_FBITS:
4646 case AARCH64_OPND_UIMM4:
4647 case AARCH64_OPND_UIMM3_OP1:
4648 case AARCH64_OPND_UIMM3_OP2:
4649 case AARCH64_OPND_IMM_VLSL:
4650 case AARCH64_OPND_IMM:
4651 case AARCH64_OPND_WIDTH:
4652 po_imm_nc_or_fail ();
4653 info->imm.value = val;
4654 break;
4655
4656 case AARCH64_OPND_UIMM7:
4657 po_imm_or_fail (0, 127);
4658 info->imm.value = val;
4659 break;
4660
4661 case AARCH64_OPND_IDX:
4662 case AARCH64_OPND_BIT_NUM:
4663 case AARCH64_OPND_IMMR:
4664 case AARCH64_OPND_IMMS:
4665 po_imm_or_fail (0, 63);
4666 info->imm.value = val;
4667 break;
4668
4669 case AARCH64_OPND_IMM0:
4670 po_imm_nc_or_fail ();
4671 if (val != 0)
4672 {
4673 set_fatal_syntax_error (_("immediate zero expected"));
4674 goto failure;
4675 }
4676 info->imm.value = 0;
4677 break;
4678
4679 case AARCH64_OPND_FPIMM0:
4680 {
4681 int qfloat;
4682 bfd_boolean res1 = FALSE, res2 = FALSE;
4683 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4684 it is probably not worth the effort to support it. */
4685 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat))
4686 && !(res2 = parse_constant_immediate (&str, &val)))
4687 goto failure;
4688 if ((res1 && qfloat == 0) || (res2 && val == 0))
4689 {
4690 info->imm.value = 0;
4691 info->imm.is_fp = 1;
4692 break;
4693 }
4694 set_fatal_syntax_error (_("immediate zero expected"));
4695 goto failure;
4696 }
4697
4698 case AARCH64_OPND_IMM_MOV:
4699 {
4700 char *saved = str;
4701 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4702 goto failure;
4703 str = saved;
4704 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4705 GE_OPT_PREFIX, 1));
4706 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4707 later. fix_mov_imm_insn will try to determine a machine
4708 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4709 message if the immediate cannot be moved by a single
4710 instruction. */
4711 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4712 inst.base.operands[i].skip = 1;
4713 }
4714 break;
4715
4716 case AARCH64_OPND_SIMD_IMM:
4717 case AARCH64_OPND_SIMD_IMM_SFT:
4718 if (! parse_big_immediate (&str, &val))
4719 goto failure;
4720 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4721 /* addr_off_p */ 0,
4722 /* need_libopcodes_p */ 1,
4723 /* skip_p */ 1);
4724 /* Parse shift.
4725 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4726 shift, we don't check it here; we leave the checking to
4727 the libopcodes (operand_general_constraint_met_p). By
4728 doing this, we achieve better diagnostics. */
4729 if (skip_past_comma (&str)
4730 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4731 goto failure;
4732 if (!info->shifter.operator_present
4733 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4734 {
4735 /* Default to LSL if not present. Libopcodes prefers shifter
4736 kind to be explicit. */
4737 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4738 info->shifter.kind = AARCH64_MOD_LSL;
4739 }
4740 break;
4741
4742 case AARCH64_OPND_FPIMM:
4743 case AARCH64_OPND_SIMD_FPIMM:
4744 {
4745 int qfloat;
4746 if (! parse_aarch64_imm_float (&str, &qfloat))
4747 goto failure;
4748 if (qfloat == 0)
4749 {
4750 set_fatal_syntax_error (_("invalid floating-point constant"));
4751 goto failure;
4752 }
4753 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4754 inst.base.operands[i].imm.is_fp = 1;
4755 }
4756 break;
4757
4758 case AARCH64_OPND_LIMM:
4759 po_misc_or_fail (parse_shifter_operand (&str, info,
4760 SHIFTED_LOGIC_IMM));
4761 if (info->shifter.operator_present)
4762 {
4763 set_fatal_syntax_error
4764 (_("shift not allowed for bitmask immediate"));
4765 goto failure;
4766 }
4767 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4768 /* addr_off_p */ 0,
4769 /* need_libopcodes_p */ 1,
4770 /* skip_p */ 1);
4771 break;
4772
4773 case AARCH64_OPND_AIMM:
4774 if (opcode->op == OP_ADD)
4775 /* ADD may have relocation types. */
4776 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4777 SHIFTED_ARITH_IMM));
4778 else
4779 po_misc_or_fail (parse_shifter_operand (&str, info,
4780 SHIFTED_ARITH_IMM));
4781 switch (inst.reloc.type)
4782 {
4783 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4784 info->shifter.amount = 12;
4785 break;
4786 case BFD_RELOC_UNUSED:
4787 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4788 if (info->shifter.kind != AARCH64_MOD_NONE)
4789 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4790 inst.reloc.pc_rel = 0;
4791 break;
4792 default:
4793 break;
4794 }
4795 info->imm.value = 0;
4796 if (!info->shifter.operator_present)
4797 {
4798 /* Default to LSL if not present. Libopcodes prefers shifter
4799 kind to be explicit. */
4800 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4801 info->shifter.kind = AARCH64_MOD_LSL;
4802 }
4803 break;
4804
4805 case AARCH64_OPND_HALF:
4806 {
4807 /* #<imm16> or relocation. */
4808 int internal_fixup_p;
4809 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4810 if (internal_fixup_p)
4811 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4812 skip_whitespace (str);
4813 if (skip_past_comma (&str))
4814 {
4815 /* {, LSL #<shift>} */
4816 if (! aarch64_gas_internal_fixup_p ())
4817 {
4818 set_fatal_syntax_error (_("can't mix relocation modifier "
4819 "with explicit shift"));
4820 goto failure;
4821 }
4822 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4823 }
4824 else
4825 inst.base.operands[i].shifter.amount = 0;
4826 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4827 inst.base.operands[i].imm.value = 0;
4828 if (! process_movw_reloc_info ())
4829 goto failure;
4830 }
4831 break;
4832
4833 case AARCH64_OPND_EXCEPTION:
4834 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4835 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4836 /* addr_off_p */ 0,
4837 /* need_libopcodes_p */ 0,
4838 /* skip_p */ 1);
4839 break;
4840
4841 case AARCH64_OPND_NZCV:
4842 {
4843 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4844 if (nzcv != NULL)
4845 {
4846 str += 4;
4847 info->imm.value = nzcv->value;
4848 break;
4849 }
4850 po_imm_or_fail (0, 15);
4851 info->imm.value = val;
4852 }
4853 break;
4854
4855 case AARCH64_OPND_COND:
4856 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4857 str += 2;
4858 if (info->cond == NULL)
4859 {
4860 set_syntax_error (_("invalid condition"));
4861 goto failure;
4862 }
4863 break;
4864
4865 case AARCH64_OPND_ADDR_ADRP:
4866 po_misc_or_fail (parse_adrp (&str));
4867 /* Clear the value as operand needs to be relocated. */
4868 info->imm.value = 0;
4869 break;
4870
4871 case AARCH64_OPND_ADDR_PCREL14:
4872 case AARCH64_OPND_ADDR_PCREL19:
4873 case AARCH64_OPND_ADDR_PCREL21:
4874 case AARCH64_OPND_ADDR_PCREL26:
4875 po_misc_or_fail (parse_address_reloc (&str, info));
4876 if (!info->addr.pcrel)
4877 {
4878 set_syntax_error (_("invalid pc-relative address"));
4879 goto failure;
4880 }
4881 if (inst.gen_lit_pool
4882 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4883 {
4884 /* Only permit "=value" in the literal load instructions.
4885 The literal will be generated by programmer_friendly_fixup. */
4886 set_syntax_error (_("invalid use of \"=immediate\""));
4887 goto failure;
4888 }
4889 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4890 {
4891 set_syntax_error (_("unrecognized relocation suffix"));
4892 goto failure;
4893 }
4894 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
4895 {
4896 info->imm.value = inst.reloc.exp.X_add_number;
4897 inst.reloc.type = BFD_RELOC_UNUSED;
4898 }
4899 else
4900 {
4901 info->imm.value = 0;
4902 if (inst.reloc.type == BFD_RELOC_UNUSED)
4903 switch (opcode->iclass)
4904 {
4905 case compbranch:
4906 case condbranch:
4907 /* e.g. CBZ or B.COND */
4908 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4909 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
4910 break;
4911 case testbranch:
4912 /* e.g. TBZ */
4913 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
4914 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
4915 break;
4916 case branch_imm:
4917 /* e.g. B or BL */
4918 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
4919 inst.reloc.type =
4920 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
4921 : BFD_RELOC_AARCH64_JUMP26;
4922 break;
4923 case loadlit:
4924 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4925 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
4926 break;
4927 case pcreladdr:
4928 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
4929 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
4930 break;
4931 default:
4932 gas_assert (0);
4933 abort ();
4934 }
4935 inst.reloc.pc_rel = 1;
4936 }
4937 break;
4938
4939 case AARCH64_OPND_ADDR_SIMPLE:
4940 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4941 /* [<Xn|SP>{, #<simm>}] */
4942 po_char_or_fail ('[');
4943 po_reg_or_fail (REG_TYPE_R64_SP);
4944 /* Accept optional ", #0". */
4945 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
4946 && skip_past_char (&str, ','))
4947 {
4948 skip_past_char (&str, '#');
4949 if (! skip_past_char (&str, '0'))
4950 {
4951 set_fatal_syntax_error
4952 (_("the optional immediate offset can only be 0"));
4953 goto failure;
4954 }
4955 }
4956 po_char_or_fail (']');
4957 info->addr.base_regno = val;
4958 break;
4959
4960 case AARCH64_OPND_ADDR_REGOFF:
4961 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
4962 po_misc_or_fail (parse_address (&str, info, 0));
4963 if (info->addr.pcrel || !info->addr.offset.is_reg
4964 || !info->addr.preind || info->addr.postind
4965 || info->addr.writeback)
4966 {
4967 set_syntax_error (_("invalid addressing mode"));
4968 goto failure;
4969 }
4970 if (!info->shifter.operator_present)
4971 {
4972 /* Default to LSL if not present. Libopcodes prefers shifter
4973 kind to be explicit. */
4974 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4975 info->shifter.kind = AARCH64_MOD_LSL;
4976 }
4977 /* Qualifier to be deduced by libopcodes. */
4978 break;
4979
4980 case AARCH64_OPND_ADDR_SIMM7:
4981 po_misc_or_fail (parse_address (&str, info, 0));
4982 if (info->addr.pcrel || info->addr.offset.is_reg
4983 || (!info->addr.preind && !info->addr.postind))
4984 {
4985 set_syntax_error (_("invalid addressing mode"));
4986 goto failure;
4987 }
4988 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4989 /* addr_off_p */ 1,
4990 /* need_libopcodes_p */ 1,
4991 /* skip_p */ 0);
4992 break;
4993
4994 case AARCH64_OPND_ADDR_SIMM9:
4995 case AARCH64_OPND_ADDR_SIMM9_2:
4996 po_misc_or_fail (parse_address_reloc (&str, info));
4997 if (info->addr.pcrel || info->addr.offset.is_reg
4998 || (!info->addr.preind && !info->addr.postind)
4999 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5000 && info->addr.writeback))
5001 {
5002 set_syntax_error (_("invalid addressing mode"));
5003 goto failure;
5004 }
5005 if (inst.reloc.type != BFD_RELOC_UNUSED)
5006 {
5007 set_syntax_error (_("relocation not allowed"));
5008 goto failure;
5009 }
5010 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5011 /* addr_off_p */ 1,
5012 /* need_libopcodes_p */ 1,
5013 /* skip_p */ 0);
5014 break;
5015
5016 case AARCH64_OPND_ADDR_UIMM12:
5017 po_misc_or_fail (parse_address_reloc (&str, info));
5018 if (info->addr.pcrel || info->addr.offset.is_reg
5019 || !info->addr.preind || info->addr.writeback)
5020 {
5021 set_syntax_error (_("invalid addressing mode"));
5022 goto failure;
5023 }
5024 if (inst.reloc.type == BFD_RELOC_UNUSED)
5025 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5026 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5027 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5028 /* Leave qualifier to be determined by libopcodes. */
5029 break;
5030
5031 case AARCH64_OPND_SIMD_ADDR_POST:
5032 /* [<Xn|SP>], <Xm|#<amount>> */
5033 po_misc_or_fail (parse_address (&str, info, 1));
5034 if (!info->addr.postind || !info->addr.writeback)
5035 {
5036 set_syntax_error (_("invalid addressing mode"));
5037 goto failure;
5038 }
5039 if (!info->addr.offset.is_reg)
5040 {
5041 if (inst.reloc.exp.X_op == O_constant)
5042 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5043 else
5044 {
5045 set_fatal_syntax_error
5046 (_("writeback value should be an immediate constant"));
5047 goto failure;
5048 }
5049 }
5050 /* No qualifier. */
5051 break;
5052
5053 case AARCH64_OPND_SYSREG:
5054 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5055 == PARSE_FAIL)
5056 {
5057 set_syntax_error (_("unknown or missing system register name"));
5058 goto failure;
5059 }
5060 inst.base.operands[i].sysreg = val;
5061 break;
5062
5063 case AARCH64_OPND_PSTATEFIELD:
5064 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5065 == PARSE_FAIL)
5066 {
5067 set_syntax_error (_("unknown or missing PSTATE field name"));
5068 goto failure;
5069 }
5070 inst.base.operands[i].pstatefield = val;
5071 break;
5072
5073 case AARCH64_OPND_SYSREG_IC:
5074 inst.base.operands[i].sysins_op =
5075 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5076 goto sys_reg_ins;
5077 case AARCH64_OPND_SYSREG_DC:
5078 inst.base.operands[i].sysins_op =
5079 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5080 goto sys_reg_ins;
5081 case AARCH64_OPND_SYSREG_AT:
5082 inst.base.operands[i].sysins_op =
5083 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5084 goto sys_reg_ins;
5085 case AARCH64_OPND_SYSREG_TLBI:
5086 inst.base.operands[i].sysins_op =
5087 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5088 sys_reg_ins:
5089 if (inst.base.operands[i].sysins_op == NULL)
5090 {
5091 set_fatal_syntax_error ( _("unknown or missing operation name"));
5092 goto failure;
5093 }
5094 break;
5095
5096 case AARCH64_OPND_BARRIER:
5097 case AARCH64_OPND_BARRIER_ISB:
5098 val = parse_barrier (&str);
5099 if (val != PARSE_FAIL
5100 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5101 {
5102 /* ISB only accepts the option name 'sy'. */
5103 set_syntax_error
5104 (_("the specified option is not accepted in ISB"));
5105 /* Turn off backtrack as this optional operand is present. */
5106 backtrack_pos = 0;
5107 goto failure;
5108 }
5109 /* This is an extension to accept a 0..15 immediate. */
5110 if (val == PARSE_FAIL)
5111 po_imm_or_fail (0, 15);
5112 info->barrier = aarch64_barrier_options + val;
5113 break;
5114
5115 case AARCH64_OPND_PRFOP:
5116 val = parse_pldop (&str);
5117 /* This is an extension to accept a 0..31 immediate. */
5118 if (val == PARSE_FAIL)
5119 po_imm_or_fail (0, 31);
5120 inst.base.operands[i].prfop = aarch64_prfops + val;
5121 break;
5122
5123 default:
5124 as_fatal (_("unhandled operand code %d"), operands[i]);
5125 }
5126
5127 /* If we get here, this operand was successfully parsed. */
5128 inst.base.operands[i].present = 1;
5129 continue;
5130
5131 failure:
5132 /* The parse routine should already have set the error, but in case
5133 not, set a default one here. */
5134 if (! error_p ())
5135 set_default_error ();
5136
5137 if (! backtrack_pos)
5138 goto parse_operands_return;
5139
5140 /* Reaching here means we are dealing with an optional operand that is
5141 omitted from the assembly line. */
5142 gas_assert (optional_operand_p (opcode, i));
5143 info->present = 0;
5144 process_omitted_operand (operands[i], opcode, i, info);
5145
5146 /* Try again, skipping the optional operand at backtrack_pos. */
5147 str = backtrack_pos;
5148 backtrack_pos = 0;
5149
5150 /* If this is the last operand, which is optional and omitted, then the
5151 comma that preceded it is unexpected; report an error. */
5152 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5153 {
5154 set_fatal_syntax_error
5155 (_("unexpected comma before the omitted optional operand"));
5156 goto parse_operands_return;
5157 }
5158
5159 /* Clear any error record after the omitted optional operand has been
5160 successfully handled. */
5161 clear_error ();
5162 }
5163
5164 /* Check if we have parsed all the operands. */
5165 if (*str != '\0' && ! error_p ())
5166 {
5167 /* Set I to the index of the last present operand; this is
5168 for the purpose of diagnostics. */
5169 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5170 ;
5171 set_fatal_syntax_error
5172 (_("unexpected characters following instruction"));
5173 }
5174
5175 parse_operands_return:
5176
5177 if (error_p ())
5178 {
5179 DEBUG_TRACE ("parsing FAIL: %s - %s",
5180 operand_mismatch_kind_names[get_error_kind ()],
5181 get_error_message ());
5182 /* Record the operand error properly; this is useful when there
5183 are multiple instruction templates for a mnemonic name, so that
5184 later on, we can select the error that most closely describes
5185 the problem. */
5186 record_operand_error (opcode, i, get_error_kind (),
5187 get_error_message ());
5188 return FALSE;
5189 }
5190 else
5191 {
5192 DEBUG_TRACE ("parsing SUCCESS");
5193 return TRUE;
5194 }
5195 }
5196
5197 /* Carry out fix-ups that provide programmer-friendly behaviour while
5198 keeping libopcodes happy, i.e. libopcodes only accepts
5199 the preferred architectural syntax.
5200 Return FALSE if there is any failure; otherwise return TRUE. */
5201
5202 static bfd_boolean
5203 programmer_friendly_fixup (aarch64_instruction *instr)
5204 {
5205 aarch64_inst *base = &instr->base;
5206 const aarch64_opcode *opcode = base->opcode;
5207 enum aarch64_op op = opcode->op;
5208 aarch64_opnd_info *operands = base->operands;
5209
5210 DEBUG_TRACE ("enter");
5211
5212 switch (opcode->iclass)
5213 {
5214 case testbranch:
5215 /* TBNZ Xn|Wn, #uimm6, label
5216 Test and Branch Not Zero: conditionally jumps to label if bit number
5217 uimm6 in register Xn is not zero. The bit number implies the width of
5218 the register, which may be written and should be disassembled as Wn if
5219 uimm is less than 32. */
5220 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5221 {
5222 if (operands[1].imm.value >= 32)
5223 {
5224 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5225 0, 31);
5226 return FALSE;
5227 }
5228 operands[0].qualifier = AARCH64_OPND_QLF_X;
5229 }
5230 break;
5231 case loadlit:
5232 /* LDR Wt, label | =value
5233 As a convenience, assemblers will typically permit the notation
5234 "=value" in conjunction with the pc-relative literal load instructions
5235 to automatically place an immediate value or symbolic address in a
5236 nearby literal pool and generate a hidden label which references it.
5237 ISREG has been set to 0 in the case of =value. */
5238 if (instr->gen_lit_pool
5239 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5240 {
5241 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5242 if (op == OP_LDRSW_LIT)
5243 size = 4;
5244 if (instr->reloc.exp.X_op != O_constant
5245 && instr->reloc.exp.X_op != O_big
5246 && instr->reloc.exp.X_op != O_symbol)
5247 {
5248 record_operand_error (opcode, 1,
5249 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5250 _("constant expression expected"));
5251 return FALSE;
5252 }
5253 if (! add_to_lit_pool (&instr->reloc.exp, size))
5254 {
5255 record_operand_error (opcode, 1,
5256 AARCH64_OPDE_OTHER_ERROR,
5257 _("literal pool insertion failed"));
5258 return FALSE;
5259 }
5260 }
5261 break;
5262 case asimdimm:
5263 /* Allow MOVI V0.16B, 97, LSL 0, although the preferred architectural
5264 syntax requires that the LSL shifter can only be used when the
5265 destination register has the shape of 4H, 8H, 2S or 4S. */
5266 if (op == OP_V_MOVI_B && operands[1].shifter.kind == AARCH64_MOD_LSL
5267 && (operands[0].qualifier == AARCH64_OPND_QLF_V_8B
5268 || operands[0].qualifier == AARCH64_OPND_QLF_V_16B))
5269 {
5270 if (operands[1].shifter.amount != 0)
5271 {
5272 record_operand_error (opcode, 1,
5273 AARCH64_OPDE_OTHER_ERROR,
5274 _("shift amount non-zero"));
5275 return FALSE;
5276 }
5277 operands[1].shifter.kind = AARCH64_MOD_NONE;
5278 operands[1].qualifier = AARCH64_OPND_QLF_NIL;
5279 }
5280 break;
5281 case log_shift:
5282 case bitfield:
5283 /* UXT[BHW] Wd, Wn
5284 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
5285 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
5286 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5287 A programmer-friendly assembler should accept a destination Xd in
5288 place of Wd, however that is not the preferred form for disassembly.
5289 */
5290 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5291 && operands[1].qualifier == AARCH64_OPND_QLF_W
5292 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5293 operands[0].qualifier = AARCH64_OPND_QLF_W;
5294 break;
5295
5296 case addsub_ext:
5297 {
5298 /* In the 64-bit form, the final register operand is written as Wm
5299 for all but the (possibly omitted) UXTX/LSL and SXTX
5300 operators.
5301 As a programmer-friendly assembler, we accept e.g.
5302 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5303 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5304 int idx = aarch64_operand_index (opcode->operands,
5305 AARCH64_OPND_Rm_EXT);
5306 gas_assert (idx == 1 || idx == 2);
5307 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5308 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5309 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5310 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5311 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5312 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5313 }
5314 break;
5315
5316 default:
5317 break;
5318 }
5319
5320 DEBUG_TRACE ("exit with SUCCESS");
5321 return TRUE;
5322 }
5323
5324 /* A wrapper function to interface with libopcodes on encoding and
5325 record the error message if there is any.
5326
5327 Return TRUE on success; otherwise return FALSE. */
5328
5329 static bfd_boolean
5330 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5331 aarch64_insn *code)
5332 {
5333 aarch64_operand_error error_info;
5334 error_info.kind = AARCH64_OPDE_NIL;
5335 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5336 return TRUE;
5337 else
5338 {
5339 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5340 record_operand_error_info (opcode, &error_info);
5341 return FALSE;
5342 }
5343 }
5344
5345 #ifdef DEBUG_AARCH64
5346 static inline void
5347 dump_opcode_operands (const aarch64_opcode *opcode)
5348 {
5349 int i = 0;
5350 while (opcode->operands[i] != AARCH64_OPND_NIL)
5351 {
5352 aarch64_verbose ("\t\t opnd%d: %s", i,
5353 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5354 ? aarch64_get_operand_name (opcode->operands[i])
5355 : aarch64_get_operand_desc (opcode->operands[i]));
5356 ++i;
5357 }
5358 }
5359 #endif /* DEBUG_AARCH64 */
5360
5361 /* This is the guts of the machine-dependent assembler. STR points to a
5362 machine dependent instruction. This function is supposed to emit
5363 the frags/bytes it assembles to. */
5364
5365 void
5366 md_assemble (char *str)
5367 {
5368 char *p = str;
5369 templates *template;
5370 aarch64_opcode *opcode;
5371 aarch64_inst *inst_base;
5372 unsigned saved_cond;
5373
5374 /* Align the previous label if needed. */
5375 if (last_label_seen != NULL)
5376 {
5377 symbol_set_frag (last_label_seen, frag_now);
5378 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5379 S_SET_SEGMENT (last_label_seen, now_seg);
5380 }
5381
5382 inst.reloc.type = BFD_RELOC_UNUSED;
5383
5384 DEBUG_TRACE ("\n\n");
5385 DEBUG_TRACE ("==============================");
5386 DEBUG_TRACE ("Enter md_assemble with %s", str);
5387
5388 template = opcode_lookup (&p);
5389 if (!template)
5390 {
5391 /* It wasn't an instruction, but it might be a register alias of
5392 the form "alias .req reg". */
5393 if (!create_register_alias (str, p))
5394 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5395 str);
5396 return;
5397 }
5398
5399 skip_whitespace (p);
5400 if (*p == ',')
5401 {
5402 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5403 get_mnemonic_name (str), str);
5404 return;
5405 }
5406
5407 init_operand_error_report ();
5408
5409 saved_cond = inst.cond;
5410 reset_aarch64_instruction (&inst);
5411 inst.cond = saved_cond;
5412
5413 /* Iterate through all opcode entries with the same mnemonic name. */
5414 do
5415 {
5416 opcode = template->opcode;
5417
5418 DEBUG_TRACE ("opcode %s found", opcode->name);
5419 #ifdef DEBUG_AARCH64
5420 if (debug_dump)
5421 dump_opcode_operands (opcode);
5422 #endif /* DEBUG_AARCH64 */
5423
5424 /* Check that this instruction is supported for this CPU. */
5425 if (!opcode->avariant
5426 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5427 {
5428 as_bad (_("selected processor does not support `%s'"), str);
5429 return;
5430 }
5431
5432 mapping_state (MAP_INSN);
5433
5434 inst_base = &inst.base;
5435 inst_base->opcode = opcode;
5436
5437 /* Truly conditionally executed instructions, e.g. b.cond. */
5438 if (opcode->flags & F_COND)
5439 {
5440 gas_assert (inst.cond != COND_ALWAYS);
5441 inst_base->cond = get_cond_from_value (inst.cond);
5442 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5443 }
5444 else if (inst.cond != COND_ALWAYS)
5445 {
5446 /* We shouldn't get here: the assembly looks like a conditional
5447 instruction but the opcode found is unconditional. */
5448 gas_assert (0);
5449 continue;
5450 }
5451
5452 if (parse_operands (p, opcode)
5453 && programmer_friendly_fixup (&inst)
5454 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5455 {
5456 if (inst.reloc.type == BFD_RELOC_UNUSED
5457 || !inst.reloc.need_libopcodes_p)
5458 output_inst (NULL);
5459 else
5460 {
5461 /* If there is relocation generated for the instruction,
5462 store the instruction information for the future fix-up. */
5463 struct aarch64_inst *copy;
5464 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5465 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5466 abort ();
5467 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5468 output_inst (copy);
5469 }
5470 return;
5471 }
5472
5473 template = template->next;
5474 if (template != NULL)
5475 {
5476 reset_aarch64_instruction (&inst);
5477 inst.cond = saved_cond;
5478 }
5479 }
5480 while (template != NULL);
5481
5482 /* Issue the error messages if any. */
5483 output_operand_error_report (str);
5484 }
5485
5486 /* Various frobbings of labels and their addresses. */
5487
5488 void
5489 aarch64_start_line_hook (void)
5490 {
5491 last_label_seen = NULL;
5492 }
5493
5494 void
5495 aarch64_frob_label (symbolS * sym)
5496 {
5497 last_label_seen = sym;
5498
5499 dwarf2_emit_label (sym);
5500 }
5501
5502 int
5503 aarch64_data_in_code (void)
5504 {
5505 if (!strncmp (input_line_pointer + 1, "data:", 5))
5506 {
5507 *input_line_pointer = '/';
5508 input_line_pointer += 5;
5509 *input_line_pointer = 0;
5510 return 1;
5511 }
5512
5513 return 0;
5514 }
5515
5516 char *
5517 aarch64_canonicalize_symbol_name (char *name)
5518 {
5519 int len;
5520
5521 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5522 *(name + len - 5) = 0;
5523
5524 return name;
5525 }
5526 \f
5527 /* Table of all register names defined by default. The user can
5528 define additional names with .req. Note that all register names
5529 should appear in both upper and lowercase variants. Some registers
5530 also have mixed-case names. */
5531
5532 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5533 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5534 #define REGSET31(p,t) \
5535 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5536 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5537 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5538 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5539 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5540 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5541 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5542 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5543 #define REGSET(p,t) \
5544 REGSET31(p,t), REGNUM(p,31,t)
5545
5546 /* These go into aarch64_reg_hsh hash-table. */
5547 static const reg_entry reg_names[] = {
5548 /* Integer registers. */
5549 REGSET31 (x, R_64), REGSET31 (X, R_64),
5550 REGSET31 (w, R_32), REGSET31 (W, R_32),
5551
5552 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5553 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5554
5555 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5556 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5557
5558 /* Coprocessor register numbers. */
5559 REGSET (c, CN), REGSET (C, CN),
5560
5561 /* Floating-point single precision registers. */
5562 REGSET (s, FP_S), REGSET (S, FP_S),
5563
5564 /* Floating-point double precision registers. */
5565 REGSET (d, FP_D), REGSET (D, FP_D),
5566
5567 /* Floating-point half precision registers. */
5568 REGSET (h, FP_H), REGSET (H, FP_H),
5569
5570 /* Floating-point byte precision registers. */
5571 REGSET (b, FP_B), REGSET (B, FP_B),
5572
5573 /* Floating-point quad precision registers. */
5574 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5575
5576 /* FP/SIMD registers. */
5577 REGSET (v, VN), REGSET (V, VN),
5578 };
5579
5580 #undef REGDEF
5581 #undef REGNUM
5582 #undef REGSET
5583
5584 #define N 1
5585 #define n 0
5586 #define Z 1
5587 #define z 0
5588 #define C 1
5589 #define c 0
5590 #define V 1
5591 #define v 0
5592 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5593 static const asm_nzcv nzcv_names[] = {
5594 {"nzcv", B (n, z, c, v)},
5595 {"nzcV", B (n, z, c, V)},
5596 {"nzCv", B (n, z, C, v)},
5597 {"nzCV", B (n, z, C, V)},
5598 {"nZcv", B (n, Z, c, v)},
5599 {"nZcV", B (n, Z, c, V)},
5600 {"nZCv", B (n, Z, C, v)},
5601 {"nZCV", B (n, Z, C, V)},
5602 {"Nzcv", B (N, z, c, v)},
5603 {"NzcV", B (N, z, c, V)},
5604 {"NzCv", B (N, z, C, v)},
5605 {"NzCV", B (N, z, C, V)},
5606 {"NZcv", B (N, Z, c, v)},
5607 {"NZcV", B (N, Z, c, V)},
5608 {"NZCv", B (N, Z, C, v)},
5609 {"NZCV", B (N, Z, C, V)}
5610 };
5611
5612 #undef N
5613 #undef n
5614 #undef Z
5615 #undef z
5616 #undef C
5617 #undef c
5618 #undef V
5619 #undef v
5620 #undef B
5621 \f
5622 /* MD interface: bits in the object file. */
5623
5624 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5625 for use in the a.out file, and stores them in the array pointed to by buf.
5626 This knows about the endian-ness of the target machine and does
5627 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
5628 2 (short) and 4 (long). Floating numbers are put out as a series of
5629 LITTLENUMS (shorts, here at least). */
5630
5631 void
5632 md_number_to_chars (char *buf, valueT val, int n)
5633 {
5634 if (target_big_endian)
5635 number_to_chars_bigendian (buf, val, n);
5636 else
5637 number_to_chars_littleendian (buf, val, n);
5638 }
5639
5640 /* MD interface: Sections. */
5641
5642 /* Estimate the size of a frag before relaxing. Assume everything fits in
5643 4 bytes. */
5644
5645 int
5646 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5647 {
5648 fragp->fr_var = 4;
5649 return 4;
5650 }
5651
5652 /* Round up a section size to the appropriate boundary. */
5653
5654 valueT
5655 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5656 {
5657 return size;
5658 }
5659
5660 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5661 of an rs_align_code fragment. */
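/* For example, when 6 bytes of padding are required, the 2 bytes that
   cannot hold a whole NOP are zero-filled (and marked as data under ELF)
   and the remaining 4 bytes are filled with a single NOP.  */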
5662
5663 void
5664 aarch64_handle_align (fragS * fragP)
5665 {
5666 /* NOP = d503201f */
5667 /* AArch64 instructions are always little-endian. */
5668 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5669
5670 int bytes, fix, noop_size;
5671 char *p;
5672 const char *noop;
5673
5674 if (fragP->fr_type != rs_align_code)
5675 return;
5676
5677 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5678 p = fragP->fr_literal + fragP->fr_fix;
5679 fix = 0;
5680
5681 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5682 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5683
5684 #ifdef OBJ_ELF
5685 gas_assert (fragP->tc_frag_data.recorded);
5686 #endif
5687
5688 noop = aarch64_noop;
5689 noop_size = sizeof (aarch64_noop);
5690 fragP->fr_var = noop_size;
5691
5692 if (bytes & (noop_size - 1))
5693 {
5694 fix = bytes & (noop_size - 1);
5695 #ifdef OBJ_ELF
5696 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5697 #endif
5698 memset (p, 0, fix);
5699 p += fix;
5700 bytes -= fix;
5701 }
5702
5703 while (bytes >= noop_size)
5704 {
5705 memcpy (p, noop, noop_size);
5706 p += noop_size;
5707 bytes -= noop_size;
5708 fix += noop_size;
5709 }
5710
5711 fragP->fr_fix += fix;
5712 }
5713
5714 /* Called from md_do_align. Used to create an alignment
5715 frag in a code section. */
5716
5717 void
5718 aarch64_frag_align_code (int n, int max)
5719 {
5720 char *p;
5721
5722 /* We assume that there will never be a requirement
5723 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5724 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5725 as_fatal (_
5726 ("alignments greater than %d bytes not supported in .text sections"),
5727 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5728
5729 p = frag_var (rs_align_code,
5730 MAX_MEM_FOR_RS_ALIGN_CODE,
5731 1,
5732 (relax_substateT) max,
5733 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5734 *p = 0;
5735 }
5736
5737 /* Perform target specific initialisation of a frag.
5738 Note - despite the name this initialisation is not done when the frag
5739 is created, but only when its type is assigned. A frag can be created
5740 and used a long time before its type is set, so beware of assuming that
5741 this initialisation is performed first. */
5742
5743 #ifndef OBJ_ELF
5744 void
5745 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5746 int max_chars ATTRIBUTE_UNUSED)
5747 {
5748 }
5749
5750 #else /* OBJ_ELF is defined. */
5751 void
5752 aarch64_init_frag (fragS * fragP, int max_chars)
5753 {
5754 /* Record a mapping symbol for alignment frags. We will delete this
5755 later if the alignment ends up empty. */
5756 if (!fragP->tc_frag_data.recorded)
5757 {
5758 fragP->tc_frag_data.recorded = 1;
5759 switch (fragP->fr_type)
5760 {
5761 case rs_align:
5762 case rs_align_test:
5763 case rs_fill:
5764 mapping_state_2 (MAP_DATA, max_chars);
5765 break;
5766 case rs_align_code:
5767 mapping_state_2 (MAP_INSN, max_chars);
5768 break;
5769 default:
5770 break;
5771 }
5772 }
5773 }
5774 \f
5775 /* Initialize the DWARF-2 unwind information for this procedure. */
5776
5777 void
5778 tc_aarch64_frame_initial_instructions (void)
5779 {
5780 cfi_add_CFA_def_cfa (REG_SP, 0);
5781 }
5782 #endif /* OBJ_ELF */
5783
5784 /* Convert REGNAME to a DWARF-2 register number. */
5785
5786 int
5787 tc_aarch64_regname_to_dw2regnum (char *regname)
5788 {
5789 const reg_entry *reg = parse_reg (&regname);
5790 if (reg == NULL)
5791 return -1;
5792
5793 switch (reg->type)
5794 {
5795 case REG_TYPE_SP_32:
5796 case REG_TYPE_SP_64:
5797 case REG_TYPE_R_32:
5798 case REG_TYPE_R_64:
5799 case REG_TYPE_FP_B:
5800 case REG_TYPE_FP_H:
5801 case REG_TYPE_FP_S:
5802 case REG_TYPE_FP_D:
5803 case REG_TYPE_FP_Q:
5804 return reg->number;
5805 default:
5806 break;
5807 }
5808 return -1;
5809 }
5810
5811 /* MD interface: Symbol and relocation handling. */
5812
5813 /* Return the address within the segment that a PC-relative fixup is
5814 relative to. For AArch64 PC-relative fixups applied to instructions
5815 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5816
5817 long
5818 md_pcrel_from_section (fixS * fixP, segT seg)
5819 {
5820 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5821
5822 /* If this is pc-relative and we are going to emit a relocation
5823 then we just want to put out any pipeline compensation that the linker
5824 will need. Otherwise we want to use the calculated base. */
5825 if (fixP->fx_pcrel
5826 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5827 || aarch64_force_relocation (fixP)))
5828 base = 0;
5829
5830 /* AArch64 should be consistent for all pc-relative relocations. */
5831 return base + AARCH64_PCREL_OFFSET;
5832 }
5833
5834 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5835 Otherwise we have no need to default values of symbols. */
5836
5837 symbolS *
5838 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5839 {
5840 #ifdef OBJ_ELF
5841 if (name[0] == '_' && name[1] == 'G'
5842 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5843 {
5844 if (!GOT_symbol)
5845 {
5846 if (symbol_find (name))
5847 as_bad (_("GOT already in the symbol table"));
5848
5849 GOT_symbol = symbol_new (name, undefined_section,
5850 (valueT) 0, &zero_address_frag);
5851 }
5852
5853 return GOT_symbol;
5854 }
5855 #endif
5856
5857 return 0;
5858 }
5859
5860 /* Return non-zero if the indicated VALUE has overflowed the maximum
5861 range expressible by an unsigned number with the indicated number of
5862 BITS. */
5863
5864 static bfd_boolean
5865 unsigned_overflow (valueT value, unsigned bits)
5866 {
5867 valueT lim;
5868 if (bits >= sizeof (valueT) * 8)
5869 return FALSE;
5870 lim = (valueT) 1 << bits;
5871 return (value >= lim);
5872 }
5873
5874
5875 /* Return non-zero if the indicated VALUE has overflowed the maximum
5876 range expressible by a signed number with the indicated number of
5877 BITS. */
5878
5879 static bfd_boolean
5880 signed_overflow (offsetT value, unsigned bits)
5881 {
5882 offsetT lim;
5883 if (bits >= sizeof (offsetT) * 8)
5884 return FALSE;
5885 lim = (offsetT) 1 << (bits - 1);
5886 return (value < -lim || value >= lim);
5887 }
5888
5889 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5890 unsigned immediate offset load/store instruction, try to encode it as
5891 an unscaled, 9-bit, signed immediate offset load/store instruction.
5892 Return TRUE if it is successful; otherwise return FALSE.
5893
5894 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
5895 in response to the standard LDR/STR mnemonics when the immediate offset is
5896 unambiguous, i.e. when it is negative or unaligned. */
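/* For example, "ldr x0, [x1, #1]" cannot be encoded as a scaled 12-bit
   offset load because 1 is not a multiple of 8, so the fix-up below
   retries it as "ldur x0, [x1, #1]".  */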
5897
5898 static bfd_boolean
5899 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
5900 {
5901 int idx;
5902 enum aarch64_op new_op;
5903 const aarch64_opcode *new_opcode;
5904
5905 gas_assert (instr->opcode->iclass == ldst_pos);
5906
5907 switch (instr->opcode->op)
5908 {
5909 case OP_LDRB_POS:new_op = OP_LDURB; break;
5910 case OP_STRB_POS: new_op = OP_STURB; break;
5911 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
5912 case OP_LDRH_POS: new_op = OP_LDURH; break;
5913 case OP_STRH_POS: new_op = OP_STURH; break;
5914 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
5915 case OP_LDR_POS: new_op = OP_LDUR; break;
5916 case OP_STR_POS: new_op = OP_STUR; break;
5917 case OP_LDRF_POS: new_op = OP_LDURV; break;
5918 case OP_STRF_POS: new_op = OP_STURV; break;
5919 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
5920 case OP_PRFM_POS: new_op = OP_PRFUM; break;
5921 default: new_op = OP_NIL; break;
5922 }
5923
5924 if (new_op == OP_NIL)
5925 return FALSE;
5926
5927 new_opcode = aarch64_get_opcode (new_op);
5928 gas_assert (new_opcode != NULL);
5929
5930 DEBUG_TRACE ("Convert programmer-friendly LDRB/STRB -> LDURB/STURB: %d -> %d",
5931 instr->opcode->op, new_opcode->op);
5932
5933 aarch64_replace_opcode (instr, new_opcode);
5934
5935 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
5936 qualifier matching may fail because the out-of-date qualifier will
5937 prevent the operand being updated with a new and correct qualifier. */
5938 idx = aarch64_operand_index (instr->opcode->operands,
5939 AARCH64_OPND_ADDR_SIMM9);
5940 gas_assert (idx == 1);
5941 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
5942
5943 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
5944
5945 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
5946 return FALSE;
5947
5948 return TRUE;
5949 }
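/* A rough example of the conversion above (illustrative only; exact ranges
   per the architecture reference):

     ldr x1, [x0, #-8]   negative offset, not encodable in the scaled
                         unsigned-offset form -> emitted as ldur x1, [x0, #-8]
     ldr x1, [x0, #6]    offset not a multiple of the transfer size
                         -> emitted as ldur x1, [x0, #6]
     ldr x1, [x0, #8]    already valid for the scaled LDR form; no
                         conversion is attempted

   The unscaled forms take a signed 9-bit byte offset (-256..255).  */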
5950
5951 /* Called by fix_insn to fix a MOV immediate alias instruction.
5952
5953 Operand for a generic move immediate instruction, which is an alias
5954 instruction that generates a single MOVZ, MOVN or ORR instruction to load
5955 a 32-bit/64-bit immediate value into a general register. An assembler
5956 error shall result if the immediate cannot be created by a single one of
5957 these instructions. If there is a choice, then to ensure reversibility an
5958 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
5959
5960 static void
5961 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
5962 {
5963 const aarch64_opcode *opcode;
5964
5965 /* Need to check if the destination is SP/ZR. The check has to be done
5966 before any aarch64_replace_opcode. */
5967 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
5968 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
5969
5970 instr->operands[1].imm.value = value;
5971 instr->operands[1].skip = 0;
5972
5973 if (try_mov_wide_p)
5974 {
5975 /* Try the MOVZ alias. */
5976 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
5977 aarch64_replace_opcode (instr, opcode);
5978 if (aarch64_opcode_encode (instr->opcode, instr,
5979 &instr->value, NULL, NULL))
5980 {
5981 put_aarch64_insn (buf, instr->value);
5982 return;
5983 }
5984 /* Try the MOVN alias. */
5985 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
5986 aarch64_replace_opcode (instr, opcode);
5987 if (aarch64_opcode_encode (instr->opcode, instr,
5988 &instr->value, NULL, NULL))
5989 {
5990 put_aarch64_insn (buf, instr->value);
5991 return;
5992 }
5993 }
5994
5995 if (try_mov_bitmask_p)
5996 {
5997 /* Try the ORR alias. */
5998 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
5999 aarch64_replace_opcode (instr, opcode);
6000 if (aarch64_opcode_encode (instr->opcode, instr,
6001 &instr->value, NULL, NULL))
6002 {
6003 put_aarch64_insn (buf, instr->value);
6004 return;
6005 }
6006 }
6007
6008 as_bad_where (fixP->fx_file, fixP->fx_line,
6009 _("immediate cannot be moved by a single instruction"));
6010 }
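/* Illustrative sketch of the alias preference implemented above (the same
   rule applies whether the immediate is a literal or resolved later via a
   fix-up; encodings shown informally):

     mov x0, #0x12340000          -> movz x0, #0x1234, lsl #16
     mov x0, #-2                  -> movn x0, #1        (OP_MOV_IMM_WIDEN)
     mov x0, #0x5555555555555555  -> orr x0, xzr, #0x5555555555555555
     mov sp, #0xff00ff00ff00ff00  -> only the ORR form is tried, since
                                     MOVZ/MOVN cannot write to SP
     mov x0, #0x12345678          -> "immediate cannot be moved by a
                                     single instruction"  */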
6011
6012 /* An immediate-related instruction operand may have a symbol used
6013 in the assembly, e.g.
6014
6015 mov w0, u32
6016 .set u32, 0x00ffff00
6017
6018 At the time when the assembly instruction is parsed, a referenced symbol,
6019 like 'u32' in the above example, may not have been seen; a fixS is created
6020 in such a case and is handled here after symbols have been resolved.
6021 The instruction is fixed up with VALUE using the information in *FIXP plus
6022 extra information in FLAGS.
6023
6024 This function is called by md_apply_fix to fix up instructions that need
6025 a fix-up described above but do not involve any linker-time relocation. */
6026
6027 static void
6028 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6029 {
6030 int idx;
6031 uint32_t insn;
6032 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6033 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6034 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6035
6036 if (new_inst)
6037 {
6038 /* Now the instruction is about to be fixed-up, so the operand that
6039 was previously marked as 'ignored' needs to be unmarked in order
6040 to get the encoding done properly. */
6041 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6042 new_inst->operands[idx].skip = 0;
6043 }
6044
6045 gas_assert (opnd != AARCH64_OPND_NIL);
6046
6047 switch (opnd)
6048 {
6049 case AARCH64_OPND_EXCEPTION:
6050 if (unsigned_overflow (value, 16))
6051 as_bad_where (fixP->fx_file, fixP->fx_line,
6052 _("immediate out of range"));
6053 insn = get_aarch64_insn (buf);
6054 insn |= encode_svc_imm (value);
6055 put_aarch64_insn (buf, insn);
6056 break;
6057
6058 case AARCH64_OPND_AIMM:
6059 /* ADD or SUB with immediate.
6060 NOTE this assumes we come here with an add/sub shifted reg encoding
6061 3 322|2222|2 2 2 21111 111111
6062 1 098|7654|3 2 1 09876 543210 98765 43210
6063 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6064 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6065 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6066 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6067 ->
6068 3 322|2222|2 2 221111111111
6069 1 098|7654|3 2 109876543210 98765 43210
6070 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6071 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6072 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6073 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6074 Fields sf Rn Rd are already set. */
6075 insn = get_aarch64_insn (buf);
6076 if (value < 0)
6077 {
6078 /* Add <-> sub. */
6079 insn = reencode_addsub_switch_add_sub (insn);
6080 value = -value;
6081 }
6082
6083 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6084 && unsigned_overflow (value, 12))
6085 {
6086 /* Try to shift the value by 12 to make it fit. */
6087 if (((value >> 12) << 12) == value
6088 && ! unsigned_overflow (value, 12 + 12))
6089 {
6090 value >>= 12;
6091 insn |= encode_addsub_imm_shift_amount (1);
6092 }
6093 }
6094
6095 if (unsigned_overflow (value, 12))
6096 as_bad_where (fixP->fx_file, fixP->fx_line,
6097 _("immediate out of range"));
6098
6099 insn |= encode_addsub_imm (value);
6100
6101 put_aarch64_insn (buf, insn);
6102 break;
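/* A rough example of the AIMM fix-up above (illustrative only): if the
   immediate of an ADD resolves to 0x12000, it does not fit in 12 bits,
   but its low 12 bits are clear and it fits in 24 bits, so it is encoded
   as "add Xd, Xn, #0x12, lsl #12".  If it resolves to -4, the add/sub
   bit is flipped and the result is encoded as "sub Xd, Xn, #4".  */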
6103
6104 case AARCH64_OPND_SIMD_IMM:
6105 case AARCH64_OPND_SIMD_IMM_SFT:
6106 case AARCH64_OPND_LIMM:
6107 /* Bit mask immediate. */
6108 gas_assert (new_inst != NULL);
6109 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6110 new_inst->operands[idx].imm.value = value;
6111 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6112 &new_inst->value, NULL, NULL))
6113 put_aarch64_insn (buf, new_inst->value);
6114 else
6115 as_bad_where (fixP->fx_file, fixP->fx_line,
6116 _("invalid immediate"));
6117 break;
6118
6119 case AARCH64_OPND_HALF:
6120 /* 16-bit unsigned immediate. */
6121 if (unsigned_overflow (value, 16))
6122 as_bad_where (fixP->fx_file, fixP->fx_line,
6123 _("immediate out of range"));
6124 insn = get_aarch64_insn (buf);
6125 insn |= encode_movw_imm (value & 0xffff);
6126 put_aarch64_insn (buf, insn);
6127 break;
6128
6129 case AARCH64_OPND_IMM_MOV:
6130 /* Operand for a generic move immediate instruction, which is
6131 an alias instruction that generates a single MOVZ, MOVN or ORR
6132 instruction to load a 32-bit/64-bit immediate value into a general
6133 register. An assembler error shall result if the immediate cannot be
6134 created by a single one of these instructions. If there is a choice,
6135 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6136 and MOVZ or MOVN to ORR. */
6137 gas_assert (new_inst != NULL);
6138 fix_mov_imm_insn (fixP, buf, new_inst, value);
6139 break;
6140
6141 case AARCH64_OPND_ADDR_SIMM7:
6142 case AARCH64_OPND_ADDR_SIMM9:
6143 case AARCH64_OPND_ADDR_SIMM9_2:
6144 case AARCH64_OPND_ADDR_UIMM12:
6145 /* Immediate offset in an address. */
6146 insn = get_aarch64_insn (buf);
6147
6148 gas_assert (new_inst != NULL && new_inst->value == insn);
6149 gas_assert (new_inst->opcode->operands[1] == opnd
6150 || new_inst->opcode->operands[2] == opnd);
6151
6152 /* Get the index of the address operand. */
6153 if (new_inst->opcode->operands[1] == opnd)
6154 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6155 idx = 1;
6156 else
6157 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6158 idx = 2;
6159
6160 /* Update the resolved offset value. */
6161 new_inst->operands[idx].addr.offset.imm = value;
6162
6163 /* Encode/fix-up. */
6164 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6165 &new_inst->value, NULL, NULL))
6166 {
6167 put_aarch64_insn (buf, new_inst->value);
6168 break;
6169 }
6170 else if (new_inst->opcode->iclass == ldst_pos
6171 && try_to_encode_as_unscaled_ldst (new_inst))
6172 {
6173 put_aarch64_insn (buf, new_inst->value);
6174 break;
6175 }
6176
6177 as_bad_where (fixP->fx_file, fixP->fx_line,
6178 _("immediate offset out of range"));
6179 break;
6180
6181 default:
6182 gas_assert (0);
6183 as_fatal (_("unhandled operand code %d"), opnd);
6184 }
6185 }
6186
6187 /* Apply a fixup (fixP) to segment data, once it has been determined
6188 by our caller that we have all the info we need to fix it up.
6189
6190 Parameter valP is the pointer to the value of the bits. */
6191
6192 void
6193 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6194 {
6195 offsetT value = *valP;
6196 uint32_t insn;
6197 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6198 int scale;
6199 unsigned flags = fixP->fx_addnumber;
6200
6201 DEBUG_TRACE ("\n\n");
6202 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6203 DEBUG_TRACE ("Enter md_apply_fix");
6204
6205 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6206
6207 /* Note whether this will delete the relocation. */
6208
6209 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6210 fixP->fx_done = 1;
6211
6212 /* Process the relocations. */
6213 switch (fixP->fx_r_type)
6214 {
6215 case BFD_RELOC_NONE:
6216 /* This will need to go in the object file. */
6217 fixP->fx_done = 0;
6218 break;
6219
6220 case BFD_RELOC_8:
6221 case BFD_RELOC_8_PCREL:
6222 if (fixP->fx_done || !seg->use_rela_p)
6223 md_number_to_chars (buf, value, 1);
6224 break;
6225
6226 case BFD_RELOC_16:
6227 case BFD_RELOC_16_PCREL:
6228 if (fixP->fx_done || !seg->use_rela_p)
6229 md_number_to_chars (buf, value, 2);
6230 break;
6231
6232 case BFD_RELOC_32:
6233 case BFD_RELOC_32_PCREL:
6234 if (fixP->fx_done || !seg->use_rela_p)
6235 md_number_to_chars (buf, value, 4);
6236 break;
6237
6238 case BFD_RELOC_64:
6239 case BFD_RELOC_64_PCREL:
6240 if (fixP->fx_done || !seg->use_rela_p)
6241 md_number_to_chars (buf, value, 8);
6242 break;
6243
6244 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6245 /* We claim that these fixups have been processed here, even if
6246 in fact we generate an error because we do not have a reloc
6247 for them, so tc_gen_reloc() will reject them. */
6248 fixP->fx_done = 1;
6249 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6250 {
6251 as_bad_where (fixP->fx_file, fixP->fx_line,
6252 _("undefined symbol %s used as an immediate value"),
6253 S_GET_NAME (fixP->fx_addsy));
6254 goto apply_fix_return;
6255 }
6256 fix_insn (fixP, flags, value);
6257 break;
6258
6259 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6260 if (value & 3)
6261 as_bad_where (fixP->fx_file, fixP->fx_line,
6262 _("pc-relative load offset not word aligned"));
6263 if (signed_overflow (value, 21))
6264 as_bad_where (fixP->fx_file, fixP->fx_line,
6265 _("pc-relative load offset out of range"));
6266 if (fixP->fx_done || !seg->use_rela_p)
6267 {
6268 insn = get_aarch64_insn (buf);
6269 insn |= encode_ld_lit_ofs_19 (value >> 2);
6270 put_aarch64_insn (buf, insn);
6271 }
6272 break;
6273
6274 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6275 if (signed_overflow (value, 21))
6276 as_bad_where (fixP->fx_file, fixP->fx_line,
6277 _("pc-relative address offset out of range"));
6278 if (fixP->fx_done || !seg->use_rela_p)
6279 {
6280 insn = get_aarch64_insn (buf);
6281 insn |= encode_adr_imm (value);
6282 put_aarch64_insn (buf, insn);
6283 }
6284 break;
6285
6286 case BFD_RELOC_AARCH64_BRANCH19:
6287 if (value & 3)
6288 as_bad_where (fixP->fx_file, fixP->fx_line,
6289 _("conditional branch target not word aligned"));
6290 if (signed_overflow (value, 21))
6291 as_bad_where (fixP->fx_file, fixP->fx_line,
6292 _("conditional branch out of range"));
6293 if (fixP->fx_done || !seg->use_rela_p)
6294 {
6295 insn = get_aarch64_insn (buf);
6296 insn |= encode_cond_branch_ofs_19 (value >> 2);
6297 put_aarch64_insn (buf, insn);
6298 }
6299 break;
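/* For example (sketch): a "b.ne target" whose target resolves 0x100 bytes
   ahead reaches here with VALUE == 0x100; it is word aligned and within
   the signed 21-bit range, so imm19 is set to 0x100 >> 2 == 0x40.  */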
6300
6301 case BFD_RELOC_AARCH64_TSTBR14:
6302 if (value & 3)
6303 as_bad_where (fixP->fx_file, fixP->fx_line,
6304 _("conditional branch target not word aligned"));
6305 if (signed_overflow (value, 16))
6306 as_bad_where (fixP->fx_file, fixP->fx_line,
6307 _("conditional branch out of range"));
6308 if (fixP->fx_done || !seg->use_rela_p)
6309 {
6310 insn = get_aarch64_insn (buf);
6311 insn |= encode_tst_branch_ofs_14 (value >> 2);
6312 put_aarch64_insn (buf, insn);
6313 }
6314 break;
6315
6316 case BFD_RELOC_AARCH64_JUMP26:
6317 case BFD_RELOC_AARCH64_CALL26:
6318 if (value & 3)
6319 as_bad_where (fixP->fx_file, fixP->fx_line,
6320 _("branch target not word aligned"));
6321 if (signed_overflow (value, 28))
6322 as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
6323 if (fixP->fx_done || !seg->use_rela_p)
6324 {
6325 insn = get_aarch64_insn (buf);
6326 insn |= encode_branch_ofs_26 (value >> 2);
6327 put_aarch64_insn (buf, insn);
6328 }
6329 break;
6330
6331 case BFD_RELOC_AARCH64_MOVW_G0:
6332 case BFD_RELOC_AARCH64_MOVW_G0_S:
6333 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6334 scale = 0;
6335 goto movw_common;
6336 case BFD_RELOC_AARCH64_MOVW_G1:
6337 case BFD_RELOC_AARCH64_MOVW_G1_S:
6338 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6339 scale = 16;
6340 goto movw_common;
6341 case BFD_RELOC_AARCH64_MOVW_G2:
6342 case BFD_RELOC_AARCH64_MOVW_G2_S:
6343 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6344 scale = 32;
6345 goto movw_common;
6346 case BFD_RELOC_AARCH64_MOVW_G3:
6347 scale = 48;
6348 movw_common:
6349 if (fixP->fx_done || !seg->use_rela_p)
6350 {
6351 insn = get_aarch64_insn (buf);
6352
6353 if (!fixP->fx_done)
6354 {
6355 /* REL signed addend must fit in 16 bits */
6356 if (signed_overflow (value, 16))
6357 as_bad_where (fixP->fx_file, fixP->fx_line,
6358 _("offset out of range"));
6359 }
6360 else
6361 {
6362 /* Check for overflow and scale. */
6363 switch (fixP->fx_r_type)
6364 {
6365 case BFD_RELOC_AARCH64_MOVW_G0:
6366 case BFD_RELOC_AARCH64_MOVW_G1:
6367 case BFD_RELOC_AARCH64_MOVW_G2:
6368 case BFD_RELOC_AARCH64_MOVW_G3:
6369 if (unsigned_overflow (value, scale + 16))
6370 as_bad_where (fixP->fx_file, fixP->fx_line,
6371 _("unsigned value out of range"));
6372 break;
6373 case BFD_RELOC_AARCH64_MOVW_G0_S:
6374 case BFD_RELOC_AARCH64_MOVW_G1_S:
6375 case BFD_RELOC_AARCH64_MOVW_G2_S:
6376 /* NOTE: We can only come here with movz or movn. */
6377 if (signed_overflow (value, scale + 16))
6378 as_bad_where (fixP->fx_file, fixP->fx_line,
6379 _("signed value out of range"));
6380 if (value < 0)
6381 {
6382 /* Force use of MOVN. */
6383 value = ~value;
6384 insn = reencode_movzn_to_movn (insn);
6385 }
6386 else
6387 {
6388 /* Force use of MOVZ. */
6389 insn = reencode_movzn_to_movz (insn);
6390 }
6391 break;
6392 default:
6393 /* Unchecked relocations. */
6394 break;
6395 }
6396 value >>= scale;
6397 }
6398
6399 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6400 insn |= encode_movw_imm (value & 0xffff);
6401
6402 put_aarch64_insn (buf, insn);
6403 }
6404 break;
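/* MOVW sketch (illustrative values): a BFD_RELOC_AARCH64_MOVW_G1 resolved
   at assembly time to 0x12345678 passes the 32-bit unsigned check, is
   shifted right by 16, and #0x1234 is inserted into the imm16 field.  A
   _G0_S relocation resolving to -2 forces the MOVN form and inserts
   imm16 == 1 (i.e. ~(-2)).  */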
6405
6406 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6407 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6408 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6409 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6410 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6411 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6412 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6413 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6414 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6415 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6416 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6417 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6418 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6419 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6420 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6421 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6422 /* Should always be exported to object file, see
6423 aarch64_force_relocation(). */
6424 gas_assert (!fixP->fx_done);
6425 gas_assert (seg->use_rela_p);
6426 break;
6427
6428 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6429 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6430 case BFD_RELOC_AARCH64_ADD_LO12:
6431 case BFD_RELOC_AARCH64_LDST8_LO12:
6432 case BFD_RELOC_AARCH64_LDST16_LO12:
6433 case BFD_RELOC_AARCH64_LDST32_LO12:
6434 case BFD_RELOC_AARCH64_LDST64_LO12:
6435 case BFD_RELOC_AARCH64_LDST128_LO12:
6436 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6437 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6438 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6439 /* Should always be exported to object file, see
6440 aarch64_force_relocation(). */
6441 gas_assert (!fixP->fx_done);
6442 gas_assert (seg->use_rela_p);
6443 break;
6444
6445 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6446 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6447 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6448 break;
6449
6450 default:
6451 as_bad_where (fixP->fx_file, fixP->fx_line,
6452 _("unexpected %s fixup"),
6453 bfd_get_reloc_code_name (fixP->fx_r_type));
6454 break;
6455 }
6456
6457 apply_fix_return:
6458 /* Free the allocated struct aarch64_inst.
6459 N.B. currently only a very limited number of fix-up types actually use
6460 this field, so the impact on performance should be minimal. */
6461 if (fixP->tc_fix_data.inst != NULL)
6462 free (fixP->tc_fix_data.inst);
6463
6464 return;
6465 }
6466
6467 /* Translate internal representation of relocation info to BFD target
6468 format. */
6469
6470 arelent *
6471 tc_gen_reloc (asection * section, fixS * fixp)
6472 {
6473 arelent *reloc;
6474 bfd_reloc_code_real_type code;
6475
6476 reloc = xmalloc (sizeof (arelent));
6477
6478 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6479 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6480 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6481
6482 if (fixp->fx_pcrel)
6483 {
6484 if (section->use_rela_p)
6485 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6486 else
6487 fixp->fx_offset = reloc->address;
6488 }
6489 reloc->addend = fixp->fx_offset;
6490
6491 code = fixp->fx_r_type;
6492 switch (code)
6493 {
6494 case BFD_RELOC_16:
6495 if (fixp->fx_pcrel)
6496 code = BFD_RELOC_16_PCREL;
6497 break;
6498
6499 case BFD_RELOC_32:
6500 if (fixp->fx_pcrel)
6501 code = BFD_RELOC_32_PCREL;
6502 break;
6503
6504 case BFD_RELOC_64:
6505 if (fixp->fx_pcrel)
6506 code = BFD_RELOC_64_PCREL;
6507 break;
6508
6509 default:
6510 break;
6511 }
6512
6513 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6514 if (reloc->howto == NULL)
6515 {
6516 as_bad_where (fixp->fx_file, fixp->fx_line,
6517 _
6518 ("cannot represent %s relocation in this object file format"),
6519 bfd_get_reloc_code_name (code));
6520 return NULL;
6521 }
6522
6523 return reloc;
6524 }
6525
6526 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6527
6528 void
6529 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6530 {
6531 bfd_reloc_code_real_type type;
6532 int pcrel = 0;
6533
6534 /* Pick a reloc.
6535 FIXME: @@ Should look at CPU word size. */
6536 switch (size)
6537 {
6538 case 1:
6539 type = BFD_RELOC_8;
6540 break;
6541 case 2:
6542 type = BFD_RELOC_16;
6543 break;
6544 case 4:
6545 type = BFD_RELOC_32;
6546 break;
6547 case 8:
6548 type = BFD_RELOC_64;
6549 break;
6550 default:
6551 as_bad (_("cannot do %u-byte relocation"), size);
6552 type = BFD_RELOC_UNUSED;
6553 break;
6554 }
6555
6556 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6557 }
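/* For example (sketch): ".word foo" reaches here with SIZE == 4 and
   creates a BFD_RELOC_32 fixup against "foo", while ".xword foo" uses
   SIZE == 8 and BFD_RELOC_64; "foo" is just a placeholder symbol.  */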
6558
6559 int
6560 aarch64_force_relocation (struct fix *fixp)
6561 {
6562 switch (fixp->fx_r_type)
6563 {
6564 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6565 /* Perform these "immediate" internal relocations
6566 even if the symbol is extern or weak. */
6567 return 0;
6568
6569 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6570 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6571 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6572 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6573 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6574 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6575 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6576 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6577 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6578 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6579 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6580 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6581 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6582 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6583 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6584 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6585 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6586 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6587 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6588 case BFD_RELOC_AARCH64_ADD_LO12:
6589 case BFD_RELOC_AARCH64_LDST8_LO12:
6590 case BFD_RELOC_AARCH64_LDST16_LO12:
6591 case BFD_RELOC_AARCH64_LDST32_LO12:
6592 case BFD_RELOC_AARCH64_LDST64_LO12:
6593 case BFD_RELOC_AARCH64_LDST128_LO12:
6594 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6595 /* Always leave these relocations for the linker. */
6596 return 1;
6597
6598 default:
6599 break;
6600 }
6601
6602 return generic_force_reloc (fixp);
6603 }
6604
6605 #ifdef OBJ_ELF
6606
6607 const char *
6608 elf64_aarch64_target_format (void)
6609 {
6610 if (target_big_endian)
6611 return "elf64-bigaarch64";
6612 else
6613 return "elf64-littleaarch64";
6614 }
6615
6616 void
6617 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6618 {
6619 elf_frob_symbol (symp, puntp);
6620 }
6621 #endif
6622
6623 /* MD interface: Finalization. */
6624
6625 /* A good place to do this, although this was probably not intended
6626 for this kind of use. We need to dump the literal pool before
6627 references are made to a null symbol pointer. */
6628
6629 void
6630 aarch64_cleanup (void)
6631 {
6632 literal_pool *pool;
6633
6634 for (pool = list_of_pools; pool; pool = pool->next)
6635 {
6636 /* Put it at the end of the relevant section. */
6637 subseg_set (pool->section, pool->sub_section);
6638 s_ltorg (0);
6639 }
6640 }
6641
6642 #ifdef OBJ_ELF
6643 /* Remove any excess mapping symbols generated for alignment frags in
6644 SEC. We may have created a mapping symbol before a zero byte
6645 alignment; remove it if there's a mapping symbol after the
6646 alignment. */
6647 static void
6648 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6649 void *dummy ATTRIBUTE_UNUSED)
6650 {
6651 segment_info_type *seginfo = seg_info (sec);
6652 fragS *fragp;
6653
6654 if (seginfo == NULL || seginfo->frchainP == NULL)
6655 return;
6656
6657 for (fragp = seginfo->frchainP->frch_root;
6658 fragp != NULL; fragp = fragp->fr_next)
6659 {
6660 symbolS *sym = fragp->tc_frag_data.last_map;
6661 fragS *next = fragp->fr_next;
6662
6663 /* Variable-sized frags have been converted to fixed size by
6664 this point. But if this was variable-sized to start with,
6665 there will be a fixed-size frag after it. So don't handle
6666 next == NULL. */
6667 if (sym == NULL || next == NULL)
6668 continue;
6669
6670 if (S_GET_VALUE (sym) < next->fr_address)
6671 /* Not at the end of this frag. */
6672 continue;
6673 know (S_GET_VALUE (sym) == next->fr_address);
6674
6675 do
6676 {
6677 if (next->tc_frag_data.first_map != NULL)
6678 {
6679 /* Next frag starts with a mapping symbol. Discard this
6680 one. */
6681 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6682 break;
6683 }
6684
6685 if (next->fr_next == NULL)
6686 {
6687 /* This mapping symbol is at the end of the section. Discard
6688 it. */
6689 know (next->fr_fix == 0 && next->fr_var == 0);
6690 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6691 break;
6692 }
6693
6694 /* As long as we have empty frags without any mapping symbols,
6695 keep looking. */
6696 /* If the next frag is non-empty and does not start with a
6697 mapping symbol, then this mapping symbol is required. */
6698 if (next->fr_address != next->fr_next->fr_address)
6699 break;
6700
6701 next = next->fr_next;
6702 }
6703 while (next != NULL);
6704 }
6705 }
6706 #endif
6707
6708 /* Adjust the symbol table. */
6709
6710 void
6711 aarch64_adjust_symtab (void)
6712 {
6713 #ifdef OBJ_ELF
6714 /* Remove any overlapping mapping symbols generated by alignment frags. */
6715 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6716 /* Now do generic ELF adjustments. */
6717 elf_adjust_symtab ();
6718 #endif
6719 }
6720
6721 static void
6722 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6723 {
6724 const char *hash_err;
6725
6726 hash_err = hash_insert (table, key, value);
6727 if (hash_err)
6728 printf ("Internal Error: Can't hash %s\n", key);
6729 }
6730
6731 static void
6732 fill_instruction_hash_table (void)
6733 {
6734 aarch64_opcode *opcode = aarch64_opcode_table;
6735
6736 while (opcode->name != NULL)
6737 {
6738 templates *templ, *new_templ;
6739 templ = hash_find (aarch64_ops_hsh, opcode->name);
6740
6741 new_templ = (templates *) xmalloc (sizeof (templates));
6742 new_templ->opcode = opcode;
6743 new_templ->next = NULL;
6744
6745 if (!templ)
6746 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6747 else
6748 {
6749 new_templ->next = templ->next;
6750 templ->next = new_templ;
6751 }
6752 ++opcode;
6753 }
6754 }
6755
6756 static inline void
6757 convert_to_upper (char *dst, const char *src, size_t num)
6758 {
6759 unsigned int i;
6760 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6761 *dst = TOUPPER (*src);
6762 *dst = '\0';
6763 }
6764
6765 /* Assume STR points to a lower-case string; allocate, convert and return
6766 the corresponding upper-case string. */
6767 static inline const char*
6768 get_upper_str (const char *str)
6769 {
6770 char *ret;
6771 size_t len = strlen (str);
6772 if ((ret = xmalloc (len + 1)) == NULL)
6773 abort ();
6774 convert_to_upper (ret, str, len);
6775 return ret;
6776 }
6777
6778 /* MD interface: Initialization. */
6779
6780 void
6781 md_begin (void)
6782 {
6783 unsigned mach;
6784 unsigned int i;
6785
6786 if ((aarch64_ops_hsh = hash_new ()) == NULL
6787 || (aarch64_cond_hsh = hash_new ()) == NULL
6788 || (aarch64_shift_hsh = hash_new ()) == NULL
6789 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6790 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6791 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6792 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6793 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6794 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6795 || (aarch64_reg_hsh = hash_new ()) == NULL
6796 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6797 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6798 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6799 as_fatal (_("virtual memory exhausted"));
6800
6801 fill_instruction_hash_table ();
6802
6803 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6804 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6805 (void *) (aarch64_sys_regs + i));
6806
6807 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6808 checked_hash_insert (aarch64_pstatefield_hsh,
6809 aarch64_pstatefields[i].name,
6810 (void *) (aarch64_pstatefields + i));
6811
6812 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6813 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6814 aarch64_sys_regs_ic[i].template,
6815 (void *) (aarch64_sys_regs_ic + i));
6816
6817 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6818 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6819 aarch64_sys_regs_dc[i].template,
6820 (void *) (aarch64_sys_regs_dc + i));
6821
6822 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6823 checked_hash_insert (aarch64_sys_regs_at_hsh,
6824 aarch64_sys_regs_at[i].template,
6825 (void *) (aarch64_sys_regs_at + i));
6826
6827 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6828 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6829 aarch64_sys_regs_tlbi[i].template,
6830 (void *) (aarch64_sys_regs_tlbi + i));
6831
6832 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6833 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6834 (void *) (reg_names + i));
6835
6836 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6837 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6838 (void *) (nzcv_names + i));
6839
6840 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6841 {
6842 const char *name = aarch64_operand_modifiers[i].name;
6843 checked_hash_insert (aarch64_shift_hsh, name,
6844 (void *) (aarch64_operand_modifiers + i));
6845 /* Also hash the name in the upper case. */
6846 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6847 (void *) (aarch64_operand_modifiers + i));
6848 }
6849
6850 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6851 {
6852 unsigned int j;
6853 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6854 the same condition code. */
6855 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6856 {
6857 const char *name = aarch64_conds[i].names[j];
6858 if (name == NULL)
6859 break;
6860 checked_hash_insert (aarch64_cond_hsh, name,
6861 (void *) (aarch64_conds + i));
6862 /* Also hash the name in the upper case. */
6863 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6864 (void *) (aarch64_conds + i));
6865 }
6866 }
6867
6868 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6869 {
6870 const char *name = aarch64_barrier_options[i].name;
6871 /* Skip xx00 - the unallocated values of option. */
6872 if ((i & 0x3) == 0)
6873 continue;
6874 checked_hash_insert (aarch64_barrier_opt_hsh, name,
6875 (void *) (aarch64_barrier_options + i));
6876 /* Also hash the name in the upper case. */
6877 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6878 (void *) (aarch64_barrier_options + i));
6879 }
6880
6881 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6882 {
6883 const char* name = aarch64_prfops[i].name;
6884 /* Skip 0011x, 01xxx, 1011x and 11xxx - the unallocated hint encodings
6885 as a 5-bit immediate #uimm5. */
6886 if ((i & 0xf) >= 6)
6887 continue;
6888 checked_hash_insert (aarch64_pldop_hsh, name,
6889 (void *) (aarch64_prfops + i));
6890 /* Also hash the name in the upper case. */
6891 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6892 (void *) (aarch64_prfops + i));
6893 }
6894
6895 /* Set the cpu variant based on the command-line options. */
6896 if (!mcpu_cpu_opt)
6897 mcpu_cpu_opt = march_cpu_opt;
6898
6899 if (!mcpu_cpu_opt)
6900 mcpu_cpu_opt = &cpu_default;
6901
6902 cpu_variant = *mcpu_cpu_opt;
6903
6904 /* Record the CPU type. */
6905 mach = bfd_mach_aarch64;
6906
6907 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
6908 }
6909
6910 /* Command line processing. */
6911
6912 const char *md_shortopts = "m:";
6913
6914 #ifdef AARCH64_BI_ENDIAN
6915 #define OPTION_EB (OPTION_MD_BASE + 0)
6916 #define OPTION_EL (OPTION_MD_BASE + 1)
6917 #else
6918 #if TARGET_BYTES_BIG_ENDIAN
6919 #define OPTION_EB (OPTION_MD_BASE + 0)
6920 #else
6921 #define OPTION_EL (OPTION_MD_BASE + 1)
6922 #endif
6923 #endif
6924
6925 struct option md_longopts[] = {
6926 #ifdef OPTION_EB
6927 {"EB", no_argument, NULL, OPTION_EB},
6928 #endif
6929 #ifdef OPTION_EL
6930 {"EL", no_argument, NULL, OPTION_EL},
6931 #endif
6932 {NULL, no_argument, NULL, 0}
6933 };
6934
6935 size_t md_longopts_size = sizeof (md_longopts);
6936
6937 struct aarch64_option_table
6938 {
6939 char *option; /* Option name to match. */
6940 char *help; /* Help information. */
6941 int *var; /* Variable to change. */
6942 int value; /* What to change it to. */
6943 char *deprecated; /* If non-null, print this message. */
6944 };
6945
6946 static struct aarch64_option_table aarch64_opts[] = {
6947 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
6948 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
6949 NULL},
6950 #ifdef DEBUG_AARCH64
6951 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
6952 #endif /* DEBUG_AARCH64 */
6953 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
6954 NULL},
6955 {NULL, NULL, NULL, 0, NULL}
6956 };
6957
6958 struct aarch64_cpu_option_table
6959 {
6960 char *name;
6961 const aarch64_feature_set value;
6962 /* The canonical name of the CPU, or NULL to use NAME converted to upper
6963 case. */
6964 const char *canonical_name;
6965 };
6966
6967 /* This list should, at a minimum, contain all the cpu names
6968 recognized by GCC. */
6969 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
6970 {"all", AARCH64_ANY, NULL},
6971 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
6972 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
6973 {"generic", AARCH64_ARCH_V8, NULL},
6974
6975 /* These two are example CPUs supported in GCC, once we have real
6976 CPUs they will be removed. */
6977 {"example-1", AARCH64_ARCH_V8, NULL},
6978 {"example-2", AARCH64_ARCH_V8, NULL},
6979
6980 {NULL, AARCH64_ARCH_NONE, NULL}
6981 };
6982
6983 struct aarch64_arch_option_table
6984 {
6985 char *name;
6986 const aarch64_feature_set value;
6987 };
6988
6989 /* This list should, at a minimum, contain all the architecture names
6990 recognized by GCC. */
6991 static const struct aarch64_arch_option_table aarch64_archs[] = {
6992 {"all", AARCH64_ANY},
6993 {"armv8-a", AARCH64_ARCH_V8},
6994 {NULL, AARCH64_ARCH_NONE}
6995 };
6996
6997 /* ISA extensions. */
6998 struct aarch64_option_cpu_value_table
6999 {
7000 char *name;
7001 const aarch64_feature_set value;
7002 };
7003
7004 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7005 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7006 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7007 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7008 {NULL, AARCH64_ARCH_NONE}
7009 };
7010
7011 struct aarch64_long_option_table
7012 {
7013 char *option; /* Substring to match. */
7014 char *help; /* Help information. */
7015 int (*func) (char *subopt); /* Function to decode sub-option. */
7016 char *deprecated; /* If non-null, print this message. */
7017 };
7018
7019 static int
7020 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7021 {
7022 /* We insist on extensions being added before being removed. We achieve
7023 this by using the ADDING_VALUE variable to indicate whether we are
7024 adding an extension (1) or removing it (0) and only allowing it to
7025 change in the order -1 -> 1 -> 0. */
7026 int adding_value = -1;
7027 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7028
7029 /* Copy the feature set, so that we can modify it. */
7030 *ext_set = **opt_p;
7031 *opt_p = ext_set;
7032
7033 while (str != NULL && *str != 0)
7034 {
7035 const struct aarch64_option_cpu_value_table *opt;
7036 char *ext;
7037 int optlen;
7038
7039 if (*str != '+')
7040 {
7041 as_bad (_("invalid architectural extension"));
7042 return 0;
7043 }
7044
7045 str++;
7046 ext = strchr (str, '+');
7047
7048 if (ext != NULL)
7049 optlen = ext - str;
7050 else
7051 optlen = strlen (str);
7052
7053 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7054 {
7055 if (adding_value != 0)
7056 adding_value = 0;
7057 optlen -= 2;
7058 str += 2;
7059 }
7060 else if (optlen > 0)
7061 {
7062 if (adding_value == -1)
7063 adding_value = 1;
7064 else if (adding_value != 1)
7065 {
7066 as_bad (_("must specify extensions to add before specifying "
7067 "those to remove"));
7068 return 0;
7069 }
7070 }
7071
7072 if (optlen == 0)
7073 {
7074 as_bad (_("missing architectural extension"));
7075 return 0;
7076 }
7077
7078 gas_assert (adding_value != -1);
7079
7080 for (opt = aarch64_features; opt->name != NULL; opt++)
7081 if (strncmp (opt->name, str, optlen) == 0)
7082 {
7083 /* Add or remove the extension. */
7084 if (adding_value)
7085 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7086 else
7087 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7088 break;
7089 }
7090
7091 if (opt->name == NULL)
7092 {
7093 as_bad (_("unknown architectural extension `%s'"), str);
7094 return 0;
7095 }
7096
7097 str = ext;
7098 }
7099
7100 return 1;
7101 }
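/* For example (sketch): a feature string of "+crypto+nosimd" first enables
   the crypto extension and then removes simd; "+nofp+crypto" is rejected,
   since extensions to add must be listed before those to remove.  */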
7102
7103 static int
7104 aarch64_parse_cpu (char *str)
7105 {
7106 const struct aarch64_cpu_option_table *opt;
7107 char *ext = strchr (str, '+');
7108 size_t optlen;
7109
7110 if (ext != NULL)
7111 optlen = ext - str;
7112 else
7113 optlen = strlen (str);
7114
7115 if (optlen == 0)
7116 {
7117 as_bad (_("missing cpu name `%s'"), str);
7118 return 0;
7119 }
7120
7121 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7122 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7123 {
7124 mcpu_cpu_opt = &opt->value;
7125 if (ext != NULL)
7126 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7127
7128 return 1;
7129 }
7130
7131 as_bad (_("unknown cpu `%s'"), str);
7132 return 0;
7133 }
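/* For example (sketch): "-mcpu=cortex-a57+crypto" selects the Cortex-A57
   feature set and then hands "+crypto" to aarch64_parse_features to
   enable the crypto extension on top of it.  */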
7134
7135 static int
7136 aarch64_parse_arch (char *str)
7137 {
7138 const struct aarch64_arch_option_table *opt;
7139 char *ext = strchr (str, '+');
7140 size_t optlen;
7141
7142 if (ext != NULL)
7143 optlen = ext - str;
7144 else
7145 optlen = strlen (str);
7146
7147 if (optlen == 0)
7148 {
7149 as_bad (_("missing architecture name `%s'"), str);
7150 return 0;
7151 }
7152
7153 for (opt = aarch64_archs; opt->name != NULL; opt++)
7154 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7155 {
7156 march_cpu_opt = &opt->value;
7157 if (ext != NULL)
7158 return aarch64_parse_features (ext, &march_cpu_opt);
7159
7160 return 1;
7161 }
7162
7163 as_bad (_("unknown architecture `%s'\n"), str);
7164 return 0;
7165 }
7166
7167 static struct aarch64_long_option_table aarch64_long_opts[] = {
7168 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7169 aarch64_parse_cpu, NULL},
7170 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7171 aarch64_parse_arch, NULL},
7172 {NULL, NULL, 0, NULL}
7173 };
7174
7175 int
7176 md_parse_option (int c, char *arg)
7177 {
7178 struct aarch64_option_table *opt;
7179 struct aarch64_long_option_table *lopt;
7180
7181 switch (c)
7182 {
7183 #ifdef OPTION_EB
7184 case OPTION_EB:
7185 target_big_endian = 1;
7186 break;
7187 #endif
7188
7189 #ifdef OPTION_EL
7190 case OPTION_EL:
7191 target_big_endian = 0;
7192 break;
7193 #endif
7194
7195 case 'a':
7196 /* Listing option. Just ignore these; we don't support additional
7197 ones. */
7198 return 0;
7199
7200 default:
7201 for (opt = aarch64_opts; opt->option != NULL; opt++)
7202 {
7203 if (c == opt->option[0]
7204 && ((arg == NULL && opt->option[1] == 0)
7205 || streq (arg, opt->option + 1)))
7206 {
7207 /* If the option is deprecated, tell the user. */
7208 if (opt->deprecated != NULL)
7209 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7210 arg ? arg : "", _(opt->deprecated));
7211
7212 if (opt->var != NULL)
7213 *opt->var = opt->value;
7214
7215 return 1;
7216 }
7217 }
7218
7219 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7220 {
7221 /* These options are expected to have an argument. */
7222 if (c == lopt->option[0]
7223 && arg != NULL
7224 && strncmp (arg, lopt->option + 1,
7225 strlen (lopt->option + 1)) == 0)
7226 {
7227 /* If the option is deprecated, tell the user. */
7228 if (lopt->deprecated != NULL)
7229 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7230 _(lopt->deprecated));
7231
7232 /* Call the sub-option parser. */
7233 return lopt->func (arg + strlen (lopt->option) - 1);
7234 }
7235 }
7236
7237 return 0;
7238 }
7239
7240 return 1;
7241 }
7242
7243 void
7244 md_show_usage (FILE * fp)
7245 {
7246 struct aarch64_option_table *opt;
7247 struct aarch64_long_option_table *lopt;
7248
7249 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7250
7251 for (opt = aarch64_opts; opt->option != NULL; opt++)
7252 if (opt->help != NULL)
7253 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7254
7255 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7256 if (lopt->help != NULL)
7257 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7258
7259 #ifdef OPTION_EB
7260 fprintf (fp, _("\
7261 -EB assemble code for a big-endian cpu\n"));
7262 #endif
7263
7264 #ifdef OPTION_EL
7265 fprintf (fp, _("\
7266 -EL assemble code for a little-endian cpu\n"));
7267 #endif
7268 }
7269
7270 /* Parse a .cpu directive. */
7271
7272 static void
7273 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7274 {
7275 const struct aarch64_cpu_option_table *opt;
7276 char saved_char;
7277 char *name;
7278 char *ext;
7279 size_t optlen;
7280
7281 name = input_line_pointer;
7282 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7283 input_line_pointer++;
7284 saved_char = *input_line_pointer;
7285 *input_line_pointer = 0;
7286
7287 ext = strchr (name, '+');
7288
7289 if (ext != NULL)
7290 optlen = ext - name;
7291 else
7292 optlen = strlen (name);
7293
7294 /* Skip the first "all" entry. */
7295 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7296 if (strlen (opt->name) == optlen
7297 && strncmp (name, opt->name, optlen) == 0)
7298 {
7299 mcpu_cpu_opt = &opt->value;
7300 if (ext != NULL)
7301 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7302 return;
7303
7304 cpu_variant = *mcpu_cpu_opt;
7305
7306 *input_line_pointer = saved_char;
7307 demand_empty_rest_of_line ();
7308 return;
7309 }
7310 as_bad (_("unknown cpu `%s'"), name);
7311 *input_line_pointer = saved_char;
7312 ignore_rest_of_line ();
7313 }
7314
7315
7316 /* Parse a .arch directive. */
7317
7318 static void
7319 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7320 {
7321 const struct aarch64_arch_option_table *opt;
7322 char saved_char;
7323 char *name;
7324 char *ext;
7325 size_t optlen;
7326
7327 name = input_line_pointer;
7328 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7329 input_line_pointer++;
7330 saved_char = *input_line_pointer;
7331 *input_line_pointer = 0;
7332
7333 ext = strchr (name, '+');
7334
7335 if (ext != NULL)
7336 optlen = ext - name;
7337 else
7338 optlen = strlen (name);
7339
7340 /* Skip the first "all" entry. */
7341 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7342 if (strlen (opt->name) == optlen
7343 && strncmp (name, opt->name, optlen) == 0)
7344 {
7345 mcpu_cpu_opt = &opt->value;
7346 if (ext != NULL)
7347 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7348 return;
7349
7350 cpu_variant = *mcpu_cpu_opt;
7351
7352 *input_line_pointer = saved_char;
7353 demand_empty_rest_of_line ();
7354 return;
7355 }
7356
7357 as_bad (_("unknown architecture `%s'\n"), name);
7358 *input_line_pointer = saved_char;
7359 ignore_rest_of_line ();
7360 }
7361
7362 /* Copy symbol information. */
7363
7364 void
7365 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7366 {
7367 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7368 }