1/* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22#include "as.h"
23#include <limits.h>
24#include <stdarg.h>
25#include "bfd_stdint.h"
26#define NO_RELOC 0
27#include "safe-ctype.h"
28#include "subsegs.h"
29#include "obstack.h"
30
31#ifdef OBJ_ELF
32#include "elf/aarch64.h"
33#include "dw2gencfi.h"
34#endif
35
36#include "dwarf2dbg.h"
37
38/* Types of processor to assemble for. */
39#ifndef CPU_DEFAULT
40#define CPU_DEFAULT AARCH64_ARCH_V8
41#endif
42
43#define streq(a, b) (strcmp (a, b) == 0)
44
45static aarch64_feature_set cpu_variant;
46
47/* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51static const aarch64_feature_set *march_cpu_opt = NULL;
52
53/* Constants for known architecture features. */
54static const aarch64_feature_set cpu_default = CPU_DEFAULT;
55
56static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
57static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
58
59#ifdef OBJ_ELF
60/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61static symbolS *GOT_symbol;
62#endif
63
64enum neon_el_type
65{
66 NT_invtype = -1,
67 NT_b,
68 NT_h,
69 NT_s,
70 NT_d,
71 NT_q
72};
73
74/* Bits for DEFINED field in neon_type_el. */
75#define NTA_HASTYPE 1
76#define NTA_HASINDEX 2
77
78struct neon_type_el
79{
80 enum neon_el_type type;
81 unsigned char defined;
82 unsigned width;
83 int64_t index;
84};
85
86#define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
87
88struct reloc
89{
90 bfd_reloc_code_real_type type;
91 expressionS exp;
92 int pc_rel;
93 enum aarch64_opnd opnd;
94 uint32_t flags;
95 unsigned need_libopcodes_p : 1;
96};
97
98struct aarch64_instruction
99{
100 /* libopcodes structure for instruction intermediate representation. */
101 aarch64_inst base;
102 /* Record assembly errors found during the parsing. */
103 struct
104 {
105 enum aarch64_operand_error_kind kind;
106 const char *error;
107 } parsing_error;
108 /* The condition that appears in the assembly line. */
109 int cond;
110 /* Relocation information (including the GAS internal fixup). */
111 struct reloc reloc;
112 /* Need to generate an immediate in the literal pool. */
113 unsigned gen_lit_pool : 1;
114};
115
116typedef struct aarch64_instruction aarch64_instruction;
117
118static aarch64_instruction inst;
119
120static bfd_boolean parse_operands (char *, const aarch64_opcode *);
121static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
122
123/* Diagnostics inline function utilities.
 124
 125 These are lightweight utilities which should only be called by parse_operands
 126 and other parsers. GAS processes each assembly line by parsing it against
 127 instruction template(s); in the case of multiple templates (for the same
 128 mnemonic name), those templates are tried one by one until one succeeds or
 129 all fail. An assembly line may fail a few templates before being
 130 successfully parsed; an error saved here in most cases is not a user error
 131 but an error indicating that the current template is not the right template.
 132 Therefore it is very important that errors can be saved at a low cost during
 133 the parsing; we don't want to slow down the whole parsing by recording
 134 non-user errors in detail.
 135
 136 Remember that the objective is to help GAS pick the most appropriate
 137 error message in the case of multiple templates, e.g. FMOV, which has 8
 138 templates. */
139
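/* Illustrative sketch only (not code taken from the parsers below): a
   typical operand parser records a cheap error and bails out, leaving the
   caller free to try the next template for the same mnemonic:

     if (reg == PARSE_FAIL)
       {
         set_default_error ();
         return FALSE;
       }

   Only when every template for the mnemonic has failed does GAS report
   the most relevant of the recorded errors via as_bad.  */
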
140static inline void
141clear_error (void)
142{
143 inst.parsing_error.kind = AARCH64_OPDE_NIL;
144 inst.parsing_error.error = NULL;
145}
146
147static inline bfd_boolean
148error_p (void)
149{
150 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
151}
152
153static inline const char *
154get_error_message (void)
155{
156 return inst.parsing_error.error;
157}
158
159static inline void
160set_error_message (const char *error)
161{
162 inst.parsing_error.error = error;
163}
164
165static inline enum aarch64_operand_error_kind
166get_error_kind (void)
167{
168 return inst.parsing_error.kind;
169}
170
171static inline void
172set_error_kind (enum aarch64_operand_error_kind kind)
173{
174 inst.parsing_error.kind = kind;
175}
176
177static inline void
178set_error (enum aarch64_operand_error_kind kind, const char *error)
179{
180 inst.parsing_error.kind = kind;
181 inst.parsing_error.error = error;
182}
183
184static inline void
185set_recoverable_error (const char *error)
186{
187 set_error (AARCH64_OPDE_RECOVERABLE, error);
188}
189
190/* Use the DESC field of the corresponding aarch64_operand entry to compose
191 the error message. */
192static inline void
193set_default_error (void)
194{
195 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
196}
197
198static inline void
199set_syntax_error (const char *error)
200{
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
202}
203
204static inline void
205set_first_syntax_error (const char *error)
206{
207 if (! error_p ())
208 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
209}
210
211static inline void
212set_fatal_syntax_error (const char *error)
213{
214 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
215}
216\f
217/* Number of littlenums required to hold an extended precision number. */
218#define MAX_LITTLENUMS 6
219
220/* Return value for certain parsers when the parsing fails; those parsers
221 return the information of the parsed result, e.g. register number, on
222 success. */
223#define PARSE_FAIL -1
224
225/* This is an invalid condition code that means no conditional field is
226 present. */
227#define COND_ALWAYS 0x10
228
229typedef struct
230{
231 const char *template;
232 unsigned long value;
233} asm_barrier_opt;
234
235typedef struct
236{
237 const char *template;
238 uint32_t value;
239} asm_nzcv;
240
241struct reloc_entry
242{
243 char *name;
244 bfd_reloc_code_real_type reloc;
245};
246
247/* Structure for a hash table entry for a register. */
248typedef struct
249{
250 const char *name;
251 unsigned char number;
252 unsigned char type;
253 unsigned char builtin;
254} reg_entry;
255
256/* Macros to define the register types and masks for the purpose
257 of parsing. */
258
259#undef AARCH64_REG_TYPES
260#define AARCH64_REG_TYPES \
261 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
262 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
263 BASIC_REG_TYPE(SP_32) /* wsp */ \
264 BASIC_REG_TYPE(SP_64) /* sp */ \
265 BASIC_REG_TYPE(Z_32) /* wzr */ \
266 BASIC_REG_TYPE(Z_64) /* xzr */ \
267 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
268 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
269 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
270 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
271 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
272 BASIC_REG_TYPE(CN) /* c[0-7] */ \
273 BASIC_REG_TYPE(VN) /* v[0-31] */ \
274 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
275 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
276 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
277 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
278 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
279 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
280 /* Typecheck: any [BHSDQ]P FP. */ \
281 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
282 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
283 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
284 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
285 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
286 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
287 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
288 /* Any integer register; used for error messages only. */ \
289 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
292 /* Pseudo type to mark the end of the enumerator sequence. */ \
293 BASIC_REG_TYPE(MAX)
294
295#undef BASIC_REG_TYPE
296#define BASIC_REG_TYPE(T) REG_TYPE_##T,
297#undef MULTI_REG_TYPE
298#define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
299
300/* Register type enumerators. */
301typedef enum
302{
303 /* A list of REG_TYPE_*. */
304 AARCH64_REG_TYPES
305} aarch64_reg_type;
306
307#undef BASIC_REG_TYPE
308#define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
309#undef REG_TYPE
310#define REG_TYPE(T) (1 << REG_TYPE_##T)
311#undef MULTI_REG_TYPE
312#define MULTI_REG_TYPE(T,V) V,
313
314/* Values indexed by aarch64_reg_type to assist the type checking. */
315static const unsigned reg_type_masks[] =
316{
317 AARCH64_REG_TYPES
318};
319
320#undef BASIC_REG_TYPE
321#undef REG_TYPE
322#undef MULTI_REG_TYPE
323#undef AARCH64_REG_TYPES
324
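/* For illustration only: the X-macro above expands once into the
   aarch64_reg_type enumerators, e.g.

     REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_MAX,

   and once into reg_type_masks[], where a MULTI_REG_TYPE entry such as
   R64_SP becomes the OR of its component bits:

     reg_type_masks[REG_TYPE_R64_SP]
       == (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64)

   so that aarch64_check_reg_type below can test membership with a simple
   mask intersection.  */
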
325/* Diagnostics used when we don't get a register of the expected type.
 326 Note: this has to be synchronized with the aarch64_reg_type definitions
327 above. */
328static const char *
329get_reg_expected_msg (aarch64_reg_type reg_type)
330{
331 const char *msg;
332
333 switch (reg_type)
334 {
335 case REG_TYPE_R_32:
336 msg = N_("integer 32-bit register expected");
337 break;
338 case REG_TYPE_R_64:
339 msg = N_("integer 64-bit register expected");
340 break;
341 case REG_TYPE_R_N:
342 msg = N_("integer register expected");
343 break;
344 case REG_TYPE_R_Z_SP:
345 msg = N_("integer, zero or SP register expected");
346 break;
347 case REG_TYPE_FP_B:
348 msg = N_("8-bit SIMD scalar register expected");
349 break;
350 case REG_TYPE_FP_H:
351 msg = N_("16-bit SIMD scalar or floating-point half precision "
352 "register expected");
353 break;
354 case REG_TYPE_FP_S:
355 msg = N_("32-bit SIMD scalar or floating-point single precision "
356 "register expected");
357 break;
358 case REG_TYPE_FP_D:
359 msg = N_("64-bit SIMD scalar or floating-point double precision "
360 "register expected");
361 break;
362 case REG_TYPE_FP_Q:
363 msg = N_("128-bit SIMD scalar or floating-point quad precision "
364 "register expected");
365 break;
366 case REG_TYPE_CN:
367 msg = N_("C0 - C15 expected");
368 break;
369 case REG_TYPE_R_Z_BHSDQ_V:
370 msg = N_("register expected");
371 break;
372 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
373 msg = N_("SIMD scalar or floating-point register expected");
374 break;
375 case REG_TYPE_VN: /* any V reg */
376 msg = N_("vector register expected");
377 break;
378 default:
379 as_fatal (_("invalid register type %d"), reg_type);
380 }
381 return msg;
382}
383
384/* Some well known registers that we refer to directly elsewhere. */
385#define REG_SP 31
386
387/* Instructions take 4 bytes in the object file. */
388#define INSN_SIZE 4
389
390/* Define some common error messages. */
391#define BAD_SP _("SP not allowed here")
392
393static struct hash_control *aarch64_ops_hsh;
394static struct hash_control *aarch64_cond_hsh;
395static struct hash_control *aarch64_shift_hsh;
396static struct hash_control *aarch64_sys_regs_hsh;
397static struct hash_control *aarch64_pstatefield_hsh;
398static struct hash_control *aarch64_sys_regs_ic_hsh;
399static struct hash_control *aarch64_sys_regs_dc_hsh;
400static struct hash_control *aarch64_sys_regs_at_hsh;
401static struct hash_control *aarch64_sys_regs_tlbi_hsh;
402static struct hash_control *aarch64_reg_hsh;
403static struct hash_control *aarch64_barrier_opt_hsh;
404static struct hash_control *aarch64_nzcv_hsh;
405static struct hash_control *aarch64_pldop_hsh;
406
407/* Stuff needed to resolve the label ambiguity
408 As:
409 ...
410 label: <insn>
411 may differ from:
412 ...
413 label:
414 <insn> */
415
416static symbolS *last_label_seen;
417
418/* Literal pool structure. Held on a per-section
419 and per-sub-section basis. */
420
421#define MAX_LITERAL_POOL_SIZE 1024
422typedef struct literal_pool
423{
424 expressionS literals[MAX_LITERAL_POOL_SIZE];
425 unsigned int next_free_entry;
426 unsigned int id;
427 symbolS *symbol;
428 segT section;
429 subsegT sub_section;
430 int size;
431 struct literal_pool *next;
432} literal_pool;
433
434/* Pointer to a linked list of literal pools. */
435static literal_pool *list_of_pools = NULL;
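
/* Illustrative example (assembly input, not part of this file): a load
   such as

	ldr	x0, =some_symbol

   typically places the value of some_symbol into the literal pool for the
   current section/sub-section and is assembled as a PC-relative literal
   load; a later .ltorg (or .pool) directive flushes the accumulated
   entries into the output at that point.  */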
436\f
437/* Pure syntax. */
438
439/* This array holds the chars that always start a comment. If the
440 pre-processor is disabled, these aren't very useful. */
441const char comment_chars[] = "";
442
443/* This array holds the chars that only start a comment at the beginning of
444 a line. If the line seems to have the form '# 123 filename'
445 .line and .file directives will appear in the pre-processed output. */
446/* Note that input_file.c hand checks for '#' at the beginning of the
447 first line of the input file. This is because the compiler outputs
448 #NO_APP at the beginning of its output. */
449/* Also note that comments like this one will always work. */
450const char line_comment_chars[] = "#";
451
452const char line_separator_chars[] = ";";
453
454/* Chars that can be used to separate mant
455 from exp in floating point numbers. */
456const char EXP_CHARS[] = "eE";
457
458/* Chars that mean this number is a floating point constant. */
459/* As in 0f12.456 */
460/* or 0d1.2345e12 */
461
462const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
463
464/* Prefix character that indicates the start of an immediate value. */
465#define is_immediate_prefix(C) ((C) == '#')
466
467/* Separator character handling. */
468
469#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
470
471static inline bfd_boolean
472skip_past_char (char **str, char c)
473{
474 if (**str == c)
475 {
476 (*str)++;
477 return TRUE;
478 }
479 else
480 return FALSE;
481}
482
483#define skip_past_comma(str) skip_past_char (str, ',')
484
485/* Arithmetic expressions (possibly involving symbols). */
486
487static bfd_boolean in_my_get_expression_p = FALSE;
488
489/* Third argument to my_get_expression. */
490#define GE_NO_PREFIX 0
491#define GE_OPT_PREFIX 1
492
493/* Return TRUE if the string pointed to by *STR is successfully parsed
 494 as a valid expression; *EP will be filled with the information of
495 such an expression. Otherwise return FALSE. */
496
497static bfd_boolean
498my_get_expression (expressionS * ep, char **str, int prefix_mode,
499 int reject_absent)
500{
501 char *save_in;
502 segT seg;
503 int prefix_present_p = 0;
504
505 switch (prefix_mode)
506 {
507 case GE_NO_PREFIX:
508 break;
509 case GE_OPT_PREFIX:
510 if (is_immediate_prefix (**str))
511 {
512 (*str)++;
513 prefix_present_p = 1;
514 }
515 break;
516 default:
517 abort ();
518 }
519
520 memset (ep, 0, sizeof (expressionS));
521
522 save_in = input_line_pointer;
523 input_line_pointer = *str;
524 in_my_get_expression_p = TRUE;
525 seg = expression (ep);
526 in_my_get_expression_p = FALSE;
527
528 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
529 {
530 /* We found a bad expression in md_operand(). */
531 *str = input_line_pointer;
532 input_line_pointer = save_in;
533 if (prefix_present_p && ! error_p ())
534 set_fatal_syntax_error (_("bad expression"));
535 else
536 set_first_syntax_error (_("bad expression"));
537 return FALSE;
538 }
539
540#ifdef OBJ_AOUT
541 if (seg != absolute_section
542 && seg != text_section
543 && seg != data_section
544 && seg != bss_section && seg != undefined_section)
545 {
546 set_syntax_error (_("bad segment"));
547 *str = input_line_pointer;
548 input_line_pointer = save_in;
549 return FALSE;
550 }
551#else
552 (void) seg;
553#endif
554
555 *str = input_line_pointer;
556 input_line_pointer = save_in;
557 return TRUE;
558}
559
560/* Turn a string in input_line_pointer into a floating point constant
561 of type TYPE, and store the appropriate bytes in *LITP. The number
562 of LITTLENUMS emitted is stored in *SIZEP. An error message is
563 returned, or NULL on OK. */
564
565char *
566md_atof (int type, char *litP, int *sizeP)
567{
568 return ieee_md_atof (type, litP, sizeP, target_big_endian);
569}
570
571/* We handle all bad expressions here, so that we can report the faulty
572 instruction in the error message. */
573void
574md_operand (expressionS * exp)
575{
576 if (in_my_get_expression_p)
577 exp->X_op = O_illegal;
578}
579
580/* Immediate values. */
581
582/* Errors may be set multiple times during parsing or bit encoding
583 (particularly in the Neon bits), but usually the earliest error which is set
584 will be the most meaningful. Avoid overwriting it with later (cascading)
585 errors by calling this function. */
586
587static void
588first_error (const char *error)
589{
590 if (! error_p ())
591 set_syntax_error (error);
592}
593
594/* Similar to first_error, but this function accepts a formatted error
595 message. */
596static void
597first_error_fmt (const char *format, ...)
598{
599 va_list args;
600 enum
601 { size = 100 };
602 /* N.B. this single buffer will not cause error messages for different
603 instructions to pollute each other; this is because at the end of
 604 processing of each assembly line, the error message, if any, will be
605 collected by as_bad. */
606 static char buffer[size];
607
608 if (! error_p ())
609 {
 610 int ret ATTRIBUTE_UNUSED;
611 va_start (args, format);
612 ret = vsnprintf (buffer, size, format, args);
613 know (ret <= size - 1 && ret >= 0);
614 va_end (args);
615 set_syntax_error (buffer);
616 }
617}
618
619/* Register parsing. */
620
621/* Generic register parser which is called by other specialized
622 register parsers.
623 CCP points to what should be the beginning of a register name.
624 If it is indeed a valid register name, advance CCP over it and
625 return the reg_entry structure; otherwise return NULL.
626 It does not issue diagnostics. */
627
628static reg_entry *
629parse_reg (char **ccp)
630{
631 char *start = *ccp;
632 char *p;
633 reg_entry *reg;
634
635#ifdef REGISTER_PREFIX
636 if (*start != REGISTER_PREFIX)
637 return NULL;
638 start++;
639#endif
640
641 p = start;
642 if (!ISALPHA (*p) || !is_name_beginner (*p))
643 return NULL;
644
645 do
646 p++;
647 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
648
649 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
650
651 if (!reg)
652 return NULL;
653
654 *ccp = p;
655 return reg;
656}
657
658/* Return TRUE if REG->TYPE is a valid type for TYPE; otherwise
659 return FALSE. */
660static bfd_boolean
661aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
662{
663 if (reg->type == type)
664 return TRUE;
665
666 switch (type)
667 {
668 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
669 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
670 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
671 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
672 case REG_TYPE_VN: /* Vector register. */
673 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
674 return ((reg_type_masks[reg->type] & reg_type_masks[type])
675 == reg_type_masks[reg->type]);
676 default:
677 as_fatal ("unhandled type %d", type);
678 abort ();
679 }
680}
681
682/* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
683 Return the register number otherwise. *ISREG32 is set to one if the
684 register is 32-bit wide; *ISREGZERO is set to one if the register is
685 of type Z_32 or Z_64.
686 Note that this function does not issue any diagnostics. */
687
688static int
689aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
690 int *isreg32, int *isregzero)
691{
692 char *str = *ccp;
693 const reg_entry *reg = parse_reg (&str);
694
695 if (reg == NULL)
696 return PARSE_FAIL;
697
698 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
699 return PARSE_FAIL;
700
701 switch (reg->type)
702 {
703 case REG_TYPE_SP_32:
704 case REG_TYPE_SP_64:
705 if (reject_sp)
706 return PARSE_FAIL;
707 *isreg32 = reg->type == REG_TYPE_SP_32;
708 *isregzero = 0;
709 break;
710 case REG_TYPE_R_32:
711 case REG_TYPE_R_64:
712 *isreg32 = reg->type == REG_TYPE_R_32;
713 *isregzero = 0;
714 break;
715 case REG_TYPE_Z_32:
716 case REG_TYPE_Z_64:
717 if (reject_rz)
718 return PARSE_FAIL;
719 *isreg32 = reg->type == REG_TYPE_Z_32;
720 *isregzero = 1;
721 break;
722 default:
723 return PARSE_FAIL;
724 }
725
726 *ccp = str;
727
728 return reg->number;
729}
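
/* For example (illustrative, assuming the usual built-in register table):
   parsing "w3" succeeds with return value 3, *ISREG32 = 1 and
   *ISREGZERO = 0; parsing "xzr" returns 31 with *ISREG32 = 0 and
   *ISREGZERO = 1; parsing "sp" fails with PARSE_FAIL when REJECT_SP is
   non-zero.  */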
730
731/* Parse the qualifier of a SIMD vector register or a SIMD vector element.
732 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
733 otherwise return FALSE.
734
735 Accept only one occurrence of:
736 8b 16b 4h 8h 2s 4s 1d 2d
737 b h s d q */
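/* For instance (illustrative): ".4s" parses to { type = NT_s, width = 4 },
   while a bare ".d" (no leading digit) parses to { type = NT_d, width = 0 },
   the latter being the form later required to carry an element index such
   as v1.d[1].  */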
738static bfd_boolean
739parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
740{
741 char *ptr = *str;
742 unsigned width;
743 unsigned element_size;
744 enum neon_el_type type;
745
746 /* skip '.' */
747 ptr++;
748
749 if (!ISDIGIT (*ptr))
750 {
751 width = 0;
752 goto elt_size;
753 }
754 width = strtoul (ptr, &ptr, 10);
755 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
756 {
757 first_error_fmt (_("bad size %d in vector width specifier"), width);
758 return FALSE;
759 }
760
761elt_size:
762 switch (TOLOWER (*ptr))
763 {
764 case 'b':
765 type = NT_b;
766 element_size = 8;
767 break;
768 case 'h':
769 type = NT_h;
770 element_size = 16;
771 break;
772 case 's':
773 type = NT_s;
774 element_size = 32;
775 break;
776 case 'd':
777 type = NT_d;
778 element_size = 64;
779 break;
780 case 'q':
781 if (width == 1)
782 {
783 type = NT_q;
784 element_size = 128;
785 break;
786 }
787 /* fall through. */
788 default:
789 if (*ptr != '\0')
790 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
791 else
792 first_error (_("missing element size"));
793 return FALSE;
794 }
795 if (width != 0 && width * element_size != 64 && width * element_size != 128)
796 {
797 first_error_fmt (_
798 ("invalid element size %d and vector size combination %c"),
799 width, *ptr);
800 return FALSE;
801 }
802 ptr++;
803
804 parsed_type->type = type;
805 parsed_type->width = width;
806
807 *str = ptr;
808
809 return TRUE;
810}
811
812/* Parse a single type, e.g. ".8b", leading period included.
813 Only applicable to Vn registers.
814
815 Return TRUE on success; otherwise return FALSE. */
816static bfd_boolean
817parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
818{
819 char *str = *ccp;
820
821 if (*str == '.')
822 {
823 if (! parse_neon_type_for_operand (vectype, &str))
824 {
825 first_error (_("vector type expected"));
826 return FALSE;
827 }
828 }
829 else
830 return FALSE;
831
832 *ccp = str;
833
834 return TRUE;
835}
836
837/* Parse a register of the type TYPE.
838
 839 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
840 name or the parsed register is not of TYPE.
841
842 Otherwise return the register number, and optionally fill in the actual
843 type of the register in *RTYPE when multiple alternatives were given, and
844 return the register shape and element index information in *TYPEINFO.
845
 846 IN_REG_LIST should be set to TRUE if the caller is parsing a register
847 list. */
848
849static int
850parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
851 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
852{
853 char *str = *ccp;
854 const reg_entry *reg = parse_reg (&str);
855 struct neon_type_el atype;
856 struct neon_type_el parsetype;
857 bfd_boolean is_typed_vecreg = FALSE;
858
859 atype.defined = 0;
860 atype.type = NT_invtype;
861 atype.width = -1;
862 atype.index = 0;
863
864 if (reg == NULL)
865 {
866 if (typeinfo)
867 *typeinfo = atype;
868 set_default_error ();
869 return PARSE_FAIL;
870 }
871
872 if (! aarch64_check_reg_type (reg, type))
873 {
874 DEBUG_TRACE ("reg type check failed");
875 set_default_error ();
876 return PARSE_FAIL;
877 }
878 type = reg->type;
879
880 if (type == REG_TYPE_VN
881 && parse_neon_operand_type (&parsetype, &str))
882 {
 883 /* Register is of the form Vn.[bhsdq]. */
884 is_typed_vecreg = TRUE;
885
886 if (parsetype.width == 0)
887 /* Expect index. In the new scheme we cannot have
888 Vn.[bhsdq] represent a scalar. Therefore any
889 Vn.[bhsdq] should have an index following it.
 890 Except in register lists, of course. */
891 atype.defined |= NTA_HASINDEX;
892 else
893 atype.defined |= NTA_HASTYPE;
894
895 atype.type = parsetype.type;
896 atype.width = parsetype.width;
897 }
898
899 if (skip_past_char (&str, '['))
900 {
901 expressionS exp;
902
903 /* Reject Sn[index] syntax. */
904 if (!is_typed_vecreg)
905 {
906 first_error (_("this type of register can't be indexed"));
907 return PARSE_FAIL;
908 }
909
910 if (in_reg_list == TRUE)
911 {
912 first_error (_("index not allowed inside register list"));
913 return PARSE_FAIL;
914 }
915
916 atype.defined |= NTA_HASINDEX;
917
918 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
919
920 if (exp.X_op != O_constant)
921 {
922 first_error (_("constant expression required"));
923 return PARSE_FAIL;
924 }
925
926 if (! skip_past_char (&str, ']'))
927 return PARSE_FAIL;
928
929 atype.index = exp.X_add_number;
930 }
931 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
932 {
933 /* Indexed vector register expected. */
934 first_error (_("indexed vector register expected"));
935 return PARSE_FAIL;
936 }
937
938 /* A vector reg Vn should be typed or indexed. */
939 if (type == REG_TYPE_VN && atype.defined == 0)
940 {
941 first_error (_("invalid use of vector register"));
942 }
943
944 if (typeinfo)
945 *typeinfo = atype;
946
947 if (rtype)
948 *rtype = type;
949
950 *ccp = str;
951
952 return reg->number;
953}
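
/* Worked examples (illustrative): parsing "v5.2d" returns 5 with
   *TYPEINFO = { NT_d, NTA_HASTYPE, width 2, index 0 }; parsing "v0.s[1]"
   returns 0 with *TYPEINFO = { NT_s, NTA_HASINDEX, width 0, index 1 };
   a plain "v7" with no type or index triggers the "invalid use of vector
   register" diagnostic.  */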
954
955/* Parse register.
956
957 Return the register number on success; return PARSE_FAIL otherwise.
958
959 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
960 the register (e.g. NEON double or quad reg when either has been requested).
961
962 If this is a NEON vector register with additional type information, fill
963 in the struct pointed to by VECTYPE (if non-NULL).
964
 965 This parser does not handle register lists. */
966
967static int
968aarch64_reg_parse (char **ccp, aarch64_reg_type type,
969 aarch64_reg_type *rtype, struct neon_type_el *vectype)
970{
971 struct neon_type_el atype;
972 char *str = *ccp;
973 int reg = parse_typed_reg (&str, type, rtype, &atype,
974 /*in_reg_list= */ FALSE);
975
976 if (reg == PARSE_FAIL)
977 return PARSE_FAIL;
978
979 if (vectype)
980 *vectype = atype;
981
982 *ccp = str;
983
984 return reg;
985}
986
987static inline bfd_boolean
988eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
989{
990 return
991 e1.type == e2.type
992 && e1.defined == e2.defined
993 && e1.width == e2.width && e1.index == e2.index;
994}
995
996/* This function parses the NEON register list. On success, it returns
997 the parsed register list information in the following encoded format:
998
 999 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1000 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1001
1002 The information of the register shape and/or index is returned in
1003 *VECTYPE.
1004
1005 It returns PARSE_FAIL if the register list is invalid.
1006
1007 The list contains one to four registers.
1008 Each register can be one of:
1009 <Vt>.<T>[<index>]
1010 <Vt>.<T>
1011 All <T> should be identical.
1012 All <index> should be identical.
1013 There are restrictions on <Vt> numbers which are checked later
1014 (by reg_list_valid_p). */
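/* Worked example (illustrative): "{v2.4s, v3.4s}" yields nb_regs = 2 and
   ret_val = 2 | (3 << 5) = 0x62, so the function returns
   (0x62 << 2) | (2 - 1) = 0x189, with *VECTYPE = { NT_s, NTA_HASTYPE,
   width 4 }.  */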
1015
1016static int
1017parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1018{
1019 char *str = *ccp;
1020 int nb_regs;
1021 struct neon_type_el typeinfo, typeinfo_first;
1022 int val, val_range;
1023 int in_range;
1024 int ret_val;
1025 int i;
1026 bfd_boolean error = FALSE;
1027 bfd_boolean expect_index = FALSE;
1028
1029 if (*str != '{')
1030 {
1031 set_syntax_error (_("expecting {"));
1032 return PARSE_FAIL;
1033 }
1034 str++;
1035
1036 nb_regs = 0;
1037 typeinfo_first.defined = 0;
1038 typeinfo_first.type = NT_invtype;
1039 typeinfo_first.width = -1;
1040 typeinfo_first.index = 0;
1041 ret_val = 0;
1042 val = -1;
1043 val_range = -1;
1044 in_range = 0;
1045 do
1046 {
1047 if (in_range)
1048 {
1049 str++; /* skip over '-' */
1050 val_range = val;
1051 }
1052 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1053 /*in_reg_list= */ TRUE);
1054 if (val == PARSE_FAIL)
1055 {
1056 set_first_syntax_error (_("invalid vector register in list"));
1057 error = TRUE;
1058 continue;
1059 }
1060 /* reject [bhsd]n */
1061 if (typeinfo.defined == 0)
1062 {
1063 set_first_syntax_error (_("invalid scalar register in list"));
1064 error = TRUE;
1065 continue;
1066 }
1067
1068 if (typeinfo.defined & NTA_HASINDEX)
1069 expect_index = TRUE;
1070
1071 if (in_range)
1072 {
1073 if (val < val_range)
1074 {
1075 set_first_syntax_error
1076 (_("invalid range in vector register list"));
1077 error = TRUE;
1078 }
1079 val_range++;
1080 }
1081 else
1082 {
1083 val_range = val;
1084 if (nb_regs == 0)
1085 typeinfo_first = typeinfo;
1086 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1087 {
1088 set_first_syntax_error
1089 (_("type mismatch in vector register list"));
1090 error = TRUE;
1091 }
1092 }
1093 if (! error)
1094 for (i = val_range; i <= val; i++)
1095 {
1096 ret_val |= i << (5 * nb_regs);
1097 nb_regs++;
1098 }
1099 in_range = 0;
1100 }
1101 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1102
1103 skip_whitespace (str);
1104 if (*str != '}')
1105 {
1106 set_first_syntax_error (_("end of vector register list not found"));
1107 error = TRUE;
1108 }
1109 str++;
1110
1111 skip_whitespace (str);
1112
1113 if (expect_index)
1114 {
1115 if (skip_past_char (&str, '['))
1116 {
1117 expressionS exp;
1118
1119 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1120 if (exp.X_op != O_constant)
1121 {
1122 set_first_syntax_error (_("constant expression required."));
1123 error = TRUE;
1124 }
1125 if (! skip_past_char (&str, ']'))
1126 error = TRUE;
1127 else
1128 typeinfo_first.index = exp.X_add_number;
1129 }
1130 else
1131 {
1132 set_first_syntax_error (_("expected index"));
1133 error = TRUE;
1134 }
1135 }
1136
1137 if (nb_regs > 4)
1138 {
1139 set_first_syntax_error (_("too many registers in vector register list"));
1140 error = TRUE;
1141 }
1142 else if (nb_regs == 0)
1143 {
1144 set_first_syntax_error (_("empty vector register list"));
1145 error = TRUE;
1146 }
1147
1148 *ccp = str;
1149 if (! error)
1150 *vectype = typeinfo_first;
1151
1152 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1153}
1154
1155/* Directives: register aliases. */
1156
1157static reg_entry *
1158insert_reg_alias (char *str, int number, aarch64_reg_type type)
1159{
1160 reg_entry *new;
1161 const char *name;
1162
1163 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1164 {
1165 if (new->builtin)
1166 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1167 str);
1168
1169 /* Only warn about a redefinition if it's not defined as the
1170 same register. */
1171 else if (new->number != number || new->type != type)
1172 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1173
1174 return NULL;
1175 }
1176
1177 name = xstrdup (str);
1178 new = xmalloc (sizeof (reg_entry));
1179
1180 new->name = name;
1181 new->number = number;
1182 new->type = type;
1183 new->builtin = FALSE;
1184
1185 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1186 abort ();
1187
1188 return new;
1189}
1190
1191/* Look for the .req directive. This is of the form:
1192
1193 new_register_name .req existing_register_name
1194
1195 If we find one, or if it looks sufficiently like one that we want to
1196 handle any error here, return TRUE. Otherwise return FALSE. */
1197
1198static bfd_boolean
1199create_register_alias (char *newname, char *p)
1200{
1201 const reg_entry *old;
1202 char *oldname, *nbuf;
1203 size_t nlen;
1204
1205 /* The input scrubber ensures that whitespace after the mnemonic is
1206 collapsed to single spaces. */
1207 oldname = p;
1208 if (strncmp (oldname, " .req ", 6) != 0)
1209 return FALSE;
1210
1211 oldname += 6;
1212 if (*oldname == '\0')
1213 return FALSE;
1214
1215 old = hash_find (aarch64_reg_hsh, oldname);
1216 if (!old)
1217 {
1218 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1219 return TRUE;
1220 }
1221
1222 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1223 the desired alias name, and p points to its end. If not, then
1224 the desired alias name is in the global original_case_string. */
1225#ifdef TC_CASE_SENSITIVE
1226 nlen = p - newname;
1227#else
1228 newname = original_case_string;
1229 nlen = strlen (newname);
1230#endif
1231
1232 nbuf = alloca (nlen + 1);
1233 memcpy (nbuf, newname, nlen);
1234 nbuf[nlen] = '\0';
1235
1236 /* Create aliases under the new name as stated; an all-lowercase
1237 version of the new name; and an all-uppercase version of the new
1238 name. */
1239 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1240 {
1241 for (p = nbuf; *p; p++)
1242 *p = TOUPPER (*p);
1243
1244 if (strncmp (nbuf, newname, nlen))
1245 {
1246 /* If this attempt to create an additional alias fails, do not bother
1247 trying to create the all-lower case alias. We will fail and issue
1248 a second, duplicate error message. This situation arises when the
1249 programmer does something like:
1250 foo .req r0
1251 Foo .req r1
1252 The second .req creates the "Foo" alias but then fails to create
1253 the artificial FOO alias because it has already been created by the
1254 first .req. */
1255 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1256 return TRUE;
1257 }
1258
1259 for (p = nbuf; *p; p++)
1260 *p = TOLOWER (*p);
1261
1262 if (strncmp (nbuf, newname, nlen))
1263 insert_reg_alias (nbuf, old->number, old->type);
1264 }
1265
1266 return TRUE;
1267}
1268
1269/* Should never be called, as .req goes between the alias and the
1270 register name, not at the beginning of the line. */
1271static void
1272s_req (int a ATTRIBUTE_UNUSED)
1273{
1274 as_bad (_("invalid syntax for .req directive"));
1275}
1276
1277/* The .unreq directive deletes an alias which was previously defined
1278 by .req. For example:
1279
1280 my_alias .req r11
1281 .unreq my_alias */
1282
1283static void
1284s_unreq (int a ATTRIBUTE_UNUSED)
1285{
1286 char *name;
1287 char saved_char;
1288
1289 name = input_line_pointer;
1290
1291 while (*input_line_pointer != 0
1292 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1293 ++input_line_pointer;
1294
1295 saved_char = *input_line_pointer;
1296 *input_line_pointer = 0;
1297
1298 if (!*name)
1299 as_bad (_("invalid syntax for .unreq directive"));
1300 else
1301 {
1302 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1303
1304 if (!reg)
1305 as_bad (_("unknown register alias '%s'"), name);
1306 else if (reg->builtin)
1307 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1308 name);
1309 else
1310 {
1311 char *p;
1312 char *nbuf;
1313
1314 hash_delete (aarch64_reg_hsh, name, FALSE);
1315 free ((char *) reg->name);
1316 free (reg);
1317
1318 /* Also locate the all upper case and all lower case versions.
1319 Do not complain if we cannot find one or the other as it
1320 was probably deleted above. */
1321
1322 nbuf = strdup (name);
1323 for (p = nbuf; *p; p++)
1324 *p = TOUPPER (*p);
1325 reg = hash_find (aarch64_reg_hsh, nbuf);
1326 if (reg)
1327 {
1328 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1329 free ((char *) reg->name);
1330 free (reg);
1331 }
1332
1333 for (p = nbuf; *p; p++)
1334 *p = TOLOWER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 free (nbuf);
1344 }
1345 }
1346
1347 *input_line_pointer = saved_char;
1348 demand_empty_rest_of_line ();
1349}
1350
1351/* Directives: Instruction set selection. */
1352
1353#ifdef OBJ_ELF
1354/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1355 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
 1356 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1357 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
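
/* Illustrative example of the resulting mapping symbols (assembly input,
   not part of this file):

	.text
	add	x0, x0, #1	// "$x" emitted at offset 0 (code)
	.word	0x12345678	// "$d" emitted here (data)

   Tools such as objdump consult $x/$d to decide whether to disassemble
   bytes as instructions or to dump them as data.  */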
1358
1359/* Create a new mapping symbol for the transition to STATE. */
1360
1361static void
1362make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1363{
1364 symbolS *symbolP;
1365 const char *symname;
1366 int type;
1367
1368 switch (state)
1369 {
1370 case MAP_DATA:
1371 symname = "$d";
1372 type = BSF_NO_FLAGS;
1373 break;
1374 case MAP_INSN:
1375 symname = "$x";
1376 type = BSF_NO_FLAGS;
1377 break;
1378 default:
1379 abort ();
1380 }
1381
1382 symbolP = symbol_new (symname, now_seg, value, frag);
1383 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1384
1385 /* Save the mapping symbols for future reference. Also check that
1386 we do not place two mapping symbols at the same offset within a
1387 frag. We'll handle overlap between frags in
1388 check_mapping_symbols.
1389
1390 If .fill or other data filling directive generates zero sized data,
1391 the mapping symbol for the following code will have the same value
1392 as the one generated for the data filling directive. In this case,
1393 we replace the old symbol with the new one at the same address. */
1394 if (value == 0)
1395 {
1396 if (frag->tc_frag_data.first_map != NULL)
1397 {
1398 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1399 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1400 &symbol_lastP);
1401 }
1402 frag->tc_frag_data.first_map = symbolP;
1403 }
1404 if (frag->tc_frag_data.last_map != NULL)
1405 {
1406 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1407 S_GET_VALUE (symbolP));
1408 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1409 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.last_map = symbolP;
1413}
1414
1415/* We must sometimes convert a region marked as code to data during
1416 code alignment, if an odd number of bytes have to be padded. The
1417 code mapping symbol is pushed to an aligned address. */
1418
1419static void
1420insert_data_mapping_symbol (enum mstate state,
1421 valueT value, fragS * frag, offsetT bytes)
1422{
1423 /* If there was already a mapping symbol, remove it. */
1424 if (frag->tc_frag_data.last_map != NULL
1425 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1426 frag->fr_address + value)
1427 {
1428 symbolS *symp = frag->tc_frag_data.last_map;
1429
1430 if (value == 0)
1431 {
1432 know (frag->tc_frag_data.first_map == symp);
1433 frag->tc_frag_data.first_map = NULL;
1434 }
1435 frag->tc_frag_data.last_map = NULL;
1436 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1437 }
1438
1439 make_mapping_symbol (MAP_DATA, value, frag);
1440 make_mapping_symbol (state, value + bytes, frag);
1441}
1442
1443static void mapping_state_2 (enum mstate state, int max_chars);
1444
1445/* Set the mapping state to STATE. Only call this when about to
1446 emit some STATE bytes to the file. */
1447
1448void
1449mapping_state (enum mstate state)
1450{
1451 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1452
1453#define TRANSITION(from, to) (mapstate == (from) && state == (to))
1454
1455 if (mapstate == state)
1456 /* The mapping symbol has already been emitted.
1457 There is nothing else to do. */
1458 return;
1459 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1460 /* This case will be evaluated later in the next else. */
1461 return;
1462 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1463 {
1464 /* Only add the symbol if the offset is > 0:
 1465 if we're at the first frag, check its size > 0;
1466 if we're not at the first frag, then for sure
1467 the offset is > 0. */
1468 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1469 const int add_symbol = (frag_now != frag_first)
1470 || (frag_now_fix () > 0);
1471
1472 if (add_symbol)
1473 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1474 }
1475
1476 mapping_state_2 (state, 0);
1477#undef TRANSITION
1478}
1479
1480/* Same as mapping_state, but MAX_CHARS bytes have already been
1481 allocated. Put the mapping symbol that far back. */
1482
1483static void
1484mapping_state_2 (enum mstate state, int max_chars)
1485{
1486 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1487
1488 if (!SEG_NORMAL (now_seg))
1489 return;
1490
1491 if (mapstate == state)
1492 /* The mapping symbol has already been emitted.
1493 There is nothing else to do. */
1494 return;
1495
1496 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1497 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1498}
1499#else
1500#define mapping_state(x) /* nothing */
1501#define mapping_state_2(x, y) /* nothing */
1502#endif
1503
1504/* Directives: sectioning and alignment. */
1505
1506static void
1507s_bss (int ignore ATTRIBUTE_UNUSED)
1508{
1509 /* We don't support putting frags in the BSS segment, we fake it by
1510 marking in_bss, then looking at s_skip for clues. */
1511 subseg_set (bss_section, 0);
1512 demand_empty_rest_of_line ();
1513 mapping_state (MAP_DATA);
1514}
1515
1516static void
1517s_even (int ignore ATTRIBUTE_UNUSED)
1518{
1519 /* Never make frag if expect extra pass. */
1520 if (!need_pass_2)
1521 frag_align (1, 0, 0);
1522
1523 record_alignment (now_seg, 1);
1524
1525 demand_empty_rest_of_line ();
1526}
1527
1528/* Directives: Literal pools. */
1529
1530static literal_pool *
1531find_literal_pool (int size)
1532{
1533 literal_pool *pool;
1534
1535 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1536 {
1537 if (pool->section == now_seg
1538 && pool->sub_section == now_subseg && pool->size == size)
1539 break;
1540 }
1541
1542 return pool;
1543}
1544
1545static literal_pool *
1546find_or_make_literal_pool (int size)
1547{
1548 /* Next literal pool ID number. */
1549 static unsigned int latest_pool_num = 1;
1550 literal_pool *pool;
1551
1552 pool = find_literal_pool (size);
1553
1554 if (pool == NULL)
1555 {
1556 /* Create a new pool. */
1557 pool = xmalloc (sizeof (*pool));
1558 if (!pool)
1559 return NULL;
1560
1561 /* Currently we always put the literal pool in the current text
1562 section. If we were generating "small" model code where we
1563 knew that all code and initialised data was within 1MB then
1564 we could output literals to mergeable, read-only data
1565 sections. */
1566
1567 pool->next_free_entry = 0;
1568 pool->section = now_seg;
1569 pool->sub_section = now_subseg;
1570 pool->size = size;
1571 pool->next = list_of_pools;
1572 pool->symbol = NULL;
1573
1574 /* Add it to the list. */
1575 list_of_pools = pool;
1576 }
1577
1578 /* New pools, and emptied pools, will have a NULL symbol. */
1579 if (pool->symbol == NULL)
1580 {
1581 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1582 (valueT) 0, &zero_address_frag);
1583 pool->id = latest_pool_num++;
1584 }
1585
1586 /* Done. */
1587 return pool;
1588}
1589
1590/* Add the literal of size SIZE in *EXP to the relevant literal pool.
1591 Return TRUE on success, otherwise return FALSE. */
1592static bfd_boolean
1593add_to_lit_pool (expressionS *exp, int size)
1594{
1595 literal_pool *pool;
1596 unsigned int entry;
1597
1598 pool = find_or_make_literal_pool (size);
1599
1600 /* Check if this literal value is already in the pool. */
1601 for (entry = 0; entry < pool->next_free_entry; entry++)
1602 {
1603 if ((pool->literals[entry].X_op == exp->X_op)
1604 && (exp->X_op == O_constant)
1605 && (pool->literals[entry].X_add_number == exp->X_add_number)
1606 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1607 break;
1608
1609 if ((pool->literals[entry].X_op == exp->X_op)
1610 && (exp->X_op == O_symbol)
1611 && (pool->literals[entry].X_add_number == exp->X_add_number)
1612 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1613 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1614 break;
1615 }
1616
1617 /* Do we need to create a new entry? */
1618 if (entry == pool->next_free_entry)
1619 {
1620 if (entry >= MAX_LITERAL_POOL_SIZE)
1621 {
1622 set_syntax_error (_("literal pool overflow"));
1623 return FALSE;
1624 }
1625
1626 pool->literals[entry] = *exp;
1627 pool->next_free_entry += 1;
1628 }
1629
1630 exp->X_op = O_symbol;
1631 exp->X_add_number = ((int) entry) * size;
1632 exp->X_add_symbol = pool->symbol;
1633
1634 return TRUE;
1635}
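
/* For example (illustrative): if 0x1234 is the third distinct constant
   added to the 8-byte pool (entry == 2), *EXP is rewritten to refer to
   pool->symbol + 16, i.e. X_op = O_symbol, X_add_symbol = pool->symbol,
   X_add_number = 2 * 8, so the literal load can later be relocated
   against the pool.  */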
1636
1637/* Can't use symbol_new here, so have to create a symbol and then at
 1638 a later date assign it a value. That's what these functions do. */
1639
1640static void
1641symbol_locate (symbolS * symbolP,
1642 const char *name,/* It is copied, the caller can modify. */
1643 segT segment, /* Segment identifier (SEG_<something>). */
1644 valueT valu, /* Symbol value. */
1645 fragS * frag) /* Associated fragment. */
1646{
1647 unsigned int name_length;
1648 char *preserved_copy_of_name;
1649
1650 name_length = strlen (name) + 1; /* +1 for \0. */
1651 obstack_grow (&notes, name, name_length);
1652 preserved_copy_of_name = obstack_finish (&notes);
1653
1654#ifdef tc_canonicalize_symbol_name
1655 preserved_copy_of_name =
1656 tc_canonicalize_symbol_name (preserved_copy_of_name);
1657#endif
1658
1659 S_SET_NAME (symbolP, preserved_copy_of_name);
1660
1661 S_SET_SEGMENT (symbolP, segment);
1662 S_SET_VALUE (symbolP, valu);
1663 symbol_clear_list_pointers (symbolP);
1664
1665 symbol_set_frag (symbolP, frag);
1666
1667 /* Link to end of symbol chain. */
1668 {
1669 extern int symbol_table_frozen;
1670
1671 if (symbol_table_frozen)
1672 abort ();
1673 }
1674
1675 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1676
1677 obj_symbol_new_hook (symbolP);
1678
1679#ifdef tc_symbol_new_hook
1680 tc_symbol_new_hook (symbolP);
1681#endif
1682
1683#ifdef DEBUG_SYMS
1684 verify_symbol_chain (symbol_rootP, symbol_lastP);
1685#endif /* DEBUG_SYMS */
1686}
1687
1688
1689static void
1690s_ltorg (int ignored ATTRIBUTE_UNUSED)
1691{
1692 unsigned int entry;
1693 literal_pool *pool;
1694 char sym_name[20];
1695 int align;
1696
 1697 for (align = 2; align <= 4; align++)
1698 {
1699 int size = 1 << align;
1700
1701 pool = find_literal_pool (size);
1702 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1703 continue;
1704
1705 mapping_state (MAP_DATA);
1706
1707 /* Align pool as you have word accesses.
1708 Only make a frag if we have to. */
1709 if (!need_pass_2)
1710 frag_align (align, 0, 0);
1711
1712 record_alignment (now_seg, align);
1713
1714 sprintf (sym_name, "$$lit_\002%x", pool->id);
1715
1716 symbol_locate (pool->symbol, sym_name, now_seg,
1717 (valueT) frag_now_fix (), frag_now);
1718 symbol_table_insert (pool->symbol);
1719
1720 for (entry = 0; entry < pool->next_free_entry; entry++)
1721 /* First output the expression in the instruction to the pool. */
1722 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1723
1724 /* Mark the pool as empty. */
1725 pool->next_free_entry = 0;
1726 pool->symbol = NULL;
1727 }
1728}
1729
1730#ifdef OBJ_ELF
1731/* Forward declarations for functions below, in the MD interface
1732 section. */
1733static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1734static struct reloc_table_entry * find_reloc_table_entry (char **);
1735
1736/* Directives: Data. */
1737/* N.B. the support for relocation suffix in this directive needs to be
1738 implemented properly. */
1739
1740static void
1741s_aarch64_elf_cons (int nbytes)
1742{
1743 expressionS exp;
1744
1745#ifdef md_flush_pending_output
1746 md_flush_pending_output ();
1747#endif
1748
1749 if (is_it_end_of_statement ())
1750 {
1751 demand_empty_rest_of_line ();
1752 return;
1753 }
1754
1755#ifdef md_cons_align
1756 md_cons_align (nbytes);
1757#endif
1758
1759 mapping_state (MAP_DATA);
1760 do
1761 {
1762 struct reloc_table_entry *reloc;
1763
1764 expression (&exp);
1765
1766 if (exp.X_op != O_symbol)
1767 emit_expr (&exp, (unsigned int) nbytes);
1768 else
1769 {
1770 skip_past_char (&input_line_pointer, '#');
1771 if (skip_past_char (&input_line_pointer, ':'))
1772 {
1773 reloc = find_reloc_table_entry (&input_line_pointer);
1774 if (reloc == NULL)
1775 as_bad (_("unrecognized relocation suffix"));
1776 else
1777 as_bad (_("unimplemented relocation suffix"));
1778 ignore_rest_of_line ();
1779 return;
1780 }
1781 else
1782 emit_expr (&exp, (unsigned int) nbytes);
1783 }
1784 }
1785 while (*input_line_pointer++ == ',');
1786
1787 /* Put terminator back into stream. */
1788 input_line_pointer--;
1789 demand_empty_rest_of_line ();
1790}
1791
1792#endif /* OBJ_ELF */
1793
1794/* Output a 32-bit word, but mark as an instruction. */
1795
1796static void
1797s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1798{
1799 expressionS exp;
1800
1801#ifdef md_flush_pending_output
1802 md_flush_pending_output ();
1803#endif
1804
1805 if (is_it_end_of_statement ())
1806 {
1807 demand_empty_rest_of_line ();
1808 return;
1809 }
1810
1811 if (!need_pass_2)
1812 frag_align_code (2, 0);
1813#ifdef OBJ_ELF
1814 mapping_state (MAP_INSN);
1815#endif
1816
1817 do
1818 {
1819 expression (&exp);
1820 if (exp.X_op != O_constant)
1821 {
1822 as_bad (_("constant expression required"));
1823 ignore_rest_of_line ();
1824 return;
1825 }
1826
1827 if (target_big_endian)
1828 {
1829 unsigned int val = exp.X_add_number;
1830 exp.X_add_number = SWAP_32 (val);
1831 }
1832 emit_expr (&exp, 4);
1833 }
1834 while (*input_line_pointer++ == ',');
1835
1836 /* Put terminator back into stream. */
1837 input_line_pointer--;
1838 demand_empty_rest_of_line ();
1839}
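
/* For example (illustrative):

	.inst	0xd503201f	// emits a NOP encoding, marked as code
				// (with an $x mapping symbol on ELF targets)

   whereas ".word 0xd503201f" would emit the same bytes marked as data.  */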
1840
1841#ifdef OBJ_ELF
1842/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1843
1844static void
1845s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1846{
1847 expressionS exp;
1848
1849 /* Since we're just labelling the code, there's no need to define a
1850 mapping symbol. */
1851 expression (&exp);
1852 /* Make sure there is enough room in this frag for the following
1853 blr. This trick only works if the blr follows immediately after
1854 the .tlsdesc directive. */
1855 frag_grow (4);
1856 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1857 BFD_RELOC_AARCH64_TLSDESC_CALL);
1858
1859 demand_empty_rest_of_line ();
1860}
1861#endif /* OBJ_ELF */
1862
1863static void s_aarch64_arch (int);
1864static void s_aarch64_cpu (int);
1865
1866/* This table describes all the machine specific pseudo-ops the assembler
1867 has to support. The fields are:
1868 pseudo-op name without dot
1869 function to call to execute this pseudo-op
1870 Integer arg to pass to the function. */
1871
1872const pseudo_typeS md_pseudo_table[] = {
1873 /* Never called because '.req' does not start a line. */
1874 {"req", s_req, 0},
1875 {"unreq", s_unreq, 0},
1876 {"bss", s_bss, 0},
1877 {"even", s_even, 0},
1878 {"ltorg", s_ltorg, 0},
1879 {"pool", s_ltorg, 0},
1880 {"cpu", s_aarch64_cpu, 0},
1881 {"arch", s_aarch64_arch, 0},
1882 {"inst", s_aarch64_inst, 0},
1883#ifdef OBJ_ELF
1884 {"tlsdesccall", s_tlsdesccall, 0},
1885 {"word", s_aarch64_elf_cons, 4},
1886 {"long", s_aarch64_elf_cons, 4},
1887 {"xword", s_aarch64_elf_cons, 8},
1888 {"dword", s_aarch64_elf_cons, 8},
1889#endif
1890 {0, 0, 0}
1891};
1892\f
1893
1894/* Check whether STR points to a register name followed by a comma or the
1895 end of line; REG_TYPE indicates which register types are checked
1896 against. Return TRUE if STR is such a register name; otherwise return
1897 FALSE. The function does not intend to produce any diagnostics, but since
1898 the register parser aarch64_reg_parse, which is called by this function,
1899 does produce diagnostics, we call clear_error to clear any diagnostics
1900 that may be generated by aarch64_reg_parse.
1901 Also, the function returns FALSE directly if there is any user error
1902 present at the function entry. This prevents the existing diagnostics
1903 state from being spoiled.
1904 The function currently serves parse_constant_immediate and
1905 parse_big_immediate only. */
1906static bfd_boolean
1907reg_name_p (char *str, aarch64_reg_type reg_type)
1908{
1909 int reg;
1910
1911 /* Prevent the diagnostics state from being spoiled. */
1912 if (error_p ())
1913 return FALSE;
1914
1915 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1916
1917 /* Clear the parsing error that may be set by the reg parser. */
1918 clear_error ();
1919
1920 if (reg == PARSE_FAIL)
1921 return FALSE;
1922
1923 skip_whitespace (str);
1924 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1925 return TRUE;
1926
1927 return FALSE;
1928}
1929
1930/* Parser functions used exclusively in instruction operands. */
1931
1932/* Parse an immediate expression which may not be constant.
1933
1934 To prevent the expression parser from pushing a register name
1935 into the symbol table as an undefined symbol, firstly a check is
1936 done to find out whether STR is a valid register name followed
1937 by a comma or the end of line. Return FALSE if STR is such a
1938 string. */
1939
1940static bfd_boolean
1941parse_immediate_expression (char **str, expressionS *exp)
1942{
1943 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1944 {
1945 set_recoverable_error (_("immediate operand required"));
1946 return FALSE;
1947 }
1948
1949 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1950
1951 if (exp->X_op == O_absent)
1952 {
1953 set_fatal_syntax_error (_("missing immediate expression"));
1954 return FALSE;
1955 }
1956
1957 return TRUE;
1958}
1959
1960/* Constant immediate-value read function for use in insn parsing.
1961 STR points to the beginning of the immediate (with the optional
1962 leading #); *VAL receives the value.
1963
1964 Return TRUE on success; otherwise return FALSE. */
1965
1966static bfd_boolean
1967parse_constant_immediate (char **str, int64_t * val)
1968{
1969 expressionS exp;
1970
1971 if (! parse_immediate_expression (str, &exp))
1972 return FALSE;
1973
1974 if (exp.X_op != O_constant)
1975 {
1976 set_syntax_error (_("constant expression required"));
1977 return FALSE;
1978 }
1979
1980 *val = exp.X_add_number;
1981 return TRUE;
1982}
1983
1984static uint32_t
1985encode_imm_float_bits (uint32_t imm)
1986{
1987 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
1988 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
1989}
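
/* Worked example (illustrative): #1.0 is 0x3f800000 in single precision,
   so encode_imm_float_bits gives ((0x3f800000 >> 19) & 0x7f) | 0 = 0x70;
   for #-1.0 (0xbf800000) the sign bit contributes 0x80, giving 0xf0.  */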
1990
1991/* Return TRUE if IMM is a valid floating-point immediate; return FALSE
1992 otherwise. */
1993static bfd_boolean
1994aarch64_imm_float_p (uint32_t imm)
1995{
1996 /* 3 32222222 2221111111111
1997 1 09876543 21098765432109876543210
1998 n Eeeeeexx xxxx0000000000000000000 */
1999 uint32_t e;
2000
2001 e = (imm >> 30) & 0x1;
2002 if (e == 0)
2003 e = 0x3e000000;
2004 else
2005 e = 0x40000000;
2006 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */
2007 && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */
2008}
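
/* For instance (illustrative): 2.0f = 0x40000000 passes the test (low 19
   bits clear, and bits 25-30 match the pattern required when exponent
   bit 30 is set), whereas 0.1f = 0x3dcccccd is rejected because its low
   19 bits are non-zero.  */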
2009
2010/* Note: this accepts the floating-point 0 constant. */
2011static bfd_boolean
2012parse_aarch64_imm_float (char **ccp, int *immed)
2013{
2014 char *str = *ccp;
2015 char *fpnum;
2016 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2017 int found_fpchar = 0;
2018
2019 skip_past_char (&str, '#');
2020
2021 /* We must not accidentally parse an integer as a floating-point number. Make
2022 sure that the value we parse is not an integer by checking for special
2023 characters '.' or 'e'.
2024 FIXME: This is a hack that is not very efficient, but doing better is
2025 tricky because type information isn't in a very usable state at parse
2026 time. */
2027 fpnum = str;
2028 skip_whitespace (fpnum);
2029
2030 if (strncmp (fpnum, "0x", 2) == 0)
2031 return FALSE;
2032 else
2033 {
2034 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2035 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2036 {
2037 found_fpchar = 1;
2038 break;
2039 }
2040
2041 if (!found_fpchar)
2042 return FALSE;
2043 }
2044
2045 if ((str = atof_ieee (str, 's', words)) != NULL)
2046 {
2047 unsigned fpword = 0;
2048 int i;
2049
2050 /* Our FP word must be 32 bits (single-precision FP). */
2051 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2052 {
2053 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2054 fpword |= words[i];
2055 }
2056
2057 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2058 *immed = fpword;
2059 else
2060 goto invalid_fp;
2061
2062 *ccp = str;
2063
2064 return TRUE;
2065 }
2066
2067invalid_fp:
2068 set_fatal_syntax_error (_("invalid floating-point constant"));
2069 return FALSE;
2070}
2071
2072/* Less-generic immediate-value read function with the possibility of loading
2073 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2074 instructions.
2075
2076 To prevent the expression parser from pushing a register name into the
2077   symbol table as an undefined symbol, a check is first done to find
2078 out whether STR is a valid register name followed by a comma or the end
2079 of line. Return FALSE if STR is such a register. */
2080
2081static bfd_boolean
2082parse_big_immediate (char **str, int64_t *imm)
2083{
2084 char *ptr = *str;
2085
2086 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2087 {
2088 set_syntax_error (_("immediate operand required"));
2089 return FALSE;
2090 }
2091
2092 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2093
2094 if (inst.reloc.exp.X_op == O_constant)
2095 *imm = inst.reloc.exp.X_add_number;
2096
2097 *str = ptr;
2098
2099 return TRUE;
2100}
2101
2102/* Set up the GAS internal fixup in *RELOC for operand *OPERAND.
2103   If NEED_LIBOPCODES_P is non-zero, the fixup will need
2104   assistance from libopcodes. */
2105
2106static inline void
2107aarch64_set_gas_internal_fixup (struct reloc *reloc,
2108 const aarch64_opnd_info *operand,
2109 int need_libopcodes_p)
2110{
2111 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2112 reloc->opnd = operand->type;
2113 if (need_libopcodes_p)
2114 reloc->need_libopcodes_p = 1;
2115}
2116
2117/* Return TRUE if the instruction needs to be fixed up later internally by
2118   GAS; otherwise return FALSE. */
2119
2120static inline bfd_boolean
2121aarch64_gas_internal_fixup_p (void)
2122{
2123 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2124}
2125
2126/* Assign the immediate value to the relevant field in *OPERAND if
2127 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2128 needs an internal fixup in a later stage.
2129 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2130 IMM.VALUE that may get assigned with the constant. */
2131static inline void
2132assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2133 aarch64_opnd_info *operand,
2134 int addr_off_p,
2135 int need_libopcodes_p,
2136 int skip_p)
2137{
2138 if (reloc->exp.X_op == O_constant)
2139 {
2140 if (addr_off_p)
2141 operand->addr.offset.imm = reloc->exp.X_add_number;
2142 else
2143 operand->imm.value = reloc->exp.X_add_number;
2144 reloc->type = BFD_RELOC_UNUSED;
2145 }
2146 else
2147 {
2148 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2149 /* Tell libopcodes to ignore this operand or not. This is helpful
2150 when one of the operands needs to be fixed up later but we need
2151 libopcodes to check the other operands. */
2152 operand->skip = skip_p;
2153 }
2154}
2155
2156/* Relocation modifiers. Each entry in the table contains the textual
2157 name for the relocation which may be placed before a symbol used as
2158 a load/store offset, or add immediate. It must be surrounded by a
2159 leading and trailing colon, for example:
2160
2161 ldr x0, [x1, #:rello:varsym]
2162 add x0, x1, #:rello:varsym */
2163
2164struct reloc_table_entry
2165{
2166 const char *name;
2167 int pc_rel;
2168 bfd_reloc_code_real_type adrp_type;
2169 bfd_reloc_code_real_type movw_type;
2170 bfd_reloc_code_real_type add_type;
2171 bfd_reloc_code_real_type ldst_type;
2172};
2173
2174static struct reloc_table_entry reloc_table[] = {
2175 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2176 {"lo12", 0,
2177 0,
2178 0,
2179 BFD_RELOC_AARCH64_ADD_LO12,
2180 BFD_RELOC_AARCH64_LDST_LO12},
2181
2182 /* Higher 21 bits of pc-relative page offset: ADRP */
2183 {"pg_hi21", 1,
2184 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2185 0,
2186 0,
2187 0},
2188
2189 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2190 {"pg_hi21_nc", 1,
2191 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2192 0,
2193 0,
2194 0},
2195
2196 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2197 {"abs_g0", 0,
2198 0,
2199 BFD_RELOC_AARCH64_MOVW_G0,
2200 0,
2201 0},
2202
2203 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2204 {"abs_g0_s", 0,
2205 0,
2206 BFD_RELOC_AARCH64_MOVW_G0_S,
2207 0,
2208 0},
2209
2210 /* Less significant bits 0-15 of address/value: MOVK, no check */
2211 {"abs_g0_nc", 0,
2212 0,
2213 BFD_RELOC_AARCH64_MOVW_G0_NC,
2214 0,
2215 0},
2216
2217 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2218 {"abs_g1", 0,
2219 0,
2220 BFD_RELOC_AARCH64_MOVW_G1,
2221 0,
2222 0},
2223
2224 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2225 {"abs_g1_s", 0,
2226 0,
2227 BFD_RELOC_AARCH64_MOVW_G1_S,
2228 0,
2229 0},
2230
2231 /* Less significant bits 16-31 of address/value: MOVK, no check */
2232 {"abs_g1_nc", 0,
2233 0,
2234 BFD_RELOC_AARCH64_MOVW_G1_NC,
2235 0,
2236 0},
2237
2238 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2239 {"abs_g2", 0,
2240 0,
2241 BFD_RELOC_AARCH64_MOVW_G2,
2242 0,
2243 0},
2244
2245 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2246 {"abs_g2_s", 0,
2247 0,
2248 BFD_RELOC_AARCH64_MOVW_G2_S,
2249 0,
2250 0},
2251
2252 /* Less significant bits 32-47 of address/value: MOVK, no check */
2253 {"abs_g2_nc", 0,
2254 0,
2255 BFD_RELOC_AARCH64_MOVW_G2_NC,
2256 0,
2257 0},
2258
2259 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2260 {"abs_g3", 0,
2261 0,
2262 BFD_RELOC_AARCH64_MOVW_G3,
2263 0,
2264 0},
2265 /* Get to the GOT entry for a symbol. */
2266 {"got_prel19", 0,
2267 0,
2268 0,
2269 0,
2270 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2271 /* Get to the page containing GOT entry for a symbol. */
2272 {"got", 1,
2273 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2274 0,
2275 0,
2276 0},
2277 /* 12 bit offset into the page containing GOT entry for that symbol. */
2278 {"got_lo12", 0,
2279 0,
2280 0,
2281 0,
2282 BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2283
2284 /* Get to the page containing GOT TLS entry for a symbol */
2285 {"tlsgd", 0,
2286 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2287 0,
2288 0,
2289 0},
2290
2291 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2292 {"tlsgd_lo12", 0,
2293 0,
2294 0,
2295 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2296 0},
2297
2298 /* Get to the page containing GOT TLS entry for a symbol */
2299 {"tlsdesc", 0,
2300 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
2301 0,
2302 0,
2303 0},
2304
2305 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2306 {"tlsdesc_lo12", 0,
2307 0,
2308 0,
2309 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2310 BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2311
2312 /* Get to the page containing GOT TLS entry for a symbol */
2313 {"gottprel", 0,
2314 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2315 0,
2316 0,
2317 0},
2318
2319 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2320 {"gottprel_lo12", 0,
2321 0,
2322 0,
2323 0,
2324 BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2325
2326 /* Get tp offset for a symbol. */
2327 {"tprel", 0,
2328 0,
2329 0,
2330 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2331 0},
2332
2333 /* Get tp offset for a symbol. */
2334 {"tprel_lo12", 0,
2335 0,
2336 0,
2337 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2338 0},
2339
2340 /* Get tp offset for a symbol. */
2341 {"tprel_hi12", 0,
2342 0,
2343 0,
2344 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2345 0},
2346
2347 /* Get tp offset for a symbol. */
2348 {"tprel_lo12_nc", 0,
2349 0,
2350 0,
2351 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2352 0},
2353
2354 /* Most significant bits 32-47 of address/value: MOVZ. */
2355 {"tprel_g2", 0,
2356 0,
2357 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2358 0,
2359 0},
2360
2361 /* Most significant bits 16-31 of address/value: MOVZ. */
2362 {"tprel_g1", 0,
2363 0,
2364 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2365 0,
2366 0},
2367
2368 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2369 {"tprel_g1_nc", 0,
2370 0,
2371 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2372 0,
2373 0},
2374
2375 /* Most significant bits 0-15 of address/value: MOVZ. */
2376 {"tprel_g0", 0,
2377 0,
2378 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2379 0,
2380 0},
2381
2382 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2383 {"tprel_g0_nc", 0,
2384 0,
2385 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2386 0,
2387 0},
2388};
2389
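/* For illustration, typical uses of the modifiers above (assuming a
   symbol named "var"):

     adrp x0, :pg_hi21:var      // or simply "adrp x0, var"
     add  x0, x0, #:lo12:var
     ldr  x1, [x0, #:lo12:var]
     movz x2, #:abs_g1:var
     movk x2, #:abs_g0_nc:var  */
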
2390/* Given the address of a pointer pointing to the textual name of a
2391 relocation as may appear in assembler source, attempt to find its
2392 details in reloc_table. The pointer will be updated to the character
2393 after the trailing colon. On failure, NULL will be returned;
2394 otherwise return the reloc_table_entry. */
2395
2396static struct reloc_table_entry *
2397find_reloc_table_entry (char **str)
2398{
2399 unsigned int i;
2400 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2401 {
2402 int length = strlen (reloc_table[i].name);
2403
2404 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2405 && (*str)[length] == ':')
2406 {
2407 *str += (length + 1);
2408 return &reloc_table[i];
2409 }
2410 }
2411
2412 return NULL;
2413}
2414
2415/* Mode argument to parse_shift and parser_shifter_operand. */
2416enum parse_shift_mode
2417{
2418 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2419 "#imm{,lsl #n}" */
2420 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2421 "#imm" */
2422 SHIFTED_LSL, /* bare "lsl #n" */
2423 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2424 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2425};
2426
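/* For illustration, operands accepted in the modes above include
   (registers and immediate values chosen arbitrarily):
     SHIFTED_ARITH_IMM:  "x1, lsl #2" or "#20, lsl #12"
     SHIFTED_LOGIC_IMM:  "x1, ror #3" or "#0xff00"
     SHIFTED_LSL_MSL:    "msl #8" (AdvSIMD modified immediate)
     SHIFTED_REG_OFFSET: "w2, uxtw #2" or "x2, lsl #3"  */
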
2427/* Parse a <shift> operator on an AArch64 data processing instruction.
2428 Return TRUE on success; otherwise return FALSE. */
2429static bfd_boolean
2430parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2431{
2432 const struct aarch64_name_value_pair *shift_op;
2433 enum aarch64_modifier_kind kind;
2434 expressionS exp;
2435 int exp_has_prefix;
2436 char *s = *str;
2437 char *p = s;
2438
2439 for (p = *str; ISALPHA (*p); p++)
2440 ;
2441
2442 if (p == *str)
2443 {
2444 set_syntax_error (_("shift expression expected"));
2445 return FALSE;
2446 }
2447
2448 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2449
2450 if (shift_op == NULL)
2451 {
2452 set_syntax_error (_("shift operator expected"));
2453 return FALSE;
2454 }
2455
2456 kind = aarch64_get_operand_modifier (shift_op);
2457
2458 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2459 {
2460 set_syntax_error (_("invalid use of 'MSL'"));
2461 return FALSE;
2462 }
2463
2464 switch (mode)
2465 {
2466 case SHIFTED_LOGIC_IMM:
2467 if (aarch64_extend_operator_p (kind) == TRUE)
2468 {
2469 set_syntax_error (_("extending shift is not permitted"));
2470 return FALSE;
2471 }
2472 break;
2473
2474 case SHIFTED_ARITH_IMM:
2475 if (kind == AARCH64_MOD_ROR)
2476 {
2477 set_syntax_error (_("'ROR' shift is not permitted"));
2478 return FALSE;
2479 }
2480 break;
2481
2482 case SHIFTED_LSL:
2483 if (kind != AARCH64_MOD_LSL)
2484 {
2485 set_syntax_error (_("only 'LSL' shift is permitted"));
2486 return FALSE;
2487 }
2488 break;
2489
2490 case SHIFTED_REG_OFFSET:
2491 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2492 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2493 {
2494 set_fatal_syntax_error
2495 (_("invalid shift for the register offset addressing mode"));
2496 return FALSE;
2497 }
2498 break;
2499
2500 case SHIFTED_LSL_MSL:
2501 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2502 {
2503 set_syntax_error (_("invalid shift operator"));
2504 return FALSE;
2505 }
2506 break;
2507
2508 default:
2509 abort ();
2510 }
2511
2512 /* Whitespace can appear here if the next thing is a bare digit. */
2513 skip_whitespace (p);
2514
2515 /* Parse shift amount. */
2516 exp_has_prefix = 0;
2517 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2518 exp.X_op = O_absent;
2519 else
2520 {
2521 if (is_immediate_prefix (*p))
2522 {
2523 p++;
2524 exp_has_prefix = 1;
2525 }
2526 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2527 }
2528 if (exp.X_op == O_absent)
2529 {
2530 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2531 {
2532 set_syntax_error (_("missing shift amount"));
2533 return FALSE;
2534 }
2535 operand->shifter.amount = 0;
2536 }
2537 else if (exp.X_op != O_constant)
2538 {
2539 set_syntax_error (_("constant shift amount required"));
2540 return FALSE;
2541 }
2542 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2543 {
2544 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2545 return FALSE;
2546 }
2547 else
2548 {
2549 operand->shifter.amount = exp.X_add_number;
2550 operand->shifter.amount_present = 1;
2551 }
2552
2553 operand->shifter.operator_present = 1;
2554 operand->shifter.kind = kind;
2555
2556 *str = p;
2557 return TRUE;
2558}
2559
2560/* Parse a <shifter_operand> for a data processing instruction:
2561
2562 #<immediate>
2563 #<immediate>, LSL #imm
2564
2565 Validation of immediate operands is deferred to md_apply_fix.
2566
2567 Return TRUE on success; otherwise return FALSE. */
2568
2569static bfd_boolean
2570parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2571 enum parse_shift_mode mode)
2572{
2573 char *p;
2574
2575 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2576 return FALSE;
2577
2578 p = *str;
2579
2580 /* Accept an immediate expression. */
2581 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2582 return FALSE;
2583
2584 /* Accept optional LSL for arithmetic immediate values. */
2585 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2586 if (! parse_shift (&p, operand, SHIFTED_LSL))
2587 return FALSE;
2588
2589   /* Do not accept any shifter for logical immediate values. */
2590 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2591 && parse_shift (&p, operand, mode))
2592 {
2593 set_syntax_error (_("unexpected shift operator"));
2594 return FALSE;
2595 }
2596
2597 *str = p;
2598 return TRUE;
2599}
2600
2601/* Parse a <shifter_operand> for a data processing instruction:
2602
2603 <Rm>
2604 <Rm>, <shift>
2605 #<immediate>
2606 #<immediate>, LSL #imm
2607
2608 where <shift> is handled by parse_shift above, and the last two
2609 cases are handled by the function above.
2610
2611 Validation of immediate operands is deferred to md_apply_fix.
2612
2613 Return TRUE on success; otherwise return FALSE. */
2614
2615static bfd_boolean
2616parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2617 enum parse_shift_mode mode)
2618{
2619 int reg;
2620 int isreg32, isregzero;
2621 enum aarch64_operand_class opd_class
2622 = aarch64_get_operand_class (operand->type);
2623
2624 if ((reg =
2625 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2626 {
2627 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2628 {
2629 set_syntax_error (_("unexpected register in the immediate operand"));
2630 return FALSE;
2631 }
2632
2633 if (!isregzero && reg == REG_SP)
2634 {
2635 set_syntax_error (BAD_SP);
2636 return FALSE;
2637 }
2638
2639 operand->reg.regno = reg;
2640 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2641
2642 /* Accept optional shift operation on register. */
2643 if (! skip_past_comma (str))
2644 return TRUE;
2645
2646 if (! parse_shift (str, operand, mode))
2647 return FALSE;
2648
2649 return TRUE;
2650 }
2651 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2652 {
2653 set_syntax_error
2654 (_("integer register expected in the extended/shifted operand "
2655 "register"));
2656 return FALSE;
2657 }
2658
2659 /* We have a shifted immediate variable. */
2660 return parse_shifter_operand_imm (str, operand, mode);
2661}
2662
2663/* Return TRUE on success; return FALSE otherwise. */
2664
2665static bfd_boolean
2666parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2667 enum parse_shift_mode mode)
2668{
2669 char *p = *str;
2670
2671 /* Determine if we have the sequence of characters #: or just :
2672 coming next. If we do, then we check for a :rello: relocation
2673 modifier. If we don't, punt the whole lot to
2674 parse_shifter_operand. */
2675
2676 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2677 {
2678 struct reloc_table_entry *entry;
2679
2680 if (p[0] == '#')
2681 p += 2;
2682 else
2683 p++;
2684 *str = p;
2685
2686 /* Try to parse a relocation. Anything else is an error. */
2687 if (!(entry = find_reloc_table_entry (str)))
2688 {
2689 set_syntax_error (_("unknown relocation modifier"));
2690 return FALSE;
2691 }
2692
2693 if (entry->add_type == 0)
2694 {
2695 set_syntax_error
2696 (_("this relocation modifier is not allowed on this instruction"));
2697 return FALSE;
2698 }
2699
2700 /* Save str before we decompose it. */
2701 p = *str;
2702
2703 /* Next, we parse the expression. */
2704 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2705 return FALSE;
2706
2707 /* Record the relocation type (use the ADD variant here). */
2708 inst.reloc.type = entry->add_type;
2709 inst.reloc.pc_rel = entry->pc_rel;
2710
2711 /* If str is empty, we've reached the end, stop here. */
2712 if (**str == '\0')
2713 return TRUE;
2714
2715 /* Otherwise, we have a shifted reloc modifier, so rewind to
2716 recover the variable name and continue parsing for the shifter. */
2717 *str = p;
2718 return parse_shifter_operand_imm (str, operand, mode);
2719 }
2720
2721 return parse_shifter_operand (str, operand, mode);
2722}
2723
2724/* Parse all forms of an address expression. Information is written
2725 to *OPERAND and/or inst.reloc.
2726
2727 The A64 instruction set has the following addressing modes:
2728
2729 Offset
2730 [base] // in SIMD ld/st structure
2731 [base{,#0}] // in ld/st exclusive
2732 [base{,#imm}]
2733 [base,Xm{,LSL #imm}]
2734 [base,Xm,SXTX {#imm}]
2735 [base,Wm,(S|U)XTW {#imm}]
2736 Pre-indexed
2737 [base,#imm]!
2738 Post-indexed
2739 [base],#imm
2740 [base],Xm // in SIMD ld/st structure
2741 PC-relative (literal)
2742 label
2743 =immediate
2744
2745 (As a convenience, the notation "=immediate" is permitted in conjunction
2746 with the pc-relative literal load instructions to automatically place an
2747 immediate value or symbolic address in a nearby literal pool and generate
2748 a hidden label which references it.)
2749
2750 Upon a successful parsing, the address structure in *OPERAND will be
2751 filled in the following way:
2752
2753 .base_regno = <base>
2754 .offset.is_reg // 1 if the offset is a register
2755 .offset.imm = <imm>
2756 .offset.regno = <Rm>
2757
2758 For different addressing modes defined in the A64 ISA:
2759
2760 Offset
2761 .pcrel=0; .preind=1; .postind=0; .writeback=0
2762 Pre-indexed
2763 .pcrel=0; .preind=1; .postind=0; .writeback=1
2764 Post-indexed
2765 .pcrel=0; .preind=0; .postind=1; .writeback=1
2766 PC-relative (literal)
2767 .pcrel=1; .preind=1; .postind=0; .writeback=0
2768
2769 The shift/extension information, if any, will be stored in .shifter.
2770
2771 It is the caller's responsibility to check for addressing modes not
2772 supported by the instruction, and to set inst.reloc.type. */
2773
2774static bfd_boolean
2775parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2776 int accept_reg_post_index)
2777{
2778 char *p = *str;
2779 int reg;
2780 int isreg32, isregzero;
2781 expressionS *exp = &inst.reloc.exp;
2782
2783 if (! skip_past_char (&p, '['))
2784 {
2785 /* =immediate or label. */
2786 operand->addr.pcrel = 1;
2787 operand->addr.preind = 1;
2788
2789 /* #:<reloc_op>:<symbol> */
2790 skip_past_char (&p, '#');
2791 if (reloc && skip_past_char (&p, ':'))
2792 {
2793 struct reloc_table_entry *entry;
2794
2795 /* Try to parse a relocation modifier. Anything else is
2796 an error. */
2797 entry = find_reloc_table_entry (&p);
2798 if (! entry)
2799 {
2800 set_syntax_error (_("unknown relocation modifier"));
2801 return FALSE;
2802 }
2803
2804 if (entry->ldst_type == 0)
2805 {
2806 set_syntax_error
2807 (_("this relocation modifier is not allowed on this "
2808 "instruction"));
2809 return FALSE;
2810 }
2811
2812 /* #:<reloc_op>: */
2813 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2814 {
2815 set_syntax_error (_("invalid relocation expression"));
2816 return FALSE;
2817 }
2818
2819 /* #:<reloc_op>:<expr> */
2820 /* Record the load/store relocation type. */
2821 inst.reloc.type = entry->ldst_type;
2822 inst.reloc.pc_rel = entry->pc_rel;
2823 }
2824 else
2825	{
2826
2827 if (skip_past_char (&p, '='))
2828 /* =immediate; need to generate the literal in the literal pool. */
2829 inst.gen_lit_pool = 1;
2830
2831 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2832 {
2833 set_syntax_error (_("invalid address"));
2834 return FALSE;
2835 }
2836 }
2837
2838 *str = p;
2839 return TRUE;
2840 }
2841
2842 /* [ */
2843
2844 /* Accept SP and reject ZR */
2845 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2846 if (reg == PARSE_FAIL || isreg32)
2847 {
2848 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2849 return FALSE;
2850 }
2851 operand->addr.base_regno = reg;
2852
2853 /* [Xn */
2854 if (skip_past_comma (&p))
2855 {
2856 /* [Xn, */
2857 operand->addr.preind = 1;
2858
2859 /* Reject SP and accept ZR */
2860 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2861 if (reg != PARSE_FAIL)
2862 {
2863 /* [Xn,Rm */
2864 operand->addr.offset.regno = reg;
2865 operand->addr.offset.is_reg = 1;
2866 /* Shifted index. */
2867 if (skip_past_comma (&p))
2868 {
2869 /* [Xn,Rm, */
2870 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2871	      /* Use the diagnostics set in parse_shift, so do not set a new
2872 error message here. */
2873 return FALSE;
2874 }
2875 /* We only accept:
2876 [base,Xm{,LSL #imm}]
2877 [base,Xm,SXTX {#imm}]
2878 [base,Wm,(S|U)XTW {#imm}] */
2879 if (operand->shifter.kind == AARCH64_MOD_NONE
2880 || operand->shifter.kind == AARCH64_MOD_LSL
2881 || operand->shifter.kind == AARCH64_MOD_SXTX)
2882 {
2883 if (isreg32)
2884 {
2885 set_syntax_error (_("invalid use of 32-bit register offset"));
2886 return FALSE;
2887 }
2888 }
2889 else if (!isreg32)
2890 {
2891 set_syntax_error (_("invalid use of 64-bit register offset"));
2892 return FALSE;
2893 }
2894 }
2895 else
2896 {
2897 /* [Xn,#:<reloc_op>:<symbol> */
2898 skip_past_char (&p, '#');
2899 if (reloc && skip_past_char (&p, ':'))
2900 {
2901 struct reloc_table_entry *entry;
2902
2903 /* Try to parse a relocation modifier. Anything else is
2904 an error. */
2905 if (!(entry = find_reloc_table_entry (&p)))
2906 {
2907 set_syntax_error (_("unknown relocation modifier"));
2908 return FALSE;
2909 }
2910
2911 if (entry->ldst_type == 0)
2912 {
2913 set_syntax_error
2914 (_("this relocation modifier is not allowed on this "
2915 "instruction"));
2916 return FALSE;
2917 }
2918
2919 /* [Xn,#:<reloc_op>: */
2920 /* We now have the group relocation table entry corresponding to
2921 the name in the assembler source. Next, we parse the
2922 expression. */
2923 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2924 {
2925 set_syntax_error (_("invalid relocation expression"));
2926 return FALSE;
2927 }
2928
2929 /* [Xn,#:<reloc_op>:<expr> */
2930 /* Record the load/store relocation type. */
2931 inst.reloc.type = entry->ldst_type;
2932 inst.reloc.pc_rel = entry->pc_rel;
2933 }
2934 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2935 {
2936 set_syntax_error (_("invalid expression in the address"));
2937 return FALSE;
2938 }
2939 /* [Xn,<expr> */
2940 }
2941 }
2942
2943 if (! skip_past_char (&p, ']'))
2944 {
2945 set_syntax_error (_("']' expected"));
2946 return FALSE;
2947 }
2948
2949 if (skip_past_char (&p, '!'))
2950 {
2951 if (operand->addr.preind && operand->addr.offset.is_reg)
2952 {
2953 set_syntax_error (_("register offset not allowed in pre-indexed "
2954 "addressing mode"));
2955 return FALSE;
2956 }
2957 /* [Xn]! */
2958 operand->addr.writeback = 1;
2959 }
2960 else if (skip_past_comma (&p))
2961 {
2962 /* [Xn], */
2963 operand->addr.postind = 1;
2964 operand->addr.writeback = 1;
2965
2966 if (operand->addr.preind)
2967 {
2968 set_syntax_error (_("cannot combine pre- and post-indexing"));
2969 return FALSE;
2970 }
2971
2972 if (accept_reg_post_index
2973 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
2974 &isregzero)) != PARSE_FAIL)
2975 {
2976 /* [Xn],Xm */
2977 if (isreg32)
2978 {
2979 set_syntax_error (_("invalid 32-bit register offset"));
2980 return FALSE;
2981 }
2982 operand->addr.offset.regno = reg;
2983 operand->addr.offset.is_reg = 1;
2984 }
2985 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2986 {
2987 /* [Xn],#expr */
2988 set_syntax_error (_("invalid expression in the address"));
2989 return FALSE;
2990 }
2991 }
2992
2993 /* If at this point neither .preind nor .postind is set, we have a
2994 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
2995 if (operand->addr.preind == 0 && operand->addr.postind == 0)
2996 {
2997 if (operand->addr.writeback)
2998 {
2999 /* Reject [Rn]! */
3000 set_syntax_error (_("missing offset in the pre-indexed address"));
3001 return FALSE;
3002 }
3003 operand->addr.preind = 1;
3004 inst.reloc.exp.X_op = O_constant;
3005 inst.reloc.exp.X_add_number = 0;
3006 }
3007
3008 *str = p;
3009 return TRUE;
3010}
3011
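/* For illustration (flag settings follow the table above):
   "[x1, #8]"  parses with .preind = 1 and .offset.imm = 8;
   "[x1], #8"  parses with .postind = 1 and .writeback = 1;
   "[x1, #8]!" additionally sets .writeback = 1;
   "=0x1234"   sets inst.gen_lit_pool so the value is placed in a nearby
   literal pool and loaded with a PC-relative literal load.  */
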
3012/* Return TRUE on success; otherwise return FALSE. */
3013static bfd_boolean
3014parse_address (char **str, aarch64_opnd_info *operand,
3015 int accept_reg_post_index)
3016{
3017 return parse_address_main (str, operand, 0, accept_reg_post_index);
3018}
3019
3020/* Return TRUE on success; otherwise return FALSE. */
3021static bfd_boolean
3022parse_address_reloc (char **str, aarch64_opnd_info *operand)
3023{
3024 return parse_address_main (str, operand, 1, 0);
3025}
3026
3027/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3028 Return TRUE on success; otherwise return FALSE. */
3029static bfd_boolean
3030parse_half (char **str, int *internal_fixup_p)
3031{
3032 char *p, *saved;
3033 int dummy;
3034
3035 p = *str;
3036 skip_past_char (&p, '#');
3037
3038 gas_assert (internal_fixup_p);
3039 *internal_fixup_p = 0;
3040
3041 if (*p == ':')
3042 {
3043 struct reloc_table_entry *entry;
3044
3045 /* Try to parse a relocation. Anything else is an error. */
3046 ++p;
3047 if (!(entry = find_reloc_table_entry (&p)))
3048 {
3049 set_syntax_error (_("unknown relocation modifier"));
3050 return FALSE;
3051 }
3052
3053 if (entry->movw_type == 0)
3054 {
3055 set_syntax_error
3056 (_("this relocation modifier is not allowed on this instruction"));
3057 return FALSE;
3058 }
3059
3060 inst.reloc.type = entry->movw_type;
3061 }
3062 else
3063 *internal_fixup_p = 1;
3064
3065 /* Avoid parsing a register as a general symbol. */
3066 saved = p;
3067 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3068 return FALSE;
3069 p = saved;
3070
3071 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3072 return FALSE;
3073
3074 *str = p;
3075 return TRUE;
3076}
3077
3078/* Parse an operand for an ADRP instruction:
3079 ADRP <Xd>, <label>
3080 Return TRUE on success; otherwise return FALSE. */
3081
3082static bfd_boolean
3083parse_adrp (char **str)
3084{
3085 char *p;
3086
3087 p = *str;
3088 if (*p == ':')
3089 {
3090 struct reloc_table_entry *entry;
3091
3092 /* Try to parse a relocation. Anything else is an error. */
3093 ++p;
3094 if (!(entry = find_reloc_table_entry (&p)))
3095 {
3096 set_syntax_error (_("unknown relocation modifier"));
3097 return FALSE;
3098 }
3099
3100 if (entry->adrp_type == 0)
3101 {
3102 set_syntax_error
3103 (_("this relocation modifier is not allowed on this instruction"));
3104 return FALSE;
3105 }
3106
3107 inst.reloc.type = entry->adrp_type;
3108 }
3109 else
3110 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3111
3112 inst.reloc.pc_rel = 1;
3113
3114 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3115 return FALSE;
3116
3117 *str = p;
3118 return TRUE;
3119}
3120
3121/* Miscellaneous. */
3122
3123/* Parse an option for a preload instruction. Returns the encoding for the
3124 option, or PARSE_FAIL. */
3125
3126static int
3127parse_pldop (char **str)
3128{
3129 char *p, *q;
3130 const struct aarch64_name_value_pair *o;
3131
3132 p = q = *str;
3133 while (ISALNUM (*q))
3134 q++;
3135
3136 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3137 if (!o)
3138 return PARSE_FAIL;
3139
3140 *str = q;
3141 return o->value;
3142}
3143
3144/* Parse an option for a barrier instruction. Returns the encoding for the
3145 option, or PARSE_FAIL. */
3146
3147static int
3148parse_barrier (char **str)
3149{
3150 char *p, *q;
3151 const asm_barrier_opt *o;
3152
3153 p = q = *str;
3154 while (ISALPHA (*q))
3155 q++;
3156
3157 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3158 if (!o)
3159 return PARSE_FAIL;
3160
3161 *str = q;
3162 return o->value;
3163}
3164
3165/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3166 Returns the encoding for the option, or PARSE_FAIL.
3167
3168 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3169 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3170
3171static int
3172parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3173{
3174 char *p, *q;
3175 char buf[32];
3176 const struct aarch64_name_value_pair *o;
3177 int value;
3178
3179 p = buf;
3180 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3181 if (p < buf + 31)
3182 *p++ = TOLOWER (*q);
3183 *p = '\0';
3184   /* Assert that BUF is large enough.  */
3185 gas_assert (p - buf == q - *str);
3186
3187 o = hash_find (sys_regs, buf);
3188 if (!o)
3189 {
3190 if (!imple_defined_p)
3191 return PARSE_FAIL;
3192 else
3193 {
3194 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3195 registers. */
3196 unsigned int op0, op1, cn, cm, op2;
3197 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3198 return PARSE_FAIL;
3199 /* Register access is encoded as follows:
3200 op0 op1 CRn CRm op2
3201 11 xxx 1x11 xxxx xxx. */
3202 if (op0 != 3 || op1 > 7 || (cn | 0x4) != 0xf || cm > 15 || op2 > 7)
3203 return PARSE_FAIL;
3204 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3205 }
3206 }
3207 else
3208 value = o->value;
3209
3210 *str = q;
3211 return value;
3212}
3213
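/* For example, "s3_0_c15_c1_0" would be accepted above as an
   implementation defined register name (op0 = 3, op1 = 0, CRn = 15,
   CRm = 1, op2 = 0); note that the encoding check restricts CRn to
   11 or 15.  */
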
3214/* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3215 for the option, or NULL. */
3216
3217static const aarch64_sys_ins_reg *
3218parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3219{
3220 char *p, *q;
3221 char buf[32];
3222 const aarch64_sys_ins_reg *o;
3223
3224 p = buf;
3225 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3226 if (p < buf + 31)
3227 *p++ = TOLOWER (*q);
3228 *p = '\0';
3229
3230 o = hash_find (sys_ins_regs, buf);
3231 if (!o)
3232 return NULL;
3233
3234 *str = q;
3235 return o;
3236}
3237\f
3238#define po_char_or_fail(chr) do { \
3239 if (! skip_past_char (&str, chr)) \
3240 goto failure; \
3241} while (0)
3242
3243#define po_reg_or_fail(regtype) do { \
3244 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3245 if (val == PARSE_FAIL) \
3246 { \
3247 set_default_error (); \
3248 goto failure; \
3249 } \
3250 } while (0)
3251
3252#define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3253 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3254 &isreg32, &isregzero); \
3255 if (val == PARSE_FAIL) \
3256 { \
3257 set_default_error (); \
3258 goto failure; \
3259 } \
3260 info->reg.regno = val; \
3261 if (isreg32) \
3262 info->qualifier = AARCH64_OPND_QLF_W; \
3263 else \
3264 info->qualifier = AARCH64_OPND_QLF_X; \
3265 } while (0)
3266
3267#define po_imm_nc_or_fail() do { \
3268 if (! parse_constant_immediate (&str, &val)) \
3269 goto failure; \
3270 } while (0)
3271
3272#define po_imm_or_fail(min, max) do { \
3273 if (! parse_constant_immediate (&str, &val)) \
3274 goto failure; \
3275 if (val < min || val > max) \
3276 { \
3277 set_fatal_syntax_error (_("immediate value out of range "\
3278#min " to "#max)); \
3279 goto failure; \
3280 } \
3281 } while (0)
3282
3283#define po_misc_or_fail(expr) do { \
3284 if (!expr) \
3285 goto failure; \
3286 } while (0)
3287\f
3288/* encode the 12-bit imm field of Add/sub immediate */
3289static inline uint32_t
3290encode_addsub_imm (uint32_t imm)
3291{
3292 return imm << 10;
3293}
3294
3295/* encode the shift amount field of Add/sub immediate */
3296static inline uint32_t
3297encode_addsub_imm_shift_amount (uint32_t cnt)
3298{
3299 return cnt << 22;
3300}
3301
3302
3303/* encode the imm field of Adr instruction */
3304static inline uint32_t
3305encode_adr_imm (uint32_t imm)
3306{
3307 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3308 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3309}
3310
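/* For example, encode_adr_imm (0x1001) places immlo = 1 in bits [30:29]
   and immhi = 0x400 in bits [23:5], giving 0x20008000.  */
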
3311/* encode the immediate field of Move wide immediate */
3312static inline uint32_t
3313encode_movw_imm (uint32_t imm)
3314{
3315 return imm << 5;
3316}
3317
3318/* encode the 26-bit offset of unconditional branch */
3319static inline uint32_t
3320encode_branch_ofs_26 (uint32_t ofs)
3321{
3322 return ofs & ((1 << 26) - 1);
3323}
3324
3325/* encode the 19-bit offset of conditional branch and compare & branch */
3326static inline uint32_t
3327encode_cond_branch_ofs_19 (uint32_t ofs)
3328{
3329 return (ofs & ((1 << 19) - 1)) << 5;
3330}
3331
3332/* encode the 19-bit offset of ld literal */
3333static inline uint32_t
3334encode_ld_lit_ofs_19 (uint32_t ofs)
3335{
3336 return (ofs & ((1 << 19) - 1)) << 5;
3337}
3338
3339/* Encode the 14-bit offset of test & branch. */
3340static inline uint32_t
3341encode_tst_branch_ofs_14 (uint32_t ofs)
3342{
3343 return (ofs & ((1 << 14) - 1)) << 5;
3344}
3345
3346/* Encode the 16-bit imm field of svc/hvc/smc. */
3347static inline uint32_t
3348encode_svc_imm (uint32_t imm)
3349{
3350 return imm << 5;
3351}
3352
3353/* Reencode add(s) to sub(s), or sub(s) to add(s). */
3354static inline uint32_t
3355reencode_addsub_switch_add_sub (uint32_t opcode)
3356{
3357 return opcode ^ (1 << 30);
3358}
3359
3360static inline uint32_t
3361reencode_movzn_to_movz (uint32_t opcode)
3362{
3363 return opcode | (1 << 30);
3364}
3365
3366static inline uint32_t
3367reencode_movzn_to_movn (uint32_t opcode)
3368{
3369 return opcode & ~(1 << 30);
3370}
3371
3372/* Overall per-instruction processing. */
3373
3374/* We need to be able to fix up arbitrary expressions in some statements.
3375 This is so that we can handle symbols that are an arbitrary distance from
3376 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3377 which returns part of an address in a form which will be valid for
3378 a data instruction. We do this by pushing the expression into a symbol
3379 in the expr_section, and creating a fix for that. */
3380
3381static fixS *
3382fix_new_aarch64 (fragS * frag,
3383 int where,
3384 short int size, expressionS * exp, int pc_rel, int reloc)
3385{
3386 fixS *new_fix;
3387
3388 switch (exp->X_op)
3389 {
3390 case O_constant:
3391 case O_symbol:
3392 case O_add:
3393 case O_subtract:
3394 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3395 break;
3396
3397 default:
3398 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3399 pc_rel, reloc);
3400 break;
3401 }
3402 return new_fix;
3403}
3404\f
3405/* Diagnostics on operands errors. */
3406
3407/* By default, output only a one-line error message.
3408   Verbose error messages can be enabled with -merror-verbose. */
3409static int verbose_error_p = 0;
3410
3411#ifdef DEBUG_AARCH64
3412/* N.B. this is only for the purpose of debugging. */
3413const char* operand_mismatch_kind_names[] =
3414{
3415 "AARCH64_OPDE_NIL",
3416 "AARCH64_OPDE_RECOVERABLE",
3417 "AARCH64_OPDE_SYNTAX_ERROR",
3418 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3419 "AARCH64_OPDE_INVALID_VARIANT",
3420 "AARCH64_OPDE_OUT_OF_RANGE",
3421 "AARCH64_OPDE_UNALIGNED",
3422 "AARCH64_OPDE_REG_LIST",
3423 "AARCH64_OPDE_OTHER_ERROR",
3424};
3425#endif /* DEBUG_AARCH64 */
3426
3427/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3428
3429 When multiple errors of different kinds are found in the same assembly
3430 line, only the error of the highest severity will be picked up for
3431 issuing the diagnostics. */
3432
3433static inline bfd_boolean
3434operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3435 enum aarch64_operand_error_kind rhs)
3436{
3437 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3438 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3439 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3440 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3441 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3442 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3443 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3444 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3445 return lhs > rhs;
3446}
3447
3448/* Helper routine to get the mnemonic name from the assembly instruction
3449   line; should only be called for diagnostic purposes, as there is a
3450   string copy operation involved, which may affect the runtime
3451   performance if used elsewhere. */
3452
3453static const char*
3454get_mnemonic_name (const char *str)
3455{
3456 static char mnemonic[32];
3457 char *ptr;
3458
3459   /* Get the first 31 bytes and assume that the full name is included.  */
3460 strncpy (mnemonic, str, 31);
3461 mnemonic[31] = '\0';
3462
3463 /* Scan up to the end of the mnemonic, which must end in white space,
3464 '.', or end of string. */
3465 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3466 ;
3467
3468 *ptr = '\0';
3469
3470 /* Append '...' to the truncated long name. */
3471 if (ptr - mnemonic == 31)
3472 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3473
3474 return mnemonic;
3475}
3476
3477static void
3478reset_aarch64_instruction (aarch64_instruction *instruction)
3479{
3480 memset (instruction, '\0', sizeof (aarch64_instruction));
3481 instruction->reloc.type = BFD_RELOC_UNUSED;
3482}
3483
3484/* Data structures storing one user error in the assembly code related to
3485 operands. */
3486
3487struct operand_error_record
3488{
3489 const aarch64_opcode *opcode;
3490 aarch64_operand_error detail;
3491 struct operand_error_record *next;
3492};
3493
3494typedef struct operand_error_record operand_error_record;
3495
3496struct operand_errors
3497{
3498 operand_error_record *head;
3499 operand_error_record *tail;
3500};
3501
3502typedef struct operand_errors operand_errors;
3503
3504/* Top-level data structure reporting user errors for the current line of
3505 the assembly code.
3506 The way md_assemble works is that all opcodes sharing the same mnemonic
3507 name are iterated to find a match to the assembly line. In this data
3508   structure, each such opcode will have one operand_error_record
3509   allocated and inserted.  In other words, excessive errors related to
3510   a single opcode are disregarded.
3511operand_errors operand_error_report;
3512
3513/* Free record nodes. */
3514static operand_error_record *free_opnd_error_record_nodes = NULL;
3515
3516/* Initialize the data structure that stores the operand mismatch
3517 information on assembling one line of the assembly code. */
3518static void
3519init_operand_error_report (void)
3520{
3521 if (operand_error_report.head != NULL)
3522 {
3523 gas_assert (operand_error_report.tail != NULL);
3524 operand_error_report.tail->next = free_opnd_error_record_nodes;
3525 free_opnd_error_record_nodes = operand_error_report.head;
3526 operand_error_report.head = NULL;
3527 operand_error_report.tail = NULL;
3528 return;
3529 }
3530 gas_assert (operand_error_report.tail == NULL);
3531}
3532
3533/* Return TRUE if some operand error has been recorded during the
3534 parsing of the current assembly line using the opcode *OPCODE;
3535 otherwise return FALSE. */
3536static inline bfd_boolean
3537opcode_has_operand_error_p (const aarch64_opcode *opcode)
3538{
3539 operand_error_record *record = operand_error_report.head;
3540 return record && record->opcode == opcode;
3541}
3542
3543/* Add the error record *NEW_RECORD to operand_error_report. The record's
3544 OPCODE field is initialized with OPCODE.
3545   N.B. there is only one record for each opcode, i.e. at most one error
3546   is recorded for each instruction template. */
3547
3548static void
3549add_operand_error_record (const operand_error_record* new_record)
3550{
3551 const aarch64_opcode *opcode = new_record->opcode;
3552 operand_error_record* record = operand_error_report.head;
3553
3554 /* The record may have been created for this opcode. If not, we need
3555 to prepare one. */
3556 if (! opcode_has_operand_error_p (opcode))
3557 {
3558 /* Get one empty record. */
3559 if (free_opnd_error_record_nodes == NULL)
3560 {
3561 record = xmalloc (sizeof (operand_error_record));
3562 if (record == NULL)
3563 abort ();
3564 }
3565 else
3566 {
3567 record = free_opnd_error_record_nodes;
3568 free_opnd_error_record_nodes = record->next;
3569 }
3570 record->opcode = opcode;
3571 /* Insert at the head. */
3572 record->next = operand_error_report.head;
3573 operand_error_report.head = record;
3574 if (operand_error_report.tail == NULL)
3575 operand_error_report.tail = record;
3576 }
3577 else if (record->detail.kind != AARCH64_OPDE_NIL
3578 && record->detail.index <= new_record->detail.index
3579 && operand_error_higher_severity_p (record->detail.kind,
3580 new_record->detail.kind))
3581 {
3582      /* In the case of multiple errors found on operands related to a
3583 single opcode, only record the error of the leftmost operand and
3584 only if the error is of higher severity. */
3585 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3586 " the existing error %s on operand %d",
3587 operand_mismatch_kind_names[new_record->detail.kind],
3588 new_record->detail.index,
3589 operand_mismatch_kind_names[record->detail.kind],
3590 record->detail.index);
3591 return;
3592 }
3593
3594 record->detail = new_record->detail;
3595}
3596
3597static inline void
3598record_operand_error_info (const aarch64_opcode *opcode,
3599 aarch64_operand_error *error_info)
3600{
3601 operand_error_record record;
3602 record.opcode = opcode;
3603 record.detail = *error_info;
3604 add_operand_error_record (&record);
3605}
3606
3607/* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3608 error message *ERROR, for operand IDX (count from 0). */
3609
3610static void
3611record_operand_error (const aarch64_opcode *opcode, int idx,
3612 enum aarch64_operand_error_kind kind,
3613 const char* error)
3614{
3615 aarch64_operand_error info;
3616 memset(&info, 0, sizeof (info));
3617 info.index = idx;
3618 info.kind = kind;
3619 info.error = error;
3620 record_operand_error_info (opcode, &info);
3621}
3622
3623static void
3624record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3625 enum aarch64_operand_error_kind kind,
3626 const char* error, const int *extra_data)
3627{
3628 aarch64_operand_error info;
3629 info.index = idx;
3630 info.kind = kind;
3631 info.error = error;
3632 info.data[0] = extra_data[0];
3633 info.data[1] = extra_data[1];
3634 info.data[2] = extra_data[2];
3635 record_operand_error_info (opcode, &info);
3636}
3637
3638static void
3639record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3640 const char* error, int lower_bound,
3641 int upper_bound)
3642{
3643 int data[3] = {lower_bound, upper_bound, 0};
3644 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3645 error, data);
3646}
3647
3648/* Remove the operand error record for *OPCODE. */
3649static void ATTRIBUTE_UNUSED
3650remove_operand_error_record (const aarch64_opcode *opcode)
3651{
3652 if (opcode_has_operand_error_p (opcode))
3653 {
3654 operand_error_record* record = operand_error_report.head;
3655 gas_assert (record != NULL && operand_error_report.tail != NULL);
3656 operand_error_report.head = record->next;
3657 record->next = free_opnd_error_record_nodes;
3658 free_opnd_error_record_nodes = record;
3659 if (operand_error_report.head == NULL)
3660 {
3661 gas_assert (operand_error_report.tail == record);
3662 operand_error_report.tail = NULL;
3663 }
3664 }
3665}
3666
3667/* Given the instruction in *INSTR, return the index of the best matched
3668 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3669
3670 Return -1 if there is no qualifier sequence; return the first match
3671   if multiple matches are found. */
3672
3673static int
3674find_best_match (const aarch64_inst *instr,
3675 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3676{
3677 int i, num_opnds, max_num_matched, idx;
3678
3679 num_opnds = aarch64_num_of_operands (instr->opcode);
3680 if (num_opnds == 0)
3681 {
3682 DEBUG_TRACE ("no operand");
3683 return -1;
3684 }
3685
3686 max_num_matched = 0;
3687 idx = -1;
3688
3689 /* For each pattern. */
3690 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3691 {
3692 int j, num_matched;
3693 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3694
3695      /* Most opcodes have far fewer patterns in the list. */
3696 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3697 {
3698 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3699 if (i != 0 && idx == -1)
3700 /* If nothing has been matched, return the 1st sequence. */
3701 idx = 0;
3702 break;
3703 }
3704
3705 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3706 if (*qualifiers == instr->operands[j].qualifier)
3707 ++num_matched;
3708
3709 if (num_matched > max_num_matched)
3710 {
3711 max_num_matched = num_matched;
3712 idx = i;
3713 }
3714 }
3715
3716 DEBUG_TRACE ("return with %d", idx);
3717 return idx;
3718}
3719
3720/* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3721 corresponding operands in *INSTR. */
3722
3723static inline void
3724assign_qualifier_sequence (aarch64_inst *instr,
3725 const aarch64_opnd_qualifier_t *qualifiers)
3726{
3727 int i = 0;
3728 int num_opnds = aarch64_num_of_operands (instr->opcode);
3729 gas_assert (num_opnds);
3730 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3731 instr->operands[i].qualifier = *qualifiers;
3732}
3733
3734/* Print operands for diagnostic purposes. */
3735
3736static void
3737print_operands (char *buf, const aarch64_opcode *opcode,
3738 const aarch64_opnd_info *opnds)
3739{
3740 int i;
3741
3742 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3743 {
3744 const size_t size = 128;
3745 char str[size];
3746
3747      /* We rely primarily on the opcode operand info; however, we also look
3748	 into inst->operands to support printing the optional
3749	 operand.
3750	 The two operand codes should be the same in all cases, apart from
3751	 when the operand can be optional. */
3752 if (opcode->operands[i] == AARCH64_OPND_NIL
3753 || opnds[i].type == AARCH64_OPND_NIL)
3754 break;
3755
3756 /* Generate the operand string in STR. */
3757 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3758
3759 /* Delimiter. */
3760 if (str[0] != '\0')
3761 strcat (buf, i == 0 ? " " : ",");
3762
3763 /* Append the operand string. */
3764 strcat (buf, str);
3765 }
3766}
3767
3768/* Send to stderr a string as information. */
3769
3770static void
3771output_info (const char *format, ...)
3772{
3773 char *file;
3774 unsigned int line;
3775 va_list args;
3776
3777 as_where (&file, &line);
3778 if (file)
3779 {
3780 if (line != 0)
3781 fprintf (stderr, "%s:%u: ", file, line);
3782 else
3783 fprintf (stderr, "%s: ", file);
3784 }
3785 fprintf (stderr, _("Info: "));
3786 va_start (args, format);
3787 vfprintf (stderr, format, args);
3788 va_end (args);
3789 (void) putc ('\n', stderr);
3790}
3791
3792/* Output one operand error record. */
3793
3794static void
3795output_operand_error_record (const operand_error_record *record, char *str)
3796{
3797 int idx = record->detail.index;
3798 const aarch64_opcode *opcode = record->opcode;
3799 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3800 : AARCH64_OPND_NIL);
3801 const aarch64_operand_error *detail = &record->detail;
3802
3803 switch (detail->kind)
3804 {
3805 case AARCH64_OPDE_NIL:
3806 gas_assert (0);
3807 break;
3808
3809 case AARCH64_OPDE_SYNTAX_ERROR:
3810 case AARCH64_OPDE_RECOVERABLE:
3811 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3812 case AARCH64_OPDE_OTHER_ERROR:
3813 gas_assert (idx >= 0);
3814      /* Use the prepared error message if there is one, otherwise use the
3815 operand description string to describe the error. */
3816 if (detail->error != NULL)
3817 {
3818 if (detail->index == -1)
3819 as_bad (_("%s -- `%s'"), detail->error, str);
3820 else
3821 as_bad (_("%s at operand %d -- `%s'"),
3822 detail->error, detail->index + 1, str);
3823 }
3824 else
3825 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3826 aarch64_get_operand_desc (opd_code), str);
3827 break;
3828
3829 case AARCH64_OPDE_INVALID_VARIANT:
3830 as_bad (_("operand mismatch -- `%s'"), str);
3831 if (verbose_error_p)
3832 {
3833 /* We will try to correct the erroneous instruction and also provide
3834 more information e.g. all other valid variants.
3835
3836 The string representation of the corrected instruction and other
3837 valid variants are generated by
3838
3839 1) obtaining the intermediate representation of the erroneous
3840 instruction;
3841 2) manipulating the IR, e.g. replacing the operand qualifier;
3842 3) printing out the instruction by calling the printer functions
3843 shared with the disassembler.
3844
3845 The limitation of this method is that the exact input assembly
3846 line cannot be accurately reproduced in some cases, for example an
3847 optional operand present in the actual assembly line will be
3848 omitted in the output; likewise for the optional syntax rules,
3849 e.g. the # before the immediate. Another limitation is that the
3850 assembly symbols and relocation operations in the assembly line
3851 currently cannot be printed out in the error report. Last but not
3852	 least, when other errors co-exist with this error, the
3853	 'corrected' instruction may still be incorrect, e.g. given
3854 'ldnp h0,h1,[x0,#6]!'
3855 this diagnosis will provide the version:
3856 'ldnp s0,s1,[x0,#6]!'
3857 which is still not right. */
3858 size_t len = strlen (get_mnemonic_name (str));
3859 int i, qlf_idx;
3860 bfd_boolean result;
3861 const size_t size = 2048;
3862 char buf[size];
3863 aarch64_inst *inst_base = &inst.base;
3864 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3865
3866 /* Init inst. */
3867 reset_aarch64_instruction (&inst);
3868 inst_base->opcode = opcode;
3869
3870 /* Reset the error report so that there is no side effect on the
3871 following operand parsing. */
3872 init_operand_error_report ();
3873
3874 /* Fill inst. */
3875 result = parse_operands (str + len, opcode)
3876 && programmer_friendly_fixup (&inst);
3877 gas_assert (result);
3878 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3879 NULL, NULL);
3880 gas_assert (!result);
3881
3882 /* Find the most matched qualifier sequence. */
3883 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3884 gas_assert (qlf_idx > -1);
3885
3886 /* Assign the qualifiers. */
3887 assign_qualifier_sequence (inst_base,
3888 opcode->qualifiers_list[qlf_idx]);
3889
3890 /* Print the hint. */
3891 output_info (_(" did you mean this?"));
3892 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3893 print_operands (buf, opcode, inst_base->operands);
3894 output_info (_(" %s"), buf);
3895
3896 /* Print out other variant(s) if there is any. */
3897 if (qlf_idx != 0 ||
3898 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
3899 output_info (_(" other valid variant(s):"));
3900
3901 /* For each pattern. */
3902 qualifiers_list = opcode->qualifiers_list;
3903 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3904 {
3905	      /* Most opcodes have far fewer patterns in the list.
3906		 The first NIL qualifier indicates the end of the list. */
3907 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
3908 break;
3909
3910 if (i != qlf_idx)
3911 {
3912 /* Mnemonics name. */
3913 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3914
3915 /* Assign the qualifiers. */
3916 assign_qualifier_sequence (inst_base, *qualifiers_list);
3917
3918 /* Print instruction. */
3919 print_operands (buf, opcode, inst_base->operands);
3920
3921 output_info (_(" %s"), buf);
3922 }
3923 }
3924 }
3925 break;
3926
3927 case AARCH64_OPDE_OUT_OF_RANGE:
3928 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
3929 detail->error ? detail->error : _("immediate value"),
3930 detail->data[0], detail->data[1], detail->index + 1, str);
3931 break;
3932
3933 case AARCH64_OPDE_REG_LIST:
3934 if (detail->data[0] == 1)
3935 as_bad (_("invalid number of registers in the list; "
3936 "only 1 register is expected at operand %d -- `%s'"),
3937 detail->index + 1, str);
3938 else
3939 as_bad (_("invalid number of registers in the list; "
3940 "%d registers are expected at operand %d -- `%s'"),
3941 detail->data[0], detail->index + 1, str);
3942 break;
3943
3944 case AARCH64_OPDE_UNALIGNED:
3945 as_bad (_("immediate value should be a multiple of "
3946 "%d at operand %d -- `%s'"),
3947 detail->data[0], detail->index + 1, str);
3948 break;
3949
3950 default:
3951 gas_assert (0);
3952 break;
3953 }
3954}
3955
3956/* Process and output the error message about the operand mismatching.
3957
3958   When this function is called, the operand error information has
3959   been collected for an assembly line and there will be multiple
3960   errors in the case of multiple instruction templates; output the
3961 error message that most closely describes the problem. */
3962
3963static void
3964output_operand_error_report (char *str)
3965{
3966 int largest_error_pos;
3967 const char *msg = NULL;
3968 enum aarch64_operand_error_kind kind;
3969 operand_error_record *curr;
3970 operand_error_record *head = operand_error_report.head;
3971 operand_error_record *record = NULL;
3972
3973 /* No error to report. */
3974 if (head == NULL)
3975 return;
3976
3977 gas_assert (head != NULL && operand_error_report.tail != NULL);
3978
3979 /* Only one error. */
3980 if (head == operand_error_report.tail)
3981 {
3982 DEBUG_TRACE ("single opcode entry with error kind: %s",
3983 operand_mismatch_kind_names[head->detail.kind]);
3984 output_operand_error_record (head, str);
3985 return;
3986 }
3987
3988 /* Find the error kind of the highest severity. */
3989 DEBUG_TRACE ("multiple opcode entres with error kind");
3990 kind = AARCH64_OPDE_NIL;
3991 for (curr = head; curr != NULL; curr = curr->next)
3992 {
3993 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
3994 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
3995 if (operand_error_higher_severity_p (curr->detail.kind, kind))
3996 kind = curr->detail.kind;
3997 }
3998 gas_assert (kind != AARCH64_OPDE_NIL);
3999
4000  /* Pick one of the errors of KIND to report. */
4001 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4002 for (curr = head; curr != NULL; curr = curr->next)
4003 {
4004 if (curr->detail.kind != kind)
4005 continue;
4006 /* If there are multiple errors, pick up the one with the highest
4007 mismatching operand index. In the case of multiple errors with
4008 the equally highest operand index, pick up the first one or the
4009 first one with non-NULL error message. */
4010 if (curr->detail.index > largest_error_pos
4011 || (curr->detail.index == largest_error_pos && msg == NULL
4012 && curr->detail.error != NULL))
4013 {
4014 largest_error_pos = curr->detail.index;
4015 record = curr;
4016 msg = record->detail.error;
4017 }
4018 }
4019
4020 gas_assert (largest_error_pos != -2 && record != NULL);
4021 DEBUG_TRACE ("Pick up error kind %s to report",
4022 operand_mismatch_kind_names[record->detail.kind]);
4023
4024 /* Output. */
4025 output_operand_error_record (record, str);
4026}
4027\f
4028/* Write an AARCH64 instruction to buf - always little-endian. */
4029static void
4030put_aarch64_insn (char *buf, uint32_t insn)
4031{
4032 unsigned char *where = (unsigned char *) buf;
4033 where[0] = insn;
4034 where[1] = insn >> 8;
4035 where[2] = insn >> 16;
4036 where[3] = insn >> 24;
4037}
4038
4039static uint32_t
4040get_aarch64_insn (char *buf)
4041{
4042 unsigned char *where = (unsigned char *) buf;
4043 uint32_t result;
4044 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4045 return result;
4046}
4047
4048static void
4049output_inst (struct aarch64_inst *new_inst)
4050{
4051 char *to = NULL;
4052
4053 to = frag_more (INSN_SIZE);
4054
4055 frag_now->tc_frag_data.recorded = 1;
4056
4057 put_aarch64_insn (to, inst.base.value);
4058
4059 if (inst.reloc.type != BFD_RELOC_UNUSED)
4060 {
4061 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4062 INSN_SIZE, &inst.reloc.exp,
4063 inst.reloc.pc_rel,
4064 inst.reloc.type);
4065 DEBUG_TRACE ("Prepared relocation fix up");
4066 /* Don't check the addend value against the instruction size,
4067 that's the job of our code in md_apply_fix(). */
4068 fixp->fx_no_overflow = 1;
4069 if (new_inst != NULL)
4070 fixp->tc_fix_data.inst = new_inst;
4071 if (aarch64_gas_internal_fixup_p ())
4072 {
4073 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4074 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4075 fixp->fx_addnumber = inst.reloc.flags;
4076 }
4077 }
4078
4079 dwarf2_emit_insn (INSN_SIZE);
4080}
4081
4082/* Link together opcodes of the same name. */
4083
4084struct templates
4085{
4086 aarch64_opcode *opcode;
4087 struct templates *next;
4088};
4089
4090typedef struct templates templates;
4091
4092static templates *
4093lookup_mnemonic (const char *start, int len)
4094{
4095 templates *templ = NULL;
4096
4097 templ = hash_find_n (aarch64_ops_hsh, start, len);
4098 return templ;
4099}
4100
4101/* Subroutine of md_assemble, responsible for looking up the primary
4102 opcode from the mnemonic the user wrote. STR points to the
4103 beginning of the mnemonic. */
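/* Illustrative note: a plain mnemonic such as "add x0, x1, x2" is looked up
   directly as "add", while a conditional form such as "b.ne target" records
   the NE condition and looks up the ".c"-suffixed template name "b.c".  */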
4104
4105static templates *
4106opcode_lookup (char **str)
4107{
4108 char *end, *base;
4109 const aarch64_cond *cond;
4110 char condname[16];
4111 int len;
4112
4113 /* Scan up to the end of the mnemonic, which must end in white space,
4114 '.', or end of string. */
4115 for (base = end = *str; is_part_of_name(*end); end++)
4116 if (*end == '.')
4117 break;
4118
4119 if (end == base)
4120 return 0;
4121
4122 inst.cond = COND_ALWAYS;
4123
4124 /* Handle a possible condition. */
4125 if (end[0] == '.')
4126 {
4127 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4128 if (cond)
4129 {
4130 inst.cond = cond->value;
4131 *str = end + 3;
4132 }
4133 else
4134 {
4135 *str = end;
4136 return 0;
4137 }
4138 }
4139 else
4140 *str = end;
4141
4142 len = end - base;
4143
4144 if (inst.cond == COND_ALWAYS)
4145 {
4146 /* Look for unaffixed mnemonic. */
4147 return lookup_mnemonic (base, len);
4148 }
4149 else if (len <= 13)
4150 {
4151 /* Append ".c" to the mnemonic if conditional. */
4152 memcpy (condname, base, len);
4153 memcpy (condname + len, ".c", 2);
4154 base = condname;
4155 len += 2;
4156 return lookup_mnemonic (base, len);
4157 }
4158
4159 return NULL;
4160}
4161
4162/* Internal helper routine converting a vector neon_type_el structure
4163 *VECTYPE to a corresponding operand qualifier. */
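/* Illustrative note: "v0.8b" (eight 1-byte elements) maps to the 8B vector
   qualifier and "v1.4s" (four 4-byte elements) to 4S, while an element form
   such as "v2.s[1]" maps to the scalar element qualifier for S.  */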
4164
4165static inline aarch64_opnd_qualifier_t
4166vectype_to_qualifier (const struct neon_type_el *vectype)
4167{
4168 /* Element size in bytes indexed by neon_el_type. */
4169 const unsigned char ele_size[5]
4170 = {1, 2, 4, 8, 16};
4171
4172 if (!vectype->defined || vectype->type == NT_invtype)
4173 goto vectype_conversion_fail;
4174
4175 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4176
4177 if (vectype->defined & NTA_HASINDEX)
4178 /* Vector element register. */
4179 return AARCH64_OPND_QLF_S_B + vectype->type;
4180 else
4181 {
4182 /* Vector register. */
4183 int reg_size = ele_size[vectype->type] * vectype->width;
4184 unsigned offset;
4185 if (reg_size != 16 && reg_size != 8)
4186 goto vectype_conversion_fail;
4187 /* The conversion is calculated based on the relation of the order of
4188 qualifiers to the vector element size and vector register size. */
4189 offset = (vectype->type == NT_q)
4190 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4191 gas_assert (offset <= 8);
4192 return AARCH64_OPND_QLF_V_8B + offset;
4193 }
4194
4195vectype_conversion_fail:
4196 first_error (_("bad vector arrangement type"));
4197 return AARCH64_OPND_QLF_NIL;
4198}
4199
4200/* Process an optional operand that has been omitted from the assembly line.
4201 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4202 instruction's opcode entry while IDX is the index of this omitted operand.
4203 */
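/* Illustrative note: e.g. "ret" omits its optional Rn operand; the default
   value taken from the opcode table (normally 30, i.e. X30, the link
   register) is filled in here.  */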
4204
4205static void
4206process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4207 int idx, aarch64_opnd_info *operand)
4208{
4209 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4210 gas_assert (optional_operand_p (opcode, idx));
4211 gas_assert (!operand->present);
4212
4213 switch (type)
4214 {
4215 case AARCH64_OPND_Rd:
4216 case AARCH64_OPND_Rn:
4217 case AARCH64_OPND_Rm:
4218 case AARCH64_OPND_Rt:
4219 case AARCH64_OPND_Rt2:
4220 case AARCH64_OPND_Rs:
4221 case AARCH64_OPND_Ra:
4222 case AARCH64_OPND_Rt_SYS:
4223 case AARCH64_OPND_Rd_SP:
4224 case AARCH64_OPND_Rn_SP:
4225 case AARCH64_OPND_Fd:
4226 case AARCH64_OPND_Fn:
4227 case AARCH64_OPND_Fm:
4228 case AARCH64_OPND_Fa:
4229 case AARCH64_OPND_Ft:
4230 case AARCH64_OPND_Ft2:
4231 case AARCH64_OPND_Sd:
4232 case AARCH64_OPND_Sn:
4233 case AARCH64_OPND_Sm:
4234 case AARCH64_OPND_Vd:
4235 case AARCH64_OPND_Vn:
4236 case AARCH64_OPND_Vm:
4237 case AARCH64_OPND_VdD1:
4238 case AARCH64_OPND_VnD1:
4239 operand->reg.regno = default_value;
4240 break;
4241
4242 case AARCH64_OPND_Ed:
4243 case AARCH64_OPND_En:
4244 case AARCH64_OPND_Em:
4245 operand->reglane.regno = default_value;
4246 break;
4247
4248 case AARCH64_OPND_IDX:
4249 case AARCH64_OPND_BIT_NUM:
4250 case AARCH64_OPND_IMMR:
4251 case AARCH64_OPND_IMMS:
4252 case AARCH64_OPND_SHLL_IMM:
4253 case AARCH64_OPND_IMM_VLSL:
4254 case AARCH64_OPND_IMM_VLSR:
4255 case AARCH64_OPND_CCMP_IMM:
4256 case AARCH64_OPND_FBITS:
4257 case AARCH64_OPND_UIMM4:
4258 case AARCH64_OPND_UIMM3_OP1:
4259 case AARCH64_OPND_UIMM3_OP2:
4260 case AARCH64_OPND_IMM:
4261 case AARCH64_OPND_WIDTH:
4262 case AARCH64_OPND_UIMM7:
4263 case AARCH64_OPND_NZCV:
4264 operand->imm.value = default_value;
4265 break;
4266
4267 case AARCH64_OPND_EXCEPTION:
4268 inst.reloc.type = BFD_RELOC_UNUSED;
4269 break;
4270
4271 case AARCH64_OPND_BARRIER_ISB:
4272 operand->barrier = aarch64_barrier_options + default_value;
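 /* Fall through.  */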
4273
4274 default:
4275 break;
4276 }
4277}
4278
4279/* Process the relocation type for move wide instructions.
4280 Return TRUE on success; otherwise return FALSE. */
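/* Illustrative note: e.g. "movz x0, #:abs_g1:sym" selects a left shift of
   16, while a G2 or G3 group relocation on a 32-bit (W) destination
   register is rejected.  */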
4281
4282static bfd_boolean
4283process_movw_reloc_info (void)
4284{
4285 int is32;
4286 unsigned shift;
4287
4288 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4289
4290 if (inst.base.opcode->op == OP_MOVK)
4291 switch (inst.reloc.type)
4292 {
4293 case BFD_RELOC_AARCH64_MOVW_G0_S:
4294 case BFD_RELOC_AARCH64_MOVW_G1_S:
4295 case BFD_RELOC_AARCH64_MOVW_G2_S:
4296 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4297 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4298 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4299 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4300 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4301 set_syntax_error
4302 (_("the specified relocation type is not allowed for MOVK"));
4303 return FALSE;
4304 default:
4305 break;
4306 }
4307
4308 switch (inst.reloc.type)
4309 {
4310 case BFD_RELOC_AARCH64_MOVW_G0:
4311 case BFD_RELOC_AARCH64_MOVW_G0_S:
4312 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4313 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4314 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4315 shift = 0;
4316 break;
4317 case BFD_RELOC_AARCH64_MOVW_G1:
4318 case BFD_RELOC_AARCH64_MOVW_G1_S:
4319 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4320 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4321 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4322 shift = 16;
4323 break;
4324 case BFD_RELOC_AARCH64_MOVW_G2:
4325 case BFD_RELOC_AARCH64_MOVW_G2_S:
4326 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4327 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4328 if (is32)
4329 {
4330 set_fatal_syntax_error
4331 (_("the specified relocation type is not allowed for 32-bit "
4332 "register"));
4333 return FALSE;
4334 }
4335 shift = 32;
4336 break;
4337 case BFD_RELOC_AARCH64_MOVW_G3:
4338 if (is32)
4339 {
4340 set_fatal_syntax_error
4341 (_("the specified relocation type is not allowed for 32-bit "
4342 "register"));
4343 return FALSE;
4344 }
4345 shift = 48;
4346 break;
4347 default:
4348 /* More cases should be added when more MOVW-related relocation types
4349 are supported in GAS. */
4350 gas_assert (aarch64_gas_internal_fixup_p ());
4351 /* The shift amount should have already been set by the parser. */
4352 return TRUE;
4353 }
4354 inst.base.operands[1].shifter.amount = shift;
4355 return TRUE;
4356}
4357
4358/* A primitive log calculator. */
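/* For example, get_logsz (4) is 2 and get_logsz (16) is 4; sizes that are
   not powers of two, or larger than 16, trip the assertions below.  */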
4359
4360static inline unsigned int
4361get_logsz (unsigned int size)
4362{
4363 const unsigned char ls[16] =
4364 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4365 if (size > 16)
4366 {
4367 gas_assert (0);
4368 return -1;
4369 }
4370 gas_assert (ls[size - 1] != (unsigned char)-1);
4371 return ls[size - 1];
4372}
4373
4374/* Determine and return the real reloc type code for an instruction
4375 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
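/* Illustrative note: e.g. "ldr x0, [x1, #:lo12:sym]" transfers 8 bytes and
   so resolves to BFD_RELOC_AARCH64_LDST64_LO12, whereas the LDRB form
   resolves to BFD_RELOC_AARCH64_LDST8_LO12.  */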
4376
4377static inline bfd_reloc_code_real_type
4378ldst_lo12_determine_real_reloc_type (void)
4379{
4380 int logsz;
4381 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4382 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4383
4384 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4385 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4386 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4387 BFD_RELOC_AARCH64_LDST128_LO12
4388 };
4389
4390 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4391 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4392
4393 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4394 opd1_qlf =
4395 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4396 1, opd0_qlf, 0);
4397 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4398
4399 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4400 gas_assert (logsz >= 0 && logsz <= 4);
4401
4402 return reloc_ldst_lo12[logsz];
4403}
4404
4405/* Check whether a register list REGINFO is valid. The registers must be
4406 numbered in increasing order (modulo 32), in increments of one or two.
4407
4408 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4409 increments of two.
4410
4411 Return FALSE if such a register list is invalid, otherwise return TRUE. */
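/* Illustrative note: REGINFO packs the number of registers minus one in its
   low two bits and the 5-bit register numbers above that.  For example,
   { v31.4s, v0.4s } is a valid two-register list because the numbering
   wraps modulo 32.  */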
4412
4413static bfd_boolean
4414reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4415{
4416 uint32_t i, nb_regs, prev_regno, incr;
4417
4418 nb_regs = 1 + (reginfo & 0x3);
4419 reginfo >>= 2;
4420 prev_regno = reginfo & 0x1f;
4421 incr = accept_alternate ? 2 : 1;
4422
4423 for (i = 1; i < nb_regs; ++i)
4424 {
4425 uint32_t curr_regno;
4426 reginfo >>= 5;
4427 curr_regno = reginfo & 0x1f;
4428 if (curr_regno != ((prev_regno + incr) & 0x1f))
4429 return FALSE;
4430 prev_regno = curr_regno;
4431 }
4432
4433 return TRUE;
4434}
4435
4436/* Generic instruction operand parser. This does no encoding and no
4437 semantic validation; it merely squirrels values away in the inst
4438 structure. Returns TRUE or FALSE depending on whether the
4439 specified grammar matched. */
4440
4441static bfd_boolean
4442parse_operands (char *str, const aarch64_opcode *opcode)
4443{
4444 int i;
4445 char *backtrack_pos = 0;
4446 const enum aarch64_opnd *operands = opcode->operands;
4447
4448 clear_error ();
4449 skip_whitespace (str);
4450
4451 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4452 {
4453 int64_t val;
4454 int isreg32, isregzero;
4455 int comma_skipped_p = 0;
4456 aarch64_reg_type rtype;
4457 struct neon_type_el vectype;
4458 aarch64_opnd_info *info = &inst.base.operands[i];
4459
4460 DEBUG_TRACE ("parse operand %d", i);
4461
4462 /* Assign the operand code. */
4463 info->type = operands[i];
4464
4465 if (optional_operand_p (opcode, i))
4466 {
4467 /* Remember where we are in case we need to backtrack. */
4468 gas_assert (!backtrack_pos);
4469 backtrack_pos = str;
4470 }
4471
4472 /* Expect a comma between operands; the backtrack mechanism will take
4473 care of cases of an omitted optional operand. */
4474 if (i > 0 && ! skip_past_char (&str, ','))
4475 {
4476 set_syntax_error (_("comma expected between operands"));
4477 goto failure;
4478 }
4479 else
4480 comma_skipped_p = 1;
4481
4482 switch (operands[i])
4483 {
4484 case AARCH64_OPND_Rd:
4485 case AARCH64_OPND_Rn:
4486 case AARCH64_OPND_Rm:
4487 case AARCH64_OPND_Rt:
4488 case AARCH64_OPND_Rt2:
4489 case AARCH64_OPND_Rs:
4490 case AARCH64_OPND_Ra:
4491 case AARCH64_OPND_Rt_SYS:
4492 po_int_reg_or_fail (1, 0);
4493 break;
4494
4495 case AARCH64_OPND_Rd_SP:
4496 case AARCH64_OPND_Rn_SP:
4497 po_int_reg_or_fail (0, 1);
4498 break;
4499
4500 case AARCH64_OPND_Rm_EXT:
4501 case AARCH64_OPND_Rm_SFT:
4502 po_misc_or_fail (parse_shifter_operand
4503 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4504 ? SHIFTED_ARITH_IMM
4505 : SHIFTED_LOGIC_IMM)));
4506 if (!info->shifter.operator_present)
4507 {
4508 /* Default to LSL if not present. Libopcodes prefers shifter
4509 kind to be explicit. */
4510 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4511 info->shifter.kind = AARCH64_MOD_LSL;
4512 /* For Rm_EXT, libopcodes will carry out further check on whether
4513 or not stack pointer is used in the instruction (Recall that
4514 "the extend operator is not optional unless at least one of
4515 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4516 }
4517 break;
4518
4519 case AARCH64_OPND_Fd:
4520 case AARCH64_OPND_Fn:
4521 case AARCH64_OPND_Fm:
4522 case AARCH64_OPND_Fa:
4523 case AARCH64_OPND_Ft:
4524 case AARCH64_OPND_Ft2:
4525 case AARCH64_OPND_Sd:
4526 case AARCH64_OPND_Sn:
4527 case AARCH64_OPND_Sm:
4528 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4529 if (val == PARSE_FAIL)
4530 {
4531 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4532 goto failure;
4533 }
4534 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4535
4536 info->reg.regno = val;
4537 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4538 break;
4539
4540 case AARCH64_OPND_Vd:
4541 case AARCH64_OPND_Vn:
4542 case AARCH64_OPND_Vm:
4543 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4544 if (val == PARSE_FAIL)
4545 {
4546 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4547 goto failure;
4548 }
4549 if (vectype.defined & NTA_HASINDEX)
4550 goto failure;
4551
4552 info->reg.regno = val;
4553 info->qualifier = vectype_to_qualifier (&vectype);
4554 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4555 goto failure;
4556 break;
4557
4558 case AARCH64_OPND_VdD1:
4559 case AARCH64_OPND_VnD1:
4560 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4561 if (val == PARSE_FAIL)
4562 {
4563 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4564 goto failure;
4565 }
4566 if (vectype.type != NT_d || vectype.index != 1)
4567 {
4568 set_fatal_syntax_error
4569 (_("the top half of a 128-bit FP/SIMD register is expected"));
4570 goto failure;
4571 }
4572 info->reg.regno = val;
4573 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4574 here; it is correct for the purpose of encoding/decoding since
4575 only the register number is explicitly encoded in the related
4576 instructions, although this appears a bit hacky. */
4577 info->qualifier = AARCH64_OPND_QLF_S_D;
4578 break;
4579
4580 case AARCH64_OPND_Ed:
4581 case AARCH64_OPND_En:
4582 case AARCH64_OPND_Em:
4583 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4584 if (val == PARSE_FAIL)
4585 {
4586 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4587 goto failure;
4588 }
4589 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4590 goto failure;
4591
4592 info->reglane.regno = val;
4593 info->reglane.index = vectype.index;
4594 info->qualifier = vectype_to_qualifier (&vectype);
4595 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4596 goto failure;
4597 break;
4598
4599 case AARCH64_OPND_LVn:
4600 case AARCH64_OPND_LVt:
4601 case AARCH64_OPND_LVt_AL:
4602 case AARCH64_OPND_LEt:
4603 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4604 goto failure;
4605 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4606 {
4607 set_fatal_syntax_error (_("invalid register list"));
4608 goto failure;
4609 }
4610 info->reglist.first_regno = (val >> 2) & 0x1f;
4611 info->reglist.num_regs = (val & 0x3) + 1;
4612 if (operands[i] == AARCH64_OPND_LEt)
4613 {
4614 if (!(vectype.defined & NTA_HASINDEX))
4615 goto failure;
4616 info->reglist.has_index = 1;
4617 info->reglist.index = vectype.index;
4618 }
4619 else if (!(vectype.defined & NTA_HASTYPE))
4620 goto failure;
4621 info->qualifier = vectype_to_qualifier (&vectype);
4622 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4623 goto failure;
4624 break;
4625
4626 case AARCH64_OPND_Cn:
4627 case AARCH64_OPND_Cm:
4628 po_reg_or_fail (REG_TYPE_CN);
4629 if (val > 15)
4630 {
4631 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4632 goto failure;
4633 }
4634 inst.base.operands[i].reg.regno = val;
4635 break;
4636
4637 case AARCH64_OPND_SHLL_IMM:
4638 case AARCH64_OPND_IMM_VLSR:
4639 po_imm_or_fail (1, 64);
4640 info->imm.value = val;
4641 break;
4642
4643 case AARCH64_OPND_CCMP_IMM:
4644 case AARCH64_OPND_FBITS:
4645 case AARCH64_OPND_UIMM4:
4646 case AARCH64_OPND_UIMM3_OP1:
4647 case AARCH64_OPND_UIMM3_OP2:
4648 case AARCH64_OPND_IMM_VLSL:
4649 case AARCH64_OPND_IMM:
4650 case AARCH64_OPND_WIDTH:
4651 po_imm_nc_or_fail ();
4652 info->imm.value = val;
4653 break;
4654
4655 case AARCH64_OPND_UIMM7:
4656 po_imm_or_fail (0, 127);
4657 info->imm.value = val;
4658 break;
4659
4660 case AARCH64_OPND_IDX:
4661 case AARCH64_OPND_BIT_NUM:
4662 case AARCH64_OPND_IMMR:
4663 case AARCH64_OPND_IMMS:
4664 po_imm_or_fail (0, 63);
4665 info->imm.value = val;
4666 break;
4667
4668 case AARCH64_OPND_IMM0:
4669 po_imm_nc_or_fail ();
4670 if (val != 0)
4671 {
4672 set_fatal_syntax_error (_("immediate zero expected"));
4673 goto failure;
4674 }
4675 info->imm.value = 0;
4676 break;
4677
4678 case AARCH64_OPND_FPIMM0:
4679 {
4680 int qfloat;
4681 bfd_boolean res1 = FALSE, res2 = FALSE;
4682 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4683 it is probably not worth the effort to support it. */
4684 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat))
4685 && !(res2 = parse_constant_immediate (&str, &val)))
4686 goto failure;
4687 if ((res1 && qfloat == 0) || (res2 && val == 0))
4688 {
4689 info->imm.value = 0;
4690 info->imm.is_fp = 1;
4691 break;
4692 }
4693 set_fatal_syntax_error (_("immediate zero expected"));
4694 goto failure;
4695 }
4696
4697 case AARCH64_OPND_IMM_MOV:
4698 {
4699 char *saved = str;
4700 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4701 goto failure;
4702 str = saved;
4703 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4704 GE_OPT_PREFIX, 1));
4705 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4706 later. fix_mov_imm_insn will try to determine a machine
4707 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4708 message if the immediate cannot be moved by a single
4709 instruction. */
4710 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4711 inst.base.operands[i].skip = 1;
4712 }
4713 break;
4714
4715 case AARCH64_OPND_SIMD_IMM:
4716 case AARCH64_OPND_SIMD_IMM_SFT:
4717 if (! parse_big_immediate (&str, &val))
4718 goto failure;
4719 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4720 /* addr_off_p */ 0,
4721 /* need_libopcodes_p */ 1,
4722 /* skip_p */ 1);
4723 /* Parse shift.
4724 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4725 shift, we don't check it here; we leave the checking to
4726 the libopcodes (operand_general_constraint_met_p). By
4727 doing this, we achieve better diagnostics. */
4728 if (skip_past_comma (&str)
4729 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4730 goto failure;
4731 if (!info->shifter.operator_present
4732 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4733 {
4734 /* Default to LSL if not present. Libopcodes prefers shifter
4735 kind to be explicit. */
4736 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4737 info->shifter.kind = AARCH64_MOD_LSL;
4738 }
4739 break;
4740
4741 case AARCH64_OPND_FPIMM:
4742 case AARCH64_OPND_SIMD_FPIMM:
4743 {
4744 int qfloat;
4745 if (! parse_aarch64_imm_float (&str, &qfloat))
4746 goto failure;
4747 if (qfloat == 0)
4748 {
4749 set_fatal_syntax_error (_("invalid floating-point constant"));
4750 goto failure;
4751 }
4752 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4753 inst.base.operands[i].imm.is_fp = 1;
4754 }
4755 break;
4756
4757 case AARCH64_OPND_LIMM:
4758 po_misc_or_fail (parse_shifter_operand (&str, info,
4759 SHIFTED_LOGIC_IMM));
4760 if (info->shifter.operator_present)
4761 {
4762 set_fatal_syntax_error
4763 (_("shift not allowed for bitmask immediate"));
4764 goto failure;
4765 }
4766 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4767 /* addr_off_p */ 0,
4768 /* need_libopcodes_p */ 1,
4769 /* skip_p */ 1);
4770 break;
4771
4772 case AARCH64_OPND_AIMM:
4773 if (opcode->op == OP_ADD)
4774 /* ADD may have relocation types. */
4775 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4776 SHIFTED_ARITH_IMM));
4777 else
4778 po_misc_or_fail (parse_shifter_operand (&str, info,
4779 SHIFTED_ARITH_IMM));
4780 switch (inst.reloc.type)
4781 {
4782 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4783 info->shifter.amount = 12;
4784 break;
4785 case BFD_RELOC_UNUSED:
4786 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4787 if (info->shifter.kind != AARCH64_MOD_NONE)
4788 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4789 inst.reloc.pc_rel = 0;
4790 break;
4791 default:
4792 break;
4793 }
4794 info->imm.value = 0;
4795 if (!info->shifter.operator_present)
4796 {
4797 /* Default to LSL if not present. Libopcodes prefers shifter
4798 kind to be explicit. */
4799 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4800 info->shifter.kind = AARCH64_MOD_LSL;
4801 }
4802 break;
4803
4804 case AARCH64_OPND_HALF:
4805 {
4806 /* #<imm16> or relocation. */
4807 int internal_fixup_p;
4808 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4809 if (internal_fixup_p)
4810 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4811 skip_whitespace (str);
4812 if (skip_past_comma (&str))
4813 {
4814 /* {, LSL #<shift>} */
4815 if (! aarch64_gas_internal_fixup_p ())
4816 {
4817 set_fatal_syntax_error (_("can't mix relocation modifier "
4818 "with explicit shift"));
4819 goto failure;
4820 }
4821 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4822 }
4823 else
4824 inst.base.operands[i].shifter.amount = 0;
4825 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4826 inst.base.operands[i].imm.value = 0;
4827 if (! process_movw_reloc_info ())
4828 goto failure;
4829 }
4830 break;
4831
4832 case AARCH64_OPND_EXCEPTION:
4833 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4834 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4835 /* addr_off_p */ 0,
4836 /* need_libopcodes_p */ 0,
4837 /* skip_p */ 1);
4838 break;
4839
4840 case AARCH64_OPND_NZCV:
4841 {
4842 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4843 if (nzcv != NULL)
4844 {
4845 str += 4;
4846 info->imm.value = nzcv->value;
4847 break;
4848 }
4849 po_imm_or_fail (0, 15);
4850 info->imm.value = val;
4851 }
4852 break;
4853
4854 case AARCH64_OPND_COND:
4855 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4856 str += 2;
4857 if (info->cond == NULL)
4858 {
4859 set_syntax_error (_("invalid condition"));
4860 goto failure;
4861 }
4862 break;
4863
4864 case AARCH64_OPND_ADDR_ADRP:
4865 po_misc_or_fail (parse_adrp (&str));
4866 /* Clear the value as operand needs to be relocated. */
4867 info->imm.value = 0;
4868 break;
4869
4870 case AARCH64_OPND_ADDR_PCREL14:
4871 case AARCH64_OPND_ADDR_PCREL19:
4872 case AARCH64_OPND_ADDR_PCREL21:
4873 case AARCH64_OPND_ADDR_PCREL26:
4874 po_misc_or_fail (parse_address_reloc (&str, info));
4875 if (!info->addr.pcrel)
4876 {
4877 set_syntax_error (_("invalid pc-relative address"));
4878 goto failure;
4879 }
4880 if (inst.gen_lit_pool
4881 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4882 {
4883 /* Only permit "=value" in the literal load instructions.
4884 The literal will be generated by programmer_friendly_fixup. */
4885 set_syntax_error (_("invalid use of \"=immediate\""));
4886 goto failure;
4887 }
4888 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4889 {
4890 set_syntax_error (_("unrecognized relocation suffix"));
4891 goto failure;
4892 }
4893 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
4894 {
4895 info->imm.value = inst.reloc.exp.X_add_number;
4896 inst.reloc.type = BFD_RELOC_UNUSED;
4897 }
4898 else
4899 {
4900 info->imm.value = 0;
4901 if (inst.reloc.type == BFD_RELOC_UNUSED)
4902 switch (opcode->iclass)
4903 {
4904 case compbranch:
4905 case condbranch:
4906 /* e.g. CBZ or B.COND */
4907 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4908 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
4909 break;
4910 case testbranch:
4911 /* e.g. TBZ */
4912 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
4913 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
4914 break;
4915 case branch_imm:
4916 /* e.g. B or BL */
4917 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
4918 inst.reloc.type =
4919 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
4920 : BFD_RELOC_AARCH64_JUMP26;
4921 break;
4922 case loadlit:
4923 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4924 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
4925 break;
4926 case pcreladdr:
4927 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
4928 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
4929 break;
4930 default:
4931 gas_assert (0);
4932 abort ();
4933 }
4934 inst.reloc.pc_rel = 1;
4935 }
4936 break;
4937
4938 case AARCH64_OPND_ADDR_SIMPLE:
4939 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4940 /* [<Xn|SP>{, #<simm>}] */
4941 po_char_or_fail ('[');
4942 po_reg_or_fail (REG_TYPE_R64_SP);
4943 /* Accept optional ", #0". */
4944 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
4945 && skip_past_char (&str, ','))
4946 {
4947 skip_past_char (&str, '#');
4948 if (! skip_past_char (&str, '0'))
4949 {
4950 set_fatal_syntax_error
4951 (_("the optional immediate offset can only be 0"));
4952 goto failure;
4953 }
4954 }
4955 po_char_or_fail (']');
4956 info->addr.base_regno = val;
4957 break;
4958
4959 case AARCH64_OPND_ADDR_REGOFF:
4960 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
4961 po_misc_or_fail (parse_address (&str, info, 0));
4962 if (info->addr.pcrel || !info->addr.offset.is_reg
4963 || !info->addr.preind || info->addr.postind
4964 || info->addr.writeback)
4965 {
4966 set_syntax_error (_("invalid addressing mode"));
4967 goto failure;
4968 }
4969 if (!info->shifter.operator_present)
4970 {
4971 /* Default to LSL if not present. Libopcodes prefers shifter
4972 kind to be explicit. */
4973 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4974 info->shifter.kind = AARCH64_MOD_LSL;
4975 }
4976 /* Qualifier to be deduced by libopcodes. */
4977 break;
4978
4979 case AARCH64_OPND_ADDR_SIMM7:
4980 po_misc_or_fail (parse_address (&str, info, 0));
4981 if (info->addr.pcrel || info->addr.offset.is_reg
4982 || (!info->addr.preind && !info->addr.postind))
4983 {
4984 set_syntax_error (_("invalid addressing mode"));
4985 goto failure;
4986 }
4987 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4988 /* addr_off_p */ 1,
4989 /* need_libopcodes_p */ 1,
4990 /* skip_p */ 0);
4991 break;
4992
4993 case AARCH64_OPND_ADDR_SIMM9:
4994 case AARCH64_OPND_ADDR_SIMM9_2:
4995 po_misc_or_fail (parse_address_reloc (&str, info));
4996 if (info->addr.pcrel || info->addr.offset.is_reg
4997 || (!info->addr.preind && !info->addr.postind)
4998 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
4999 && info->addr.writeback))
5000 {
5001 set_syntax_error (_("invalid addressing mode"));
5002 goto failure;
5003 }
5004 if (inst.reloc.type != BFD_RELOC_UNUSED)
5005 {
5006 set_syntax_error (_("relocation not allowed"));
5007 goto failure;
5008 }
5009 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5010 /* addr_off_p */ 1,
5011 /* need_libopcodes_p */ 1,
5012 /* skip_p */ 0);
5013 break;
5014
5015 case AARCH64_OPND_ADDR_UIMM12:
5016 po_misc_or_fail (parse_address_reloc (&str, info));
5017 if (info->addr.pcrel || info->addr.offset.is_reg
5018 || !info->addr.preind || info->addr.writeback)
5019 {
5020 set_syntax_error (_("invalid addressing mode"));
5021 goto failure;
5022 }
5023 if (inst.reloc.type == BFD_RELOC_UNUSED)
5024 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5025 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5026 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5027 /* Leave qualifier to be determined by libopcodes. */
5028 break;
5029
5030 case AARCH64_OPND_SIMD_ADDR_POST:
5031 /* [<Xn|SP>], <Xm|#<amount>> */
5032 po_misc_or_fail (parse_address (&str, info, 1));
5033 if (!info->addr.postind || !info->addr.writeback)
5034 {
5035 set_syntax_error (_("invalid addressing mode"));
5036 goto failure;
5037 }
5038 if (!info->addr.offset.is_reg)
5039 {
5040 if (inst.reloc.exp.X_op == O_constant)
5041 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5042 else
5043 {
5044 set_fatal_syntax_error
5045 (_("writeback value should be an immediate constant"));
5046 goto failure;
5047 }
5048 }
5049 /* No qualifier. */
5050 break;
5051
5052 case AARCH64_OPND_SYSREG:
5053 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1)) == FALSE)
5054 {
5055 set_syntax_error (_("unknown or missing system register name"));
5056 goto failure;
5057 }
5058 inst.base.operands[i].sysreg = val;
5059 break;
5060
5061 case AARCH64_OPND_PSTATEFIELD:
5062 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0)) == FALSE)
5063 {
5064 set_syntax_error (_("unknown or missing PSTATE field name"));
5065 goto failure;
5066 }
5067 inst.base.operands[i].pstatefield = val;
5068 break;
5069
5070 case AARCH64_OPND_SYSREG_IC:
5071 inst.base.operands[i].sysins_op =
5072 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5073 goto sys_reg_ins;
5074 case AARCH64_OPND_SYSREG_DC:
5075 inst.base.operands[i].sysins_op =
5076 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5077 goto sys_reg_ins;
5078 case AARCH64_OPND_SYSREG_AT:
5079 inst.base.operands[i].sysins_op =
5080 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5081 goto sys_reg_ins;
5082 case AARCH64_OPND_SYSREG_TLBI:
5083 inst.base.operands[i].sysins_op =
5084 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5085sys_reg_ins:
5086 if (inst.base.operands[i].sysins_op == NULL)
5087 {
5088 set_fatal_syntax_error ( _("unknown or missing operation name"));
5089 goto failure;
5090 }
5091 break;
5092
5093 case AARCH64_OPND_BARRIER:
5094 case AARCH64_OPND_BARRIER_ISB:
5095 val = parse_barrier (&str);
5096 if (val != PARSE_FAIL
5097 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5098 {
5099 /* ISB only accepts the option name 'sy'. */
5100 set_syntax_error
5101 (_("the specified option is not accepted in ISB"));
5102 /* Turn off backtrack as this optional operand is present. */
5103 backtrack_pos = 0;
5104 goto failure;
5105 }
5106 /* This is an extension to accept a 0..15 immediate. */
5107 if (val == PARSE_FAIL)
5108 po_imm_or_fail (0, 15);
5109 info->barrier = aarch64_barrier_options + val;
5110 break;
5111
5112 case AARCH64_OPND_PRFOP:
5113 val = parse_pldop (&str);
5114 /* This is an extension to accept a 0..31 immediate. */
5115 if (val == PARSE_FAIL)
5116 po_imm_or_fail (0, 31);
5117 inst.base.operands[i].prfop = aarch64_prfops + val;
5118 break;
5119
5120 default:
5121 as_fatal (_("unhandled operand code %d"), operands[i]);
5122 }
5123
5124 /* If we get here, this operand was successfully parsed. */
5125 inst.base.operands[i].present = 1;
5126 continue;
5127
5128failure:
5129 /* The parse routine should already have set the error, but in case
5130 not, set a default one here. */
5131 if (! error_p ())
5132 set_default_error ();
5133
5134 if (! backtrack_pos)
5135 goto parse_operands_return;
5136
5137 /* Reaching here means we are dealing with an optional operand that is
5138 omitted from the assembly line. */
5139 gas_assert (optional_operand_p (opcode, i));
5140 info->present = 0;
5141 process_omitted_operand (operands[i], opcode, i, info);
5142
5143 /* Try again, skipping the optional operand at backtrack_pos. */
5144 str = backtrack_pos;
5145 backtrack_pos = 0;
5146
5147 /* If the omitted optional operand is the last one, but a comma has
5148 already been consumed for it, the comma is unexpected; report it. */
5149 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5150 {
5151 set_fatal_syntax_error
5152 (_("unexpected comma before the omitted optional operand"));
5153 goto parse_operands_return;
5154 }
5155
5156 /* Clear any error record after the omitted optional operand has been
5157 successfully handled. */
5158 clear_error ();
5159 }
5160
5161 /* Check if we have parsed all the operands. */
5162 if (*str != '\0' && ! error_p ())
5163 {
5164 /* Set I to the index of the last present operand; this is
5165 for the purpose of diagnostics. */
5166 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5167 ;
5168 set_fatal_syntax_error
5169 (_("unexpected characters following instruction"));
5170 }
5171
5172parse_operands_return:
5173
5174 if (error_p ())
5175 {
5176 DEBUG_TRACE ("parsing FAIL: %s - %s",
5177 operand_mismatch_kind_names[get_error_kind ()],
5178 get_error_message ());
5179 /* Record the operand error properly; this is useful when there
5180 are multiple instruction templates for a mnemonic name, so that
5181 later on, we can select the error that most closely describes
5182 the problem. */
5183 record_operand_error (opcode, i, get_error_kind (),
5184 get_error_message ());
5185 return FALSE;
5186 }
5187 else
5188 {
5189 DEBUG_TRACE ("parsing SUCCESS");
5190 return TRUE;
5191 }
5192}
5193
5194/* Perform some fix-ups to provide programmer-friendly features while
5195 keeping libopcodes happy, i.e. libopcodes only accepts
5196 the preferred architectural syntax.
5197 Return FALSE if there is any failure; otherwise return TRUE. */
5198
5199static bfd_boolean
5200programmer_friendly_fixup (aarch64_instruction *instr)
5201{
5202 aarch64_inst *base = &instr->base;
5203 const aarch64_opcode *opcode = base->opcode;
5204 enum aarch64_op op = opcode->op;
5205 aarch64_opnd_info *operands = base->operands;
5206
5207 DEBUG_TRACE ("enter");
5208
5209 switch (opcode->iclass)
5210 {
5211 case testbranch:
5212 /* TBNZ Xn|Wn, #uimm6, label
5213 Test and Branch Not Zero: conditionally jumps to label if bit number
5214 uimm6 in register Xn is not zero. The bit number implies the width of
5215 the register, which may be written and should be disassembled as Wn if
5216 uimm is less than 32. */
5217 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5218 {
5219 if (operands[1].imm.value >= 32)
5220 {
5221 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5222 0, 31);
5223 return FALSE;
5224 }
5225 operands[0].qualifier = AARCH64_OPND_QLF_X;
5226 }
5227 break;
5228 case loadlit:
5229 /* LDR Wt, label | =value
5230 As a convenience assemblers will typically permit the notation
5231 "=value" in conjunction with the pc-relative literal load instructions
5232 to automatically place an immediate value or symbolic address in a
5233 nearby literal pool and generate a hidden label which references it.
5234 ISREG has been set to 0 in the case of =value. */
5235 if (instr->gen_lit_pool
5236 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5237 {
5238 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5239 if (op == OP_LDRSW_LIT)
5240 size = 4;
5241 if (instr->reloc.exp.X_op != O_constant
5242 && instr->reloc.exp.X_op != O_big
5243 && instr->reloc.exp.X_op != O_symbol)
5244 {
5245 record_operand_error (opcode, 1,
5246 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5247 _("constant expression expected"));
5248 return FALSE;
5249 }
5250 if (! add_to_lit_pool (&instr->reloc.exp, size))
5251 {
5252 record_operand_error (opcode, 1,
5253 AARCH64_OPDE_OTHER_ERROR,
5254 _("literal pool insertion failed"));
5255 return FALSE;
5256 }
5257 }
5258 break;
5259 case asimdimm:
5260 /* Allow MOVI V0.16B, 97, LSL 0, although the preferred architectural
5261 syntax requires that the LSL shifter can only be used when the
5262 destination register has the shape of 4H, 8H, 2S or 4S. */
5263 if (op == OP_V_MOVI_B && operands[1].shifter.kind == AARCH64_MOD_LSL
5264 && (operands[0].qualifier == AARCH64_OPND_QLF_V_8B
5265 || operands[0].qualifier == AARCH64_OPND_QLF_V_16B))
5266 {
5267 if (operands[1].shifter.amount != 0)
5268 {
5269 record_operand_error (opcode, 1,
5270 AARCH64_OPDE_OTHER_ERROR,
5271 _("shift amount non-zero"));
5272 return FALSE;
5273 }
5274 operands[1].shifter.kind = AARCH64_MOD_NONE;
5275 operands[1].qualifier = AARCH64_OPND_QLF_NIL;
5276 }
5277 break;
5278 case log_shift:
5279 case bitfield:
5280 /* UXT[BHW] Wd, Wn
5281 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
5282 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5283 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5284 A programmer-friendly assembler should accept a destination Xd in
5285 place of Wd, however that is not the preferred form for disassembly.
5286 */
5287 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5288 && operands[1].qualifier == AARCH64_OPND_QLF_W
5289 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5290 operands[0].qualifier = AARCH64_OPND_QLF_W;
5291 break;
5292
5293 case addsub_ext:
5294 {
5295 /* In the 64-bit form, the final register operand is written as Wm
5296 for all but the (possibly omitted) UXTX/LSL and SXTX
5297 operators.
5298 As a programmer-friendly assembler, we accept e.g.
5299 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5300 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5301 int idx = aarch64_operand_index (opcode->operands,
5302 AARCH64_OPND_Rm_EXT);
5303 gas_assert (idx == 1 || idx == 2);
5304 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5305 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5306 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5307 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5308 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5309 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5310 }
5311 break;
5312
5313 default:
5314 break;
5315 }
5316
5317 DEBUG_TRACE ("exit with SUCCESS");
5318 return TRUE;
5319}
5320
5321/* A wrapper function to interface with libopcodes on encoding and
5322 record the error message if there is any.
5323
5324 Return TRUE on success; otherwise return FALSE. */
5325
5326static bfd_boolean
5327do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5328 aarch64_insn *code)
5329{
5330 aarch64_operand_error error_info;
5331 error_info.kind = AARCH64_OPDE_NIL;
5332 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5333 return TRUE;
5334 else
5335 {
5336 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5337 record_operand_error_info (opcode, &error_info);
5338 return FALSE;
5339 }
5340}
5341
5342#ifdef DEBUG_AARCH64
5343static inline void
5344dump_opcode_operands (const aarch64_opcode *opcode)
5345{
5346 int i = 0;
5347 while (opcode->operands[i] != AARCH64_OPND_NIL)
5348 {
5349 aarch64_verbose ("\t\t opnd%d: %s", i,
5350 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5351 ? aarch64_get_operand_name (opcode->operands[i])
5352 : aarch64_get_operand_desc (opcode->operands[i]));
5353 ++i;
5354 }
5355}
5356#endif /* DEBUG_AARCH64 */
5357
5358/* This is the guts of the machine-dependent assembler. STR points to a
5359 machine dependent instruction. This function is supposed to emit
5360 the frags/bytes it assembles to. */
5361
5362void
5363md_assemble (char *str)
5364{
5365 char *p = str;
5366 templates *template;
5367 aarch64_opcode *opcode;
5368 aarch64_inst *inst_base;
5369 unsigned saved_cond;
5370
5371 /* Align the previous label if needed. */
5372 if (last_label_seen != NULL)
5373 {
5374 symbol_set_frag (last_label_seen, frag_now);
5375 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5376 S_SET_SEGMENT (last_label_seen, now_seg);
5377 }
5378
5379 inst.reloc.type = BFD_RELOC_UNUSED;
5380
5381 DEBUG_TRACE ("\n\n");
5382 DEBUG_TRACE ("==============================");
5383 DEBUG_TRACE ("Enter md_assemble with %s", str);
5384
5385 template = opcode_lookup (&p);
5386 if (!template)
5387 {
5388 /* It wasn't an instruction, but it might be a register alias created
5389 by a directive of the form "alias .req reg". */
5390 if (!create_register_alias (str, p))
5391 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5392 str);
5393 return;
5394 }
5395
5396 skip_whitespace (p);
5397 if (*p == ',')
5398 {
5399 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5400 get_mnemonic_name (str), str);
5401 return;
5402 }
5403
5404 init_operand_error_report ();
5405
5406 saved_cond = inst.cond;
5407 reset_aarch64_instruction (&inst);
5408 inst.cond = saved_cond;
5409
5410 /* Iterate through all opcode entries with the same mnemonic name. */
5411 do
5412 {
5413 opcode = template->opcode;
5414
5415 DEBUG_TRACE ("opcode %s found", opcode->name);
5416#ifdef DEBUG_AARCH64
5417 if (debug_dump)
5418 dump_opcode_operands (opcode);
5419#endif /* DEBUG_AARCH64 */
5420
5421 /* Check that this instruction is supported for this CPU. */
5422 if (!opcode->avariant
5423 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5424 {
5425 as_bad (_("selected processor does not support `%s'"), str);
5426 return;
5427 }
5428
5429 mapping_state (MAP_INSN);
5430
5431 inst_base = &inst.base;
5432 inst_base->opcode = opcode;
5433
5434 /* Truly conditionally executed instructions, e.g. b.cond. */
5435 if (opcode->flags & F_COND)
5436 {
5437 gas_assert (inst.cond != COND_ALWAYS);
5438 inst_base->cond = get_cond_from_value (inst.cond);
5439 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5440 }
5441 else if (inst.cond != COND_ALWAYS)
5442 {
5443 /* Control shouldn't arrive here: the assembly line looks like a
5444 conditional instruction but the opcode found is unconditional. */
5445 gas_assert (0);
5446 continue;
5447 }
5448
5449 if (parse_operands (p, opcode)
5450 && programmer_friendly_fixup (&inst)
5451 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5452 {
5453 if (inst.reloc.type == BFD_RELOC_UNUSED
5454 || !inst.reloc.need_libopcodes_p)
5455 output_inst (NULL);
5456 else
5457 {
5458 /* If there is relocation generated for the instruction,
5459 store the instruction information for the future fix-up. */
5460 struct aarch64_inst *copy;
5461 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5462 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5463 abort ();
5464 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5465 output_inst (copy);
5466 }
5467 return;
5468 }
5469
5470 template = template->next;
5471 if (template != NULL)
5472 {
5473 reset_aarch64_instruction (&inst);
5474 inst.cond = saved_cond;
5475 }
5476 }
5477 while (template != NULL);
5478
5479 /* Issue the error messages if any. */
5480 output_operand_error_report (str);
5481}
5482
5483/* Various frobbings of labels and their addresses. */
5484
5485void
5486aarch64_start_line_hook (void)
5487{
5488 last_label_seen = NULL;
5489}
5490
5491void
5492aarch64_frob_label (symbolS * sym)
5493{
5494 last_label_seen = sym;
5495
5496 dwarf2_emit_label (sym);
5497}
5498
5499int
5500aarch64_data_in_code (void)
5501{
5502 if (!strncmp (input_line_pointer + 1, "data:", 5))
5503 {
5504 *input_line_pointer = '/';
5505 input_line_pointer += 5;
5506 *input_line_pointer = 0;
5507 return 1;
5508 }
5509
5510 return 0;
5511}
5512
5513char *
5514aarch64_canonicalize_symbol_name (char *name)
5515{
5516 int len;
5517
5518 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5519 *(name + len - 5) = 0;
5520
5521 return name;
5522}
5523\f
5524/* Table of all register names defined by default. The user can
5525 define additional names with .req. Note that all register names
5526 should appear in both upper and lowercase variants. Some registers
5527 also have mixed-case names. */
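/* Illustrative note: both "x0" and "X0" name integer register 0; the stack
   pointer is only reachable as sp/SP (or wsp/WSP for the 32-bit view), and
   xzr/wzr name the zero register.  */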
5528
5529#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5530#define REGNUM(p,n,t) REGDEF(p##n, n, t)
5531#define REGSET31(p,t) \
5532 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5533 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5534 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5535 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5536 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5537 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5538 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5539 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5540#define REGSET(p,t) \
5541 REGSET31(p,t), REGNUM(p,31,t)
5542
5543/* These go into aarch64_reg_hsh hash-table. */
5544static const reg_entry reg_names[] = {
5545 /* Integer registers. */
5546 REGSET31 (x, R_64), REGSET31 (X, R_64),
5547 REGSET31 (w, R_32), REGSET31 (W, R_32),
5548
5549 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5550 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5551
5552 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5553 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5554
5555 /* Coprocessor register numbers. */
5556 REGSET (c, CN), REGSET (C, CN),
5557
5558 /* Floating-point single precision registers. */
5559 REGSET (s, FP_S), REGSET (S, FP_S),
5560
5561 /* Floating-point double precision registers. */
5562 REGSET (d, FP_D), REGSET (D, FP_D),
5563
5564 /* Floating-point half precision registers. */
5565 REGSET (h, FP_H), REGSET (H, FP_H),
5566
5567 /* Floating-point byte precision registers. */
5568 REGSET (b, FP_B), REGSET (B, FP_B),
5569
5570 /* Floating-point quad precision registers. */
5571 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5572
5573 /* FP/SIMD registers. */
5574 REGSET (v, VN), REGSET (V, VN),
5575};
5576
5577#undef REGDEF
5578#undef REGNUM
5579#undef REGSET
5580
5581#define N 1
5582#define n 0
5583#define Z 1
5584#define z 0
5585#define C 1
5586#define c 0
5587#define V 1
5588#define v 0
5589#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5590static const asm_nzcv nzcv_names[] = {
5591 {"nzcv", B (n, z, c, v)},
5592 {"nzcV", B (n, z, c, V)},
5593 {"nzCv", B (n, z, C, v)},
5594 {"nzCV", B (n, z, C, V)},
5595 {"nZcv", B (n, Z, c, v)},
5596 {"nZcV", B (n, Z, c, V)},
5597 {"nZCv", B (n, Z, C, v)},
5598 {"nZCV", B (n, Z, C, V)},
5599 {"Nzcv", B (N, z, c, v)},
5600 {"NzcV", B (N, z, c, V)},
5601 {"NzCv", B (N, z, C, v)},
5602 {"NzCV", B (N, z, C, V)},
5603 {"NZcv", B (N, Z, c, v)},
5604 {"NZcV", B (N, Z, c, V)},
5605 {"NZCv", B (N, Z, C, v)},
5606 {"NZCV", B (N, Z, C, V)}
5607};
5608
5609#undef N
5610#undef n
5611#undef Z
5612#undef z
5613#undef C
5614#undef c
5615#undef V
5616#undef v
5617#undef B
5618\f
5619/* MD interface: bits in the object file. */
5620
5621/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5622 for use in the a.out file, and stores them in the array pointed to by buf.
5623 This knows about the endian-ness of the target machine and does
5624 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5625 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5626 LITTLENUMS (shorts, here at least). */
5627
5628void
5629md_number_to_chars (char *buf, valueT val, int n)
5630{
5631 if (target_big_endian)
5632 number_to_chars_bigendian (buf, val, n);
5633 else
5634 number_to_chars_littleendian (buf, val, n);
5635}
5636
5637/* MD interface: Sections. */
5638
5639/* Estimate the size of a frag before relaxing. Assume everything fits in
5640 4 bytes. */
5641
5642int
5643md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5644{
5645 fragp->fr_var = 4;
5646 return 4;
5647}
5648
5649/* Round up a section size to the appropriate boundary. */
5650
5651valueT
5652md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5653{
5654 return size;
5655}
5656
5657/* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5658 of an rs_align_code fragment. */
5659
5660void
5661aarch64_handle_align (fragS * fragP)
5662{
5663 /* NOP = d503201f */
5664 /* AArch64 instructions are always little-endian. */
5665 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5666
5667 int bytes, fix, noop_size;
5668 char *p;
5669 const char *noop;
5670
5671 if (fragP->fr_type != rs_align_code)
5672 return;
5673
5674 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5675 p = fragP->fr_literal + fragP->fr_fix;
5676 fix = 0;
5677
5678 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5679 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5680
5681#ifdef OBJ_ELF
5682 gas_assert (fragP->tc_frag_data.recorded);
5683#endif
5684
5685 noop = aarch64_noop;
5686 noop_size = sizeof (aarch64_noop);
5687 fragP->fr_var = noop_size;
5688
5689 if (bytes & (noop_size - 1))
5690 {
5691 fix = bytes & (noop_size - 1);
5692#ifdef OBJ_ELF
5693 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5694#endif
5695 memset (p, 0, fix);
5696 p += fix;
5697 bytes -= fix;
5698 }
5699
5700 while (bytes >= noop_size)
5701 {
5702 memcpy (p, noop, noop_size);
5703 p += noop_size;
5704 bytes -= noop_size;
5705 fix += noop_size;
5706 }
5707
5708 fragP->fr_fix += fix;
5709}
5710
5711/* Called from md_do_align. Used to create an alignment
5712 frag in a code section. */
5713
5714void
5715aarch64_frag_align_code (int n, int max)
5716{
5717 char *p;
5718
5719 /* We assume that there will never be a requirement to support
5720 alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.
5721 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5722 as_fatal (_
5723 ("alignments greater than %d bytes not supported in .text sections"),
5724 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5725
5726 p = frag_var (rs_align_code,
5727 MAX_MEM_FOR_RS_ALIGN_CODE,
5728 1,
5729 (relax_substateT) max,
5730 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5731 *p = 0;
5732}
5733
5734/* Perform target specific initialisation of a frag.
5735 Note - despite the name this initialisation is not done when the frag
5736 is created, but only when its type is assigned. A frag can be created
5737 and used a long time before its type is set, so beware of assuming that
5738 this initialisation is performed first. */
5739
5740#ifndef OBJ_ELF
5741void
5742aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5743 int max_chars ATTRIBUTE_UNUSED)
5744{
5745}
5746
5747#else /* OBJ_ELF is defined. */
5748void
5749aarch64_init_frag (fragS * fragP, int max_chars)
5750{
5751 /* Record a mapping symbol for alignment frags. We will delete this
5752 later if the alignment ends up empty. */
5753 if (!fragP->tc_frag_data.recorded)
5754 {
5755 fragP->tc_frag_data.recorded = 1;
5756 switch (fragP->fr_type)
5757 {
5758 case rs_align:
5759 case rs_align_test:
5760 case rs_fill:
5761 mapping_state_2 (MAP_DATA, max_chars);
5762 break;
5763 case rs_align_code:
5764 mapping_state_2 (MAP_INSN, max_chars);
5765 break;
5766 default:
5767 break;
5768 }
5769 }
5770}
5771\f
5772/* Initialize the DWARF-2 unwind information for this procedure. */
5773
5774void
5775tc_aarch64_frame_initial_instructions (void)
5776{
5777 cfi_add_CFA_def_cfa (REG_SP, 0);
5778}
5779#endif /* OBJ_ELF */
5780
5781/* Convert REGNAME to a DWARF-2 register number. */
5782
5783int
5784tc_aarch64_regname_to_dw2regnum (char *regname)
5785{
5786 const reg_entry *reg = parse_reg (&regname);
5787 if (reg == NULL)
5788 return -1;
5789
5790 switch (reg->type)
5791 {
5792 case REG_TYPE_SP_32:
5793 case REG_TYPE_SP_64:
5794 case REG_TYPE_R_32:
5795 case REG_TYPE_R_64:
5796 case REG_TYPE_FP_B:
5797 case REG_TYPE_FP_H:
5798 case REG_TYPE_FP_S:
5799 case REG_TYPE_FP_D:
5800 case REG_TYPE_FP_Q:
5801 return reg->number;
5802 default:
5803 break;
5804 }
5805 return -1;
5806}
5807
5808/* MD interface: Symbol and relocation handling. */
5809
5810/* Return the address within the segment that a PC-relative fixup is
5811 relative to. For AArch64, PC-relative fixups applied to instructions
5812 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5813
5814long
5815md_pcrel_from_section (fixS * fixP, segT seg)
5816{
5817 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5818
5819 /* If this is pc-relative and we are going to emit a relocation
5820 then we just want to put out any pipeline compensation that the linker
5821 will need. Otherwise we want to use the calculated base. */
5822 if (fixP->fx_pcrel
5823 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5824 || aarch64_force_relocation (fixP)))
5825 base = 0;
5826
5827 /* AArch64 should be consistent for all pc-relative relocations. */
5828 return base + AARCH64_PCREL_OFFSET;
5829}
5830
5831/* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
5832 Otherwise we have no need to provide default values for symbols. */
5833
5834symbolS *
5835md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5836{
5837#ifdef OBJ_ELF
5838 if (name[0] == '_' && name[1] == 'G'
5839 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5840 {
5841 if (!GOT_symbol)
5842 {
5843 if (symbol_find (name))
5844 as_bad (_("GOT already in the symbol table"));
5845
5846 GOT_symbol = symbol_new (name, undefined_section,
5847 (valueT) 0, &zero_address_frag);
5848 }
5849
5850 return GOT_symbol;
5851 }
5852#endif
5853
5854 return 0;
5855}
5856
5857/* Return non-zero if the indicated VALUE has overflowed the maximum
5858 range expressible by an unsigned number with the indicated number of
5859 BITS. */
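/* For example, unsigned_overflow (0x10000, 16) is TRUE because the value
   does not fit in 16 bits, while unsigned_overflow (0xffff, 16) is FALSE.  */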
5860
5861static bfd_boolean
5862unsigned_overflow (valueT value, unsigned bits)
5863{
5864 valueT lim;
5865 if (bits >= sizeof (valueT) * 8)
5866 return FALSE;
5867 lim = (valueT) 1 << bits;
5868 return (value >= lim);
5869}
5870
5871
5872/* Return non-zero if the indicated VALUE has overflowed the maximum
5873 range expressible by a signed number with the indicated number of
5874 BITS. */
5875
5876static bfd_boolean
5877signed_overflow (offsetT value, unsigned bits)
5878{
5879 offsetT lim;
5880 if (bits >= sizeof (offsetT) * 8)
5881 return FALSE;
5882 lim = (offsetT) 1 << (bits - 1);
5883 return (value < -lim || value >= lim);
5884}
5885
5886/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5887 unsigned immediate offset load/store instruction, try to encode it as
5888 an unscaled, 9-bit, signed immediate offset load/store instruction.
5889 Return TRUE if it is successful; otherwise return FALSE.
5890
5891 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
5892 in response to the standard LDR/STR mnemonics when the immediate offset is
5893 unambiguous, i.e. when it is negative or unaligned. */
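/* Illustrative note: e.g. "ldr x0, [x1, #-8]" cannot use the scaled
   unsigned-offset encoding, so it is converted here to the equivalent
   "ldur x0, [x1, #-8]".  */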
5894
5895static bfd_boolean
5896try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
5897{
5898 int idx;
5899 enum aarch64_op new_op;
5900 const aarch64_opcode *new_opcode;
5901
5902 gas_assert (instr->opcode->iclass == ldst_pos);
5903
5904 switch (instr->opcode->op)
5905 {
5906 case OP_LDRB_POS:new_op = OP_LDURB; break;
5907 case OP_STRB_POS: new_op = OP_STURB; break;
5908 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
5909 case OP_LDRH_POS: new_op = OP_LDURH; break;
5910 case OP_STRH_POS: new_op = OP_STURH; break;
5911 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
5912 case OP_LDR_POS: new_op = OP_LDUR; break;
5913 case OP_STR_POS: new_op = OP_STUR; break;
5914 case OP_LDRF_POS: new_op = OP_LDURV; break;
5915 case OP_STRF_POS: new_op = OP_STURV; break;
5916 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
5917 case OP_PRFM_POS: new_op = OP_PRFUM; break;
5918 default: new_op = OP_NIL; break;
5919 }
5920
5921 if (new_op == OP_NIL)
5922 return FALSE;
5923
5924 new_opcode = aarch64_get_opcode (new_op);
5925 gas_assert (new_opcode != NULL);
5926
5927 DEBUG_TRACE ("Check programmer-friendly STRB/LDRB -> STURB/LDURB: %d == %d",
5928 instr->opcode->op, new_opcode->op);
5929
5930 aarch64_replace_opcode (instr, new_opcode);
5931
5932 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
5933 qualifier matching may fail because the out-of-date qualifier will
5934 prevent the operand being updated with a new and correct qualifier. */
5935 idx = aarch64_operand_index (instr->opcode->operands,
5936 AARCH64_OPND_ADDR_SIMM9);
5937 gas_assert (idx == 1);
5938 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
5939
5940 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
5941
5942 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
5943 return FALSE;
5944
5945 return TRUE;
5946}
5947
5948/* Called by fix_insn to fix a MOV immediate alias instruction.
5949
 5950 The operand is a generic move immediate, i.e. an alias that generates a
 5951 single MOVZ, MOVN or ORR instruction to load a 32-bit/64-bit immediate
 5952 value into a general register. An assembler error shall result if the
 5953 immediate cannot be created by a single one of these instructions. If
 5954 there is a choice, then to ensure reversibility an assembler must prefer
 5955 MOVZ to MOVN, and MOVZ or MOVN to ORR. */
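/* For instance, 0x10000 can be produced directly by the MOVZ alias,
   0xffffffffffffffff only by the MOVN alias, and a replicated pattern such
   as 0x00ff00ff00ff00ff only by the ORR (bitmask immediate) alias; a value
   encodable by none of the three is reported with the error below.  */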
5956
5957static void
5958fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
5959{
5960 const aarch64_opcode *opcode;
5961
5962 /* Need to check if the destination is SP/ZR. The check has to be done
5963 before any aarch64_replace_opcode. */
5964 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
5965 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
5966
5967 instr->operands[1].imm.value = value;
5968 instr->operands[1].skip = 0;
5969
5970 if (try_mov_wide_p)
5971 {
5972 /* Try the MOVZ alias. */
5973 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
5974 aarch64_replace_opcode (instr, opcode);
5975 if (aarch64_opcode_encode (instr->opcode, instr,
5976 &instr->value, NULL, NULL))
5977 {
5978 put_aarch64_insn (buf, instr->value);
5979 return;
5980 }
 5981 /* Try the MOVN alias. */
5982 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
5983 aarch64_replace_opcode (instr, opcode);
5984 if (aarch64_opcode_encode (instr->opcode, instr,
5985 &instr->value, NULL, NULL))
5986 {
5987 put_aarch64_insn (buf, instr->value);
5988 return;
5989 }
5990 }
5991
5992 if (try_mov_bitmask_p)
5993 {
5994 /* Try the ORR alias. */
5995 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
5996 aarch64_replace_opcode (instr, opcode);
5997 if (aarch64_opcode_encode (instr->opcode, instr,
5998 &instr->value, NULL, NULL))
5999 {
6000 put_aarch64_insn (buf, instr->value);
6001 return;
6002 }
6003 }
6004
6005 as_bad_where (fixP->fx_file, fixP->fx_line,
6006 _("immediate cannot be moved by a single instruction"));
6007}
6008
 6009/* An immediate-related instruction operand may use a symbol in the
 6010 assembly, e.g.
 6011
 6012 mov w0, u32
 6013 .set u32, 0x00ffff00
 6014
 6015 At the time the assembly instruction is parsed, a referenced symbol,
 6016 like 'u32' in the above example, may not have been seen yet; a fixS is
 6017 created in such a case and is handled here after symbols have been
 6018 resolved. The instruction is fixed up with VALUE using the information
 6019 in *FIXP plus extra information in FLAGS.
 6020
 6021 This function is called by md_apply_fix to fix up instructions that need
 6022 a fix-up as described above but do not involve any linker-time relocation. */
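/* As of this version the only FIXUP_F_* flag passed in FLAGS is
   FIXUP_F_HAS_EXPLICIT_SHIFT, which indicates that an explicit shift was
   written in the source and therefore suppresses the implicit shift-by-12
   retry in the AARCH64_OPND_AIMM case below.  */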
6023
6024static void
6025fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6026{
6027 int idx;
6028 uint32_t insn;
6029 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6030 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6031 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6032
6033 if (new_inst)
6034 {
6035 /* Now the instruction is about to be fixed-up, so the operand that
6036 was previously marked as 'ignored' needs to be unmarked in order
6037 to get the encoding done properly. */
6038 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6039 new_inst->operands[idx].skip = 0;
6040 }
6041
6042 gas_assert (opnd != AARCH64_OPND_NIL);
6043
6044 switch (opnd)
6045 {
6046 case AARCH64_OPND_EXCEPTION:
6047 if (unsigned_overflow (value, 16))
6048 as_bad_where (fixP->fx_file, fixP->fx_line,
6049 _("immediate out of range"));
6050 insn = get_aarch64_insn (buf);
6051 insn |= encode_svc_imm (value);
6052 put_aarch64_insn (buf, insn);
6053 break;
6054
6055 case AARCH64_OPND_AIMM:
6056 /* ADD or SUB with immediate.
 6057 NOTE this assumes we come here with an add/sub shifted reg encoding
6058 3 322|2222|2 2 2 21111 111111
6059 1 098|7654|3 2 1 09876 543210 98765 43210
6060 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6061 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6062 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6063 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6064 ->
6065 3 322|2222|2 2 221111111111
6066 1 098|7654|3 2 109876543210 98765 43210
6067 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6068 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6069 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6070 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6071 Fields sf Rn Rd are already set. */
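      /* For example, if the symbolic immediate resolves to -4 the encoding is
         flipped between ADD and SUB and the value negated; if it resolves to
         0x5000 (too wide for 12 bits but a multiple of 0x1000) it is encoded
         as #5 with the LSL #12 shift amount set.  */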
6072 insn = get_aarch64_insn (buf);
6073 if (value < 0)
6074 {
6075 /* Add <-> sub. */
6076 insn = reencode_addsub_switch_add_sub (insn);
6077 value = -value;
6078 }
6079
6080 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6081 && unsigned_overflow (value, 12))
6082 {
6083 /* Try to shift the value by 12 to make it fit. */
6084 if (((value >> 12) << 12) == value
6085 && ! unsigned_overflow (value, 12 + 12))
6086 {
6087 value >>= 12;
6088 insn |= encode_addsub_imm_shift_amount (1);
6089 }
6090 }
6091
6092 if (unsigned_overflow (value, 12))
6093 as_bad_where (fixP->fx_file, fixP->fx_line,
6094 _("immediate out of range"));
6095
6096 insn |= encode_addsub_imm (value);
6097
6098 put_aarch64_insn (buf, insn);
6099 break;
6100
6101 case AARCH64_OPND_SIMD_IMM:
6102 case AARCH64_OPND_SIMD_IMM_SFT:
6103 case AARCH64_OPND_LIMM:
6104 /* Bit mask immediate. */
6105 gas_assert (new_inst != NULL);
6106 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6107 new_inst->operands[idx].imm.value = value;
6108 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6109 &new_inst->value, NULL, NULL))
6110 put_aarch64_insn (buf, new_inst->value);
6111 else
6112 as_bad_where (fixP->fx_file, fixP->fx_line,
6113 _("invalid immediate"));
6114 break;
6115
6116 case AARCH64_OPND_HALF:
6117 /* 16-bit unsigned immediate. */
6118 if (unsigned_overflow (value, 16))
6119 as_bad_where (fixP->fx_file, fixP->fx_line,
6120 _("immediate out of range"));
6121 insn = get_aarch64_insn (buf);
6122 insn |= encode_movw_imm (value & 0xffff);
6123 put_aarch64_insn (buf, insn);
6124 break;
6125
6126 case AARCH64_OPND_IMM_MOV:
 6127 /* Operand for a generic move immediate instruction, which is
 6128 an alias that generates a single MOVZ, MOVN or ORR
 6129 instruction to load a 32-bit/64-bit immediate value into a general
 6130 register. An assembler error shall result if the immediate cannot be
 6131 created by a single one of these instructions. If there is a choice,
 6132 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
 6133 and MOVZ or MOVN to ORR. */
6134 gas_assert (new_inst != NULL);
6135 fix_mov_imm_insn (fixP, buf, new_inst, value);
6136 break;
6137
6138 case AARCH64_OPND_ADDR_SIMM7:
6139 case AARCH64_OPND_ADDR_SIMM9:
6140 case AARCH64_OPND_ADDR_SIMM9_2:
6141 case AARCH64_OPND_ADDR_UIMM12:
6142 /* Immediate offset in an address. */
6143 insn = get_aarch64_insn (buf);
6144
6145 gas_assert (new_inst != NULL && new_inst->value == insn);
6146 gas_assert (new_inst->opcode->operands[1] == opnd
6147 || new_inst->opcode->operands[2] == opnd);
6148
6149 /* Get the index of the address operand. */
6150 if (new_inst->opcode->operands[1] == opnd)
6151 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6152 idx = 1;
6153 else
6154 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6155 idx = 2;
6156
6157 /* Update the resolved offset value. */
6158 new_inst->operands[idx].addr.offset.imm = value;
6159
6160 /* Encode/fix-up. */
6161 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6162 &new_inst->value, NULL, NULL))
6163 {
6164 put_aarch64_insn (buf, new_inst->value);
6165 break;
6166 }
6167 else if (new_inst->opcode->iclass == ldst_pos
6168 && try_to_encode_as_unscaled_ldst (new_inst))
6169 {
6170 put_aarch64_insn (buf, new_inst->value);
6171 break;
6172 }
6173
6174 as_bad_where (fixP->fx_file, fixP->fx_line,
6175 _("immediate offset out of range"));
6176 break;
6177
6178 default:
6179 gas_assert (0);
6180 as_fatal (_("unhandled operand code %d"), opnd);
6181 }
6182}
6183
6184/* Apply a fixup (fixP) to segment data, once it has been determined
6185 by our caller that we have all the info we need to fix it up.
6186
6187 Parameter valP is the pointer to the value of the bits. */
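/* A note on the pc-relative relocations handled below: branch and
   load-literal offsets must be word aligned, and each offset must fit the
   instruction's scaled immediate field - roughly +/-32KiB for TSTBR14,
   +/-1MiB for BRANCH19, LD_LO19 and ADR_LO21, and +/-128MiB for
   JUMP26/CALL26.  */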
6188
6189void
6190md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6191{
6192 offsetT value = *valP;
6193 uint32_t insn;
6194 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6195 int scale;
6196 unsigned flags = fixP->fx_addnumber;
6197
6198 DEBUG_TRACE ("\n\n");
6199 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6200 DEBUG_TRACE ("Enter md_apply_fix");
6201
6202 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6203
6204 /* Note whether this will delete the relocation. */
6205
6206 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6207 fixP->fx_done = 1;
6208
6209 /* Process the relocations. */
6210 switch (fixP->fx_r_type)
6211 {
6212 case BFD_RELOC_NONE:
6213 /* This will need to go in the object file. */
6214 fixP->fx_done = 0;
6215 break;
6216
6217 case BFD_RELOC_8:
6218 case BFD_RELOC_8_PCREL:
6219 if (fixP->fx_done || !seg->use_rela_p)
6220 md_number_to_chars (buf, value, 1);
6221 break;
6222
6223 case BFD_RELOC_16:
6224 case BFD_RELOC_16_PCREL:
6225 if (fixP->fx_done || !seg->use_rela_p)
6226 md_number_to_chars (buf, value, 2);
6227 break;
6228
6229 case BFD_RELOC_32:
6230 case BFD_RELOC_32_PCREL:
6231 if (fixP->fx_done || !seg->use_rela_p)
6232 md_number_to_chars (buf, value, 4);
6233 break;
6234
6235 case BFD_RELOC_64:
6236 case BFD_RELOC_64_PCREL:
6237 if (fixP->fx_done || !seg->use_rela_p)
6238 md_number_to_chars (buf, value, 8);
6239 break;
6240
6241 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6242 /* We claim that these fixups have been processed here, even if
6243 in fact we generate an error because we do not have a reloc
6244 for them, so tc_gen_reloc() will reject them. */
6245 fixP->fx_done = 1;
6246 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6247 {
6248 as_bad_where (fixP->fx_file, fixP->fx_line,
6249 _("undefined symbol %s used as an immediate value"),
6250 S_GET_NAME (fixP->fx_addsy));
6251 goto apply_fix_return;
6252 }
6253 fix_insn (fixP, flags, value);
6254 break;
6255
6256 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6257 if (value & 3)
6258 as_bad_where (fixP->fx_file, fixP->fx_line,
6259 _("pc-relative load offset not word aligned"));
6260 if (signed_overflow (value, 21))
6261 as_bad_where (fixP->fx_file, fixP->fx_line,
6262 _("pc-relative load offset out of range"));
6263 if (fixP->fx_done || !seg->use_rela_p)
6264 {
6265 insn = get_aarch64_insn (buf);
6266 insn |= encode_ld_lit_ofs_19 (value >> 2);
6267 put_aarch64_insn (buf, insn);
6268 }
6269 break;
6270
6271 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6272 if (signed_overflow (value, 21))
6273 as_bad_where (fixP->fx_file, fixP->fx_line,
6274 _("pc-relative address offset out of range"));
6275 if (fixP->fx_done || !seg->use_rela_p)
6276 {
6277 insn = get_aarch64_insn (buf);
6278 insn |= encode_adr_imm (value);
6279 put_aarch64_insn (buf, insn);
6280 }
6281 break;
6282
6283 case BFD_RELOC_AARCH64_BRANCH19:
6284 if (value & 3)
6285 as_bad_where (fixP->fx_file, fixP->fx_line,
6286 _("conditional branch target not word aligned"));
6287 if (signed_overflow (value, 21))
6288 as_bad_where (fixP->fx_file, fixP->fx_line,
6289 _("conditional branch out of range"));
6290 if (fixP->fx_done || !seg->use_rela_p)
6291 {
6292 insn = get_aarch64_insn (buf);
6293 insn |= encode_cond_branch_ofs_19 (value >> 2);
6294 put_aarch64_insn (buf, insn);
6295 }
6296 break;
6297
6298 case BFD_RELOC_AARCH64_TSTBR14:
6299 if (value & 3)
6300 as_bad_where (fixP->fx_file, fixP->fx_line,
6301 _("conditional branch target not word aligned"));
6302 if (signed_overflow (value, 16))
6303 as_bad_where (fixP->fx_file, fixP->fx_line,
6304 _("conditional branch out of range"));
6305 if (fixP->fx_done || !seg->use_rela_p)
6306 {
6307 insn = get_aarch64_insn (buf);
6308 insn |= encode_tst_branch_ofs_14 (value >> 2);
6309 put_aarch64_insn (buf, insn);
6310 }
6311 break;
6312
6313 case BFD_RELOC_AARCH64_JUMP26:
6314 case BFD_RELOC_AARCH64_CALL26:
6315 if (value & 3)
6316 as_bad_where (fixP->fx_file, fixP->fx_line,
6317 _("branch target not word aligned"));
6318 if (signed_overflow (value, 28))
6319 as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
6320 if (fixP->fx_done || !seg->use_rela_p)
6321 {
6322 insn = get_aarch64_insn (buf);
6323 insn |= encode_branch_ofs_26 (value >> 2);
6324 put_aarch64_insn (buf, insn);
6325 }
6326 break;
6327
6328 case BFD_RELOC_AARCH64_MOVW_G0:
6329 case BFD_RELOC_AARCH64_MOVW_G0_S:
6330 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6331 scale = 0;
6332 goto movw_common;
6333 case BFD_RELOC_AARCH64_MOVW_G1:
6334 case BFD_RELOC_AARCH64_MOVW_G1_S:
6335 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6336 scale = 16;
6337 goto movw_common;
6338 case BFD_RELOC_AARCH64_MOVW_G2:
6339 case BFD_RELOC_AARCH64_MOVW_G2_S:
6340 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6341 scale = 32;
6342 goto movw_common;
6343 case BFD_RELOC_AARCH64_MOVW_G3:
6344 scale = 48;
6345 movw_common:
6346 if (fixP->fx_done || !seg->use_rela_p)
6347 {
6348 insn = get_aarch64_insn (buf);
6349
6350 if (!fixP->fx_done)
6351 {
6352 /* REL signed addend must fit in 16 bits */
6353 if (signed_overflow (value, 16))
6354 as_bad_where (fixP->fx_file, fixP->fx_line,
6355 _("offset out of range"));
6356 }
6357 else
6358 {
6359 /* Check for overflow and scale. */
6360 switch (fixP->fx_r_type)
6361 {
6362 case BFD_RELOC_AARCH64_MOVW_G0:
6363 case BFD_RELOC_AARCH64_MOVW_G1:
6364 case BFD_RELOC_AARCH64_MOVW_G2:
6365 case BFD_RELOC_AARCH64_MOVW_G3:
6366 if (unsigned_overflow (value, scale + 16))
6367 as_bad_where (fixP->fx_file, fixP->fx_line,
6368 _("unsigned value out of range"));
6369 break;
6370 case BFD_RELOC_AARCH64_MOVW_G0_S:
6371 case BFD_RELOC_AARCH64_MOVW_G1_S:
6372 case BFD_RELOC_AARCH64_MOVW_G2_S:
6373 /* NOTE: We can only come here with movz or movn. */
6374 if (signed_overflow (value, scale + 16))
6375 as_bad_where (fixP->fx_file, fixP->fx_line,
6376 _("signed value out of range"));
6377 if (value < 0)
6378 {
6379 /* Force use of MOVN. */
6380 value = ~value;
6381 insn = reencode_movzn_to_movn (insn);
6382 }
6383 else
6384 {
6385 /* Force use of MOVZ. */
6386 insn = reencode_movzn_to_movz (insn);
6387 }
6388 break;
6389 default:
6390 /* Unchecked relocations. */
6391 break;
6392 }
6393 value >>= scale;
6394 }
6395
6396 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6397 insn |= encode_movw_imm (value & 0xffff);
6398
6399 put_aarch64_insn (buf, insn);
6400 }
6401 break;
6402
6403 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6404 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6405 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6406 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6407 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6408 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6409 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6410 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6411 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6412 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6413 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6414 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6415 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6416 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6417 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6418 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6419 /* Should always be exported to object file, see
6420 aarch64_force_relocation(). */
6421 gas_assert (!fixP->fx_done);
6422 gas_assert (seg->use_rela_p);
6423 break;
6424
6425 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6426 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6427 case BFD_RELOC_AARCH64_ADD_LO12:
6428 case BFD_RELOC_AARCH64_LDST8_LO12:
6429 case BFD_RELOC_AARCH64_LDST16_LO12:
6430 case BFD_RELOC_AARCH64_LDST32_LO12:
6431 case BFD_RELOC_AARCH64_LDST64_LO12:
6432 case BFD_RELOC_AARCH64_LDST128_LO12:
 6433 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6434 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6435 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6436 /* Should always be exported to object file, see
6437 aarch64_force_relocation(). */
6438 gas_assert (!fixP->fx_done);
6439 gas_assert (seg->use_rela_p);
6440 break;
6441
6442 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6443 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6444 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6445 break;
6446
6447 default:
6448 as_bad_where (fixP->fx_file, fixP->fx_line,
6449 _("unexpected %s fixup"),
6450 bfd_get_reloc_code_name (fixP->fx_r_type));
6451 break;
6452 }
6453
6454apply_fix_return:
 6455 /* Free the allocated struct aarch64_inst.
 6456 N.B. currently only a very limited number of fix-up types actually use
 6457 this field, so the impact on performance should be minimal. */
6458 if (fixP->tc_fix_data.inst != NULL)
6459 free (fixP->tc_fix_data.inst);
6460
6461 return;
6462}
6463
6464/* Translate internal representation of relocation info to BFD target
6465 format. */
6466
6467arelent *
6468tc_gen_reloc (asection * section, fixS * fixp)
6469{
6470 arelent *reloc;
6471 bfd_reloc_code_real_type code;
6472
6473 reloc = xmalloc (sizeof (arelent));
6474
6475 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6476 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6477 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6478
6479 if (fixp->fx_pcrel)
6480 {
6481 if (section->use_rela_p)
6482 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6483 else
6484 fixp->fx_offset = reloc->address;
6485 }
6486 reloc->addend = fixp->fx_offset;
6487
6488 code = fixp->fx_r_type;
6489 switch (code)
6490 {
6491 case BFD_RELOC_16:
6492 if (fixp->fx_pcrel)
6493 code = BFD_RELOC_16_PCREL;
6494 break;
6495
6496 case BFD_RELOC_32:
6497 if (fixp->fx_pcrel)
6498 code = BFD_RELOC_32_PCREL;
6499 break;
6500
6501 case BFD_RELOC_64:
6502 if (fixp->fx_pcrel)
6503 code = BFD_RELOC_64_PCREL;
6504 break;
6505
6506 default:
6507 break;
6508 }
6509
6510 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6511 if (reloc->howto == NULL)
6512 {
6513 as_bad_where (fixp->fx_file, fixp->fx_line,
6514 _
6515 ("cannot represent %s relocation in this object file format"),
6516 bfd_get_reloc_code_name (code));
6517 return NULL;
6518 }
6519
6520 return reloc;
6521}
6522
6523/* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6524
6525void
6526cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6527{
6528 bfd_reloc_code_real_type type;
6529 int pcrel = 0;
6530
6531 /* Pick a reloc.
6532 FIXME: @@ Should look at CPU word size. */
6533 switch (size)
6534 {
6535 case 1:
6536 type = BFD_RELOC_8;
6537 break;
6538 case 2:
6539 type = BFD_RELOC_16;
6540 break;
6541 case 4:
6542 type = BFD_RELOC_32;
6543 break;
6544 case 8:
6545 type = BFD_RELOC_64;
6546 break;
6547 default:
6548 as_bad (_("cannot do %u-byte relocation"), size);
6549 type = BFD_RELOC_UNUSED;
6550 break;
6551 }
6552
6553 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6554}
6555
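/* Decide whether the fixup FIXP must be retained as a relocation for the
   linker.  Returning non-zero forces the relocation to be emitted even when
   the symbol could be resolved locally; returning zero for the GAS-internal
   fixup allows it to be applied directly in md_apply_fix.  */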
6556int
6557aarch64_force_relocation (struct fix *fixp)
6558{
6559 switch (fixp->fx_r_type)
6560 {
6561 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6562 /* Perform these "immediate" internal relocations
6563 even if the symbol is extern or weak. */
6564 return 0;
6565
6566 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6567 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6568 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6569 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6570 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6571 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6572 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6573 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6574 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6575 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6576 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6577 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6578 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6579 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6580 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6581 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6582 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6583 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6584 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6585 case BFD_RELOC_AARCH64_ADD_LO12:
6586 case BFD_RELOC_AARCH64_LDST8_LO12:
6587 case BFD_RELOC_AARCH64_LDST16_LO12:
6588 case BFD_RELOC_AARCH64_LDST32_LO12:
6589 case BFD_RELOC_AARCH64_LDST64_LO12:
6590 case BFD_RELOC_AARCH64_LDST128_LO12:
 6591 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6592 /* Always leave these relocations for the linker. */
6593 return 1;
6594
6595 default:
6596 break;
6597 }
6598
6599 return generic_force_reloc (fixp);
6600}
6601
6602#ifdef OBJ_ELF
6603
6604const char *
6605elf64_aarch64_target_format (void)
6606{
6607 if (target_big_endian)
6608 return "elf64-bigaarch64";
6609 else
6610 return "elf64-littleaarch64";
6611}
6612
6613void
6614aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6615{
6616 elf_frob_symbol (symp, puntp);
6617}
6618#endif
6619
6620/* MD interface: Finalization. */
6621
6622/* A good place to do this, although this was probably not intended
6623 for this kind of use. We need to dump the literal pool before
6624 references are made to a null symbol pointer. */
6625
6626void
6627aarch64_cleanup (void)
6628{
6629 literal_pool *pool;
6630
6631 for (pool = list_of_pools; pool; pool = pool->next)
6632 {
6633 /* Put it at the end of the relevant section. */
6634 subseg_set (pool->section, pool->sub_section);
6635 s_ltorg (0);
6636 }
6637}
6638
6639#ifdef OBJ_ELF
6640/* Remove any excess mapping symbols generated for alignment frags in
6641 SEC. We may have created a mapping symbol before a zero byte
6642 alignment; remove it if there's a mapping symbol after the
6643 alignment. */
6644static void
6645check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6646 void *dummy ATTRIBUTE_UNUSED)
6647{
6648 segment_info_type *seginfo = seg_info (sec);
6649 fragS *fragp;
6650
6651 if (seginfo == NULL || seginfo->frchainP == NULL)
6652 return;
6653
6654 for (fragp = seginfo->frchainP->frch_root;
6655 fragp != NULL; fragp = fragp->fr_next)
6656 {
6657 symbolS *sym = fragp->tc_frag_data.last_map;
6658 fragS *next = fragp->fr_next;
6659
6660 /* Variable-sized frags have been converted to fixed size by
6661 this point. But if this was variable-sized to start with,
6662 there will be a fixed-size frag after it. So don't handle
6663 next == NULL. */
6664 if (sym == NULL || next == NULL)
6665 continue;
6666
6667 if (S_GET_VALUE (sym) < next->fr_address)
6668 /* Not at the end of this frag. */
6669 continue;
6670 know (S_GET_VALUE (sym) == next->fr_address);
6671
6672 do
6673 {
6674 if (next->tc_frag_data.first_map != NULL)
6675 {
6676 /* Next frag starts with a mapping symbol. Discard this
6677 one. */
6678 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6679 break;
6680 }
6681
6682 if (next->fr_next == NULL)
6683 {
6684 /* This mapping symbol is at the end of the section. Discard
6685 it. */
6686 know (next->fr_fix == 0 && next->fr_var == 0);
6687 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6688 break;
6689 }
6690
6691 /* As long as we have empty frags without any mapping symbols,
6692 keep looking. */
6693 /* If the next frag is non-empty and does not start with a
6694 mapping symbol, then this mapping symbol is required. */
6695 if (next->fr_address != next->fr_next->fr_address)
6696 break;
6697
6698 next = next->fr_next;
6699 }
6700 while (next != NULL);
6701 }
6702}
6703#endif
6704
6705/* Adjust the symbol table. */
6706
6707void
6708aarch64_adjust_symtab (void)
6709{
6710#ifdef OBJ_ELF
6711 /* Remove any overlapping mapping symbols generated by alignment frags. */
6712 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6713 /* Now do generic ELF adjustments. */
6714 elf_adjust_symtab ();
6715#endif
6716}
6717
6718static void
6719checked_hash_insert (struct hash_control *table, const char *key, void *value)
6720{
6721 const char *hash_err;
6722
6723 hash_err = hash_insert (table, key, value);
6724 if (hash_err)
6725 printf ("Internal Error: Can't hash %s\n", key);
6726}
6727
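/* Populate aarch64_ops_hsh from the libopcodes aarch64_opcode_table.
   Opcodes sharing a mnemonic are chained together as a list of 'templates'
   entries, so one hash lookup at parse time yields every candidate encoding
   for that mnemonic.  */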
6728static void
6729fill_instruction_hash_table (void)
6730{
6731 aarch64_opcode *opcode = aarch64_opcode_table;
6732
6733 while (opcode->name != NULL)
6734 {
6735 templates *templ, *new_templ;
6736 templ = hash_find (aarch64_ops_hsh, opcode->name);
6737
6738 new_templ = (templates *) xmalloc (sizeof (templates));
6739 new_templ->opcode = opcode;
6740 new_templ->next = NULL;
6741
6742 if (!templ)
6743 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6744 else
6745 {
6746 new_templ->next = templ->next;
6747 templ->next = new_templ;
6748 }
6749 ++opcode;
6750 }
6751}
6752
6753static inline void
6754convert_to_upper (char *dst, const char *src, size_t num)
6755{
6756 unsigned int i;
6757 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6758 *dst = TOUPPER (*src);
6759 *dst = '\0';
6760}
6761
 6762/* Assume STR points to a lower-case string; allocate, convert and return
 6763 the corresponding upper-case string. */
6764static inline const char*
6765get_upper_str (const char *str)
6766{
6767 char *ret;
6768 size_t len = strlen (str);
6769 if ((ret = xmalloc (len + 1)) == NULL)
6770 abort ();
6771 convert_to_upper (ret, str, len);
6772 return ret;
6773}
6774
6775/* MD interface: Initialization. */
6776
6777void
6778md_begin (void)
6779{
6780 unsigned mach;
6781 unsigned int i;
6782
6783 if ((aarch64_ops_hsh = hash_new ()) == NULL
6784 || (aarch64_cond_hsh = hash_new ()) == NULL
6785 || (aarch64_shift_hsh = hash_new ()) == NULL
6786 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6787 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6788 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6789 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6790 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6791 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6792 || (aarch64_reg_hsh = hash_new ()) == NULL
6793 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6794 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6795 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6796 as_fatal (_("virtual memory exhausted"));
6797
6798 fill_instruction_hash_table ();
6799
6800 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6801 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6802 (void *) (aarch64_sys_regs + i));
6803
6804 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6805 checked_hash_insert (aarch64_pstatefield_hsh,
6806 aarch64_pstatefields[i].name,
6807 (void *) (aarch64_pstatefields + i));
6808
6809 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6810 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6811 aarch64_sys_regs_ic[i].template,
6812 (void *) (aarch64_sys_regs_ic + i));
6813
6814 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6815 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6816 aarch64_sys_regs_dc[i].template,
6817 (void *) (aarch64_sys_regs_dc + i));
6818
6819 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6820 checked_hash_insert (aarch64_sys_regs_at_hsh,
6821 aarch64_sys_regs_at[i].template,
6822 (void *) (aarch64_sys_regs_at + i));
6823
6824 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6825 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6826 aarch64_sys_regs_tlbi[i].template,
6827 (void *) (aarch64_sys_regs_tlbi + i));
6828
6829 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6830 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6831 (void *) (reg_names + i));
6832
6833 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6834 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6835 (void *) (nzcv_names + i));
6836
6837 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6838 {
6839 const char *name = aarch64_operand_modifiers[i].name;
6840 checked_hash_insert (aarch64_shift_hsh, name,
6841 (void *) (aarch64_operand_modifiers + i));
6842 /* Also hash the name in the upper case. */
6843 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6844 (void *) (aarch64_operand_modifiers + i));
6845 }
6846
6847 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6848 {
6849 unsigned int j;
6850 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6851 the same condition code. */
6852 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6853 {
6854 const char *name = aarch64_conds[i].names[j];
6855 if (name == NULL)
6856 break;
6857 checked_hash_insert (aarch64_cond_hsh, name,
6858 (void *) (aarch64_conds + i));
6859 /* Also hash the name in the upper case. */
6860 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6861 (void *) (aarch64_conds + i));
6862 }
6863 }
6864
6865 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6866 {
6867 const char *name = aarch64_barrier_options[i].name;
 6868 /* Skip xx00 - the unallocated values of the barrier option field. */
6869 if ((i & 0x3) == 0)
6870 continue;
6871 checked_hash_insert (aarch64_barrier_opt_hsh, name,
6872 (void *) (aarch64_barrier_options + i));
6873 /* Also hash the name in the upper case. */
6874 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6875 (void *) (aarch64_barrier_options + i));
6876 }
6877
6878 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6879 {
6880 const char* name = aarch64_prfops[i].name;
6881 /* Skip 0011x, 01xxx, 1011x and 11xxx - the unallocated hint encodings
6882 as a 5-bit immediate #uimm5. */
6883 if ((i & 0xf) >= 6)
6884 continue;
6885 checked_hash_insert (aarch64_pldop_hsh, name,
6886 (void *) (aarch64_prfops + i));
6887 /* Also hash the name in the upper case. */
6888 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6889 (void *) (aarch64_prfops + i));
6890 }
6891
6892 /* Set the cpu variant based on the command-line options. */
6893 if (!mcpu_cpu_opt)
6894 mcpu_cpu_opt = march_cpu_opt;
6895
6896 if (!mcpu_cpu_opt)
6897 mcpu_cpu_opt = &cpu_default;
6898
6899 cpu_variant = *mcpu_cpu_opt;
6900
6901 /* Record the CPU type. */
6902 mach = bfd_mach_aarch64;
6903
6904 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
6905}
6906
6907/* Command line processing. */
6908
6909const char *md_shortopts = "m:";
6910
6911#ifdef AARCH64_BI_ENDIAN
6912#define OPTION_EB (OPTION_MD_BASE + 0)
6913#define OPTION_EL (OPTION_MD_BASE + 1)
6914#else
6915#if TARGET_BYTES_BIG_ENDIAN
6916#define OPTION_EB (OPTION_MD_BASE + 0)
6917#else
6918#define OPTION_EL (OPTION_MD_BASE + 1)
6919#endif
6920#endif
6921
6922struct option md_longopts[] = {
6923#ifdef OPTION_EB
6924 {"EB", no_argument, NULL, OPTION_EB},
6925#endif
6926#ifdef OPTION_EL
6927 {"EL", no_argument, NULL, OPTION_EL},
6928#endif
6929 {NULL, no_argument, NULL, 0}
6930};
6931
6932size_t md_longopts_size = sizeof (md_longopts);
6933
6934struct aarch64_option_table
6935{
6936 char *option; /* Option name to match. */
6937 char *help; /* Help information. */
6938 int *var; /* Variable to change. */
6939 int value; /* What to change it to. */
6940 char *deprecated; /* If non-null, print this message. */
6941};
6942
6943static struct aarch64_option_table aarch64_opts[] = {
6944 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
6945 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
6946 NULL},
6947#ifdef DEBUG_AARCH64
6948 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
6949#endif /* DEBUG_AARCH64 */
6950 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
6951 NULL},
6952 {NULL, NULL, NULL, 0, NULL}
6953};
6954
6955struct aarch64_cpu_option_table
6956{
6957 char *name;
6958 const aarch64_feature_set value;
6959 /* The canonical name of the CPU, or NULL to use NAME converted to upper
6960 case. */
6961 const char *canonical_name;
6962};
6963
6964/* This list should, at a minimum, contain all the cpu names
6965 recognized by GCC. */
6966static const struct aarch64_cpu_option_table aarch64_cpus[] = {
6967 {"all", AARCH64_ANY, NULL},
6968 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
6969 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
6970 {"generic", AARCH64_ARCH_V8, NULL},
6971
6972 /* These two are example CPUs supported in GCC, once we have real
6973 CPUs they will be removed. */
6974 {"example-1", AARCH64_ARCH_V8, NULL},
6975 {"example-2", AARCH64_ARCH_V8, NULL},
6976
6977 {NULL, AARCH64_ARCH_NONE, NULL}
6978};
6979
6980struct aarch64_arch_option_table
6981{
6982 char *name;
6983 const aarch64_feature_set value;
6984};
6985
6986/* This list should, at a minimum, contain all the architecture names
6987 recognized by GCC. */
6988static const struct aarch64_arch_option_table aarch64_archs[] = {
6989 {"all", AARCH64_ANY},
 6990 {"armv8-a", AARCH64_ARCH_V8},
6991 {NULL, AARCH64_ARCH_NONE}
6992};
6993
6994/* ISA extensions. */
6995struct aarch64_option_cpu_value_table
6996{
6997 char *name;
6998 const aarch64_feature_set value;
6999};
7000
7001static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7002 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7003 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7004 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7005 {NULL, AARCH64_ARCH_NONE}
7006};
7007
7008struct aarch64_long_option_table
7009{
7010 char *option; /* Substring to match. */
7011 char *help; /* Help information. */
7012 int (*func) (char *subopt); /* Function to decode sub-option. */
7013 char *deprecated; /* If non-null, print this message. */
7014};
7015
7016static int
7017aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7018{
7019 /* We insist on extensions being added before being removed. We achieve
7020 this by using the ADDING_VALUE variable to indicate whether we are
7021 adding an extension (1) or removing it (0) and only allowing it to
7022 change in the order -1 -> 1 -> 0. */
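  /* For example, "+fp+simd+nocrypto" parses successfully, whereas
     "+nocrypto+fp" is rejected because an extension would be added after
     one has already been removed.  */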
7023 int adding_value = -1;
7024 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7025
7026 /* Copy the feature set, so that we can modify it. */
7027 *ext_set = **opt_p;
7028 *opt_p = ext_set;
7029
7030 while (str != NULL && *str != 0)
7031 {
7032 const struct aarch64_option_cpu_value_table *opt;
7033 char *ext;
7034 int optlen;
7035
7036 if (*str != '+')
7037 {
7038 as_bad (_("invalid architectural extension"));
7039 return 0;
7040 }
7041
7042 str++;
7043 ext = strchr (str, '+');
7044
7045 if (ext != NULL)
7046 optlen = ext - str;
7047 else
7048 optlen = strlen (str);
7049
7050 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7051 {
7052 if (adding_value != 0)
7053 adding_value = 0;
7054 optlen -= 2;
7055 str += 2;
7056 }
7057 else if (optlen > 0)
7058 {
7059 if (adding_value == -1)
7060 adding_value = 1;
7061 else if (adding_value != 1)
7062 {
7063 as_bad (_("must specify extensions to add before specifying "
7064 "those to remove"));
7065 return FALSE;
7066 }
7067 }
7068
7069 if (optlen == 0)
7070 {
7071 as_bad (_("missing architectural extension"));
7072 return 0;
7073 }
7074
7075 gas_assert (adding_value != -1);
7076
7077 for (opt = aarch64_features; opt->name != NULL; opt++)
7078 if (strncmp (opt->name, str, optlen) == 0)
7079 {
7080 /* Add or remove the extension. */
7081 if (adding_value)
7082 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7083 else
7084 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7085 break;
7086 }
7087
7088 if (opt->name == NULL)
7089 {
7090 as_bad (_("unknown architectural extension `%s'"), str);
7091 return 0;
7092 }
7093
7094 str = ext;
7095 };
7096
7097 return 1;
7098}
7099
7100static int
7101aarch64_parse_cpu (char *str)
7102{
7103 const struct aarch64_cpu_option_table *opt;
7104 char *ext = strchr (str, '+');
7105 size_t optlen;
7106
7107 if (ext != NULL)
7108 optlen = ext - str;
7109 else
7110 optlen = strlen (str);
7111
7112 if (optlen == 0)
7113 {
7114 as_bad (_("missing cpu name `%s'"), str);
7115 return 0;
7116 }
7117
7118 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7119 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7120 {
7121 mcpu_cpu_opt = &opt->value;
7122 if (ext != NULL)
7123 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7124
7125 return 1;
7126 }
7127
7128 as_bad (_("unknown cpu `%s'"), str);
7129 return 0;
7130}
7131
7132static int
7133aarch64_parse_arch (char *str)
7134{
7135 const struct aarch64_arch_option_table *opt;
7136 char *ext = strchr (str, '+');
7137 size_t optlen;
7138
7139 if (ext != NULL)
7140 optlen = ext - str;
7141 else
7142 optlen = strlen (str);
7143
7144 if (optlen == 0)
7145 {
7146 as_bad (_("missing architecture name `%s'"), str);
7147 return 0;
7148 }
7149
7150 for (opt = aarch64_archs; opt->name != NULL; opt++)
7151 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7152 {
7153 march_cpu_opt = &opt->value;
7154 if (ext != NULL)
7155 return aarch64_parse_features (ext, &march_cpu_opt);
7156
7157 return 1;
7158 }
7159
7160 as_bad (_("unknown architecture `%s'\n"), str);
7161 return 0;
7162}
7163
7164static struct aarch64_long_option_table aarch64_long_opts[] = {
7165 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7166 aarch64_parse_cpu, NULL},
7167 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7168 aarch64_parse_arch, NULL},
7169 {NULL, NULL, 0, NULL}
7170};
7171
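/* Handle a target-specific command-line option.  C is the option character
   and ARG its argument; e.g. for "-mcpu=cortex-a53+crypto" C is 'm' and ARG
   is "cpu=cortex-a53+crypto", and the matching aarch64_long_opts entry hands
   "cortex-a53+crypto" on to aarch64_parse_cpu.  Return non-zero if the
   option was recognized.  */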
7172int
7173md_parse_option (int c, char *arg)
7174{
7175 struct aarch64_option_table *opt;
7176 struct aarch64_long_option_table *lopt;
7177
7178 switch (c)
7179 {
7180#ifdef OPTION_EB
7181 case OPTION_EB:
7182 target_big_endian = 1;
7183 break;
7184#endif
7185
7186#ifdef OPTION_EL
7187 case OPTION_EL:
7188 target_big_endian = 0;
7189 break;
7190#endif
7191
7192 case 'a':
7193 /* Listing option. Just ignore these, we don't support additional
7194 ones. */
7195 return 0;
7196
7197 default:
7198 for (opt = aarch64_opts; opt->option != NULL; opt++)
7199 {
7200 if (c == opt->option[0]
7201 && ((arg == NULL && opt->option[1] == 0)
7202 || streq (arg, opt->option + 1)))
7203 {
7204 /* If the option is deprecated, tell the user. */
7205 if (opt->deprecated != NULL)
7206 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7207 arg ? arg : "", _(opt->deprecated));
7208
7209 if (opt->var != NULL)
7210 *opt->var = opt->value;
7211
7212 return 1;
7213 }
7214 }
7215
7216 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7217 {
7218 /* These options are expected to have an argument. */
7219 if (c == lopt->option[0]
7220 && arg != NULL
7221 && strncmp (arg, lopt->option + 1,
7222 strlen (lopt->option + 1)) == 0)
7223 {
7224 /* If the option is deprecated, tell the user. */
7225 if (lopt->deprecated != NULL)
7226 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7227 _(lopt->deprecated));
7228
 7229 /* Call the sub-option parser. */
7230 return lopt->func (arg + strlen (lopt->option) - 1);
7231 }
7232 }
7233
7234 return 0;
7235 }
7236
7237 return 1;
7238}
7239
7240void
7241md_show_usage (FILE * fp)
7242{
7243 struct aarch64_option_table *opt;
7244 struct aarch64_long_option_table *lopt;
7245
7246 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7247
7248 for (opt = aarch64_opts; opt->option != NULL; opt++)
7249 if (opt->help != NULL)
7250 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7251
7252 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7253 if (lopt->help != NULL)
7254 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7255
7256#ifdef OPTION_EB
7257 fprintf (fp, _("\
7258 -EB assemble code for a big-endian cpu\n"));
7259#endif
7260
7261#ifdef OPTION_EL
7262 fprintf (fp, _("\
7263 -EL assemble code for a little-endian cpu\n"));
7264#endif
7265}
7266
7267/* Parse a .cpu directive. */
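/* e.g. ".cpu cortex-a57+crypto" selects the Cortex-A57 feature set plus the
   crypto extension; the leading "all" entry of aarch64_cpus is deliberately
   not accepted here.  */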
7268
7269static void
7270s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7271{
7272 const struct aarch64_cpu_option_table *opt;
7273 char saved_char;
7274 char *name;
7275 char *ext;
7276 size_t optlen;
7277
7278 name = input_line_pointer;
7279 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7280 input_line_pointer++;
7281 saved_char = *input_line_pointer;
7282 *input_line_pointer = 0;
7283
7284 ext = strchr (name, '+');
7285
7286 if (ext != NULL)
7287 optlen = ext - name;
7288 else
7289 optlen = strlen (name);
7290
7291 /* Skip the first "all" entry. */
7292 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7293 if (strlen (opt->name) == optlen
7294 && strncmp (name, opt->name, optlen) == 0)
7295 {
7296 mcpu_cpu_opt = &opt->value;
7297 if (ext != NULL)
7298 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7299 return;
7300
7301 cpu_variant = *mcpu_cpu_opt;
7302
7303 *input_line_pointer = saved_char;
7304 demand_empty_rest_of_line ();
7305 return;
7306 }
7307 as_bad (_("unknown cpu `%s'"), name);
7308 *input_line_pointer = saved_char;
7309 ignore_rest_of_line ();
7310}
7311
7312
7313/* Parse a .arch directive. */
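/* e.g. ".arch armv8-a+crypto" selects the ARMv8-A base architecture plus the
   crypto extension; as with .cpu, the leading "all" entry is skipped.  */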
7314
7315static void
7316s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7317{
7318 const struct aarch64_arch_option_table *opt;
7319 char saved_char;
7320 char *name;
7321 char *ext;
7322 size_t optlen;
7323
7324 name = input_line_pointer;
7325 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7326 input_line_pointer++;
7327 saved_char = *input_line_pointer;
7328 *input_line_pointer = 0;
7329
7330 ext = strchr (name, '+');
7331
7332 if (ext != NULL)
7333 optlen = ext - name;
7334 else
7335 optlen = strlen (name);
7336
7337 /* Skip the first "all" entry. */
7338 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7339 if (strlen (opt->name) == optlen
7340 && strncmp (name, opt->name, optlen) == 0)
7341 {
7342 mcpu_cpu_opt = &opt->value;
7343 if (ext != NULL)
7344 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7345 return;
7346
7347 cpu_variant = *mcpu_cpu_opt;
7348
7349 *input_line_pointer = saved_char;
7350 demand_empty_rest_of_line ();
7351 return;
7352 }
7353
7354 as_bad (_("unknown architecture `%s'\n"), name);
7355 *input_line_pointer = saved_char;
7356 ignore_rest_of_line ();
7357}
7358
7359/* Copy symbol information. */
7360
7361void
7362aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7363{
7364 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7365}