2014-11-05 Jan-Benedict Glaw <jbglaw@lug-owl.de>
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2014 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
59 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_LP64 = 0,
69 AARCH64_ABI_ILP32 = 1
70 };
71
72 /* AArch64 ABI for the output file. */
73 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
74
75 /* When non-zero, program to a 32-bit model, in which the C data types
76 int, long and all pointer types are 32-bit objects (ILP32); or to a
77 64-bit model, in which the C int type is 32-bits but the C long type
78 and all pointer types are 64-bit objects (LP64). */
79 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
80 #endif
81
82 enum neon_el_type
83 {
84 NT_invtype = -1,
85 NT_b,
86 NT_h,
87 NT_s,
88 NT_d,
89 NT_q
90 };
91
92 /* Bits for DEFINED field in neon_type_el. */
93 #define NTA_HASTYPE 1
94 #define NTA_HASINDEX 2
95
96 struct neon_type_el
97 {
98 enum neon_el_type type;
99 unsigned char defined;
100 unsigned width;
101 int64_t index;
102 };
103
104 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
105
106 struct reloc
107 {
108 bfd_reloc_code_real_type type;
109 expressionS exp;
110 int pc_rel;
111 enum aarch64_opnd opnd;
112 uint32_t flags;
113 unsigned need_libopcodes_p : 1;
114 };
115
116 struct aarch64_instruction
117 {
118 /* libopcodes structure for instruction intermediate representation. */
119 aarch64_inst base;
120 /* Record assembly errors found during the parsing. */
121 struct
122 {
123 enum aarch64_operand_error_kind kind;
124 const char *error;
125 } parsing_error;
126 /* The condition that appears in the assembly line. */
127 int cond;
128 /* Relocation information (including the GAS internal fixup). */
129 struct reloc reloc;
130 /* Need to generate an immediate in the literal pool. */
131 unsigned gen_lit_pool : 1;
132 };
133
134 typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
141 /* Diagnostics inline function utilites.
142
143 These are lightweight utlities which should only be called by parse_operands
144 and other parsers. GAS processes each assembly line by parsing it against
145 instruction template(s), in the case of multiple templates (for the same
146 mnemonic name), those templates are tried one by one until one succeeds or
147 all fail. An assembly line may fail a few templates before being
148 successfully parsed; an error saved here in most cases is not a user error
149 but an error indicating the current template is not the right template.
150 Therefore it is very important that errors can be saved at a low cost during
151 the parsing; we don't want to slow down the whole parsing by recording
152 non-user errors in detail.
153
154 Remember that the objective is to help GAS pick up the most approapriate
155 error message in the case of multiple templates, e.g. FMOV which has 8
156 templates. */
157
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline void
178 set_error_message (const char *error)
179 {
180 inst.parsing_error.error = error;
181 }
182
183 static inline enum aarch64_operand_error_kind
184 get_error_kind (void)
185 {
186 return inst.parsing_error.kind;
187 }
188
189 static inline void
190 set_error_kind (enum aarch64_operand_error_kind kind)
191 {
192 inst.parsing_error.kind = kind;
193 }
194
195 static inline void
196 set_error (enum aarch64_operand_error_kind kind, const char *error)
197 {
198 inst.parsing_error.kind = kind;
199 inst.parsing_error.error = error;
200 }
201
202 static inline void
203 set_recoverable_error (const char *error)
204 {
205 set_error (AARCH64_OPDE_RECOVERABLE, error);
206 }
207
208 /* Use the DESC field of the corresponding aarch64_operand entry to compose
209 the error message. */
210 static inline void
211 set_default_error (void)
212 {
213 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
214 }
215
216 static inline void
217 set_syntax_error (const char *error)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
220 }
221
222 static inline void
223 set_first_syntax_error (const char *error)
224 {
225 if (! error_p ())
226 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
227 }
228
229 static inline void
230 set_fatal_syntax_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
233 }
234 \f
235 /* Number of littlenums required to hold an extended precision number. */
236 #define MAX_LITTLENUMS 6
237
238 /* Return value for certain parsers when the parsing fails; those parsers
239 return the information of the parsed result, e.g. register number, on
240 success. */
241 #define PARSE_FAIL -1
242
243 /* This is an invalid condition code that means no conditional field is
244 present. */
245 #define COND_ALWAYS 0x10
246
247 typedef struct
248 {
249 const char *template;
250 unsigned long value;
251 } asm_barrier_opt;
252
253 typedef struct
254 {
255 const char *template;
256 uint32_t value;
257 } asm_nzcv;
258
259 struct reloc_entry
260 {
261 char *name;
262 bfd_reloc_code_real_type reloc;
263 };
264
265 /* Structure for a hash table entry for a register. */
266 typedef struct
267 {
268 const char *name;
269 unsigned char number;
270 unsigned char type;
271 unsigned char builtin;
272 } reg_entry;
273
274 /* Macros to define the register types and masks for the purpose
275 of parsing. */
276
277 #undef AARCH64_REG_TYPES
278 #define AARCH64_REG_TYPES \
279 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
280 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
281 BASIC_REG_TYPE(SP_32) /* wsp */ \
282 BASIC_REG_TYPE(SP_64) /* sp */ \
283 BASIC_REG_TYPE(Z_32) /* wzr */ \
284 BASIC_REG_TYPE(Z_64) /* xzr */ \
285 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
286 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
287 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
288 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
289 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
290 BASIC_REG_TYPE(CN) /* c[0-7] */ \
291 BASIC_REG_TYPE(VN) /* v[0-31] */ \
292 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
293 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
294 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
295 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: any [BHSDQ]P FP. */ \
299 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
300 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
301 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
302 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
304 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
305 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
306 /* Any integer register; used for error messages only. */ \
307 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
308 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
309 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
310 /* Pseudo type to mark the end of the enumerator sequence. */ \
311 BASIC_REG_TYPE(MAX)
312
313 #undef BASIC_REG_TYPE
314 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
315 #undef MULTI_REG_TYPE
316 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
317
318 /* Register type enumerators. */
319 typedef enum
320 {
321 /* A list of REG_TYPE_*. */
322 AARCH64_REG_TYPES
323 } aarch64_reg_type;
324
325 #undef BASIC_REG_TYPE
326 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
327 #undef REG_TYPE
328 #define REG_TYPE(T) (1 << REG_TYPE_##T)
329 #undef MULTI_REG_TYPE
330 #define MULTI_REG_TYPE(T,V) V,
331
332 /* Values indexed by aarch64_reg_type to assist the type checking. */
333 static const unsigned reg_type_masks[] =
334 {
335 AARCH64_REG_TYPES
336 };
337
338 #undef BASIC_REG_TYPE
339 #undef REG_TYPE
340 #undef MULTI_REG_TYPE
341 #undef AARCH64_REG_TYPES
342
343 /* Diagnostics used when we don't get a register of the expected type.
344 Note: this has to synchronized with aarch64_reg_type definitions
345 above. */
346 static const char *
347 get_reg_expected_msg (aarch64_reg_type reg_type)
348 {
349 const char *msg;
350
351 switch (reg_type)
352 {
353 case REG_TYPE_R_32:
354 msg = N_("integer 32-bit register expected");
355 break;
356 case REG_TYPE_R_64:
357 msg = N_("integer 64-bit register expected");
358 break;
359 case REG_TYPE_R_N:
360 msg = N_("integer register expected");
361 break;
362 case REG_TYPE_R_Z_SP:
363 msg = N_("integer, zero or SP register expected");
364 break;
365 case REG_TYPE_FP_B:
366 msg = N_("8-bit SIMD scalar register expected");
367 break;
368 case REG_TYPE_FP_H:
369 msg = N_("16-bit SIMD scalar or floating-point half precision "
370 "register expected");
371 break;
372 case REG_TYPE_FP_S:
373 msg = N_("32-bit SIMD scalar or floating-point single precision "
374 "register expected");
375 break;
376 case REG_TYPE_FP_D:
377 msg = N_("64-bit SIMD scalar or floating-point double precision "
378 "register expected");
379 break;
380 case REG_TYPE_FP_Q:
381 msg = N_("128-bit SIMD scalar or floating-point quad precision "
382 "register expected");
383 break;
384 case REG_TYPE_CN:
385 msg = N_("C0 - C15 expected");
386 break;
387 case REG_TYPE_R_Z_BHSDQ_V:
388 msg = N_("register expected");
389 break;
390 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
391 msg = N_("SIMD scalar or floating-point register expected");
392 break;
393 case REG_TYPE_VN: /* any V reg */
394 msg = N_("vector register expected");
395 break;
396 default:
397 as_fatal (_("invalid register type %d"), reg_type);
398 }
399 return msg;
400 }
401
402 /* Some well known registers that we refer to directly elsewhere. */
403 #define REG_SP 31
404
405 /* Instructions take 4 bytes in the object file. */
406 #define INSN_SIZE 4
407
408 /* Define some common error messages. */
409 #define BAD_SP _("SP not allowed here")
410
411 static struct hash_control *aarch64_ops_hsh;
412 static struct hash_control *aarch64_cond_hsh;
413 static struct hash_control *aarch64_shift_hsh;
414 static struct hash_control *aarch64_sys_regs_hsh;
415 static struct hash_control *aarch64_pstatefield_hsh;
416 static struct hash_control *aarch64_sys_regs_ic_hsh;
417 static struct hash_control *aarch64_sys_regs_dc_hsh;
418 static struct hash_control *aarch64_sys_regs_at_hsh;
419 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
420 static struct hash_control *aarch64_reg_hsh;
421 static struct hash_control *aarch64_barrier_opt_hsh;
422 static struct hash_control *aarch64_nzcv_hsh;
423 static struct hash_control *aarch64_pldop_hsh;
424
425 /* Stuff needed to resolve the label ambiguity
426 As:
427 ...
428 label: <insn>
429 may differ from:
430 ...
431 label:
432 <insn> */
433
434 static symbolS *last_label_seen;
435
436 /* Literal pool structure. Held on a per-section
437 and per-sub-section basis. */
438
439 #define MAX_LITERAL_POOL_SIZE 1024
440 typedef struct literal_expression
441 {
442 expressionS exp;
443 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
444 LITTLENUM_TYPE * bignum;
445 } literal_expression;
446
447 typedef struct literal_pool
448 {
449 literal_expression literals[MAX_LITERAL_POOL_SIZE];
450 unsigned int next_free_entry;
451 unsigned int id;
452 symbolS *symbol;
453 segT section;
454 subsegT sub_section;
455 int size;
456 struct literal_pool *next;
457 } literal_pool;
458
459 /* Pointer to a linked list of literal pools. */
460 static literal_pool *list_of_pools = NULL;
461 \f
462 /* Pure syntax. */
463
464 /* This array holds the chars that always start a comment. If the
465 pre-processor is disabled, these aren't very useful. */
466 const char comment_chars[] = "";
467
468 /* This array holds the chars that only start a comment at the beginning of
469 a line. If the line seems to have the form '# 123 filename'
470 .line and .file directives will appear in the pre-processed output. */
471 /* Note that input_file.c hand checks for '#' at the beginning of the
472 first line of the input file. This is because the compiler outputs
473 #NO_APP at the beginning of its output. */
474 /* Also note that comments like this one will always work. */
475 const char line_comment_chars[] = "#";
476
477 const char line_separator_chars[] = ";";
478
479 /* Chars that can be used to separate mant
480 from exp in floating point numbers. */
481 const char EXP_CHARS[] = "eE";
482
483 /* Chars that mean this number is a floating point constant. */
484 /* As in 0f12.456 */
485 /* or 0d1.2345e12 */
486
487 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
488
489 /* Prefix character that indicates the start of an immediate value. */
490 #define is_immediate_prefix(C) ((C) == '#')
491
492 /* Separator character handling. */
493
494 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
495
496 static inline bfd_boolean
497 skip_past_char (char **str, char c)
498 {
499 if (**str == c)
500 {
501 (*str)++;
502 return TRUE;
503 }
504 else
505 return FALSE;
506 }
507
508 #define skip_past_comma(str) skip_past_char (str, ',')
509
510 /* Arithmetic expressions (possibly involving symbols). */
511
512 static bfd_boolean in_my_get_expression_p = FALSE;
513
514 /* Third argument to my_get_expression. */
515 #define GE_NO_PREFIX 0
516 #define GE_OPT_PREFIX 1
517
518 /* Return TRUE if the string pointed by *STR is successfully parsed
519 as an valid expression; *EP will be filled with the information of
520 such an expression. Otherwise return FALSE. */
521
522 static bfd_boolean
523 my_get_expression (expressionS * ep, char **str, int prefix_mode,
524 int reject_absent)
525 {
526 char *save_in;
527 segT seg;
528 int prefix_present_p = 0;
529
530 switch (prefix_mode)
531 {
532 case GE_NO_PREFIX:
533 break;
534 case GE_OPT_PREFIX:
535 if (is_immediate_prefix (**str))
536 {
537 (*str)++;
538 prefix_present_p = 1;
539 }
540 break;
541 default:
542 abort ();
543 }
544
545 memset (ep, 0, sizeof (expressionS));
546
547 save_in = input_line_pointer;
548 input_line_pointer = *str;
549 in_my_get_expression_p = TRUE;
550 seg = expression (ep);
551 in_my_get_expression_p = FALSE;
552
553 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
554 {
555 /* We found a bad expression in md_operand(). */
556 *str = input_line_pointer;
557 input_line_pointer = save_in;
558 if (prefix_present_p && ! error_p ())
559 set_fatal_syntax_error (_("bad expression"));
560 else
561 set_first_syntax_error (_("bad expression"));
562 return FALSE;
563 }
564
565 #ifdef OBJ_AOUT
566 if (seg != absolute_section
567 && seg != text_section
568 && seg != data_section
569 && seg != bss_section && seg != undefined_section)
570 {
571 set_syntax_error (_("bad segment"));
572 *str = input_line_pointer;
573 input_line_pointer = save_in;
574 return FALSE;
575 }
576 #else
577 (void) seg;
578 #endif
579
580 *str = input_line_pointer;
581 input_line_pointer = save_in;
582 return TRUE;
583 }
584
585 /* Turn a string in input_line_pointer into a floating point constant
586 of type TYPE, and store the appropriate bytes in *LITP. The number
587 of LITTLENUMS emitted is stored in *SIZEP. An error message is
588 returned, or NULL on OK. */
589
590 char *
591 md_atof (int type, char *litP, int *sizeP)
592 {
593 return ieee_md_atof (type, litP, sizeP, target_big_endian);
594 }
595
596 /* We handle all bad expressions here, so that we can report the faulty
597 instruction in the error message. */
598 void
599 md_operand (expressionS * exp)
600 {
601 if (in_my_get_expression_p)
602 exp->X_op = O_illegal;
603 }
604
605 /* Immediate values. */
606
607 /* Errors may be set multiple times during parsing or bit encoding
608 (particularly in the Neon bits), but usually the earliest error which is set
609 will be the most meaningful. Avoid overwriting it with later (cascading)
610 errors by calling this function. */
611
612 static void
613 first_error (const char *error)
614 {
615 if (! error_p ())
616 set_syntax_error (error);
617 }
618
619 /* Similiar to first_error, but this function accepts formatted error
620 message. */
621 static void
622 first_error_fmt (const char *format, ...)
623 {
624 va_list args;
625 enum
626 { size = 100 };
627 /* N.B. this single buffer will not cause error messages for different
628 instructions to pollute each other; this is because at the end of
629 processing of each assembly line, error message if any will be
630 collected by as_bad. */
631 static char buffer[size];
632
633 if (! error_p ())
634 {
635 int ret ATTRIBUTE_UNUSED;
636 va_start (args, format);
637 ret = vsnprintf (buffer, size, format, args);
638 know (ret <= size - 1 && ret >= 0);
639 va_end (args);
640 set_syntax_error (buffer);
641 }
642 }
643
644 /* Register parsing. */
645
646 /* Generic register parser which is called by other specialized
647 register parsers.
648 CCP points to what should be the beginning of a register name.
649 If it is indeed a valid register name, advance CCP over it and
650 return the reg_entry structure; otherwise return NULL.
651 It does not issue diagnostics. */
652
653 static reg_entry *
654 parse_reg (char **ccp)
655 {
656 char *start = *ccp;
657 char *p;
658 reg_entry *reg;
659
660 #ifdef REGISTER_PREFIX
661 if (*start != REGISTER_PREFIX)
662 return NULL;
663 start++;
664 #endif
665
666 p = start;
667 if (!ISALPHA (*p) || !is_name_beginner (*p))
668 return NULL;
669
670 do
671 p++;
672 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
673
674 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
675
676 if (!reg)
677 return NULL;
678
679 *ccp = p;
680 return reg;
681 }
682
683 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
684 return FALSE. */
685 static bfd_boolean
686 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
687 {
688 if (reg->type == type)
689 return TRUE;
690
691 switch (type)
692 {
693 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
694 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
695 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
696 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
697 case REG_TYPE_VN: /* Vector register. */
698 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
699 return ((reg_type_masks[reg->type] & reg_type_masks[type])
700 == reg_type_masks[reg->type]);
701 default:
702 as_fatal ("unhandled type %d", type);
703 abort ();
704 }
705 }
706
707 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
708 Return the register number otherwise. *ISREG32 is set to one if the
709 register is 32-bit wide; *ISREGZERO is set to one if the register is
710 of type Z_32 or Z_64.
711 Note that this function does not issue any diagnostics. */
712
713 static int
714 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
715 int *isreg32, int *isregzero)
716 {
717 char *str = *ccp;
718 const reg_entry *reg = parse_reg (&str);
719
720 if (reg == NULL)
721 return PARSE_FAIL;
722
723 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
724 return PARSE_FAIL;
725
726 switch (reg->type)
727 {
728 case REG_TYPE_SP_32:
729 case REG_TYPE_SP_64:
730 if (reject_sp)
731 return PARSE_FAIL;
732 *isreg32 = reg->type == REG_TYPE_SP_32;
733 *isregzero = 0;
734 break;
735 case REG_TYPE_R_32:
736 case REG_TYPE_R_64:
737 *isreg32 = reg->type == REG_TYPE_R_32;
738 *isregzero = 0;
739 break;
740 case REG_TYPE_Z_32:
741 case REG_TYPE_Z_64:
742 if (reject_rz)
743 return PARSE_FAIL;
744 *isreg32 = reg->type == REG_TYPE_Z_32;
745 *isregzero = 1;
746 break;
747 default:
748 return PARSE_FAIL;
749 }
750
751 *ccp = str;
752
753 return reg->number;
754 }
755
756 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
757 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
758 otherwise return FALSE.
759
760 Accept only one occurrence of:
761 8b 16b 4h 8h 2s 4s 1d 2d
762 b h s d q */
763 static bfd_boolean
764 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
765 {
766 char *ptr = *str;
767 unsigned width;
768 unsigned element_size;
769 enum neon_el_type type;
770
771 /* skip '.' */
772 ptr++;
773
774 if (!ISDIGIT (*ptr))
775 {
776 width = 0;
777 goto elt_size;
778 }
779 width = strtoul (ptr, &ptr, 10);
780 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
781 {
782 first_error_fmt (_("bad size %d in vector width specifier"), width);
783 return FALSE;
784 }
785
786 elt_size:
787 switch (TOLOWER (*ptr))
788 {
789 case 'b':
790 type = NT_b;
791 element_size = 8;
792 break;
793 case 'h':
794 type = NT_h;
795 element_size = 16;
796 break;
797 case 's':
798 type = NT_s;
799 element_size = 32;
800 break;
801 case 'd':
802 type = NT_d;
803 element_size = 64;
804 break;
805 case 'q':
806 if (width == 1)
807 {
808 type = NT_q;
809 element_size = 128;
810 break;
811 }
812 /* fall through. */
813 default:
814 if (*ptr != '\0')
815 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
816 else
817 first_error (_("missing element size"));
818 return FALSE;
819 }
820 if (width != 0 && width * element_size != 64 && width * element_size != 128)
821 {
822 first_error_fmt (_
823 ("invalid element size %d and vector size combination %c"),
824 width, *ptr);
825 return FALSE;
826 }
827 ptr++;
828
829 parsed_type->type = type;
830 parsed_type->width = width;
831
832 *str = ptr;
833
834 return TRUE;
835 }
836
837 /* Parse a single type, e.g. ".8b", leading period included.
838 Only applicable to Vn registers.
839
840 Return TRUE on success; otherwise return FALSE. */
841 static bfd_boolean
842 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
843 {
844 char *str = *ccp;
845
846 if (*str == '.')
847 {
848 if (! parse_neon_type_for_operand (vectype, &str))
849 {
850 first_error (_("vector type expected"));
851 return FALSE;
852 }
853 }
854 else
855 return FALSE;
856
857 *ccp = str;
858
859 return TRUE;
860 }
861
862 /* Parse a register of the type TYPE.
863
864 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
865 name or the parsed register is not of TYPE.
866
867 Otherwise return the register number, and optionally fill in the actual
868 type of the register in *RTYPE when multiple alternatives were given, and
869 return the register shape and element index information in *TYPEINFO.
870
871 IN_REG_LIST should be set with TRUE if the caller is parsing a register
872 list. */
873
874 static int
875 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
876 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
877 {
878 char *str = *ccp;
879 const reg_entry *reg = parse_reg (&str);
880 struct neon_type_el atype;
881 struct neon_type_el parsetype;
882 bfd_boolean is_typed_vecreg = FALSE;
883
884 atype.defined = 0;
885 atype.type = NT_invtype;
886 atype.width = -1;
887 atype.index = 0;
888
889 if (reg == NULL)
890 {
891 if (typeinfo)
892 *typeinfo = atype;
893 set_default_error ();
894 return PARSE_FAIL;
895 }
896
897 if (! aarch64_check_reg_type (reg, type))
898 {
899 DEBUG_TRACE ("reg type check failed");
900 set_default_error ();
901 return PARSE_FAIL;
902 }
903 type = reg->type;
904
905 if (type == REG_TYPE_VN
906 && parse_neon_operand_type (&parsetype, &str))
907 {
908 /* Register if of the form Vn.[bhsdq]. */
909 is_typed_vecreg = TRUE;
910
911 if (parsetype.width == 0)
912 /* Expect index. In the new scheme we cannot have
913 Vn.[bhsdq] represent a scalar. Therefore any
914 Vn.[bhsdq] should have an index following it.
915 Except in reglists ofcourse. */
916 atype.defined |= NTA_HASINDEX;
917 else
918 atype.defined |= NTA_HASTYPE;
919
920 atype.type = parsetype.type;
921 atype.width = parsetype.width;
922 }
923
924 if (skip_past_char (&str, '['))
925 {
926 expressionS exp;
927
928 /* Reject Sn[index] syntax. */
929 if (!is_typed_vecreg)
930 {
931 first_error (_("this type of register can't be indexed"));
932 return PARSE_FAIL;
933 }
934
935 if (in_reg_list == TRUE)
936 {
937 first_error (_("index not allowed inside register list"));
938 return PARSE_FAIL;
939 }
940
941 atype.defined |= NTA_HASINDEX;
942
943 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
944
945 if (exp.X_op != O_constant)
946 {
947 first_error (_("constant expression required"));
948 return PARSE_FAIL;
949 }
950
951 if (! skip_past_char (&str, ']'))
952 return PARSE_FAIL;
953
954 atype.index = exp.X_add_number;
955 }
956 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
957 {
958 /* Indexed vector register expected. */
959 first_error (_("indexed vector register expected"));
960 return PARSE_FAIL;
961 }
962
963 /* A vector reg Vn should be typed or indexed. */
964 if (type == REG_TYPE_VN && atype.defined == 0)
965 {
966 first_error (_("invalid use of vector register"));
967 }
968
969 if (typeinfo)
970 *typeinfo = atype;
971
972 if (rtype)
973 *rtype = type;
974
975 *ccp = str;
976
977 return reg->number;
978 }
979
980 /* Parse register.
981
982 Return the register number on success; return PARSE_FAIL otherwise.
983
984 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
985 the register (e.g. NEON double or quad reg when either has been requested).
986
987 If this is a NEON vector register with additional type information, fill
988 in the struct pointed to by VECTYPE (if non-NULL).
989
990 This parser does not handle register list. */
991
992 static int
993 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
994 aarch64_reg_type *rtype, struct neon_type_el *vectype)
995 {
996 struct neon_type_el atype;
997 char *str = *ccp;
998 int reg = parse_typed_reg (&str, type, rtype, &atype,
999 /*in_reg_list= */ FALSE);
1000
1001 if (reg == PARSE_FAIL)
1002 return PARSE_FAIL;
1003
1004 if (vectype)
1005 *vectype = atype;
1006
1007 *ccp = str;
1008
1009 return reg;
1010 }
1011
1012 static inline bfd_boolean
1013 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1014 {
1015 return
1016 e1.type == e2.type
1017 && e1.defined == e2.defined
1018 && e1.width == e2.width && e1.index == e2.index;
1019 }
1020
1021 /* This function parses the NEON register list. On success, it returns
1022 the parsed register list information in the following encoded format:
1023
1024 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1025 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1026
1027 The information of the register shape and/or index is returned in
1028 *VECTYPE.
1029
1030 It returns PARSE_FAIL if the register list is invalid.
1031
1032 The list contains one to four registers.
1033 Each register can be one of:
1034 <Vt>.<T>[<index>]
1035 <Vt>.<T>
1036 All <T> should be identical.
1037 All <index> should be identical.
1038 There are restrictions on <Vt> numbers which are checked later
1039 (by reg_list_valid_p). */
1040
1041 static int
1042 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1043 {
1044 char *str = *ccp;
1045 int nb_regs;
1046 struct neon_type_el typeinfo, typeinfo_first;
1047 int val, val_range;
1048 int in_range;
1049 int ret_val;
1050 int i;
1051 bfd_boolean error = FALSE;
1052 bfd_boolean expect_index = FALSE;
1053
1054 if (*str != '{')
1055 {
1056 set_syntax_error (_("expecting {"));
1057 return PARSE_FAIL;
1058 }
1059 str++;
1060
1061 nb_regs = 0;
1062 typeinfo_first.defined = 0;
1063 typeinfo_first.type = NT_invtype;
1064 typeinfo_first.width = -1;
1065 typeinfo_first.index = 0;
1066 ret_val = 0;
1067 val = -1;
1068 val_range = -1;
1069 in_range = 0;
1070 do
1071 {
1072 if (in_range)
1073 {
1074 str++; /* skip over '-' */
1075 val_range = val;
1076 }
1077 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1078 /*in_reg_list= */ TRUE);
1079 if (val == PARSE_FAIL)
1080 {
1081 set_first_syntax_error (_("invalid vector register in list"));
1082 error = TRUE;
1083 continue;
1084 }
1085 /* reject [bhsd]n */
1086 if (typeinfo.defined == 0)
1087 {
1088 set_first_syntax_error (_("invalid scalar register in list"));
1089 error = TRUE;
1090 continue;
1091 }
1092
1093 if (typeinfo.defined & NTA_HASINDEX)
1094 expect_index = TRUE;
1095
1096 if (in_range)
1097 {
1098 if (val < val_range)
1099 {
1100 set_first_syntax_error
1101 (_("invalid range in vector register list"));
1102 error = TRUE;
1103 }
1104 val_range++;
1105 }
1106 else
1107 {
1108 val_range = val;
1109 if (nb_regs == 0)
1110 typeinfo_first = typeinfo;
1111 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1112 {
1113 set_first_syntax_error
1114 (_("type mismatch in vector register list"));
1115 error = TRUE;
1116 }
1117 }
1118 if (! error)
1119 for (i = val_range; i <= val; i++)
1120 {
1121 ret_val |= i << (5 * nb_regs);
1122 nb_regs++;
1123 }
1124 in_range = 0;
1125 }
1126 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1127
1128 skip_whitespace (str);
1129 if (*str != '}')
1130 {
1131 set_first_syntax_error (_("end of vector register list not found"));
1132 error = TRUE;
1133 }
1134 str++;
1135
1136 skip_whitespace (str);
1137
1138 if (expect_index)
1139 {
1140 if (skip_past_char (&str, '['))
1141 {
1142 expressionS exp;
1143
1144 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1145 if (exp.X_op != O_constant)
1146 {
1147 set_first_syntax_error (_("constant expression required."));
1148 error = TRUE;
1149 }
1150 if (! skip_past_char (&str, ']'))
1151 error = TRUE;
1152 else
1153 typeinfo_first.index = exp.X_add_number;
1154 }
1155 else
1156 {
1157 set_first_syntax_error (_("expected index"));
1158 error = TRUE;
1159 }
1160 }
1161
1162 if (nb_regs > 4)
1163 {
1164 set_first_syntax_error (_("too many registers in vector register list"));
1165 error = TRUE;
1166 }
1167 else if (nb_regs == 0)
1168 {
1169 set_first_syntax_error (_("empty vector register list"));
1170 error = TRUE;
1171 }
1172
1173 *ccp = str;
1174 if (! error)
1175 *vectype = typeinfo_first;
1176
1177 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1178 }
1179
1180 /* Directives: register aliases. */
1181
1182 static reg_entry *
1183 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1184 {
1185 reg_entry *new;
1186 const char *name;
1187
1188 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1189 {
1190 if (new->builtin)
1191 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1192 str);
1193
1194 /* Only warn about a redefinition if it's not defined as the
1195 same register. */
1196 else if (new->number != number || new->type != type)
1197 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1198
1199 return NULL;
1200 }
1201
1202 name = xstrdup (str);
1203 new = xmalloc (sizeof (reg_entry));
1204
1205 new->name = name;
1206 new->number = number;
1207 new->type = type;
1208 new->builtin = FALSE;
1209
1210 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1211 abort ();
1212
1213 return new;
1214 }
1215
1216 /* Look for the .req directive. This is of the form:
1217
1218 new_register_name .req existing_register_name
1219
1220 If we find one, or if it looks sufficiently like one that we want to
1221 handle any error here, return TRUE. Otherwise return FALSE. */
1222
1223 static bfd_boolean
1224 create_register_alias (char *newname, char *p)
1225 {
1226 const reg_entry *old;
1227 char *oldname, *nbuf;
1228 size_t nlen;
1229
1230 /* The input scrubber ensures that whitespace after the mnemonic is
1231 collapsed to single spaces. */
1232 oldname = p;
1233 if (strncmp (oldname, " .req ", 6) != 0)
1234 return FALSE;
1235
1236 oldname += 6;
1237 if (*oldname == '\0')
1238 return FALSE;
1239
1240 old = hash_find (aarch64_reg_hsh, oldname);
1241 if (!old)
1242 {
1243 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1244 return TRUE;
1245 }
1246
1247 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1248 the desired alias name, and p points to its end. If not, then
1249 the desired alias name is in the global original_case_string. */
1250 #ifdef TC_CASE_SENSITIVE
1251 nlen = p - newname;
1252 #else
1253 newname = original_case_string;
1254 nlen = strlen (newname);
1255 #endif
1256
1257 nbuf = alloca (nlen + 1);
1258 memcpy (nbuf, newname, nlen);
1259 nbuf[nlen] = '\0';
1260
1261 /* Create aliases under the new name as stated; an all-lowercase
1262 version of the new name; and an all-uppercase version of the new
1263 name. */
1264 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1265 {
1266 for (p = nbuf; *p; p++)
1267 *p = TOUPPER (*p);
1268
1269 if (strncmp (nbuf, newname, nlen))
1270 {
1271 /* If this attempt to create an additional alias fails, do not bother
1272 trying to create the all-lower case alias. We will fail and issue
1273 a second, duplicate error message. This situation arises when the
1274 programmer does something like:
1275 foo .req r0
1276 Foo .req r1
1277 The second .req creates the "Foo" alias but then fails to create
1278 the artificial FOO alias because it has already been created by the
1279 first .req. */
1280 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1281 return TRUE;
1282 }
1283
1284 for (p = nbuf; *p; p++)
1285 *p = TOLOWER (*p);
1286
1287 if (strncmp (nbuf, newname, nlen))
1288 insert_reg_alias (nbuf, old->number, old->type);
1289 }
1290
1291 return TRUE;
1292 }
1293
1294 /* Should never be called, as .req goes between the alias and the
1295 register name, not at the beginning of the line. */
1296 static void
1297 s_req (int a ATTRIBUTE_UNUSED)
1298 {
1299 as_bad (_("invalid syntax for .req directive"));
1300 }
1301
1302 /* The .unreq directive deletes an alias which was previously defined
1303 by .req. For example:
1304
1305 my_alias .req r11
1306 .unreq my_alias */
1307
1308 static void
1309 s_unreq (int a ATTRIBUTE_UNUSED)
1310 {
1311 char *name;
1312 char saved_char;
1313
1314 name = input_line_pointer;
1315
1316 while (*input_line_pointer != 0
1317 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1318 ++input_line_pointer;
1319
1320 saved_char = *input_line_pointer;
1321 *input_line_pointer = 0;
1322
1323 if (!*name)
1324 as_bad (_("invalid syntax for .unreq directive"));
1325 else
1326 {
1327 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1328
1329 if (!reg)
1330 as_bad (_("unknown register alias '%s'"), name);
1331 else if (reg->builtin)
1332 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1333 name);
1334 else
1335 {
1336 char *p;
1337 char *nbuf;
1338
1339 hash_delete (aarch64_reg_hsh, name, FALSE);
1340 free ((char *) reg->name);
1341 free (reg);
1342
1343 /* Also locate the all upper case and all lower case versions.
1344 Do not complain if we cannot find one or the other as it
1345 was probably deleted above. */
1346
1347 nbuf = strdup (name);
1348 for (p = nbuf; *p; p++)
1349 *p = TOUPPER (*p);
1350 reg = hash_find (aarch64_reg_hsh, nbuf);
1351 if (reg)
1352 {
1353 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1354 free ((char *) reg->name);
1355 free (reg);
1356 }
1357
1358 for (p = nbuf; *p; p++)
1359 *p = TOLOWER (*p);
1360 reg = hash_find (aarch64_reg_hsh, nbuf);
1361 if (reg)
1362 {
1363 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1364 free ((char *) reg->name);
1365 free (reg);
1366 }
1367
1368 free (nbuf);
1369 }
1370 }
1371
1372 *input_line_pointer = saved_char;
1373 demand_empty_rest_of_line ();
1374 }
1375
1376 /* Directives: Instruction set selection. */
1377
1378 #ifdef OBJ_ELF
1379 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1380 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1381 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1382 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1383
1384 /* Create a new mapping symbol for the transition to STATE. */
1385
1386 static void
1387 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1388 {
1389 symbolS *symbolP;
1390 const char *symname;
1391 int type;
1392
1393 switch (state)
1394 {
1395 case MAP_DATA:
1396 symname = "$d";
1397 type = BSF_NO_FLAGS;
1398 break;
1399 case MAP_INSN:
1400 symname = "$x";
1401 type = BSF_NO_FLAGS;
1402 break;
1403 default:
1404 abort ();
1405 }
1406
1407 symbolP = symbol_new (symname, now_seg, value, frag);
1408 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1409
1410 /* Save the mapping symbols for future reference. Also check that
1411 we do not place two mapping symbols at the same offset within a
1412 frag. We'll handle overlap between frags in
1413 check_mapping_symbols.
1414
1415 If .fill or other data filling directive generates zero sized data,
1416 the mapping symbol for the following code will have the same value
1417 as the one generated for the data filling directive. In this case,
1418 we replace the old symbol with the new one at the same address. */
1419 if (value == 0)
1420 {
1421 if (frag->tc_frag_data.first_map != NULL)
1422 {
1423 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1424 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1425 &symbol_lastP);
1426 }
1427 frag->tc_frag_data.first_map = symbolP;
1428 }
1429 if (frag->tc_frag_data.last_map != NULL)
1430 {
1431 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1432 S_GET_VALUE (symbolP));
1433 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1434 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1435 &symbol_lastP);
1436 }
1437 frag->tc_frag_data.last_map = symbolP;
1438 }
1439
1440 /* We must sometimes convert a region marked as code to data during
1441 code alignment, if an odd number of bytes have to be padded. The
1442 code mapping symbol is pushed to an aligned address. */
1443
1444 static void
1445 insert_data_mapping_symbol (enum mstate state,
1446 valueT value, fragS * frag, offsetT bytes)
1447 {
1448 /* If there was already a mapping symbol, remove it. */
1449 if (frag->tc_frag_data.last_map != NULL
1450 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1451 frag->fr_address + value)
1452 {
1453 symbolS *symp = frag->tc_frag_data.last_map;
1454
1455 if (value == 0)
1456 {
1457 know (frag->tc_frag_data.first_map == symp);
1458 frag->tc_frag_data.first_map = NULL;
1459 }
1460 frag->tc_frag_data.last_map = NULL;
1461 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1462 }
1463
1464 make_mapping_symbol (MAP_DATA, value, frag);
1465 make_mapping_symbol (state, value + bytes, frag);
1466 }
1467
1468 static void mapping_state_2 (enum mstate state, int max_chars);
1469
1470 /* Set the mapping state to STATE. Only call this when about to
1471 emit some STATE bytes to the file. */
1472
1473 void
1474 mapping_state (enum mstate state)
1475 {
1476 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1477
1478 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1479
1480 if (mapstate == state)
1481 /* The mapping symbol has already been emitted.
1482 There is nothing else to do. */
1483 return;
1484 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1485 /* This case will be evaluated later in the next else. */
1486 return;
1487 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1488 {
1489 /* Only add the symbol if the offset is > 0:
1490 if we're at the first frag, check it's size > 0;
1491 if we're not at the first frag, then for sure
1492 the offset is > 0. */
1493 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1494 const int add_symbol = (frag_now != frag_first)
1495 || (frag_now_fix () > 0);
1496
1497 if (add_symbol)
1498 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1499 }
1500
1501 mapping_state_2 (state, 0);
1502 #undef TRANSITION
1503 }
1504
1505 /* Same as mapping_state, but MAX_CHARS bytes have already been
1506 allocated. Put the mapping symbol that far back. */
1507
1508 static void
1509 mapping_state_2 (enum mstate state, int max_chars)
1510 {
1511 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1512
1513 if (!SEG_NORMAL (now_seg))
1514 return;
1515
1516 if (mapstate == state)
1517 /* The mapping symbol has already been emitted.
1518 There is nothing else to do. */
1519 return;
1520
1521 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1522 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1523 }
1524 #else
1525 #define mapping_state(x) /* nothing */
1526 #define mapping_state_2(x, y) /* nothing */
1527 #endif
1528
1529 /* Directives: sectioning and alignment. */
1530
1531 static void
1532 s_bss (int ignore ATTRIBUTE_UNUSED)
1533 {
1534 /* We don't support putting frags in the BSS segment, we fake it by
1535 marking in_bss, then looking at s_skip for clues. */
1536 subseg_set (bss_section, 0);
1537 demand_empty_rest_of_line ();
1538 mapping_state (MAP_DATA);
1539 }
1540
1541 static void
1542 s_even (int ignore ATTRIBUTE_UNUSED)
1543 {
1544 /* Never make frag if expect extra pass. */
1545 if (!need_pass_2)
1546 frag_align (1, 0, 0);
1547
1548 record_alignment (now_seg, 1);
1549
1550 demand_empty_rest_of_line ();
1551 }
1552
1553 /* Directives: Literal pools. */
1554
1555 static literal_pool *
1556 find_literal_pool (int size)
1557 {
1558 literal_pool *pool;
1559
1560 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1561 {
1562 if (pool->section == now_seg
1563 && pool->sub_section == now_subseg && pool->size == size)
1564 break;
1565 }
1566
1567 return pool;
1568 }
1569
1570 static literal_pool *
1571 find_or_make_literal_pool (int size)
1572 {
1573 /* Next literal pool ID number. */
1574 static unsigned int latest_pool_num = 1;
1575 literal_pool *pool;
1576
1577 pool = find_literal_pool (size);
1578
1579 if (pool == NULL)
1580 {
1581 /* Create a new pool. */
1582 pool = xmalloc (sizeof (*pool));
1583 if (!pool)
1584 return NULL;
1585
1586 /* Currently we always put the literal pool in the current text
1587 section. If we were generating "small" model code where we
1588 knew that all code and initialised data was within 1MB then
1589 we could output literals to mergeable, read-only data
1590 sections. */
1591
1592 pool->next_free_entry = 0;
1593 pool->section = now_seg;
1594 pool->sub_section = now_subseg;
1595 pool->size = size;
1596 pool->next = list_of_pools;
1597 pool->symbol = NULL;
1598
1599 /* Add it to the list. */
1600 list_of_pools = pool;
1601 }
1602
1603 /* New pools, and emptied pools, will have a NULL symbol. */
1604 if (pool->symbol == NULL)
1605 {
1606 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1607 (valueT) 0, &zero_address_frag);
1608 pool->id = latest_pool_num++;
1609 }
1610
1611 /* Done. */
1612 return pool;
1613 }
1614
1615 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1616 Return TRUE on success, otherwise return FALSE. */
1617 static bfd_boolean
1618 add_to_lit_pool (expressionS *exp, int size)
1619 {
1620 literal_pool *pool;
1621 unsigned int entry;
1622
1623 pool = find_or_make_literal_pool (size);
1624
1625 /* Check if this literal value is already in the pool. */
1626 for (entry = 0; entry < pool->next_free_entry; entry++)
1627 {
1628 expressionS * litexp = & pool->literals[entry].exp;
1629
1630 if ((litexp->X_op == exp->X_op)
1631 && (exp->X_op == O_constant)
1632 && (litexp->X_add_number == exp->X_add_number)
1633 && (litexp->X_unsigned == exp->X_unsigned))
1634 break;
1635
1636 if ((litexp->X_op == exp->X_op)
1637 && (exp->X_op == O_symbol)
1638 && (litexp->X_add_number == exp->X_add_number)
1639 && (litexp->X_add_symbol == exp->X_add_symbol)
1640 && (litexp->X_op_symbol == exp->X_op_symbol))
1641 break;
1642 }
1643
1644 /* Do we need to create a new entry? */
1645 if (entry == pool->next_free_entry)
1646 {
1647 if (entry >= MAX_LITERAL_POOL_SIZE)
1648 {
1649 set_syntax_error (_("literal pool overflow"));
1650 return FALSE;
1651 }
1652
1653 pool->literals[entry].exp = *exp;
1654 pool->next_free_entry += 1;
1655 if (exp->X_op == O_big)
1656 {
1657 /* PR 16688: Bignums are held in a single global array. We must
1658 copy and preserve that value now, before it is overwritten. */
1659 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1660 memcpy (pool->literals[entry].bignum, generic_bignum,
1661 CHARS_PER_LITTLENUM * exp->X_add_number);
1662 }
1663 else
1664 pool->literals[entry].bignum = NULL;
1665 }
1666
1667 exp->X_op = O_symbol;
1668 exp->X_add_number = ((int) entry) * size;
1669 exp->X_add_symbol = pool->symbol;
1670
1671 return TRUE;
1672 }
1673
1674 /* Can't use symbol_new here, so have to create a symbol and then at
1675 a later date assign it a value. Thats what these functions do. */
1676
1677 static void
1678 symbol_locate (symbolS * symbolP,
1679 const char *name,/* It is copied, the caller can modify. */
1680 segT segment, /* Segment identifier (SEG_<something>). */
1681 valueT valu, /* Symbol value. */
1682 fragS * frag) /* Associated fragment. */
1683 {
1684 size_t name_length;
1685 char *preserved_copy_of_name;
1686
1687 name_length = strlen (name) + 1; /* +1 for \0. */
1688 obstack_grow (&notes, name, name_length);
1689 preserved_copy_of_name = obstack_finish (&notes);
1690
1691 #ifdef tc_canonicalize_symbol_name
1692 preserved_copy_of_name =
1693 tc_canonicalize_symbol_name (preserved_copy_of_name);
1694 #endif
1695
1696 S_SET_NAME (symbolP, preserved_copy_of_name);
1697
1698 S_SET_SEGMENT (symbolP, segment);
1699 S_SET_VALUE (symbolP, valu);
1700 symbol_clear_list_pointers (symbolP);
1701
1702 symbol_set_frag (symbolP, frag);
1703
1704 /* Link to end of symbol chain. */
1705 {
1706 extern int symbol_table_frozen;
1707
1708 if (symbol_table_frozen)
1709 abort ();
1710 }
1711
1712 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1713
1714 obj_symbol_new_hook (symbolP);
1715
1716 #ifdef tc_symbol_new_hook
1717 tc_symbol_new_hook (symbolP);
1718 #endif
1719
1720 #ifdef DEBUG_SYMS
1721 verify_symbol_chain (symbol_rootP, symbol_lastP);
1722 #endif /* DEBUG_SYMS */
1723 }
1724
1725
1726 static void
1727 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1728 {
1729 unsigned int entry;
1730 literal_pool *pool;
1731 char sym_name[20];
1732 int align;
1733
1734 for (align = 2; align <= 4; align++)
1735 {
1736 int size = 1 << align;
1737
1738 pool = find_literal_pool (size);
1739 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1740 continue;
1741
1742 mapping_state (MAP_DATA);
1743
1744 /* Align pool as you have word accesses.
1745 Only make a frag if we have to. */
1746 if (!need_pass_2)
1747 frag_align (align, 0, 0);
1748
1749 record_alignment (now_seg, align);
1750
1751 sprintf (sym_name, "$$lit_\002%x", pool->id);
1752
1753 symbol_locate (pool->symbol, sym_name, now_seg,
1754 (valueT) frag_now_fix (), frag_now);
1755 symbol_table_insert (pool->symbol);
1756
1757 for (entry = 0; entry < pool->next_free_entry; entry++)
1758 {
1759 expressionS * exp = & pool->literals[entry].exp;
1760
1761 if (exp->X_op == O_big)
1762 {
1763 /* PR 16688: Restore the global bignum value. */
1764 gas_assert (pool->literals[entry].bignum != NULL);
1765 memcpy (generic_bignum, pool->literals[entry].bignum,
1766 CHARS_PER_LITTLENUM * exp->X_add_number);
1767 }
1768
1769 /* First output the expression in the instruction to the pool. */
1770 emit_expr (exp, size); /* .word|.xword */
1771
1772 if (exp->X_op == O_big)
1773 {
1774 free (pool->literals[entry].bignum);
1775 pool->literals[entry].bignum = NULL;
1776 }
1777 }
1778
1779 /* Mark the pool as empty. */
1780 pool->next_free_entry = 0;
1781 pool->symbol = NULL;
1782 }
1783 }
1784
1785 #ifdef OBJ_ELF
1786 /* Forward declarations for functions below, in the MD interface
1787 section. */
1788 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1789 static struct reloc_table_entry * find_reloc_table_entry (char **);
1790
1791 /* Directives: Data. */
1792 /* N.B. the support for relocation suffix in this directive needs to be
1793 implemented properly. */
1794
1795 static void
1796 s_aarch64_elf_cons (int nbytes)
1797 {
1798 expressionS exp;
1799
1800 #ifdef md_flush_pending_output
1801 md_flush_pending_output ();
1802 #endif
1803
1804 if (is_it_end_of_statement ())
1805 {
1806 demand_empty_rest_of_line ();
1807 return;
1808 }
1809
1810 #ifdef md_cons_align
1811 md_cons_align (nbytes);
1812 #endif
1813
1814 mapping_state (MAP_DATA);
1815 do
1816 {
1817 struct reloc_table_entry *reloc;
1818
1819 expression (&exp);
1820
1821 if (exp.X_op != O_symbol)
1822 emit_expr (&exp, (unsigned int) nbytes);
1823 else
1824 {
1825 skip_past_char (&input_line_pointer, '#');
1826 if (skip_past_char (&input_line_pointer, ':'))
1827 {
1828 reloc = find_reloc_table_entry (&input_line_pointer);
1829 if (reloc == NULL)
1830 as_bad (_("unrecognized relocation suffix"));
1831 else
1832 as_bad (_("unimplemented relocation suffix"));
1833 ignore_rest_of_line ();
1834 return;
1835 }
1836 else
1837 emit_expr (&exp, (unsigned int) nbytes);
1838 }
1839 }
1840 while (*input_line_pointer++ == ',');
1841
1842 /* Put terminator back into stream. */
1843 input_line_pointer--;
1844 demand_empty_rest_of_line ();
1845 }
1846
1847 #endif /* OBJ_ELF */
1848
1849 /* Output a 32-bit word, but mark as an instruction. */
1850
1851 static void
1852 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1853 {
1854 expressionS exp;
1855
1856 #ifdef md_flush_pending_output
1857 md_flush_pending_output ();
1858 #endif
1859
1860 if (is_it_end_of_statement ())
1861 {
1862 demand_empty_rest_of_line ();
1863 return;
1864 }
1865
1866 if (!need_pass_2)
1867 frag_align_code (2, 0);
1868 #ifdef OBJ_ELF
1869 mapping_state (MAP_INSN);
1870 #endif
1871
1872 do
1873 {
1874 expression (&exp);
1875 if (exp.X_op != O_constant)
1876 {
1877 as_bad (_("constant expression required"));
1878 ignore_rest_of_line ();
1879 return;
1880 }
1881
1882 if (target_big_endian)
1883 {
1884 unsigned int val = exp.X_add_number;
1885 exp.X_add_number = SWAP_32 (val);
1886 }
1887 emit_expr (&exp, 4);
1888 }
1889 while (*input_line_pointer++ == ',');
1890
1891 /* Put terminator back into stream. */
1892 input_line_pointer--;
1893 demand_empty_rest_of_line ();
1894 }
1895
1896 #ifdef OBJ_ELF
1897 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1898
1899 static void
1900 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1901 {
1902 expressionS exp;
1903
1904 /* Since we're just labelling the code, there's no need to define a
1905 mapping symbol. */
1906 expression (&exp);
1907 /* Make sure there is enough room in this frag for the following
1908 blr. This trick only works if the blr follows immediately after
1909 the .tlsdesc directive. */
1910 frag_grow (4);
1911 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1912 BFD_RELOC_AARCH64_TLSDESC_CALL);
1913
1914 demand_empty_rest_of_line ();
1915 }
1916 #endif /* OBJ_ELF */
1917
1918 static void s_aarch64_arch (int);
1919 static void s_aarch64_cpu (int);
1920
1921 /* This table describes all the machine specific pseudo-ops the assembler
1922 has to support. The fields are:
1923 pseudo-op name without dot
1924 function to call to execute this pseudo-op
1925 Integer arg to pass to the function. */
1926
1927 const pseudo_typeS md_pseudo_table[] = {
1928 /* Never called because '.req' does not start a line. */
1929 {"req", s_req, 0},
1930 {"unreq", s_unreq, 0},
1931 {"bss", s_bss, 0},
1932 {"even", s_even, 0},
1933 {"ltorg", s_ltorg, 0},
1934 {"pool", s_ltorg, 0},
1935 {"cpu", s_aarch64_cpu, 0},
1936 {"arch", s_aarch64_arch, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressable as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2249 if NEED_LIBOPCODES is non-zero, the fixup will need
2250 assistance from the libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 };
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relavant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adrp_type;
2315 bfd_reloc_code_real_type movw_type;
2316 bfd_reloc_code_real_type add_type;
2317 bfd_reloc_code_real_type ldst_type;
2318 };
2319
2320 static struct reloc_table_entry reloc_table[] = {
2321 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2322 {"lo12", 0,
2323 0,
2324 0,
2325 BFD_RELOC_AARCH64_ADD_LO12,
2326 BFD_RELOC_AARCH64_LDST_LO12},
2327
2328 /* Higher 21 bits of pc-relative page offset: ADRP */
2329 {"pg_hi21", 1,
2330 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2331 0,
2332 0,
2333 0},
2334
2335 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2336 {"pg_hi21_nc", 1,
2337 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2338 0,
2339 0,
2340 0},
2341
2342 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2343 {"abs_g0", 0,
2344 0,
2345 BFD_RELOC_AARCH64_MOVW_G0,
2346 0,
2347 0},
2348
2349 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2350 {"abs_g0_s", 0,
2351 0,
2352 BFD_RELOC_AARCH64_MOVW_G0_S,
2353 0,
2354 0},
2355
2356 /* Less significant bits 0-15 of address/value: MOVK, no check */
2357 {"abs_g0_nc", 0,
2358 0,
2359 BFD_RELOC_AARCH64_MOVW_G0_NC,
2360 0,
2361 0},
2362
2363 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2364 {"abs_g1", 0,
2365 0,
2366 BFD_RELOC_AARCH64_MOVW_G1,
2367 0,
2368 0},
2369
2370 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2371 {"abs_g1_s", 0,
2372 0,
2373 BFD_RELOC_AARCH64_MOVW_G1_S,
2374 0,
2375 0},
2376
2377 /* Less significant bits 16-31 of address/value: MOVK, no check */
2378 {"abs_g1_nc", 0,
2379 0,
2380 BFD_RELOC_AARCH64_MOVW_G1_NC,
2381 0,
2382 0},
2383
2384 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2385 {"abs_g2", 0,
2386 0,
2387 BFD_RELOC_AARCH64_MOVW_G2,
2388 0,
2389 0},
2390
2391 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2392 {"abs_g2_s", 0,
2393 0,
2394 BFD_RELOC_AARCH64_MOVW_G2_S,
2395 0,
2396 0},
2397
2398 /* Less significant bits 32-47 of address/value: MOVK, no check */
2399 {"abs_g2_nc", 0,
2400 0,
2401 BFD_RELOC_AARCH64_MOVW_G2_NC,
2402 0,
2403 0},
2404
2405 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2406 {"abs_g3", 0,
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G3,
2409 0,
2410 0},
2411
2412 /* Get to the page containing GOT entry for a symbol. */
2413 {"got", 1,
2414 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2415 0,
2416 0,
2417 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2418
2419 /* 12 bit offset into the page containing GOT entry for that symbol. */
2420 {"got_lo12", 0,
2421 0,
2422 0,
2423 0,
2424 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2425
2426 /* Get to the page containing GOT TLS entry for a symbol */
2427 {"tlsgd", 0,
2428 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2429 0,
2430 0,
2431 0},
2432
2433 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2434 {"tlsgd_lo12", 0,
2435 0,
2436 0,
2437 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2438 0},
2439
2440 /* Get to the page containing GOT TLS entry for a symbol */
2441 {"tlsdesc", 0,
2442 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2443 0,
2444 0,
2445 0},
2446
2447 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2448 {"tlsdesc_lo12", 0,
2449 0,
2450 0,
2451 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2452 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2453
2454 /* Get to the page containing GOT TLS entry for a symbol */
2455 {"gottprel", 0,
2456 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2457 0,
2458 0,
2459 0},
2460
2461 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2462 {"gottprel_lo12", 0,
2463 0,
2464 0,
2465 0,
2466 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2467
2468 /* Get tp offset for a symbol. */
2469 {"tprel", 0,
2470 0,
2471 0,
2472 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2473 0},
2474
2475 /* Get tp offset for a symbol. */
2476 {"tprel_lo12", 0,
2477 0,
2478 0,
2479 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2480 0},
2481
2482 /* Get tp offset for a symbol. */
2483 {"tprel_hi12", 0,
2484 0,
2485 0,
2486 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2487 0},
2488
2489 /* Get tp offset for a symbol. */
2490 {"tprel_lo12_nc", 0,
2491 0,
2492 0,
2493 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2494 0},
2495
2496 /* Most significant bits 32-47 of address/value: MOVZ. */
2497 {"tprel_g2", 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2500 0,
2501 0},
2502
2503 /* Most significant bits 16-31 of address/value: MOVZ. */
2504 {"tprel_g1", 0,
2505 0,
2506 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2507 0,
2508 0},
2509
2510 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2511 {"tprel_g1_nc", 0,
2512 0,
2513 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2514 0,
2515 0},
2516
2517 /* Most significant bits 0-15 of address/value: MOVZ. */
2518 {"tprel_g0", 0,
2519 0,
2520 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2521 0,
2522 0},
2523
2524 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2525 {"tprel_g0_nc", 0,
2526 0,
2527 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2528 0,
2529 0},
2530 };
2531
2532 /* Given the address of a pointer pointing to the textual name of a
2533 relocation as may appear in assembler source, attempt to find its
2534 details in reloc_table. The pointer will be updated to the character
2535 after the trailing colon. On failure, NULL will be returned;
2536 otherwise return the reloc_table_entry. */
2537
2538 static struct reloc_table_entry *
2539 find_reloc_table_entry (char **str)
2540 {
2541 unsigned int i;
2542 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2543 {
2544 int length = strlen (reloc_table[i].name);
2545
2546 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2547 && (*str)[length] == ':')
2548 {
2549 *str += (length + 1);
2550 return &reloc_table[i];
2551 }
2552 }
2553
2554 return NULL;
2555 }
2556
2557 /* Mode argument to parse_shift and parser_shifter_operand. */
2558 enum parse_shift_mode
2559 {
2560 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2561 "#imm{,lsl #n}" */
2562 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2563 "#imm" */
2564 SHIFTED_LSL, /* bare "lsl #n" */
2565 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2566 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2567 };
2568
2569 /* Parse a <shift> operator on an AArch64 data processing instruction.
2570 Return TRUE on success; otherwise return FALSE. */
2571 static bfd_boolean
2572 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2573 {
2574 const struct aarch64_name_value_pair *shift_op;
2575 enum aarch64_modifier_kind kind;
2576 expressionS exp;
2577 int exp_has_prefix;
2578 char *s = *str;
2579 char *p = s;
2580
2581 for (p = *str; ISALPHA (*p); p++)
2582 ;
2583
2584 if (p == *str)
2585 {
2586 set_syntax_error (_("shift expression expected"));
2587 return FALSE;
2588 }
2589
2590 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2591
2592 if (shift_op == NULL)
2593 {
2594 set_syntax_error (_("shift operator expected"));
2595 return FALSE;
2596 }
2597
2598 kind = aarch64_get_operand_modifier (shift_op);
2599
2600 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2601 {
2602 set_syntax_error (_("invalid use of 'MSL'"));
2603 return FALSE;
2604 }
2605
2606 switch (mode)
2607 {
2608 case SHIFTED_LOGIC_IMM:
2609 if (aarch64_extend_operator_p (kind) == TRUE)
2610 {
2611 set_syntax_error (_("extending shift is not permitted"));
2612 return FALSE;
2613 }
2614 break;
2615
2616 case SHIFTED_ARITH_IMM:
2617 if (kind == AARCH64_MOD_ROR)
2618 {
2619 set_syntax_error (_("'ROR' shift is not permitted"));
2620 return FALSE;
2621 }
2622 break;
2623
2624 case SHIFTED_LSL:
2625 if (kind != AARCH64_MOD_LSL)
2626 {
2627 set_syntax_error (_("only 'LSL' shift is permitted"));
2628 return FALSE;
2629 }
2630 break;
2631
2632 case SHIFTED_REG_OFFSET:
2633 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2634 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2635 {
2636 set_fatal_syntax_error
2637 (_("invalid shift for the register offset addressing mode"));
2638 return FALSE;
2639 }
2640 break;
2641
2642 case SHIFTED_LSL_MSL:
2643 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2644 {
2645 set_syntax_error (_("invalid shift operator"));
2646 return FALSE;
2647 }
2648 break;
2649
2650 default:
2651 abort ();
2652 }
2653
2654 /* Whitespace can appear here if the next thing is a bare digit. */
2655 skip_whitespace (p);
2656
2657 /* Parse shift amount. */
2658 exp_has_prefix = 0;
2659 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2660 exp.X_op = O_absent;
2661 else
2662 {
2663 if (is_immediate_prefix (*p))
2664 {
2665 p++;
2666 exp_has_prefix = 1;
2667 }
2668 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2669 }
2670 if (exp.X_op == O_absent)
2671 {
2672 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2673 {
2674 set_syntax_error (_("missing shift amount"));
2675 return FALSE;
2676 }
2677 operand->shifter.amount = 0;
2678 }
2679 else if (exp.X_op != O_constant)
2680 {
2681 set_syntax_error (_("constant shift amount required"));
2682 return FALSE;
2683 }
2684 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2685 {
2686 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2687 return FALSE;
2688 }
2689 else
2690 {
2691 operand->shifter.amount = exp.X_add_number;
2692 operand->shifter.amount_present = 1;
2693 }
2694
2695 operand->shifter.operator_present = 1;
2696 operand->shifter.kind = kind;
2697
2698 *str = p;
2699 return TRUE;
2700 }
2701
2702 /* Parse a <shifter_operand> for a data processing instruction:
2703
2704 #<immediate>
2705 #<immediate>, LSL #imm
2706
2707 Validation of immediate operands is deferred to md_apply_fix.
2708
2709 Return TRUE on success; otherwise return FALSE. */
2710
2711 static bfd_boolean
2712 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2713 enum parse_shift_mode mode)
2714 {
2715 char *p;
2716
2717 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2718 return FALSE;
2719
2720 p = *str;
2721
2722 /* Accept an immediate expression. */
2723 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2724 return FALSE;
2725
2726 /* Accept optional LSL for arithmetic immediate values. */
2727 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2728 if (! parse_shift (&p, operand, SHIFTED_LSL))
2729 return FALSE;
2730
2731 /* Not accept any shifter for logical immediate values. */
2732 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2733 && parse_shift (&p, operand, mode))
2734 {
2735 set_syntax_error (_("unexpected shift operator"));
2736 return FALSE;
2737 }
2738
2739 *str = p;
2740 return TRUE;
2741 }
2742
2743 /* Parse a <shifter_operand> for a data processing instruction:
2744
2745 <Rm>
2746 <Rm>, <shift>
2747 #<immediate>
2748 #<immediate>, LSL #imm
2749
2750 where <shift> is handled by parse_shift above, and the last two
2751 cases are handled by the function above.
2752
2753 Validation of immediate operands is deferred to md_apply_fix.
2754
2755 Return TRUE on success; otherwise return FALSE. */
2756
2757 static bfd_boolean
2758 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2759 enum parse_shift_mode mode)
2760 {
2761 int reg;
2762 int isreg32, isregzero;
2763 enum aarch64_operand_class opd_class
2764 = aarch64_get_operand_class (operand->type);
2765
2766 if ((reg =
2767 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2768 {
2769 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2770 {
2771 set_syntax_error (_("unexpected register in the immediate operand"));
2772 return FALSE;
2773 }
2774
2775 if (!isregzero && reg == REG_SP)
2776 {
2777 set_syntax_error (BAD_SP);
2778 return FALSE;
2779 }
2780
2781 operand->reg.regno = reg;
2782 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2783
2784 /* Accept optional shift operation on register. */
2785 if (! skip_past_comma (str))
2786 return TRUE;
2787
2788 if (! parse_shift (str, operand, mode))
2789 return FALSE;
2790
2791 return TRUE;
2792 }
2793 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2794 {
2795 set_syntax_error
2796 (_("integer register expected in the extended/shifted operand "
2797 "register"));
2798 return FALSE;
2799 }
2800
2801 /* We have a shifted immediate variable. */
2802 return parse_shifter_operand_imm (str, operand, mode);
2803 }
2804
2805 /* Return TRUE on success; return FALSE otherwise. */
2806
2807 static bfd_boolean
2808 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2809 enum parse_shift_mode mode)
2810 {
2811 char *p = *str;
2812
2813 /* Determine if we have the sequence of characters #: or just :
2814 coming next. If we do, then we check for a :rello: relocation
2815 modifier. If we don't, punt the whole lot to
2816 parse_shifter_operand. */
2817
2818 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2819 {
2820 struct reloc_table_entry *entry;
2821
2822 if (p[0] == '#')
2823 p += 2;
2824 else
2825 p++;
2826 *str = p;
2827
2828 /* Try to parse a relocation. Anything else is an error. */
2829 if (!(entry = find_reloc_table_entry (str)))
2830 {
2831 set_syntax_error (_("unknown relocation modifier"));
2832 return FALSE;
2833 }
2834
2835 if (entry->add_type == 0)
2836 {
2837 set_syntax_error
2838 (_("this relocation modifier is not allowed on this instruction"));
2839 return FALSE;
2840 }
2841
2842 /* Save str before we decompose it. */
2843 p = *str;
2844
2845 /* Next, we parse the expression. */
2846 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2847 return FALSE;
2848
2849 /* Record the relocation type (use the ADD variant here). */
2850 inst.reloc.type = entry->add_type;
2851 inst.reloc.pc_rel = entry->pc_rel;
2852
2853 /* If str is empty, we've reached the end, stop here. */
2854 if (**str == '\0')
2855 return TRUE;
2856
2857 /* Otherwise, we have a shifted reloc modifier, so rewind to
2858 recover the variable name and continue parsing for the shifter. */
2859 *str = p;
2860 return parse_shifter_operand_imm (str, operand, mode);
2861 }
2862
2863 return parse_shifter_operand (str, operand, mode);
2864 }
2865
2866 /* Parse all forms of an address expression. Information is written
2867 to *OPERAND and/or inst.reloc.
2868
2869 The A64 instruction set has the following addressing modes:
2870
2871 Offset
2872 [base] // in SIMD ld/st structure
2873 [base{,#0}] // in ld/st exclusive
2874 [base{,#imm}]
2875 [base,Xm{,LSL #imm}]
2876 [base,Xm,SXTX {#imm}]
2877 [base,Wm,(S|U)XTW {#imm}]
2878 Pre-indexed
2879 [base,#imm]!
2880 Post-indexed
2881 [base],#imm
2882 [base],Xm // in SIMD ld/st structure
2883 PC-relative (literal)
2884 label
2885 =immediate
2886
2887 (As a convenience, the notation "=immediate" is permitted in conjunction
2888 with the pc-relative literal load instructions to automatically place an
2889 immediate value or symbolic address in a nearby literal pool and generate
2890 a hidden label which references it.)
2891
2892 Upon a successful parsing, the address structure in *OPERAND will be
2893 filled in the following way:
2894
2895 .base_regno = <base>
2896 .offset.is_reg // 1 if the offset is a register
2897 .offset.imm = <imm>
2898 .offset.regno = <Rm>
2899
2900 For different addressing modes defined in the A64 ISA:
2901
2902 Offset
2903 .pcrel=0; .preind=1; .postind=0; .writeback=0
2904 Pre-indexed
2905 .pcrel=0; .preind=1; .postind=0; .writeback=1
2906 Post-indexed
2907 .pcrel=0; .preind=0; .postind=1; .writeback=1
2908 PC-relative (literal)
2909 .pcrel=1; .preind=1; .postind=0; .writeback=0
2910
2911 The shift/extension information, if any, will be stored in .shifter.
2912
2913 It is the caller's responsibility to check for addressing modes not
2914 supported by the instruction, and to set inst.reloc.type. */
2915
2916 static bfd_boolean
2917 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2918 int accept_reg_post_index)
2919 {
2920 char *p = *str;
2921 int reg;
2922 int isreg32, isregzero;
2923 expressionS *exp = &inst.reloc.exp;
2924
2925 if (! skip_past_char (&p, '['))
2926 {
2927 /* =immediate or label. */
2928 operand->addr.pcrel = 1;
2929 operand->addr.preind = 1;
2930
2931 /* #:<reloc_op>:<symbol> */
2932 skip_past_char (&p, '#');
2933 if (reloc && skip_past_char (&p, ':'))
2934 {
2935 struct reloc_table_entry *entry;
2936
2937 /* Try to parse a relocation modifier. Anything else is
2938 an error. */
2939 entry = find_reloc_table_entry (&p);
2940 if (! entry)
2941 {
2942 set_syntax_error (_("unknown relocation modifier"));
2943 return FALSE;
2944 }
2945
2946 if (entry->ldst_type == 0)
2947 {
2948 set_syntax_error
2949 (_("this relocation modifier is not allowed on this "
2950 "instruction"));
2951 return FALSE;
2952 }
2953
2954 /* #:<reloc_op>: */
2955 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2956 {
2957 set_syntax_error (_("invalid relocation expression"));
2958 return FALSE;
2959 }
2960
2961 /* #:<reloc_op>:<expr> */
2962 /* Record the load/store relocation type. */
2963 inst.reloc.type = entry->ldst_type;
2964 inst.reloc.pc_rel = entry->pc_rel;
2965 }
2966 else
2967 {
2968
2969 if (skip_past_char (&p, '='))
2970 /* =immediate; need to generate the literal in the literal pool. */
2971 inst.gen_lit_pool = 1;
2972
2973 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2974 {
2975 set_syntax_error (_("invalid address"));
2976 return FALSE;
2977 }
2978 }
2979
2980 *str = p;
2981 return TRUE;
2982 }
2983
2984 /* [ */
2985
2986 /* Accept SP and reject ZR */
2987 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2988 if (reg == PARSE_FAIL || isreg32)
2989 {
2990 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2991 return FALSE;
2992 }
2993 operand->addr.base_regno = reg;
2994
2995 /* [Xn */
2996 if (skip_past_comma (&p))
2997 {
2998 /* [Xn, */
2999 operand->addr.preind = 1;
3000
3001 /* Reject SP and accept ZR */
3002 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3003 if (reg != PARSE_FAIL)
3004 {
3005 /* [Xn,Rm */
3006 operand->addr.offset.regno = reg;
3007 operand->addr.offset.is_reg = 1;
3008 /* Shifted index. */
3009 if (skip_past_comma (&p))
3010 {
3011 /* [Xn,Rm, */
3012 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3013 /* Use the diagnostics set in parse_shift, so not set new
3014 error message here. */
3015 return FALSE;
3016 }
3017 /* We only accept:
3018 [base,Xm{,LSL #imm}]
3019 [base,Xm,SXTX {#imm}]
3020 [base,Wm,(S|U)XTW {#imm}] */
3021 if (operand->shifter.kind == AARCH64_MOD_NONE
3022 || operand->shifter.kind == AARCH64_MOD_LSL
3023 || operand->shifter.kind == AARCH64_MOD_SXTX)
3024 {
3025 if (isreg32)
3026 {
3027 set_syntax_error (_("invalid use of 32-bit register offset"));
3028 return FALSE;
3029 }
3030 }
3031 else if (!isreg32)
3032 {
3033 set_syntax_error (_("invalid use of 64-bit register offset"));
3034 return FALSE;
3035 }
3036 }
3037 else
3038 {
3039 /* [Xn,#:<reloc_op>:<symbol> */
3040 skip_past_char (&p, '#');
3041 if (reloc && skip_past_char (&p, ':'))
3042 {
3043 struct reloc_table_entry *entry;
3044
3045 /* Try to parse a relocation modifier. Anything else is
3046 an error. */
3047 if (!(entry = find_reloc_table_entry (&p)))
3048 {
3049 set_syntax_error (_("unknown relocation modifier"));
3050 return FALSE;
3051 }
3052
3053 if (entry->ldst_type == 0)
3054 {
3055 set_syntax_error
3056 (_("this relocation modifier is not allowed on this "
3057 "instruction"));
3058 return FALSE;
3059 }
3060
3061 /* [Xn,#:<reloc_op>: */
3062 /* We now have the group relocation table entry corresponding to
3063 the name in the assembler source. Next, we parse the
3064 expression. */
3065 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3066 {
3067 set_syntax_error (_("invalid relocation expression"));
3068 return FALSE;
3069 }
3070
3071 /* [Xn,#:<reloc_op>:<expr> */
3072 /* Record the load/store relocation type. */
3073 inst.reloc.type = entry->ldst_type;
3074 inst.reloc.pc_rel = entry->pc_rel;
3075 }
3076 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3077 {
3078 set_syntax_error (_("invalid expression in the address"));
3079 return FALSE;
3080 }
3081 /* [Xn,<expr> */
3082 }
3083 }
3084
3085 if (! skip_past_char (&p, ']'))
3086 {
3087 set_syntax_error (_("']' expected"));
3088 return FALSE;
3089 }
3090
3091 if (skip_past_char (&p, '!'))
3092 {
3093 if (operand->addr.preind && operand->addr.offset.is_reg)
3094 {
3095 set_syntax_error (_("register offset not allowed in pre-indexed "
3096 "addressing mode"));
3097 return FALSE;
3098 }
3099 /* [Xn]! */
3100 operand->addr.writeback = 1;
3101 }
3102 else if (skip_past_comma (&p))
3103 {
3104 /* [Xn], */
3105 operand->addr.postind = 1;
3106 operand->addr.writeback = 1;
3107
3108 if (operand->addr.preind)
3109 {
3110 set_syntax_error (_("cannot combine pre- and post-indexing"));
3111 return FALSE;
3112 }
3113
3114 if (accept_reg_post_index
3115 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3116 &isregzero)) != PARSE_FAIL)
3117 {
3118 /* [Xn],Xm */
3119 if (isreg32)
3120 {
3121 set_syntax_error (_("invalid 32-bit register offset"));
3122 return FALSE;
3123 }
3124 operand->addr.offset.regno = reg;
3125 operand->addr.offset.is_reg = 1;
3126 }
3127 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3128 {
3129 /* [Xn],#expr */
3130 set_syntax_error (_("invalid expression in the address"));
3131 return FALSE;
3132 }
3133 }
3134
3135 /* If at this point neither .preind nor .postind is set, we have a
3136 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3137 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3138 {
3139 if (operand->addr.writeback)
3140 {
3141 /* Reject [Rn]! */
3142 set_syntax_error (_("missing offset in the pre-indexed address"));
3143 return FALSE;
3144 }
3145 operand->addr.preind = 1;
3146 inst.reloc.exp.X_op = O_constant;
3147 inst.reloc.exp.X_add_number = 0;
3148 }
3149
3150 *str = p;
3151 return TRUE;
3152 }
3153
3154 /* Return TRUE on success; otherwise return FALSE. */
3155 static bfd_boolean
3156 parse_address (char **str, aarch64_opnd_info *operand,
3157 int accept_reg_post_index)
3158 {
3159 return parse_address_main (str, operand, 0, accept_reg_post_index);
3160 }
3161
3162 /* Return TRUE on success; otherwise return FALSE. */
3163 static bfd_boolean
3164 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3165 {
3166 return parse_address_main (str, operand, 1, 0);
3167 }
3168
3169 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3170 Return TRUE on success; otherwise return FALSE. */
3171 static bfd_boolean
3172 parse_half (char **str, int *internal_fixup_p)
3173 {
3174 char *p, *saved;
3175 int dummy;
3176
3177 p = *str;
3178 skip_past_char (&p, '#');
3179
3180 gas_assert (internal_fixup_p);
3181 *internal_fixup_p = 0;
3182
3183 if (*p == ':')
3184 {
3185 struct reloc_table_entry *entry;
3186
3187 /* Try to parse a relocation. Anything else is an error. */
3188 ++p;
3189 if (!(entry = find_reloc_table_entry (&p)))
3190 {
3191 set_syntax_error (_("unknown relocation modifier"));
3192 return FALSE;
3193 }
3194
3195 if (entry->movw_type == 0)
3196 {
3197 set_syntax_error
3198 (_("this relocation modifier is not allowed on this instruction"));
3199 return FALSE;
3200 }
3201
3202 inst.reloc.type = entry->movw_type;
3203 }
3204 else
3205 *internal_fixup_p = 1;
3206
3207 /* Avoid parsing a register as a general symbol. */
3208 saved = p;
3209 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3210 return FALSE;
3211 p = saved;
3212
3213 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3214 return FALSE;
3215
3216 *str = p;
3217 return TRUE;
3218 }
3219
3220 /* Parse an operand for an ADRP instruction:
3221 ADRP <Xd>, <label>
3222 Return TRUE on success; otherwise return FALSE. */
3223
3224 static bfd_boolean
3225 parse_adrp (char **str)
3226 {
3227 char *p;
3228
3229 p = *str;
3230 if (*p == ':')
3231 {
3232 struct reloc_table_entry *entry;
3233
3234 /* Try to parse a relocation. Anything else is an error. */
3235 ++p;
3236 if (!(entry = find_reloc_table_entry (&p)))
3237 {
3238 set_syntax_error (_("unknown relocation modifier"));
3239 return FALSE;
3240 }
3241
3242 if (entry->adrp_type == 0)
3243 {
3244 set_syntax_error
3245 (_("this relocation modifier is not allowed on this instruction"));
3246 return FALSE;
3247 }
3248
3249 inst.reloc.type = entry->adrp_type;
3250 }
3251 else
3252 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3253
3254 inst.reloc.pc_rel = 1;
3255
3256 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3257 return FALSE;
3258
3259 *str = p;
3260 return TRUE;
3261 }
3262
3263 /* Miscellaneous. */
3264
3265 /* Parse an option for a preload instruction. Returns the encoding for the
3266 option, or PARSE_FAIL. */
3267
3268 static int
3269 parse_pldop (char **str)
3270 {
3271 char *p, *q;
3272 const struct aarch64_name_value_pair *o;
3273
3274 p = q = *str;
3275 while (ISALNUM (*q))
3276 q++;
3277
3278 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3279 if (!o)
3280 return PARSE_FAIL;
3281
3282 *str = q;
3283 return o->value;
3284 }
3285
3286 /* Parse an option for a barrier instruction. Returns the encoding for the
3287 option, or PARSE_FAIL. */
3288
3289 static int
3290 parse_barrier (char **str)
3291 {
3292 char *p, *q;
3293 const asm_barrier_opt *o;
3294
3295 p = q = *str;
3296 while (ISALPHA (*q))
3297 q++;
3298
3299 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3300 if (!o)
3301 return PARSE_FAIL;
3302
3303 *str = q;
3304 return o->value;
3305 }
3306
3307 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3308 Returns the encoding for the option, or PARSE_FAIL.
3309
3310 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3311 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3312
3313 static int
3314 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3315 {
3316 char *p, *q;
3317 char buf[32];
3318 const aarch64_sys_reg *o;
3319 int value;
3320
3321 p = buf;
3322 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3323 if (p < buf + 31)
3324 *p++ = TOLOWER (*q);
3325 *p = '\0';
3326 /* Assert that BUF be large enough. */
3327 gas_assert (p - buf == q - *str);
3328
3329 o = hash_find (sys_regs, buf);
3330 if (!o)
3331 {
3332 if (!imple_defined_p)
3333 return PARSE_FAIL;
3334 else
3335 {
3336 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3337 unsigned int op0, op1, cn, cm, op2;
3338
3339 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3340 != 5)
3341 return PARSE_FAIL;
3342 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3343 return PARSE_FAIL;
3344 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3345 }
3346 }
3347 else
3348 {
3349 if (aarch64_sys_reg_deprecated_p (o))
3350 as_warn (_("system register name '%s' is deprecated and may be "
3351 "removed in a future release"), buf);
3352 value = o->value;
3353 }
3354
3355 *str = q;
3356 return value;
3357 }
3358
3359 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3360 for the option, or NULL. */
3361
3362 static const aarch64_sys_ins_reg *
3363 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3364 {
3365 char *p, *q;
3366 char buf[32];
3367 const aarch64_sys_ins_reg *o;
3368
3369 p = buf;
3370 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3371 if (p < buf + 31)
3372 *p++ = TOLOWER (*q);
3373 *p = '\0';
3374
3375 o = hash_find (sys_ins_regs, buf);
3376 if (!o)
3377 return NULL;
3378
3379 *str = q;
3380 return o;
3381 }
3382 \f
3383 #define po_char_or_fail(chr) do { \
3384 if (! skip_past_char (&str, chr)) \
3385 goto failure; \
3386 } while (0)
3387
3388 #define po_reg_or_fail(regtype) do { \
3389 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3390 if (val == PARSE_FAIL) \
3391 { \
3392 set_default_error (); \
3393 goto failure; \
3394 } \
3395 } while (0)
3396
3397 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3398 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3399 &isreg32, &isregzero); \
3400 if (val == PARSE_FAIL) \
3401 { \
3402 set_default_error (); \
3403 goto failure; \
3404 } \
3405 info->reg.regno = val; \
3406 if (isreg32) \
3407 info->qualifier = AARCH64_OPND_QLF_W; \
3408 else \
3409 info->qualifier = AARCH64_OPND_QLF_X; \
3410 } while (0)
3411
3412 #define po_imm_nc_or_fail() do { \
3413 if (! parse_constant_immediate (&str, &val)) \
3414 goto failure; \
3415 } while (0)
3416
3417 #define po_imm_or_fail(min, max) do { \
3418 if (! parse_constant_immediate (&str, &val)) \
3419 goto failure; \
3420 if (val < min || val > max) \
3421 { \
3422 set_fatal_syntax_error (_("immediate value out of range "\
3423 #min " to "#max)); \
3424 goto failure; \
3425 } \
3426 } while (0)
3427
3428 #define po_misc_or_fail(expr) do { \
3429 if (!expr) \
3430 goto failure; \
3431 } while (0)
3432 \f
3433 /* encode the 12-bit imm field of Add/sub immediate */
3434 static inline uint32_t
3435 encode_addsub_imm (uint32_t imm)
3436 {
3437 return imm << 10;
3438 }
3439
3440 /* encode the shift amount field of Add/sub immediate */
3441 static inline uint32_t
3442 encode_addsub_imm_shift_amount (uint32_t cnt)
3443 {
3444 return cnt << 22;
3445 }
3446
3447
3448 /* encode the imm field of Adr instruction */
3449 static inline uint32_t
3450 encode_adr_imm (uint32_t imm)
3451 {
3452 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3453 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3454 }
3455
3456 /* encode the immediate field of Move wide immediate */
3457 static inline uint32_t
3458 encode_movw_imm (uint32_t imm)
3459 {
3460 return imm << 5;
3461 }
3462
3463 /* encode the 26-bit offset of unconditional branch */
3464 static inline uint32_t
3465 encode_branch_ofs_26 (uint32_t ofs)
3466 {
3467 return ofs & ((1 << 26) - 1);
3468 }
3469
3470 /* encode the 19-bit offset of conditional branch and compare & branch */
3471 static inline uint32_t
3472 encode_cond_branch_ofs_19 (uint32_t ofs)
3473 {
3474 return (ofs & ((1 << 19) - 1)) << 5;
3475 }
3476
3477 /* encode the 19-bit offset of ld literal */
3478 static inline uint32_t
3479 encode_ld_lit_ofs_19 (uint32_t ofs)
3480 {
3481 return (ofs & ((1 << 19) - 1)) << 5;
3482 }
3483
3484 /* Encode the 14-bit offset of test & branch. */
3485 static inline uint32_t
3486 encode_tst_branch_ofs_14 (uint32_t ofs)
3487 {
3488 return (ofs & ((1 << 14) - 1)) << 5;
3489 }
3490
3491 /* Encode the 16-bit imm field of svc/hvc/smc. */
3492 static inline uint32_t
3493 encode_svc_imm (uint32_t imm)
3494 {
3495 return imm << 5;
3496 }
3497
3498 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3499 static inline uint32_t
3500 reencode_addsub_switch_add_sub (uint32_t opcode)
3501 {
3502 return opcode ^ (1 << 30);
3503 }
3504
3505 static inline uint32_t
3506 reencode_movzn_to_movz (uint32_t opcode)
3507 {
3508 return opcode | (1 << 30);
3509 }
3510
3511 static inline uint32_t
3512 reencode_movzn_to_movn (uint32_t opcode)
3513 {
3514 return opcode & ~(1 << 30);
3515 }
3516
3517 /* Overall per-instruction processing. */
3518
3519 /* We need to be able to fix up arbitrary expressions in some statements.
3520 This is so that we can handle symbols that are an arbitrary distance from
3521 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3522 which returns part of an address in a form which will be valid for
3523 a data instruction. We do this by pushing the expression into a symbol
3524 in the expr_section, and creating a fix for that. */
3525
3526 static fixS *
3527 fix_new_aarch64 (fragS * frag,
3528 int where,
3529 short int size, expressionS * exp, int pc_rel, int reloc)
3530 {
3531 fixS *new_fix;
3532
3533 switch (exp->X_op)
3534 {
3535 case O_constant:
3536 case O_symbol:
3537 case O_add:
3538 case O_subtract:
3539 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3540 break;
3541
3542 default:
3543 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3544 pc_rel, reloc);
3545 break;
3546 }
3547 return new_fix;
3548 }
3549 \f
3550 /* Diagnostics on operands errors. */
3551
3552 /* By default, output verbose error message.
3553 Disable the verbose error message by -mno-verbose-error. */
3554 static int verbose_error_p = 1;
3555
3556 #ifdef DEBUG_AARCH64
3557 /* N.B. this is only for the purpose of debugging. */
3558 const char* operand_mismatch_kind_names[] =
3559 {
3560 "AARCH64_OPDE_NIL",
3561 "AARCH64_OPDE_RECOVERABLE",
3562 "AARCH64_OPDE_SYNTAX_ERROR",
3563 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3564 "AARCH64_OPDE_INVALID_VARIANT",
3565 "AARCH64_OPDE_OUT_OF_RANGE",
3566 "AARCH64_OPDE_UNALIGNED",
3567 "AARCH64_OPDE_REG_LIST",
3568 "AARCH64_OPDE_OTHER_ERROR",
3569 };
3570 #endif /* DEBUG_AARCH64 */
3571
3572 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3573
3574 When multiple errors of different kinds are found in the same assembly
3575 line, only the error of the highest severity will be picked up for
3576 issuing the diagnostics. */
3577
3578 static inline bfd_boolean
3579 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3580 enum aarch64_operand_error_kind rhs)
3581 {
3582 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3583 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3584 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3585 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3586 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3587 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3588 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3589 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3590 return lhs > rhs;
3591 }
3592
3593 /* Helper routine to get the mnemonic name from the assembly instruction
3594 line; should only be called for the diagnosis purpose, as there is
3595 string copy operation involved, which may affect the runtime
3596 performance if used in elsewhere. */
3597
3598 static const char*
3599 get_mnemonic_name (const char *str)
3600 {
3601 static char mnemonic[32];
3602 char *ptr;
3603
3604 /* Get the first 15 bytes and assume that the full name is included. */
3605 strncpy (mnemonic, str, 31);
3606 mnemonic[31] = '\0';
3607
3608 /* Scan up to the end of the mnemonic, which must end in white space,
3609 '.', or end of string. */
3610 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3611 ;
3612
3613 *ptr = '\0';
3614
3615 /* Append '...' to the truncated long name. */
3616 if (ptr - mnemonic == 31)
3617 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3618
3619 return mnemonic;
3620 }
3621
3622 static void
3623 reset_aarch64_instruction (aarch64_instruction *instruction)
3624 {
3625 memset (instruction, '\0', sizeof (aarch64_instruction));
3626 instruction->reloc.type = BFD_RELOC_UNUSED;
3627 }
3628
3629 /* Data strutures storing one user error in the assembly code related to
3630 operands. */
3631
3632 struct operand_error_record
3633 {
3634 const aarch64_opcode *opcode;
3635 aarch64_operand_error detail;
3636 struct operand_error_record *next;
3637 };
3638
3639 typedef struct operand_error_record operand_error_record;
3640
3641 struct operand_errors
3642 {
3643 operand_error_record *head;
3644 operand_error_record *tail;
3645 };
3646
3647 typedef struct operand_errors operand_errors;
3648
3649 /* Top-level data structure reporting user errors for the current line of
3650 the assembly code.
3651 The way md_assemble works is that all opcodes sharing the same mnemonic
3652 name are iterated to find a match to the assembly line. In this data
3653 structure, each of the such opcodes will have one operand_error_record
3654 allocated and inserted. In other words, excessive errors related with
3655 a single opcode are disregarded. */
3656 operand_errors operand_error_report;
3657
3658 /* Free record nodes. */
3659 static operand_error_record *free_opnd_error_record_nodes = NULL;
3660
3661 /* Initialize the data structure that stores the operand mismatch
3662 information on assembling one line of the assembly code. */
3663 static void
3664 init_operand_error_report (void)
3665 {
3666 if (operand_error_report.head != NULL)
3667 {
3668 gas_assert (operand_error_report.tail != NULL);
3669 operand_error_report.tail->next = free_opnd_error_record_nodes;
3670 free_opnd_error_record_nodes = operand_error_report.head;
3671 operand_error_report.head = NULL;
3672 operand_error_report.tail = NULL;
3673 return;
3674 }
3675 gas_assert (operand_error_report.tail == NULL);
3676 }
3677
3678 /* Return TRUE if some operand error has been recorded during the
3679 parsing of the current assembly line using the opcode *OPCODE;
3680 otherwise return FALSE. */
3681 static inline bfd_boolean
3682 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3683 {
3684 operand_error_record *record = operand_error_report.head;
3685 return record && record->opcode == opcode;
3686 }
3687
3688 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3689 OPCODE field is initialized with OPCODE.
3690 N.B. only one record for each opcode, i.e. the maximum of one error is
3691 recorded for each instruction template. */
3692
3693 static void
3694 add_operand_error_record (const operand_error_record* new_record)
3695 {
3696 const aarch64_opcode *opcode = new_record->opcode;
3697 operand_error_record* record = operand_error_report.head;
3698
3699 /* The record may have been created for this opcode. If not, we need
3700 to prepare one. */
3701 if (! opcode_has_operand_error_p (opcode))
3702 {
3703 /* Get one empty record. */
3704 if (free_opnd_error_record_nodes == NULL)
3705 {
3706 record = xmalloc (sizeof (operand_error_record));
3707 if (record == NULL)
3708 abort ();
3709 }
3710 else
3711 {
3712 record = free_opnd_error_record_nodes;
3713 free_opnd_error_record_nodes = record->next;
3714 }
3715 record->opcode = opcode;
3716 /* Insert at the head. */
3717 record->next = operand_error_report.head;
3718 operand_error_report.head = record;
3719 if (operand_error_report.tail == NULL)
3720 operand_error_report.tail = record;
3721 }
3722 else if (record->detail.kind != AARCH64_OPDE_NIL
3723 && record->detail.index <= new_record->detail.index
3724 && operand_error_higher_severity_p (record->detail.kind,
3725 new_record->detail.kind))
3726 {
3727 /* In the case of multiple errors found on operands related with a
3728 single opcode, only record the error of the leftmost operand and
3729 only if the error is of higher severity. */
3730 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3731 " the existing error %s on operand %d",
3732 operand_mismatch_kind_names[new_record->detail.kind],
3733 new_record->detail.index,
3734 operand_mismatch_kind_names[record->detail.kind],
3735 record->detail.index);
3736 return;
3737 }
3738
3739 record->detail = new_record->detail;
3740 }
3741
3742 static inline void
3743 record_operand_error_info (const aarch64_opcode *opcode,
3744 aarch64_operand_error *error_info)
3745 {
3746 operand_error_record record;
3747 record.opcode = opcode;
3748 record.detail = *error_info;
3749 add_operand_error_record (&record);
3750 }
3751
3752 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3753 error message *ERROR, for operand IDX (count from 0). */
3754
3755 static void
3756 record_operand_error (const aarch64_opcode *opcode, int idx,
3757 enum aarch64_operand_error_kind kind,
3758 const char* error)
3759 {
3760 aarch64_operand_error info;
3761 memset(&info, 0, sizeof (info));
3762 info.index = idx;
3763 info.kind = kind;
3764 info.error = error;
3765 record_operand_error_info (opcode, &info);
3766 }
3767
3768 static void
3769 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3770 enum aarch64_operand_error_kind kind,
3771 const char* error, const int *extra_data)
3772 {
3773 aarch64_operand_error info;
3774 info.index = idx;
3775 info.kind = kind;
3776 info.error = error;
3777 info.data[0] = extra_data[0];
3778 info.data[1] = extra_data[1];
3779 info.data[2] = extra_data[2];
3780 record_operand_error_info (opcode, &info);
3781 }
3782
3783 static void
3784 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3785 const char* error, int lower_bound,
3786 int upper_bound)
3787 {
3788 int data[3] = {lower_bound, upper_bound, 0};
3789 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3790 error, data);
3791 }
3792
3793 /* Remove the operand error record for *OPCODE. */
3794 static void ATTRIBUTE_UNUSED
3795 remove_operand_error_record (const aarch64_opcode *opcode)
3796 {
3797 if (opcode_has_operand_error_p (opcode))
3798 {
3799 operand_error_record* record = operand_error_report.head;
3800 gas_assert (record != NULL && operand_error_report.tail != NULL);
3801 operand_error_report.head = record->next;
3802 record->next = free_opnd_error_record_nodes;
3803 free_opnd_error_record_nodes = record;
3804 if (operand_error_report.head == NULL)
3805 {
3806 gas_assert (operand_error_report.tail == record);
3807 operand_error_report.tail = NULL;
3808 }
3809 }
3810 }
3811
3812 /* Given the instruction in *INSTR, return the index of the best matched
3813 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3814
3815 Return -1 if there is no qualifier sequence; return the first match
3816 if there is multiple matches found. */
3817
3818 static int
3819 find_best_match (const aarch64_inst *instr,
3820 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3821 {
3822 int i, num_opnds, max_num_matched, idx;
3823
3824 num_opnds = aarch64_num_of_operands (instr->opcode);
3825 if (num_opnds == 0)
3826 {
3827 DEBUG_TRACE ("no operand");
3828 return -1;
3829 }
3830
3831 max_num_matched = 0;
3832 idx = -1;
3833
3834 /* For each pattern. */
3835 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3836 {
3837 int j, num_matched;
3838 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3839
3840 /* Most opcodes has much fewer patterns in the list. */
3841 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3842 {
3843 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3844 if (i != 0 && idx == -1)
3845 /* If nothing has been matched, return the 1st sequence. */
3846 idx = 0;
3847 break;
3848 }
3849
3850 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3851 if (*qualifiers == instr->operands[j].qualifier)
3852 ++num_matched;
3853
3854 if (num_matched > max_num_matched)
3855 {
3856 max_num_matched = num_matched;
3857 idx = i;
3858 }
3859 }
3860
3861 DEBUG_TRACE ("return with %d", idx);
3862 return idx;
3863 }
3864
3865 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
3866 corresponding operands in *INSTR. */
3867
3868 static inline void
3869 assign_qualifier_sequence (aarch64_inst *instr,
3870 const aarch64_opnd_qualifier_t *qualifiers)
3871 {
3872 int i = 0;
3873 int num_opnds = aarch64_num_of_operands (instr->opcode);
3874 gas_assert (num_opnds);
3875 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3876 instr->operands[i].qualifier = *qualifiers;
3877 }
3878
3879 /* Print operands for the diagnosis purpose. */
3880
3881 static void
3882 print_operands (char *buf, const aarch64_opcode *opcode,
3883 const aarch64_opnd_info *opnds)
3884 {
3885 int i;
3886
3887 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3888 {
3889 const size_t size = 128;
3890 char str[size];
3891
3892 /* We regard the opcode operand info more, however we also look into
3893 the inst->operands to support the disassembling of the optional
3894 operand.
3895 The two operand code should be the same in all cases, apart from
3896 when the operand can be optional. */
3897 if (opcode->operands[i] == AARCH64_OPND_NIL
3898 || opnds[i].type == AARCH64_OPND_NIL)
3899 break;
3900
3901 /* Generate the operand string in STR. */
3902 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3903
3904 /* Delimiter. */
3905 if (str[0] != '\0')
3906 strcat (buf, i == 0 ? " " : ",");
3907
3908 /* Append the operand string. */
3909 strcat (buf, str);
3910 }
3911 }
3912
3913 /* Send to stderr a string as information. */
3914
3915 static void
3916 output_info (const char *format, ...)
3917 {
3918 char *file;
3919 unsigned int line;
3920 va_list args;
3921
3922 as_where (&file, &line);
3923 if (file)
3924 {
3925 if (line != 0)
3926 fprintf (stderr, "%s:%u: ", file, line);
3927 else
3928 fprintf (stderr, "%s: ", file);
3929 }
3930 fprintf (stderr, _("Info: "));
3931 va_start (args, format);
3932 vfprintf (stderr, format, args);
3933 va_end (args);
3934 (void) putc ('\n', stderr);
3935 }
3936
3937 /* Output one operand error record. */
3938
3939 static void
3940 output_operand_error_record (const operand_error_record *record, char *str)
3941 {
3942 const aarch64_operand_error *detail = &record->detail;
3943 int idx = detail->index;
3944 const aarch64_opcode *opcode = record->opcode;
3945 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
3946 : AARCH64_OPND_NIL);
3947
3948 switch (detail->kind)
3949 {
3950 case AARCH64_OPDE_NIL:
3951 gas_assert (0);
3952 break;
3953
3954 case AARCH64_OPDE_SYNTAX_ERROR:
3955 case AARCH64_OPDE_RECOVERABLE:
3956 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3957 case AARCH64_OPDE_OTHER_ERROR:
3958 /* Use the prepared error message if there is, otherwise use the
3959 operand description string to describe the error. */
3960 if (detail->error != NULL)
3961 {
3962 if (idx < 0)
3963 as_bad (_("%s -- `%s'"), detail->error, str);
3964 else
3965 as_bad (_("%s at operand %d -- `%s'"),
3966 detail->error, idx + 1, str);
3967 }
3968 else
3969 {
3970 gas_assert (idx >= 0);
3971 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3972 aarch64_get_operand_desc (opd_code), str);
3973 }
3974 break;
3975
3976 case AARCH64_OPDE_INVALID_VARIANT:
3977 as_bad (_("operand mismatch -- `%s'"), str);
3978 if (verbose_error_p)
3979 {
3980 /* We will try to correct the erroneous instruction and also provide
3981 more information e.g. all other valid variants.
3982
3983 The string representation of the corrected instruction and other
3984 valid variants are generated by
3985
3986 1) obtaining the intermediate representation of the erroneous
3987 instruction;
3988 2) manipulating the IR, e.g. replacing the operand qualifier;
3989 3) printing out the instruction by calling the printer functions
3990 shared with the disassembler.
3991
3992 The limitation of this method is that the exact input assembly
3993 line cannot be accurately reproduced in some cases, for example an
3994 optional operand present in the actual assembly line will be
3995 omitted in the output; likewise for the optional syntax rules,
3996 e.g. the # before the immediate. Another limitation is that the
3997 assembly symbols and relocation operations in the assembly line
3998 currently cannot be printed out in the error report. Last but not
3999 least, when there is other error(s) co-exist with this error, the
4000 'corrected' instruction may be still incorrect, e.g. given
4001 'ldnp h0,h1,[x0,#6]!'
4002 this diagnosis will provide the version:
4003 'ldnp s0,s1,[x0,#6]!'
4004 which is still not right. */
4005 size_t len = strlen (get_mnemonic_name (str));
4006 int i, qlf_idx;
4007 bfd_boolean result;
4008 const size_t size = 2048;
4009 char buf[size];
4010 aarch64_inst *inst_base = &inst.base;
4011 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4012
4013 /* Init inst. */
4014 reset_aarch64_instruction (&inst);
4015 inst_base->opcode = opcode;
4016
4017 /* Reset the error report so that there is no side effect on the
4018 following operand parsing. */
4019 init_operand_error_report ();
4020
4021 /* Fill inst. */
4022 result = parse_operands (str + len, opcode)
4023 && programmer_friendly_fixup (&inst);
4024 gas_assert (result);
4025 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4026 NULL, NULL);
4027 gas_assert (!result);
4028
4029 /* Find the most matched qualifier sequence. */
4030 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4031 gas_assert (qlf_idx > -1);
4032
4033 /* Assign the qualifiers. */
4034 assign_qualifier_sequence (inst_base,
4035 opcode->qualifiers_list[qlf_idx]);
4036
4037 /* Print the hint. */
4038 output_info (_(" did you mean this?"));
4039 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4040 print_operands (buf, opcode, inst_base->operands);
4041 output_info (_(" %s"), buf);
4042
4043 /* Print out other variant(s) if there is any. */
4044 if (qlf_idx != 0 ||
4045 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4046 output_info (_(" other valid variant(s):"));
4047
4048 /* For each pattern. */
4049 qualifiers_list = opcode->qualifiers_list;
4050 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4051 {
4052 /* Most opcodes has much fewer patterns in the list.
4053 First NIL qualifier indicates the end in the list. */
4054 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4055 break;
4056
4057 if (i != qlf_idx)
4058 {
4059 /* Mnemonics name. */
4060 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4061
4062 /* Assign the qualifiers. */
4063 assign_qualifier_sequence (inst_base, *qualifiers_list);
4064
4065 /* Print instruction. */
4066 print_operands (buf, opcode, inst_base->operands);
4067
4068 output_info (_(" %s"), buf);
4069 }
4070 }
4071 }
4072 break;
4073
4074 case AARCH64_OPDE_OUT_OF_RANGE:
4075 if (detail->data[0] != detail->data[1])
4076 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4077 detail->error ? detail->error : _("immediate value"),
4078 detail->data[0], detail->data[1], idx + 1, str);
4079 else
4080 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4081 detail->error ? detail->error : _("immediate value"),
4082 detail->data[0], idx + 1, str);
4083 break;
4084
4085 case AARCH64_OPDE_REG_LIST:
4086 if (detail->data[0] == 1)
4087 as_bad (_("invalid number of registers in the list; "
4088 "only 1 register is expected at operand %d -- `%s'"),
4089 idx + 1, str);
4090 else
4091 as_bad (_("invalid number of registers in the list; "
4092 "%d registers are expected at operand %d -- `%s'"),
4093 detail->data[0], idx + 1, str);
4094 break;
4095
4096 case AARCH64_OPDE_UNALIGNED:
4097 as_bad (_("immediate value should be a multiple of "
4098 "%d at operand %d -- `%s'"),
4099 detail->data[0], idx + 1, str);
4100 break;
4101
4102 default:
4103 gas_assert (0);
4104 break;
4105 }
4106 }
4107
4108 /* Process and output the error message about the operand mismatching.
4109
4110 When this function is called, the operand error information had
4111 been collected for an assembly line and there will be multiple
4112 errors in the case of mulitple instruction templates; output the
4113 error message that most closely describes the problem. */
4114
4115 static void
4116 output_operand_error_report (char *str)
4117 {
4118 int largest_error_pos;
4119 const char *msg = NULL;
4120 enum aarch64_operand_error_kind kind;
4121 operand_error_record *curr;
4122 operand_error_record *head = operand_error_report.head;
4123 operand_error_record *record = NULL;
4124
4125 /* No error to report. */
4126 if (head == NULL)
4127 return;
4128
4129 gas_assert (head != NULL && operand_error_report.tail != NULL);
4130
4131 /* Only one error. */
4132 if (head == operand_error_report.tail)
4133 {
4134 DEBUG_TRACE ("single opcode entry with error kind: %s",
4135 operand_mismatch_kind_names[head->detail.kind]);
4136 output_operand_error_record (head, str);
4137 return;
4138 }
4139
4140 /* Find the error kind of the highest severity. */
4141 DEBUG_TRACE ("multiple opcode entres with error kind");
4142 kind = AARCH64_OPDE_NIL;
4143 for (curr = head; curr != NULL; curr = curr->next)
4144 {
4145 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4146 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4147 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4148 kind = curr->detail.kind;
4149 }
4150 gas_assert (kind != AARCH64_OPDE_NIL);
4151
4152 /* Pick up one of errors of KIND to report. */
4153 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4154 for (curr = head; curr != NULL; curr = curr->next)
4155 {
4156 if (curr->detail.kind != kind)
4157 continue;
4158 /* If there are multiple errors, pick up the one with the highest
4159 mismatching operand index. In the case of multiple errors with
4160 the equally highest operand index, pick up the first one or the
4161 first one with non-NULL error message. */
4162 if (curr->detail.index > largest_error_pos
4163 || (curr->detail.index == largest_error_pos && msg == NULL
4164 && curr->detail.error != NULL))
4165 {
4166 largest_error_pos = curr->detail.index;
4167 record = curr;
4168 msg = record->detail.error;
4169 }
4170 }
4171
4172 gas_assert (largest_error_pos != -2 && record != NULL);
4173 DEBUG_TRACE ("Pick up error kind %s to report",
4174 operand_mismatch_kind_names[record->detail.kind]);
4175
4176 /* Output. */
4177 output_operand_error_record (record, str);
4178 }
4179 \f
4180 /* Write an AARCH64 instruction to buf - always little-endian. */
4181 static void
4182 put_aarch64_insn (char *buf, uint32_t insn)
4183 {
4184 unsigned char *where = (unsigned char *) buf;
4185 where[0] = insn;
4186 where[1] = insn >> 8;
4187 where[2] = insn >> 16;
4188 where[3] = insn >> 24;
4189 }
4190
4191 static uint32_t
4192 get_aarch64_insn (char *buf)
4193 {
4194 unsigned char *where = (unsigned char *) buf;
4195 uint32_t result;
4196 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4197 return result;
4198 }
4199
4200 static void
4201 output_inst (struct aarch64_inst *new_inst)
4202 {
4203 char *to = NULL;
4204
4205 to = frag_more (INSN_SIZE);
4206
4207 frag_now->tc_frag_data.recorded = 1;
4208
4209 put_aarch64_insn (to, inst.base.value);
4210
4211 if (inst.reloc.type != BFD_RELOC_UNUSED)
4212 {
4213 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4214 INSN_SIZE, &inst.reloc.exp,
4215 inst.reloc.pc_rel,
4216 inst.reloc.type);
4217 DEBUG_TRACE ("Prepared relocation fix up");
4218 /* Don't check the addend value against the instruction size,
4219 that's the job of our code in md_apply_fix(). */
4220 fixp->fx_no_overflow = 1;
4221 if (new_inst != NULL)
4222 fixp->tc_fix_data.inst = new_inst;
4223 if (aarch64_gas_internal_fixup_p ())
4224 {
4225 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4226 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4227 fixp->fx_addnumber = inst.reloc.flags;
4228 }
4229 }
4230
4231 dwarf2_emit_insn (INSN_SIZE);
4232 }
4233
4234 /* Link together opcodes of the same name. */
4235
4236 struct templates
4237 {
4238 aarch64_opcode *opcode;
4239 struct templates *next;
4240 };
4241
4242 typedef struct templates templates;
4243
4244 static templates *
4245 lookup_mnemonic (const char *start, int len)
4246 {
4247 templates *templ = NULL;
4248
4249 templ = hash_find_n (aarch64_ops_hsh, start, len);
4250 return templ;
4251 }
4252
4253 /* Subroutine of md_assemble, responsible for looking up the primary
4254 opcode from the mnemonic the user wrote. STR points to the
4255 beginning of the mnemonic. */
4256
4257 static templates *
4258 opcode_lookup (char **str)
4259 {
4260 char *end, *base;
4261 const aarch64_cond *cond;
4262 char condname[16];
4263 int len;
4264
4265 /* Scan up to the end of the mnemonic, which must end in white space,
4266 '.', or end of string. */
4267 for (base = end = *str; is_part_of_name(*end); end++)
4268 if (*end == '.')
4269 break;
4270
4271 if (end == base)
4272 return 0;
4273
4274 inst.cond = COND_ALWAYS;
4275
4276 /* Handle a possible condition. */
4277 if (end[0] == '.')
4278 {
4279 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4280 if (cond)
4281 {
4282 inst.cond = cond->value;
4283 *str = end + 3;
4284 }
4285 else
4286 {
4287 *str = end;
4288 return 0;
4289 }
4290 }
4291 else
4292 *str = end;
4293
4294 len = end - base;
4295
4296 if (inst.cond == COND_ALWAYS)
4297 {
4298 /* Look for unaffixed mnemonic. */
4299 return lookup_mnemonic (base, len);
4300 }
4301 else if (len <= 13)
4302 {
4303 /* append ".c" to mnemonic if conditional */
4304 memcpy (condname, base, len);
4305 memcpy (condname + len, ".c", 2);
4306 base = condname;
4307 len += 2;
4308 return lookup_mnemonic (base, len);
4309 }
4310
4311 return NULL;
4312 }
4313
4314 /* Internal helper routine converting a vector neon_type_el structure
4315 *VECTYPE to a corresponding operand qualifier. */
4316
4317 static inline aarch64_opnd_qualifier_t
4318 vectype_to_qualifier (const struct neon_type_el *vectype)
4319 {
4320 /* Element size in bytes indexed by neon_el_type. */
4321 const unsigned char ele_size[5]
4322 = {1, 2, 4, 8, 16};
4323
4324 if (!vectype->defined || vectype->type == NT_invtype)
4325 goto vectype_conversion_fail;
4326
4327 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4328
4329 if (vectype->defined & NTA_HASINDEX)
4330 /* Vector element register. */
4331 return AARCH64_OPND_QLF_S_B + vectype->type;
4332 else
4333 {
4334 /* Vector register. */
4335 int reg_size = ele_size[vectype->type] * vectype->width;
4336 unsigned offset;
4337 if (reg_size != 16 && reg_size != 8)
4338 goto vectype_conversion_fail;
4339 /* The conversion is calculated based on the relation of the order of
4340 qualifiers to the vector element size and vector register size. */
4341 offset = (vectype->type == NT_q)
4342 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4343 gas_assert (offset <= 8);
4344 return AARCH64_OPND_QLF_V_8B + offset;
4345 }
4346
4347 vectype_conversion_fail:
4348 first_error (_("bad vector arrangement type"));
4349 return AARCH64_OPND_QLF_NIL;
4350 }
4351
4352 /* Process an optional operand that is found omitted from the assembly line.
4353 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4354 instruction's opcode entry while IDX is the index of this omitted operand.
4355 */
4356
4357 static void
4358 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4359 int idx, aarch64_opnd_info *operand)
4360 {
4361 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4362 gas_assert (optional_operand_p (opcode, idx));
4363 gas_assert (!operand->present);
4364
4365 switch (type)
4366 {
4367 case AARCH64_OPND_Rd:
4368 case AARCH64_OPND_Rn:
4369 case AARCH64_OPND_Rm:
4370 case AARCH64_OPND_Rt:
4371 case AARCH64_OPND_Rt2:
4372 case AARCH64_OPND_Rs:
4373 case AARCH64_OPND_Ra:
4374 case AARCH64_OPND_Rt_SYS:
4375 case AARCH64_OPND_Rd_SP:
4376 case AARCH64_OPND_Rn_SP:
4377 case AARCH64_OPND_Fd:
4378 case AARCH64_OPND_Fn:
4379 case AARCH64_OPND_Fm:
4380 case AARCH64_OPND_Fa:
4381 case AARCH64_OPND_Ft:
4382 case AARCH64_OPND_Ft2:
4383 case AARCH64_OPND_Sd:
4384 case AARCH64_OPND_Sn:
4385 case AARCH64_OPND_Sm:
4386 case AARCH64_OPND_Vd:
4387 case AARCH64_OPND_Vn:
4388 case AARCH64_OPND_Vm:
4389 case AARCH64_OPND_VdD1:
4390 case AARCH64_OPND_VnD1:
4391 operand->reg.regno = default_value;
4392 break;
4393
4394 case AARCH64_OPND_Ed:
4395 case AARCH64_OPND_En:
4396 case AARCH64_OPND_Em:
4397 operand->reglane.regno = default_value;
4398 break;
4399
4400 case AARCH64_OPND_IDX:
4401 case AARCH64_OPND_BIT_NUM:
4402 case AARCH64_OPND_IMMR:
4403 case AARCH64_OPND_IMMS:
4404 case AARCH64_OPND_SHLL_IMM:
4405 case AARCH64_OPND_IMM_VLSL:
4406 case AARCH64_OPND_IMM_VLSR:
4407 case AARCH64_OPND_CCMP_IMM:
4408 case AARCH64_OPND_FBITS:
4409 case AARCH64_OPND_UIMM4:
4410 case AARCH64_OPND_UIMM3_OP1:
4411 case AARCH64_OPND_UIMM3_OP2:
4412 case AARCH64_OPND_IMM:
4413 case AARCH64_OPND_WIDTH:
4414 case AARCH64_OPND_UIMM7:
4415 case AARCH64_OPND_NZCV:
4416 operand->imm.value = default_value;
4417 break;
4418
4419 case AARCH64_OPND_EXCEPTION:
4420 inst.reloc.type = BFD_RELOC_UNUSED;
4421 break;
4422
4423 case AARCH64_OPND_BARRIER_ISB:
4424 operand->barrier = aarch64_barrier_options + default_value;
4425
4426 default:
4427 break;
4428 }
4429 }
4430
4431 /* Process the relocation type for move wide instructions.
4432 Return TRUE on success; otherwise return FALSE. */
4433
4434 static bfd_boolean
4435 process_movw_reloc_info (void)
4436 {
4437 int is32;
4438 unsigned shift;
4439
4440 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4441
4442 if (inst.base.opcode->op == OP_MOVK)
4443 switch (inst.reloc.type)
4444 {
4445 case BFD_RELOC_AARCH64_MOVW_G0_S:
4446 case BFD_RELOC_AARCH64_MOVW_G1_S:
4447 case BFD_RELOC_AARCH64_MOVW_G2_S:
4448 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4449 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4450 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4451 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4452 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4453 set_syntax_error
4454 (_("the specified relocation type is not allowed for MOVK"));
4455 return FALSE;
4456 default:
4457 break;
4458 }
4459
4460 switch (inst.reloc.type)
4461 {
4462 case BFD_RELOC_AARCH64_MOVW_G0:
4463 case BFD_RELOC_AARCH64_MOVW_G0_S:
4464 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4465 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4466 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4467 shift = 0;
4468 break;
4469 case BFD_RELOC_AARCH64_MOVW_G1:
4470 case BFD_RELOC_AARCH64_MOVW_G1_S:
4471 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4472 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4473 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4474 shift = 16;
4475 break;
4476 case BFD_RELOC_AARCH64_MOVW_G2:
4477 case BFD_RELOC_AARCH64_MOVW_G2_S:
4478 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4479 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4480 if (is32)
4481 {
4482 set_fatal_syntax_error
4483 (_("the specified relocation type is not allowed for 32-bit "
4484 "register"));
4485 return FALSE;
4486 }
4487 shift = 32;
4488 break;
4489 case BFD_RELOC_AARCH64_MOVW_G3:
4490 if (is32)
4491 {
4492 set_fatal_syntax_error
4493 (_("the specified relocation type is not allowed for 32-bit "
4494 "register"));
4495 return FALSE;
4496 }
4497 shift = 48;
4498 break;
4499 default:
4500 /* More cases should be added when more MOVW-related relocation types
4501 are supported in GAS. */
4502 gas_assert (aarch64_gas_internal_fixup_p ());
4503 /* The shift amount should have already been set by the parser. */
4504 return TRUE;
4505 }
4506 inst.base.operands[1].shifter.amount = shift;
4507 return TRUE;
4508 }
4509
4510 /* A primitive log caculator. */
4511
4512 static inline unsigned int
4513 get_logsz (unsigned int size)
4514 {
4515 const unsigned char ls[16] =
4516 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4517 if (size > 16)
4518 {
4519 gas_assert (0);
4520 return -1;
4521 }
4522 gas_assert (ls[size - 1] != (unsigned char)-1);
4523 return ls[size - 1];
4524 }
4525
4526 /* Determine and return the real reloc type code for an instruction
4527 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4528
4529 static inline bfd_reloc_code_real_type
4530 ldst_lo12_determine_real_reloc_type (void)
4531 {
4532 int logsz;
4533 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4534 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4535
4536 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4537 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4538 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4539 BFD_RELOC_AARCH64_LDST128_LO12
4540 };
4541
4542 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4543 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4544
4545 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4546 opd1_qlf =
4547 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4548 1, opd0_qlf, 0);
4549 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4550
4551 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4552 gas_assert (logsz >= 0 && logsz <= 4);
4553
4554 return reloc_ldst_lo12[logsz];
4555 }
4556
4557 /* Check whether a register list REGINFO is valid. The registers must be
4558 numbered in increasing order (modulo 32), in increments of one or two.
4559
4560 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4561 increments of two.
4562
4563 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4564
4565 static bfd_boolean
4566 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4567 {
4568 uint32_t i, nb_regs, prev_regno, incr;
4569
4570 nb_regs = 1 + (reginfo & 0x3);
4571 reginfo >>= 2;
4572 prev_regno = reginfo & 0x1f;
4573 incr = accept_alternate ? 2 : 1;
4574
4575 for (i = 1; i < nb_regs; ++i)
4576 {
4577 uint32_t curr_regno;
4578 reginfo >>= 5;
4579 curr_regno = reginfo & 0x1f;
4580 if (curr_regno != ((prev_regno + incr) & 0x1f))
4581 return FALSE;
4582 prev_regno = curr_regno;
4583 }
4584
4585 return TRUE;
4586 }
4587
4588 /* Generic instruction operand parser. This does no encoding and no
4589 semantic validation; it merely squirrels values away in the inst
4590 structure. Returns TRUE or FALSE depending on whether the
4591 specified grammar matched. */
4592
4593 static bfd_boolean
4594 parse_operands (char *str, const aarch64_opcode *opcode)
4595 {
4596 int i;
4597 char *backtrack_pos = 0;
4598 const enum aarch64_opnd *operands = opcode->operands;
4599
4600 clear_error ();
4601 skip_whitespace (str);
4602
4603 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4604 {
4605 int64_t val;
4606 int isreg32, isregzero;
4607 int comma_skipped_p = 0;
4608 aarch64_reg_type rtype;
4609 struct neon_type_el vectype;
4610 aarch64_opnd_info *info = &inst.base.operands[i];
4611
4612 DEBUG_TRACE ("parse operand %d", i);
4613
4614 /* Assign the operand code. */
4615 info->type = operands[i];
4616
4617 if (optional_operand_p (opcode, i))
4618 {
4619 /* Remember where we are in case we need to backtrack. */
4620 gas_assert (!backtrack_pos);
4621 backtrack_pos = str;
4622 }
4623
4624 /* Expect comma between operands; the backtrack mechanizm will take
4625 care of cases of omitted optional operand. */
4626 if (i > 0 && ! skip_past_char (&str, ','))
4627 {
4628 set_syntax_error (_("comma expected between operands"));
4629 goto failure;
4630 }
4631 else
4632 comma_skipped_p = 1;
4633
4634 switch (operands[i])
4635 {
4636 case AARCH64_OPND_Rd:
4637 case AARCH64_OPND_Rn:
4638 case AARCH64_OPND_Rm:
4639 case AARCH64_OPND_Rt:
4640 case AARCH64_OPND_Rt2:
4641 case AARCH64_OPND_Rs:
4642 case AARCH64_OPND_Ra:
4643 case AARCH64_OPND_Rt_SYS:
4644 case AARCH64_OPND_PAIRREG:
4645 po_int_reg_or_fail (1, 0);
4646 break;
4647
4648 case AARCH64_OPND_Rd_SP:
4649 case AARCH64_OPND_Rn_SP:
4650 po_int_reg_or_fail (0, 1);
4651 break;
4652
4653 case AARCH64_OPND_Rm_EXT:
4654 case AARCH64_OPND_Rm_SFT:
4655 po_misc_or_fail (parse_shifter_operand
4656 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4657 ? SHIFTED_ARITH_IMM
4658 : SHIFTED_LOGIC_IMM)));
4659 if (!info->shifter.operator_present)
4660 {
4661 /* Default to LSL if not present. Libopcodes prefers shifter
4662 kind to be explicit. */
4663 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4664 info->shifter.kind = AARCH64_MOD_LSL;
4665 /* For Rm_EXT, libopcodes will carry out further check on whether
4666 or not stack pointer is used in the instruction (Recall that
4667 "the extend operator is not optional unless at least one of
4668 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4669 }
4670 break;
4671
4672 case AARCH64_OPND_Fd:
4673 case AARCH64_OPND_Fn:
4674 case AARCH64_OPND_Fm:
4675 case AARCH64_OPND_Fa:
4676 case AARCH64_OPND_Ft:
4677 case AARCH64_OPND_Ft2:
4678 case AARCH64_OPND_Sd:
4679 case AARCH64_OPND_Sn:
4680 case AARCH64_OPND_Sm:
4681 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4682 if (val == PARSE_FAIL)
4683 {
4684 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4685 goto failure;
4686 }
4687 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4688
4689 info->reg.regno = val;
4690 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4691 break;
4692
4693 case AARCH64_OPND_Vd:
4694 case AARCH64_OPND_Vn:
4695 case AARCH64_OPND_Vm:
4696 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4697 if (val == PARSE_FAIL)
4698 {
4699 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4700 goto failure;
4701 }
4702 if (vectype.defined & NTA_HASINDEX)
4703 goto failure;
4704
4705 info->reg.regno = val;
4706 info->qualifier = vectype_to_qualifier (&vectype);
4707 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4708 goto failure;
4709 break;
4710
4711 case AARCH64_OPND_VdD1:
4712 case AARCH64_OPND_VnD1:
4713 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4714 if (val == PARSE_FAIL)
4715 {
4716 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4717 goto failure;
4718 }
4719 if (vectype.type != NT_d || vectype.index != 1)
4720 {
4721 set_fatal_syntax_error
4722 (_("the top half of a 128-bit FP/SIMD register is expected"));
4723 goto failure;
4724 }
4725 info->reg.regno = val;
4726 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4727 here; it is correct for the purpose of encoding/decoding since
4728 only the register number is explicitly encoded in the related
4729 instructions, although this appears a bit hacky. */
4730 info->qualifier = AARCH64_OPND_QLF_S_D;
4731 break;
4732
4733 case AARCH64_OPND_Ed:
4734 case AARCH64_OPND_En:
4735 case AARCH64_OPND_Em:
4736 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4737 if (val == PARSE_FAIL)
4738 {
4739 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4740 goto failure;
4741 }
4742 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4743 goto failure;
4744
4745 info->reglane.regno = val;
4746 info->reglane.index = vectype.index;
4747 info->qualifier = vectype_to_qualifier (&vectype);
4748 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4749 goto failure;
4750 break;
4751
4752 case AARCH64_OPND_LVn:
4753 case AARCH64_OPND_LVt:
4754 case AARCH64_OPND_LVt_AL:
4755 case AARCH64_OPND_LEt:
4756 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4757 goto failure;
4758 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4759 {
4760 set_fatal_syntax_error (_("invalid register list"));
4761 goto failure;
4762 }
4763 info->reglist.first_regno = (val >> 2) & 0x1f;
4764 info->reglist.num_regs = (val & 0x3) + 1;
4765 if (operands[i] == AARCH64_OPND_LEt)
4766 {
4767 if (!(vectype.defined & NTA_HASINDEX))
4768 goto failure;
4769 info->reglist.has_index = 1;
4770 info->reglist.index = vectype.index;
4771 }
4772 else if (!(vectype.defined & NTA_HASTYPE))
4773 goto failure;
4774 info->qualifier = vectype_to_qualifier (&vectype);
4775 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4776 goto failure;
4777 break;
4778
4779 case AARCH64_OPND_Cn:
4780 case AARCH64_OPND_Cm:
4781 po_reg_or_fail (REG_TYPE_CN);
4782 if (val > 15)
4783 {
4784 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4785 goto failure;
4786 }
4787 inst.base.operands[i].reg.regno = val;
4788 break;
4789
4790 case AARCH64_OPND_SHLL_IMM:
4791 case AARCH64_OPND_IMM_VLSR:
4792 po_imm_or_fail (1, 64);
4793 info->imm.value = val;
4794 break;
4795
4796 case AARCH64_OPND_CCMP_IMM:
4797 case AARCH64_OPND_FBITS:
4798 case AARCH64_OPND_UIMM4:
4799 case AARCH64_OPND_UIMM3_OP1:
4800 case AARCH64_OPND_UIMM3_OP2:
4801 case AARCH64_OPND_IMM_VLSL:
4802 case AARCH64_OPND_IMM:
4803 case AARCH64_OPND_WIDTH:
4804 po_imm_nc_or_fail ();
4805 info->imm.value = val;
4806 break;
4807
4808 case AARCH64_OPND_UIMM7:
4809 po_imm_or_fail (0, 127);
4810 info->imm.value = val;
4811 break;
4812
4813 case AARCH64_OPND_IDX:
4814 case AARCH64_OPND_BIT_NUM:
4815 case AARCH64_OPND_IMMR:
4816 case AARCH64_OPND_IMMS:
4817 po_imm_or_fail (0, 63);
4818 info->imm.value = val;
4819 break;
4820
4821 case AARCH64_OPND_IMM0:
4822 po_imm_nc_or_fail ();
4823 if (val != 0)
4824 {
4825 set_fatal_syntax_error (_("immediate zero expected"));
4826 goto failure;
4827 }
4828 info->imm.value = 0;
4829 break;
4830
4831 case AARCH64_OPND_FPIMM0:
4832 {
4833 int qfloat;
4834 bfd_boolean res1 = FALSE, res2 = FALSE;
4835 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4836 it is probably not worth the effort to support it. */
4837 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4838 && !(res2 = parse_constant_immediate (&str, &val)))
4839 goto failure;
4840 if ((res1 && qfloat == 0) || (res2 && val == 0))
4841 {
4842 info->imm.value = 0;
4843 info->imm.is_fp = 1;
4844 break;
4845 }
4846 set_fatal_syntax_error (_("immediate zero expected"));
4847 goto failure;
4848 }
4849
4850 case AARCH64_OPND_IMM_MOV:
4851 {
4852 char *saved = str;
4853 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4854 reg_name_p (str, REG_TYPE_VN))
4855 goto failure;
4856 str = saved;
4857 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4858 GE_OPT_PREFIX, 1));
4859 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4860 later. fix_mov_imm_insn will try to determine a machine
4861 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4862 message if the immediate cannot be moved by a single
4863 instruction. */
4864 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4865 inst.base.operands[i].skip = 1;
4866 }
4867 break;
4868
4869 case AARCH64_OPND_SIMD_IMM:
4870 case AARCH64_OPND_SIMD_IMM_SFT:
4871 if (! parse_big_immediate (&str, &val))
4872 goto failure;
4873 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4874 /* addr_off_p */ 0,
4875 /* need_libopcodes_p */ 1,
4876 /* skip_p */ 1);
4877 /* Parse shift.
4878 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4879 shift, we don't check it here; we leave the checking to
4880 the libopcodes (operand_general_constraint_met_p). By
4881 doing this, we achieve better diagnostics. */
4882 if (skip_past_comma (&str)
4883 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4884 goto failure;
4885 if (!info->shifter.operator_present
4886 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4887 {
4888 /* Default to LSL if not present. Libopcodes prefers shifter
4889 kind to be explicit. */
4890 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4891 info->shifter.kind = AARCH64_MOD_LSL;
4892 }
4893 break;
4894
4895 case AARCH64_OPND_FPIMM:
4896 case AARCH64_OPND_SIMD_FPIMM:
4897 {
4898 int qfloat;
4899 bfd_boolean dp_p
4900 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4901 == 8);
4902 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4903 goto failure;
4904 if (qfloat == 0)
4905 {
4906 set_fatal_syntax_error (_("invalid floating-point constant"));
4907 goto failure;
4908 }
4909 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4910 inst.base.operands[i].imm.is_fp = 1;
4911 }
4912 break;
4913
4914 case AARCH64_OPND_LIMM:
4915 po_misc_or_fail (parse_shifter_operand (&str, info,
4916 SHIFTED_LOGIC_IMM));
4917 if (info->shifter.operator_present)
4918 {
4919 set_fatal_syntax_error
4920 (_("shift not allowed for bitmask immediate"));
4921 goto failure;
4922 }
4923 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4924 /* addr_off_p */ 0,
4925 /* need_libopcodes_p */ 1,
4926 /* skip_p */ 1);
4927 break;
4928
4929 case AARCH64_OPND_AIMM:
4930 if (opcode->op == OP_ADD)
4931 /* ADD may have relocation types. */
4932 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4933 SHIFTED_ARITH_IMM));
4934 else
4935 po_misc_or_fail (parse_shifter_operand (&str, info,
4936 SHIFTED_ARITH_IMM));
4937 switch (inst.reloc.type)
4938 {
4939 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4940 info->shifter.amount = 12;
4941 break;
4942 case BFD_RELOC_UNUSED:
4943 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4944 if (info->shifter.kind != AARCH64_MOD_NONE)
4945 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4946 inst.reloc.pc_rel = 0;
4947 break;
4948 default:
4949 break;
4950 }
4951 info->imm.value = 0;
4952 if (!info->shifter.operator_present)
4953 {
4954 /* Default to LSL if not present. Libopcodes prefers shifter
4955 kind to be explicit. */
4956 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4957 info->shifter.kind = AARCH64_MOD_LSL;
4958 }
4959 break;
4960
4961 case AARCH64_OPND_HALF:
4962 {
4963 /* #<imm16> or relocation. */
4964 int internal_fixup_p;
4965 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4966 if (internal_fixup_p)
4967 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4968 skip_whitespace (str);
4969 if (skip_past_comma (&str))
4970 {
4971 /* {, LSL #<shift>} */
4972 if (! aarch64_gas_internal_fixup_p ())
4973 {
4974 set_fatal_syntax_error (_("can't mix relocation modifier "
4975 "with explicit shift"));
4976 goto failure;
4977 }
4978 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4979 }
4980 else
4981 inst.base.operands[i].shifter.amount = 0;
4982 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4983 inst.base.operands[i].imm.value = 0;
4984 if (! process_movw_reloc_info ())
4985 goto failure;
4986 }
4987 break;
4988
4989 case AARCH64_OPND_EXCEPTION:
4990 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4991 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4992 /* addr_off_p */ 0,
4993 /* need_libopcodes_p */ 0,
4994 /* skip_p */ 1);
4995 break;
4996
4997 case AARCH64_OPND_NZCV:
4998 {
4999 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5000 if (nzcv != NULL)
5001 {
5002 str += 4;
5003 info->imm.value = nzcv->value;
5004 break;
5005 }
5006 po_imm_or_fail (0, 15);
5007 info->imm.value = val;
5008 }
5009 break;
5010
5011 case AARCH64_OPND_COND:
5012 case AARCH64_OPND_COND1:
5013 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5014 str += 2;
5015 if (info->cond == NULL)
5016 {
5017 set_syntax_error (_("invalid condition"));
5018 goto failure;
5019 }
5020 else if (operands[i] == AARCH64_OPND_COND1
5021 && (info->cond->value & 0xe) == 0xe)
5022 {
5023 /* Not allow AL or NV. */
5024 set_default_error ();
5025 goto failure;
5026 }
5027 break;
5028
5029 case AARCH64_OPND_ADDR_ADRP:
5030 po_misc_or_fail (parse_adrp (&str));
5031 /* Clear the value as operand needs to be relocated. */
5032 info->imm.value = 0;
5033 break;
5034
5035 case AARCH64_OPND_ADDR_PCREL14:
5036 case AARCH64_OPND_ADDR_PCREL19:
5037 case AARCH64_OPND_ADDR_PCREL21:
5038 case AARCH64_OPND_ADDR_PCREL26:
5039 po_misc_or_fail (parse_address_reloc (&str, info));
5040 if (!info->addr.pcrel)
5041 {
5042 set_syntax_error (_("invalid pc-relative address"));
5043 goto failure;
5044 }
5045 if (inst.gen_lit_pool
5046 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5047 {
5048 /* Only permit "=value" in the literal load instructions.
5049 The literal will be generated by programmer_friendly_fixup. */
5050 set_syntax_error (_("invalid use of \"=immediate\""));
5051 goto failure;
5052 }
5053 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5054 {
5055 set_syntax_error (_("unrecognized relocation suffix"));
5056 goto failure;
5057 }
5058 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5059 {
5060 info->imm.value = inst.reloc.exp.X_add_number;
5061 inst.reloc.type = BFD_RELOC_UNUSED;
5062 }
5063 else
5064 {
5065 info->imm.value = 0;
5066 if (inst.reloc.type == BFD_RELOC_UNUSED)
5067 switch (opcode->iclass)
5068 {
5069 case compbranch:
5070 case condbranch:
5071 /* e.g. CBZ or B.COND */
5072 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5073 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5074 break;
5075 case testbranch:
5076 /* e.g. TBZ */
5077 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5078 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5079 break;
5080 case branch_imm:
5081 /* e.g. B or BL */
5082 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5083 inst.reloc.type =
5084 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5085 : BFD_RELOC_AARCH64_JUMP26;
5086 break;
5087 case loadlit:
5088 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5089 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5090 break;
5091 case pcreladdr:
5092 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5093 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5094 break;
5095 default:
5096 gas_assert (0);
5097 abort ();
5098 }
5099 inst.reloc.pc_rel = 1;
5100 }
5101 break;
5102
5103 case AARCH64_OPND_ADDR_SIMPLE:
5104 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5105 /* [<Xn|SP>{, #<simm>}] */
5106 po_char_or_fail ('[');
5107 po_reg_or_fail (REG_TYPE_R64_SP);
5108 /* Accept optional ", #0". */
5109 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5110 && skip_past_char (&str, ','))
5111 {
5112 skip_past_char (&str, '#');
5113 if (! skip_past_char (&str, '0'))
5114 {
5115 set_fatal_syntax_error
5116 (_("the optional immediate offset can only be 0"));
5117 goto failure;
5118 }
5119 }
5120 po_char_or_fail (']');
5121 info->addr.base_regno = val;
5122 break;
5123
5124 case AARCH64_OPND_ADDR_REGOFF:
5125 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5126 po_misc_or_fail (parse_address (&str, info, 0));
5127 if (info->addr.pcrel || !info->addr.offset.is_reg
5128 || !info->addr.preind || info->addr.postind
5129 || info->addr.writeback)
5130 {
5131 set_syntax_error (_("invalid addressing mode"));
5132 goto failure;
5133 }
5134 if (!info->shifter.operator_present)
5135 {
5136 /* Default to LSL if not present. Libopcodes prefers shifter
5137 kind to be explicit. */
5138 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5139 info->shifter.kind = AARCH64_MOD_LSL;
5140 }
5141 /* Qualifier to be deduced by libopcodes. */
5142 break;
5143
5144 case AARCH64_OPND_ADDR_SIMM7:
5145 po_misc_or_fail (parse_address (&str, info, 0));
5146 if (info->addr.pcrel || info->addr.offset.is_reg
5147 || (!info->addr.preind && !info->addr.postind))
5148 {
5149 set_syntax_error (_("invalid addressing mode"));
5150 goto failure;
5151 }
5152 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5153 /* addr_off_p */ 1,
5154 /* need_libopcodes_p */ 1,
5155 /* skip_p */ 0);
5156 break;
5157
5158 case AARCH64_OPND_ADDR_SIMM9:
5159 case AARCH64_OPND_ADDR_SIMM9_2:
5160 po_misc_or_fail (parse_address_reloc (&str, info));
5161 if (info->addr.pcrel || info->addr.offset.is_reg
5162 || (!info->addr.preind && !info->addr.postind)
5163 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5164 && info->addr.writeback))
5165 {
5166 set_syntax_error (_("invalid addressing mode"));
5167 goto failure;
5168 }
5169 if (inst.reloc.type != BFD_RELOC_UNUSED)
5170 {
5171 set_syntax_error (_("relocation not allowed"));
5172 goto failure;
5173 }
5174 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5175 /* addr_off_p */ 1,
5176 /* need_libopcodes_p */ 1,
5177 /* skip_p */ 0);
5178 break;
5179
5180 case AARCH64_OPND_ADDR_UIMM12:
5181 po_misc_or_fail (parse_address_reloc (&str, info));
5182 if (info->addr.pcrel || info->addr.offset.is_reg
5183 || !info->addr.preind || info->addr.writeback)
5184 {
5185 set_syntax_error (_("invalid addressing mode"));
5186 goto failure;
5187 }
5188 if (inst.reloc.type == BFD_RELOC_UNUSED)
5189 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5190 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5191 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5192 /* Leave qualifier to be determined by libopcodes. */
5193 break;
5194
5195 case AARCH64_OPND_SIMD_ADDR_POST:
5196 /* [<Xn|SP>], <Xm|#<amount>> */
5197 po_misc_or_fail (parse_address (&str, info, 1));
5198 if (!info->addr.postind || !info->addr.writeback)
5199 {
5200 set_syntax_error (_("invalid addressing mode"));
5201 goto failure;
5202 }
5203 if (!info->addr.offset.is_reg)
5204 {
5205 if (inst.reloc.exp.X_op == O_constant)
5206 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5207 else
5208 {
5209 set_fatal_syntax_error
5210 (_("writeback value should be an immediate constant"));
5211 goto failure;
5212 }
5213 }
5214 /* No qualifier. */
5215 break;
5216
5217 case AARCH64_OPND_SYSREG:
5218 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5219 == PARSE_FAIL)
5220 {
5221 set_syntax_error (_("unknown or missing system register name"));
5222 goto failure;
5223 }
5224 inst.base.operands[i].sysreg = val;
5225 break;
5226
5227 case AARCH64_OPND_PSTATEFIELD:
5228 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5229 == PARSE_FAIL)
5230 {
5231 set_syntax_error (_("unknown or missing PSTATE field name"));
5232 goto failure;
5233 }
5234 inst.base.operands[i].pstatefield = val;
5235 break;
5236
5237 case AARCH64_OPND_SYSREG_IC:
5238 inst.base.operands[i].sysins_op =
5239 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5240 goto sys_reg_ins;
5241 case AARCH64_OPND_SYSREG_DC:
5242 inst.base.operands[i].sysins_op =
5243 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5244 goto sys_reg_ins;
5245 case AARCH64_OPND_SYSREG_AT:
5246 inst.base.operands[i].sysins_op =
5247 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5248 goto sys_reg_ins;
5249 case AARCH64_OPND_SYSREG_TLBI:
5250 inst.base.operands[i].sysins_op =
5251 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5252 sys_reg_ins:
5253 if (inst.base.operands[i].sysins_op == NULL)
5254 {
5255 set_fatal_syntax_error ( _("unknown or missing operation name"));
5256 goto failure;
5257 }
5258 break;
5259
5260 case AARCH64_OPND_BARRIER:
5261 case AARCH64_OPND_BARRIER_ISB:
5262 val = parse_barrier (&str);
5263 if (val != PARSE_FAIL
5264 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5265 {
5266 /* ISB only accepts options name 'sy'. */
5267 set_syntax_error
5268 (_("the specified option is not accepted in ISB"));
5269 /* Turn off backtrack as this optional operand is present. */
5270 backtrack_pos = 0;
5271 goto failure;
5272 }
5273 /* This is an extension to accept a 0..15 immediate. */
5274 if (val == PARSE_FAIL)
5275 po_imm_or_fail (0, 15);
5276 info->barrier = aarch64_barrier_options + val;
5277 break;
5278
5279 case AARCH64_OPND_PRFOP:
5280 val = parse_pldop (&str);
5281 /* This is an extension to accept a 0..31 immediate. */
5282 if (val == PARSE_FAIL)
5283 po_imm_or_fail (0, 31);
5284 inst.base.operands[i].prfop = aarch64_prfops + val;
5285 break;
5286
5287 default:
5288 as_fatal (_("unhandled operand code %d"), operands[i]);
5289 }
5290
5291 /* If we get here, this operand was successfully parsed. */
5292 inst.base.operands[i].present = 1;
5293 continue;
5294
5295 failure:
5296 /* The parse routine should already have set the error, but in case
5297 not, set a default one here. */
5298 if (! error_p ())
5299 set_default_error ();
5300
5301 if (! backtrack_pos)
5302 goto parse_operands_return;
5303
5304 {
5305 /* We reach here because this operand is marked as optional, and
5306 either no operand was supplied or the operand was supplied but it
5307 was syntactically incorrect. In the latter case we report an
5308 error. In the former case we perform a few more checks before
5309 dropping through to the code to insert the default operand. */
5310
5311 char *tmp = backtrack_pos;
5312 char endchar = END_OF_INSN;
5313
5314 if (i != (aarch64_num_of_operands (opcode) - 1))
5315 endchar = ',';
5316 skip_past_char (&tmp, ',');
5317
5318 if (*tmp != endchar)
5319 /* The user has supplied an operand in the wrong format. */
5320 goto parse_operands_return;
5321
5322 /* Make sure there is not a comma before the optional operand.
5323 For example the fifth operand of 'sys' is optional:
5324
5325 sys #0,c0,c0,#0, <--- wrong
5326 sys #0,c0,c0,#0 <--- correct. */
5327 if (comma_skipped_p && i && endchar == END_OF_INSN)
5328 {
5329 set_fatal_syntax_error
5330 (_("unexpected comma before the omitted optional operand"));
5331 goto parse_operands_return;
5332 }
5333 }
5334
5335 /* Reaching here means we are dealing with an optional operand that is
5336 omitted from the assembly line. */
5337 gas_assert (optional_operand_p (opcode, i));
5338 info->present = 0;
5339 process_omitted_operand (operands[i], opcode, i, info);
5340
5341 /* Try again, skipping the optional operand at backtrack_pos. */
5342 str = backtrack_pos;
5343 backtrack_pos = 0;
5344
5345 /* Clear any error record after the omitted optional operand has been
5346 successfully handled. */
5347 clear_error ();
5348 }
5349
5350 /* Check if we have parsed all the operands. */
5351 if (*str != '\0' && ! error_p ())
5352 {
5353 /* Set I to the index of the last present operand; this is
5354 for the purpose of diagnostics. */
5355 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5356 ;
5357 set_fatal_syntax_error
5358 (_("unexpected characters following instruction"));
5359 }
5360
5361 parse_operands_return:
5362
5363 if (error_p ())
5364 {
5365 DEBUG_TRACE ("parsing FAIL: %s - %s",
5366 operand_mismatch_kind_names[get_error_kind ()],
5367 get_error_message ());
5368 /* Record the operand error properly; this is useful when there
5369 are multiple instruction templates for a mnemonic name, so that
5370 later on, we can select the error that most closely describes
5371 the problem. */
5372 record_operand_error (opcode, i, get_error_kind (),
5373 get_error_message ());
5374 return FALSE;
5375 }
5376 else
5377 {
5378 DEBUG_TRACE ("parsing SUCCESS");
5379 return TRUE;
5380 }
5381 }
5382
5383 /* It does some fix-up to provide some programmer friendly feature while
5384 keeping the libopcodes happy, i.e. libopcodes only accepts
5385 the preferred architectural syntax.
5386 Return FALSE if there is any failure; otherwise return TRUE. */
5387
5388 static bfd_boolean
5389 programmer_friendly_fixup (aarch64_instruction *instr)
5390 {
5391 aarch64_inst *base = &instr->base;
5392 const aarch64_opcode *opcode = base->opcode;
5393 enum aarch64_op op = opcode->op;
5394 aarch64_opnd_info *operands = base->operands;
5395
5396 DEBUG_TRACE ("enter");
5397
5398 switch (opcode->iclass)
5399 {
5400 case testbranch:
5401 /* TBNZ Xn|Wn, #uimm6, label
5402 Test and Branch Not Zero: conditionally jumps to label if bit number
5403 uimm6 in register Xn is not zero. The bit number implies the width of
5404 the register, which may be written and should be disassembled as Wn if
5405 uimm is less than 32. */
5406 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5407 {
5408 if (operands[1].imm.value >= 32)
5409 {
5410 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5411 0, 31);
5412 return FALSE;
5413 }
5414 operands[0].qualifier = AARCH64_OPND_QLF_X;
5415 }
5416 break;
5417 case loadlit:
5418 /* LDR Wt, label | =value
5419 As a convenience assemblers will typically permit the notation
5420 "=value" in conjunction with the pc-relative literal load instructions
5421 to automatically place an immediate value or symbolic address in a
5422 nearby literal pool and generate a hidden label which references it.
5423 ISREG has been set to 0 in the case of =value. */
5424 if (instr->gen_lit_pool
5425 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5426 {
5427 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5428 if (op == OP_LDRSW_LIT)
5429 size = 4;
5430 if (instr->reloc.exp.X_op != O_constant
5431 && instr->reloc.exp.X_op != O_big
5432 && instr->reloc.exp.X_op != O_symbol)
5433 {
5434 record_operand_error (opcode, 1,
5435 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5436 _("constant expression expected"));
5437 return FALSE;
5438 }
5439 if (! add_to_lit_pool (&instr->reloc.exp, size))
5440 {
5441 record_operand_error (opcode, 1,
5442 AARCH64_OPDE_OTHER_ERROR,
5443 _("literal pool insertion failed"));
5444 return FALSE;
5445 }
5446 }
5447 break;
5448 case log_shift:
5449 case bitfield:
5450 /* UXT[BHW] Wd, Wn
5451 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
5452 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
5453 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5454 A programmer-friendly assembler should accept a destination Xd in
5455 place of Wd, however that is not the preferred form for disassembly.
5456 */
5457 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5458 && operands[1].qualifier == AARCH64_OPND_QLF_W
5459 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5460 operands[0].qualifier = AARCH64_OPND_QLF_W;
5461 break;
5462
5463 case addsub_ext:
5464 {
5465 /* In the 64-bit form, the final register operand is written as Wm
5466 for all but the (possibly omitted) UXTX/LSL and SXTX
5467 operators.
5468 As a programmer-friendly assembler, we accept e.g.
5469 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5470 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5471 int idx = aarch64_operand_index (opcode->operands,
5472 AARCH64_OPND_Rm_EXT);
5473 gas_assert (idx == 1 || idx == 2);
5474 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5475 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5476 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5477 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5478 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5479 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5480 }
5481 break;
5482
5483 default:
5484 break;
5485 }
5486
5487 DEBUG_TRACE ("exit with SUCCESS");
5488 return TRUE;
5489 }
5490
5491 /* A wrapper function to interface with libopcodes on encoding and
5492 record the error message if there is any.
5493
5494 Return TRUE on success; otherwise return FALSE. */
5495
5496 static bfd_boolean
5497 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5498 aarch64_insn *code)
5499 {
5500 aarch64_operand_error error_info;
5501 error_info.kind = AARCH64_OPDE_NIL;
5502 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5503 return TRUE;
5504 else
5505 {
5506 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5507 record_operand_error_info (opcode, &error_info);
5508 return FALSE;
5509 }
5510 }
5511
5512 #ifdef DEBUG_AARCH64
5513 static inline void
5514 dump_opcode_operands (const aarch64_opcode *opcode)
5515 {
5516 int i = 0;
5517 while (opcode->operands[i] != AARCH64_OPND_NIL)
5518 {
5519 aarch64_verbose ("\t\t opnd%d: %s", i,
5520 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5521 ? aarch64_get_operand_name (opcode->operands[i])
5522 : aarch64_get_operand_desc (opcode->operands[i]));
5523 ++i;
5524 }
5525 }
5526 #endif /* DEBUG_AARCH64 */
5527
5528 /* This is the guts of the machine-dependent assembler. STR points to a
5529 machine dependent instruction. This function is supposed to emit
5530 the frags/bytes it assembles to. */
5531
5532 void
5533 md_assemble (char *str)
5534 {
5535 char *p = str;
5536 templates *template;
5537 aarch64_opcode *opcode;
5538 aarch64_inst *inst_base;
5539 unsigned saved_cond;
5540
5541 /* Align the previous label if needed. */
5542 if (last_label_seen != NULL)
5543 {
5544 symbol_set_frag (last_label_seen, frag_now);
5545 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5546 S_SET_SEGMENT (last_label_seen, now_seg);
5547 }
5548
5549 inst.reloc.type = BFD_RELOC_UNUSED;
5550
5551 DEBUG_TRACE ("\n\n");
5552 DEBUG_TRACE ("==============================");
5553 DEBUG_TRACE ("Enter md_assemble with %s", str);
5554
5555 template = opcode_lookup (&p);
5556 if (!template)
5557 {
5558 /* It wasn't an instruction, but it might be a register alias of
5559 the form alias .req reg directive. */
5560 if (!create_register_alias (str, p))
5561 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5562 str);
5563 return;
5564 }
5565
5566 skip_whitespace (p);
5567 if (*p == ',')
5568 {
5569 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5570 get_mnemonic_name (str), str);
5571 return;
5572 }
5573
5574 init_operand_error_report ();
5575
5576 saved_cond = inst.cond;
5577 reset_aarch64_instruction (&inst);
5578 inst.cond = saved_cond;
5579
5580 /* Iterate through all opcode entries with the same mnemonic name. */
5581 do
5582 {
5583 opcode = template->opcode;
5584
5585 DEBUG_TRACE ("opcode %s found", opcode->name);
5586 #ifdef DEBUG_AARCH64
5587 if (debug_dump)
5588 dump_opcode_operands (opcode);
5589 #endif /* DEBUG_AARCH64 */
5590
5591 mapping_state (MAP_INSN);
5592
5593 inst_base = &inst.base;
5594 inst_base->opcode = opcode;
5595
5596 /* Truly conditionally executed instructions, e.g. b.cond. */
5597 if (opcode->flags & F_COND)
5598 {
5599 gas_assert (inst.cond != COND_ALWAYS);
5600 inst_base->cond = get_cond_from_value (inst.cond);
5601 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5602 }
5603 else if (inst.cond != COND_ALWAYS)
5604 {
5605 /* It shouldn't arrive here, where the assembly looks like a
5606 conditional instruction but the found opcode is unconditional. */
5607 gas_assert (0);
5608 continue;
5609 }
5610
5611 if (parse_operands (p, opcode)
5612 && programmer_friendly_fixup (&inst)
5613 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5614 {
5615 /* Check that this instruction is supported for this CPU. */
5616 if (!opcode->avariant
5617 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5618 {
5619 as_bad (_("selected processor does not support `%s'"), str);
5620 return;
5621 }
5622
5623 if (inst.reloc.type == BFD_RELOC_UNUSED
5624 || !inst.reloc.need_libopcodes_p)
5625 output_inst (NULL);
5626 else
5627 {
5628 /* If there is relocation generated for the instruction,
5629 store the instruction information for the future fix-up. */
5630 struct aarch64_inst *copy;
5631 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5632 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5633 abort ();
5634 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5635 output_inst (copy);
5636 }
5637 return;
5638 }
5639
5640 template = template->next;
5641 if (template != NULL)
5642 {
5643 reset_aarch64_instruction (&inst);
5644 inst.cond = saved_cond;
5645 }
5646 }
5647 while (template != NULL);
5648
5649 /* Issue the error messages if any. */
5650 output_operand_error_report (str);
5651 }
5652
5653 /* Various frobbings of labels and their addresses. */
5654
5655 void
5656 aarch64_start_line_hook (void)
5657 {
5658 last_label_seen = NULL;
5659 }
5660
5661 void
5662 aarch64_frob_label (symbolS * sym)
5663 {
5664 last_label_seen = sym;
5665
5666 dwarf2_emit_label (sym);
5667 }
5668
5669 int
5670 aarch64_data_in_code (void)
5671 {
5672 if (!strncmp (input_line_pointer + 1, "data:", 5))
5673 {
5674 *input_line_pointer = '/';
5675 input_line_pointer += 5;
5676 *input_line_pointer = 0;
5677 return 1;
5678 }
5679
5680 return 0;
5681 }
5682
5683 char *
5684 aarch64_canonicalize_symbol_name (char *name)
5685 {
5686 int len;
5687
5688 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5689 *(name + len - 5) = 0;
5690
5691 return name;
5692 }
5693 \f
5694 /* Table of all register names defined by default. The user can
5695 define additional names with .req. Note that all register names
5696 should appear in both upper and lowercase variants. Some registers
5697 also have mixed-case names. */
5698
5699 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5700 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5701 #define REGSET31(p,t) \
5702 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5703 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5704 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5705 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5706 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5707 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5708 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5709 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5710 #define REGSET(p,t) \
5711 REGSET31(p,t), REGNUM(p,31,t)
5712
5713 /* These go into aarch64_reg_hsh hash-table. */
5714 static const reg_entry reg_names[] = {
5715 /* Integer registers. */
5716 REGSET31 (x, R_64), REGSET31 (X, R_64),
5717 REGSET31 (w, R_32), REGSET31 (W, R_32),
5718
5719 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5720 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5721
5722 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5723 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5724
5725 /* Coprocessor register numbers. */
5726 REGSET (c, CN), REGSET (C, CN),
5727
5728 /* Floating-point single precision registers. */
5729 REGSET (s, FP_S), REGSET (S, FP_S),
5730
5731 /* Floating-point double precision registers. */
5732 REGSET (d, FP_D), REGSET (D, FP_D),
5733
5734 /* Floating-point half precision registers. */
5735 REGSET (h, FP_H), REGSET (H, FP_H),
5736
5737 /* Floating-point byte precision registers. */
5738 REGSET (b, FP_B), REGSET (B, FP_B),
5739
5740 /* Floating-point quad precision registers. */
5741 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5742
5743 /* FP/SIMD registers. */
5744 REGSET (v, VN), REGSET (V, VN),
5745 };
5746
5747 #undef REGDEF
5748 #undef REGNUM
5749 #undef REGSET
5750
5751 #define N 1
5752 #define n 0
5753 #define Z 1
5754 #define z 0
5755 #define C 1
5756 #define c 0
5757 #define V 1
5758 #define v 0
5759 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5760 static const asm_nzcv nzcv_names[] = {
5761 {"nzcv", B (n, z, c, v)},
5762 {"nzcV", B (n, z, c, V)},
5763 {"nzCv", B (n, z, C, v)},
5764 {"nzCV", B (n, z, C, V)},
5765 {"nZcv", B (n, Z, c, v)},
5766 {"nZcV", B (n, Z, c, V)},
5767 {"nZCv", B (n, Z, C, v)},
5768 {"nZCV", B (n, Z, C, V)},
5769 {"Nzcv", B (N, z, c, v)},
5770 {"NzcV", B (N, z, c, V)},
5771 {"NzCv", B (N, z, C, v)},
5772 {"NzCV", B (N, z, C, V)},
5773 {"NZcv", B (N, Z, c, v)},
5774 {"NZcV", B (N, Z, c, V)},
5775 {"NZCv", B (N, Z, C, v)},
5776 {"NZCV", B (N, Z, C, V)}
5777 };
5778
5779 #undef N
5780 #undef n
5781 #undef Z
5782 #undef z
5783 #undef C
5784 #undef c
5785 #undef V
5786 #undef v
5787 #undef B
5788 \f
5789 /* MD interface: bits in the object file. */
5790
5791 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5792 for use in the a.out file, and stores them in the array pointed to by buf.
5793 This knows about the endian-ness of the target machine and does
5794 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
5795 2 (short) and 4 (long) Floating numbers are put out as a series of
5796 LITTLENUMS (shorts, here at least). */
5797
5798 void
5799 md_number_to_chars (char *buf, valueT val, int n)
5800 {
5801 if (target_big_endian)
5802 number_to_chars_bigendian (buf, val, n);
5803 else
5804 number_to_chars_littleendian (buf, val, n);
5805 }
5806
5807 /* MD interface: Sections. */
5808
5809 /* Estimate the size of a frag before relaxing. Assume everything fits in
5810 4 bytes. */
5811
5812 int
5813 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5814 {
5815 fragp->fr_var = 4;
5816 return 4;
5817 }
5818
5819 /* Round up a section size to the appropriate boundary. */
5820
5821 valueT
5822 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5823 {
5824 return size;
5825 }
5826
5827 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5828 of an rs_align_code fragment.
5829
5830 Here we fill the frag with the appropriate info for padding the
5831 output stream. The resulting frag will consist of a fixed (fr_fix)
5832 and of a repeating (fr_var) part.
5833
5834 The fixed content is always emitted before the repeating content and
5835 these two parts are used as follows in constructing the output:
5836 - the fixed part will be used to align to a valid instruction word
5837 boundary, in case that we start at a misaligned address; as no
5838 executable instruction can live at the misaligned location, we
5839 simply fill with zeros;
5840 - the variable part will be used to cover the remaining padding and
5841 we fill using the AArch64 NOP instruction.
5842
5843 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5844 enough storage space for up to 3 bytes for padding the back to a valid
5845 instruction alignment and exactly 4 bytes to store the NOP pattern. */
5846
5847 void
5848 aarch64_handle_align (fragS * fragP)
5849 {
5850 /* NOP = d503201f */
5851 /* AArch64 instructions are always little-endian. */
5852 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5853
5854 int bytes, fix, noop_size;
5855 char *p;
5856
5857 if (fragP->fr_type != rs_align_code)
5858 return;
5859
5860 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5861 p = fragP->fr_literal + fragP->fr_fix;
5862
5863 #ifdef OBJ_ELF
5864 gas_assert (fragP->tc_frag_data.recorded);
5865 #endif
5866
5867 noop_size = sizeof (aarch64_noop);
5868
5869 fix = bytes & (noop_size - 1);
5870 if (fix)
5871 {
5872 #ifdef OBJ_ELF
5873 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5874 #endif
5875 memset (p, 0, fix);
5876 p += fix;
5877 fragP->fr_fix += fix;
5878 }
5879
5880 if (noop_size)
5881 memcpy (p, aarch64_noop, noop_size);
5882 fragP->fr_var = noop_size;
5883 }
5884
5885 /* Perform target specific initialisation of a frag.
5886 Note - despite the name this initialisation is not done when the frag
5887 is created, but only when its type is assigned. A frag can be created
5888 and used a long time before its type is set, so beware of assuming that
5889 this initialisationis performed first. */
5890
5891 #ifndef OBJ_ELF
5892 void
5893 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5894 int max_chars ATTRIBUTE_UNUSED)
5895 {
5896 }
5897
5898 #else /* OBJ_ELF is defined. */
5899 void
5900 aarch64_init_frag (fragS * fragP, int max_chars)
5901 {
5902 /* Record a mapping symbol for alignment frags. We will delete this
5903 later if the alignment ends up empty. */
5904 if (!fragP->tc_frag_data.recorded)
5905 {
5906 fragP->tc_frag_data.recorded = 1;
5907 switch (fragP->fr_type)
5908 {
5909 case rs_align:
5910 case rs_align_test:
5911 case rs_fill:
5912 mapping_state_2 (MAP_DATA, max_chars);
5913 break;
5914 case rs_align_code:
5915 mapping_state_2 (MAP_INSN, max_chars);
5916 break;
5917 default:
5918 break;
5919 }
5920 }
5921 }
5922 \f
5923 /* Initialize the DWARF-2 unwind information for this procedure. */
5924
5925 void
5926 tc_aarch64_frame_initial_instructions (void)
5927 {
5928 cfi_add_CFA_def_cfa (REG_SP, 0);
5929 }
5930 #endif /* OBJ_ELF */
5931
5932 /* Convert REGNAME to a DWARF-2 register number. */
5933
5934 int
5935 tc_aarch64_regname_to_dw2regnum (char *regname)
5936 {
5937 const reg_entry *reg = parse_reg (&regname);
5938 if (reg == NULL)
5939 return -1;
5940
5941 switch (reg->type)
5942 {
5943 case REG_TYPE_SP_32:
5944 case REG_TYPE_SP_64:
5945 case REG_TYPE_R_32:
5946 case REG_TYPE_R_64:
5947 return reg->number;
5948
5949 case REG_TYPE_FP_B:
5950 case REG_TYPE_FP_H:
5951 case REG_TYPE_FP_S:
5952 case REG_TYPE_FP_D:
5953 case REG_TYPE_FP_Q:
5954 return reg->number + 64;
5955
5956 default:
5957 break;
5958 }
5959 return -1;
5960 }
5961
5962 /* Implement DWARF2_ADDR_SIZE. */
5963
5964 int
5965 aarch64_dwarf2_addr_size (void)
5966 {
5967 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5968 if (ilp32_p)
5969 return 4;
5970 #endif
5971 return bfd_arch_bits_per_address (stdoutput) / 8;
5972 }
5973
5974 /* MD interface: Symbol and relocation handling. */
5975
5976 /* Return the address within the segment that a PC-relative fixup is
5977 relative to. For AArch64 PC-relative fixups applied to instructions
5978 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5979
5980 long
5981 md_pcrel_from_section (fixS * fixP, segT seg)
5982 {
5983 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5984
5985 /* If this is pc-relative and we are going to emit a relocation
5986 then we just want to put out any pipeline compensation that the linker
5987 will need. Otherwise we want to use the calculated base. */
5988 if (fixP->fx_pcrel
5989 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5990 || aarch64_force_relocation (fixP)))
5991 base = 0;
5992
5993 /* AArch64 should be consistent for all pc-relative relocations. */
5994 return base + AARCH64_PCREL_OFFSET;
5995 }
5996
5997 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
5998 Otherwise we have no need to default values of symbols. */
5999
6000 symbolS *
6001 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6002 {
6003 #ifdef OBJ_ELF
6004 if (name[0] == '_' && name[1] == 'G'
6005 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6006 {
6007 if (!GOT_symbol)
6008 {
6009 if (symbol_find (name))
6010 as_bad (_("GOT already in the symbol table"));
6011
6012 GOT_symbol = symbol_new (name, undefined_section,
6013 (valueT) 0, &zero_address_frag);
6014 }
6015
6016 return GOT_symbol;
6017 }
6018 #endif
6019
6020 return 0;
6021 }
6022
6023 /* Return non-zero if the indicated VALUE has overflowed the maximum
6024 range expressible by a unsigned number with the indicated number of
6025 BITS. */
6026
6027 static bfd_boolean
6028 unsigned_overflow (valueT value, unsigned bits)
6029 {
6030 valueT lim;
6031 if (bits >= sizeof (valueT) * 8)
6032 return FALSE;
6033 lim = (valueT) 1 << bits;
6034 return (value >= lim);
6035 }
6036
6037
6038 /* Return non-zero if the indicated VALUE has overflowed the maximum
6039 range expressible by an signed number with the indicated number of
6040 BITS. */
6041
6042 static bfd_boolean
6043 signed_overflow (offsetT value, unsigned bits)
6044 {
6045 offsetT lim;
6046 if (bits >= sizeof (offsetT) * 8)
6047 return FALSE;
6048 lim = (offsetT) 1 << (bits - 1);
6049 return (value < -lim || value >= lim);
6050 }
6051
6052 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6053 unsigned immediate offset load/store instruction, try to encode it as
6054 an unscaled, 9-bit, signed immediate offset load/store instruction.
6055 Return TRUE if it is successful; otherwise return FALSE.
6056
6057 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6058 in response to the standard LDR/STR mnemonics when the immediate offset is
6059 unambiguous, i.e. when it is negative or unaligned. */
6060
6061 static bfd_boolean
6062 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6063 {
6064 int idx;
6065 enum aarch64_op new_op;
6066 const aarch64_opcode *new_opcode;
6067
6068 gas_assert (instr->opcode->iclass == ldst_pos);
6069
6070 switch (instr->opcode->op)
6071 {
6072 case OP_LDRB_POS:new_op = OP_LDURB; break;
6073 case OP_STRB_POS: new_op = OP_STURB; break;
6074 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6075 case OP_LDRH_POS: new_op = OP_LDURH; break;
6076 case OP_STRH_POS: new_op = OP_STURH; break;
6077 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6078 case OP_LDR_POS: new_op = OP_LDUR; break;
6079 case OP_STR_POS: new_op = OP_STUR; break;
6080 case OP_LDRF_POS: new_op = OP_LDURV; break;
6081 case OP_STRF_POS: new_op = OP_STURV; break;
6082 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6083 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6084 default: new_op = OP_NIL; break;
6085 }
6086
6087 if (new_op == OP_NIL)
6088 return FALSE;
6089
6090 new_opcode = aarch64_get_opcode (new_op);
6091 gas_assert (new_opcode != NULL);
6092
6093 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6094 instr->opcode->op, new_opcode->op);
6095
6096 aarch64_replace_opcode (instr, new_opcode);
6097
6098 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6099 qualifier matching may fail because the out-of-date qualifier will
6100 prevent the operand being updated with a new and correct qualifier. */
6101 idx = aarch64_operand_index (instr->opcode->operands,
6102 AARCH64_OPND_ADDR_SIMM9);
6103 gas_assert (idx == 1);
6104 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6105
6106 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6107
6108 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6109 return FALSE;
6110
6111 return TRUE;
6112 }
6113
6114 /* Called by fix_insn to fix a MOV immediate alias instruction.
6115
6116 Operand for a generic move immediate instruction, which is an alias
6117 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
6118 a 32-bit/64-bit immediate value into general register. An assembler error
6119 shall result if the immediate cannot be created by a single one of these
6120 instructions. If there is a choice, then to ensure reversability an
6121 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6122
6123 static void
6124 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6125 {
6126 const aarch64_opcode *opcode;
6127
6128 /* Need to check if the destination is SP/ZR. The check has to be done
6129 before any aarch64_replace_opcode. */
6130 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6131 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6132
6133 instr->operands[1].imm.value = value;
6134 instr->operands[1].skip = 0;
6135
6136 if (try_mov_wide_p)
6137 {
6138 /* Try the MOVZ alias. */
6139 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6140 aarch64_replace_opcode (instr, opcode);
6141 if (aarch64_opcode_encode (instr->opcode, instr,
6142 &instr->value, NULL, NULL))
6143 {
6144 put_aarch64_insn (buf, instr->value);
6145 return;
6146 }
6147 /* Try the MOVK alias. */
6148 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6149 aarch64_replace_opcode (instr, opcode);
6150 if (aarch64_opcode_encode (instr->opcode, instr,
6151 &instr->value, NULL, NULL))
6152 {
6153 put_aarch64_insn (buf, instr->value);
6154 return;
6155 }
6156 }
6157
6158 if (try_mov_bitmask_p)
6159 {
6160 /* Try the ORR alias. */
6161 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6162 aarch64_replace_opcode (instr, opcode);
6163 if (aarch64_opcode_encode (instr->opcode, instr,
6164 &instr->value, NULL, NULL))
6165 {
6166 put_aarch64_insn (buf, instr->value);
6167 return;
6168 }
6169 }
6170
6171 as_bad_where (fixP->fx_file, fixP->fx_line,
6172 _("immediate cannot be moved by a single instruction"));
6173 }
6174
6175 /* An instruction operand which is immediate related may have symbol used
6176 in the assembly, e.g.
6177
6178 mov w0, u32
6179 .set u32, 0x00ffff00
6180
6181 At the time when the assembly instruction is parsed, a referenced symbol,
6182 like 'u32' in the above example may not have been seen; a fixS is created
6183 in such a case and is handled here after symbols have been resolved.
6184 Instruction is fixed up with VALUE using the information in *FIXP plus
6185 extra information in FLAGS.
6186
6187 This function is called by md_apply_fix to fix up instructions that need
6188 a fix-up described above but does not involve any linker-time relocation. */
6189
6190 static void
6191 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6192 {
6193 int idx;
6194 uint32_t insn;
6195 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6196 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6197 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6198
6199 if (new_inst)
6200 {
6201 /* Now the instruction is about to be fixed-up, so the operand that
6202 was previously marked as 'ignored' needs to be unmarked in order
6203 to get the encoding done properly. */
6204 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6205 new_inst->operands[idx].skip = 0;
6206 }
6207
6208 gas_assert (opnd != AARCH64_OPND_NIL);
6209
6210 switch (opnd)
6211 {
6212 case AARCH64_OPND_EXCEPTION:
6213 if (unsigned_overflow (value, 16))
6214 as_bad_where (fixP->fx_file, fixP->fx_line,
6215 _("immediate out of range"));
6216 insn = get_aarch64_insn (buf);
6217 insn |= encode_svc_imm (value);
6218 put_aarch64_insn (buf, insn);
6219 break;
6220
6221 case AARCH64_OPND_AIMM:
6222 /* ADD or SUB with immediate.
6223 NOTE this assumes we come here with a add/sub shifted reg encoding
6224 3 322|2222|2 2 2 21111 111111
6225 1 098|7654|3 2 1 09876 543210 98765 43210
6226 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6227 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6228 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6229 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6230 ->
6231 3 322|2222|2 2 221111111111
6232 1 098|7654|3 2 109876543210 98765 43210
6233 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6234 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6235 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6236 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6237 Fields sf Rn Rd are already set. */
6238 insn = get_aarch64_insn (buf);
6239 if (value < 0)
6240 {
6241 /* Add <-> sub. */
6242 insn = reencode_addsub_switch_add_sub (insn);
6243 value = -value;
6244 }
6245
6246 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6247 && unsigned_overflow (value, 12))
6248 {
6249 /* Try to shift the value by 12 to make it fit. */
6250 if (((value >> 12) << 12) == value
6251 && ! unsigned_overflow (value, 12 + 12))
6252 {
6253 value >>= 12;
6254 insn |= encode_addsub_imm_shift_amount (1);
6255 }
6256 }
6257
6258 if (unsigned_overflow (value, 12))
6259 as_bad_where (fixP->fx_file, fixP->fx_line,
6260 _("immediate out of range"));
6261
6262 insn |= encode_addsub_imm (value);
6263
6264 put_aarch64_insn (buf, insn);
6265 break;
6266
6267 case AARCH64_OPND_SIMD_IMM:
6268 case AARCH64_OPND_SIMD_IMM_SFT:
6269 case AARCH64_OPND_LIMM:
6270 /* Bit mask immediate. */
6271 gas_assert (new_inst != NULL);
6272 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6273 new_inst->operands[idx].imm.value = value;
6274 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6275 &new_inst->value, NULL, NULL))
6276 put_aarch64_insn (buf, new_inst->value);
6277 else
6278 as_bad_where (fixP->fx_file, fixP->fx_line,
6279 _("invalid immediate"));
6280 break;
6281
6282 case AARCH64_OPND_HALF:
6283 /* 16-bit unsigned immediate. */
6284 if (unsigned_overflow (value, 16))
6285 as_bad_where (fixP->fx_file, fixP->fx_line,
6286 _("immediate out of range"));
6287 insn = get_aarch64_insn (buf);
6288 insn |= encode_movw_imm (value & 0xffff);
6289 put_aarch64_insn (buf, insn);
6290 break;
6291
6292 case AARCH64_OPND_IMM_MOV:
6293 /* Operand for a generic move immediate instruction, which is
6294 an alias instruction that generates a single MOVZ, MOVN or ORR
6295 instruction to loads a 32-bit/64-bit immediate value into general
6296 register. An assembler error shall result if the immediate cannot be
6297 created by a single one of these instructions. If there is a choice,
6298 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
6299 and MOVZ or MOVN to ORR. */
6300 gas_assert (new_inst != NULL);
6301 fix_mov_imm_insn (fixP, buf, new_inst, value);
6302 break;
6303
6304 case AARCH64_OPND_ADDR_SIMM7:
6305 case AARCH64_OPND_ADDR_SIMM9:
6306 case AARCH64_OPND_ADDR_SIMM9_2:
6307 case AARCH64_OPND_ADDR_UIMM12:
6308 /* Immediate offset in an address. */
6309 insn = get_aarch64_insn (buf);
6310
6311 gas_assert (new_inst != NULL && new_inst->value == insn);
6312 gas_assert (new_inst->opcode->operands[1] == opnd
6313 || new_inst->opcode->operands[2] == opnd);
6314
6315 /* Get the index of the address operand. */
6316 if (new_inst->opcode->operands[1] == opnd)
6317 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6318 idx = 1;
6319 else
6320 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6321 idx = 2;
6322
6323 /* Update the resolved offset value. */
6324 new_inst->operands[idx].addr.offset.imm = value;
6325
6326 /* Encode/fix-up. */
6327 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6328 &new_inst->value, NULL, NULL))
6329 {
6330 put_aarch64_insn (buf, new_inst->value);
6331 break;
6332 }
6333 else if (new_inst->opcode->iclass == ldst_pos
6334 && try_to_encode_as_unscaled_ldst (new_inst))
6335 {
6336 put_aarch64_insn (buf, new_inst->value);
6337 break;
6338 }
6339
6340 as_bad_where (fixP->fx_file, fixP->fx_line,
6341 _("immediate offset out of range"));
6342 break;
6343
6344 default:
6345 gas_assert (0);
6346 as_fatal (_("unhandled operand code %d"), opnd);
6347 }
6348 }
6349
6350 /* Apply a fixup (fixP) to segment data, once it has been determined
6351 by our caller that we have all the info we need to fix it up.
6352
6353 Parameter valP is the pointer to the value of the bits. */
6354
6355 void
6356 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6357 {
6358 offsetT value = *valP;
6359 uint32_t insn;
6360 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6361 int scale;
6362 unsigned flags = fixP->fx_addnumber;
6363
6364 DEBUG_TRACE ("\n\n");
6365 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6366 DEBUG_TRACE ("Enter md_apply_fix");
6367
6368 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6369
6370 /* Note whether this will delete the relocation. */
6371
6372 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6373 fixP->fx_done = 1;
6374
6375 /* Process the relocations. */
6376 switch (fixP->fx_r_type)
6377 {
6378 case BFD_RELOC_NONE:
6379 /* This will need to go in the object file. */
6380 fixP->fx_done = 0;
6381 break;
6382
6383 case BFD_RELOC_8:
6384 case BFD_RELOC_8_PCREL:
6385 if (fixP->fx_done || !seg->use_rela_p)
6386 md_number_to_chars (buf, value, 1);
6387 break;
6388
6389 case BFD_RELOC_16:
6390 case BFD_RELOC_16_PCREL:
6391 if (fixP->fx_done || !seg->use_rela_p)
6392 md_number_to_chars (buf, value, 2);
6393 break;
6394
6395 case BFD_RELOC_32:
6396 case BFD_RELOC_32_PCREL:
6397 if (fixP->fx_done || !seg->use_rela_p)
6398 md_number_to_chars (buf, value, 4);
6399 break;
6400
6401 case BFD_RELOC_64:
6402 case BFD_RELOC_64_PCREL:
6403 if (fixP->fx_done || !seg->use_rela_p)
6404 md_number_to_chars (buf, value, 8);
6405 break;
6406
6407 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6408 /* We claim that these fixups have been processed here, even if
6409 in fact we generate an error because we do not have a reloc
6410 for them, so tc_gen_reloc() will reject them. */
6411 fixP->fx_done = 1;
6412 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6413 {
6414 as_bad_where (fixP->fx_file, fixP->fx_line,
6415 _("undefined symbol %s used as an immediate value"),
6416 S_GET_NAME (fixP->fx_addsy));
6417 goto apply_fix_return;
6418 }
6419 fix_insn (fixP, flags, value);
6420 break;
6421
6422 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6423 if (fixP->fx_done || !seg->use_rela_p)
6424 {
6425 if (value & 3)
6426 as_bad_where (fixP->fx_file, fixP->fx_line,
6427 _("pc-relative load offset not word aligned"));
6428 if (signed_overflow (value, 21))
6429 as_bad_where (fixP->fx_file, fixP->fx_line,
6430 _("pc-relative load offset out of range"));
6431 insn = get_aarch64_insn (buf);
6432 insn |= encode_ld_lit_ofs_19 (value >> 2);
6433 put_aarch64_insn (buf, insn);
6434 }
6435 break;
6436
6437 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6438 if (fixP->fx_done || !seg->use_rela_p)
6439 {
6440 if (signed_overflow (value, 21))
6441 as_bad_where (fixP->fx_file, fixP->fx_line,
6442 _("pc-relative address offset out of range"));
6443 insn = get_aarch64_insn (buf);
6444 insn |= encode_adr_imm (value);
6445 put_aarch64_insn (buf, insn);
6446 }
6447 break;
6448
6449 case BFD_RELOC_AARCH64_BRANCH19:
6450 if (fixP->fx_done || !seg->use_rela_p)
6451 {
6452 if (value & 3)
6453 as_bad_where (fixP->fx_file, fixP->fx_line,
6454 _("conditional branch target not word aligned"));
6455 if (signed_overflow (value, 21))
6456 as_bad_where (fixP->fx_file, fixP->fx_line,
6457 _("conditional branch out of range"));
6458 insn = get_aarch64_insn (buf);
6459 insn |= encode_cond_branch_ofs_19 (value >> 2);
6460 put_aarch64_insn (buf, insn);
6461 }
6462 break;
6463
6464 case BFD_RELOC_AARCH64_TSTBR14:
6465 if (fixP->fx_done || !seg->use_rela_p)
6466 {
6467 if (value & 3)
6468 as_bad_where (fixP->fx_file, fixP->fx_line,
6469 _("conditional branch target not word aligned"));
6470 if (signed_overflow (value, 16))
6471 as_bad_where (fixP->fx_file, fixP->fx_line,
6472 _("conditional branch out of range"));
6473 insn = get_aarch64_insn (buf);
6474 insn |= encode_tst_branch_ofs_14 (value >> 2);
6475 put_aarch64_insn (buf, insn);
6476 }
6477 break;
6478
6479 case BFD_RELOC_AARCH64_JUMP26:
6480 case BFD_RELOC_AARCH64_CALL26:
6481 if (fixP->fx_done || !seg->use_rela_p)
6482 {
6483 if (value & 3)
6484 as_bad_where (fixP->fx_file, fixP->fx_line,
6485 _("branch target not word aligned"));
6486 if (signed_overflow (value, 28))
6487 as_bad_where (fixP->fx_file, fixP->fx_line,
6488 _("branch out of range"));
6489 insn = get_aarch64_insn (buf);
6490 insn |= encode_branch_ofs_26 (value >> 2);
6491 put_aarch64_insn (buf, insn);
6492 }
6493 break;
6494
6495 case BFD_RELOC_AARCH64_MOVW_G0:
6496 case BFD_RELOC_AARCH64_MOVW_G0_S:
6497 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6498 scale = 0;
6499 goto movw_common;
6500 case BFD_RELOC_AARCH64_MOVW_G1:
6501 case BFD_RELOC_AARCH64_MOVW_G1_S:
6502 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6503 scale = 16;
6504 goto movw_common;
6505 case BFD_RELOC_AARCH64_MOVW_G2:
6506 case BFD_RELOC_AARCH64_MOVW_G2_S:
6507 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6508 scale = 32;
6509 goto movw_common;
6510 case BFD_RELOC_AARCH64_MOVW_G3:
6511 scale = 48;
6512 movw_common:
6513 if (fixP->fx_done || !seg->use_rela_p)
6514 {
6515 insn = get_aarch64_insn (buf);
6516
6517 if (!fixP->fx_done)
6518 {
6519 /* REL signed addend must fit in 16 bits */
6520 if (signed_overflow (value, 16))
6521 as_bad_where (fixP->fx_file, fixP->fx_line,
6522 _("offset out of range"));
6523 }
6524 else
6525 {
6526 /* Check for overflow and scale. */
6527 switch (fixP->fx_r_type)
6528 {
6529 case BFD_RELOC_AARCH64_MOVW_G0:
6530 case BFD_RELOC_AARCH64_MOVW_G1:
6531 case BFD_RELOC_AARCH64_MOVW_G2:
6532 case BFD_RELOC_AARCH64_MOVW_G3:
6533 if (unsigned_overflow (value, scale + 16))
6534 as_bad_where (fixP->fx_file, fixP->fx_line,
6535 _("unsigned value out of range"));
6536 break;
6537 case BFD_RELOC_AARCH64_MOVW_G0_S:
6538 case BFD_RELOC_AARCH64_MOVW_G1_S:
6539 case BFD_RELOC_AARCH64_MOVW_G2_S:
6540 /* NOTE: We can only come here with movz or movn. */
6541 if (signed_overflow (value, scale + 16))
6542 as_bad_where (fixP->fx_file, fixP->fx_line,
6543 _("signed value out of range"));
6544 if (value < 0)
6545 {
6546 /* Force use of MOVN. */
6547 value = ~value;
6548 insn = reencode_movzn_to_movn (insn);
6549 }
6550 else
6551 {
6552 /* Force use of MOVZ. */
6553 insn = reencode_movzn_to_movz (insn);
6554 }
6555 break;
6556 default:
6557 /* Unchecked relocations. */
6558 break;
6559 }
6560 value >>= scale;
6561 }
6562
6563 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6564 insn |= encode_movw_imm (value & 0xffff);
6565
6566 put_aarch64_insn (buf, insn);
6567 }
6568 break;
6569
6570 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6571 fixP->fx_r_type = (ilp32_p
6572 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6573 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6574 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6575 /* Should always be exported to object file, see
6576 aarch64_force_relocation(). */
6577 gas_assert (!fixP->fx_done);
6578 gas_assert (seg->use_rela_p);
6579 break;
6580
6581 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6582 fixP->fx_r_type = (ilp32_p
6583 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6584 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6585 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6586 /* Should always be exported to object file, see
6587 aarch64_force_relocation(). */
6588 gas_assert (!fixP->fx_done);
6589 gas_assert (seg->use_rela_p);
6590 break;
6591
6592 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6593 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6594 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6595 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6596 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6597 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6598 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6599 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6600 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6601 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6602 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6603 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6604 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6605 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6606 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6607 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6608 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6609 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6610 /* Should always be exported to object file, see
6611 aarch64_force_relocation(). */
6612 gas_assert (!fixP->fx_done);
6613 gas_assert (seg->use_rela_p);
6614 break;
6615
6616 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6617 /* Should always be exported to object file, see
6618 aarch64_force_relocation(). */
6619 fixP->fx_r_type = (ilp32_p
6620 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6621 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6622 gas_assert (!fixP->fx_done);
6623 gas_assert (seg->use_rela_p);
6624 break;
6625
6626 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6627 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6628 case BFD_RELOC_AARCH64_ADD_LO12:
6629 case BFD_RELOC_AARCH64_LDST8_LO12:
6630 case BFD_RELOC_AARCH64_LDST16_LO12:
6631 case BFD_RELOC_AARCH64_LDST32_LO12:
6632 case BFD_RELOC_AARCH64_LDST64_LO12:
6633 case BFD_RELOC_AARCH64_LDST128_LO12:
6634 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6635 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6636 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6637 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6638 /* Should always be exported to object file, see
6639 aarch64_force_relocation(). */
6640 gas_assert (!fixP->fx_done);
6641 gas_assert (seg->use_rela_p);
6642 break;
6643
6644 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6645 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6646 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6647 break;
6648
6649 case BFD_RELOC_UNUSED:
6650 /* An error will already have been reported. */
6651 break;
6652
6653 default:
6654 as_bad_where (fixP->fx_file, fixP->fx_line,
6655 _("unexpected %s fixup"),
6656 bfd_get_reloc_code_name (fixP->fx_r_type));
6657 break;
6658 }
6659
6660 apply_fix_return:
6661 /* Free the allocated the struct aarch64_inst.
6662 N.B. currently there are very limited number of fix-up types actually use
6663 this field, so the impact on the performance should be minimal . */
6664 if (fixP->tc_fix_data.inst != NULL)
6665 free (fixP->tc_fix_data.inst);
6666
6667 return;
6668 }
6669
6670 /* Translate internal representation of relocation info to BFD target
6671 format. */
6672
6673 arelent *
6674 tc_gen_reloc (asection * section, fixS * fixp)
6675 {
6676 arelent *reloc;
6677 bfd_reloc_code_real_type code;
6678
6679 reloc = xmalloc (sizeof (arelent));
6680
6681 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6682 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6683 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6684
6685 if (fixp->fx_pcrel)
6686 {
6687 if (section->use_rela_p)
6688 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6689 else
6690 fixp->fx_offset = reloc->address;
6691 }
6692 reloc->addend = fixp->fx_offset;
6693
6694 code = fixp->fx_r_type;
6695 switch (code)
6696 {
6697 case BFD_RELOC_16:
6698 if (fixp->fx_pcrel)
6699 code = BFD_RELOC_16_PCREL;
6700 break;
6701
6702 case BFD_RELOC_32:
6703 if (fixp->fx_pcrel)
6704 code = BFD_RELOC_32_PCREL;
6705 break;
6706
6707 case BFD_RELOC_64:
6708 if (fixp->fx_pcrel)
6709 code = BFD_RELOC_64_PCREL;
6710 break;
6711
6712 default:
6713 break;
6714 }
6715
6716 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6717 if (reloc->howto == NULL)
6718 {
6719 as_bad_where (fixp->fx_file, fixp->fx_line,
6720 _
6721 ("cannot represent %s relocation in this object file format"),
6722 bfd_get_reloc_code_name (code));
6723 return NULL;
6724 }
6725
6726 return reloc;
6727 }
6728
6729 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6730
6731 void
6732 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6733 {
6734 bfd_reloc_code_real_type type;
6735 int pcrel = 0;
6736
6737 /* Pick a reloc.
6738 FIXME: @@ Should look at CPU word size. */
6739 switch (size)
6740 {
6741 case 1:
6742 type = BFD_RELOC_8;
6743 break;
6744 case 2:
6745 type = BFD_RELOC_16;
6746 break;
6747 case 4:
6748 type = BFD_RELOC_32;
6749 break;
6750 case 8:
6751 type = BFD_RELOC_64;
6752 break;
6753 default:
6754 as_bad (_("cannot do %u-byte relocation"), size);
6755 type = BFD_RELOC_UNUSED;
6756 break;
6757 }
6758
6759 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6760 }
6761
6762 int
6763 aarch64_force_relocation (struct fix *fixp)
6764 {
6765 switch (fixp->fx_r_type)
6766 {
6767 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6768 /* Perform these "immediate" internal relocations
6769 even if the symbol is extern or weak. */
6770 return 0;
6771
6772 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6773 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6774 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6775 /* Pseudo relocs that need to be fixed up according to
6776 ilp32_p. */
6777 return 0;
6778
6779 case BFD_RELOC_AARCH64_ADD_LO12:
6780 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6781 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6782 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6783 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6784 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6785 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6786 case BFD_RELOC_AARCH64_LDST128_LO12:
6787 case BFD_RELOC_AARCH64_LDST16_LO12:
6788 case BFD_RELOC_AARCH64_LDST32_LO12:
6789 case BFD_RELOC_AARCH64_LDST64_LO12:
6790 case BFD_RELOC_AARCH64_LDST8_LO12:
6791 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6792 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6793 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6794 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6795 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6796 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6797 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6798 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6799 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6800 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6801 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6802 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6803 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6804 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6805 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6806 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6807 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6808 /* Always leave these relocations for the linker. */
6809 return 1;
6810
6811 default:
6812 break;
6813 }
6814
6815 return generic_force_reloc (fixp);
6816 }
6817
6818 #ifdef OBJ_ELF
6819
6820 const char *
6821 elf64_aarch64_target_format (void)
6822 {
6823 if (target_big_endian)
6824 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6825 else
6826 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6827 }
6828
6829 void
6830 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6831 {
6832 elf_frob_symbol (symp, puntp);
6833 }
6834 #endif
6835
6836 /* MD interface: Finalization. */
6837
6838 /* A good place to do this, although this was probably not intended
6839 for this kind of use. We need to dump the literal pool before
6840 references are made to a null symbol pointer. */
6841
6842 void
6843 aarch64_cleanup (void)
6844 {
6845 literal_pool *pool;
6846
6847 for (pool = list_of_pools; pool; pool = pool->next)
6848 {
6849 /* Put it at the end of the relevant section. */
6850 subseg_set (pool->section, pool->sub_section);
6851 s_ltorg (0);
6852 }
6853 }
6854
6855 #ifdef OBJ_ELF
6856 /* Remove any excess mapping symbols generated for alignment frags in
6857 SEC. We may have created a mapping symbol before a zero byte
6858 alignment; remove it if there's a mapping symbol after the
6859 alignment. */
6860 static void
6861 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6862 void *dummy ATTRIBUTE_UNUSED)
6863 {
6864 segment_info_type *seginfo = seg_info (sec);
6865 fragS *fragp;
6866
6867 if (seginfo == NULL || seginfo->frchainP == NULL)
6868 return;
6869
6870 for (fragp = seginfo->frchainP->frch_root;
6871 fragp != NULL; fragp = fragp->fr_next)
6872 {
6873 symbolS *sym = fragp->tc_frag_data.last_map;
6874 fragS *next = fragp->fr_next;
6875
6876 /* Variable-sized frags have been converted to fixed size by
6877 this point. But if this was variable-sized to start with,
6878 there will be a fixed-size frag after it. So don't handle
6879 next == NULL. */
6880 if (sym == NULL || next == NULL)
6881 continue;
6882
6883 if (S_GET_VALUE (sym) < next->fr_address)
6884 /* Not at the end of this frag. */
6885 continue;
6886 know (S_GET_VALUE (sym) == next->fr_address);
6887
6888 do
6889 {
6890 if (next->tc_frag_data.first_map != NULL)
6891 {
6892 /* Next frag starts with a mapping symbol. Discard this
6893 one. */
6894 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6895 break;
6896 }
6897
6898 if (next->fr_next == NULL)
6899 {
6900 /* This mapping symbol is at the end of the section. Discard
6901 it. */
6902 know (next->fr_fix == 0 && next->fr_var == 0);
6903 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6904 break;
6905 }
6906
6907 /* As long as we have empty frags without any mapping symbols,
6908 keep looking. */
6909 /* If the next frag is non-empty and does not start with a
6910 mapping symbol, then this mapping symbol is required. */
6911 if (next->fr_address != next->fr_next->fr_address)
6912 break;
6913
6914 next = next->fr_next;
6915 }
6916 while (next != NULL);
6917 }
6918 }
6919 #endif
6920
6921 /* Adjust the symbol table. */
6922
6923 void
6924 aarch64_adjust_symtab (void)
6925 {
6926 #ifdef OBJ_ELF
6927 /* Remove any overlapping mapping symbols generated by alignment frags. */
6928 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6929 /* Now do generic ELF adjustments. */
6930 elf_adjust_symtab ();
6931 #endif
6932 }
6933
6934 static void
6935 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6936 {
6937 const char *hash_err;
6938
6939 hash_err = hash_insert (table, key, value);
6940 if (hash_err)
6941 printf ("Internal Error: Can't hash %s\n", key);
6942 }
6943
6944 static void
6945 fill_instruction_hash_table (void)
6946 {
6947 aarch64_opcode *opcode = aarch64_opcode_table;
6948
6949 while (opcode->name != NULL)
6950 {
6951 templates *templ, *new_templ;
6952 templ = hash_find (aarch64_ops_hsh, opcode->name);
6953
6954 new_templ = (templates *) xmalloc (sizeof (templates));
6955 new_templ->opcode = opcode;
6956 new_templ->next = NULL;
6957
6958 if (!templ)
6959 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6960 else
6961 {
6962 new_templ->next = templ->next;
6963 templ->next = new_templ;
6964 }
6965 ++opcode;
6966 }
6967 }
6968
6969 static inline void
6970 convert_to_upper (char *dst, const char *src, size_t num)
6971 {
6972 unsigned int i;
6973 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6974 *dst = TOUPPER (*src);
6975 *dst = '\0';
6976 }
6977
6978 /* Assume STR point to a lower-case string, allocate, convert and return
6979 the corresponding upper-case string. */
6980 static inline const char*
6981 get_upper_str (const char *str)
6982 {
6983 char *ret;
6984 size_t len = strlen (str);
6985 if ((ret = xmalloc (len + 1)) == NULL)
6986 abort ();
6987 convert_to_upper (ret, str, len);
6988 return ret;
6989 }
6990
6991 /* MD interface: Initialization. */
6992
6993 void
6994 md_begin (void)
6995 {
6996 unsigned mach;
6997 unsigned int i;
6998
6999 if ((aarch64_ops_hsh = hash_new ()) == NULL
7000 || (aarch64_cond_hsh = hash_new ()) == NULL
7001 || (aarch64_shift_hsh = hash_new ()) == NULL
7002 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7003 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7004 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7005 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7006 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7007 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7008 || (aarch64_reg_hsh = hash_new ()) == NULL
7009 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7010 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7011 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7012 as_fatal (_("virtual memory exhausted"));
7013
7014 fill_instruction_hash_table ();
7015
7016 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7017 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7018 (void *) (aarch64_sys_regs + i));
7019
7020 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7021 checked_hash_insert (aarch64_pstatefield_hsh,
7022 aarch64_pstatefields[i].name,
7023 (void *) (aarch64_pstatefields + i));
7024
7025 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7026 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7027 aarch64_sys_regs_ic[i].template,
7028 (void *) (aarch64_sys_regs_ic + i));
7029
7030 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7031 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7032 aarch64_sys_regs_dc[i].template,
7033 (void *) (aarch64_sys_regs_dc + i));
7034
7035 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7036 checked_hash_insert (aarch64_sys_regs_at_hsh,
7037 aarch64_sys_regs_at[i].template,
7038 (void *) (aarch64_sys_regs_at + i));
7039
7040 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7041 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7042 aarch64_sys_regs_tlbi[i].template,
7043 (void *) (aarch64_sys_regs_tlbi + i));
7044
7045 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7046 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7047 (void *) (reg_names + i));
7048
7049 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7050 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7051 (void *) (nzcv_names + i));
7052
7053 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7054 {
7055 const char *name = aarch64_operand_modifiers[i].name;
7056 checked_hash_insert (aarch64_shift_hsh, name,
7057 (void *) (aarch64_operand_modifiers + i));
7058 /* Also hash the name in the upper case. */
7059 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7060 (void *) (aarch64_operand_modifiers + i));
7061 }
7062
7063 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7064 {
7065 unsigned int j;
7066 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7067 the same condition code. */
7068 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7069 {
7070 const char *name = aarch64_conds[i].names[j];
7071 if (name == NULL)
7072 break;
7073 checked_hash_insert (aarch64_cond_hsh, name,
7074 (void *) (aarch64_conds + i));
7075 /* Also hash the name in the upper case. */
7076 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7077 (void *) (aarch64_conds + i));
7078 }
7079 }
7080
7081 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7082 {
7083 const char *name = aarch64_barrier_options[i].name;
7084 /* Skip xx00 - the unallocated values of option. */
7085 if ((i & 0x3) == 0)
7086 continue;
7087 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7088 (void *) (aarch64_barrier_options + i));
7089 /* Also hash the name in the upper case. */
7090 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7091 (void *) (aarch64_barrier_options + i));
7092 }
7093
7094 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7095 {
7096 const char* name = aarch64_prfops[i].name;
7097 /* Skip the unallocated hint encodings. */
7098 if (name == NULL)
7099 continue;
7100 checked_hash_insert (aarch64_pldop_hsh, name,
7101 (void *) (aarch64_prfops + i));
7102 /* Also hash the name in the upper case. */
7103 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7104 (void *) (aarch64_prfops + i));
7105 }
7106
7107 /* Set the cpu variant based on the command-line options. */
7108 if (!mcpu_cpu_opt)
7109 mcpu_cpu_opt = march_cpu_opt;
7110
7111 if (!mcpu_cpu_opt)
7112 mcpu_cpu_opt = &cpu_default;
7113
7114 cpu_variant = *mcpu_cpu_opt;
7115
7116 /* Record the CPU type. */
7117 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7118
7119 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7120 }
7121
7122 /* Command line processing. */
7123
7124 const char *md_shortopts = "m:";
7125
7126 #ifdef AARCH64_BI_ENDIAN
7127 #define OPTION_EB (OPTION_MD_BASE + 0)
7128 #define OPTION_EL (OPTION_MD_BASE + 1)
7129 #else
7130 #if TARGET_BYTES_BIG_ENDIAN
7131 #define OPTION_EB (OPTION_MD_BASE + 0)
7132 #else
7133 #define OPTION_EL (OPTION_MD_BASE + 1)
7134 #endif
7135 #endif
7136
7137 struct option md_longopts[] = {
7138 #ifdef OPTION_EB
7139 {"EB", no_argument, NULL, OPTION_EB},
7140 #endif
7141 #ifdef OPTION_EL
7142 {"EL", no_argument, NULL, OPTION_EL},
7143 #endif
7144 {NULL, no_argument, NULL, 0}
7145 };
7146
7147 size_t md_longopts_size = sizeof (md_longopts);
7148
7149 struct aarch64_option_table
7150 {
7151 char *option; /* Option name to match. */
7152 char *help; /* Help information. */
7153 int *var; /* Variable to change. */
7154 int value; /* What to change it to. */
7155 char *deprecated; /* If non-null, print this message. */
7156 };
7157
7158 static struct aarch64_option_table aarch64_opts[] = {
7159 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7160 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7161 NULL},
7162 #ifdef DEBUG_AARCH64
7163 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7164 #endif /* DEBUG_AARCH64 */
7165 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7166 NULL},
7167 {"mno-verbose-error", N_("do not output verbose error messages"),
7168 &verbose_error_p, 0, NULL},
7169 {NULL, NULL, NULL, 0, NULL}
7170 };
7171
7172 struct aarch64_cpu_option_table
7173 {
7174 char *name;
7175 const aarch64_feature_set value;
7176 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7177 case. */
7178 const char *canonical_name;
7179 };
7180
7181 /* This list should, at a minimum, contain all the cpu names
7182 recognized by GCC. */
7183 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7184 {"all", AARCH64_ANY, NULL},
7185 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7186 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7187 {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
7188 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7189 {"generic", AARCH64_ARCH_V8, NULL},
7190
7191 /* These two are example CPUs supported in GCC, once we have real
7192 CPUs they will be removed. */
7193 {"example-1", AARCH64_ARCH_V8, NULL},
7194 {"example-2", AARCH64_ARCH_V8, NULL},
7195
7196 {NULL, AARCH64_ARCH_NONE, NULL}
7197 };
7198
7199 struct aarch64_arch_option_table
7200 {
7201 char *name;
7202 const aarch64_feature_set value;
7203 };
7204
7205 /* This list should, at a minimum, contain all the architecture names
7206 recognized by GCC. */
7207 static const struct aarch64_arch_option_table aarch64_archs[] = {
7208 {"all", AARCH64_ANY},
7209 {"armv8-a", AARCH64_ARCH_V8},
7210 {NULL, AARCH64_ARCH_NONE}
7211 };
7212
7213 /* ISA extensions. */
7214 struct aarch64_option_cpu_value_table
7215 {
7216 char *name;
7217 const aarch64_feature_set value;
7218 };
7219
7220 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7221 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7222 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7223 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7224 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7225 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7226 {NULL, AARCH64_ARCH_NONE}
7227 };
7228
7229 struct aarch64_long_option_table
7230 {
7231 char *option; /* Substring to match. */
7232 char *help; /* Help information. */
7233 int (*func) (char *subopt); /* Function to decode sub-option. */
7234 char *deprecated; /* If non-null, print this message. */
7235 };
7236
7237 static int
7238 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7239 {
7240 /* We insist on extensions being added before being removed. We achieve
7241 this by using the ADDING_VALUE variable to indicate whether we are
7242 adding an extension (1) or removing it (0) and only allowing it to
7243 change in the order -1 -> 1 -> 0. */
7244 int adding_value = -1;
7245 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7246
7247 /* Copy the feature set, so that we can modify it. */
7248 *ext_set = **opt_p;
7249 *opt_p = ext_set;
7250
7251 while (str != NULL && *str != 0)
7252 {
7253 const struct aarch64_option_cpu_value_table *opt;
7254 char *ext;
7255 int optlen;
7256
7257 if (*str != '+')
7258 {
7259 as_bad (_("invalid architectural extension"));
7260 return 0;
7261 }
7262
7263 str++;
7264 ext = strchr (str, '+');
7265
7266 if (ext != NULL)
7267 optlen = ext - str;
7268 else
7269 optlen = strlen (str);
7270
7271 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7272 {
7273 if (adding_value != 0)
7274 adding_value = 0;
7275 optlen -= 2;
7276 str += 2;
7277 }
7278 else if (optlen > 0)
7279 {
7280 if (adding_value == -1)
7281 adding_value = 1;
7282 else if (adding_value != 1)
7283 {
7284 as_bad (_("must specify extensions to add before specifying "
7285 "those to remove"));
7286 return FALSE;
7287 }
7288 }
7289
7290 if (optlen == 0)
7291 {
7292 as_bad (_("missing architectural extension"));
7293 return 0;
7294 }
7295
7296 gas_assert (adding_value != -1);
7297
7298 for (opt = aarch64_features; opt->name != NULL; opt++)
7299 if (strncmp (opt->name, str, optlen) == 0)
7300 {
7301 /* Add or remove the extension. */
7302 if (adding_value)
7303 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7304 else
7305 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7306 break;
7307 }
7308
7309 if (opt->name == NULL)
7310 {
7311 as_bad (_("unknown architectural extension `%s'"), str);
7312 return 0;
7313 }
7314
7315 str = ext;
7316 };
7317
7318 return 1;
7319 }
7320
7321 static int
7322 aarch64_parse_cpu (char *str)
7323 {
7324 const struct aarch64_cpu_option_table *opt;
7325 char *ext = strchr (str, '+');
7326 size_t optlen;
7327
7328 if (ext != NULL)
7329 optlen = ext - str;
7330 else
7331 optlen = strlen (str);
7332
7333 if (optlen == 0)
7334 {
7335 as_bad (_("missing cpu name `%s'"), str);
7336 return 0;
7337 }
7338
7339 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7340 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7341 {
7342 mcpu_cpu_opt = &opt->value;
7343 if (ext != NULL)
7344 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7345
7346 return 1;
7347 }
7348
7349 as_bad (_("unknown cpu `%s'"), str);
7350 return 0;
7351 }
7352
7353 static int
7354 aarch64_parse_arch (char *str)
7355 {
7356 const struct aarch64_arch_option_table *opt;
7357 char *ext = strchr (str, '+');
7358 size_t optlen;
7359
7360 if (ext != NULL)
7361 optlen = ext - str;
7362 else
7363 optlen = strlen (str);
7364
7365 if (optlen == 0)
7366 {
7367 as_bad (_("missing architecture name `%s'"), str);
7368 return 0;
7369 }
7370
7371 for (opt = aarch64_archs; opt->name != NULL; opt++)
7372 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7373 {
7374 march_cpu_opt = &opt->value;
7375 if (ext != NULL)
7376 return aarch64_parse_features (ext, &march_cpu_opt);
7377
7378 return 1;
7379 }
7380
7381 as_bad (_("unknown architecture `%s'\n"), str);
7382 return 0;
7383 }
7384
7385 /* ABIs. */
7386 struct aarch64_option_abi_value_table
7387 {
7388 char *name;
7389 enum aarch64_abi_type value;
7390 };
7391
7392 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7393 {"ilp32", AARCH64_ABI_ILP32},
7394 {"lp64", AARCH64_ABI_LP64},
7395 {NULL, 0}
7396 };
7397
7398 static int
7399 aarch64_parse_abi (char *str)
7400 {
7401 const struct aarch64_option_abi_value_table *opt;
7402 size_t optlen = strlen (str);
7403
7404 if (optlen == 0)
7405 {
7406 as_bad (_("missing abi name `%s'"), str);
7407 return 0;
7408 }
7409
7410 for (opt = aarch64_abis; opt->name != NULL; opt++)
7411 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7412 {
7413 aarch64_abi = opt->value;
7414 return 1;
7415 }
7416
7417 as_bad (_("unknown abi `%s'\n"), str);
7418 return 0;
7419 }
7420
7421 static struct aarch64_long_option_table aarch64_long_opts[] = {
7422 #ifdef OBJ_ELF
7423 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7424 aarch64_parse_abi, NULL},
7425 #endif /* OBJ_ELF */
7426 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7427 aarch64_parse_cpu, NULL},
7428 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7429 aarch64_parse_arch, NULL},
7430 {NULL, NULL, 0, NULL}
7431 };
7432
7433 int
7434 md_parse_option (int c, char *arg)
7435 {
7436 struct aarch64_option_table *opt;
7437 struct aarch64_long_option_table *lopt;
7438
7439 switch (c)
7440 {
7441 #ifdef OPTION_EB
7442 case OPTION_EB:
7443 target_big_endian = 1;
7444 break;
7445 #endif
7446
7447 #ifdef OPTION_EL
7448 case OPTION_EL:
7449 target_big_endian = 0;
7450 break;
7451 #endif
7452
7453 case 'a':
7454 /* Listing option. Just ignore these, we don't support additional
7455 ones. */
7456 return 0;
7457
7458 default:
7459 for (opt = aarch64_opts; opt->option != NULL; opt++)
7460 {
7461 if (c == opt->option[0]
7462 && ((arg == NULL && opt->option[1] == 0)
7463 || streq (arg, opt->option + 1)))
7464 {
7465 /* If the option is deprecated, tell the user. */
7466 if (opt->deprecated != NULL)
7467 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7468 arg ? arg : "", _(opt->deprecated));
7469
7470 if (opt->var != NULL)
7471 *opt->var = opt->value;
7472
7473 return 1;
7474 }
7475 }
7476
7477 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7478 {
7479 /* These options are expected to have an argument. */
7480 if (c == lopt->option[0]
7481 && arg != NULL
7482 && strncmp (arg, lopt->option + 1,
7483 strlen (lopt->option + 1)) == 0)
7484 {
7485 /* If the option is deprecated, tell the user. */
7486 if (lopt->deprecated != NULL)
7487 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7488 _(lopt->deprecated));
7489
7490 /* Call the sup-option parser. */
7491 return lopt->func (arg + strlen (lopt->option) - 1);
7492 }
7493 }
7494
7495 return 0;
7496 }
7497
7498 return 1;
7499 }
7500
7501 void
7502 md_show_usage (FILE * fp)
7503 {
7504 struct aarch64_option_table *opt;
7505 struct aarch64_long_option_table *lopt;
7506
7507 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7508
7509 for (opt = aarch64_opts; opt->option != NULL; opt++)
7510 if (opt->help != NULL)
7511 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7512
7513 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7514 if (lopt->help != NULL)
7515 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7516
7517 #ifdef OPTION_EB
7518 fprintf (fp, _("\
7519 -EB assemble code for a big-endian cpu\n"));
7520 #endif
7521
7522 #ifdef OPTION_EL
7523 fprintf (fp, _("\
7524 -EL assemble code for a little-endian cpu\n"));
7525 #endif
7526 }
7527
7528 /* Parse a .cpu directive. */
7529
7530 static void
7531 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7532 {
7533 const struct aarch64_cpu_option_table *opt;
7534 char saved_char;
7535 char *name;
7536 char *ext;
7537 size_t optlen;
7538
7539 name = input_line_pointer;
7540 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7541 input_line_pointer++;
7542 saved_char = *input_line_pointer;
7543 *input_line_pointer = 0;
7544
7545 ext = strchr (name, '+');
7546
7547 if (ext != NULL)
7548 optlen = ext - name;
7549 else
7550 optlen = strlen (name);
7551
7552 /* Skip the first "all" entry. */
7553 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7554 if (strlen (opt->name) == optlen
7555 && strncmp (name, opt->name, optlen) == 0)
7556 {
7557 mcpu_cpu_opt = &opt->value;
7558 if (ext != NULL)
7559 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7560 return;
7561
7562 cpu_variant = *mcpu_cpu_opt;
7563
7564 *input_line_pointer = saved_char;
7565 demand_empty_rest_of_line ();
7566 return;
7567 }
7568 as_bad (_("unknown cpu `%s'"), name);
7569 *input_line_pointer = saved_char;
7570 ignore_rest_of_line ();
7571 }
7572
7573
7574 /* Parse a .arch directive. */
7575
7576 static void
7577 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7578 {
7579 const struct aarch64_arch_option_table *opt;
7580 char saved_char;
7581 char *name;
7582 char *ext;
7583 size_t optlen;
7584
7585 name = input_line_pointer;
7586 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7587 input_line_pointer++;
7588 saved_char = *input_line_pointer;
7589 *input_line_pointer = 0;
7590
7591 ext = strchr (name, '+');
7592
7593 if (ext != NULL)
7594 optlen = ext - name;
7595 else
7596 optlen = strlen (name);
7597
7598 /* Skip the first "all" entry. */
7599 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7600 if (strlen (opt->name) == optlen
7601 && strncmp (name, opt->name, optlen) == 0)
7602 {
7603 mcpu_cpu_opt = &opt->value;
7604 if (ext != NULL)
7605 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7606 return;
7607
7608 cpu_variant = *mcpu_cpu_opt;
7609
7610 *input_line_pointer = saved_char;
7611 demand_empty_rest_of_line ();
7612 return;
7613 }
7614
7615 as_bad (_("unknown architecture `%s'\n"), name);
7616 *input_line_pointer = saved_char;
7617 ignore_rest_of_line ();
7618 }
7619
7620 /* Copy symbol information. */
7621
7622 void
7623 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7624 {
7625 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7626 }
This page took 0.190178 seconds and 4 git commands to generate.